# -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
-Modified by The Crew
-Copyright (C) 2019 The Crew
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import os
import sys
import traceback
import base64
from kodi_six import xbmc, xbmcaddon, xbmcvfs
import six
from six.moves import urllib_parse, urllib_request, http_cookiejar, html_parser
profile = functions_dir = xbmc.translatePath(xbmcaddon.Addon().getAddonInfo('profile'))
try: from sqlite3 import dbapi2 as database
except ImportError: from pysqlite2 import dbapi2 as database
from resources.lib.modules import client
from resources.lib.modules import control
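# This module implements the add-on's "universal regex resolver": a stream is
# described by an XML-like block of <regex> definitions, and the result of
# each definition is spliced into a template URL via $doregex[name]
# placeholders before playback. A minimal, purely illustrative definition
# (the name and URL here are made up) looks like:
#
#   $doregex[myregex]
#   <regex>
#   <name>myregex</name>
#   <page>http://example.com/embed</page>
#   <expres>file: "(.+?)"</expres>
#   </regex>
#
# fetch()/insert()/clear() below maintain the sqlite cache (regex.db) that
# maps a regex identifier to its cached response.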
def fetch(regex):
try:
cacheFile = os.path.join(control.dataPath, 'regex.db')
dbcon = database.connect(cacheFile)
dbcur = dbcon.cursor()
dbcur.execute("SELECT * FROM regex WHERE regex = '%s'" % regex)
regex = dbcur.fetchone()[1]
return regex
except Exception:
return
def insert(data):
try:
control.makeFile(control.dataPath)
cacheFile = os.path.join(control.dataPath, 'regex.db')
dbcon = database.connect(cacheFile)
dbcur = dbcon.cursor()
dbcur.execute("CREATE TABLE IF NOT EXISTS regex (""regex TEXT, ""response TEXT, ""UNIQUE(regex)"");")
for i in data:
try:
dbcur.execute("INSERT INTO regex Values (?, ?)", (i['regex'], i['response']))
except Exception:
pass
dbcon.commit()
except Exception:
return
def clear():
try:
cacheFile = os.path.join(control.dataPath, 'regex.db')
dbcon = database.connect(cacheFile)
dbcur = dbcon.cursor()
dbcur.execute("DROP TABLE IF EXISTS regex")
dbcur.execute("VACUUM")
dbcon.commit()
except Exception:
pass
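# resolve() takes the raw definition text: CDATA payloads are percent-encoded
# so the tag-scraping patterns below cannot trip over them, every <regex>
# element is flattened into a dict keyed by its <name>, and the text before
# the first <regex> tag is the template URL handed to getRegexParsed().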
def resolve(regex):
    try:
        vanilla = re.compile('(<regex>.+)', re.MULTILINE | re.DOTALL).findall(regex)[0]
        cddata = re.compile('<\!\[CDATA\[(.+?)\]\]>', re.MULTILINE | re.DOTALL).findall(regex)
        for i in cddata:
            regex = regex.replace('<![CDATA[' + i + ']]>', urllib_parse.quote_plus(i))
        regexs = re.compile('(<regex>.+)', re.MULTILINE | re.DOTALL).findall(regex)[0]
        regexs = re.compile('<regex>(.+?)</regex>', re.MULTILINE | re.DOTALL).findall(regexs)
        regexs = [re.compile('<(.+?)>(.*?)</.+?>', re.MULTILINE | re.DOTALL).findall(i) for i in regexs]
        regexs = [dict([(client.replaceHTMLCodes(x[0]), client.replaceHTMLCodes(urllib_parse.unquote_plus(x[1]))) for x in i]) for i in regexs]
        regexs = [(i['name'], i) for i in regexs]
        regexs = dict(regexs)
        url = regex.split('<regex>', 1)[0].strip()
        url = client.replaceHTMLCodes(url)
        url = six.ensure_str(url)
        r = getRegexParsed(regexs, url)
        try:
            ln = ''
            ret = r[1]
            listrepeat = r[2]['listrepeat']
            regexname = r[2]['name']
            for obj in ret:
                try:
                    item = listrepeat
                    for i in list(range(len(obj) + 1)):
                        item = item.replace('[%s.param%s]' % (regexname, str(i)), obj[i - 1])
                    item2 = vanilla
                    for i in list(range(len(obj) + 1)):
                        item2 = item2.replace('[%s.param%s]' % (regexname, str(i)), obj[i - 1])
                    item2 = re.compile('(<regex>.+?</regex>)', re.MULTILINE | re.DOTALL).findall(item2)
                    item2 = [x for x in item2 if '<name>%s</name>' % regexname not in x]
                    item2 = ''.join(item2)
                    ln += '\n<item>%s\n%s</item>\n' % (item, item2)
                except Exception:
                    pass
            return ln
        except Exception:
            pass
        if r[1] is True:
            return r[0]
    except Exception:
        return
#TC 2/01/19 started
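# Opener component that hands 3xx responses back unmodified, so callers can
# read the Location header themselves instead of following the redirect.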
class NoRedirection(urllib_request.HTTPErrorProcessor):
def http_response(self, request, response):
return response
https_response = http_response
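# getRegexParsed() walks every $doregex[key] placeholder in url: it fetches the
# page named by the matching definition (honouring its proxy/cookie/post/header
# options), applies the definition's expres regex or $pyFunction hook to the
# page, and substitutes the captured value back into url. It returns
# (url, setresolved) at the top level, the bare url on recursive calls, and a
# listrepeat tuple for list-style definitions.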
def getRegexParsed(regexs, url, cookieJar=None, forCookieJarOnly=False, recursiveCall=False, cachedPages={}, rawPost=False, cookie_jar_file=None):  # 0,1,2 = URL, regexOnly, CookieJarOnly
    # NOTE: the mutable cachedPages default is shared across calls, which makes
    # it act as a page cache for the lifetime of the interpreter.
    doRegexs = re.compile('\$doregex\[([^\]]*)\]').findall(url)
setresolved=True
for k in doRegexs:
if k in regexs:
#print 'processing ' ,k
m = regexs[k]
#print m
cookieJarParam=False
if 'cookiejar' in m: # so either create or reuse existing jar
#print 'cookiejar exists',m['cookiejar']
cookieJarParam=m['cookiejar']
if '$doregex' in cookieJarParam:
cookieJar=getRegexParsed(regexs, m['cookiejar'],cookieJar,True, True,cachedPages)
cookieJarParam=True
else:
cookieJarParam=True
#print 'm[cookiejar]',m['cookiejar'],cookieJar
if cookieJarParam:
                if cookieJar is None:
#print 'create cookie jar'
cookie_jar_file=None
if 'open[' in m['cookiejar']:
cookie_jar_file=m['cookiejar'].split('open[')[1].split(']')[0]
# print 'cookieJar from file name',cookie_jar_file
cookieJar=getCookieJar(cookie_jar_file)
# print 'cookieJar from file',cookieJar
if cookie_jar_file:
saveCookieJar(cookieJar,cookie_jar_file)
#cookieJar = http_cookiejar.LWPCookieJar()
#print 'cookieJar new',cookieJar
elif 'save[' in m['cookiejar']:
cookie_jar_file=m['cookiejar'].split('save[')[1].split(']')[0]
complete_path=os.path.join(profile,cookie_jar_file)
# print 'complete_path',complete_path
saveCookieJar(cookieJar,cookie_jar_file)
if m['page'] and '$doregex' in m['page']:
pg=getRegexParsed(regexs, m['page'],cookieJar,recursiveCall=True,cachedPages=cachedPages)
if len(pg)==0:
pg='http://regexfailed'
m['page']=pg
if 'setcookie' in m and m['setcookie'] and '$doregex' in m['setcookie']:
m['setcookie']=getRegexParsed(regexs, m['setcookie'],cookieJar,recursiveCall=True,cachedPages=cachedPages)
if 'appendcookie' in m and m['appendcookie'] and '$doregex' in m['appendcookie']:
m['appendcookie']=getRegexParsed(regexs, m['appendcookie'],cookieJar,recursiveCall=True,cachedPages=cachedPages)
if 'post' in m and '$doregex' in m['post']:
m['post']=getRegexParsed(regexs, m['post'],cookieJar,recursiveCall=True,cachedPages=cachedPages)
# print 'post is now',m['post']
if 'rawpost' in m and '$doregex' in m['rawpost']:
m['rawpost']=getRegexParsed(regexs, m['rawpost'],cookieJar,recursiveCall=True,cachedPages=cachedPages,rawPost=True)
#print 'rawpost is now',m['rawpost']
if 'rawpost' in m and '$epoctime$' in m['rawpost']:
m['rawpost']=m['rawpost'].replace('$epoctime$',getEpocTime())
if 'rawpost' in m and '$epoctime2$' in m['rawpost']:
m['rawpost']=m['rawpost'].replace('$epoctime2$',getEpocTime2())
link=''
            if m['page'] and m['page'] in cachedPages and 'ignorecache' not in m and not forCookieJarOnly:
#print 'using cache page',m['page']
link = cachedPages[m['page']]
else:
if m['page'] and not m['page']=='' and m['page'].startswith('http'):
if '$epoctime$' in m['page']:
m['page']=m['page'].replace('$epoctime$',getEpocTime())
if '$epoctime2$' in m['page']:
m['page']=m['page'].replace('$epoctime2$',getEpocTime2())
#print 'Ingoring Cache',m['page']
page_split=m['page'].split('|')
pageUrl=page_split[0]
header_in_page=None
if len(page_split)>1:
header_in_page=page_split[1]
current_proxies=urllib_request.ProxyHandler(urllib_request.getproxies())
#print 'getting pageUrl',pageUrl
req = urllib_request.Request(pageUrl)
if 'proxy' in m:
proxytouse= m['proxy']
# print 'proxytouse',proxytouse
# urllib_request.getproxies= lambda: {}
if pageUrl[:5]=="https":
proxy = urllib_request.ProxyHandler({ 'https' : proxytouse})
#req.set_proxy(proxytouse, 'https')
else:
proxy = urllib_request.ProxyHandler({ 'http' : proxytouse})
#req.set_proxy(proxytouse, 'http')
opener = urllib_request.build_opener(proxy)
urllib_request.install_opener(opener)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1')
proxytouse=None
if 'referer' in m:
req.add_header('Referer', m['referer'])
if 'accept' in m:
req.add_header('Accept', m['accept'])
if 'agent' in m:
req.add_header('User-agent', m['agent'])
if 'x-req' in m:
req.add_header('X-Requested-With', m['x-req'])
if 'x-addr' in m:
req.add_header('x-addr', m['x-addr'])
if 'x-forward' in m:
req.add_header('X-Forwarded-For', m['x-forward'])
if 'setcookie' in m:
# print 'adding cookie',m['setcookie']
req.add_header('Cookie', m['setcookie'])
if 'appendcookie' in m:
# print 'appending cookie to cookiejar',m['appendcookie']
cookiestoApend=m['appendcookie']
cookiestoApend=cookiestoApend.split(';')
for h in cookiestoApend:
n,v=h.split('=')
w,n= n.split(':')
ck = http_cookiejar.Cookie(version=0, name=n, value=v, port=None, port_specified=False, domain=w, domain_specified=False, domain_initial_dot=False, path='/', path_specified=True, secure=False, expires=None, discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False)
cookieJar.set_cookie(ck)
if 'origin' in m:
req.add_header('Origin', m['origin'])
if header_in_page:
header_in_page=header_in_page.split('&')
for h in header_in_page:
n,v=h.split('=')
req.add_header(n,v)
                    if cookieJar is not None:
                        cookie_handler = urllib_request.HTTPCookieProcessor(cookieJar)
                        opener = urllib_request.build_opener(cookie_handler, urllib_request.HTTPBasicAuthHandler(), urllib_request.HTTPHandler())
                        # install_opener() returns None, so don't assign its result back to opener
                        urllib_request.install_opener(opener)
                        if 'noredirect' in m:
                            opener = urllib_request.build_opener(cookie_handler, NoRedirection, urllib_request.HTTPBasicAuthHandler(), urllib_request.HTTPHandler())
                            urllib_request.install_opener(opener)
                    elif 'noredirect' in m:
                        opener = urllib_request.build_opener(NoRedirection, urllib_request.HTTPBasicAuthHandler(), urllib_request.HTTPHandler())
                        urllib_request.install_opener(opener)
if 'connection' in m:
# print '..........................connection//////.',m['connection']
from keepalive import HTTPHandler
keepalive_handler = HTTPHandler()
opener = urllib_request.build_opener(keepalive_handler)
urllib_request.install_opener(opener)
#print 'after cookie jar'
post=None
if 'post' in m:
postData=m['post']
#if '$LiveStreamRecaptcha' in postData:
# (captcha_challenge,catpcha_word,idfield)=processRecaptcha(m['page'],cookieJar)
# if captcha_challenge:
# postData=postData.replace('$LiveStreamRecaptcha','manual_recaptcha_challenge_field:'+captcha_challenge+',recaptcha_response_field:'+catpcha_word+',id:'+idfield)
                        splitpost = postData.split(',')
                        post = {}
                        for p in splitpost:
                            n = p.split(':')[0]
                            v = p.split(':')[1]
                            post[n] = v
                        post = urllib_parse.urlencode(post)
if 'rawpost' in m:
post=m['rawpost']
#if '$LiveStreamRecaptcha' in post:
# (captcha_challenge,catpcha_word,idfield)=processRecaptcha(m['page'],cookieJar)
# if captcha_challenge:
# post=post.replace('$LiveStreamRecaptcha','&manual_recaptcha_challenge_field='+captcha_challenge+'&recaptcha_response_field='+catpcha_word+'&id='+idfield)
link=''
try:
if post:
response = urllib_request.urlopen(req,post)
else:
response = urllib_request.urlopen(req)
if response.info().get('Content-Encoding') == 'gzip':
import gzip
buf = six.BytesIO( response.read())
f = gzip.GzipFile(fileobj=buf)
link = f.read()
else:
link=response.read()
link = control.six_decode(link)
if 'proxy' in m and not current_proxies is None:
urllib_request.install_opener(urllib_request.build_opener(current_proxies))
link=javascriptUnEscape(link)
#print repr(link)
#print link This just print whole webpage in LOG
if 'includeheaders' in m:
#link+=str(response.headers.get('Set-Cookie'))
link+='$$HEADERS_START$$:'
for b in response.headers:
link+= b+':'+response.headers.get(b)+'\n'
link+='$$HEADERS_END$$:'
# print link
response.close()
except:
pass
cachedPages[m['page']] = link
#print link
#print 'store link for',m['page'],forCookieJarOnly
if forCookieJarOnly:
return cookieJar# do nothing
elif m['page'] and not m['page'].startswith('http'):
if m['page'].startswith('$pyFunction:'):
val=doEval(m['page'].split('$pyFunction:')[1],'',cookieJar,m )
if forCookieJarOnly:
return cookieJar# do nothing
link=val
link=javascriptUnEscape(link)
else:
link=m['page']
if '$doregex' in m['expres']:
m['expres']=getRegexParsed(regexs, m['expres'],cookieJar,recursiveCall=True,cachedPages=cachedPages)
if not m['expres']=='':
#print 'doing it ',m['expres']
if '$LiveStreamCaptcha' in m['expres']:
val=askCaptcha(m,link,cookieJar)
#print 'url and val',url,val
url = url.replace("$doregex[" + k + "]", val)
elif m['expres'].startswith('$pyFunction:') or '#$pyFunction' in m['expres']:
#print 'expeeeeeeeeeeeeeeeeeee',m['expres']
val=''
if m['expres'].startswith('$pyFunction:'):
val=doEval(m['expres'].split('$pyFunction:')[1],link,cookieJar,m)
else:
val=doEvalFunction(m['expres'],link,cookieJar,m)
if 'ActivateWindow' in m['expres']: return
if forCookieJarOnly:
return cookieJar# do nothing
if 'listrepeat' in m:
listrepeat=m['listrepeat']
return listrepeat,eval(val), m,regexs,cookieJar
try:
url = url.replace(u"$doregex[" + k + "]", val)
except: url = url.replace("$doregex[" + k + "]", control.six_decode(val))
else:
if 'listrepeat' in m:
listrepeat=m['listrepeat']
ret=re.findall(m['expres'],link)
return listrepeat,ret, m,regexs
val=''
if not link=='':
#print 'link',link
reg = re.compile(m['expres']).search(link)
try:
val=reg.group(1).strip()
except: traceback.print_exc()
                    elif m['page'] == '' or m['page'] is None:
val=m['expres']
if rawPost:
# print 'rawpost'
val=urllib_parse.quote_plus(val)
                    if 'htmlunescape' in m:
                        #val=urllib_parse.unquote_plus(val)
                        try:
                            val = html_parser.HTMLParser().unescape(val)
                        except AttributeError:
                            # HTMLParser.unescape() was removed in Python 3.9
                            from html import unescape
                            val = unescape(val)
try:
url = url.replace("$doregex[" + k + "]", val)
except: url = url.replace("$doregex[" + k + "]", control.six_decode(val))
#print 'ur',url
#return val
else:
url = url.replace("$doregex[" + k + "]",'')
if '$epoctime$' in url:
url=url.replace('$epoctime$',getEpocTime())
if '$epoctime2$' in url:
url=url.replace('$epoctime2$',getEpocTime2())
if '$GUID$' in url:
import uuid
url=url.replace('$GUID$',str(uuid.uuid1()).upper())
if '$get_cookies$' in url:
url=url.replace('$get_cookies$',getCookiesString(cookieJar))
if recursiveCall: return url
#print 'final url',repr(url)
if url=="":
return
else:
return url,setresolved
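# Decoder for the JavaScript "w,i,s,e" obfuscation used by some hosters: the
# argument string is evaluated into the four variables and fed through
# unwise_func() repeatedly until no packed eval(function(w,i,s,e)...) remains.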
def get_unwise(str_eval):
    page_value = ""
    try:
        # exec() into an explicit namespace: under Python 3 a bare exec()
        # cannot create locals that are visible after the call.
        ns = {}
        exec("w,i,s,e=(" + str_eval + ')', ns)
        page_value = unwise_func(ns['w'], ns['i'], ns['s'], ns['e'])
    except Exception:
        traceback.print_exc(file=sys.stdout)
    return page_value
def unwise_func(w, i, s, e):
    lIll = 0
    ll1I = 0
    Il1l = 0
    ll1l = []
    l1lI = []
    while True:
        if (lIll < 5):
            l1lI.append(w[lIll])
        elif (lIll < len(w)):
            ll1l.append(w[lIll])
        lIll += 1
        if (ll1I < 5):
            l1lI.append(i[ll1I])
        elif (ll1I < len(i)):
            ll1l.append(i[ll1I])
        ll1I += 1
        if (Il1l < 5):
            l1lI.append(s[Il1l])
        elif (Il1l < len(s)):
            ll1l.append(s[Il1l])
        Il1l += 1
        if (len(w) + len(i) + len(s) + len(e) == len(ll1l) + len(l1lI) + len(e)):
            break
    lI1l = ''.join(ll1l)
    I1lI = ''.join(l1lI)
    ll1I = 0
    l1ll = []
    for lIll in list(range(0, len(ll1l), 2)):
        ll11 = -1
        if (ord(I1lI[ll1I]) % 2):
            ll11 = 1
        l1ll.append(chr(int(lI1l[lIll: lIll + 2], 36) - ll11))
        ll1I += 1
        if (ll1I >= len(l1lI)):
            ll1I = 0
    ret = ''.join(l1ll)
    if 'eval(function(w,i,s,e)' in ret:
        # still packed: peel off another layer
        ret = re.compile('eval\(function\(w,i,s,e\).*}\((.*?)\)').findall(ret)[0]
        return get_unwise(ret)
    else:
        return ret
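# Convenience wrapper around unpack(): optionally fetches the page first and
# extracts the packed script with regex_for_text; returns the sentinel strings
# 'NOTPACKED' or 'UNPACKEDFAILED' on failure.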
def get_unpacked( page_value, regex_for_text='', iterations=1, total_iteration=1):
try:
reg_data=None
if page_value.startswith("http"):
page_value= getUrl(page_value)
# print 'page_value',page_value
if regex_for_text and len(regex_for_text)>0:
try:
page_value=re.compile(regex_for_text).findall(page_value)[0] #get the js variable
except: return 'NOTPACKED'
page_value=unpack(page_value,iterations,total_iteration)
except:
page_value='UNPACKEDFAILED'
traceback.print_exc(file=sys.stdout)
# print 'unpacked',page_value
if 'sav1live.tv' in page_value:
page_value=page_value.replace('sav1live.tv','sawlive.tv') #quick fix some bug somewhere
# print 'sav1 unpacked',page_value
return page_value
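# unpack() reverses Dean Edwards style p,a,c,k JavaScript packing: the packed
# source is split around "return p}('", its four arguments are evaluated, and
# __unpack() substitutes every dictionary word back into the payload.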
def unpack(sJavascript, iteration=1, totaliterations=2):
    if sJavascript.startswith('var _0xcb8a='):
        aSplit = sJavascript.split('var _0xcb8a=')
        # exec() into an explicit namespace: a bare exec() cannot create
        # function locals under Python 3
        ns = {}
        exec("myarray=" + aSplit[1].split("eval(")[0], ns)
        myarray = ns['myarray']
        a1 = 62
        c1 = int(aSplit[1].split(",62,")[1].split(',')[0])
        p1 = myarray[0]
        k1 = myarray[3]
        if six.PY3:
            with open('temp file' + str(iteration) + '.js', "wb") as filewriter:
                filewriter.write(str(k1).encode('utf-8'))  # binary mode needs bytes
        elif six.PY2:
            with open('temp file' + str(iteration) + '.js', "w") as filewriter:
                filewriter.write(str(k1))
    else:
        if "rn p}('" in sJavascript:
            aSplit = sJavascript.split("rn p}('")
        else:
            aSplit = sJavascript.split("rn A}('")
        ns = {}
        exec("p1,a1,c1,k1=('" + aSplit[1].split(".spli")[0] + ')', ns)
        p1, a1, c1, k1 = ns['p1'], ns['a1'], ns['c1'], ns['k1']
        k1 = k1.split('|')
        aSplit = aSplit[1].split("))'")
    e = ''
    d = ''
    sUnpacked1 = str(__unpack(p1, a1, c1, k1, e, d, iteration))
    if iteration >= totaliterations:
        return sUnpacked1
    else:
        return unpack(sUnpacked1, iteration + 1)
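# Core of the unpacker: counts down through the keyword table k, rebuilds the
# base-a token for each index with __itoaNew(), and swaps it for the keyword
# inside the payload p.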
def __unpack(p, a, c, k, e, d, iteration,v=1):
#with open('before file'+str(iteration)+'.js', "wb") as filewriter:
# filewriter.write(str(p))
while (c >= 1):
c = c -1
if (k[c]):
aa=str(__itoaNew(c, a))
if v==1:
p=re.sub('\\b' + aa +'\\b', k[c], p)# THIS IS Bloody slow!
else:
p=findAndReplaceWord(p,aa,k[c])
#p=findAndReplaceWord(p,aa,k[c])
#with open('after file'+str(iteration)+'.js', "wb") as filewriter:
# filewriter.write(str(p))
return p
def __itoa(num, radix):
    # integer to lower-case base-`radix` string; floor division is required,
    # since Python 3 true division would turn num into a float
    result = ""
    if num == 0: return '0'
    while num > 0:
        result = "0123456789abcdefghijklmnopqrstuvwxyz"[num % radix] + result
        num //= radix
    return result
def __itoaNew(cc, a):
aa="" if cc < a else __itoaNew(int(cc / a),a)
cc = (cc % a)
bb=chr(cc + 29) if cc> 35 else str(__itoa(cc,36))
return aa+bb
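# Pure-string alternative to the re.sub() call in __unpack() (used when v != 1):
# splits on the token and only substitutes where the neighbouring characters
# are not word characters, i.e. a hand-rolled word-boundary replace.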
def findAndReplaceWord(source_str, word_to_find,replace_with):
splits=None
splits=source_str.split(word_to_find)
if len(splits)>1:
new_string=[]
current_index=0
for current_split in splits:
#print 'here',i
new_string.append(current_split)
val=word_to_find#by default assume it was wrong to split
#if its first one and item is blank then check next item is valid or not
if current_index==len(splits)-1:
val='' # last one nothing to append normally
else:
if len(current_split)==0: #if blank check next one with current split value
if ( len(splits[current_index+1])==0 and word_to_find[0].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_') or (len(splits[current_index+1])>0 and splits[current_index+1][0].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_'):# first just just check next
val=replace_with
#not blank, then check current endvalue and next first value
else:
if (splits[current_index][-1].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_') and (( len(splits[current_index+1])==0 and word_to_find[0].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_') or (len(splits[current_index+1])>0 and splits[current_index+1][0].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_')):# first just just check next
val=replace_with
new_string.append(val)
current_index+=1
#aaaa=1/0
source_str=''.join(new_string)
return source_str
def re_me(data, re_patten):
    # first capture group, or '' when the pattern does not match
    m = re.search(re_patten, data)
    return m.group(1) if m is not None else ''
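# Cookie helpers: serialise a jar to "name=value;" pairs, and load/save
# LWP-format jars stored under the add-on profile directory.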
def getCookiesString(cookieJar):
try:
cookieString=""
for index, cookie in enumerate(cookieJar):
cookieString+=cookie.name + "=" + cookie.value +";"
except: pass
#print 'cookieString',cookieString
return cookieString
def saveCookieJar(cookieJar,COOKIEFILE):
try:
complete_path=os.path.join(profile,COOKIEFILE)
cookieJar.save(complete_path,ignore_discard=True)
except: pass
def getCookieJar(COOKIEFILE):
cookieJar=None
if COOKIEFILE:
try:
complete_path=os.path.join(profile,COOKIEFILE)
cookieJar = http_cookiejar.LWPCookieJar()
cookieJar.load(complete_path,ignore_discard=True)
except:
cookieJar=None
if not cookieJar:
cookieJar = http_cookiejar.LWPCookieJar()
return cookieJar
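# SECURITY NOTE: doEval()/doEvalFunction() execute Python code supplied by the
# regex definition itself ($pyFunction: hooks), so definitions must only ever
# be loaded from trusted sources.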
def doEval(fun_call, page_data, Cookie_Jar, m):
    ret_val = ''
    if functions_dir not in sys.path:
        sys.path.append(functions_dir)
    # run the import and the call in one shared namespace: a bare exec() cannot
    # rebind function locals under Python 3, so ret_val is read back explicitly
    ns = {'page_data': page_data, 'Cookie_Jar': Cookie_Jar, 'm': m}
    try:
        exec('import ' + fun_call.split('.')[0], globals(), ns)
    except Exception:
        traceback.print_exc(file=sys.stdout)
    exec('ret_val=' + fun_call, globals(), ns)
    ret_val = ns['ret_val']
    try:
        return str(ret_val)
    except Exception:
        return ret_val
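# doEvalFunction() persists the supplied code as LSProdynamicCode.py in the
# profile directory and imports it; since import caches modules, a changed
# payload within the same interpreter session would need an explicit reload.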
def doEvalFunction(fun_call, page_data, Cookie_Jar, m):
    ret_val = ''
    if functions_dir not in sys.path:
        sys.path.append(functions_dir)
    with open(os.path.join(functions_dir, "LSProdynamicCode.py"), "w") as f:
        f.write(fun_call)
    import LSProdynamicCode
    ret_val = LSProdynamicCode.GetLSProData(page_data, Cookie_Jar, m)
    try:
        return str(ret_val)
    except Exception:
        return ret_val
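# Minimal urllib fetch with optional cookie jar, POST body, extra headers,
# timeout and redirect suppression; returns the raw response body.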
def getUrl(url, cookieJar=None,post=None, timeout=20, headers=None, noredir=False):
cookie_handler = urllib_request.HTTPCookieProcessor(cookieJar)
if noredir:
opener = urllib_request.build_opener(NoRedirection,cookie_handler, urllib_request.HTTPBasicAuthHandler(), urllib_request.HTTPHandler())
else:
opener = urllib_request.build_opener(cookie_handler, urllib_request.HTTPBasicAuthHandler(), urllib_request.HTTPHandler())
#opener = urllib_request.install_opener(opener)
req = urllib_request.Request(url)
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
if headers:
for h,hv in headers:
req.add_header(h,hv)
response = opener.open(req,post,timeout=timeout)
link=response.read()
response.close()
return link
def get_decode(str, reg=None):
    if reg:
        str = re.findall(reg, str)[0]
    s1 = urllib_parse.unquote(str[0: len(str) - 1])
    t = ''
    for i in list(range(len(s1))):
        # the trailing character carries the shift; int() is assumed here,
        # since subtracting the raw one-character string would raise a TypeError
        t += chr(ord(s1[i]) - int(s1[len(s1) - 1]))
    t = urllib_parse.unquote(t)
    return t
def javascriptUnEscape(str):
    js = re.findall('unescape\(\'(.*?)\'', str)
    for j in js:  # re.findall() always returns a list, never None
        str = str.replace(j, urllib_parse.unquote(j))
    return str
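# Millisecond and second epoch timestamps backing the $epoctime$ and
# $epoctime2$ placeholders.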
def getEpocTime():
import time
return str(int(time.time()*1000))
def getEpocTime2():
import time
return str(int(time.time()))