Quickly export FOFA search results
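The script below queries the FOFA search API page by page, writes the URL, IP and port of every result into a CSV file, then uses pandas to drop duplicate URLs so that only a de-duplicated copy is kept.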
# -*- coding:UTF-8 -*-
import os
import csv
import time
import base64
import requests
import codecs
import pandas as pd
url = 'https://fofa.so/api/v1/search/all?email=your_email_address&key=your_32_char_apikey'  # fill in your FOFA account email and 32-character API key
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36','Connection': 'keep-alive','accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'}
qbase64_str = input('Enter the query to search, e.g. title="": ')
pages = input('Enter how many pages to crawl: ')
qbase64 = base64.b64encode(qbase64_str.encode('utf-8'))
nowtime = time.strftime("%Y-%m-%d", time.localtime())
try:
    fileName = nowtime + '_FofaApi_results' + '.csv'
    fieldnames = ['url', 'ip', 'port']
    for page in range(1, int(pages) + 1):
        print('Results for page ' + str(page))
        data = {
            'qbase64': qbase64.decode('utf-8'),
            'page': page,  # the FOFA API paginates with 'page'
            'size': 100,
            'full': False,
        }
        req = requests.post(url=url, data=data, headers=header, timeout=10)
        res = req.json()
        print(res['results'])
        with codecs.open(fileName, 'a+', 'utf-8') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            if page == 1:
                writer.writeheader()  # write the header row only once
            for detail in res['results']:
                writer.writerow({'url': detail[0], 'ip': detail[1], 'port': detail[2]})
    # de-duplicate by URL, keep the cleaned copy and delete the raw export
    df = pd.read_csv(fileName, header=0)
    datalist = df.drop_duplicates(['url'])
    datalist.to_csv(nowtime + '_FofaApi_results_dedup' + '.csv', index=False)
    os.remove(fileName)
    print('API export finished!')
except Exception as e:
    print('The script raised an error: ' + str(e))
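For reference, the qbase64 parameter sent to the API is nothing more than the Base64-encoded query string, exactly as the script builds it from the interactive input. A minimal standalone sketch (the title="Apache" query is only an example value):

import base64

query = 'title="Apache"'  # example FOFA query; replace with your own
qbase64 = base64.b64encode(query.encode('utf-8')).decode('utf-8')
print(qbase64)  # dGl0bGU9IkFwYWNoZSI=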