# Batch-download listed companies' annual reports from cninfo.com.cn (巨潮网) by announcement date.
import csv
import datetime
import os
import socket
import time

import requests
from dateutil.relativedelta import relativedelta
socket.setdefaulttimeout(20)#Set a 20s timeout at the socket layer once; later socket-based code in this file inherits it without setting its own.
def downloadpdf(url, file, timeout=None):
    """Download one announcement PDF and write it to a local file.

    Parameters
    ----------
    url : str
        Direct URL of the PDF on static.cninfo.com.cn.
    file : str
        Local path the PDF body is written to (binary mode).
    timeout : float or None
        Optional per-request timeout in seconds; None keeps the
        original behavior (global socket default applies).

    Raises
    ------
    requests.HTTPError
        If the server answers with an error status — without this check
        an HTML error page would be silently saved as a ".pdf".
    """
    resp = requests.get(url, timeout=timeout)
    resp.raise_for_status()  # don't save error pages as PDFs
    # `with` guarantees the file handle is closed even if the write fails.
    with open(file, 'wb') as f:
        f.write(resp.content)
# Create the CSV file that stores the details of every downloaded announcement.
csvf = open('年报.csv', 'a+', encoding='gbk', newline='')
writer = csv.writer(csvf)
writer.writerow(('公司简称', '股票代码', '发布时间', '公告标题', '公告pdf下载地址'))

# Make sure the output directory exists before opening files inside it,
# then create a second CSV that records the URLs that failed to download.
os.makedirs('年报pdf', exist_ok=True)
csvf2 = open('年报pdf\\未下载的网址.csv', 'a+', encoding='gbk', newline='')
writer2 = csv.writer(csvf2)
writer2.writerow(('时间', '网址'))

# Request parameters copied from the site's own query — fill in before running.
headers = {'User-Agent': ' '}
cookies = {'Cookie': ' '}
url = ' '

# cninfo seemingly cannot page past page 100, so crawl one month at a time
# (walking backwards from `bg`) to avoid missing announcements.
bg = datetime.datetime.strptime('20190201', '%Y%m%d')
while bg > datetime.datetime.strptime('20181201', '%Y%m%d'):
    end = bg + relativedelta(months=1)
    daterange = bg.strftime('%Y-%m-%d') + '~' + end.strftime('%Y-%m-%d')
    data = {'seDate': daterange}  # TODO: add the remaining POST params (incl. the page field that should carry `page`) from the site
    for page in range(101):
        resp = None  # so the except branch never hits an unbound name
        try:
            # Throttle requests so the crawl does not hammer the server.
            time.sleep(10)
            resp = requests.post(url, params=data, headers=headers, cookies=cookies)
            # `or []` guards the case where the JSON field is null/missing content.
            pdfs = resp.json()['announcements'] or []
            resp.close()
            for pdf in pdfs:
                announcementTitle = pdf['announcementTitle']
                # Skip abstract ("摘要") announcements; keep full reports only.
                if '摘要' not in announcementTitle:
                    secName = pdf['secName']
                    secCode = pdf['secCode']
                    adjunctUrl = 'http://static.cninfo.com.cn/' + pdf['adjunctUrl']
                    # Relative path under 年报pdf\ (the original leading '\\'
                    # made it drive-root relative, unlike the log CSV above).
                    pdffile = '年报pdf\\' + 'sec' + str(secCode) + '_' + announcementTitle + '.pdf'
                    downloadpdf(url=adjunctUrl, file=pdffile)
                    print('sec' + str(secCode) + '_' + announcementTitle + '.pdf已下载完成')
                    announcementTime = pdf['announcementTime']
                    writer.writerow((secName, secCode, announcementTime, announcementTitle, adjunctUrl))
        except Exception:
            # Record the failure and move on; never let one bad page kill the crawl.
            failed_url = resp.url if resp is not None else url
            print('出问题的网址', failed_url)
            writer2.writerow((data['seDate'], failed_url))
    bg = bg - relativedelta(months=1)
    print(data['seDate'] + '已下载完成')
csvf.close()
csvf2.close()