
【spiderContent.py】Implementing the article content crawler
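
This script reads the channel list from navData.csv, pages through Weibo's /ajax/feed/hottimeline endpoint for each channel, and appends one row per post (engagement counts, region, text, and author fields) to articleData.csv. Short time.sleep calls between requests keep the crawl from tripping rate limits.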

import time
import requests
import csv
import os
from datetime import datetime

def init():
    # Create articleData.csv with a header row on first run
    if not os.path.exists('./articleData.csv'):
        with open('./articleData.csv', 'w', encoding='utf-8', newline='') as csvFile:
            writer = csv.writer(csvFile)
            writer.writerow([
                'id',
                'likeNum',
                'commentsLen',
                'reposts_count',
                'region',
                'content',
                'contentLen',
                'created_at',
                'type',
                'detailUrl',    # followBtnCode > uid + mblogid
                'authorAvatar',
                'authorName',
                'authorDetail',
                'isVip'         # v_plus
            ])

def writerRow(row):
    # Append one record to the output CSV
    with open('./articleData.csv', 'a', encoding='utf-8', newline='') as csvFile:
        writer = csv.writer(csvFile)
        writer.writerow(row)

def get_data(url, params):
    headers = {
        # Use your own logged-in Weibo cookie here; this one will expire
        'Cookie': 'SINAGLOBAL=2555941826014.1074.1676801766625; ULV=1719829459275:6:1:2:4660996305989.918.1719827559898:1719743122299; UOR=,,www.baidu.com; XSRF-TOKEN=VtLXviYSIs8lor7sz4iGyigL; SUB=_2A25LhvU9DeRhGeFH6FIX-S3MyD2IHXVo-gj1rDV8PUJbkNAGLRXMkW1Ne2nhI3Gle25QJK0Z99J3trq_NZn6YKJ-; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WW3Mv8V5EupQbbKh.vaZIwU5JpX5KzhUgL.FoM4e05c1Ke7e022dJLoIp7LxKML1KBLBKnLxKqL1hnLBoM41hz41hqReKqN; WBPSESS=Dt2hbAUaXfkVprjyrAZT_LRaDLsnxG-kIbeYwnBb5OUKZiwfVr_UrcYfWuqG-4ZVDM5HeU3HXkDNK_thfRfdS9Ao6ezT30jDksv-CpaVmlTAqGUHjJ7PYkH5aCK4HLxmRq14ZalmQNwzfWMPa4y0VNRLuYdg7L1s49ymNq_5v5vusoz0r4ki6u-MHGraF0fbUTgX14x0kHayEwOoxfLI-w==; SCF=AqmJWo31oFV5itnRgWNU1-wHQTL6PmkBLf3gDuqpdqAIfaWguDTMre6Oxjf5Uzs74JAh2r0DdV1sJ1g6m-wJ5NQ.; _s_tentry=-; Apache=4660996305989.918.1719827559898; PC_TOKEN=7955a7ab1f; appkey=; geetest_token=602cd4e3a7ed1898808f8adfe1a2048b; ALF=1722421868',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
    }
    response = requests.get(url, headers=headers, params=params)
    if response.status_code == 200:
        return response.json()['statuses']
    else:
        return None

def getAllTypeList():
    # Read the channel list produced by the navigation spider
    typeList = []
    with open('./navData.csv', 'r', encoding='utf-8') as reader:
        readerCsv = csv.reader(reader)
        next(readerCsv)  # skip the header row
        for nav in readerCsv:
            typeList.append(nav)
    return typeList

def parse_json(response, articleType):
    for article in response:
        id = article['id']
        likeNum = article['attitudes_count']
        commentsLen = article['comments_count']
        reposts_count = article['reposts_count']
        try:
            # Strip the '发布于 ' ("posted in") prefix from the region name
            region = article['region_name'].replace('发布于 ', '')
        except KeyError:
            region = '无'  # '无' ("none"): the post carries no region
        content = article['text_raw']
        contentLen = article['textLength']
        created_at = datetime.strptime(article['created_at'], '%a %b %d %H:%M:%S %z %Y').strftime('%Y-%m-%d')
        try:
            # Detail URL is uid + mblogid, per the header comment in init()
            detailUrl = 'https://weibo.com/' + str(article['user']['id']) + '/' + str(article['mblogid'])
        except KeyError:
            detailUrl = '无'
        authorAvatar = article['user']['avatar_large']
        authorName = article['user']['screen_name']
        authorDetail = 'https://weibo.com/u/' + str(article['user']['id'])
        isVip = article['user'].get('v_plus', 0)  # absent for non-VIP accounts
        writerRow([
            id,
            likeNum,
            commentsLen,
            reposts_count,
            region,
            content,
            contentLen,
            created_at,
            articleType,
            detailUrl,
            authorAvatar,
            authorName,
            authorDetail,
            isVip
        ])

def start(typeNum=1, pageNum=1):
    articleUrl = 'https://weibo.com/ajax/feed/hottimeline'
    init()
    typeList = getAllTypeList()
    typeNumCount = 0
    for navType in typeList:
        if typeNumCount >= typeNum: return  # stop after typeNum channels
        time.sleep(2)  # throttle between channels
        for page in range(0, pageNum):
            print('Crawling channel %s, page %s' % (navType[0], page + 1))
            time.sleep(1)
            params = {
                'group_id': navType[1],
                'containerid': navType[2],
                'max_id': page,
                'count': 10,
                'extparam': 'discover|new_feed'
            }
            response = get_data(articleUrl, params)
            if response:  # skip pages where the request failed
                parse_json(response, navType[0])
        typeNumCount += 1

if __name__ == '__main__':
    start()
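
Note that spiderContent.py depends on ./navData.csv from the navigation spider: a header row followed by one row per channel, which getAllTypeList() and start() index positionally as [name, group_id, containerid]. To smoke-test this script on its own, a stub like the following works; the column names and the two ID values are placeholders, not real Weibo values:

# Hypothetical stub for testing spiderContent.py in isolation.
# Only the column order matters, since rows are indexed positionally;
# real group_id/containerid values come from the navigation spider.
import csv

with open('./navData.csv', 'w', encoding='utf-8', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['typeName', 'gid', 'containerid'])      # header, skipped on read
    writer.writerow(['热门', '<group_id>', '<containerid>'])  # fill in real IDs

With navData.csv in place, calling start(typeNum=3, pageNum=2) would crawl the first three channels, two pages of ten posts each, appending everything to articleData.csv.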