YYL469

【spiderComments.py】实现文章评论爬取

  1 +import time
  2 +import requests
  3 +import csv
  4 +import os
  5 +from datetime import datetime
  6 +
def init(path='./articleComments.csv'):
    """Create the comments CSV with its header row, if it does not exist yet.

    Args:
        path: Target CSV file. Defaults to the original hard-coded location
            so existing callers (`start`) are unaffected.
    """
    # Idempotent: never truncate a file that already holds scraped data.
    if os.path.exists(path):
        return
    with open(path, 'w', encoding='utf-8', newline='') as csv_file:
        csv.writer(csv_file).writerow([
            'articleId',
            'created_at',
            'likes_counts',
            'region',
            'content',
            'authorName',
            'authorGender',
            'authorAddress',
            'authorAvatar',
        ])
  22 +
def writerRow(row):
    """Append one record to articleComments.csv (UTF-8, comma-separated).

    Args:
        row: Sequence of cell values, in the header's column order.
    """
    with open('./articleComments.csv', 'a', encoding='utf-8', newline='') as out:
        csv.writer(out).writerow(row)
  27 +
def get_data(url, params):
    """GET a Weibo ajax endpoint and return the 'data' field of its JSON body.

    Args:
        url: Endpoint URL (e.g. the buildComments API).
        params: Query-string parameters passed through to `requests.get`.

    Returns:
        The decoded 'data' payload, or None on a non-200 status or when the
        response JSON carries no 'data' key.
    """
    # SECURITY NOTE(review): the Cookie below embeds live session tokens
    # (SUB/SUBP/...). They expire and should not be committed to source
    # control — consider loading them from an environment variable.
    headers = {
        'Cookie':'SINAGLOBAL=2555941826014.1074.1676801766625; ULV=1719829459275:6:1:2:4660996305989.918.1719827559898:1719743122299; UOR=,,www.baidu.com; XSRF-TOKEN=VtLXviYSIs8lor7sz4iGyigL; SUB=_2A25LhvU9DeRhGeFH6FIX-S3MyD2IHXVo-gj1rDV8PUJbkNAGLRXMkW1Ne2nhI3Gle25QJK0Z99J3trq_NZn6YKJ-; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WW3Mv8V5EupQbbKh.vaZIwU5JpX5KzhUgL.FoM4e05c1Ke7e022dJLoIp7LxKML1KBLBKnLxKqL1hnLBoM41hz41hqReKqN; WBPSESS=Dt2hbAUaXfkVprjyrAZT_LRaDLsnxG-kIbeYwnBb5OUKZiwfVr_UrcYfWuqG-4ZVDM5HeU3HXkDNK_thfRfdS9Ao6ezT30jDksv-CpaVmlTAqGUHjJ7PYkH5aCK4HLxmRq14ZalmQNwzfWMPa4y0VNRLuYdg7L1s49ymNq_5v5vusoz0r4ki6u-MHGraF0fbUTgX14x0kHayEwOoxfLI-w==; SCF=AqmJWo31oFV5itnRgWNU1-wHQTL6PmkBLf3gDuqpdqAIfaWguDTMre6Oxjf5Uzs74JAh2r0DdV1sJ1g6m-wJ5NQ.; _s_tentry=-; Apache=4660996305989.918.1719827559898; PC_TOKEN=7955a7ab1f; appkey=; geetest_token=602cd4e3a7ed1898808f8adfe1a2048b; ALF=1722421868',
        'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
    }
    # timeout added: requests.get without one can block the crawler forever
    # on a stalled connection.
    response = requests.get(url, headers=headers, params=params, timeout=15)
    if response.status_code == 200:
        # .get avoids a KeyError when the payload lacks a 'data' key
        # (e.g. an error body returned with status 200).
        return response.json().get('data')
    return None
  38 +
def getAllArticleList():
    """Load every article row from articleData.csv, skipping the header.

    Returns:
        list[list[str]]: one list of column values per article; the first
        column is expected to be the article id (consumed by `start`).
    """
    with open('./articleData.csv', 'r', encoding='utf-8') as src:
        rows = csv.reader(src)
        # Fix: the original called next() on the raw file object, which
        # desynchronizes the csv reader if the header contains a quoted
        # embedded newline. Advancing the csv reader is always correct;
        # the None default also tolerates a completely empty file.
        next(rows, None)
        return list(rows)
  47 +
def parse_json(response, artileId):
    """Extract the fields of each Weibo comment dict and append them to the CSV.

    Args:
        response: Iterable of comment dicts as returned by the
            buildComments endpoint's 'data' field.
        artileId: Id of the article the comments belong to (written into
            the first CSV column).
    """
    for comment in response:
        # Weibo timestamps look like 'Mon Jul 01 12:00:00 +0800 2024';
        # keep only the date part for the CSV.
        created_at = datetime.strptime(
            comment['created_at'], '%a %b %d %H:%M:%S %z %Y'
        ).strftime('%Y-%m-%d')
        likes_counts = comment['like_counts']
        # Fix: the original used a bare `except:` around the 'source'
        # lookup, silently swallowing any error. Handle the two real
        # cases explicitly: key absent or value empty/None.
        source = comment.get('source')
        region = source.replace('来自', '') if source else '无'
        user = comment['user']
        writerRow([
            artileId,
            created_at,
            likes_counts,
            region,
            comment['text_raw'],
            user['screen_name'],
            user['gender'],
            user['location'],
            user['avatar_large'],
        ])
  72 +
def start():
    """Crawl the comments of every article listed in articleData.csv.

    Reads article ids via `getAllArticleList`, fetches each article's
    comments from the Weibo buildComments endpoint, and appends parsed
    rows to articleComments.csv.
    """
    commentUrl = 'https://weibo.com/ajax/statuses/buildComments'
    init()
    for article in getAllArticleList():
        articleId = article[0]
        print('正在爬取id值为%s的文章评论' % articleId)
        # Throttle requests to reduce the chance of being rate-limited.
        time.sleep(2)
        params = {
            'id': int(articleId),
            'is_show_bulletin': 2,
        }
        response = get_data(commentUrl, params)
        # Fix: get_data returns None on a failed request; the original
        # passed it straight to parse_json, which would crash iterating
        # None. Skip the article instead and keep crawling.
        if response is not None:
            parse_json(response, articleId)
  87 +
  88 +
  89 +
# Entry point: run the crawler only when executed as a script,
# not when imported as a module.
if __name__ == '__main__':
    start()
  92 +
  93 +
  94 +
  95 +
  96 +
  97 +
  98 +
  99 +