戒酒的李白
Committed by GitHub

Merge pull request #12 from lintsinghua/main

Optimized the web scraping functionality to support multi-account crawling and enhance performance.
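For context, a minimal sketch of one way the headers_list expected by the updated crawlers might be assembled. The accounts.json file name, its key names, and the load_headers_list helper are illustrative assumptions rather than part of this PR; each entry only needs the Cookie and User-Agent keys that fetchData passes to requests.get.

# Hypothetical helper (not part of this PR): build headers_list from a local
# accounts.json file so that real account cookies stay out of the source code.
# Assumed file format: [{"cookie": "...", "user_agent": "..."}, ...]
import json

DEFAULT_UA = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'

def load_headers_list(path='accounts.json'):
    with open(path, 'r', encoding='utf-8') as f:
        accounts = json.load(f)
    # One headers dict per account; fetchData picks one at random per request.
    return [
        {'Cookie': account['cookie'], 'User-Agent': account.get('user_agent', DEFAULT_UA)}
        for account in accounts
    ]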
@@ -2,99 +2,87 @@ import time
import requests
import csv
import os
import random
from datetime import datetime
from .settings import articleAddr, commentsAddr
from requests.exceptions import RequestException
# Initialize: create the comments CSV file with a header row if it does not exist
def init():
if not os.path.exists(commentsAddr):
with open(commentsAddr, 'w', encoding='utf-8', newline='') as csvFile:
writer = csv.writer(csvFile)
writer.writerow([
'articleId', 'created_at', 'likes_counts', 'region', 'content',
'authorName', 'authorGender', 'authorAddress', 'authorAvatar'
])
# Append a single comment row to the CSV file
def write(row):
with open(commentsAddr, 'a', encoding='utf-8', newline='') as csvFile:
writer = csv.writer(csvFile)
writer.writerow(row)
# Fetch data, randomly rotating between multiple account headers
def fetchData(url, params, headers_list):
headers = random.choice(headers_list)
try:
response = requests.get(url, headers=headers, params=params, timeout=10)
if response.status_code == 200:
return response.json()['data']
else:
return None
except RequestException as e:
print(f"请求失败:{e}")
return None
# Read the article list from the articles CSV file
def getArticleList():
articleList = []
with open(articleAddr, 'r', encoding='utf-8') as reader:
readerCsv = csv.reader(reader)
next(reader)
for nav in readerCsv:
articleList.append(nav)
return articleList
# Parse the comment data and write each comment to the CSV file
def readJson(response, articleId):
for comment in response:
created_at = datetime.strptime(comment['created_at'], '%a %b %d %H:%M:%S %z %Y').strftime('%Y-%m-%d')
likes_counts = comment['like_counts']
region = comment.get('source', '无').replace('来自', '')
content = comment['text_raw']
authorName = comment['user']['screen_name']
authorGender = comment['user']['gender']
authorAddress = comment['user']['location']
authorAvatar = comment['user']['avatar_large']
write([articleId, created_at, likes_counts, region, content, authorName, authorGender, authorAddress, authorAvatar])
# Start the comment crawler
def start(headers_list, delay=2):
commentUrl = 'https://weibo.com/ajax/statuses/buildComments'
init()
articleList = getArticleList()
for article in articleList:
articleId = article[0]
print(f'正在爬取id值为{articleId}的文章评论')
time.sleep(random.uniform(1, delay))  # random delay to avoid hitting the server too frequently
params = {'id': int(articleId), 'is_show_bulletin': 2}
response = fetchData(commentUrl, params, headers_list)
if response:
readJson(response, articleId)
if __name__ == '__main__':
# headers_list should contain cookies for multiple accounts
headers_list = [
{
'Cookie': 'your_cookie_here',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
},
{
'Cookie': 'another_cookie_here',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
}
]
start(headers_list)
@@ -2,123 +2,97 @@ import time
import requests
import csv
import os
import random
from datetime import datetime
from .settings import navAddr, articleAddr
from requests.exceptions import RequestException
# Initialize: create the articles CSV file with a header row if it does not exist
def init():
if not os.path.exists(articleAddr):
with open(articleAddr, 'w', encoding='utf-8', newline='') as csvFile:
writer = csv.writer(csvFile)
writer.writerow([
'id', 'likeNum', 'commentsLen', 'reposts_count', 'region', 'content', 'contentLen',
'created_at', 'type', 'detailUrl', 'authorAvatar', 'authorName', 'authorDetail', 'isVip'
])
# Append a single article row to the CSV file
def write(row):
with open(articleAddr, 'a', encoding='utf-8', newline='') as csvFile:
writer = csv.writer(csvFile)
writer.writerow(row)
# Fetch data with multi-account support
def fetchData(url, params, headers_list):
headers = random.choice(headers_list)
try:
response = requests.get(url, headers=headers, params=params, timeout=10)
if response.status_code == 200:
return response.json()['statuses']
else:
return None
except RequestException as e:
print(f"请求失败:{e}")
return None
# Read the feed type list from the navigation CSV file
def getTypeList():
typeList = []
with open(navAddr, 'r', encoding='utf-8') as reader:
readerCsv = csv.reader(reader)
next(reader)
for nav in readerCsv:
typeList.append(nav)
return typeList
# Parse the article data and write each article to the CSV file
def readJson(response, type):
for article in response:
id = article['id']
likeNum = article['attitudes_count']
commentsLen = article['comments_count']
reposts_count = article['reposts_count']
region = article.get('region_name', '无').replace('发布于 ', '')
content = article['text_raw']
contentLen = article['textLength']
created_at = datetime.strptime(article['created_at'], '%a %b %d %H:%M:%S %z %Y').strftime('%Y-%m-%d')
detailUrl = f"https://weibo.com/{article['id']}/{article['mblogid']}" if 'mblogid' in article else '无'
authorAvatar = article['user']['avatar_large']
authorName = article['user']['screen_name']
authorDetail = f"https://weibo.com/u/{article['user']['id']}"
isVip = article['user']['v_plus']
write([id, likeNum, commentsLen, reposts_count, region, content, contentLen, created_at, type, detailUrl, authorAvatar, authorName, authorDetail, isVip])
# Start the article crawler
def start(headers_list, typeNum=14, pageNum=3, delay=2):
articleUrl = 'https://weibo.com/ajax/feed/hottimeline'
init()
typeList = getTypeList()
for type in typeList[:typeNum]:
for page in range(pageNum):
print(f'正在爬取的类型:{type[0]} 中的第{page + 1}页文章数据')
time.sleep(random.uniform(1, delay))  # random delay between requests
params = {
'group_id': type[1],
'containerid': type[2],
'max_id': page,
'count': 10,
'extparam': 'discover|new_feed'
}
response = fetchData(articleUrl, params, headers_list)
if response:
readJson(response, type[0])
if __name__ == '__main__':
headers_list = [
{
'Cookie': 'your_cookie_here',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
},
{
'Cookie': 'another_cookie_here',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
}
]
start(headers_list)
@@ -2,54 +2,64 @@ import requests
import csv
import numpy as np
import os
import random
from .settings import navAddr
from requests.exceptions import RequestException
# Initialize: create the navigation CSV file with a header row if it does not exist
def init():
if not os.path.exists(navAddr):
with open(navAddr, 'w', encoding='utf-8', newline='') as csvFile:
writer = csv.writer(csvFile)
writer.writerow(['typeName', 'gid', 'containerid'])
# Append a single navigation row to the CSV file
def write(row):
with open(navAddr, 'a', encoding='utf-8', newline='') as csvFile:
writer = csv.writer(csvFile)
writer.writerow(row)
# Fetch data with multi-account support
def fetchData(url, headers_list):
headers = random.choice(headers_list)
try:
response = requests.get(url, headers=headers, timeout=10)
if response.status_code == 200:
return response.json()['data']['modules']
else:
return None
except RequestException as e:
print(f"请求失败:{e}")
return None
# Parse the navigation data and write each type entry to the CSV file
def readJson(response):
for module in response:
if 'type' in module and 'typeName' in module:
typeName = module['typeName']
for submodule in module['modules']:
if 'id' in submodule and 'containerid' in submodule:
gid = submodule['id']
containerid = submodule['containerid']
write([typeName, gid, containerid])
# Start the navigation crawler
def start(headers_list):
navUrl = 'https://weibo.com/ajax/side/hot'
init()
response = fetchData(navUrl, headers_list)
if response:
readJson(response)
if __name__ == '__main__':
headers_list = [
{
'Cookie': 'your_cookie_here',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
},
{
'Cookie': 'another_cookie_here',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
}
]
start(headers_list)