Merge branch 'main' of https://github.com/666ghj/Weibo_PublicOpinion_AnalysisSystem
Showing 5 changed files with 266 additions and 0 deletions
createTables.sql
0 → 100644
+SET FOREIGN_KEY_CHECKS=0;
+
+-- ----------------------------
+-- article table
+-- ----------------------------
+CREATE TABLE `article` (
+  `id` bigint(20) DEFAULT NULL,
+  `likeNum` bigint(20) DEFAULT NULL,
+  `commentsLen` bigint(20) DEFAULT NULL,
+  `reposts_count` bigint(20) DEFAULT NULL,
+  `region` text,
+  `content` text,
+  `contentLen` bigint(20) DEFAULT NULL,
+  `created_at` text,
+  `type` text,
+  `detailUrl` text,
+  `authorAvatar` text,
+  `authorName` text,
+  `authorDetail` text,
+  `isVip` double DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci;
+
+-- ----------------------------
+-- comments table
+-- ----------------------------
+CREATE TABLE `comments` (
+  `articleId` bigint(20) DEFAULT NULL,
+  `created_at` text,
+  `likes_counts` bigint(20) DEFAULT NULL,
+  `region` text,
+  `content` text,
+  `authorName` text,
+  `authorGender` text,
+  `authorAddress` text,
+  `authorAvatar` text
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci;
+
+-- ----------------------------
+-- user table
+-- ----------------------------
+CREATE TABLE `user` (
+  `username` varchar(255) DEFAULT NULL,
+  `password` varchar(255) DEFAULT NULL,
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `createTime` varchar(255) DEFAULT NULL,
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=utf8;
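Before the spider can write anything, this schema has to exist in MySQL. Below is a minimal sketch for loading createTables.sql with pymysql; the host, user, password and database name are placeholders, not the project's actual configuration.

    # Sketch: run createTables.sql statement by statement with pymysql.
    # Connection details are placeholders -- adjust to your own environment.
    import pymysql

    conn = pymysql.connect(host='localhost', user='root', password='secret',
                           database='weibo_opinion', charset='utf8mb4')
    try:
        with open('createTables.sql', encoding='utf-8') as f:
            statements = [s.strip() for s in f.read().split(';') if s.strip()]
        with conn.cursor() as cur:
            for stmt in statements:
                cur.execute(stmt)  # SET FOREIGN_KEY_CHECKS, then each CREATE TABLE
        conn.commit()
    finally:
        conn.close()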
spider/main.py
0 → 100644
+from spiderContent import start as spiderContentStart
+from spiderComments import start as spiderCommentsStart
+import os
+from sqlalchemy import create_engine
+import pandas as pd
+
+engine = create_engine('mysql+pymysql://XiaoXueQi:XiaoXueQi@10.92.35.13/Weibo_PublicOpinion_AnalysisSystem?charset=utf8mb4')
+
+def save_to_sql():
+    try:
+        articleOldPd = pd.read_sql('select * from article', engine)
+        articleNewPd = pd.read_csv('articleData.csv')
+        commentOldPd = pd.read_sql('select * from comments', engine)
+        commentNewPd = pd.read_csv('articleComments.csv')
+
+        concatArticlePd = pd.concat([articleNewPd, articleOldPd], join='inner')
+        concatCommentsPd = pd.concat([commentNewPd, commentOldPd], join='inner')
+
+        concatArticlePd.drop_duplicates(subset='id', keep='last', inplace=True)
+        concatCommentsPd.drop_duplicates(subset='content', keep='last', inplace=True)
+
+        concatArticlePd.to_sql('article', con=engine, if_exists='replace', index=False)
+        concatCommentsPd.to_sql('comments', con=engine, if_exists='replace', index=False)
+    except Exception:
+        # first run (or unreadable tables): write the freshly crawled data directly
+        articleNewPd = pd.read_csv('articleData.csv')
+        commentNewPd = pd.read_csv('articleComments.csv')
+        articleNewPd.to_sql('article', con=engine, if_exists='replace', index=False)
+        commentNewPd.to_sql('comments', con=engine, if_exists='replace', index=False)
+
+    os.remove('./articleData.csv')
+    os.remove('./articleComments.csv')
+
+def main():
+    print('Crawling article data')
+    spiderContentStart(1, 1)
+    print('Crawling article comments')
+    spiderCommentsStart()
+    print('Saving data to the database')
+    save_to_sql()
+
+
+if __name__ == '__main__':
+    main()
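One thing worth noting about save_to_sql: pandas' to_sql with if_exists='replace' drops the table and recreates it with column types inferred by pandas, so the definitions from createTables.sql do not survive the first save. A sketch of an alternative that keeps the original schema by appending only unseen rows (it assumes article ids are unique, which the dedup above already relies on):

    # Sketch: append only new article rows instead of replacing the table.
    # 'engine' is the same SQLAlchemy engine created above.
    import pandas as pd

    def append_new_articles(engine):
        existing_ids = set(pd.read_sql('SELECT id FROM article', engine)['id'])
        new_rows = pd.read_csv('articleData.csv')
        new_rows = new_rows[~new_rows['id'].isin(existing_ids)]
        if not new_rows.empty:
            # if_exists='append' keeps the table definition from createTables.sql
            new_rows.to_sql('article', con=engine, if_exists='append', index=False)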
spider/spiderComments.py
0 → 100644
+import time
+import requests
+import csv
+import os
+from datetime import datetime
+
+def init():
+    if not os.path.exists('./articleComments.csv'):
+        with open('./articleComments.csv', 'w', encoding='utf-8', newline='') as csvFile:
+            writer = csv.writer(csvFile)
+            writer.writerow([
+                'articleId',
+                'created_at',
+                'likes_counts',
+                'region',
+                'content',
+                'authorName',
+                'authorGender',
+                'authorAddress',
+                'authorAvatar'
+            ])
+
+def writerRow(row):
+    with open('./articleComments.csv', 'a', encoding='utf-8', newline='') as csvFile:
+        writer = csv.writer(csvFile)
+        writer.writerow(row)
+
+def get_data(url, params):
+    headers = {
+        'Cookie': 'SINAGLOBAL=2555941826014.1074.1676801766625; ULV=1719829459275:6:1:2:4660996305989.918.1719827559898:1719743122299; UOR=,,www.baidu.com; XSRF-TOKEN=VtLXviYSIs8lor7sz4iGyigL; SUB=_2A25LhvU9DeRhGeFH6FIX-S3MyD2IHXVo-gj1rDV8PUJbkNAGLRXMkW1Ne2nhI3Gle25QJK0Z99J3trq_NZn6YKJ-; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WW3Mv8V5EupQbbKh.vaZIwU5JpX5KzhUgL.FoM4e05c1Ke7e022dJLoIp7LxKML1KBLBKnLxKqL1hnLBoM41hz41hqReKqN; WBPSESS=Dt2hbAUaXfkVprjyrAZT_LRaDLsnxG-kIbeYwnBb5OUKZiwfVr_UrcYfWuqG-4ZVDM5HeU3HXkDNK_thfRfdS9Ao6ezT30jDksv-CpaVmlTAqGUHjJ7PYkH5aCK4HLxmRq14ZalmQNwzfWMPa4y0VNRLuYdg7L1s49ymNq_5v5vusoz0r4ki6u-MHGraF0fbUTgX14x0kHayEwOoxfLI-w==; SCF=AqmJWo31oFV5itnRgWNU1-wHQTL6PmkBLf3gDuqpdqAIfaWguDTMre6Oxjf5Uzs74JAh2r0DdV1sJ1g6m-wJ5NQ.; _s_tentry=-; Apache=4660996305989.918.1719827559898; PC_TOKEN=7955a7ab1f; appkey=; geetest_token=602cd4e3a7ed1898808f8adfe1a2048b; ALF=1722421868',
+        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
+    }
+    response = requests.get(url, headers=headers, params=params)
+    if response.status_code == 200:
+        return response.json()['data']
+    else:
+        return None
+
+def getAllArticleList():
+    articleList = []
+    with open('./articleData.csv', 'r', encoding='utf-8') as reader:
+        readerCsv = csv.reader(reader)
+        next(readerCsv)  # skip the header row
+        for nav in readerCsv:
+            articleList.append(nav)
+    return articleList
+
+def parse_json(response, articleId):
+    for comment in response:
+        created_at = datetime.strptime(comment['created_at'], '%a %b %d %H:%M:%S %z %Y').strftime('%Y-%m-%d')
+        likes_counts = comment['like_counts']
+        try:
+            region = comment['source'].replace('来自', '')
+        except (KeyError, AttributeError):
+            region = '无'
+        content = comment['text_raw']
+        authorName = comment['user']['screen_name']
+        authorGender = comment['user']['gender']
+        authorAddress = comment['user']['location']
+        authorAvatar = comment['user']['avatar_large']
+        writerRow([
+            articleId,
+            created_at,
+            likes_counts,
+            region,
+            content,
+            authorName,
+            authorGender,
+            authorAddress,
+            authorAvatar
+        ])
+
+def start():
+    commentUrl = 'https://weibo.com/ajax/statuses/buildComments'
+    init()
+    articleList = getAllArticleList()
+    for article in articleList:
+        articleId = article[0]
+        print('Crawling comments for article id %s' % articleId)
+        time.sleep(2)
+        params = {
+            'id': int(articleId),
+            'is_show_bulletin': 2
+        }
+        response = get_data(commentUrl, params)
+        if response:  # skip articles whose comment request failed
+            parse_json(response, articleId)
+
+
+if __name__ == '__main__':
+    start()
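For reference, parse_json only reads a handful of fields from each comment object returned by the buildComments endpoint. The shape below is inferred from the field accesses in the code above; real responses carry many more fields, and the values here are made up.

    # Hypothetical comment object with just the fields parse_json touches.
    sample_comment = {
        'created_at': 'Mon Jul 01 12:00:00 +0800 2024',  # parsed with '%a %b %d %H:%M:%S %z %Y'
        'like_counts': 3,
        'source': '来自北京',        # the '来自' prefix is stripped to get the region
        'text_raw': '示例评论内容',
        'user': {
            'screen_name': '示例用户',
            'gender': 'f',
            'location': '北京',
            'avatar_large': 'https://example.com/avatar.jpg',
        },
    }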
@@ -152,3 +152,59 @@ def getCommentCharDataTwo():  # count comments by author gender
         })
     return resultData
 
+def getYuQingCharDataOne():  # count positive / neutral / negative hot words
+    hotWordList = getAllHotWords()
+    xData = ['正面', '中性', '负面']
+    yData = [0, 0, 0]
+    for word in hotWordList:
+        emotionValue = SnowNLP(word[0]).sentiments
+        if emotionValue > 0.5:
+            yData[0] += 1
+        elif emotionValue == 0.5:
+            yData[1] += 1
+        else:
+            yData[2] += 1
+    finalData = [{
+        'name': x,
+        'value': yData[index]
+    } for index, x in enumerate(xData)]
+    return xData, yData, finalData
+
+def getYuQingCharDataTwo():  # sentiment tallies for the comment list and the article list
+    xData = ['正面', '中性', '负面']
+    finalData1 = [{
+        'name': x,
+        'value': 0
+    } for x in xData]
+    finalData2 = [{
+        'name': x,
+        'value': 0
+    } for x in xData]
+
+    for comment in commentList:
+        emotionValue = SnowNLP(comment[4]).sentiments
+        if emotionValue > 0.5:
+            finalData1[0]['value'] += 1
+        elif emotionValue == 0.5:
+            finalData1[1]['value'] += 1
+        else:
+            finalData1[2]['value'] += 1
+    for article in articleList:
+        emotionValue = SnowNLP(article[5]).sentiments
+        if emotionValue > 0.5:
+            finalData2[0]['value'] += 1
+        elif emotionValue == 0.5:
+            finalData2[1]['value'] += 1
+        else:
+            finalData2[2]['value'] += 1
+    return finalData1, finalData2
+
+def getYuQingCharDataThree():  # top 10 hot words and their frequencies
+    hotWordList = getAllHotWords()
+    xData = []
+    yData = []
+    for i in hotWordList[:10]:
+        xData.append(i[0])
+        yData.append(int(i[1]))
+    return xData, yData
+
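All three functions above lean on the same SnowNLP convention: .sentiments returns a score in [0, 1], and the code treats values above 0.5 as positive, exactly 0.5 as neutral, and below 0.5 as negative. A small self-contained illustration of that threshold (the sample strings are arbitrary):

    from snownlp import SnowNLP

    def label(text):
        score = SnowNLP(text).sentiments  # probability-like score, higher = more positive
        if score > 0.5:
            return '正面'
        elif score == 0.5:
            return '中性'
        return '负面'

    print(label('这个产品非常好用'))      # expected to land on the positive side
    print(label('太差劲了，完全不推荐'))  # expected to land on the negative side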
utils/getTableData.py
0 → 100644
+from utils.getPublicData import getAllArticleData
+from snownlp import SnowNLP
+
+def getTableDataList(flag):
+    if flag:
+        tableList = []
+        articleList = getAllArticleData()
+        for article in articleList:
+            item = list(article)
+            score = SnowNLP(item[5]).sentiments  # score the article text once
+            if score > 0.5:
+                value = '正面'
+            elif score < 0.5:
+                value = '负面'
+            else:
+                value = '中性'
+            item.append(value)
+            tableList.append(item)
+        return tableList
+    else:
+        return getAllArticleData()
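A hypothetical usage sketch: with flag=True each article row comes back with a sentiment label appended, while a falsy flag returns the raw rows from getAllArticleData unchanged. Column 5 is assumed to hold the article text, as in the code above.

    from utils.getTableData import getTableDataList

    rows = getTableDataList(True)
    for row in rows[:5]:
        print(row[-1], row[5])  # sentiment label, then the article content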