大概算是个副业推荐?按照前两天在坛里看到的帖子的逻辑,省略了数据库部分,直接让 GPT 给我写了一个代码。用了一下能出文章,不过这些文章有没有人看就见仁见智了。
import json
import logging
import os
import re
from collections import Counter

import requests
# Configure logging: timestamped INFO-level messages.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# OpenAI-compatible API credentials and endpoint.
# NOTE(review): never hardcode real secrets — prefer the environment, falling
# back to the original placeholders so existing behavior is unchanged.
api_key = os.environ.get('OPENAI_API_KEY', 'sk-xxxxxx')
api_base = os.environ.get('OPENAI_API_BASE', "https://gpt.com")
# 定义函数:从指定API获取热搜信息
def fetch_hot_topics():
    """Fetch trending-topic titles from the configured hot-list APIs.

    Returns:
        list[str]: all topic titles collected across every source. A source
        that fails (network error, bad status, malformed JSON) is logged and
        skipped, so the result may be empty but the call never raises.
    """
    urls = [
        'https://api.suxun.site/api/hotlist?id=douyin',
        'https://api.suxun.site/api/hotlist?id=weibo'
        # More data sources can be appended here.
    ]
    hot_topics = []
    for url in urls:
        try:
            # Timeout keeps one dead endpoint from hanging the whole run.
            response = requests.get(url, timeout=10)
            response.raise_for_status()  # Raise on 4xx/5xx.
            topics = response.json().get('data', [])
            # Guard against entries that lack a 'title' key.
            hot_topics.extend(
                topic['title'] for topic in topics if 'title' in topic
            )
        except (requests.RequestException, ValueError) as e:
            # ValueError also covers a non-JSON response body.
            logging.error(f"请求 {url} 失败: {e}")
    return hot_topics
# 定义函数:预处理和标准化话题
def preprocess_topics(topics):
    """Normalize topics: strip all non-word characters, then lowercase."""
    normalized = []
    for raw in topics:
        stripped = re.sub(r'\W+', '', raw)
        normalized.append(stripped.lower())
    return normalized
# 定义函数:合并相同类似的热门话题
def merge_topics(topics):
    """Ask the chat model to cluster similar topics onto shared lines.

    The model is prompted to put similar topics on one comma-separated line
    and distinct topics on separate lines.

    Args:
        topics: iterable of normalized topic strings.

    Returns:
        list[str]: the model reply split on newlines, or [] on any failure.
    """
    headers = {
        'Content-Type': 'application/json',
        'Authorization': f'Bearer {api_key}',
    }
    data = {
        "model": "gpt-4-turbo",
        "messages": [
            {"role": "system", "content": "你是一个专业的文本处理助手。我将给你一些话题列表,请将相似的话题用,间隔后放在同一行,不同的话题换行"},
            {"role": "user", "content": f"我给出的话题是:\n{topics}\n"}
        ],
        "max_tokens": 8000
    }
    try:
        # Generous timeout: the completion can be slow, but must not hang.
        response = requests.post(
            f"{api_base}/chat/completions", headers=headers, json=data,
            timeout=120,
        )
        response.raise_for_status()  # Raise on 4xx/5xx.
        response_data = response.json()
        return response_data['choices'][0]['message']['content'].split('\n')
    except requests.RequestException as e:
        logging.error(f"请求合并话题失败: {e}")
    except (KeyError, IndexError) as e:
        # IndexError: 'choices' can come back empty.
        logging.error(f"响应中缺少预期的字段: {e}")
    except json.JSONDecodeError as e:
        logging.error(f"JSON 解析错误: {e}")
    return []
# 定义函数:获取最热的10条话题
def get_top_10_topics(merged_topics):
    """Return the 10 merged lines containing the most topics.

    A line's "heat" is approximated by how many comma separators (ASCII or
    fullwidth) it carries — more merged-in topics means a hotter cluster.
    Spaces are removed from the returned lines.
    """
    def separator_count(line):
        # Both ',' and the fullwidth ',' delimit merged topics.
        return line.count(',') + line.count(',')

    ranked = sorted(merged_topics, key=separator_count, reverse=True)
    return [line.replace(' ', '') for line in ranked[:10]]
# 定义函数:搜索新闻信息
def search_article(summary):
    """Ask a search-capable model for recent news related to *summary*.

    Args:
        summary: the topic string to search for.

    Returns:
        str: the model's digest of up to 10 related news items, or the
        fallback message "无法获取新闻信息" on any failure.
    """
    headers = {
        'Content-Type': 'application/json',
        'Authorization': f'Bearer {api_key}',
    }
    data = {
        "model": "kimi",
        "stream": False,
        "messages": [
            {"role": "system", "content": "你是一个专业的AI助手。"},
            {"role": "user", "content": f"搜索:\n{summary}\n相关的最新10条新闻,给出每条新闻的大致内容及其要点"}
        ],
        "max_tokens": 8000
    }
    try:
        # Timeout keeps a stalled completion from blocking the pipeline.
        response = requests.post(
            f"{api_base}/chat/completions", headers=headers, json=data,
            timeout=120,
        )
        response.raise_for_status()  # Raise on 4xx/5xx.
        response_data = response.json()
        return response_data['choices'][0]['message']['content'].strip()
    except (requests.RequestException, ValueError) as e:
        # ValueError also covers a non-JSON response body.
        logging.error(f"请求搜索新闻信息失败: {e}")
    except (KeyError, IndexError) as e:
        # IndexError: 'choices' can come back empty.
        logging.error(f"响应中缺少预期的字段: {e}")
    return "无法获取新闻信息"
# 定义函数:写文章
def write_article(summary):
    """Generate a light-hearted article from a news digest.

    Args:
        summary: news digest text produced by search_article().

    Returns:
        str: the generated article (title, intro, body, conclusion), or the
        fallback string "无法生成文章" on any failure. The original returned
        a (str, str) tuple on failure, making the return type inconsistent;
        a single string keeps callers like save_article() working while
        fixing that inconsistency.
    """
    headers = {
        'Content-Type': 'application/json',
        'Authorization': f'Bearer {api_key}',
    }
    data = {
        "model": "gpt-4o",
        "messages": [
            {"role": "system", "content": "你是一个资深的自媒体创作者"},
            {"role": "user", "content": f"新闻内容:{summary} \n 根据新闻内容,写一篇贴近生活、幽默风趣的文章,内容包括标题、引言、正文和结论。"}
        ],
        "max_tokens": 8000
    }
    try:
        # Timeout keeps a stalled completion from blocking the pipeline.
        response = requests.post(
            f"{api_base}/chat/completions", headers=headers, json=data,
            timeout=120,
        )
        response.raise_for_status()  # Raise on 4xx/5xx.
        response_data = response.json()
        return response_data['choices'][0]['message']['content'].strip()
    except (requests.RequestException, ValueError) as e:
        # ValueError also covers a non-JSON response body.
        logging.error(f"请求写文章失败: {e}")
    except (KeyError, IndexError) as e:
        # IndexError: 'choices' can come back empty.
        logging.error(f"响应中缺少预期的字段: {e}")
    return "无法生成文章"
# 定义函数:保存文章为txt文件
def save_article(topic, article):
    """Write *article* to '<alnum-only topic>.txt' as UTF-8.

    *article* may be a string or a tuple of parts (joined by spaces);
    anything else raises TypeError.
    """
    safe_name = ''.join(ch for ch in topic if ch.isalnum())
    path = f'{safe_name}.txt'
    if isinstance(article, str):
        text = article
    elif isinstance(article, tuple):
        text = ' '.join(str(part) for part in article)
    else:
        raise TypeError("Unsupported type for article")
    with open(path, 'w', encoding='utf-8') as fh:
        fh.write(text)
# 执行整个流程
def run_once():
    """One full pipeline pass: fetch → normalize → merge → rank → publish."""
    raw_topics = fetch_hot_topics()
    cleaned = preprocess_topics(raw_topics)
    merged = merge_topics(cleaned)
    for topic in get_top_10_topics(merged):
        news_digest = search_article(topic)
        article = write_article(news_digest)
        save_article(topic, article)
        print(f"已生成文章并保存为 {topic}.txt")
# Entry point: run one pipeline pass only when executed as a script,
# so importing this module does not trigger network calls.
if __name__ == "__main__":
    run_once()
效果如下