QB torrent download and push without RSS - Python version
Install the required Python 3 libraries:
apt install python3-requests python3-bs4 -y
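If your distribution does not package these libraries, the same dependencies can be installed with pip instead (assuming pip3 is available):

pip3 install requests beautifulsoup4

The full script: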
import os

import requests
from bs4 import BeautifulSoup


def download_torrents(url, keyword):
    # Fetch the listing page
    response = requests.get(url)

    # Check the response status code
    if response.status_code == 200:
        # Parse the HTML content
        soup = BeautifulSoup(response.content, 'html.parser')

        # Collect links whose text contains the keyword
        target_links = []
        for link in soup.find_all('a', href=True):
            if keyword in link.text:
                target_links.append(link['href'])

        # Rewrite the links and download the .torrent files
        download_path = "/root/tr"         # download directory
        log_file_path = "/root/tr/ls.log"  # log file path
        os.makedirs(download_path, exist_ok=True)  # make sure the directory exists

        # Read the list of already downloaded files
        downloaded_files = set()
        if os.path.exists(log_file_path):
            with open(log_file_path, "r") as log_file:
                downloaded_files = set(log_file.read().splitlines())

        for link in target_links:
            # Turn the detail-page link into a download link
            download_link = link.replace("view", "download")
            download_link = "https://222.com" + download_link + ".torrent"

            # Download the .torrent file (only if it does not exist and has not been logged)
            torrent_file_name = download_link.split("/")[-1]
            torrent_file_path = os.path.join(download_path, torrent_file_name)
            if torrent_file_name not in downloaded_files and not os.path.exists(torrent_file_path):
                torrent_response = requests.get(download_link)
                with open(torrent_file_path, "wb") as torrent_file:
                    torrent_file.write(torrent_response.content)

                # Record the downloaded file in the log
                with open(log_file_path, "a") as log_file:
                    log_file.write(torrent_file_name + "\n")
                print(f"{torrent_file_name} downloaded")
            else:
                print(f"{torrent_file_name} already exists or was downloaded before, skipping")
    else:
        print("Failed to fetch the page")


# Target URLs and the keyword to match on each page
urls = [
    ("https://222.com/user/ifbfb", "韩国"),   # Korean releases
    ("https://222.com/user/Runbkk", "FHD"),   # FHD releases
]

for url, keyword in urls:
    download_torrents(url, keyword)
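The script only saves .torrent files to /root/tr, so qBittorrent still has to pick them up. The simplest route is to add /root/tr as a monitored folder in the qBittorrent download settings. Alternatively, each file can be pushed directly through the qBittorrent Web API; below is a minimal sketch, assuming the Web UI is enabled on 127.0.0.1:8080, with the address and the admin/adminadmin credentials as placeholders rather than part of the script above.

import requests

QB_URL = "http://127.0.0.1:8080"   # qBittorrent Web UI address (placeholder)
QB_USER = "admin"                  # placeholder credentials
QB_PASS = "adminadmin"

def push_to_qbittorrent(torrent_file_path):
    session = requests.Session()
    # Log in; qBittorrent returns "Ok." and a session cookie on success
    login = session.post(f"{QB_URL}/api/v2/auth/login",
                         data={"username": QB_USER, "password": QB_PASS})
    if login.status_code != 200 or login.text.strip() != "Ok.":
        print("qBittorrent login failed")
        return
    # Upload the .torrent file; the Web API expects the multipart field named "torrents"
    with open(torrent_file_path, "rb") as f:
        session.post(f"{QB_URL}/api/v2/torrents/add", files={"torrents": f})
    print(f"{torrent_file_path} pushed to qBittorrent")

A call such as push_to_qbittorrent(torrent_file_path) would go right after the file is written in download_torrents. To keep polling the site, schedule the script with cron, for example */30 * * * * /usr/bin/python3 /root/download_torrents.py (adjust the path to wherever you save the script).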