Background: the company needed a large batch of good-looking female avatar images for changing profile pictures in bulk.
Sharing the code here, and padding out a post while I'm at it.
With small changes the code can also fetch other types of avatars; it is not limited to this category (see the note after the first script).
Script 1
import requests
from bs4 import BeautifulSoup
import os
import threading
from urllib.parse import urljoin

# Download a single image
def download_image(img_url, save_dir):
    try:
        img_data = requests.get(img_url, timeout=10).content
        img_name = os.path.join(save_dir, img_url.split("/")[-1])
        with open(img_name, "wb") as f:
            f.write(img_data)
        print(f"Downloaded: {img_url} -> {img_name}")
    except Exception as e:
        print(f"Download failed: {img_url}, error: {e}")

# Scrape the images on a detail page
def scrape_detail_page(detail_url, save_dir):
    try:
        print(f"Scraping detail page: {detail_url}")
        response = requests.get(detail_url, timeout=10)
        if response.status_code != 200:
            print(f"Cannot access detail page: {detail_url}, status code: {response.status_code}")
            return
        soup = BeautifulSoup(response.content, "html.parser")
        img_tags = soup.select("#content p img")  # image tags in the detail-page body
        for img_tag in img_tags:
            # urljoin handles both absolute and relative src values
            img_url = urljoin(detail_url, img_tag["src"])
            download_image(img_url, save_dir)
    except Exception as e:
        print(f"Failed to scrape detail page: {detail_url}, error: {e}")

# Scrape the listing pages and collect all detail-page links
def scrape_images(base_url, save_dir, thread_count):
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    page = 1
    threads = []
    while True:
        try:
            # Build the URL for the current listing page
            url = base_url.replace("_1", f"_{page}")
            print(f"Scraping listing page: {url}")
            response = requests.get(url, timeout=10)
            if response.status_code != 200:
                print(f"Cannot access page: {url}, status code: {response.status_code}")
                break
            soup = BeautifulSoup(response.content, "html.parser")
            detail_links = soup.select("ul.g-gxlist-imgbox li a")
            if not detail_links:
                print("No more detail-page links found, scraping finished.")
                break
            for link in detail_links:
                # urljoin resolves the (root-relative) href against the site
                detail_url = urljoin(url, link["href"])
                # Scrape each detail page in its own thread
                thread = threading.Thread(target=scrape_detail_page, args=(detail_url, save_dir))
                threads.append(thread)
                thread.start()
                # Throttle the number of live threads
                while len(threads) >= thread_count:
                    for t in threads:
                        t.join(0.1)
                    threads = [t for t in threads if t.is_alive()]
            page += 1
        except Exception as e:
            print(f"Failed to scrape page: {url}, error: {e}")
            page += 1  # skip the failing page instead of retrying it forever
            continue
    # Wait for all remaining threads to finish
    for t in threads:
        t.join()

# Example usage
base_url = "https://www.qqtn.com/tx/nvshengtx_1.html"
save_directory = "qqtn_images"
thread_count = 5  # adjust the number of threads as needed
scrape_images(base_url, save_directory, thread_count)
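As noted at the top, switching to a different type of avatar usually only requires pointing base_url at another listing page of the same site (and adjusting the CSS selectors if the markup differs). A minimal sketch follows; the category slug below is a placeholder I made up, not a verified page, and it must keep the "_1" suffix because the pagination logic rewrites it:

# Hypothetical example: reuse scrape_images for another category.
# "nanshengtx_1.html" is a placeholder slug; substitute a real listing URL.
other_base_url = "https://www.qqtn.com/tx/nanshengtx_1.html"
scrape_images(other_base_url, "qqtn_other_images", thread_count)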
Script 2
import requests
from bs4 import BeautifulSoup
import os
import threading
from urllib.parse import urljoin

# Download a single image and save it locally
def download_image(img_url, save_dir):
    try:
        img_data = requests.get(img_url, timeout=10).content
        img_name = os.path.join(save_dir, img_url.split("/")[-1])
        with open(img_name, "wb") as f:
            f.write(img_data)
        print(f"Downloaded: {img_url} -> {img_name}")
    except Exception as e:
        print(f"Download failed: {img_url}, error: {e}")

# Scrape the images on a detail page
def scrape_detail_page(detail_url, save_dir):
    try:
        print(f"Scraping detail page: {detail_url}")
        response = requests.get(detail_url, timeout=10)
        if response.status_code != 200:
            print(f"Cannot access detail page: {detail_url}, status code: {response.status_code}")
            return
        soup = BeautifulSoup(response.content, "html.parser")
        img_tags = soup.find_all("img", alt=True)  # all images that carry an alt attribute
        for img_tag in img_tags:
            # urljoin handles both absolute and relative src values
            img_url = urljoin(detail_url, img_tag["src"])
            download_image(img_url, save_dir)
    except Exception as e:
        print(f"Failed to scrape detail page: {detail_url}, error: {e}")

# Scrape the listing pages and collect the detail-page links
def scrape_images(base_url, save_dir, thread_count):
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    page = 1
    threads = []
    while True:
        try:
            # Build the URL for the current listing page
            url = f"{base_url}index_{page}.html" if page > 1 else base_url
            print(f"Scraping listing page: {url}")
            response = requests.get(url, timeout=10)
            if response.status_code != 200:
                print(f"Cannot access page: {url}, status code: {response.status_code}")
                break
            soup = BeautifulSoup(response.content, "html.parser")
            detail_links = soup.select("ul.g-gxlist-imgbox li a")  # adjust this selector to the target site's markup
            if not detail_links:
                print("No more detail-page links found, scraping finished.")
                break
            for link in detail_links:
                # urljoin resolves relative hrefs against the listing-page URL
                detail_url = urljoin(url, link["href"])
                # Scrape each detail page in its own thread
                thread = threading.Thread(target=scrape_detail_page, args=(detail_url, save_dir))
                threads.append(thread)
                thread.start()
                # Throttle the number of live threads
                while len(threads) >= thread_count:
                    for t in threads:
                        t.join(0.1)
                    threads = [t for t in threads if t.is_alive()]
            page += 1
        except Exception as e:
            print(f"Failed to scrape page: {url}, error: {e}")
            page += 1  # skip the failing page instead of retrying it forever
            continue
    # Wait for all remaining threads to finish
    for t in threads:
        t.join()

# Example usage
base_url = "http://www.imeitou.com/nvsheng/mnns/"
save_directory = "images"
thread_count = 5  # adjust the number of threads as needed
scrape_images(base_url, save_directory, thread_count)
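Both scripts throttle concurrency by polling join(0.1) on a list of threads and send requests without any headers. As a possible refinement (not part of the original code), the sketch below caps the workers with concurrent.futures.ThreadPoolExecutor and adds a browser-like User-Agent, which some image hosts expect; it assumes the scrape_detail_page function from either script above, and the header string is purely illustrative.

from concurrent.futures import ThreadPoolExecutor

# Browser-like header; many image hosts reject requests without a User-Agent.
HEADERS = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}

def scrape_details_with_pool(detail_urls, save_dir, thread_count=5):
    # The executor enforces the worker limit, replacing the manual join(0.1) loop.
    with ThreadPoolExecutor(max_workers=thread_count) as pool:
        for url in detail_urls:
            pool.submit(scrape_detail_page, url, save_dir)
    # Exiting the with-block waits for every submitted task to finish.

# To use HEADERS, pass headers=HEADERS to each requests.get call, e.g.:
# requests.get(detail_url, headers=HEADERS, timeout=10)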