Python 官方文档:入门教程 => 点击学习
爬取单张图片 # 爬取单张图片import requests # 导入requests库url = "https://file.lsjlt.com/upload/f/202309/12/54vwhbwy2re.jpg" # 图片地址……
爬取单张图片
# Download a single image to img/test1.jpg.
import requests  # HTTP client
import os

url = "https://file.lsjlt.com/upload/f/202309/12/54vwhbwy2re.jpg"  # image URL

# Bug fix: the original opened "img/test1.jpg" without ensuring the "img"
# directory exists, which raises FileNotFoundError on a fresh run.
os.makedirs("img", exist_ok=True)

response = requests.get(url, timeout=30)  # fetch the image (bounded wait)
response.raise_for_status()  # fail loudly on HTTP errors instead of saving an error page

with open("img/test1.jpg", "wb") as f:  # wb: write binary
    f.write(response.content)  # persist the raw image bytes
print("图片下载完成")
爬取批量图片
# Batch-download every image linked from one listing page.
import requests  # HTTP client
import os  # filesystem helpers
from bs4 import BeautifulSoup  # HTML parser

name_path = 'img2'  # output folder for downloaded images
if not os.path.exists(name_path):  # create the folder on first run
    os.mkdir(name_path)


def getUrl():
    """Fetch the listing page and download every image item it contains."""
    # Bug fix: the original scheme was "Https://"; normalized to lowercase.
    url = "https://sc.chinaz.com/tupian/gudianmeinvtupian.html"
    response = requests.get(url, timeout=30)
    response.raise_for_status()  # surface HTTP errors early
    img_txt = BeautifulSoup(response.content, "html.parser")  # parse the page
    find = img_txt.find("div", attrs={'class': 'tupian-list com-img-txt-list'})  # image list container
    find_all = find.find_all("div", attrs={'class': 'item'})  # one div per image
    for i in find_all:
        url = 'https:' + i.find('img').get('data-original')  # image URL (protocol-relative in markup)
        name = i.find('a').text  # image title, used as the file name
        try:
            getImg(url, name)
        except (requests.RequestException, OSError):
            # Bug fix: replaced the bare "except:" — it also swallowed
            # KeyboardInterrupt/SystemExit. Only download/file errors are
            # best-effort skipped here.
            print("下载失败")
            continue  # skip this image and keep going


def getImg(ImageUrl, ImageName):
    """Download one image and save it as <name_path>/<ImageName>.jpg."""
    response = requests.get(ImageUrl, timeout=30)
    response.raise_for_status()  # let the caller's handler see HTTP failures
    with open(f'{name_path}/{ImageName}.jpg', 'wb') as f:  # wb: write binary
        f.write(response.content)
    print(ImageName, "下载完成")


if __name__ == '__main__':
    getUrl()
如果一个网页的图片很多,可以进行分页爬取
# Paginated crawl: download images from pages 1..N of the listing.
import requests  # HTTP client
import os  # filesystem helpers
from bs4 import BeautifulSoup  # HTML parser

name_path = 'img2'  # output folder for downloaded images
if not os.path.exists(name_path):  # create the folder on first run
    os.mkdir(name_path)

Sum = 0  # running count of successfully downloaded images


def getUrl(num):
    """Download every image on listing page *num* (1-based page index)."""
    if str(num) == '1':  # page 1 has no numeric suffix on this site
        url = "https://sc.chinaz.com/tupian/gudianmeinvtupian.html"
    else:
        url = f"https://sc.chinaz.com/tupian/gudianmeinvtupian_{num}.html"
    response = requests.get(url, timeout=30)
    response.raise_for_status()  # surface HTTP errors early
    img_txt = BeautifulSoup(response.content, "html.parser")  # parse the page
    find = img_txt.find("div", attrs={'class': 'tupian-list com-img-txt-list'})  # image list container
    find_all = find.find_all("div", attrs={'class': 'item'})  # one div per image
    for i in find_all:
        url = 'https:' + i.find('img').get('data-original')  # image URL (protocol-relative in markup)
        name = i.find('a').text  # image title, used as the file name
        try:
            getImg(url, name)
        except (requests.RequestException, OSError):
            # Bug fix: replaced the bare "except:" — it also swallowed
            # KeyboardInterrupt/SystemExit.
            print("下载失败")
            continue  # skip this image and keep going


def getImg(ImageUrl, ImageName):
    """Download one image, save it, and increment the global counter."""
    response = requests.get(ImageUrl, timeout=30)
    response.raise_for_status()  # let the caller's handler see HTTP failures
    with open(f'{name_path}/{ImageName}.jpg', 'wb') as f:  # wb: write binary
        f.write(response.content)
    print(ImageName, "下载完成")
    global Sum
    Sum += 1  # only counted after a fully successful save


if __name__ == '__main__':
    num = input("请输入要爬取的总页数:[1-7]\n")
    if int(num) > 7:
        print("输入有误,最大为7")
        exit()
    else:
        for i in range(1, int(num) + 1):
            # Bug fix: the original called getUrl(num), crawling the same
            # (user-entered) page on every iteration; pass the loop index i
            # so each page 1..num is actually fetched.
            getUrl(i)
            print(f"第{i}页爬取完成")
        print(f"共下载{Sum}张图片")
来源地址:https://blog.csdn.net/weixin_46713492/article/details/131214432
--结束END--
本文标题: 用python爬取某个图片网站的图片
本文链接: https://www.lsjlt.com/news/404265.html(转载时请注明来源链接)
有问题或投稿请发送至: 邮箱/279061341@qq.com QQ/279061341
下载Word文档到电脑,方便收藏和打印~
2024-03-01
2024-03-01
2024-03-01
2024-02-29
2024-02-29
2024-02-29
2024-02-29
2024-02-29
2024-02-29
2024-02-29
回答
回答
回答
回答
回答
回答
回答
回答
回答
回答
0