python3爬蟲京東圖片,並保存圖片文件至本地。
url="https://search.jd.com/Search?keyword="+key+"&wq="+key+"&page="+str(i*2-1)
'data-lazy-img="(.*?)"'
"""Crawl JD.com search-result pages for a keyword and save product images locally.

The search pages are fetched with urllib (with a browser User-Agent installed
globally), and each image is downloaded with requests in streaming mode —
urllib.request.urlretrieve() cannot write to paths containing non-ASCII
(Chinese) directory names, hence the mixed approach.
"""
import urllib.request
import re

import requests

# Browser User-Agent so JD does not reject the request as a bot.
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:80.0) Gecko/20100101 Firefox/80.0"
opener = urllib.request.build_opener()
opener.addheaders = [("User-Agent", USER_AGENT)]
urllib.request.install_opener(opener)

keyname = "洋河"  # product keyword to search for
key = urllib.request.quote(keyname)  # percent-encode for use in the URL

for i in range(1, 2):
    # JD numbers result pages 1, 3, 5, ... so logical page i maps to 2*i-1.
    url = ("https://search.jd.com/Search?keyword=" + key
           + "&wq=" + key + "&page=" + str(i * 2 - 1))
    data = urllib.request.urlopen(url).read().decode("utf-8", "ignore")
    print(data)
    # Lazy-loaded image URLs live in the data-lazy-img attribute.
    pat = r'data-lazy-img="(.*?)"'
    imagelist = re.compile(pat).findall(data)
    # enumerate from 1: covers every match (the original started at index 1
    # and silently skipped the first image) while keeping 1-based file names.
    for j, img in enumerate(imagelist, 1):
        # /n7 is a small thumbnail size class; /n0 is the full-size image.
        b1 = img.replace('/n7', '/n0')
        newurl = "http:" + b1  # attribute values are protocol-relative (//img...)
        print(newurl)
        # Send the same User-Agent for the image request as for the page fetch.
        r = requests.get(newurl, stream=True, headers={"User-Agent": USER_AGENT})
        try:
            with open('C:/Users/lishu/Desktop/tensorflow/pc/yh/' + "第" + str(i) + "頁第" + str(j) + "張" + ".jpg", 'wb') as f:
                # Chunked copy instead of the default byte-by-byte iteration.
                for chunk in r.iter_content(chunk_size=8192):
                    f.write(chunk)
        finally:
            r.close()
        # Report success only after the file has actually been written.
        print("第" + str(i) + "頁第" + str(j) + "張爬取成功")
"""Second variant of the JD.com image crawler (no debug dump of the page HTML).

Fetches search-result pages via urllib with an installed browser User-Agent,
extracts lazy-loaded image URLs with a regex, and streams each image to disk
with requests (urllib.request.urlretrieve() fails on non-ASCII save paths).
"""
import urllib.request
import re

import requests

keyname = "洋河"  # product keyword to search for
key = urllib.request.quote(keyname)  # percent-encode for the query string

# Present a browser User-Agent so the site serves the normal search page.
ua = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:80.0) Gecko/20100101 Firefox/80.0"
opener = urllib.request.build_opener()
opener.addheaders = [("User-Agent", ua)]
urllib.request.install_opener(opener)

for i in range(1, 2):  # number of result pages to crawl
    # JD paginates with odd page numbers: logical page i is site page 2*i-1.
    url = ("https://search.jd.com/Search?keyword=" + key
           + "&wq=" + key + "&page=" + str(i * 2 - 1))
    data = urllib.request.urlopen(url).read().decode("utf-8", "ignore")
    # Image URLs are stored in the data-lazy-img attribute of <img> tags.
    imagelist = re.findall(r'data-lazy-img="(.*?)"', data)
    # Start at 1 via enumerate: the original range(1, len(...)) skipped the
    # first image; this keeps the 1-based numbering in the file names.
    for j, src in enumerate(imagelist, 1):
        # Swap the /n7 thumbnail path segment for /n0 (full-size image).
        b1 = src.replace('/n7', '/n0')
        newurl = "http:" + b1  # the scraped URLs are protocol-relative
        print(newurl)
        # Reuse the same User-Agent for the image download.
        r = requests.get(newurl, stream=True, headers={"User-Agent": ua})
        try:
            with open('C:/Users/lishu/Desktop/tensorflow/pc/yh/' + "第" + str(i) + "頁第" + str(j) + "張" + ".jpg", 'wb') as f:
                # Write in chunks rather than one byte per iteration.
                for chunk in r.iter_content(chunk_size=8192):
                    f.write(chunk)
        finally:
            r.close()
        # Only claim success once the image is fully saved.
        print("第" + str(i) + "頁第" + str(j) + "張爬取成功")
主要針對urllib.request.urlretrieve()文件路徑不能保存中文目錄的情況,使用requests.get()保存圖片到本地。