A Python 3 crawler for JD.com product pictures that saves the image files to the local disk.
url="https://search.jd.com/Search?keyword="+key+"&wq="+key+"&page="+str(i*2-1)
'data-lazy-img="(.*?)"'
import os
import re
import urllib.request

import requests

# Crawl JD.com search-result pages for product images and save them locally.
# A browser User-Agent is installed globally, since JD may reject requests
# that identify themselves as scripts.
headers = ("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:80.0) Gecko/20100101 Firefox/80.0")
opener = urllib.request.build_opener()
opener.addheaders = [headers]
urllib.request.install_opener(opener)

keyname = " yanghe "# Enter the product name
key = urllib.request.quote(keyname)  # percent-encode the keyword for the URL

save_dir = 'C:/Users/lishu/Desktop/tensorflow/pc/yh/'
os.makedirs(save_dir, exist_ok=True)  # avoid crashing if the folder is missing

for i in range(1, 2):  # pages to crawl; JD numbers result pages 1, 3, 5, ...
    url = ("https://search.jd.com/Search?keyword=" + key
           + "&wq=" + key + "&page=" + str(i * 2 - 1))
    data = urllib.request.urlopen(url).read().decode("utf-8", "ignore")
    print(data)
    pat = r'data-lazy-img="(.*?)"'  # lazy-loaded image URLs in the page HTML
    imagelist = re.compile(pat).findall(data)
    # BUG FIX: the original looped over range(1, len(imagelist)) and therefore
    # silently skipped imagelist[0] — the first image of every page was never
    # downloaded. enumerate(..., start=1) keeps the 1-based numbering used in
    # the file names while covering every match.
    for j, src in enumerate(imagelist, start=1):
        b1 = src.replace('/n7', '/n0')  # presumably selects a larger image variant — TODO confirm
        print(" The first " + str(i) + " Page No. " + str(j) + " Take... Success ")
        newurl = "http:" + b1
        print(newurl)
        # requests.get is used (instead of urllib.request.urlretrieve) because
        # it works with save paths containing non-ASCII characters.
        r = requests.get(newurl, stream=True)
        with open(save_dir + " The first " + str(i) + " Page No. " + str(j) + " Zhang " + ".jpg", 'wb') as f:
            for chunk in r.iter_content():
                f.write(chunk)
        r.close()  # release the streamed connection
import os
import re
import urllib.request

import requests

# Second (final) version of the JD.com image crawler: fetch search-result
# pages for a keyword, extract the lazy-loaded image URLs, and stream each
# image to a local .jpg file.
keyname = " yanghe "# Enter the product name
key = urllib.request.quote(keyname)  # percent-encode the keyword for the URL

# Send a browser User-Agent so JD does not reject the script's requests.
headers = ("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:80.0) Gecko/20100101 Firefox/80.0")
opener = urllib.request.build_opener()
opener.addheaders = [headers]
urllib.request.install_opener(opener)

save_dir = 'C:/Users/lishu/Desktop/tensorflow/pc/yh/'
os.makedirs(save_dir, exist_ok=True)  # avoid crashing if the folder is missing

for i in range(1, 2):# Crawl pages
    # JD's search results use odd page numbers: page i maps to 2*i - 1.
    url = ("https://search.jd.com/Search?keyword=" + key
           + "&wq=" + key + "&page=" + str(i * 2 - 1))
    data = urllib.request.urlopen(url).read().decode("utf-8", "ignore")
    pat = r'data-lazy-img="(.*?)"'  # lazy-loaded image URLs in the page HTML
    imagelist = re.compile(pat).findall(data)
    # BUG FIX: the original iterated range(1, len(imagelist)), which skipped
    # imagelist[0] — the first image of every page was never saved. enumerate
    # keeps the 1-based numbering for file names without dropping any match.
    for j, src in enumerate(imagelist, start=1):
        b1 = src.replace('/n7', '/n0')  # presumably selects a larger image variant — TODO confirm
        print(" The first " + str(i) + " Page No. " + str(j) + " Take... Success ")
        newurl = "http:" + b1
        print(newurl)
        # requests.get handles save paths with non-ASCII characters, which
        # urllib.request.urlretrieve could not.
        r = requests.get(newurl, stream=True)
        with open(save_dir + " The first " + str(i) + " Page No. " + str(j) + " Zhang " + ".jpg", 'wb') as f:
            for chunk in r.iter_content():
                f.write(chunk)
        r.close()  # release the streamed connection
This mainly works around the problem that urllib.request.urlretrieve() cannot save files to paths containing Chinese characters; requests.get() is used instead to save the pictures locally.