
My Python crawler code example (1)


Hello everyone, nice to meet you again. I'm your friend Quan Jun.

This crawler pulls listings matching a set of filters for Shijiazhuang from the Lianjia website and saves them to a file. Each listing record includes the title, floor area, total price, district, inside (usable) area, and so on; the district and the inside area have to be computed from the detail page.

It mainly uses the third-party modules requests and BeautifulSoup; a quick web search will turn up the details of how to use them.
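
For reference, here is a minimal sketch of the fetch-and-parse pattern both scripts below are built on. The URL and the 'title' class are placeholders for illustration, not the real page structure:

# Minimal requests + BeautifulSoup sketch: fetch a page, parse it, pull out text.
# The URL and the selector are illustrative only.
import requests
from bs4 import BeautifulSoup

res = requests.get('https://example.com/', headers={'User-Agent': 'Mozilla/5.0'})
soup = BeautifulSoup(res.content, 'lxml')  # requires the lxml parser to be installed
for div in soup.find_all('div', class_='title'):
    print(div.get_text())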

The first edition was written in April 2019 and no longer works against the current site.

The second edition was written in December.

The first edition

#!/usr/bin/python
from bs4 import BeautifulSoup
import requests

def getHouseList(url):
    house = []
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER'}
    # Fetch the listing page
    res = requests.get(url, headers=headers)
    # Parse the page content
    soup = BeautifulSoup(res.content, 'lxml')
    # Listing titles
    housename_divs = soup.find_all('div', class_='title')
    for housename_div in housename_divs:
        housename_as = housename_div.find_all('a')
        for housename_a in housename_as:
            housename = []
            # Title
            housename.append(housename_a.get_text())
            # Hyperlink to the detail page
            housename.append(housename_a['href'])
            house.append(housename)
    huseinfo_divs = soup.find_all('div', class_='houseInfo')
    for i in range(len(huseinfo_divs)):
        info = huseinfo_divs[i].get_text()
        infos = info.split('|')
        # Community name
        house[i].append(infos[0])
        # Layout (rooms)
        house[i].append(infos[1])
        # Size in square meters
        house[i].append(infos[2])
    # Total asking price
    house_prices = soup.find_all('div', class_='totalPrice')
    for i in range(len(house_prices)):
        # Price
        price = house_prices[i].get_text()
        house[i].append(price)
    return house

# Crawl the detail page for the district and the inside (usable) area
def houseinfo(url):
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER'}
    res = requests.get(url, headers=headers)
    soup = BeautifulSoup(res.content, 'lxml')
    msg = []
    # District
    areainfos = soup.find_all('span', class_='info')
    for areainfo in areainfos:
        # Only the text of the first <a> tag is needed
        area = areainfo.find('a')
        if not area:
            continue
        hrefStr = area['href']
        if hrefStr.startswith('javascript'):
            continue
        msg.append(area.get_text())
        break
    # Sum the per-room sizes to get the inside area
    infolist = soup.find_all('div', id='infoList')
    num = []
    for info in infolist:
        cols = info.find_all('div', class_='col')
        for i in cols:
            pingmi = i.get_text()
            try:
                a = float(pingmi[:-2])
                num.append(a)
            except ValueError:
                continue
    msg.append(sum(num))
    return msg

# Write one listing record to a txt file
def writeFile(houseinfo):
    f = open('d:/house.txt', 'a', encoding='utf8')
    f.write(houseinfo + '\n')
    f.close()

# Main function
def main():
    for i in range(1, 100):
        print('----- page', i, '-------')
        if i == 1:
            url = 'https://sjz.lianjia.com/ershoufang/hy1f2f5sf1l3l2l4a2a3a4/'
        else:
            url = 'https://sjz.lianjia.com/ershoufang/pg' + str(i) + 'hy1f2f5sf1l3l2l4a2a3a4/'
        houses = getHouseList(url)
        for house in houses:
            link = house[1]
            if not link.startswith('http'):
                continue
            mianji = houseinfo(link)
            # Append the district and inside area to the listing record
            house.extend(mianji)
            print(house)
            info = " ".join([str(x) for x in house])
            writeFile(info)

if __name__ == '__main__':
    main()

Lianjia reports 8,849 listings matching this query, but the site only serves 31 (listings per page) × 100 (maximum page number) = 3,100 of them; I found no way to reach the rest.
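
The arithmetic behind that cap, as a tiny sketch; one workaround (not attempted here) would be to split the query into narrower filters so each result set stays under the cap:

# Only MAX_PAGES pages are served, PAGE_SIZE listings each, so anything
# beyond PAGE_SIZE * MAX_PAGES is unreachable for a single query.
PAGE_SIZE = 31   # listings per page, as observed above
MAX_PAGES = 100  # deepest page the site will serve
total = 8849     # listings reported for this query
reachable = min(total, PAGE_SIZE * MAX_PAGES)
print(reachable, 'of', total, 'listings reachable')  # 3100 of 8849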

The second edition

Get the listings of a single residential community and write them to an Excel file.
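
As a quick reference before the full script, the xlwt calls it relies on boil down to this minimal sketch (the sheet name, cell values, and output path are examples):

# Minimal xlwt sketch: one workbook, one sheet, cell-by-cell writes.
import xlwt

workbook = xlwt.Workbook()
sheet = workbook.add_sheet('demo')  # create a worksheet
sheet.write(0, 0, 'Title')          # write(row, column, value)
sheet.write(0, 1, 'Total price')
sheet.write(1, 0, 'Example listing')
sheet.write(1, 1, '120万')
workbook.save('demo.xls')           # xlwt produces legacy .xls files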

#!/usr/bin/python
from bs4 import BeautifulSoup
import requests
import xlwt

def getHouseList(url):
    house = []
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER'}
    # Fetch the listing page
    res = requests.get(url, headers=headers)
    # Parse the page content
    soup = BeautifulSoup(res.content, 'html.parser')
    # Listing titles
    housename_divs = soup.find_all('div', class_='title')
    for housename_div in housename_divs:
        housename_as = housename_div.find_all('a')
        for housename_a in housename_as:
            housename = []
            # Title
            housename.append(housename_a.get_text())
            # Hyperlink to the detail page
            housename.append(housename_a.get('href'))
            house.append(housename)
    huseinfo_divs = soup.find_all('div', class_='houseInfo')
    for i in range(len(huseinfo_divs)):
        info = huseinfo_divs[i].get_text()
        infos = info.split('|')
        # Community name
        house[i].append(infos[0])
        # Layout (rooms)
        house[i].append(infos[1])
        # Size in square meters
        house[i].append(infos[2])
    # Total asking price
    house_prices = soup.find_all('div', class_='totalPrice')
    for i in range(len(house_prices)):
        # Price
        price = house_prices[i].get_text()
        house[i].append(price)
    return house

# Crawl the detail page for the district and the inside (usable) area
def houseinfo(url):
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER'}
    res = requests.get(url, headers=headers)
    soup = BeautifulSoup(res.content, 'html.parser')
    msg = []
    # District
    areainfos = soup.find_all('span', class_='info')
    for areainfo in areainfos:
        # Only the text of the first <a> tag is needed
        area = areainfo.find('a')
        if not area:
            continue
        hrefStr = area['href']
        if hrefStr.startswith('javascript'):
            continue
        msg.append(area.get_text())
        break
    # Sum the per-room sizes to get the inside area
    infolist = soup.find_all('div', id='infoList')
    num = []
    for info in infolist:
        cols = info.find_all('div', class_='col')
        for i in cols:
            pingmi = i.get_text()
            try:
                a = float(pingmi[:-2])
                num.append(a)
            except ValueError:
                continue
    msg.append(sum(num))
    return msg

# Write the listing records to an Excel file
def writeExcel(excelPath, houses):
    workbook = xlwt.Workbook()
    # Add the first sheet
    sheet = workbook.add_sheet('git')
    # Column headers, matching the fields collected above
    row0 = ['Title', 'Link', 'Community', 'Layout', 'Size', 'Total price', 'District', 'Inside area']
    for i in range(0, len(row0)):
        sheet.write(0, i, row0[i])
    for i in range(0, len(houses)):
        house = houses[i]
        print(house)
        for j in range(0, len(house)):
            sheet.write(i + 1, j, house[j])
    workbook.save(excelPath)

# Main function
def main():
    data = []
    for i in range(1, 5):
        print('----- page', i, '-------')
        if i == 1:
            url = 'https://sjz.lianjia.com/ershoufang/l2rs%E5%92%8C%E5%B9%B3%E4%B8%96%E5%AE%B6/'
        else:
            url = 'https://sjz.lianjia.com/ershoufang/pg' + str(i) + 'l2rs%E5%92%8C%E5%B9%B3%E4%B8%96%E5%AE%B6/'
        houses = getHouseList(url)
        for house in houses:
            link = house[1]
            if not link or not link.startswith('http'):
                continue
            mianji = houseinfo(link)
            # Append the district and inside area to the listing record
            house.extend(mianji)
        data.extend(houses)
    writeExcel('d:/house.xls', data)

if __name__ == '__main__':
    main()

Publisher: Full-Stack Programmer. Please credit the source when reprinting: https://javaforall.cn/133817.html (original link: https://javaforall.cn).

