
Scraping Shanghai Second-Hand Housing Data from Lianjia for Analysis and Modeling


1. Analyzing the Page Structure and Writing the Scraper

import requests
import csv
import time
import math
import random
from lxml import etree
from multiprocessing.dummy import Pool
def getPage(url):
    # Sleep for a random 2-3.5 s between requests to reduce the chance of
    # being blocked by anti-scraping measures
    time.sleep(random.choice([2, 2.5, 3, 3.5]))
    page = requests.get(url, headers={
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 OPR/57.0.3098.110"})
    return etree.HTML(page.text)
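# Usage sketch (added for illustration; this snippet is not in the original
# post): getPage returns an lxml element, so callers can query it directly
#   tree = getPage("https://sh.lianjia.com/ershoufang/pudong/")
#   print(tree.xpath("//title/text()"))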
def csvWrite(item):
    # Append one row to the output CSV file
    with open("lianjia_sh_ershoufang_data.csv", "a", encoding="utf-8", newline="") as f:
        csv.writer(f).writerow(item)
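# Added sketch, not in the truncated original: since csvWrite appends, write a
# header row once when the script starts; the column names simply mirror the
# fields collected in region_spider further below
csvWrite(["Area", "Region", "Garden", "Layout", "Acreage",
          "Direction", "Renovation", "Elevator", "Price"])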
def get_areas_regions_urls():
    areas = [
        "pudong",
        "minhang",
        "baoshan",
        "xuhui",
        "putuo",
        "yangpu",
        "changning",
        "songjiang",
        "jiading",
        "huangpu",
        "jingan",
        "zhabei",
        "hongkou",
        "qingpu",
        "fengxian",
        "jinshan",
        "chongming",
        "shanghaizhoubian"]
    areas_regions_urls = []  # list of tuples to return; each tuple holds (area, region name, url)
    for area in areas:
        page = getPage("https://sh.lianjia.com/ershoufang/" + area)
        region_names = page.xpath("/html/body/div[3]/div/div[1]/dl[2]/dd/div[1]/div[2]/a/text()")  # region display names
        region_urls = page.xpath("/html/body/div[3]/div/div[1]/dl[2]/dd/div[1]/div[2]/a/@href")  # the regions' relative urls
        for name, url in zip(region_names, region_urls):
            # Build the tuple and append it to the result list
            areas_regions_urls.append((area, name, "https://sh.lianjia.com" + url))
            # print(area, name, "https://sh.lianjia.com" + url)
        # print("Region urls in Area {} have been added!".format(area))
    print("All region urls have been added")
    return areas_regions_urls
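# Illustration (an assumed example; the real region list comes from the live
# page): the returned tuples are shaped like
#   ("pudong", "北蔡", "https://sh.lianjia.com/ershoufang/beicai/")
# and region_spider below unpacks them as x[0], x[1] and x[2]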
def region_spider(x):
    # Read the total number of listings in this region
    info_num = int(getPage(x[2]).xpath("/html/body/div[4]/div[1]/div[2]/h2/span/text()")[0])
    # Compute the number of result pages (each page holds at most 30 listings)
    page_num = math.ceil(info_num / 30)
    # print("{} has {} listings across {} pages".format(x[1], info_num, page_num))
    for url in [x[2]+"pg" + str(num+1) for num in range(page_num)]:
        page = getPage(url)
        for house in page.xpath("/html/body/div[4]/div[1]/ul/li"):
            try:
                # print(house.xpath("div[1]/div[1]/a/text()")[0])
                # x is one tuple from the list returned by get_areas_regions_urls():
                # x[0] is the area, x[1] is the region, x[2] is the url
                Area = x[0]
                Region = x[1]
                info = house.xpath("div[1]/div[2]/div/text()")[0].split("|")
                # Villa listings use slightly different markup than ordinary
                # listings, so branch on the listing type ("别墅" = villa) here
                if info[1].strip()[-2:] == "别墅":
                    Garden = house.xpath("div[1]/div[2]/div/a/text()")[0]
                    Layout = info[2]
                    Acreage = info[3].strip()
                    Direction = info[4].strip()
                    Renovation = info[5].strip()
                    Elevator = info[6].strip()
                    # The original post is truncated from this point on; the
                    # price xpath below is an assumed reconstruction modelled on
                    # the other fields, not taken from the original
                    Price = int(house.xpath("div[1]/div[6]/div[1]/span/text()")[0])
                    csvWrite([Area, Region, Garden, Layout, Acreage,
                              Direction, Renovation, Elevator, Price])
                # (the branch for ordinary, non-villa listings is lost to the
                # truncation of the original post)
            except IndexError:
                # Skip listings whose markup deviates from the expected structure
                continue
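The Pool imported from multiprocessing.dummy at the top never appears in the surviving snippet, which is cut off above. As a minimal sketch of how the pieces could be tied together, assuming a thread pool of four workers (the pool size and the __main__ guard are my additions, not from the original post):

if __name__ == "__main__":
    # Collect every (area, region, url) tuple, then crawl the regions in
    # parallel; multiprocessing.dummy.Pool is a thread pool with the Pool API,
    # a reasonable fit for network-bound work like this
    pool = Pool(4)  # pool size is an assumption
    pool.map(region_spider, get_areas_regions_urls())
    pool.close()
    pool.join()
    print("All regions have been crawled")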
