Python Crawler Practice Source Code

2020-10-20 12:01:02

Python crawlers

1. XPath

58同城 second-hand housing - XPath

import requests
from lxml import etree
if __name__=="__main__":
    url='https://jh.58.com/ershoufang/'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'
    }
    response = requests.get(url=url,headers=headers).text
    tree=etree.HTML(response)
    li_list=tree.xpath('//ul[@class="house-list-wrap"]/li')
    for li in li_list:
        title=li.xpath('./div[2]/h2/a/text()')[0]
        print(title)

City air quality - XPath

import requests
from lxml import etree
headers={
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'
    }
url='https://www.aqistudy.cn/historydata/'
page_text=requests.get(url=url,headers=headers).text
tree=etree.HTML(page_text)
hot_city=tree.xpath('//div[@class="bottom"]/ul/li/a/text()')
all_city=tree.xpath('//div[@class="bottom"]/ul/div[2]/li/a/text()')
print(hot_city)
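
As a side note, lxml's XPath supports the union operator |, so the two queries above could also be merged into a single call. A minimal sketch assuming the same page structure (hot_and_all is just an illustrative name):

# both lists pulled in one query using the XPath union operator "|"
hot_and_all = tree.xpath('//div[@class="bottom"]/ul/li/a/text() | //div[@class="bottom"]/ul/div[2]/li/a/text()')
print(hot_and_all)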

Image saving - XPath

import requests
from lxml import etree
import os
dirname='star1'
if not os.path.exists(dirname):
    os.mkdir(dirname)
url='http://pic.netbian.com/4kmingxing/index_%d.html'  # url template for crawling multiple pages
for i in range(1,6):
    if i==1:
        new_url='http://pic.netbian.com/4kmingxing/'
    else:
        new_url = url % i
    # url='http://pic.netbian.com/4kmingxing/' to crawl a single page only
    headers={
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'
    }
    response=requests.get(url=new_url,headers=headers)
    response.encoding='gbk'
    page_text=response.text
    tree=etree.HTML(page_text)
    li_list=tree.xpath('//div[@class="slist"]/ul/li')
    for li in li_list:
        title=li.xpath('./a/img/@alt')[0]+'.jpg'
        img_src='http://pic.netbian.com'+li.xpath('./a/img/@src')[0]
        img_data=requests.get(url=img_src,headers=headers).content
        imgpath=dirname+'/'+title
        with open(imgpath,'wb') as fp:
            fp.write(img_data)
        print(title, 'saved successfully!')

2. Coroutines

Using await

# Example 1
'''import asyncio
async def func():
    print("Please wait...")
    response = await asyncio.sleep(2)
    print("Welcome", response)
asyncio.run(func())'''

# Example 2
'''import asyncio
async def others():
    print("start")
    await asyncio.sleep(2)
    print("end")
    return "return value"
async def func():
    print("Running the body of the coroutine function")
    response = await others()
    print("IO request finished, result:", response)
asyncio.run(func())'''


# Example 3
import asyncio
async def others():
    print("start")
    await asyncio.sleep(2)
    print("end")
    return "return value"
async def func():
    print("Running the body of the coroutine function")
    response1 = await others()
    print("IO request finished, result:", response1)
    response2 = await others()
    print("IO request finished, result:", response2)
asyncio.run(func())

Coroutine basics

import asyncio
import time
async def get_request(url):
    print("Requesting:", url)
    time.sleep(2)
    print("Request finished!")
    return 'jackson'
def back(t):
    # result() returns the return value of the special (coroutine) function
    print('t.result returns:', t.result())
if __name__=="__main__":
    # this is a coroutine object
    c = get_request('www.baidu.com')
    # a task object is a further wrapper around the coroutine
    task = asyncio.ensure_future(c)
    # bind a callback function
    task.add_done_callback(back)
    # create an event loop object
    loop = asyncio.get_event_loop()
    # register the task object on the event loop and start the loop
    loop.run_until_complete(task)
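
Two caveats worth noting: time.sleep(2) is a blocking call, so this block runs but never actually yields control to the event loop (the multi-task section below swaps it for asyncio.sleep), and on newer Python versions the get_event_loop()/ensure_future pattern shown here is deprecated in favour of asyncio.run as the entry point.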

Task objects

'''import asyncio
async def func():
    print(1)
    await asyncio.sleep(2)
    print(2)
    return "return value"
async def main():
    print("main starts")
    task1 = asyncio.create_task(func())
    task2 = asyncio.create_task(func())
    print("main ends")
    re1 = await task1
    re2 = await task2
    print(re1, re2)
asyncio.run(main())'''



import asyncio
async def func():
    print(1)
    await asyncio.sleep(2)
    print(2)
    return "return value"
async def main():
    print("main starts")
    task_list=[
        asyncio.create_task(func()),
        asyncio.create_task(func())
    ]
    print("main ends")
    result = await asyncio.wait(task_list)
    print(result)
asyncio.run(main())
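
For reference, asyncio.wait returns a (done, pending) pair of task sets rather than the plain return values, which is why the print above shows Task objects. A minimal sketch of unpacking the individual results (main2 is just an illustrative name, reusing func from above):

async def main2():
    task_list = [asyncio.create_task(func()), asyncio.create_task(func())]
    done, pending = await asyncio.wait(task_list)
    for t in done:
        print(t.result())
# asyncio.run(main2())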

greenlet

from greenlet import greenlet
def func1():
    print(1)
    res2.switch()
    print(2)
    res2.switch()
def func2():
    print(3)
    res1.switch()
    print(4)
res1=greenlet(func1)
res2=greenlet(func2)
res1.switch()
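
Running this prints 1, 3, 2, 4: each switch() hands control to the other greenlet and resumes it exactly where it last paused.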

yield

def func1():
    yield 1
    yield from func2()
    yield 2
def func2():
    yield 3
    yield 4
f1=func1()
for item in f1:
    print(item)
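
Running this prints 1, 3, 4, 2: yield from transparently forwards the values produced by func2 before func1 resumes with its own final yield.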

Multi-task coroutines

import asyncio
import time
'''async def get_request(url):
    print("Requesting:", url)
    time.sleep(2)  # time is not an async-aware module
    print("Request finished!")
    return 'jackson'
'''
async def get_request(url):
    print("Requesting:", url)
    await asyncio.sleep(2)  # async-aware code
    print("Request finished!")
    return 'jackson'
def back(t):
    # result() returns the return value of the special (coroutine) function
    print('t.result returns:', t.result())
urls=[
    'www.baidu1.0.com',
    'www.baidu2.0.com',
    'www.baidu3.0.com'
]
if __name__=="__main__":
    start=time.time()
    tasks=[]
    # create coroutine objects
    for url in urls:
        c = get_request(url)
        # create a task object
        task = asyncio.ensure_future(c)
        task.add_done_callback(back)
        tasks.append(task)
    # create an event loop object
    loop = asyncio.get_event_loop()
    #loop.run_until_complete(tasks)
    # the tasks must be wrapped with asyncio.wait before they can run
    loop.run_until_complete(asyncio.wait(tasks))
    print("Total time:", time.time() - start)

Multi-task asynchronous crawler

import asyncio
import time
import aiohttp
urls=[
        'http://127.0.0.1:8000/jackson',
        'http://127.0.0.1:8000/jing',
        'http://127.0.0.1:8000/jack',
    ]
'''async def get_request(url):
    # requests is a module that does not support async
    page_text=requests.get(url=url).text
    return page_text
    '''
async def get_request(url):
    # instantiate a session (request) object
    async with aiohttp.ClientSession() as se:
        # call get to send the request; it returns a response object
        async with await se.get(url=url) as response:
            # get the response data as a string
            page_text = await response.text()
            return page_text
if __name__=="__main__":
    start = time.time()
    tasks = []
    # create coroutine objects
    for url in urls:
        c = get_request(url)
        # create a task object
        task = asyncio.ensure_future(c)
        tasks.append(task)
    # create an event loop object
    loop = asyncio.get_event_loop()
    # loop.run_until_complete(tasks)
    # the tasks must be wrapped with asyncio.wait before they can run
    loop.run_until_complete(asyncio.wait(tasks))
    print("Total time:", time.time() - start)

Thread pool - Pearvideo (梨視訊)

import requests
from lxml import etree
import re
from multiprocessing.dummy import Pool
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'
}
url='https://www.pearvideo.com/category_5'
response = requests.get(url=url,headers=headers).text
tree=etree.HTML(response)
li_list=tree.xpath('//ul[@id="listvideoListUl"]/li')
urls=[]  # store every video's link and name
for li in li_list:
    detail_url='https://www.pearvideo.com/'+li.xpath('./div/a/@href')[0]
    name=li.xpath('./div/a/div[2]/text()')[0]+'.MP4'
    # request the detail page
    detail_response=requests.get(url=detail_url,headers=headers).text
    # parse the video address (url) out of the detail page
    ex='srcUrl="(.*?)",vdoUrl'
    video_url=re.findall(ex,detail_response)[0]
    dic={
        'name':name,
        'url':video_url
    }
    urls.append(dic)
# request the video link to get the video's binary data, then persist it
def get_video_data(dic):
    url=dic['url']
    print(dic['name'],'downloading...')
    data=requests.get(url=url,headers=headers).content
    # persistent storage
    with open(dic['name'],'wb') as fp:
        fp.write(data)
        print(dic['name'],'download finished')
# use a thread pool for the video requests (the time-consuming blocking part)
pool=Pool(4)
pool.map(get_video_data,urls)
pool.close()
pool.join()
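
multiprocessing.dummy.Pool exposes the multiprocessing Pool API but is backed by threads, which is what makes it suitable for these IO-bound downloads.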

3. Selenium

Selenium automation - moving an HTML element with action chains

from selenium import webdriver
import time
# import the class for action chains
from selenium.webdriver import ActionChains
bro=webdriver.Chrome(executable_path='E:/firefoxdownloads/chromedriver.exe')
bro.get('https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable')
# if the target tag lives inside an iframe, you must switch into that frame first
bro.switch_to.frame('iframeResult')  # switch the browser's locating scope
div=bro.find_element_by_id('draggable')
# action chain
action=ActionChains(bro)
# click and hold the target tag
action.click_and_hold(div)
for i in range(5):
    # perform() executes the queued action-chain operations immediately
    # move_by_offset(x,y)
    action.move_by_offset(20,0).perform()
    time.sleep(0.3)
# release the action chain
action.release()
bro.quit()

Selenium automation - JD search

from selenium import webdriver
import time
# instantiate a browser object from the browser driver
bro=webdriver.Chrome(executable_path='E:/firefoxdownloads/chromedriver.exe')
# send a request to the target site
bro.get('https://www.jd.com')
# locate the tag
search_text=bro.find_element_by_xpath('//*[@id="key"]')
# interact with the tag
search_text.send_keys('iphone11')
# click the search button
bth=bro.find_element_by_xpath('//*[@id="search"]/div/div[2]/button')
bth.click()
time.sleep(2)
# scroll down on the result page (execute js: js injection)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
time.sleep(2)
bro.get('https://www.baidu.com')
time.sleep(2)
# go back
bro.back()
time.sleep(2)
# go forward
bro.forward()
bro.quit()

Qzone (QQ空間) login with Selenium

from selenium import webdriver
import time
from selenium.webdriver import ActionChains
bro=webdriver.Chrome(executable_path='E:/firefoxdownloads/chromedriver.exe')
bro.get('https://qzone.qq.com/')
bro.switch_to.frame('login_frame')
a_tag=bro.find_element_by_id('switcher_plogin')
a_tag.click()
username=bro.find_element_by_id('u')
password=bro.find_element_by_id('p')
time.sleep(2)
username.send_keys('')
time.sleep(2)
password.send_keys('')
time.sleep(2)
btn=bro.find_element_by_id('login_button')
btn.click()
time.sleep(2)
bro.quit()
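
The two empty send_keys('') calls are placeholders; fill in the QQ account and password before running.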

4. Scrapy

1. Enter the working directory

e.g. cd scrapy操作

2. Create a project

e.g. scrapy startproject qiubaipro

3. Enter qiubaipro

e.g. cd qiubaipro

4. Create the spider source file

e.g. scrapy genspider qiubai www.baidu.com (qiubai is the spider file name)

5. Run the project

e.g. scrapy crawl qiubai

6. Joke-crawling command (terminal-based persistent storage)

scrapy crawl qiubai -o qiubai.csv
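
The -o exporter is chosen from the file extension; besides .csv, formats such as .json, .jl (JSON lines) and .xml are supported out of the box.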


import scrapy


class QiubaiSpider(scrapy.Spider):
    name = 'qiubai'
    #allowed_domains = ['www.xxx.com']
    start_urls = ['https://www.qiushibaike.com/text/']

    def parse(self, response):
        # parse: author name + joke content
        div_list=response.xpath('//div[@class="col1 old-style-col1"]/div')
        all_data=[]  # store all parsed data
        for div in div_list:
            # xpath returns a list, but its elements are always Selector objects
            # extract() pulls out the string stored in the Selector's data attribute
            author=div.xpath('./div[1]/a[2]/h2/text()')[0].extract()
            # calling extract() on the list extracts the data string from every Selector in it
            content=div.xpath('./a[1]/div/span//text()').extract()
            content=''.join(content)  # convert to a string
            dic={
                'author':author,
                'content':content
            }
            all_data.append(dic)
        return all_data

7. settings.py configuration

USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'
ROBOTSTXT_OBEY = False
LOG_LEVEL='ERROR'

8. Pipeline-based persistent storage

(1) qiubai.py

import scrapy
from qiubaipro.items import QiubaiproItem

class QiubaiSpider(scrapy.Spider):
    name = 'qiubai'
    #allowed_domains = ['www.xxx.com']
    start_urls = ['https://www.qiushibaike.com/text/']
    def parse(self, response):
        # parse: author name + joke content
        div_list=response.xpath('//div[@class="col1 old-style-col1"]/div')
        all_data=[]  # store all parsed data
        for div in div_list:
            # xpath returns a list, but its elements are always Selector objects
            # extract() pulls out the string stored in the Selector's data attribute
            author=div.xpath('./div[1]/a[2]/h2/text()')[0].extract()
            # calling extract() on the list extracts the data string from every Selector in it
            content=div.xpath('./a[1]/div/span//text()').extract()
            content=''.join(content)  # convert to a string
            item=QiubaiproItem()
            item['author']=author
            item['content']=content
            yield item  # submit the item to the pipeline

(2) items.py

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class QiubaiproItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    author=scrapy.Field()
    content=scrapy.Field()
    #pass

(3) settings.py

# Scrapy settings for qiubaipro project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'qiubaipro'

SPIDER_MODULES = ['qiubaipro.spiders']
NEWSPIDER_MODULE = 'qiubaipro.spiders'
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   'qiubaipro.pipelines.QiubaiproPipeline': 300,
}
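
The value 300 is the pipeline priority: when several pipelines are enabled, lower values run first.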

(4) pipelines.py

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


class QiubaiproPipeline:
    fp=None
    # override a parent-class method: it is called only once, when the spider starts
    def open_spider(self,spider):
        print("Spider started...")
        self.fp=open('./qiubai.txt','w',encoding='utf-8')
    # dedicated to handling item objects
    # this method receives the item objects submitted by the spider file
    # it is called once for every item it receives
    def process_item(self, item, spider):
        author=item['author']
        content=item['content']
        self.fp.write(author+':'+content+'\n')
        return item
    def close_spider(self,spider):
        print("Spider finished!")
        self.fp.close()

9. Crawling images from 站長素材 (sc.chinaz.com)

(1) img.py

import scrapy
from imgpro.items import ImgproItem

class ImgSpider(scrapy.Spider):
    name = 'img'
    #allowed_domains = ['www.xxx.com']
    start_urls = ['http://sc.chinaz.com/tupian/']

    def parse(self, response):
        div_list=response.xpath('//div[@id="container"]/div')
        for div in div_list:
            # the page uses a pseudo attribute: src only appears after scrolling, the real one is src2
            src=div.xpath('./div/a/img/@src2').extract_first()
            item=ImgproItem()
            item['src']=src
            yield item

(2) items.py

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class ImgproItem(scrapy.Item):
    # define the fields for your item here like:
    src = scrapy.Field()
    pass

(3) settings.py

USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'
ROBOTSTXT_OBEY = False
LOG_LEVEL='ERROR'
# directory where the downloaded images are stored
IMAGES_STORE='./imgs'
# ITEM_PIPELINES must point at the custom imgpipeline defined in pipelines.py
ITEM_PIPELINES = {
   'imgpro.pipelines.imgpipeline': 300,
}
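
Note that Scrapy's ImagesPipeline relies on the Pillow library, which must be installed separately.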

(4) pipelines.py

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


# class ImgproPipeline:
#     def process_item(self, item, spider):
#         return item
from scrapy.pipelines.images import ImagesPipeline
import scrapy
class imgpipeline(ImagesPipeline):
    # request the image data based on the image address
    def get_media_requests(self, item, info):
        yield scrapy.Request(item['src'])
    # specify the path where the image is saved
    def file_path(self, request, response=None, info=None):
        imgname=request.url.split('/')[-1]
        return imgname
    def item_completed(self, results, item, info):
        return item  # pass the item on to the next pipeline class to run

10. NetEase (網易) News - tampering with the response object

(1) wangyi.py

import scrapy
from selenium import webdriver
from wangyipro.items import WangyiproItem
class WangyiSpider(scrapy.Spider):
    name = 'wangyi'
    #allowed_domains = ['www.xxx.com']
    start_urls = ['https://news.163.com/']
    models_url=[]  # store the urls of the five sections
    # parse the detail-page urls of the five sections
    def __init__(self):
        self.bro=webdriver.Chrome(executable_path='E:/firefoxdownloads/chromedriver.exe')
    def parse(self, response):
        li_list=response.xpath('//*[@id="index2016_wrap"]/div[1]/div[2]/div[2]/div[2]/div[2]/div/ul/li')
        alist=[3,4,6,7,8]
        for index in alist:
            model_url=li_list[index].xpath('./a/@href').extract_first()
            self.models_url.append(model_url)
        # request each section's page in turn
        for url in self.models_url:  # send a request for every section url
            yield scrapy.Request(url,callback=self.parse_model)
        # the news titles inside each section are loaded dynamically
    def parse_model(self,response):
        # parse each section's news titles and detail-page urls
        div_list=response.xpath('/html/body/div/div[3]/div[4]/div[1]/div/div/ul/li/div/div')
        for div in div_list:
            title=div.xpath('./div/div[1]/h3/a/text()').extract_first()
            new_detail_url=div.xpath('./div/div[1]/h3/a/@href').extract_first()
            item=WangyiproItem()
            item['title']=title
            # request the news detail page
            yield scrapy.Request(url=new_detail_url,callback=self.parse_detail,meta={'item':item})
    def parse_detail(self,response):  # parse the news content
        content=response.xpath('//*[@id="endText"]//text()').extract()
        content=''.join(content)
        item=response.meta['item']
        item['content']=content
        yield item
    def closed(self,spider):
        self.bro.quit()

(2) pipelines.py

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


class WangyiproPipeline:
    def process_item(self, item, spider):
        print(item)
        return item

(3) items.py

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class WangyiproItem(scrapy.Item):
    # define the fields for your item here like:
    title = scrapy.Field()
    content = scrapy.Field()

(4) middlewares.py

# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals

# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter

from scrapy.http import HtmlResponse
from time import sleep
class WangyiproDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.
    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None
    # this method intercepts the responses of the five sections and tampers with them
    def process_response(self, request, response, spider):  # spider is the spider object
        bro=spider.bro  # grab the browser object defined in the spider
        # pick out the responses to tamper with: locate the request via its url, then the response via the request
        if request.url in spider.models_url:
            bro.get(request.url)  # request each of the five section urls with the browser
            sleep(2)
            page_text=bro.page_source  # contains the dynamically loaded news data
            # response  # the response object of one of the five sections
            # tamper with these located responses:
            # instantiate a new response object (one that contains the dynamically
            # loaded news data) and return it in place of the old one
            new_response=HtmlResponse(url=request.url,body=page_text,encoding='utf-8',request=request)
            return new_response
        else:
            # response  # the response object of any other request
            return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

(5) settings.py

BOT_NAME = 'wangyipro'
SPIDER_MODULES = ['wangyipro.spiders']
NEWSPIDER_MODULE = 'wangyipro.spiders'
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'
ROBOTSTXT_OBEY = False
LOG_LEVEL='ERROR'
DOWNLOADER_MIDDLEWARES = {
   'wangyipro.middlewares.WangyiproDownloaderMiddleware': 543,
}
ITEM_PIPELINES = {
   'wangyipro.pipelines.WangyiproPipeline': 300,
}
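
As with the earlier projects, the spider is started with scrapy crawl wangyi.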