Crawling a Website Tree with Python (Web Crawler)

2020-11-13 13:01:00

1. Web page files:
(1) book.html

<h3>Computers</h3>
<ul>
    <li><a href="database.html">Databases</a></li>
    <li><a href="program.html">Programming</a></li>
    <li><a href="network.html">Computer Networks</a></li>
</ul>

(2) database.html

<h3>Databases</h3>
<ul>
    <li><a href="mysql.html">MySQL Database</a></li>
</ul>

(3) program.html

<h3>Programming</h3>
<ul>
    <li><a href="python.html">Python Programming</a></li>
    <li><a href="java.html">Java Programming</a></li>
</ul>

(4) network.html

<h3>Computer Networks</h3>

(5) mysql.html

<h3>MySQL Database</h3>

(6) python.html

<h3>Python Programming</h3>

(7) java.html

<h3>Java Programming</h3>
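Taken together, the seven pages link into the small site tree below; this is the structure the two crawlers in Section 3 will traverse:

book.html (Computers)
├── database.html (Databases)
│   └── mysql.html (MySQL Database)
├── program.html (Programming)
│   ├── python.html (Python Programming)
│   └── java.html (Java Programming)
└── network.html (Computer Networks)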

2. Server side:

import flask
import os

app = flask.Flask(__name__)


def getFile(fileName):
    # Read the requested file as bytes; return empty bytes if it is missing
    data = b""
    if os.path.exists(fileName):
        with open(fileName, "rb") as fobj:
            data = fobj.read()
    return data


@app.route("/")
def index():
    return getFile("books.html")


@app.route("/<section>")
def process(section):
    data = ""
    if section != "":
        data = getFile(section)
    return data


if __name__ == "__main__":
    app.run()
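To check that the server works, run the script and request the root page. A minimal sketch, assuming the Flask app above is running locally on its default port 5000:

import urllib.request

# Fetch the root page served by the Flask app above
with urllib.request.urlopen("http://127.0.0.1:5000") as resp:
    print(resp.read().decode())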


3. Client side:

① Depth-first crawling:

from bs4 import BeautifulSoup
import urllib.request


class Stack:
    def __init__(self):
        self.st = []

    def pop(self):
        return self.st.pop()

    def push(self, obj):
        self.st.append(obj)

    def empty(self):
        return len(self.st) == 0


def spider(url):
    stack = Stack()
    stack.push(url)
    while not stack.empty():
        url = stack.pop()
        try:
            data = urllib.request.urlopen(url)
            data = data.read()
            data = data.decode()
            soup = BeautifulSoup(data, "html.parser")
            print(soup.find("h3").text)
            links = soup.select("a")  # returns a list of <a> elements
            # Push links in reverse so they pop off the stack in document order
            for i in range(len(links) - 1, -1, -1):
                href = links[i]["href"]
                stack.push(start_url + "/" + href)
        except Exception as err:
            print(err)


start_url = "http://127.0.0.1:5000"
spider(start_url)
print("The End")


② Breadth-first crawling:

from bs4 import BeautifulSoup
import urllib.request


class Queue:
    def __init__(self):
        self.st = []

    def fetch(self):
        return self.st.pop(0)

    def enter(self, obj):
        self.st.append(obj)

    def empty(self):
        return len(self.st) == 0


def spider(url):
    queue = Queue()
    queue.enter(url)
    while not queue.empty():
        url = queue.fetch()
        try:
            data = urllib.request.urlopen(url)
            data = data.read()
            data = data.decode()
            soup = BeautifulSoup(data, "html.parser")
            print(soup.find("h3").text)
            links = soup.select("a")  # returns a list of <a> elements
            for link in links:
                href = link["href"]
                queue.enter(start_url + "/" + href)
        except Exception as err:
            print(err)


start_url = "http://127.0.0.1:5000"
spider(start_url)
print("The End")

4. Supplement:

lst = [1, 2, 3, 4]  # avoid naming a variable "list": it shadows the built-in type
# print(lst.pop(0))   # pops the first element (queue behavior)
print(lst.pop())      # pops the last element by default (stack behavior)

lst = [1, 2, 3, 4, 5]
for i in range(len(lst) - 1, -1, -1):  # range(start, stop, step): indices from last to first
    print(lst[i])
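One caveat on using a plain list as a queue: lst.pop(0) has to shift every remaining element, so it costs O(n) per call. The standard library's collections.deque pops from either end in O(1) and is the usual choice for a breadth-first queue. A minimal sketch:

from collections import deque

q = deque([1, 2, 3, 4])
print(q.popleft())  # 1: O(1) pop from the front (queue behavior)
print(q.pop())      # 4: O(1) pop from the back (stack behavior)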