Go language application: web crawlers

1. Crawler approach:

1) Identify the target (know which scope or site you plan to search)

2) Crawl (fetch all of the site's content)

3) Extract (discard the data that is of no use to us)

4) Process the data (store and use it the way we want); see the sketch right below
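
These four steps map onto a small program skeleton. The sketch below is only illustrative; fetch, extract and store are hypothetical placeholders for the concrete functions written out in the full examples that follow.

package main

import "fmt"

//fetch, extract and store are hypothetical placeholders for the concrete
//functions implemented in the examples below.
func fetch(url string) (string, error) { return "<html>sample</html>", nil } //step 2: crawl
func extract(page string) []string     { return []string{page} }             //step 3: extract
func store(items []string)             { fmt.Println(items) }                //step 4: process

func main() {
	url := "https://example.com" //step 1: identify the target
	page, err := fetch(url)
	if err != nil {
		fmt.Println("fetch err=", err)
		return
	}
	store(extract(page))
}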

2. Baidu Tieba crawler

package main

import (
	"fmt"
	"net/http"
	"os"
	"strconv"
)
//HttpGet fetches the content of a web page
func HttpGet(url string) (result string, err error) {
	resp, err1 := http.Get(url)
	if err1 != nil {
		fmt.Println("http.Get err=", err)
		return
	}
	defer resp.Body.Close()
	//Read the content of the response body
	buf := make([]byte, 4*1024)
	for {
		n, err := resp.Body.Read(buf)
		if n == 0 { //finished reading, or an error occurred
			fmt.Println("resp.Body.Read err=", err)
			break
		}
		result += string(buf[:n])
	}
	return
}
func DoWork(start, end int) {
	fmt.Printf("正在爬取%d到%d的頁面", start, end)
//明確目標(要知道準備在哪個範圍或網站去搜索)
//下一頁+50
	for i := start; i <= end; i++ {
		url := "https://tieba.baidu.com/f?kw=%E7%BB%9D%E5%9C%B0%E6%B1%82%E7%94%9F&ie=utf-8&pn=" + strconv.Itoa((i-1)*50)
		//Crawl (fetch all of the page's content)
		result, err := HttpGet(url)
		if err != nil {
			fmt.Println("HttpGet err=", err)
			continue
		}
		//Write the crawled content to a file
		fileName := strconv.Itoa(i) + ".html"
		f, err1 := os.Create(fileName)
		if err1 != nil {
			fmt.Println("os.Create err=", err1)
			continue
		}
		f.WriteString(result)
		f.Close() //close the file
	}
}
func main() {
	var start, end int
	fmt.Printf("請輸入起始頁(大於等於1):")
	fmt.Scan(&start)
	fmt.Printf("請輸入終止頁(大於等於起始頁):")
	fmt.Scan(&end)
	DoWork(start, end)
}
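
The 4 KB read loop in HttpGet above works, but on Go 1.16 or later the whole body can be read in a single call with io.ReadAll. A minimal sketch of that variant (same idea, not the original code):

package main

import (
	"fmt"
	"io"
	"net/http"
)

//HttpGet fetches url and returns the response body as a string.
func HttpGet(url string) (string, error) {
	resp, err := http.Get(url)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	data, err := io.ReadAll(resp.Body) //reads the body until EOF in one call
	if err != nil {
		return "", err
	}
	return string(data), nil
}

func main() {
	result, err := HttpGet("https://tieba.baidu.com/f?kw=%E7%BB%9D%E5%9C%B0%E6%B1%82%E7%94%9F&ie=utf-8&pn=0")
	if err != nil {
		fmt.Println("HttpGet err=", err)
		return
	}
	fmt.Println("fetched", len(result), "bytes")
}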

3. Concurrent Baidu Tieba crawler

package main

import (
	"fmt"
	"net/http"
	"os"
	"strconv"
)
//HttpGet fetches the content of a web page
func HttpGet(url string) (result string, err error) {
	resp, err1 := http.Get(url)
	if err1 != nil {
		err = err1
		return
	}
	defer resp.Body.Close()
	//Read the content of the response body
	buf := make([]byte, 4*1024)
	for {
		n, err := resp.Body.Read(buf)
		if n == 0 {
			fmt.Println("resp.Body.Read err=", err)
			break
		}
		result += string(buf[:n])
	}
	return
}
//SpiderPage crawls a single page
func SpiderPage(i int, page chan int) {
	//Signal completion on every return path so DoWork's receive loop never blocks
	defer func() { page <- i }()
	url := "https://tieba.baidu.com/f?kw=%E7%BB%9D%E5%9C%B0%E6%B1%82%E7%94%9F&ie=utf-8&pn=" + strconv.Itoa((i-1)*50)
	//Crawl (fetch all of the page's content)
	result, err := HttpGet(url)
	if err != nil {
		fmt.Println("HttpGet err", err)
		return
	}
	//Write the content to a file
	fileName := strconv.Itoa(i) + ".html"
	f, err1 := os.Create(fileName)
	if err1 != nil {
		fmt.Println("os.Create err=", err1)
		return
	}
	f.WriteString(result)
	f.Close() //close the file
}
func DoWork(start, end int) {
	fmt.Printf("正在爬取第%d頁到第%d頁\n", start, end)
	page := make(chan int) //不能光宣告不給分配空間
//明確目標(要知道準備在哪個範圍或網站去搜索),下一頁+50
	for i := start; i <= end; i++ {
		go SpiderPage(i, page)
	}
	for i := start; i <= end; i++ {
		fmt.Printf("第%d個頁面爬取完成", <-page)
	}
}
func main() {
	var start, end int
	fmt.Printf("請輸入起始頁(大於等於1):")
	fmt.Scan(&start)
	fmt.Printf("請輸入終止頁(大於等於起始頁):")
	fmt.Scan(&end)
	DoWork(start, end)
}
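
In this version the page channel doubles as a completion signal: DoWork receives exactly one value per goroutine before it returns, so main cannot exit while pages are still being written. When the per-page value itself is not needed, the same fan-out can be synchronized with sync.WaitGroup instead. A minimal sketch, where crawlOne is a hypothetical stand-in for SpiderPage:

package main

import (
	"fmt"
	"sync"
)

//crawlOne is a hypothetical stand-in for SpiderPage.
func crawlOne(i int) {
	fmt.Printf("Page %d crawled\n", i)
}

func DoWork(start, end int) {
	var wg sync.WaitGroup
	for i := start; i <= end; i++ {
		wg.Add(1) //register one unit of work before starting the goroutine
		go func(i int) {
			defer wg.Done() //mark this page as finished when the goroutine returns
			crawlOne(i)
		}(i)
	}
	wg.Wait() //block until every page has been processed
	fmt.Println("All pages crawled")
}

func main() {
	DoWork(1, 5)
}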

4. Concurrency: crawling linked pages

package main

import (
	"fmt"
	"net/http"
	"os"
	"regexp"
	"strconv"
	"strings"
)

func HttpGet(url string) (result string, err error) {
	resp, err1 := http.Get(url) //send a GET request
	if err1 != nil {
		err = err1
		return
	}
	defer resp.Body.Close()
	//Read the content of the response body
	buf := make([]byte, 4*1024)
	for {
		n, err := resp.Body.Read(buf)
		if n == 0 {
			fmt.Println("resp.Body.Read err=", err)
			break
		}
		result += string(buf[:n])
	}
	return
}
func SpiderOneJoy(url string) (title, content string, err error) {
	result, err1 := HttpGet(url)
	if err1 != nil {
		err = err1
		return
	}
	//Extract the title: <h1>title</h1>
	re := regexp.MustCompile(`<h1>(?s:(.*?))</h1>`)
	if re == nil {
		err = fmt.Errorf("%s", "regexp.MustCompile err")
		return
	}
	tmpTitle := re.FindAllStringSubmatch(result, 1)
	for _, data := range tmpTitle {
		title = data[1]
		title = strings.Replace(title, "\t", "", -1)
		break
	}
	//Extract the content of the joke
	re = regexp.MustCompile(`<div class="content-txt pt10">(?s:(.*?))<a id="prev" href="`)
	if re == nil {
		err = fmt.Errorf("%s", "regexp.MustCompile err")
		return
	}
	tmpContent := re.FindAllStringSubmatch(result, -1)
	for _, data := range tmpContent {
		content = data[1]
		content = strings.Replace(content, "\r\n", "", -1)
		content = strings.Replace(content, "\n", "", -1)
		content = strings.Replace(content, "\r", "", -1)
		content = strings.Replace(content, " ", "", -1)
		content = strings.Replace(content, "\t", "", -1)
		content = strings.Replace(content, "<br/>", "", -1)
		content = strings.Replace(content, "<br />", "", -1)
		break
	}
	return
}
func StoreJoyToFile(i int, fileTitle, fileContent []string) {
	//Create the output file
	fileName := strconv.Itoa(i) + ".txt"
	f, err := os.Create(fileName)
	if err != nil {
		fmt.Println("os.Create err=", err)
		return
	}
	defer f.Close()
	n := len(fileTitle) //avoid shadowing the builtin len
	//Write each title and its content to the file
	for i := 0; i < n; i++ {
		f.WriteString(fileTitle[i] + "\n")
		f.WriteString(fileContent[i] + "\n")
		f.WriteString("---------\n")
	}
}
func SpiderPage(i int, page chan int) {
	//Signal completion on every return path so DoWork's receive loop never blocks
	defer func() { page <- i }()
	//The listing-page URL to crawl
	url := "https://www.pengfu.com/xiaohua_" + strconv.Itoa(i) + ".html"
	fmt.Printf("正在爬取第%d個網頁:%s\n", i, url)
    //開始爬取頁面的內容
	result, err := HttpGet(url)
	if err != nil {
		fmt.Println("HttpGet err=", err)
		return
	}
	//Find the joke links: <h1 class="dp-b"><a href="url of one joke "
	re := regexp.MustCompile(`<h1 class="dp-b"><a href="(?s:(.*?))"`)
	if re == nil {
		fmt.Println("regexp.MustCompile err")
		return
	}
	//Extract the key information (the captured URLs)
	joyUrls := re.FindAllStringSubmatch(result, -1)
	fileTitle := make([]string, 0)
	fileContent := make([]string, 0)
	for _, data := range joyUrls {
		url := data[1]
		//Crawl the linked sub-page
		title, content, err := SpiderOneJoy(url)
		if err != nil {
			fmt.Println("SpiderOneJoy err=", err)
			continue
		}
		fileTitle = append(fileTitle, title)       //append the title
		fileContent = append(fileContent, content) //append the content
	}
	StoreJoyToFile(i, fileTitle, fileContent)
}
func DoWork(start, end int) {
	fmt.Printf("準備爬取第%d頁到%d頁的網址\n", start, end)
	page := make(chan int)
	for i := start; i <= end; i++ {
		//Launch a goroutine to crawl one listing page
		go SpiderPage(i, page)
	}
	for i := start; i <= end; i++ {
		fmt.Printf("第%d頁爬取結束\n", <-page)
	}
}
func main() {
	var start, end int
	fmt.Printf("請輸入起始頁(大於等於1):")
	fmt.Scan(&start)
	fmt.Printf("請輸入終止頁(大於等於起始頁):")
	fmt.Scan(&end)
	DoWork(start, end) //the worker function
}
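
The extraction above relies on FindAllStringSubmatch: each match is a slice in which index 0 holds the whole match and index 1 holds the first capture group, which is why the code reads data[1]. A small self-contained sketch using hypothetical sample markup (the real page layout may have changed since this was written):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	//Hypothetical sample of the listing-page markup, for illustration only.
	html := `<h1 class="dp-b"><a href="https://www.pengfu.com/content_1.html" target="_blank">Joke A</a></h1>
<h1 class="dp-b"><a href="https://www.pengfu.com/content_2.html" target="_blank">Joke B</a></h1>`

	re := regexp.MustCompile(`<h1 class="dp-b"><a href="(?s:(.*?))"`)
	for _, m := range re.FindAllStringSubmatch(html, -1) {
		fmt.Println(m[1]) //m[0] is the full match, m[1] is the captured URL
	}
}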