# 基本配置:
#   • Python版本:3.6
#   • 相关模块:requests、bs4
from bs4 import BeautifulSoup
import requests, sys
'''
遇到不懂的问题?Python学习交流群:1136201545满足你的需求,资料都已经上传群文件,可以自行下载!
'''

# 下载《17K》网小说

class downloader(object):
    """Scraper for a novel hosted on www.17k.com.

    Holds the site root, the table-of-contents URL, and the chapter
    names/links collected by get_downloader_url().
    """

    def __init__(self):
        # Site root, prepended to the relative chapter hrefs.
        self.server = 'http://www.17k.com'
        # URL of the novel's table-of-contents page.
        self.target = 'http://www.17k.com/list/2731559.html'
        self.names = []  # chapter titles
        self.urls = []   # absolute chapter URLs
        self.nums = 0    # number of chapters found

# 获取下载链接

    def get_downloader_url(self):
        """Fetch the table-of-contents page and collect chapter links.

        Populates self.names (chapter titles) and self.urls (absolute
        chapter URLs), and sets self.nums to the chapter count.
        """
        req = requests.get(self.target)  # download the index page
        req.encoding = 'utf-8'  # the site serves UTF-8; force correct decoding
        soup = BeautifulSoup(req.text, 'html.parser')
        # The chapter listing lives in <div class="Main List">.
        listing = soup.find_all('div', class_='Main List')
        # Traverse the parsed tag directly; the original round-trip through
        # str() and a second BeautifulSoup parse was redundant work.
        anchors = listing[0].find_all('a')
        # The first 11 <a> tags are navigation/header links, not chapters
        # — assumes the site layout; TODO confirm if the page changes.
        chapters = anchors[11:]
        self.nums = len(chapters)
        for each in chapters:
            self.names.append(each.string)
            self.urls.append(self.server + each.get('href'))

# 获取章节内容

    def get_contents(self, target):
        """Download one chapter page and return its body text.

        The site indents paragraphs with eight consecutive non-breaking
        spaces; those runs are replaced with blank lines so the text
        reads as separate paragraphs.

        :param target: absolute URL of a chapter page
        :return: chapter text as a str
        """
        response = requests.get(target)
        response.encoding = 'utf-8'
        page = BeautifulSoup(response.text, 'html.parser')
        # The chapter body lives in <div class="readAreaBox content">.
        body = page.find_all('div', class_='readAreaBox content')[0]
        return body.text.replace('\xa0' * 8, '\n\n')

# 将爬取的文章内容写入文件

    def writer(self, name, path, text):
        """Append one chapter to the output file.

        :param name: chapter title, written on a line of its own
        :param path: output file path (opened in append mode, UTF-8)
        :param text: chapter body text
        """
        # Dropped the unused `write_flag` local from the original.
        with open(path, 'a', encoding='utf-8') as f:
            f.write(name + '\n')
            # writelines() on a str iterates character by character; a single
            # write() produces identical output in one call.
            f.write(text)
            f.write('\n\n')
 
if __name__ == "__main__":
    dl = downloader()
    dl.get_downloader_url()
    print('《正道潜龙》开始下载:')
    for i in range(dl.nums):
        dl.writer(str(dl.names[i]), '正道潜龙.txt', dl.get_contents(dl.urls[i]))
        # Bug fix: the original printed i/nums — a 0..1 fraction — with a '%'
        # suffix (e.g. "0.500%"). Scale by 100 and count i+1 so the indicator
        # reaches 100% on the final chapter.
        sys.stdout.write("  已下载:%.3f%%" % (100 * (i + 1) / dl.nums) + '\r')
        sys.stdout.flush()
    print('《正道潜龙》下载完成')

# 本文转载:CSDN博客