#### Preface
In short, this post just uses two third-party scraping libraries, Requests and BeautifulSoup.
The whole thing is only a few lines of code, but the goal is that a reader with no development background can follow it at a glance, so experienced developers may want to skip ahead.
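Before diving in, here is a minimal, self-contained sketch of the fetch-parse-select pattern the full script below relies on. The URL and the bare `img` selector are placeholders for illustration, not the ones used in the actual scraper.

```python
# A minimal sketch of the Requests + BeautifulSoup pattern used in this post.
# example.com and the bare 'img' selector are placeholders, not the real target.
import requests
from bs4 import BeautifulSoup

res = requests.get('http://example.com')        # fetch the page HTML over HTTP
soup = BeautifulSoup(res.text, 'html.parser')   # parse it with the stdlib parser
for img in soup.select('img'):                  # CSS-select every <img> tag
    print(img.get('src'))                       # read the src attribute, if any
```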
#### Basic environment setup

  • Python version: 3.6
  • IDE: PyCharm

#### Related modules
```python
import requests
import urllib.request
import os
import time
from bs4 import BeautifulSoup
```
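Of these, only Requests and BeautifulSoup are third-party; `os`, `time`, and `urllib.request` ship with Python. The two external packages can be installed with `pip install requests beautifulsoup4`.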

#### Results
(screenshot of the downloaded images from the original post)
#### Complete code

```python
import requests
import urllib.request
import os
import time
from bs4 import BeautifulSoup
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36'}
url = ['http://sh.58.com/zufang/pn{}/?ClickID=2'.format(number) for number in range(6, 51)]  # paginated crawl: listing pages 6-50
saveDir = "E:\\2333\\"
os.makedirs(saveDir, exist_ok=True)  # make sure the target folder exists before writing
adminCount = 6
for arurl in url:
    adminCount = adminCount + 1
    res = requests.get(arurl, headers=header)      # fetch one listing page
    soup = BeautifulSoup(res.text, 'html.parser')  # parse the HTML
    arryImg = soup.select('.img_list img')         # select every image in the listing
    print(arryImg)
    count = 0
    for img in arryImg:
        print(img['lazy_src'])
        _url = img['lazy_src']  # the real image URL sits in the lazy-load attribute
        pathName = saveDir + str(adminCount) + "_" + str(count) + ".jpg"  # output path and file name
        result = urllib.request.urlopen(_url)  # open the link (urllib.request, unlike Python 2.x)
        data = result.read()                   # download the image bytes
        with open(pathName, "wb") as code:
            code.write(data)  # the with block closes the file automatically
        count = count + 1     # bump the per-page counter
        print("Downloading image number:", count)
        time.sleep(30)  # pause between downloads to go easy on the site
```
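One weakness of the script above is that a single dead image link will raise an exception and stop the whole run. As a hedged variant (a sketch, not the original author's code), the download step could be wrapped in a small helper that uses Requests and simply skips failures; the `download_image` name and its parameters here are illustrative assumptions.

```python
# A hedged alternative for the download step: same job as the
# urllib.request.urlopen + open(...) lines above, but it skips broken
# links instead of crashing. The helper name 'download_image' is an
# illustrative assumption, not part of the original script.
import requests

def download_image(img_url, path, headers):
    try:
        res = requests.get(img_url, headers=headers, timeout=10)
        res.raise_for_status()              # raise on HTTP errors such as 404
        with open(path, 'wb') as f:
            f.write(res.content)            # write the image bytes to disk
        return True
    except (requests.RequestException, OSError):
        return False                        # report failure; caller can skip
```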

This article is reposted from a CSDN blog.