'''
Created on 2017年5月11日
根据用户输入的图片网站地址,关键词搜索后,下载搜索结果的全部图片
@author: Nick
'''
from selenium import webdriver
import requests,bs4,os
# Module-level state: maps picid -> thumbnail src URL, filled page by page.
imageUrlList = {}
pageNum = 1

# Example image site: http://www.123rf.com.cn/
url = 'http://www.123rf.com.cn/'

# Open the image site (http://www.123rf.com.cn/) in Firefox via selenium.
downLoadImage = webdriver.Firefox()
downLoadImage.get(url)

# Ask the user for a search keyword and submit it through the site's search box.
selectKeyWord = input('Please enter select keyword:')
searchBox = downLoadImage.find_element_by_name('keyword')
searchBox.clear()
# BUG FIX: the original sent the hard-coded keyword '美人' here instead of the
# user's input, so the in-browser search ignored selectKeyWord entirely.
searchBox.send_keys(selectKeyWord)
# Click the search button, located via a relative XPath inside the search form.
downLoadImage.find_element_by_xpath("//form[@id='search-form']/div/span/button").click()
print(downLoadImage.find_elements_by_xpath("//div['class=gallery-item-thumb-content']"))
#http://www.123rf.com.cn/search.php?keyword=%E7%BE%8E%E4%BA%BA&page=1
#Download the current results page and store name/url key-value pairs for each image in the dict
def baoCunImageUrl(keyWord, pageNum):
    """Fetch one search-results page and record picid -> thumbnail-src pairs.

    Builds the results URL from the module-level ``url``, downloads it with
    requests, and stores each ``.uitooltip`` element's ``picid``/``src``
    attributes into the module-level ``imageUrlList`` dict.

    :param keyWord: search keyword entered by the user
    :param pageNum: 1-based results page number to fetch
    """
    urlSearch = url + 'search.php?keyword=' + keyWord + '&page=' + str(pageNum)
    resImageUrl = requests.get(urlSearch)
    try:
        resImageUrl.raise_for_status()
    except Exception as exc:
        print('There was a problem:%s' % (exc))
        # BUG FIX: the original fell through and parsed the failed response
        # anyway; bail out so a bad page does not pollute imageUrlList.
        return
    # Name the parser explicitly to silence bs4's "no parser specified"
    # warning and avoid parser-dependent results across machines.
    bs4ImageDownLoadUrl = bs4.BeautifulSoup(resImageUrl.text, 'html.parser')
    # Iterate the matched elements directly instead of re-running the same
    # .select() three times per index as the original did.
    for thumb in bs4ImageDownLoadUrl.select('.uitooltip'):
        imageUrlList[thumb.get('picid')] = thumb.get('src')
#Loop over every results page and collect the title and url of all images
# Walk every results page: save the current page's image URLs, then click the
# "next page" button; stop once no such button exists.
nextPageXpath = "//div/a['class=btn btn-search-pagination-next']"
while True:
    # Query the next-page button once per iteration; the original ran the
    # same XPath twice, risking a stale-element mismatch between the check
    # and the click.
    nextButtons = downLoadImage.find_elements_by_xpath(nextPageXpath)
    if not nextButtons:
        break
    baoCunImageUrl(selectKeyWord, pageNum)
    nextButtons[-1].click()
    pageNum = pageNum + 1
#Loop over the dict, download each image and save it locally
# Download every collected image and write it to disk, one file per picid.
for picId, imageSrc in imageUrlList.items():
    resImage = requests.get(str(imageSrc))
    # BUG FIX: open with 'wb' instead of 'ab' — append mode made a re-run
    # concatenate new bytes onto an existing file, corrupting the JPEG.
    # The with-statement guarantees the handle is closed even if a write
    # raises (the original leaked it on error).
    savePath = 'C:\\Users\\Nick\\Desktop\\python\\drawing\\2\\image\\' + str(picId) + '.jpg'
    with open(savePath, 'wb') as openImage:
        for chunk in resImage.iter_content(10000):
            openImage.write(chunk)
# Python + selenium: search an image site by keyword and download all result images.
# Source note: adapted from a CSDN blog post.