Some Notes on Developing a Python Image Crawler

Approach

For downloading images with Python, the usual approach is urllib.urlretrieve, though Scrapy also provides an ImagesPipeline; a minimal urlretrieve sketch follows the list below.

  1. urllib.urlretrieve
  2. ImagesPipeline documentation
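
As a baseline, here is a minimal, self-contained sketch of the urlretrieve approach (Python 2, which the rest of this post also uses); the image URL and file name are made-up placeholders:

import os
import urllib

image_url = "http://example.com/poster.jpg"   # placeholder URL, not a real poster
if not os.path.isdir("pictures"):
    os.makedirs("pictures")                   # urlretrieve will not create the directory for us
urllib.urlretrieve(image_url, "pictures/poster.jpg")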

Common workflow

The typical workflow when using the Scrapy framework:

  1. Create the project
  2. Define the Item
  3. Write the spider
  4. Implement the Pipeline
  5. Configure settings

Create the project

scrapy startproject piccrawler
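
Depending on the Scrapy version, the generated skeleton looks roughly like this:

piccrawler/
    scrapy.cfg            # deploy configuration file
    piccrawler/           # the project's Python package
        __init__.py
        items.py          # Item definitions go here
        pipelines.py
        settings.py
        spiders/          # spider modules go here
            __init__.py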

Define the Item

import scrapy


class PiccrawlerItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    movie_id = scrapy.Field()
    movie_picture = scrapy.Field()
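
A scrapy.Item behaves like a dictionary, so the spider can fill the declared fields by key. A quick illustration (the values here are made up):

item = PiccrawlerItem()
item['movie_id'] = '120001'
item['movie_picture'] = 'http://example.com/poster.jpg'
print item['movie_id']    # -> 120001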

Write the spider

# -*- coding:utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding("utf-8")

from scrapy.spiders import Spider
from scrapy.http import Request
from scrapy.selector import HtmlXPathSelector
from piccrawler.items import PiccrawlerItem
import re
import urllib


class DoubanSpider(Spider):
    name = "douban"
    allowed_domains = ["movie.douban.com"]
    start_urls = []

    def start_requests(self):
        # movie_name.txt is rewritten by main.py before every run and holds one movie name
        file_object = open('movie_name.txt', 'r')
        try:
            url_head = "http://movie.douban.com/subject_search?search_text="
            for line in file_object:
                # print "file contents:", line
                self.start_urls.append(url_head + line)
            for url in self.start_urls:
                yield self.make_requests_from_url(url)
        finally:
            file_object.close()

    def parse(self, response):
        # open("test.html", 'wb').write(response.body)
        hxs = HtmlXPathSelector(response)
        # grab the link of the first search result
        movie_link = hxs.select('//*[@id="content"]/div/div[1]/div[2]/table[1]/tr/td[1]/a/@href').extract()
        # print "first search result link:", movie_link[0]
        if movie_link:
            yield Request(movie_link[0], callback=self.parse_item)

    def parse_item(self, response):
        hxs = HtmlXPathSelector(response)
        movie_picture = hxs.select('//*[@id="mainpic"]/a/img/@src').extract()
        item = PiccrawlerItem()
        item['movie_picture'] = ''.join(movie_picture).strip()
        # movie_id.txt is used to name the downloaded picture; it contains a single line,
        # because main.py invokes this spider repeatedly and rewrites the file before each run
        movie_id_file = open('movie_id.txt', 'r')
        try:
            for line in movie_id_file:
                item['movie_id'] = line.strip()
                # print "picture id:", line.strip()
                if movie_picture:
                    urllib.urlretrieve(movie_picture[0].strip(), 'pictures/' + line.strip() + '.jpg')
        finally:
            movie_id_file.close()
        yield item
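
For reference, movie_name.txt holds a single movie name per run (main.py, shown below, rewrites it before each crawl), and the spider simply appends that name to the search URL. For example, if the file contains 大空头, the first request is roughly:

http://movie.douban.com/subject_search?search_text=大空头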

Implement the Pipeline

No pipeline is used here; the image is downloaded directly in the spider with urllib.urlretrieve.
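
For completeness, if you would rather let Scrapy handle the download instead of calling urllib.urlretrieve inside the spider, a minimal sketch using the built-in ImagesPipeline would look roughly like this. This is not what this post does; IMAGES_STORE and the image_urls/images field names are the pipeline's defaults, and the pipeline class path shown is the one used by recent Scrapy versions (older releases used scrapy.contrib.pipeline.images.ImagesPipeline):

# settings.py -- enable the built-in image pipeline (requires Pillow)
ITEM_PIPELINES = {'scrapy.pipelines.images.ImagesPipeline': 1}
IMAGES_STORE = 'pictures'   # directory where downloaded images are stored

# items.py -- the default pipeline reads URLs from 'image_urls'
# and records download results in 'images'
import scrapy

class PiccrawlerItem(scrapy.Item):
    movie_id = scrapy.Field()
    image_urls = scrapy.Field()
    images = scrapy.Field()

The spider would then set item['image_urls'] = [movie_picture[0]] and yield the item; the pipeline downloads the files into IMAGES_STORE.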

Invoking from main.py

# -*- coding:utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import os

# os.system(r"scrapy crawl douban")
# print os.getcwd()

# read the movie data
movies_info = open('movies.sql', 'r')
try:
    for line in movies_info:
        # pull the fields we need out of each movie record
        # print line
        movie_infos = line.split(',', 4)
        # print "movie_infos:", movie_infos
        movie_id = movie_infos[1]
        movie_title = movie_infos[2]
        # print movie_id + ":" + movie_title
        write_name = movie_title.replace('_', '+')
        write_name = write_name.replace('\'', '')
        print "name is :" + write_name

        # write the movie name to an intermediate file for the spider to read
        movie_name_file = open('movie_name.txt', 'w')
        try:
            movie_name_file.write(write_name)
        finally:
            movie_name_file.close()

        # write the movie id to an intermediate file for the spider to read
        movie_id_file = open('movie_id.txt', 'w')
        try:
            movie_id_file.write(movie_id)
        finally:
            movie_id_file.close()

        # the spider reads the movie name from movie_name.txt and crawls it
        os.system(r"scrapy crawl douban")
finally:
    movies_info.close()

movies.sql format

20474,120001,"状告恶魔",2010,0,0
20475,120002,"大空头",2015,0,0
20476,120003,"间谍之桥",2015,0,0
20477,120004,"云中行走",2015,0,0
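
To make the parsing in main.py concrete: line.split(',', 4) splits each record on the first four commas, so for the first sample row the fields the script uses come out as:

movie_infos = line.split(',', 4)
# movie_infos[1] -> '120001'        (movie_id)
# movie_infos[2] -> '"状告恶魔"'     (movie_title, still wrapped in quotes)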

Usage

python main.py
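
Note that main.py has to be run from the Scrapy project root (the directory containing scrapy.cfg), since it shells out to scrapy crawl douban, and the pictures/ directory must exist beforehand because urllib.urlretrieve will not create it.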

References

[1] Downloading images with a Scrapy spider (Douban movie posters)
[2] ImagesPipeline documentation