Connecting Scrapy to Various Databases (SQLite, MySQL, MongoDB, Redis)

This time I'll show you how to connect Scrapy to four databases (SQLite, MySQL, MongoDB, Redis) and store the scraped data in each of them.

I. SQLite

1. Edit pipelines.py and add the following code:

# Write scraped data to an SQLite database
import sqlite3

class SQLitePipeline(object):

    # open the database connection when the spider starts
    def open_spider(self, spider):
        db_name = spider.settings.get('SQLITE_DB_NAME', 'scrapy.db')
        self.db_conn = sqlite3.connect(db_name)
        self.db_cur = self.db_conn.cursor()

    # commit and close the connection when the spider finishes
    def close_spider(self, spider):
        self.db_conn.commit()
        self.db_conn.close()

    # pass each scraped item to the insert helper
    def process_item(self, item, spider):
        self.insert_db(item)
        return item

    # insert one item into the books table
    def insert_db(self, item):
        values = (
            item['upc'],
            item['name'],
            item['price'],
            item['review_rating'],
            item['review_num'],
            item['stock'],
        )
        sql = 'INSERT INTO books VALUES (?, ?, ?, ?, ?, ?)'
        self.db_cur.execute(sql, values)
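The INSERT above assumes a books table already exists in scrapy.db. The post doesn't show its schema, so here is a minimal one-off setup sketch; the column names come from the item fields, while the TEXT types are my assumption:

# one-off setup sketch (not from the original post): create the books table
# that SQLitePipeline's INSERT expects; TEXT column types are an assumption
import sqlite3

conn = sqlite3.connect('scrapy.db')
conn.execute('''
    CREATE TABLE IF NOT EXISTS books (
        upc TEXT PRIMARY KEY,
        name TEXT,
        price TEXT,
        review_rating TEXT,
        review_num TEXT,
        stock TEXT
    )
''')
conn.commit()
conn.close()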

2. Edit settings.py and add the following:

# SQLite configuration
SQLITE_DB_NAME = 'scrapy.db'

Then enable the pipeline in settings.py:

ITEM_PIPELINES = {
    'toscrape_book.pipelines.SQLitePipeline': 400,
}
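After enabling the pipeline, run the spider as usual. To confirm the rows were written, a quick check along these lines works (assuming the scrapy.db file and books table from above):

# sanity-check sketch: count and peek at the stored rows
import sqlite3

conn = sqlite3.connect('scrapy.db')
print(conn.execute('SELECT COUNT(*) FROM books').fetchone()[0])
print(conn.execute('SELECT * FROM books LIMIT 3').fetchall())
conn.close()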

II. MySQL

1. Edit pipelines.py and add the following code:

# Write scraped data to a MySQL database
import pymysql

class MySQLPipeline(object):

    # open the database connection when the spider starts
    def open_spider(self, spider):
        db = spider.settings.get('MYSQL_DB_NAME', 'scrapy_db')
        host = spider.settings.get('MYSQL_HOST', 'localhost')
        port = spider.settings.get('MYSQL_PORT', 3306)
        user = spider.settings.get('MYSQL_USER', 'root')
        passwd = spider.settings.get('MYSQL_PASSWORD', '123456')
        self.db_conn = pymysql.connect(host=host, port=port, db=db,
                                       user=user, passwd=passwd, charset='utf8')
        self.db_cur = self.db_conn.cursor()

    # commit and close the connection when the spider finishes
    def close_spider(self, spider):
        self.db_conn.commit()
        self.db_conn.close()

    # pass each scraped item to the insert helper
    def process_item(self, item, spider):
        self.insert_db(item)
        return item

    # insert one item into the books table
    def insert_db(self, item):
        values = (
            item['upc'],
            item['name'],
            item['price'],
            item['review_rating'],
            item['review_num'],
            item['stock'],
        )
        sql = 'INSERT INTO books VALUES (%s, %s, %s, %s, %s, %s)'
        self.db_cur.execute(sql, values)
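Unlike SQLite, MySQL will not create the scrapy_db database or the books table for you. A one-off setup sketch, with the schema mirroring the SQLite assumption above (column types are mine, not from the original post):

# one-off setup sketch: create the database and table the pipeline expects
import pymysql

conn = pymysql.connect(host='127.0.0.1', user='root',
                       passwd='123456', charset='utf8')
cur = conn.cursor()
cur.execute('CREATE DATABASE IF NOT EXISTS scrapy_db DEFAULT CHARACTER SET utf8')
cur.execute('''
    CREATE TABLE IF NOT EXISTS scrapy_db.books (
        upc VARCHAR(32) PRIMARY KEY,
        name VARCHAR(256),
        price VARCHAR(32),
        review_rating VARCHAR(32),
        review_num VARCHAR(32),
        stock VARCHAR(32)
    )
''')
conn.commit()
conn.close()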

2. Edit settings.py and add the following:

# MySQL configuration
MYSQL_DB_NAME = 'scrapy_db'
MYSQL_HOST = '127.0.0.1'
MYSQL_USER = 'root'
MYSQL_PASSWORD = '123456'

Then enable the pipeline in settings.py:

ITEM_PIPELINES = {
    'toscrape_book.pipelines.MySQLPipeline': 401,
}

III. MongoDB

1. Edit pipelines.py and add the following code:

# Write scraped data to a MongoDB database
from pymongo import MongoClient
from scrapy import Item

class MongoDBPipeline(object):

    # open the database connection when the spider starts
    def open_spider(self, spider):
        db_uri = spider.settings.get('MONGODB_URI', 'mongodb://localhost:27017')
        db_name = spider.settings.get('MONGODB_DB_NAME', 'scrapy_db')
        self.db_client = MongoClient(db_uri)
        self.db = self.db_client[db_name]

    # close the connection when the spider finishes
    def close_spider(self, spider):
        self.db_client.close()

    # pass each scraped item to the insert helper
    def process_item(self, item, spider):
        self.insert_db(item)
        return item

    # insert one item into the books collection
    def insert_db(self, item):
        if isinstance(item, Item):
            item = dict(item)
        # insert_one replaces the old Collection.insert, which was
        # deprecated in PyMongo 3 and removed in PyMongo 4
        self.db.books.insert_one(item)
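MongoDB needs no schema setup: the scrapy_db database and the books collection are created on first insert. To spot-check the stored documents after a crawl, something like this works (same connection settings as above):

# sanity-check sketch: count and peek at the stored documents
from pymongo import MongoClient

client = MongoClient('mongodb://127.0.0.1:27017')
db = client['scrapy_db']
print(db.books.count_documents({}))
print(db.books.find_one())
client.close()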

2. Edit settings.py and add the following:

# MongoDB configuration
MONGODB_URI = 'mongodb://127.0.0.1:27017'
MONGODB_DB_NAME = 'scrapy_db'

Then enable the pipeline in settings.py:

ITEM_PIPELINES = {
    'toscrape_book.pipelines.MongoDBPipeline': 403,
}

IV. Redis

1. Edit pipelines.py and add the following code:

# Write scraped data to a Redis database
import redis
from scrapy import Item

class RedisPipeline(object):

    # open the database connection when the spider starts
    def open_spider(self, spider):
        db_host = spider.settings.get('REDIS_HOST', 'localhost')
        db_port = spider.settings.get('REDIS_PORT', 6379)
        db_index = spider.settings.get('REDIS_DB_INDEX', 0)
        self.db_conn = redis.StrictRedis(host=db_host, port=db_port, db=db_index)
        self.item_i = 0  # running counter used to build each item's key

    # release the connection pool when the spider finishes
    def close_spider(self, spider):
        self.db_conn.connection_pool.disconnect()

    # pass each scraped item to the insert helper
    def process_item(self, item, spider):
        self.insert_db(item)
        return item

    # store one item as a Redis hash keyed book:<n>
    def insert_db(self, item):
        if isinstance(item, Item):
            item = dict(item)
        self.item_i += 1
        self.db_conn.hmset('book:{}'.format(self.item_i), item)
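One compatibility note: in redis-py 3.x, hmset is deprecated in favor of hset with a mapping keyword (available from redis-py 3.5). If you see a deprecation warning, the last line of insert_db above can be swapped for:

# equivalent call on redis-py >= 3.5, where hmset is deprecated
self.db_conn.hset('book:{}'.format(self.item_i), mapping=item)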

2. Edit settings.py and add the following:

# Redis configuration
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
REDIS_DB_INDEX = 0

Then enable the pipeline in settings.py:

ITEM_PIPELINES = {
    'toscrape_book.pipelines.RedisPipeline': 404,
}
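As with the other stores, it's worth reading an item back after a crawl. A minimal check (decode_responses=True makes redis-py return strings instead of bytes):

# sanity-check sketch: read back the first stored item
import redis

r = redis.StrictRedis(host='127.0.0.1', port=6379, db=0, decode_responses=True)
print(r.hgetall('book:1'))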

Connecting Scrapy to these databases is not complicated, and the pattern is the same each time: define a pipeline class in pipelines.py that opens the connection, processes each item, and closes the connection; define the database's basic configuration in settings.py; and enable the corresponding pipeline in ITEM_PIPELINES. A combined configuration is sketched below.
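If you want to write to several of these stores in one crawl, the four pipelines can be enabled together; Scrapy runs each item through them in ascending priority order. Using the priorities from the sections above:

ITEM_PIPELINES = {
    'toscrape_book.pipelines.SQLitePipeline': 400,
    'toscrape_book.pipelines.MySQLPipeline': 401,
    'toscrape_book.pipelines.MongoDBPipeline': 403,
    'toscrape_book.pipelines.RedisPipeline': 404,
}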

