commit 1159685066
init
config_spider/__init__.py (new file, empty)
config_spider/items.py (new file, 41 lines)
@@ -0,0 +1,41 @@
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class Item(scrapy.Item):
    # Several field names are pinyin: xueli = education level, yingjie =
    # fresh graduate, xinzi = salary, jiesao = company introduction,
    # hangye = industry, guimo = company size, fuli = benefits,
    # teshe = special features, fanwei = business scope.
    _id = scrapy.Field()
    task_id = scrapy.Field()
    ts = scrapy.Field()
    job_list_title = scrapy.Field()
    job_url = scrapy.Field()
    com_name1 = scrapy.Field()
    job_title = scrapy.Field()
    job_num = scrapy.Field()
    job_xueli = scrapy.Field()
    job_yingjie = scrapy.Field()
    job_des = scrapy.Field()
    com_jiesao = scrapy.Field()
    com_name2 = scrapy.Field()
    job_xinzi = scrapy.Field()
    com_url = scrapy.Field()
    linkman = scrapy.Field()
    linkemail = scrapy.Field()
    site_link = scrapy.Field()
    linktelpic = scrapy.Field()
    com_name = scrapy.Field()
    com_hangye = scrapy.Field()
    com_guimo = scrapy.Field()
    com_manguimo = scrapy.Field()
    com_fuli = scrapy.Field()
    com_teshe = scrapy.Field()
    com_dec = scrapy.Field()
    com_fanwei = scrapy.Field()
    com_address = scrapy.Field()
    com_add = scrapy.Field()
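Item inherits dict-like behavior from scrapy.Item, which is what the pipeline below relies on when it converts items for storage. A quick illustration with hypothetical values (not part of the commit):

from config_spider.items import Item

item = Item(job_title='HR Manager', job_xinzi='5000-8000')  # declared fields only
item['com_name'] = 'Example Co.'
print(dict(item))  # e.g. {'job_title': 'HR Manager', 'job_xinzi': '5000-8000', 'com_name': 'Example Co.'}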
config_spider/middlewares.py (new file, 103 lines)
@@ -0,0 +1,103 @@
# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class ConfigSpiderSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Request, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class ConfigSpiderDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
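Both classes are the untouched startproject templates and are not enabled in settings.py below. If one were activated, a typical customization is tagging outgoing requests in process_request; a minimal sketch under that assumption (the subclass and header name are made up for illustration):

class TaggingDownloaderMiddleware(ConfigSpiderDownloaderMiddleware):
    def process_request(self, request, spider):
        # Attach an identifying header before the downloader sees the request;
        # returning None lets processing continue as normal.
        request.headers.setdefault('X-Spider-Name', spider.name)
        return None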
config_spider/pipelines.py (new file, 27 lines)
@@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html

import os
from pymongo import MongoClient

mongo = MongoClient(
    host=os.environ.get('CRAWLAB_MONGO_HOST') or 'localhost',
    port=int(os.environ.get('CRAWLAB_MONGO_PORT') or 27017),
    username=os.environ.get('CRAWLAB_MONGO_USERNAME'),
    password=os.environ.get('CRAWLAB_MONGO_PASSWORD'),
    authSource=os.environ.get('CRAWLAB_MONGO_AUTHSOURCE') or 'admin'
)
db = mongo[os.environ.get('CRAWLAB_MONGO_DB') or 'test']
col = db[os.environ.get('CRAWLAB_COLLECTION') or 'test']
task_id = os.environ.get('CRAWLAB_TASK_ID')


class ConfigSpiderPipeline(object):
    def process_item(self, item, spider):
        item['task_id'] = task_id
        if col is not None:
            # The original called col.save(item), which was removed in
            # PyMongo 4; insert_one() is the equivalent here since the
            # spider never sets _id itself.
            col.insert_one(dict(item))
        return item
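Because the Mongo wiring runs at import time, the CRAWLAB_* variables must be set before the module is first imported. A hypothetical smoke test of the task_id stamping (assumes a reachable MongoDB, e.g. a local instance matching the defaults above):

import os
os.environ.setdefault('CRAWLAB_TASK_ID', 'demo-task')  # must precede the import

from config_spider.pipelines import ConfigSpiderPipeline
from config_spider.items import Item

out = ConfigSpiderPipeline().process_item(Item(job_title='engineer'), spider=None)
assert out['task_id'] == 'demo-task'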
config_spider/settings.py (new file, 111 lines)
@@ -0,0 +1,111 @@
# -*- coding: utf-8 -*-
import os
import re
import json

# Scrapy settings for config_spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'Crawlab Configurable Spider'

SPIDER_MODULES = ['config_spider.spiders']
NEWSPIDER_MODULE = 'config_spider.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Crawlab Spider'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests to the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'config_spider.middlewares.ConfigSpiderSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'config_spider.middlewares.ConfigSpiderDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'config_spider.pipelines.ConfigSpiderPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

# Allow any Scrapy setting to be overridden through a CRAWLAB_SETTING_<NAME>
# environment variable, with simple type coercion: 'true'/'false' become
# booleans, digit strings become ints, and JSON-looking strings ({...} or
# [...]) are parsed as JSON; anything else stays a string.
for setting_env_name in [x for x in os.environ.keys() if x.startswith('CRAWLAB_SETTING_')]:
    setting_name = setting_env_name.replace('CRAWLAB_SETTING_', '')
    setting_value = os.environ.get(setting_env_name)
    if setting_value.lower() == 'true':
        setting_value = True
    elif setting_value.lower() == 'false':
        setting_value = False
    elif re.search(r'^\d+$', setting_value) is not None:
        setting_value = int(setting_value)
    elif re.search(r'^\{.*\}$', setting_value.strip()) is not None:
        setting_value = json.loads(setting_value)
    elif re.search(r'^\[.*\]$', setting_value.strip()) is not None:
        setting_value = json.loads(setting_value)
    # At module scope locals() is globals(), so this assignment really does
    # define the setting on the module.
    locals()[setting_name] = setting_value
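The loop above is what makes the spider "configurable": Crawlab injects CRAWLAB_SETTING_* variables and they land as module-level Scrapy settings. A quick standalone check of the coercion rules (hypothetical values; assumes config_spider is importable from the current directory):

import os
os.environ['CRAWLAB_SETTING_DOWNLOAD_DELAY'] = '3'
os.environ['CRAWLAB_SETTING_ROBOTSTXT_OBEY'] = 'false'
os.environ['CRAWLAB_SETTING_DEFAULT_REQUEST_HEADERS'] = '{"Accept-Language": "en"}'

import config_spider.settings as s
print(s.DOWNLOAD_DELAY)            # 3 (int)
print(s.ROBOTSTXT_OBEY)            # False (bool)
print(s.DEFAULT_REQUEST_HEADERS)   # {'Accept-Language': 'en'} (dict)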
config_spider/spiders/__init__.py (new file, 4 lines)
@@ -0,0 +1,4 @@
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
config_spider/spiders/spider.py (new file, 66 lines)
@@ -0,0 +1,66 @@
# -*- coding: utf-8 -*-
import scrapy
import re
from config_spider.items import Item
from urllib.parse import urljoin, urlparse


def get_real_url(response, url):
    # Absolute URLs pass through; protocol-relative URLs get the response's
    # scheme; everything else is resolved against the response URL.
    if re.search(r'^https?', url):
        return url
    elif re.search(r'^\/\/', url):
        u = urlparse(response.url)
        # The original concatenated u.scheme + url, yielding an invalid
        # 'https//...' URL; the ':' separator is required.
        return u.scheme + ':' + url
    return urljoin(response.url, url)


class ConfigSpider(scrapy.Spider):
    name = 'config_spider'

    def start_requests(self):
        # 58.com job listings for Linyi, HR supervisor (renshizhuguan) category
        yield scrapy.Request(url='https://linyi.58.com/renshizhuguan/pn3/?PGTID=0d302892-001f-99e7-35c5-0a69a314c4dd&ClickID=3', callback=self.parse_list)

    def parse_list(self, response):
        prev_item = response.meta.get('item')
        for elem in response.css('li.job_item'):
            item = Item()
            item['job_list_title'] = elem.css('span.name::text').extract_first()
            item['job_url'] = elem.css('div.job_name > a::attr("href")').extract_first()
            item['com_name1'] = elem.css('div.comp_name > a::text').extract_first()
            if prev_item is not None:
                for key, value in prev_item.items():
                    item[key] = value
            yield scrapy.Request(url=get_real_url(response, item['job_url']), callback=self.parse_job_info, meta={'item': item})
        next_url = response.css('a.next::attr("href")').extract_first()
        if next_url:  # guard added: the last page has no "next" link
            yield scrapy.Request(url=get_real_url(response, next_url), callback=self.parse_list, meta={'item': prev_item})

    def parse_job_info(self, response):
        item = Item() if response.meta.get('item') is None else response.meta.get('item')
        item['job_title'] = response.css('span.pos_name::text').extract_first()
        item['job_num'] = response.css('span.pad_left_none::text').extract_first()
        item['job_xueli'] = response.css('.pos_base_condition > span:nth-last-child(2)::text').extract_first()
        item['job_yingjie'] = response.css('span.border_right_None::text').extract_first()
        item['job_des'] = response.css('div.des *::text').extract_first()
        item['com_jiesao'] = response.css('div.comp_intro *::text').extract_first()
        item['com_name2'] = response.css('div.baseInfo_link > a::text').extract_first()
        item['job_xinzi'] = response.css('span.pos_salary::text').extract_first()
        item['com_url'] = response.css('div.baseInfo_link > a::attr("href")').extract_first()
        yield scrapy.Request(url=get_real_url(response, item['com_url']), callback=self.parse_com_info, meta={'item': item})

    def parse_com_info(self, response):
        item = Item() if response.meta.get('item') is None else response.meta.get('item')
        item['linkman'] = response.css('div.c_detail_item:nth-child(1) > em::text').extract_first()
        item['linkemail'] = response.css('div.c_detail:nth-child(2) > div.c_detail_item:nth-child(2) > em::text').extract_first()
        item['site_link'] = response.css('div.c_detail:nth-child(3) > div.c_detail_item:nth-child(2) > em::text').extract_first()
        item['linktelpic'] = response.css('div.phone-protect > img::attr("src")').extract_first()
        item['com_name'] = response.css('div.nan_title > h2::text').extract_first()
        item['com_hangye'] = response.css('div.basic > p:nth-child(3) *::text').extract_first()
        item['com_guimo'] = response.css('div.basic > p:nth-child(4) *::text').extract_first()
        item['com_manguimo'] = response.css('div.basic > p:nth-child(5) *::text').extract_first()
        item['com_fuli'] = response.css('div.welfare > div.w_label > span::text').extract_first()
        item['com_teshe'] = response.css('div.feature > div.w_label > span::text').extract_first()
        item['com_dec'] = response.css('div.introduction_box > p *::text').extract_first()
        item['com_fanwei'] = response.css('div.buiness > div.b_detail > p:nth-child(8) *::text').extract_first()
        item['com_address'] = response.css('div.buiness > div.b_detail > p:nth-child(9) *::text').extract_first()
        item['com_add'] = response.css('p.a_address::text').extract_first()
        yield item
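get_real_url is easy to verify in isolation with a stub object standing in for the response (a hypothetical check, not part of the commit; the protocol-relative case relies on the ':' fix above):

from types import SimpleNamespace

resp = SimpleNamespace(url='https://linyi.58.com/renshizhuguan/pn3/')
assert get_real_url(resp, 'https://example.com/a') == 'https://example.com/a'
assert get_real_url(resp, '//img.58.com/logo.png') == 'https://img.58.com/logo.png'
assert get_real_url(resp, '/pinpai/123.html') == 'https://linyi.58.com/pinpai/123.html'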
scrapy.cfg (new file, 11 lines)
@@ -0,0 +1,11 @@
# Automatically created by: scrapy startproject
#
# For more information about the [deploy] section see:
# https://scrapyd.readthedocs.io/en/latest/deploy.html

[settings]
default = config_spider.settings

[deploy]
#url = http://localhost:6800/
project = config_spider
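With scrapy.cfg pointing at config_spider.settings, the usual way to run the spider is `scrapy crawl config_spider` from the project root. A sketch of the programmatic equivalent using Scrapy's public API (assumes it is run from the project root so the settings can be located):

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from config_spider.spiders.spider import ConfigSpider

process = CrawlerProcess(get_project_settings())  # resolves settings via scrapy.cfg
process.crawl(ConfigSpider)
process.start()  # blocks until the crawl finishes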