
Commit cbd8bdd

ravishhankar committed:
Added spider for Naukri.com #1 Fixed
1 parent 9eaf431 commit cbd8bdd

File tree

8 files changed: 329 additions, 0 deletions


joble/__init__.py

Whitespace-only changes.

joble/items.py

+14
@@ -0,0 +1,14 @@
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class JobleItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
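
Note: the item class is left as the generated placeholder, and the spider below yields plain dicts. If typed items were wanted, a minimal sketch along these lines would cover the fields the spider emits (field names mirror the spider's output dict; this is an illustrative assumption, not part of the commit):

# Hypothetical sketch only, not part of this commit: fields mirroring the
# dict keys yielded by joble/spiders/naukri.py.
import scrapy


class NaukriJobItem(scrapy.Item):
    category = scrapy.Field()
    title = scrapy.Field()
    jobId = scrapy.Field()
    companyName = scrapy.Field()
    skills = scrapy.Field()
    joburl = scrapy.Field()
    postedon = scrapy.Field()
    description = scrapy.Field()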

joble/middlewares.py

+103
@@ -0,0 +1,103 @@
# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class JobleSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Request, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class JobleDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

joble/pipelines.py

+11
@@ -0,0 +1,11 @@
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


class JoblePipeline:
    def process_item(self, item, spider):
        return item
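
Note: the generated pipeline is a pass-through. As an illustration only (not in the commit), a minimal pipeline that drops records without a job title could look like the sketch below, assuming items arrive as the plain dicts the naukri spider yields; it would need to be registered under ITEM_PIPELINES in settings.py.

# Illustrative sketch only, not part of this commit.
from scrapy.exceptions import DropItem


class RequireTitlePipeline:
    def process_item(self, item, spider):
        # Discard records that carry no job title.
        if not item.get('title'):
            raise DropItem('missing title')
        return item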

joble/settings.py

+90
@@ -0,0 +1,90 @@
# -*- coding: utf-8 -*-

# Scrapy settings for joble project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'joble'

SPIDER_MODULES = ['joble.spiders']
NEWSPIDER_MODULE = 'joble.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'joble (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'joble.middlewares.JobleSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'joble.middlewares.JobleDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
#    'joble.pipelines.JoblePipeline': 300,
#}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
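
Note (not part of the commit): the spider attaches the same User-Agent, appid, and systemid headers to every search-API request. If those were wanted project-wide instead, a sketch like the following could live in this file; the values are simply copied from the spider, and DEFAULT_REQUEST_HEADERS would then also apply to the non-API category pages.

# Illustrative only; values copied from joble/spiders/naukri.py.
USER_AGENT = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:74.0) Gecko/20100101 Firefox/74.0'
DEFAULT_REQUEST_HEADERS = {
    'appid': '109',
    'systemid': '109',
}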

joble/spiders/__init__.py

+4
@@ -0,0 +1,4 @@
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.

joble/spiders/naukri.py

+96
@@ -0,0 +1,96 @@
# -*- coding: utf-8 -*-
import json
import urllib.parse

import scrapy


class NaukriSpider(scrapy.Spider):
    name = 'naukri'
    allowed_domains = ['naukri.com']
    start_urls = ['https://www.naukri.com']

    def __init__(self, keyword=None, count=20, city=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.count = count
        self.keyword = keyword
        self.city = city

    def get_url(self):
        # Build the Naukri search-API endpoint and its query parameters.
        base_url = 'https://www.naukri.com/jobapi/v3/search?'
        params = {
            'noOfResults': self.count,
            'urlType': 'search_by_key_loc',
            'searchType': 'adv',
            'keyword': self.keyword,
            'location': self.city,
            'sort': 'r',
            'k': self.keyword,
            'l': self.city,
            'seoKey': '{}-jobs-in-{}'.format(self.keyword, self.city) if self.city else '{}-jobs'.format(self.keyword),
            'src': 'jobsearchDesk',
            'latLong': ''
        }
        # When no city is given, drop the location-related parameters.
        default = ['keyword', 'sort', 'l', 'k', 'location']
        if self.city is None:
            for key in default:
                params.pop(key)
        return {
            'url': base_url,
            'body': params
        }

    def parse(self, response):
        if self.keyword:
            # Keyword search: query the search API directly.
            record = self.get_url()
            yield scrapy.Request(url=record['url'] + urllib.parse.urlencode(record['body']),
                                 headers={'User-Agent': "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:74.0) Gecko/20100101 Firefox/74.0",
                                          'appid': '109',
                                          'systemid': '109'},
                                 meta={'keyword': self.keyword},
                                 callback=self.jobData)
        else:
            # No keyword given: crawl every job category instead.
            yield scrapy.Request('https://www.naukri.com/jobs-by-category',
                                 callback=self.get_by_category)

    def get_by_category(self, response):
        # Follow each category link on the jobs-by-category page.
        for j in response.xpath('//div[@class="lmrWrap wrap"]/div/div/div/a'):
            title = j.xpath('text()').get().strip()
            url = j.xpath('@href').get().strip()
            yield scrapy.Request(url,
                                 callback=self.job_list,
                                 meta={'keyword': title, 'count': 0, 'plink': url})

    def job_list(self, response):
        # Derive the search-API parameters from the category page URL.
        plink = response.meta['plink'].split('/')[-1]
        keyword = plink.split('-jobs')[0]
        seokeys = keyword + '-jobs'
        ids = plink.split('=')[-1]
        joburl = 'https://www.naukri.com/jobapi/v3/search?noOfResults=20&urlType=search_by_keyword&searchType=adv&keyword={}&xt=catsrch&functionAreaId={}&seoKey={}&src=jobsearchDesk&latLong='.format(keyword, ids, seokeys)
        yield scrapy.Request(joburl,
                             headers={'Referer': response.meta['plink'],
                                      'User-Agent': "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:74.0) Gecko/20100101 Firefox/74.0",
                                      'appid': '109',
                                      'systemid': '109'},
                             meta={'url': response.meta['plink'], 'keyword': keyword, 'ids': ids, 'seokeys': seokeys},
                             callback=self.jobData)

    def jobData(self, response):
        # The search API responds with JSON; parse it and flatten each job record.
        jobdata = json.loads(response.text)
        if jobdata.get('jobDetails'):
            for job in jobdata['jobDetails']:
                place = job['placeholders']
                detail = {}
                for p in place:
                    # Each placeholder is a two-entry dict; unpack it into the detail map.
                    key, value = p.values()
                    detail[key] = value
                details = {
                    'category': response.meta['keyword'],
                    'title': job['title'],
                    'jobId': job['jobId'],
                    'companyName': job['companyName'],
                    'skills': job.get('tagsAndSkills'),
                    'joburl': job['jdURL'],
                    'postedon': job['footerPlaceholderLabel'],
                    'description': job.get('jobDescription')
                }
                final_result = {**detail, **details}
                yield final_result
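
Note: a minimal way to exercise the spider, as a sketch rather than part of the commit. The keyword/count/city constructor arguments map directly onto crawl arguments, so they can be passed through CrawlerProcess; the 'python' and 'bangalore' values below are placeholders.

# Illustrative run script (assumed usage, not part of this commit).
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from joble.spiders.naukri import NaukriSpider

process = CrawlerProcess(get_project_settings())
process.crawl(NaukriSpider, keyword='python', city='bangalore', count=20)
process.start()

The same arguments can be passed on the command line with scrapy crawl naukri -a keyword=python -a city=bangalore; omitting the keyword makes the spider fall back to crawling the jobs-by-category listing.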

scrapy.cfg

+11
@@ -0,0 +1,11 @@
# Automatically created by: scrapy startproject
#
# For more information about the [deploy] section see:
# https://scrapyd.readthedocs.io/en/latest/deploy.html

[settings]
default = joble.settings

[deploy]
#url = http://localhost:6800/
project = joble
