Scrapy does not crawl all start_urls

I have a list of ~2211 start URLs, and Scrapy crawls some but not all of them. When I set start_urls to a single URL it crawls that URL fine, but when the same URL is part of the large list, Scrapy does not crawl it.

Is there a limit on the number of start_urls?

My code:

from pymongo import MongoClient
import re
from scrapy.selector import Selector
#from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor

from mongo.items import MongoItem
import scrapy
import json
from scrapy.http import Request
from bs4 import BeautifulSoup as BS


uri = "mongodb://asdf@asdf.ac.commerce.com:23423423/"
client = MongoClient(uri)
db = client['page_content']
collection3 = db['category_page_content']
copyblocks3 = collection3.distinct('cwc')
copyblockss = str(copyblocks3)

hrefs = re.findall(r'href=[\'"]?([^\'" >]+)', copyblockss)

class MongoSpider(scrapy.Spider):
    name = "collections3"
    allowed_domains = ["www.ecommerce.com"]
    handle_httpstatus_list = [502, 503, 504, 400, 408, 404]
    start_urls = hrefs

    def parse(self, response):
        hxs = Selector(response)
        sites = response.selector.xpath('//html')
        items = []

        if response.status == 404:
            for site in sites:
                item = MongoItem()
                item['url'] = response.url
                item['status'] = response.status
                item['original_url'] = response.meta.get('redirect_urls', [response.url])[0]
                items.append(item)

                htmlvar = item['original_url']
                change_list = list(collection3.find({"cwc":{"$regex":htmlvar}}))

                alldata = dict()
                cwcblockdic = ""
                for a in change_list:
                    alldata.update(a)
                ids = alldata['_id']
                cwcblock = alldata['cwc']
                cwcblockdic = cwcblockdic + cwcblock

                soup = BS(cwcblockdic)
                wholehref = soup.find(href=htmlvar)
                try:
                    anchortext = soup.findAll(href=htmlvar)[0].text
                except:
                    anchortext = wholehref.get_text()
                soup.find(href=htmlvar).replaceWith(anchortext)
                soup = str(soup)
                newlist = soup.replace('<html><body>', '').replace('</body></html>','')

                print "this is the anchor:", anchortext
                print "this is the href:", wholehref
                print "this is newlist:", newlist
                print "this is the id:", ids
                print "this is pagetype: CP"

                for item in change_list:
                    item['cwc'] = newlist
                    collection3.update({'_id':ids}, {"$set":{"cwc":item['cwc']}}, upsert=False)
            return items

        elif hxs.xpath('/html/head/title/text()[contains(.,"invalid")]'):
            for site in sites:
                item = MongoItem()
                item['url'] = response.url
                item['status'] = response.status
                item['original_url'] = response.meta.get('redirect_urls', [response.url])[0]
                items.append(item)

                htmlvar = item['original_url']
                change_list = list(collection3.find({"cwc":{"$regex":htmlvar}}))

                alldata = dict()
                cwcblockdic = ""
                for a in change_list:
                    alldata.update(a)
                ids = alldata['_id']
                cwcblock = alldata['cwc']
                cwcblockdic = cwcblockdic + cwcblock

                soup = BS(cwcblockdic)
                wholehref = soup.find(href=htmlvar)
                try:
                    anchortext = soup.findAll(href=htmlvar)[0].text
                except:
                    anchortext = wholehref.get_text()
                soup.find(href=htmlvar).replaceWith(anchortext)
                soup = str(soup)
                newlist = soup.replace('<html><body>', '').replace('</body></html>','')

                print "this is the anchor:", anchortext
                print "this is the href:", wholehref
                print "this is newlist:", newlist
                print "this is the id:", ids
                print "this is pagetype: CP"

                for item in change_list:
                    item['cwc'] = newlist
                    collection3.update({'_id':ids}, {"$set":{"cwc":item['cwc']}}, upsert=False)
            return items

        elif hxs.xpath('//head/link[@rel="canonical"]/@href[contains(.,"invalid-category-id")]'):
            for site in sites:
                item = MongoItem()
                item['url'] = response.url
                item['status'] = response.status
                item['original_url'] = response.meta.get('redirect_urls', [response.url])[0]
                items.append(item)

                htmlvar = item['original_url']
                change_list = list(collection3.find({"cwc":{"$regex":htmlvar}}))

                alldata = dict()
                cwcblockdic = ""
                for a in change_list:
                    alldata.update(a)
                ids = alldata['_id']
                cwcblock = alldata['cwc']
                cwcblockdic = cwcblockdic + cwcblock

                soup = BS(cwcblockdic)
                wholehref = soup.find(href=htmlvar)
                try:
                    anchortext = soup.findAll(href=htmlvar)[0].text
                except:
                    anchortext = wholehref.get_text()
                soup.find(href=htmlvar).replaceWith(anchortext)
                soup = str(soup)
                newlist = soup.replace('<html><body>', '').replace('</body></html>','')

                print "this is the anchor:", anchortext
                print "this is the href:", wholehref
                print "this is newlist:", newlist
                print "this is the id:", ids
                print "this is pagetype: CP"

                for item in change_list:
                    item['cwc'] = newlist
                    collection3.update({'_id':ids}, {"$set":{"cwc":item['cwc']}}, upsert=False)
            return items

        else:
            if hxs.xpath('//*[@class="result-summary-container"]/text()[contains(.,"Showing 0 of")]'):
                for site in sites:
                    item = MongoItem()
                    item['url'] = response.url
                    item['status'] = response.status
                    item['original_url'] = response.meta.get('redirect_urls', [response.url])[0]
                    items.append(item)

                    htmlvar = item['original_url']
                    change_list = list(collection3.find({"cwc":{"$regex":htmlvar}}))

                    alldata = dict()
                    cwcblockdic = ""
                    for a in change_list:
                        alldata.update(a)
                    ids = alldata['_id']
                    cwcblock = alldata['cwc']
                    cwcblockdic = cwcblockdic + cwcblock

                    soup = BS(cwcblockdic)
                    wholehref = soup.find(href=htmlvar)
                    try:
                        anchortext = soup.findAll(href=htmlvar)[0].text
                    except:
                        anchortext = wholehref.get_text()
                    soup.find(href=htmlvar).replaceWith(anchortext)
                    soup = str(soup)
                    newlist = soup.replace('<html><body>', '').replace('</body></html>','')

                    print "this is the anchor:", anchortext
                    print "this is the href:", wholehref
                    print "this is newlist:", newlist
                    print "this is the id:", ids
                    print "this is pagetype: CP"

                    for item in change_list:
                        item['cwc'] = newlist
                        collection3.update({'_id':ids}, {"$set":{"cwc":item['cwc']}}, upsert=False)
                return items

      

2 answers


This may be only one of the reasons, but it is a real one: there are duplicate URLs in your list of start URLs:

>>> urls = [...]  # list of urls you've posted
>>> len(urls)
2221
>>> len(set(urls))
1177

And Scrapy filters duplicate requests by default, so only those 1177 unique URLs are actually scheduled.
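A minimal sketch of de-duplicating the list before it is handed to the spider (assuming hrefs is the list built from the Mongo regex in the question):

seen = set()
unique_hrefs = []
for href in hrefs:
    # keep only the first occurrence of each URL, preserving order
    if href not in seen:
        seen.add(href)
        unique_hrefs.append(href)

print "total:", len(hrefs), "unique:", len(unique_hrefs)

The spider can then use start_urls = unique_hrefs, so every URL it receives is distinct.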



If you want every URL to be requested even when the list contains duplicates, override start_requests() and pass dont_filter=True:

def start_requests(self):
    # yield one request per start URL, bypassing the duplicate filter
    for url in self.start_urls:
        yield scrapy.Request(url=url, callback=self.parse, dont_filter=True)
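Note that dont_filter=True bypasses Scrapy's duplicate-request filter entirely, so if hrefs still contains duplicates the same page will be downloaded and parsed more than once; de-duplicating the list first, as sketched in the previous answer, is usually the cleaner fix.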
