Selenium and Scrapy integration: click past the start page, then hand the cookies to Scrapy

I've been browsing Stack Overflow for a couple of hours and still haven't found a suitable answer for what I'm trying to do. I want to use Selenium to click through the start page, then transfer its cookies to Scrapy so it can crawl the pages behind the login. So far, I keep getting redirected back to the login page.

I based grabbing the cookies and putting them into the request on this answer: authorization with cookies.
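
For reference on the handoff itself: Selenium's get_cookies() returns a list of dicts that carry bookkeeping fields (domain, path, expiry and so on), while Scrapy's Request(cookies=...) accepts either such a list or a plain name-to-value dict. A minimal conversion sketch, assuming nothing beyond the two libraries (the helper name is mine):

def cookies_for_scrapy(selenium_cookies):
    # Selenium: [{'name': 'JSESSIONID', 'value': 'abc123', 'domain': ...}, ...]
    # Scrapy also accepts the flat form {'JSESSIONID': 'abc123', ...}
    return dict((c['name'], c['value']) for c in selenium_cookies)

Here is my spider so far: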

import time

import scrapy
from scrapy.http import Request
from scrapy.selector import HtmlXPathSelector
from selenium import webdriver


class HooversTest(scrapy.Spider):
    name = "hooversTest"
    # allowed_domains expects bare domain names; including the scheme
    # makes the offsite middleware filter out every request.
    allowed_domains = ["subscriber.hoovers.com"]
    # A single URL string, not a list, since it is passed to Request(url=...).
    login_page = "http://subscriber.hoovers.com/H/home/index.html"
    start_urls = ["http://subscriber.hoovers.com/H/company360/overview.html?companyId=99566395",
                  "http://subscriber.hoovers.com/H/company360/overview.html?companyId=10723000000000"]

    def login(self, response):
        return Request(url=self.login_page,
            cookies=self.get_cookies(), callback=self.after_login)

    def get_cookies(self):
        # Click through the interstitial page in a real browser,
        # then harvest the session cookies it was granted.
        self.driver = webdriver.Firefox()
        self.driver.get("http://www.mergentonline.com/Hoovers/continue.php?status=sucess")
        elem = self.driver.find_element_by_name("Continue")
        elem.click()
        time.sleep(15)
        cookies = self.driver.get_cookies()
        #reduce(lambda r, d: r.update(d) or r, cookies, {})
        self.driver.close()
        return cookies

    def parse(self, response):
        return Request(url="http://subscriber.hoovers.com/H/company360/overview.html?companyId=99566395",
            cookies=self.get_cookies(), callback=self.after_login)

    def after_login(self, response):
        # Print the title so I can tell whether I got the company page
        # or was bounced back to the login screen.
        hxs = HtmlXPathSelector(response)
        print hxs.select('//title').extract()
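
For completeness, here is a sketch of the flow I am aiming for: do the Selenium click-through once in start_requests and reuse the harvested cookies for every company page, instead of opening a fresh Firefox per request. This is my assumption about the right structure, not something I have working yet:

def start_requests(self):
    # Log in through the browser a single time.
    cookies = self.get_cookies()
    # Every company page then rides on the same session cookies.
    for url in self.start_urls:
        yield Request(url, cookies=cookies, callback=self.after_login)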

      
