Sunday 10 January 2016

Web Scraping https://www.dmoz.org/ with Scrapy in Python - 1 [ dmoz.py ]

In [1]:
# All this code is meant to be run at the console, not in this IPython / Jupyter notebook.
# The errors seen below appear only because this is a demonstrative IPython / Jupyter notebook.
# Code is inspired by - https://github.com/scrapy/dirbot

from scrapy.spiders import Spider
from scrapy.selector import Selector

from dirbot.items import Website
---------------------------------------------------------------------------
ImportError                               Traceback (most recent call last)
<ipython-input-1-8d8b97cb8d25> in <module>()
      2 from scrapy.selector import Selector
      3 
----> 4 from dirbot.items import Website

ImportError: No module named dirbot.items
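
The ImportError above appears only because the dirbot package is not on the notebook's path. For reference, a minimal sketch of the Website item that dirbot/items.py is expected to define (field names taken from the parse() code further below) is:

from scrapy.item import Item, Field

class Website(Item):
    # Fields filled in by DmozSpider.parse() below
    name = Field()
    url = Field()
    description = Field()
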
In [2]:
class DmozSpider(Spider):
    name = "dmoz"
    allowed_domains = ["dmoz.org"]
    start_urls = [
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Books/",
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/",
    ]
In [ ]:
    # This parse() method belongs inside the DmozSpider class above; it is shown
    # in a separate cell here only for presentation.
    def parse(self, response):
        """
        The lines below are a spider contract. For more info see:
        http://doc.scrapy.org/en/latest/topics/contracts.html

        @url http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/
        @scrapes name
        """
        sel = Selector(response)
        sites = sel.xpath('//ul[@class="directory-url"]/li')
        items = []

        for site in sites:
            item = Website()
            item['name'] = site.xpath('a/text()').extract()
            item['url'] = site.xpath('a/@href').extract()
            # The description follows a " - " separator and ends at a carriage return
            item['description'] = site.xpath('text()').re(r'-\s[^\n]*\r')
            items.append(item)

        return items
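
As the comments at the top note, this spider is meant to be run from the console rather than from the notebook; saved as dirbot/spiders/dmoz.py inside the dirbot project, it can be started with scrapy crawl dmoz -o items.json. As a rough sketch, it can also be driven from a plain Python script (the settings and output file name here are illustrative, not part of the original post):

from scrapy.crawler import CrawlerProcess

# Minimal settings: export the scraped Website items to a JSON file
process = CrawlerProcess({
    'FEED_FORMAT': 'json',
    'FEED_URI': 'items.json',
})
process.crawl(DmozSpider)
process.start()  # blocks until the crawl is finished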


#---------------

