ScrapyFy
Go to Store
This Actor is unavailable because the developer has decided to deprecate it. Would you like to try a similar Actor instead?
See alternative Actors
ScrapyFy
jupri/scrapyfy
Scrapy Runner
You can access the ScrapyFy programmatically from your own applications by using the Apify API. You can choose the language preference from below. To use the Apify API, you’ll need an Apify account and your API token, found in Integrations settings in Apify Console.
# Run the ScrapyFy Actor (jupri/scrapyfy) via the Apify CLI.
# The JSON document piped on stdin is the Actor input: "spiders_code" holds the
# Scrapy spider source as an escaped string, and the remaining keys override
# standard Scrapy settings (middlewares, handlers, exporters, retry codes, ...).
# --silent suppresses run logs; --output-dataset prints the scraped items
# from the run's default dataset when it finishes.
echo '{
  "spiders_code": "from urllib.parse import urljoin\\r\\n\\r\\n### multiple spiders can be specified\\r\\n\\r\\nclass TitleSpider(scrapy.Spider):\\r\\n\\r\\n name = '\''title_spider'\''\\r\\n allowed_domains = [\\"apify.com\\"]\\r\\n start_urls = [\\"https://apify.com\\"]\\r\\n\\r\\n custom_settings = {\\r\\n '\''REQUEST_FINGERPRINTER_IMPLEMENTATION'\'' : '\''2.7'\'',\\r\\n # Obey robots.txt rules\\r\\n '\''ROBOTSTXT_OBEY'\'' : True,\\r\\n '\''DEPTH_LIMIT'\'' : 2,\\r\\n '\''LOG_ENABLED'\'' : False,\\r\\n #'\''CLOSESPIDER_PAGECOUNT'\'' : 5,\\r\\n '\''CLOSESPIDER_ITEMCOUNT'\'' : 5,\\r\\n }\\r\\n\\r\\n def parse(self, response):\\r\\n yield {\\r\\n '\''url'\'': response.url,\\r\\n '\''title'\'': response.css('\''title::text'\'').extract_first(),\\r\\n }\\r\\n for link_href in response.css('\''a::attr(\\"href\\")'\''):\\r\\n link_url = urljoin(response.url, link_href.get())\\r\\n if link_url.startswith(('\''http://'\'', '\''https://'\'')):\\r\\n yield scrapy.Request(link_url)",
  "DEFAULT_REQUEST_HEADERS": {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Accept-Language": "en"
  },
  "DOWNLOADER_MIDDLEWARES": {},
  "DOWNLOADER_MIDDLEWARES_BASE": {
    "scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware": 100,
    "scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware": 300,
    "scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware": 350,
    "scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware": 400,
    "scrapy.downloadermiddlewares.useragent.UserAgentMiddleware": 500,
    "scrapy.downloadermiddlewares.retry.RetryMiddleware": 550,
    "scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware": 560,
    "scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware": 580,
    "scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware": 590,
    "scrapy.downloadermiddlewares.redirect.RedirectMiddleware": 600,
    "scrapy.downloadermiddlewares.cookies.CookiesMiddleware": 700,
    "scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware": 750,
    "scrapy.downloadermiddlewares.stats.DownloaderStats": 850,
    "scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware": 900
  },
  "DOWNLOAD_HANDLERS": {},
  "DOWNLOAD_HANDLERS_BASE": {
    "data": "scrapy.core.downloader.handlers.datauri.DataURIDownloadHandler",
    "file": "scrapy.core.downloader.handlers.file.FileDownloadHandler",
    "http": "scrapy.core.downloader.handlers.http.HTTPDownloadHandler",
    "https": "scrapy.core.downloader.handlers.http.HTTPDownloadHandler",
    "s3": "scrapy.core.downloader.handlers.s3.S3DownloadHandler",
    "ftp": "scrapy.core.downloader.handlers.ftp.FTPDownloadHandler"
  },
  "EXTENSIONS": {},
  "EXTENSIONS_BASE": {
    "scrapy.extensions.corestats.CoreStats": 0,
    "scrapy.extensions.telnet.TelnetConsole": 0,
    "scrapy.extensions.memusage.MemoryUsage": 0,
    "scrapy.extensions.memdebug.MemoryDebugger": 0,
    "scrapy.extensions.closespider.CloseSpider": 0,
    "scrapy.extensions.feedexport.FeedExporter": 0,
    "scrapy.extensions.logstats.LogStats": 0,
    "scrapy.extensions.spiderstate.SpiderState": 0,
    "scrapy.extensions.throttle.AutoThrottle": 0
  },
  "FEEDS": {},
  "FEED_EXPORTERS": {},
  "FEED_EXPORTERS_BASE": {
    "json": "scrapy.exporters.JsonItemExporter",
    "jsonlines": "scrapy.exporters.JsonLinesItemExporter",
    "jsonl": "scrapy.exporters.JsonLinesItemExporter",
    "jl": "scrapy.exporters.JsonLinesItemExporter",
    "csv": "scrapy.exporters.CsvItemExporter",
    "xml": "scrapy.exporters.XmlItemExporter",
    "marshal": "scrapy.exporters.MarshalItemExporter",
    "pickle": "scrapy.exporters.PickleItemExporter"
  },
  "FEED_STORAGES": {},
  "FEED_STORAGES_BASE": {
    "": "scrapy.extensions.feedexport.FileFeedStorage",
    "file": "scrapy.extensions.feedexport.FileFeedStorage",
    "ftp": "scrapy.extensions.feedexport.FTPFeedStorage",
    "gs": "scrapy.extensions.feedexport.GCSFeedStorage",
    "s3": "scrapy.extensions.feedexport.S3FeedStorage",
    "stdout": "scrapy.extensions.feedexport.StdoutFeedStorage"
  },
  "HTTPCACHE_IGNORE_HTTP_CODES": [],
  "HTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS": [],
  "HTTPCACHE_IGNORE_SCHEMES": [
    "file"
  ],
  "ITEM_PIPELINES": {},
  "ITEM_PIPELINES_BASE": {},
  "MEMDEBUG_NOTIFY": [],
  "MEMUSAGE_NOTIFY_MAIL": [],
  "METAREFRESH_IGNORE_TAGS": [],
  "RETRY_HTTP_CODES": [
    500,
    502,
    503,
    504,
    522,
    524,
    408,
    429
  ],
  "SPIDER_CONTRACTS": {},
  "SPIDER_CONTRACTS_BASE": {
    "scrapy.contracts.default.UrlContract": 1,
    "scrapy.contracts.default.CallbackKeywordArgumentsContract": 1,
    "scrapy.contracts.default.ReturnsContract": 2,
    "scrapy.contracts.default.ScrapesContract": 3
  },
  "SPIDER_MIDDLEWARES": {},
  "SPIDER_MIDDLEWARES_BASE": {
    "scrapy.spidermiddlewares.httperror.HttpErrorMiddleware": 50,
    "scrapy.spidermiddlewares.offsite.OffsiteMiddleware": 500,
    "scrapy.spidermiddlewares.referer.RefererMiddleware": 700,
    "scrapy.spidermiddlewares.urllength.UrlLengthMiddleware": 800,
    "scrapy.spidermiddlewares.depth.DepthMiddleware": 900
  },
  "SPIDER_MODULES": [],
  "STATSMAILER_RCPTS": [],
  "TELNETCONSOLE_PORT": [
    6023,
    6073
  ]
}' |
apify call jupri/scrapyfy --silent --output-dataset
ScrapyFy API through CLI
The Apify CLI is the official tool that allows you to use ScrapyFy locally, providing convenience functions and automatic retries on errors.
Install the Apify CLI
# Install the Apify CLI globally, then authenticate with your Apify API token.
npm i -g apify-cli
apify login
Other API clients include:
Developer
Maintained by Community
Categories