Playwright + Chrome

A crawler example that uses headless Chrome driven by Playwright to scrape a website. Headless browsers render JavaScript and can help when simpler HTTP-based scrapers get blocked; the minimal sketch below shows the core idea before the full template.

Language

python

Tools

playwright

Use cases

Web scraping
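
To make the core idea concrete before the full template, here is a minimal, self-contained sketch of Playwright driving headless Chromium: it loads a page, lets its JavaScript run, reads the rendered title, and closes the browser. The `fetch_title` helper and the example URL are illustrative only, not part of the template.

import asyncio

from playwright.async_api import async_playwright


async def fetch_title(url: str) -> str:
    """Load `url` in headless Chromium and return the rendered page title."""
    async with async_playwright() as playwright:
        browser = await playwright.chromium.launch(headless=True)
        page = await browser.new_page()
        await page.goto(url)  # waits for the 'load' event by default, so page scripts get to run
        title = await page.title()
        await browser.close()
        return title


print(asyncio.run(fetch_title('https://apify.com')))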

src/main.py

1"""
2This module defines the `main()` coroutine for the Apify Actor, executed from the `__main__.py` file.
3
4Feel free to modify this file to suit your specific needs.
5
6To build Apify Actors, utilize the Apify SDK toolkit, read more at the official documentation:
7https://docs.apify.com/sdk/python
8"""
9
10from urllib.parse import urljoin
11
12from playwright.async_api import async_playwright
13
14from apify import Actor
15
16# To run this Actor locally, you need to have the Playwright browsers installed.
17# Run `playwright install --with-deps` in the Actor's virtual environment to install them.
18# When running on the Apify platform, they are already included in the Actor's Docker image.
19
20
21async def main() -> None:
22    """
23    The main coroutine is being executed using `asyncio.run()`, so do not attempt to make a normal function
24    out of it, it will not work. Asynchronous execution is required for communication with Apify platform,
25    and it also enhances performance in the field of web scraping significantly.
26    """
27    async with Actor:
28        # Read the Actor input
29        actor_input = await Actor.get_input() or {}
30        start_urls = actor_input.get('start_urls', [{'url': 'https://apify.com'}])
31        max_depth = actor_input.get('max_depth', 1)
32
33        if not start_urls:
34            Actor.log.info('No start URLs specified in actor input, exiting...')
35            await Actor.exit()
36
37        # Enqueue the starting URLs in the default request queue
38        default_queue = await Actor.open_request_queue()
39        for start_url in start_urls:
40            url = start_url.get('url')
41            Actor.log.info(f'Enqueuing {url} ...')
42            await default_queue.add_request({'url': url, 'userData': {'depth': 0}})
43
44        # Launch Playwright an open a new browser context
45        Actor.log.info('Launching Playwright...')
46        async with async_playwright() as playwright:
47            browser = await playwright.chromium.launch(headless=Actor.config.headless)
48            context = await browser.new_context()
49
50            # Process the requests in the queue one by one
51            while request := await default_queue.fetch_next_request():
52                url = request['url']
53                depth = request['userData']['depth']
54                Actor.log.info(f'Scraping {url} ...')
55
56                try:
57                    # Open the URL in a new Playwright page
58                    page = await context.new_page()
59                    await page.goto(url)
60
61                    # If we haven't reached the max depth,
62                    # look for nested links and enqueue their targets
63                    if depth < max_depth:
64                        for link in await page.locator('a').all():
65                            link_href = await link.get_attribute('href')
66                            link_url = urljoin(url, link_href)
67                            if link_url.startswith(('http://', 'https://')):
68                                Actor.log.info(f'Enqueuing {link_url} ...')
69                                await default_queue.add_request({
70                                    'url': link_url,
71                                    'userData': {'depth': depth + 1},
72                                })
73
74                    # Push the title of the page into the default dataset
75                    title = await page.title()
76                    await Actor.push_data({'url': url, 'title': title})
77                except Exception:
78                    Actor.log.exception(f'Cannot extract data from {url}.')
79                finally:
80                    await page.close()
81                    await default_queue.mark_request_as_handled(request)

Playwright template

Included features

  • Apify SDK for Python - a toolkit for building Apify Actors and scrapers in Python
  • Input schema - define and easily validate a schema for your Actor's input
  • Request queue - a queue into which you can put the URLs you want to scrape (condensed in the sketch after this list)
  • Dataset - a store for structured data where each stored object has the same attributes
  • Playwright - a browser automation library
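
For quick reference, the sketch below condenses how the request queue and dataset cooperate, using the same SDK calls as src/main.py above; the URL and the 'placeholder' title are stand-ins, not real scraped values.

import asyncio

from apify import Actor


async def example() -> None:
    async with Actor:
        # Request queue: enqueue a URL, then fetch requests back one by one
        queue = await Actor.open_request_queue()
        await queue.add_request({'url': 'https://example.com'})

        while request := await queue.fetch_next_request():
            # Dataset: push one structured record per processed request
            await Actor.push_data({'url': request['url'], 'title': 'placeholder'})
            await queue.mark_request_as_handled(request)


asyncio.run(example())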

Already have a solution in mind?

Sign up for a free Apify account and deploy your code to the platform in just a few minutes! If you want a head start without coding it yourself, browse our Store of existing solutions.