1"""
2This module defines the `main()` coroutine for the Apify Actor, executed from the `__main__.py` file.
3
4Feel free to modify this file to suit your specific needs.
5
6To build Apify Actors, utilize the Apify SDK toolkit, read more at the official documentation:
7https://docs.apify.com/sdk/python
8"""
9
10from urllib.parse import urljoin
11
12from bs4 import BeautifulSoup
13from httpx import AsyncClient
14
15from apify import Actor
16
17
18async def main() -> None:
19 """
20 The main coroutine is being executed using `asyncio.run()`, so do not attempt to make a normal function
21 out of it, it will not work. Asynchronous execution is required for communication with Apify platform,
22 and it also enhances performance in the field of web scraping significantly.
23 """
24 async with Actor:
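        # Read the actor input; missing fields fall back to the defaults below.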
        actor_input = await Actor.get_input() or {}
        start_urls = actor_input.get('start_urls', [{'url': 'https://apify.com'}])
        max_depth = actor_input.get('max_depth', 1)

        if not start_urls:
            Actor.log.info('No start URLs specified in actor input, exiting...')
            await Actor.exit()
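
        # Enqueue the start URLs in the default request queue.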
        default_queue = await Actor.open_request_queue()
        for start_url in start_urls:
            url = start_url.get('url')
            Actor.log.info(f'Enqueuing {url} ...')
            await default_queue.add_request({'url': url, 'userData': {'depth': 0}})
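
        # Process the requests in the queue one by one.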
        while request := await default_queue.fetch_next_request():
            url = request['url']
            depth = request['userData']['depth']
            Actor.log.info(f'Scraping {url} ...')

            try:
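                # Fetch the HTTP response from the URL, following redirects.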
                async with AsyncClient() as client:
                    response = await client.get(url, follow_redirects=True)
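
                # Parse the HTML of the response with BeautifulSoup.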
                soup = BeautifulSoup(response.content, 'html.parser')
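
                # If the maximum depth has not been reached, find nested links and enqueue them.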
                if depth < max_depth:
                    # `href=True` skips anchors without an `href` attribute, which `urljoin`
                    # would otherwise resolve to the page's own URL and re-enqueue.
                    for link in soup.find_all('a', href=True):
                        link_href = link.get('href')
                        link_url = urljoin(url, link_href)
                        if link_url.startswith(('http://', 'https://')):
                            Actor.log.info(f'Enqueuing {link_url} ...')
                            await default_queue.add_request({
                                'url': link_url,
                                'userData': {'depth': depth + 1},
                            })
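
                # Push the page's URL and title to the default dataset.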
                title = soup.title.string if soup.title else None
                await Actor.push_data({'url': url, 'title': title})
            except Exception:
                Actor.log.exception(f'Cannot extract data from {url}.')
            finally:
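                # Mark the request as handled so it is not processed again.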
                await default_queue.mark_request_as_handled(request)