1"""
2International Jobs Board Scraper - Multi-Board Aggregator
3Scrapes job listings from UK, US, EU and remote job boards with unified output.
4
5Supported boards:
6 UK: Reed, Totaljobs, CV-Library, CWJobs, Indeed UK, GOV.UK Find a Job
7 US: USAJobs, Indeed US
8 EU: Indeed DE/FR/NL, Arbeitnow
9 Global: Adzuna (multi-country API), RemoteOK
10"""
11
12import asyncio
13import os
14import random
15import re
16import statistics
17
18import httpx
19from apify import Actor
20from playwright.async_api import async_playwright
21
22from .utils import make_headers
23from .boards.reed import ReedScraper
24from .boards.totaljobs import TotaljobsScraper
25from .boards.cvlibrary import CVLibraryScraper
26from .boards.cwjobs import CWJobsScraper
27from .boards.indeed import IndeedUKScraper, IndeedScraper
28from .boards.findajob import FindAJobScraper
29from .boards.adzuna import AdzunaScraper
30from .boards.usajobs import USAJobsScraper
31from .boards.remoteok import RemoteOKScraper
32from .boards.arbeitnow import ArbeitnowScraper
33
34
35
# Maps board IDs (as given in actor input "boards") to scraper classes or
# factory callables.  Factories receive the shared httpx client plus any
# keyword arguments (delay, browser, proxy_url, proxy_config) forwarded
# by main().
BOARD_REGISTRY = {
    # UK boards.
    "reed": ReedScraper,
    "totaljobs": TotaljobsScraper,
    "cvlibrary": CVLibraryScraper,
    "cwjobs": CWJobsScraper,
    "indeed": IndeedUKScraper,
    "findajob": FindAJobScraper,
    # US boards.
    "usajobs": USAJobsScraper,
    "indeed_us": lambda client, **kw: IndeedScraper(client, base_url="https://www.indeed.com", source="indeed.com", **kw),
    # Country-specific Indeed variants: same scraper, different domain/source tag.
    "indeed_de": lambda client, **kw: IndeedScraper(client, base_url="https://de.indeed.com", source="indeed.de", **kw),
    "indeed_fr": lambda client, **kw: IndeedScraper(client, base_url="https://fr.indeed.com", source="indeed.fr", **kw),
    "indeed_nl": lambda client, **kw: IndeedScraper(client, base_url="https://nl.indeed.com", source="indeed.nl", **kw),
    "indeed_au": lambda client, **kw: IndeedScraper(client, base_url="https://au.indeed.com", source="indeed.au", **kw),
    # Remote/global boards.
    "remoteok": RemoteOKScraper,
    "arbeitnow": ArbeitnowScraper,
}


# Boards that are scraped through a Playwright browser when one is available
# (see main(): these get per-board proxy sessions and run sequentially).
BROWSER_BOARDS = {"totaljobs", "cwjobs", "cvlibrary", "indeed", "reed",
                  "indeed_us", "indeed_de", "indeed_fr", "indeed_nl", "indeed_au"}


# Boards reachable with plain httpx requests.
# NOTE(review): not referenced anywhere in this file — presumably used by
# another module or kept as documentation; confirm before removing.
HTTP_ONLY_BOARDS = {"findajob", "usajobs", "remoteok", "arbeitnow", "adzuna"}


# Default board selection per country, used when the input has no "boards".
COUNTRY_DEFAULTS = {
    "uk": ["reed", "totaljobs", "cvlibrary", "cwjobs", "indeed", "findajob", "adzuna"],
    "us": ["usajobs", "indeed_us", "adzuna", "remoteok"],
    "de": ["indeed_de", "adzuna", "arbeitnow"],
    "fr": ["indeed_fr", "adzuna"],
    "nl": ["indeed_nl", "adzuna"],
    "au": ["indeed_au", "adzuna"],
    "remote": ["remoteok", "arbeitnow", "adzuna"],
}


# Maps the actor's country input to Adzuna's country code; main() also
# reuses this (uppercased) as the residential-proxy country.
ADZUNA_COUNTRY_MAP = {
    "uk": "gb", "us": "us", "de": "de", "fr": "fr",
    "nl": "nl", "au": "au", "remote": "gb",
}


# Chromium launch flags: reduce automation fingerprinting and trim
# background features/resource use in the headless container.
STEALTH_ARGS = [
    "--no-sandbox",
    "--disable-dev-shm-usage",
    "--disable-blink-features=AutomationControlled",
    "--disable-features=IsolateOrigins,site-per-process",
    "--disable-infobars",
    "--disable-background-networking",
    "--disable-default-apps",
    "--disable-extensions",
    "--disable-sync",
    "--disable-translate",
    "--no-first-run",
    "--ignore-certificate-errors",
    "--window-size=1920,1080",
]
98
99
async def run_board(scraper, keyword, location, max_per_board, job_type, salary_min) -> list[dict]:
    """Run one board scraper, shielding the caller from its failures.

    Returns whatever the scraper's ``search`` coroutine yields, or an
    empty list if it raises — the error is logged, never propagated, so a
    single broken board cannot abort the whole run.
    """
    search_params = {
        "keyword": keyword,
        "location": location,
        "max_results": max_per_board,
        "job_type": job_type,
        "salary_min": salary_min,
    }
    try:
        return await scraper.search(**search_params)
    except Exception as exc:
        Actor.log.error(f"[{scraper.source_name}] Scraper failed: {exc}")
        return []
113
114
# Ordered (category, keywords) rules for categorize_job().  Evaluated top to
# bottom; the first rule with any matching keyword wins, so more specific
# patterns are listed before generic ones.
_CATEGORY_RULES = (
    # Tech roles first — these patterns are the most specific.
    ("Software Development",
     ("software eng", "software dev", "full stack", "fullstack",
      "full-stack", "frontend", "front-end", "front end",
      "backend", "back-end", "back end", "web dev",
      "mobile dev", "ios dev", "android dev", "flutter",
      "react", "angular", "vue.js", "node.js", "python dev",
      "java dev", ".net dev", "c# dev", "c++ dev", "rust dev",
      "golang", "ruby dev", "php dev", "programmer", "coder",
      "software architect", "principal engineer",
      "lead developer", "lead engineer", "tech lead")),
    ("Data & Analytics",
     ("data scien", "data analy", "data eng", "machine learn",
      "ml eng", "ai eng", "artificial intelligence",
      "deep learn", "nlp", "computer vision",
      "business intellig", "bi analyst", "bi developer",
      "analytics eng", "data architect")),
    ("DevOps & Infrastructure",
     ("devops", "sre", "site reliab", "platform eng",
      "cloud eng", "cloud arch", "infrastructure",
      "kubernetes", "docker", "terraform", "aws eng",
      "azure eng", "gcp eng", "systems eng", "linux eng",
      "network eng", "devsecops")),
    ("Cybersecurity",
     ("cyber", "security eng", "security analy",
      "penetration", "pen test", "infosec",
      "information security", "soc analyst",
      "security architect", "security consult")),
    ("QA & Testing",
     ("qa ", "qa eng", "test eng", "tester", "sdet",
      "automation eng", "quality assurance", "test analy",
      "test lead", "test manager")),
    ("Product Management",
     ("product manager", "product owner", "product lead",
      "head of product", "vp product", "cpo",
      "product director")),
    ("Project & Delivery Management",
     ("project manager", "programme manager",
      "program manager", "delivery manager",
      "scrum master", "agile coach", "release manager",
      "pmo", "project coordinator", "delivery lead")),
    ("Design & UX",
     ("ux ", "ui ", "ux/ui", "ui/ux", "user experience",
      "user interface", "product design", "interaction design",
      "visual design", "graphic design", "web design",
      "design lead", "creative director")),
    ("IT Management & Support",
     ("it manager", "it director", "cto", "cio",
      "head of engineering", "head of it",
      "vp engineering", "engineering manager",
      "development manager", "it service",
      "service desk", "helpdesk", "help desk",
      "it support", "desktop support", "it admin",
      "systems admin", "it operations")),
    ("Database & BI",
     ("dba", "database admin", "database eng",
      "database dev", "sql dev", "etl dev",
      "data warehouse", "database architect")),
    ("IT Consulting & Architecture",
     ("solution architect", "enterprise architect",
      "technical architect", "integration architect",
      "it consult", "technology consult",
      "technical consult", "sap consult",
      "salesforce", "dynamics 365", "erp consult",
      "crm consult", "business analyst")),
    # Non-tech roles.
    ("Finance & Accounting",
     ("accountant", "accounting", "finance manager",
      "financial analy", "financial controller",
      "bookkeeper", "payroll", "tax ", "audit",
      "treasury", "credit analy", "fund manager",
      "investment analy", "actuary", "cfo")),
    ("Marketing & Communications",
     ("marketing", "seo ", "ppc ", "content writer",
      "copywriter", "social media", "digital market",
      "brand manager", "communications", "pr manager",
      "public relations", "cmo")),
    ("Sales & Business Development",
     ("sales manager", "sales exec", "sales rep",
      "account manager", "account exec",
      "business develop", "bdr ", "sdr ",
      "commercial manager", "sales director")),
    ("HR & Recruitment",
     ("hr ", "human resource", "recruiter", "recruitment",
      "talent acqui", "people manager", "people partner",
      "learning and dev", "l&d ", "training manager",
      "compensation", "reward", "hrbp")),
    ("Healthcare & Medical",
     ("nurse", "doctor", "clinical", "pharmacist",
      "physiotherapist", "occupational therap",
      "healthcare", "medical", "gp ", "surgeon",
      "dental", "radiographer", "midwife",
      "care assistant", "support worker")),
    ("Education & Training",
     ("teacher", "lecturer", "professor", "tutor",
      "teaching assistant", "education", "headteacher",
      "head of department", "send ", "pastoral")),
    ("Engineering",
     ("mechanical eng", "electrical eng", "civil eng",
      "structural eng", "chemical eng", "process eng",
      "manufacturing eng", "maintenance eng",
      "building services", "quantity surveyor",
      "site manager", "construction manager")),
    ("Legal & Compliance",
     ("solicitor", "barrister", "paralegal", "legal counsel",
      "legal advisor", "lawyer", "conveyancer",
      "compliance officer", "regulatory")),
    ("Operations & Logistics",
     ("supply chain", "logistics", "procurement",
      "warehouse", "buyer", "purchasing",
      "operations manager", "operations director",
      "facilities", "fleet manager")),
    ("Customer Service",
     ("customer service", "customer support",
      "call centre", "call center", "contact centre",
      "client service", "customer success")),
    # "admin" is a very broad substring, so Administration is checked last.
    ("Administration",
     ("admin", "office manager", "receptionist",
      "personal assistant", "executive assistant",
      "secretary", "office coordinator")),
)


def categorize_job(title: str) -> str:
    """Infer a job category from the title using keyword matching.

    Rules in ``_CATEGORY_RULES`` are evaluated in declaration order and the
    first match wins.  Returns "Other" for empty/missing titles or when no
    rule matches.
    """
    if not title:
        return "Other"
    lowered = title.lower()
    for category, keywords in _CATEGORY_RULES:
        if any(keyword in lowered for keyword in keywords):
            return category
    return "Other"
268
269
def normalize_job(job: dict) -> dict:
    """Ensure all job dicts have every expected field (no undefined in output).

    Values that are ``None`` or the empty string are replaced by the
    field's default, the bulky ``full_description`` field is dropped from
    the dataset record, and an empty ``category`` is inferred from the
    title via categorize_job().  Extra scraper-specific keys are kept.
    """
    defaults = {
        "title": "",
        "company": "",
        "location": "",
        "salary_raw": "",
        "salary_min": None,
        "salary_max": None,
        "salary_currency": "GBP",
        "salary_period": "",
        "snippet": "",
        "employment_type": "",
        "date_posted": "",
        "valid_through": "",
        "url": "",
        "job_id": "",
        "source": "",
        "category": "",
    }
    # Keep only meaningful scraper values; defaults fill the gaps.  The
    # filter deliberately keeps falsy-but-valid values such as 0.
    normalized = {**defaults, **{k: v for k, v in job.items() if v is not None and v != ""}}
    # Full descriptions are large and not part of the unified output schema.
    normalized.pop("full_description", None)

    # No post-merge None re-defaulting is needed: every schema key holds
    # either a non-None scraper value (None/"" filtered above) or its
    # non-None default, so the old restoration loop was dead code.

    if not normalized["category"]:
        normalized["category"] = categorize_job(normalized["title"])
    return normalized
304
305
def deduplicate_jobs(jobs: list[dict]) -> list[dict]:
    """Remove duplicate jobs across boards, keeping the first occurrence.

    Two jobs count as the same listing when their normalised (lowercased,
    stripped) title, company and location all match.
    """
    fingerprints: set[str] = set()
    kept: list[dict] = []

    for job in jobs:
        key = "|".join(
            job.get(field, "").lower().strip()
            for field in ("title", "company", "location")
        )
        if key in fingerprints:
            continue
        fingerprints.add(key)
        kept.append(job)

    return kept
327
328
def compute_salary_benchmarks(jobs: list[dict]) -> list[dict]:
    """Compute salary benchmarks grouped by title keyword and location.

    Each job's salary midpoint is annualised, outliers are discarded, and
    buckets with at least two samples are summarised (mean/median/quartile
    stats).  Results are sorted by sample count, largest bucket first.
    """
    from collections import defaultdict

    # Multipliers to annualise a salary midpoint by pay period.
    annualizers = {"day": 220, "hour": 1760, "month": 12, "week": 52}

    grouped = defaultdict(list)
    for job in jobs:
        low = job.get("salary_min")
        high = job.get("salary_max")
        if not low and not high:
            continue

        # Midpoint of whichever bounds are present, scaled to a yearly figure.
        midpoint = ((low or high) + (high or low)) / 2
        midpoint *= annualizers.get(job.get("salary_period", "annum"), 1)

        # Discard implausible annual figures.
        if midpoint < 5000 or midpoint > 500000:
            continue

        title_key = job.get("title", "").lower().strip()
        loc = job.get("location", "").lower().strip()
        # Only the first location segment (e.g. city before the comma).
        loc_key = loc.split(",")[0].strip() if loc else "unknown"
        grouped[(title_key, loc_key)].append(midpoint)

    benchmarks = []
    for (title_key, loc_key), samples in grouped.items():
        if len(samples) < 2:
            continue
        samples.sort()
        benchmarks.append({
            "benchmark_title": title_key,
            "benchmark_location": loc_key,
            "count": len(samples),
            "salary_mean": round(statistics.mean(samples)),
            "salary_median": round(statistics.median(samples)),
            "salary_p25": round(samples[len(samples) // 4]),
            "salary_p75": round(samples[(len(samples) * 3) // 4]),
            "salary_min": round(min(samples)),
            "salary_max": round(max(samples)),
            "_type": "salary_benchmark",
        })

    return sorted(benchmarks, key=lambda b: b["count"], reverse=True)
383
384
async def main() -> None:
    """Actor entry point.

    Reads actor input, builds a scraper per selected board, runs them
    (API boards in parallel, browser boards sequentially sharing one
    Playwright browser), deduplicates/normalises the results and pushes
    jobs — plus optional salary benchmarks — to the default dataset.
    """
    async with Actor:

        actor_input = await Actor.get_input() or {}

        # The UI may submit the sentinel "__custom__" in the dropdown field
        # with the real value in the matching custom_* field; the custom
        # value always wins when present.
        keyword = actor_input.get("custom_keyword") or actor_input.get("keyword", "software engineer")
        if keyword == "__custom__":
            keyword = actor_input.get("custom_keyword", "software engineer")
        location = actor_input.get("custom_location") or actor_input.get("location", "London")
        if location == "__custom__":
            location = actor_input.get("custom_location", "London")

        unlimited = actor_input.get("unlimited", False)
        # max_results == 0 means unlimited; otherwise a floor of 100 is enforced.
        max_results = 0 if unlimited else max(actor_input.get("max_results", 100), 100)
        salary_min = actor_input.get("salary_min") or None
        job_type = actor_input.get("job_type", "all")
        # Detail-page fetching is currently hard-disabled.
        fetch_details = False
        country = actor_input.get("country", "uk")
        salary_benchmark = actor_input.get("salary_benchmark", False)

        # Explicit board list wins; otherwise fall back to per-country
        # defaults (UK set when the country is unknown).
        selected_boards = actor_input.get("boards") or COUNTRY_DEFAULTS.get(country, COUNTRY_DEFAULTS["uk"])

        # Optional Adzuna API credentials.
        adzuna_app_id = actor_input.get("adzuna_app_id", "")
        adzuna_app_key = actor_input.get("adzuna_app_key", "")

        deduplicate = actor_input.get("deduplicate", True)

        Actor.log.info(f"Starting multi-board scrape: '{keyword}' in '{location}' (country={country})")
        Actor.log.info(f"Boards: {selected_boards}")
        Actor.log.info(f"Max results: {max_results or 'unlimited'} | Job type: {job_type} | Details: {fetch_details}")

        # Split the overall budget evenly across boards; unlimited mode uses
        # a large per-board cap instead.
        num_boards = len(selected_boards)
        if max_results == 0:
            max_per_board = 10000
        elif num_boards > 0:
            max_per_board = max_results // num_boards
        else:
            max_per_board = max_results

        headers = make_headers()
        proxy_config = None
        http_proxy_url = None
        # APIFY_IS_AT_HOME=1 indicates the actor runs on the Apify platform.
        is_on_apify = os.environ.get("APIFY_IS_AT_HOME", "0") == "1"
        need_browser = any(b in BROWSER_BOARDS for b in selected_boards)

        if is_on_apify:
            proxy_country = ADZUNA_COUNTRY_MAP.get(country, "GB").upper()
            try:
                proxy_config = await Actor.create_proxy_configuration(
                    groups=["RESIDENTIAL"],
                    country_code=proxy_country,
                )
                http_proxy_url = await proxy_config.new_url(
                    session_id=f"http_main_{random.randint(1000, 9999)}"
                )
                Actor.log.info(f"Using Apify residential proxy ({proxy_country}) for HTTP + browser")
            except Exception as e:
                # Proxy is best-effort; continue without one on failure.
                Actor.log.warning(f"Proxy config failed: {e}")

        async with httpx.AsyncClient(
            headers=headers,
            timeout=30.0,
            proxy=http_proxy_url,
        ) as client:

            browser = None
            playwright_instance = None

            if need_browser:
                Actor.log.info("Launching Playwright browser for JS-heavy boards...")
                try:
                    playwright_instance = await async_playwright().start()

                    launch_kwargs = {
                        "headless": True,
                        "args": STEALTH_ARGS,
                    }
                    if proxy_config:
                        # Playwright wants the proxy credentials split out,
                        # so parse them from a fresh proxy-session URL.
                        initial_proxy_url = await proxy_config.new_url(
                            session_id=f"browser_init_{random.randint(1000, 9999)}"
                        )
                        from urllib.parse import urlparse
                        parsed = urlparse(initial_proxy_url)
                        launch_kwargs["proxy"] = {
                            "server": f"{parsed.scheme}://{parsed.hostname}:{parsed.port}",
                            "username": parsed.username or "",
                            "password": parsed.password or "",
                        }
                        Actor.log.info(f"Browser launched with proxy: {parsed.hostname}:{parsed.port}")

                    browser = await playwright_instance.chromium.launch(**launch_kwargs)
                    Actor.log.info("Playwright browser launched successfully")
                except Exception as e:
                    # A failed launch leaves browser=None; scrapers then
                    # receive no browser argument below.
                    Actor.log.error(f"Failed to launch Playwright: {e}")
                    Actor.log.warning("Browser boards will fall back to httpx (may return fewer results)")

            try:
                # Instantiate one scraper per selected board; browser boards
                # each get their own proxy session ID.
                browser_scrapers = []
                api_scrapers = []
                adzuna_country = ADZUNA_COUNTRY_MAP.get(country, "gb")

                for board_name in selected_boards:
                    if board_name == "adzuna":
                        scraper = AdzunaScraper(
                            client,
                            delay=0.5,
                            app_id=adzuna_app_id,
                            app_key=adzuna_app_key,
                            country=adzuna_country,
                        )
                        api_scrapers.append(scraper)

                    elif board_name in BOARD_REGISTRY:
                        factory = BOARD_REGISTRY[board_name]

                        if board_name in BROWSER_BOARDS and browser and proxy_config:
                            # Proxy session IDs only allow [a-zA-Z0-9._~].
                            safe_name = re.sub(r"[^a-zA-Z0-9._~]", "_", board_name)
                            proxy_url = await proxy_config.new_url(
                                session_id=f"jobs_{safe_name}_{random.randint(1000, 9999)}"
                            )
                            # Both branches are identical: classes and lambda
                            # factories in BOARD_REGISTRY take the same kwargs.
                            if callable(factory) and not isinstance(factory, type):
                                scraper = factory(client, delay=1.5, browser=browser,
                                                  proxy_url=proxy_url, proxy_config=proxy_config)
                            else:
                                scraper = factory(client, delay=1.5, browser=browser,
                                                  proxy_url=proxy_url, proxy_config=proxy_config)
                            browser_scrapers.append(scraper)
                        elif board_name in BROWSER_BOARDS and browser:
                            if callable(factory) and not isinstance(factory, type):
                                scraper = factory(client, delay=1.5, browser=browser)
                            else:
                                scraper = factory(client, delay=1.5, browser=browser)
                            browser_scrapers.append(scraper)
                        else:
                            # HTTP path (no browser, or board not browser-based);
                            # still forwards browser/proxy info when available.
                            extra = {}
                            if browser:
                                extra["browser"] = browser
                            if proxy_config:
                                safe_name = re.sub(r"[^a-zA-Z0-9._~]", "_", board_name)
                                purl = await proxy_config.new_url(
                                    session_id=f"jobs_{safe_name}_{random.randint(1000, 9999)}"
                                )
                                extra["proxy_url"] = purl
                                extra["proxy_config"] = proxy_config
                            if callable(factory) and not isinstance(factory, type):
                                scraper = factory(client, delay=0.5, **extra)
                            else:
                                scraper = factory(client, delay=0.5, **extra)
                            api_scrapers.append(scraper)

                        if hasattr(scraper, "fetch_details"):
                            scraper.fetch_details = fetch_details
                    else:
                        Actor.log.warning(f"Unknown board: {board_name}, skipping.")

                all_scrapers = api_scrapers + browser_scrapers
                if not all_scrapers:
                    Actor.log.error("No valid boards selected!")
                    return

                all_jobs = []
                dedup_seen = set()

                def dedup_and_normalize(jobs: list[dict]) -> list[dict]:
                    """Normalize and deduplicate a batch of jobs."""
                    # Closes over dedup_seen so the fingerprint set spans
                    # every batch from every board.
                    accepted = []
                    for job in jobs:
                        normalized = normalize_job(job)
                        if deduplicate:
                            fp = (f"{normalized.get('title','').lower().strip()}|"
                                  f"{normalized.get('company','').lower().strip()}|"
                                  f"{normalized.get('location','').lower().strip()}")
                            if fp in dedup_seen:
                                continue
                            dedup_seen.add(fp)
                        accepted.append(normalized)
                    return accepted

                if unlimited:
                    # Unlimited mode: run each board for a fixed batch and
                    # push results as soon as each board's batch completes.
                    BATCH = 100
                    Actor.log.info("Unlimited mode: cycling boards in rounds of 100...")

                    if api_scrapers:
                        Actor.log.info(f"Running {len(api_scrapers)} API scrapers in parallel...")
                        api_results = await asyncio.gather(
                            *[run_board(s, keyword, location, BATCH, job_type, salary_min)
                              for s in api_scrapers],
                        )
                        for scraper, jobs in zip(api_scrapers, api_results):
                            accepted = dedup_and_normalize(jobs)
                            Actor.log.info(f"[{scraper.source_name}] {len(jobs)} scraped, {len(accepted)} pushed")
                            for job in accepted:
                                await Actor.push_data(job)
                            all_jobs.extend(accepted)

                    if browser_scrapers:
                        # Browser boards share one browser, so run serially.
                        for scraper in browser_scrapers:
                            Actor.log.info(f"── Starting {scraper.source_name} ──")
                            jobs = await run_board(scraper, keyword, location, BATCH, job_type, salary_min)
                            accepted = dedup_and_normalize(jobs)
                            Actor.log.info(f"[{scraper.source_name}] {len(jobs)} scraped, {len(accepted)} pushed")
                            for job in accepted:
                                await Actor.push_data(job)
                            all_jobs.extend(accepted)

                else:
                    # Bounded mode: collect everything first, then interleave
                    # boards round-robin so no single source dominates the
                    # head of the dataset.
                    board_results = {}

                    if api_scrapers:
                        Actor.log.info(f"Running {len(api_scrapers)} API scrapers in parallel...")
                        api_results = await asyncio.gather(
                            *[run_board(s, keyword, location, max_per_board, job_type, salary_min)
                              for s in api_scrapers],
                        )
                        for scraper, jobs in zip(api_scrapers, api_results):
                            Actor.log.info(f"═══ {scraper.source_name} returned {len(jobs)} jobs ═══")
                            board_results[scraper.source_name] = jobs

                    if browser_scrapers:
                        Actor.log.info(f"Running {len(browser_scrapers)} browser scrapers sequentially...")
                        for scraper in browser_scrapers:
                            Actor.log.info(f"── Starting {scraper.source_name} ──")
                            jobs = await run_board(scraper, keyword, location, max_per_board, job_type, salary_min)
                            Actor.log.info(f"═══ {scraper.source_name} returned {len(jobs)} jobs ═══")
                            board_results[scraper.source_name] = jobs

                    for source in board_results:
                        board_results[source] = dedup_and_normalize(board_results[source])
                        Actor.log.info(f"[{source}] {len(board_results[source])} after dedup")

                    # Round-robin merge: take one job from each board in turn
                    # until every board's iterator is exhausted.
                    board_iters = {src: iter(jobs) for src, jobs in board_results.items() if jobs}
                    while board_iters:
                        exhausted = []
                        for src in list(board_iters):
                            job = next(board_iters[src], None)
                            if job is None:
                                exhausted.append(src)
                            else:
                                all_jobs.append(job)
                        for src in exhausted:
                            del board_iters[src]

                    Actor.log.info(f"Pushing {len(all_jobs)} jobs to dataset...")
                    for job in all_jobs:
                        await Actor.push_data(job)

                if salary_benchmark:
                    benchmarks = compute_salary_benchmarks(all_jobs)
                    Actor.log.info(f"Generated {len(benchmarks)} salary benchmarks")
                    for b in benchmarks:
                        await Actor.push_data(b)

            finally:
                # Always tear down the browser and Playwright, even on errors.
                if browser:
                    try:
                        await browser.close()
                    except Exception:
                        pass
                if playwright_instance:
                    try:
                        await playwright_instance.stop()
                    except Exception:
                        pass

            # Per-source summary for the run log.
            source_counts = {}
            for job in all_jobs:
                src = job.get("source", "unknown")
                source_counts[src] = source_counts.get(src, 0) + 1

            Actor.log.info(f"╔══════════════════════════════════════╗")
            Actor.log.info(f"║          SCRAPE COMPLETE             ║")
            Actor.log.info(f"║  Total jobs: {len(all_jobs):<23}║")
            for src, count in sorted(source_counts.items()):
                Actor.log.info(f"║  {src}: {count:<26}║")
            Actor.log.info(f"╚══════════════════════════════════════╝")
692
693
# Allow running the scraper directly (outside the Apify platform runner).
if __name__ == "__main__":
    asyncio.run(main())