import { Actor } from 'apify';
import { PuppeteerCrawler, Dataset, RequestQueue } from 'crawlee';

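// Initialize the Apify actor runtime before touching any Actor APIs.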
await Actor.init();

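// Read actor input; the defaults let the actor run out of the box against one example X post.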
const {
    startUrls = [{ url: 'https://x.com/baconbrix/status/1910752770593816703?s=12' }],
    proxyConfig = null,
} = await Actor.getInput() ?? {};

if (!startUrls || startUrls.length !== 1) {
    throw new Error('startUrls must be an array with exactly one URL');
}

const proxyConfiguration = proxyConfig
    ? await Actor.createProxyConfiguration(proxyConfig)
    : await Actor.createProxyConfiguration();

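// Open the default request queue; the single start URL is enqueued right before the run.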
const requestQueue = await RequestQueue.open();

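// One post per run: maxRequestsPerCrawl caps the crawl at a single request.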
const crawler = new PuppeteerCrawler({
    proxyConfiguration,
    requestQueue,
    maxRequestsPerCrawl: 1,

    launchContext: {
        useChrome: true,
        launchOptions: {
            headless: true,
        },
    },

    requestHandler: async ({ page, request, log }) => {
        log.info(`Scraping X post: ${request.url}`);

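        // X renders posts client-side, so the <article> element may appear late or
        // not at all on a bad load; reload and retry a few times before giving up.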
        const maxRetries = 3;
        let retries = 0;
        let articleFound = false;

        while (retries < maxRetries && !articleFound) {
            try {
                await page.waitForSelector('article', { timeout: 20000 });
                articleFound = true;
            } catch (e) {
                retries++;
                if (retries < maxRetries) {
                    log.warning(`Article not found, retrying (${retries}/${maxRetries})...`);
                    await page.reload({ waitUntil: 'networkidle2' });
                } else {
                    throw new Error(`Article not found after ${maxRetries} attempts on ${request.url}`);
                }
            }
        }

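        // Run extraction in the page context: post text, images, and meta tags.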
        const data = await page.evaluate(() => {
            const result = {
                url: window.location.href,
                title: '',
                content: '',
                image: '',
                ogTitle: '',
                ogDescription: '',
                ogImage: '',
                description: '',
                rawImages: [],
            };

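            // The main post is the first <article> on the page; pull its text and media.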
            const article = document.querySelector('article');
            if (article) {
                const textElement = article.querySelector('div[lang]');
                if (textElement) result.title = textElement.innerText.trim();

                const textElements = article.querySelectorAll('div[lang]');
                const allText = Array.from(textElements)
                    .map((el) => el.innerText.trim())
                    .filter(Boolean)
                    .join('\n');

                result.content = allText;

                const imgTags = article.querySelectorAll('img');
                const imgUrls = Array.from(imgTags)
                    .map((img) => img.src)
                    .filter((src) => !src.includes('profile_images') && !src.includes('emoji'));
                if (imgUrls.length > 0) {
                    result.image = imgUrls[0];
                    result.rawImages = imgUrls;
                }
            }

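            // Fall back to Open Graph / standard meta tags for title, description, and image.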
            const getMeta = (name) =>
                document.querySelector(`meta[property="${name}"]`)?.content
                || document.querySelector(`meta[name="${name}"]`)?.content
                || '';

            result.ogTitle = getMeta('og:title');
            result.ogDescription = getMeta('og:description');
            result.ogImage = getMeta('og:image');
            result.description = getMeta('description');

            return result;
        });

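        // Store the scraped record in the default dataset.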
        await Dataset.pushData(data);
    },
});

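// Seed the queue with the single start URL and run the crawl.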
await requestQueue.addRequest({ url: startUrls[0].url });
await crawler.run();
await Actor.exit();