Playwright Scraper avatar

Playwright Scraper

Try for free

No credit card required

Go to Store
Playwright Scraper

Playwright Scraper

apify/playwright-scraper
Try for free

No credit card required

Crawls websites with the headless Chromium, Chrome, or Firefox browser and Playwright library using a provided server-side Node.js code. Supports both recursive crawling and a list of URLs. Supports login to a website.

You can access the Playwright Scraper programmatically from your own applications by using the Apify API. You can choose the language preference from below. To use the Apify API, you’ll need an Apify account and your API token, found in Integrations settings in Apify Console.

1{
2  "openapi": "3.0.1",
3  "info": {
4    "version": "1.0",
5    "x-build-id": "0AhyTliuyqvDZdbJz"
6  },
7  "servers": [
8    {
9      "url": "https://api.apify.com/v2"
10    }
11  ],
12  "paths": {
13    "/acts/apify~playwright-scraper/run-sync-get-dataset-items": {
14      "post": {
15        "operationId": "run-sync-get-dataset-items-apify-playwright-scraper",
16        "x-openai-isConsequential": false,
17        "summary": "Executes an Actor, waits for its completion, and returns Actor's dataset items in response.",
18        "tags": [
19          "Run Actor"
20        ],
21        "requestBody": {
22          "required": true,
23          "content": {
24            "application/json": {
25              "schema": {
26                "$ref": "#/components/schemas/inputSchema"
27              }
28            }
29          }
30        },
31        "parameters": [
32          {
33            "name": "token",
34            "in": "query",
35            "required": true,
36            "schema": {
37              "type": "string"
38            },
39            "description": "Enter your Apify token here"
40          }
41        ],
42        "responses": {
43          "200": {
44            "description": "OK"
45          }
46        }
47      }
48    },
49    "/acts/apify~playwright-scraper/runs": {
50      "post": {
51        "operationId": "runs-sync-apify-playwright-scraper",
52        "x-openai-isConsequential": false,
53        "summary": "Executes an Actor and returns information about the initiated run in response.",
54        "tags": [
55          "Run Actor"
56        ],
57        "requestBody": {
58          "required": true,
59          "content": {
60            "application/json": {
61              "schema": {
62                "$ref": "#/components/schemas/inputSchema"
63              }
64            }
65          }
66        },
67        "parameters": [
68          {
69            "name": "token",
70            "in": "query",
71            "required": true,
72            "schema": {
73              "type": "string"
74            },
75            "description": "Enter your Apify token here"
76          }
77        ],
78        "responses": {
79          "200": {
80            "description": "OK",
81            "content": {
82              "application/json": {
83                "schema": {
84                  "$ref": "#/components/schemas/runsResponseSchema"
85                }
86              }
87            }
88          }
89        }
90      }
91    },
92    "/acts/apify~playwright-scraper/run-sync": {
93      "post": {
94        "operationId": "run-sync-apify-playwright-scraper",
95        "x-openai-isConsequential": false,
96        "summary": "Executes an Actor, waits for completion, and returns the OUTPUT from Key-value store in response.",
97        "tags": [
98          "Run Actor"
99        ],
100        "requestBody": {
101          "required": true,
102          "content": {
103            "application/json": {
104              "schema": {
105                "$ref": "#/components/schemas/inputSchema"
106              }
107            }
108          }
109        },
110        "parameters": [
111          {
112            "name": "token",
113            "in": "query",
114            "required": true,
115            "schema": {
116              "type": "string"
117            },
118            "description": "Enter your Apify token here"
119          }
120        ],
121        "responses": {
122          "200": {
123            "description": "OK"
124          }
125        }
126      }
127    }
128  },
129  "components": {
130    "schemas": {
131      "inputSchema": {
132        "type": "object",
133        "required": [
134          "startUrls",
135          "pageFunction",
136          "proxyConfiguration"
137        ],
138        "properties": {
139          "startUrls": {
140            "title": "Start URLs",
141            "type": "array",
142            "description": "URLs to start with",
143            "items": {
144              "type": "object",
145              "required": [
146                "url"
147              ],
148              "properties": {
149                "url": {
150                  "type": "string",
151                  "title": "URL of a web page",
152                  "format": "uri"
153                }
154              }
155            }
156          },
157          "globs": {
158            "title": "Glob Patterns",
159            "type": "array",
160            "description": "Glob patterns to match links in the page that you want to enqueue. Combine with Link selector to tell the scraper where to find links. Omitting the Glob patterns will cause the scraper to enqueue all links matched by the Link selector.",
161            "default": [],
162            "items": {
163              "type": "object",
164              "required": [
165                "glob"
166              ],
167              "properties": {
168                "glob": {
169                  "type": "string",
170                  "title": "Glob of a web page"
171                }
172              }
173            }
174          },
175          "pseudoUrls": {
176            "title": "Pseudo-URLs",
177            "type": "array",
178            "description": "Pseudo-URLs to match links in the page that you want to enqueue. Combine with Link selector to tell the scraper where to find links. Omitting the Pseudo-URLs will cause the scraper to enqueue all links matched by the Link selector.",
179            "default": [],
180            "items": {
181              "type": "object",
182              "required": [
183                "purl"
184              ],
185              "properties": {
186                "purl": {
187                  "type": "string",
188                  "title": "Pseudo-URL of a web page"
189                }
190              }
191            }
192          },
193          "excludes": {
194            "title": "Exclude Glob Patterns",
195            "type": "array",
196            "description": "Glob patterns to match links in the page that you want to exclude from being enqueued.",
197            "default": [],
198            "items": {
199              "type": "object",
200              "required": [
201                "glob"
202              ],
203              "properties": {
204                "glob": {
205                  "type": "string",
206                  "title": "Glob of a web page"
207                }
208              }
209            }
210          },
211          "linkSelector": {
212            "title": "Link selector",
213            "type": "string",
214            "description": "CSS selector matching elements with 'href' attributes that should be enqueued. To enqueue urls from <code><div class=\"my-class\" href=...></code> tags, you would enter <strong>div.my-class</strong>. Leave empty to ignore all links."
215          },
216          "keepUrlFragments": {
217            "title": "Keep URL fragments",
218            "type": "boolean",
219            "description": "URL fragments (the parts of URL after a <code>#</code>) are not considered when the scraper determines whether a URL has already been visited. This means that when adding URLs such as <code>https://example.com/#foo</code> and <code>https://example.com/#bar</code>, only the first will be visited. Turn this option on to tell the scraper to visit both.",
220            "default": false
221          },
222          "pageFunction": {
223            "title": "Page function",
224            "type": "string",
225            "description": "Function executed for each request"
226          },
227          "proxyConfiguration": {
228            "title": "Proxy configuration",
229            "type": "object",
230            "description": "Specifies proxy servers that will be used by the scraper in order to hide its origin.<br><br>For details, see <a href='https://apify.com/apify/playwright-scraper#proxy-configuration' target='_blank' rel='noopener'>Proxy configuration</a> in README.",
231            "default": {
232              "useApifyProxy": true
233            }
234          },
235          "proxyRotation": {
236            "title": "Proxy rotation",
237            "enum": [
238              "RECOMMENDED",
239              "PER_REQUEST",
240              "UNTIL_FAILURE"
241            ],
242            "type": "string",
243            "description": "This property indicates the strategy of proxy rotation and can only be used in conjunction with Apify Proxy. The recommended setting automatically picks the best proxies from your available pool and rotates them evenly, discarding proxies that become blocked or unresponsive. If this strategy does not work for you for any reason, you may configure the scraper to either use a new proxy for each request, or to use one proxy as long as possible, until the proxy fails. IMPORTANT: This setting will only use your available Apify Proxy pool, so if you don't have enough proxies for a given task, no rotation setting will produce satisfactory results.",
244            "default": "RECOMMENDED"
245          },
246          "sessionPoolName": {
247            "title": "Session pool name",
248            "pattern": "^[0-9A-Za-z_-]+$",
249            "minLength": 3,
250            "maxLength": 200,
251            "type": "string",
252            "description": "<b>Use only English alphanumeric characters, dashes, and underscores.</b> A session is a representation of a user. It has its own IP and cookies, which are then used together to emulate a real user. Usage of the sessions is controlled by the Proxy rotation option. By providing a session pool name, you enable sharing of those sessions across multiple actor runs. This is very useful when you need specific cookies for accessing the websites or when a lot of your proxies are already blocked. Instead of trying randomly, a list of working sessions will be saved and a new actor run can reuse those sessions. Note that the IP lock on sessions expires after 24 hours, unless the session is used again in that window."
253          },
254          "initialCookies": {
255            "title": "Initial cookies",
256            "type": "array",
257            "description": "The provided cookies will be pre-set to all pages the scraper opens.",
258            "default": []
259          },
260          "launcher": {
261            "title": "Browser Type",
262            "enum": [
263              "chromium",
264              "firefox"
265            ],
266            "type": "string",
267            "description": "Choose the browser to launch.",
268            "default": "chromium"
269          },
270          "useChrome": {
271            "title": "Use Chrome (can only be used if Chromium Browser is selected)",
272            "type": "boolean",
273            "description": "The scraper will use a real Chrome browser instead of a Chromium masking as Chrome. Using this option may help with bypassing certain anti-scraping protections, but risks that the scraper will be unstable or not work at all.",
274            "default": false
275          },
276          "headless": {
277            "title": "Run browsers in headless mode",
278            "type": "boolean",
279            "description": "By default, browsers run in headless mode. You can toggle this off to run them in headful mode, which can help with certain rare anti-scraping protections but is slower and more costly.",
280            "default": true
281          },
282          "ignoreSslErrors": {
283            "title": "Ignore SSL errors",
284            "type": "boolean",
285            "description": "Scraper will ignore SSL certificate errors.",
286            "default": false
287          },
288          "ignoreCorsAndCsp": {
289            "title": "Ignore CORS and CSP",
290            "type": "boolean",
291            "description": "Scraper will ignore CSP (content security policy) and CORS (cross origin resource sharing) settings of visited pages and requested domains. This enables you to freely use XHR/Fetch to make HTTP requests from the scraper.",
292            "default": false
293          },
294          "downloadMedia": {
295            "title": "Download media",
296            "type": "boolean",
297            "description": "Scraper will download media such as images, fonts, videos and sounds. Disabling this may speed up the scrape, but certain websites could stop working correctly.",
298            "default": true
299          },
300          "downloadCss": {
301            "title": "Download CSS",
302            "type": "boolean",
303            "description": "Scraper will download CSS stylesheets. Disabling this may speed up the scrape, but certain websites could stop working correctly.",
304            "default": true
305          },
306          "maxRequestRetries": {
307            "title": "Max request retries",
308            "minimum": 0,
309            "type": "integer",
310            "description": "Maximum number of times the request for the page will be retried in case of an error. Setting it to 0 means that the request will be attempted once and will not be retried if it fails.",
311            "default": 3
312          },
313          "maxPagesPerCrawl": {
314            "title": "Max pages per run",
315            "minimum": 0,
316            "type": "integer",
317            "description": "Maximum number of pages that the scraper will open. 0 means unlimited.",
318            "default": 0
319          },
320          "maxResultsPerCrawl": {
321            "title": "Max result records",
322            "minimum": 0,
323            "type": "integer",
324            "description": "Maximum number of results that will be saved to dataset. The scraper will terminate afterwards. 0 means unlimited.",
325            "default": 0
326          },
327          "maxCrawlingDepth": {
328            "title": "Max crawling depth",
329            "minimum": 0,
330            "type": "integer",
331            "description": "Defines how many links away from the Start URLs the scraper will descend. 0 means unlimited.",
332            "default": 0
333          },
334          "maxConcurrency": {
335            "title": "Max concurrency",
336            "minimum": 1,
337            "type": "integer",
338            "description": "Defines how many pages can be processed by the scraper in parallel. The scraper automatically increases and decreases concurrency based on available system resources. Use this option to set a hard limit.",
339            "default": 50
340          },
341          "pageLoadTimeoutSecs": {
342            "title": "Page load timeout",
343            "minimum": 1,
344            "type": "integer",
345            "description": "Maximum time the scraper will allow a web page to load in seconds.",
346            "default": 60
347          },
348          "pageFunctionTimeoutSecs": {
349            "title": "Page function timeout",
350            "minimum": 1,
351            "type": "integer",
352            "description": "Maximum time the scraper will wait for the page function to execute in seconds.",
353            "default": 60
354          },
355          "waitUntil": {
356            "title": "Navigation wait until",
357            "enum": [
358              "networkidle",
359              "load",
360              "domcontentloaded"
361            ],
362            "type": "string",
363            "description": "The scraper will wait until the selected events are triggered in the page before executing the page function. Available events are <code>domcontentloaded</code>, <code>load</code> and <code>networkidle</code> <a href=\"https://playwright.dev/docs/api/class-page#page-goto-option-wait-until\" target=\"_blank\">See Playwright docs</a>.",
364            "default": "networkidle"
365          },
366          "preNavigationHooks": {
367            "title": "Pre-navigation hooks",
368            "type": "string",
369            "description": "Async functions that are sequentially evaluated before the navigation. Good for setting additional cookies or browser properties before navigation. The function accepts two parameters, `crawlingContext` and `gotoOptions`, which are passed to the `page.goto()` function the crawler calls to navigate."
370          },
371          "postNavigationHooks": {
372            "title": "Post-navigation hooks",
373            "type": "string",
374            "description": "Async functions that are sequentially evaluated after the navigation. Good for checking if the navigation was successful. The function accepts `crawlingContext` as the only parameter."
375          },
376          "closeCookieModals": {
377            "title": "Dismiss cookie modals",
378            "type": "boolean",
379            "description": "Uses the [I don't care about cookies](https://addons.mozilla.org/en-US/firefox/addon/i-dont-care-about-cookies/) browser extension. When on, the crawler will automatically try to dismiss cookie consent modals. This can be useful when crawling European websites that show cookie consent modals.",
380            "default": false
381          },
382          "maxScrollHeightPixels": {
383            "title": "Maximum scrolling distance in pixels",
384            "type": "integer",
385            "description": "The crawler will scroll down the page until all content is loaded or the maximum scrolling distance is reached. Setting this to `0` disables scrolling altogether.",
386            "default": 5000
387          },
388          "debugLog": {
389            "title": "Debug log",
390            "type": "boolean",
391            "description": "Debug messages will be included in the log. Use <code>context.log.debug('message')</code> to log your own debug messages.",
392            "default": false
393          },
394          "browserLog": {
395            "title": "Browser log",
396            "type": "boolean",
397            "description": "Console messages from the Browser will be included in the log. This may result in the log being flooded by error messages, warnings and other messages of little value, especially with high concurrency.",
398            "default": false
399          },
400          "customData": {
401            "title": "Custom data",
402            "type": "object",
403            "description": "This object will be available on pageFunction's context as customData.",
404            "default": {}
405          },
406          "datasetName": {
407            "title": "Dataset name",
408            "type": "string",
409            "description": "Name or ID of the dataset that will be used for storing results. If left empty, the default dataset of the run will be used."
410          },
411          "keyValueStoreName": {
412            "title": "Key-value store name",
413            "type": "string",
414            "description": "Name or ID of the key-value store that will be used for storing records. If left empty, the default key-value store of the run will be used."
415          },
416          "requestQueueName": {
417            "title": "Request queue name",
418            "type": "string",
419            "description": "Name of the request queue that will be used for storing requests. If left empty, the default request queue of the run will be used."
420          }
421        }
422      },
423      "runsResponseSchema": {
424        "type": "object",
425        "properties": {
426          "data": {
427            "type": "object",
428            "properties": {
429              "id": {
430                "type": "string"
431              },
432              "actId": {
433                "type": "string"
434              },
435              "userId": {
436                "type": "string"
437              },
438              "startedAt": {
439                "type": "string",
440                "format": "date-time",
441                "example": "2025-01-08T00:00:00.000Z"
442              },
443              "finishedAt": {
444                "type": "string",
445                "format": "date-time",
446                "example": "2025-01-08T00:00:00.000Z"
447              },
448              "status": {
449                "type": "string",
450                "example": "READY"
451              },
452              "meta": {
453                "type": "object",
454                "properties": {
455                  "origin": {
456                    "type": "string",
457                    "example": "API"
458                  },
459                  "userAgent": {
460                    "type": "string"
461                  }
462                }
463              },
464              "stats": {
465                "type": "object",
466                "properties": {
467                  "inputBodyLen": {
468                    "type": "integer",
469                    "example": 2000
470                  },
471                  "rebootCount": {
472                    "type": "integer",
473                    "example": 0
474                  },
475                  "restartCount": {
476                    "type": "integer",
477                    "example": 0
478                  },
479                  "resurrectCount": {
480                    "type": "integer",
481                    "example": 0
482                  },
483                  "computeUnits": {
484                    "type": "integer",
485                    "example": 0
486                  }
487                }
488              },
489              "options": {
490                "type": "object",
491                "properties": {
492                  "build": {
493                    "type": "string",
494                    "example": "latest"
495                  },
496                  "timeoutSecs": {
497                    "type": "integer",
498                    "example": 300
499                  },
500                  "memoryMbytes": {
501                    "type": "integer",
502                    "example": 1024
503                  },
504                  "diskMbytes": {
505                    "type": "integer",
506                    "example": 2048
507                  }
508                }
509              },
510              "buildId": {
511                "type": "string"
512              },
513              "defaultKeyValueStoreId": {
514                "type": "string"
515              },
516              "defaultDatasetId": {
517                "type": "string"
518              },
519              "defaultRequestQueueId": {
520                "type": "string"
521              },
522              "buildNumber": {
523                "type": "string",
524                "example": "1.0.0"
525              },
526              "containerUrl": {
527                "type": "string"
528              },
529              "usage": {
530                "type": "object",
531                "properties": {
532                  "ACTOR_COMPUTE_UNITS": {
533                    "type": "integer",
534                    "example": 0
535                  },
536                  "DATASET_READS": {
537                    "type": "integer",
538                    "example": 0
539                  },
540                  "DATASET_WRITES": {
541                    "type": "integer",
542                    "example": 0
543                  },
544                  "KEY_VALUE_STORE_READS": {
545                    "type": "integer",
546                    "example": 0
547                  },
548                  "KEY_VALUE_STORE_WRITES": {
549                    "type": "integer",
550                    "example": 1
551                  },
552                  "KEY_VALUE_STORE_LISTS": {
553                    "type": "integer",
554                    "example": 0
555                  },
556                  "REQUEST_QUEUE_READS": {
557                    "type": "integer",
558                    "example": 0
559                  },
560                  "REQUEST_QUEUE_WRITES": {
561                    "type": "integer",
562                    "example": 0
563                  },
564                  "DATA_TRANSFER_INTERNAL_GBYTES": {
565                    "type": "integer",
566                    "example": 0
567                  },
568                  "DATA_TRANSFER_EXTERNAL_GBYTES": {
569                    "type": "integer",
570                    "example": 0
571                  },
572                  "PROXY_RESIDENTIAL_TRANSFER_GBYTES": {
573                    "type": "integer",
574                    "example": 0
575                  },
576                  "PROXY_SERPS": {
577                    "type": "integer",
578                    "example": 0
579                  }
580                }
581              },
582              "usageTotalUsd": {
583                "type": "number",
584                "example": 0.00005
585              },
586              "usageUsd": {
587                "type": "object",
588                "properties": {
589                  "ACTOR_COMPUTE_UNITS": {
590                    "type": "integer",
591                    "example": 0
592                  },
593                  "DATASET_READS": {
594                    "type": "integer",
595                    "example": 0
596                  },
597                  "DATASET_WRITES": {
598                    "type": "integer",
599                    "example": 0
600                  },
601                  "KEY_VALUE_STORE_READS": {
602                    "type": "integer",
603                    "example": 0
604                  },
605                  "KEY_VALUE_STORE_WRITES": {
606                    "type": "number",
607                    "example": 0.00005
608                  },
609                  "KEY_VALUE_STORE_LISTS": {
610                    "type": "integer",
611                    "example": 0
612                  },
613                  "REQUEST_QUEUE_READS": {
614                    "type": "integer",
615                    "example": 0
616                  },
617                  "REQUEST_QUEUE_WRITES": {
618                    "type": "integer",
619                    "example": 0
620                  },
621                  "DATA_TRANSFER_INTERNAL_GBYTES": {
622                    "type": "integer",
623                    "example": 0
624                  },
625                  "DATA_TRANSFER_EXTERNAL_GBYTES": {
626                    "type": "integer",
627                    "example": 0
628                  },
629                  "PROXY_RESIDENTIAL_TRANSFER_GBYTES": {
630                    "type": "integer",
631                    "example": 0
632                  },
633                  "PROXY_SERPS": {
634                    "type": "integer",
635                    "example": 0
636                  }
637                }
638              }
639            }
640          }
641        }
642      }
643    }
644  }
645}

Playwright Scraper OpenAPI definition

OpenAPI is a standard for designing and describing RESTful APIs, allowing developers to define API structure, endpoints, and data formats in a machine-readable way. It simplifies API development, integration, and documentation.

OpenAPI is effective when used with AI agents and GPTs by standardizing how these systems interact with various APIs, for reliable integrations and efficient communication.

By defining machine-readable API specifications, OpenAPI allows AI models like GPTs to understand and use varied data sources, improving accuracy. This accelerates development, reduces errors, and provides context-aware responses, making OpenAPI a core component for AI applications.

You can download the OpenAPI definitions for Playwright Scraper from the options below:

If you’d like to learn more about how OpenAPI powers GPTs, read our blog post.

You can also check out our other API clients:

Developer
Maintained by Apify

Actor Metrics

  • 95 monthly users

  • 25 stars

  • 98% runs succeeded

  • 35 days response time

  • Created in Aug 2022

  • Modified 8 months ago