![GPT Scraper avatar](https://images.apifyusercontent.com/jR-gdWlKbVmv6dv2De-ANF6L5msCaRlaulo-Y9uSxto/rs:fill:250:250/cb:1/aHR0cHM6Ly9hcGlmeS1pbWFnZS11cGxvYWRzLXByb2QuczMuYW1hem9uYXdzLmNvbS9wYU90Ymp2eVVpTnNyMVFtcy84aHRpSFM4a25MVExONzd0Mi1HUFRfU2NyYXBlci5wbmc.webp)
GPT Scraper
Pay $9.00 for 1,000 pages
![GPT Scraper](https://images.apifyusercontent.com/jR-gdWlKbVmv6dv2De-ANF6L5msCaRlaulo-Y9uSxto/rs:fill:250:250/cb:1/aHR0cHM6Ly9hcGlmeS1pbWFnZS11cGxvYWRzLXByb2QuczMuYW1hem9uYXdzLmNvbS9wYU90Ymp2eVVpTnNyMVFtcy84aHRpSFM4a25MVExONzd0Mi1HUFRfU2NyYXBlci5wbmc.webp)
GPT Scraper
Pay $9.00 for 1,000 pages
Extract data from any website and feed it into GPT via the OpenAI API. Use ChatGPT to proofread content, analyze sentiment, summarize reviews, extract contact details, and much more.
You can access the GPT Scraper programmatically from your own applications by using the Apify API. You can choose your preferred language below. To use the Apify API, you'll need an Apify account and your API token, found in Integrations settings in Apify Console.
1{
2 "openapi": "3.0.1",
3 "info": {
4 "version": "0.0",
5 "x-build-id": "DultqBbOXMMEYnkJc"
6 },
7 "servers": [
8 {
9 "url": "https://api.apify.com/v2"
10 }
11 ],
12 "paths": {
13 "/acts/drobnikj~gpt-scraper/run-sync-get-dataset-items": {
14 "post": {
15 "operationId": "run-sync-get-dataset-items-drobnikj-gpt-scraper",
16 "x-openai-isConsequential": false,
17 "summary": "Executes an Actor, waits for its completion, and returns Actor's dataset items in response.",
18 "tags": [
19 "Run Actor"
20 ],
21 "requestBody": {
22 "required": true,
23 "content": {
24 "application/json": {
25 "schema": {
26 "$ref": "#/components/schemas/inputSchema"
27 }
28 }
29 }
30 },
31 "parameters": [
32 {
33 "name": "token",
34 "in": "query",
35 "required": true,
36 "schema": {
37 "type": "string"
38 },
39 "description": "Enter your Apify token here"
40 }
41 ],
42 "responses": {
43 "200": {
44 "description": "OK"
45 }
46 }
47 }
48 },
49 "/acts/drobnikj~gpt-scraper/runs": {
50 "post": {
51 "operationId": "runs-sync-drobnikj-gpt-scraper",
52 "x-openai-isConsequential": false,
53 "summary": "Executes an Actor and returns information about the initiated run in response.",
54 "tags": [
55 "Run Actor"
56 ],
57 "requestBody": {
58 "required": true,
59 "content": {
60 "application/json": {
61 "schema": {
62 "$ref": "#/components/schemas/inputSchema"
63 }
64 }
65 }
66 },
67 "parameters": [
68 {
69 "name": "token",
70 "in": "query",
71 "required": true,
72 "schema": {
73 "type": "string"
74 },
75 "description": "Enter your Apify token here"
76 }
77 ],
78 "responses": {
79 "200": {
80 "description": "OK",
81 "content": {
82 "application/json": {
83 "schema": {
84 "$ref": "#/components/schemas/runsResponseSchema"
85 }
86 }
87 }
88 }
89 }
90 }
91 },
92 "/acts/drobnikj~gpt-scraper/run-sync": {
93 "post": {
94 "operationId": "run-sync-drobnikj-gpt-scraper",
95 "x-openai-isConsequential": false,
96 "summary": "Executes an Actor, waits for completion, and returns the OUTPUT from Key-value store in response.",
97 "tags": [
98 "Run Actor"
99 ],
100 "requestBody": {
101 "required": true,
102 "content": {
103 "application/json": {
104 "schema": {
105 "$ref": "#/components/schemas/inputSchema"
106 }
107 }
108 }
109 },
110 "parameters": [
111 {
112 "name": "token",
113 "in": "query",
114 "required": true,
115 "schema": {
116 "type": "string"
117 },
118 "description": "Enter your Apify token here"
119 }
120 ],
121 "responses": {
122 "200": {
123 "description": "OK"
124 }
125 }
126 }
127 }
128 },
129 "components": {
130 "schemas": {
131 "inputSchema": {
132 "type": "object",
133 "required": [
134 "startUrls",
135 "instructions"
136 ],
137 "properties": {
138 "startUrls": {
139 "title": "Start URLs",
140 "type": "array",
141 "description": "A static list of URLs to scrape. <br><br>For details, see <a href='https://apify.com/drobnikj/extended-gpt-scraper#start-urls' target='_blank' rel='noopener'>Start URLs</a> in README.",
142 "items": {
143 "type": "object",
144 "required": [
145 "url"
146 ],
147 "properties": {
148 "url": {
149 "type": "string",
150 "title": "URL of a web page",
151 "format": "uri"
152 }
153 }
154 }
155 },
156 "instructions": {
157 "title": "Instructions for GPT",
158 "type": "string",
159 "description": "Instruct GPT how to generate text. For example: \"Summarize this page in three sentences.\"<br><br>You can instruct OpenAI to answer with \"skip this page\", which will skip the page. For example: \"Summarize this page in three sentences. If the page is about Apify Proxy, answer with 'skip this page'.\"."
160 },
161 "includeUrlGlobs": {
162 "title": "Include URLs (globs)",
163 "type": "array",
164 "description": "Glob patterns matching URLs of pages that will be included in crawling. Combine them with the link selector to tell the scraper where to find links. You need to use both globs and link selector to crawl further pages.",
165 "default": [],
166 "items": {
167 "type": "object",
168 "required": [
169 "glob"
170 ],
171 "properties": {
172 "glob": {
173 "type": "string",
174 "title": "Glob of a web page"
175 }
176 }
177 }
178 },
179 "excludeUrlGlobs": {
180 "title": "Exclude URLs (globs)",
181 "type": "array",
182 "description": "Glob patterns matching URLs of pages that will be excluded from crawling. Note that this affects only links found on pages, but not Start URLs, which are always crawled.",
183 "default": [],
184 "items": {
185 "type": "object",
186 "required": [
187 "glob"
188 ],
189 "properties": {
190 "glob": {
191 "type": "string",
192 "title": "Glob of a web page"
193 }
194 }
195 }
196 },
197 "maxCrawlingDepth": {
198 "title": "Max crawling depth",
199 "minimum": 0,
200 "type": "integer",
201 "description": "This specifies how many links away from the <b>Start URLs</b> the scraper will descend. This value is a safeguard against infinite crawling depths for misconfigured scrapers.<br><br>If set to <code>0</code>, there is no limit.",
202 "default": 99999999
203 },
204 "maxPagesPerCrawl": {
205 "title": "Max pages per run",
206 "minimum": 0,
207 "type": "integer",
208 "description": "Maximum number of pages that the scraper will open. 0 means unlimited.",
209 "default": 10
210 },
211 "linkSelector": {
212 "title": "Link selector",
213 "type": "string",
214 "description": "This is a CSS selector that says which links on the page (<code><a></code> elements with <code>href</code> attribute) should be followed and added to the request queue. To filter the links added to the queue, use the <b>Include URLs (globs)</b> and <b>Exclude URLs (globs)</b> settings.<br><br>If <b>Link selector</b> is empty, the page links are ignored.<br><br>For details, see <a href='https://apify.com/drobnikj/extended-gpt-scraper#link-selector' target='_blank' rel='noopener'>Link selector</a> in README."
215 },
216 "initialCookies": {
217 "title": "Initial cookies",
218 "type": "array",
219 "description": "Cookies that will be pre-set to all pages the scraper opens. This is useful for pages that require login. The value is expected to be a JSON array of objects with `name`, `value`, `domain` and `path` properties. For example: `[{\"name\": \"cookieName\", \"value\": \"cookieValue\", \"domain\": \".domain.com\", \"path\": \"/\"}]`.\n\nYou can use the [EditThisCookie](https://chrome.google.com/webstore/detail/editthiscookie/fngmhnnpilhplaeedifhccceomclgfbg) browser extension to copy browser cookies in this format, and paste it here.",
220 "default": []
221 },
222 "proxyConfiguration": {
223 "title": "Proxy configuration",
224 "type": "object",
225 "description": "This specifies the proxy servers that will be used by the scraper in order to hide its origin.<br><br>For details, see <a href='https://apify.com/drobnikj/extended-gpt-scraper#proxy-configuration' target='_blank' rel='noopener'>Proxy configuration</a> in README.",
226 "default": {
227 "useApifyProxy": false
228 }
229 },
230 "temperature": {
231 "title": "Temperature",
232 "type": "string",
233 "description": "Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive. For consistent results, we recommend setting the temperature to 0.",
234 "default": "0"
235 },
236 "topP": {
237 "title": "TopP",
238 "type": "string",
239 "description": "Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered.",
240 "default": "1"
241 },
242 "frequencyPenalty": {
243 "title": "Frequency penalty",
244 "type": "string",
245 "description": "How much to penalize new tokens based on their existing frequency in the text so far. Decreases the model's likelihood to repeat the same line verbatim.",
246 "default": "0"
247 },
248 "presencePenalty": {
249 "title": "Presence penalty",
250 "type": "string",
251 "description": "How much to penalize new tokens based on whether they appear in the text so far. Increases the model's likelihood to talk about new topics.",
252 "default": "0"
253 },
254 "targetSelector": {
255 "title": "Content selector",
256 "type": "string",
257 "description": "A CSS selector of the HTML element on the page that will be used in the instruction. Instead of a whole page, you can use only part of the page. For example: \"div#content\"."
258 },
259 "removeElementsCssSelector": {
260 "title": "Remove HTML elements (CSS selector)",
261 "type": "string",
262 "description": "A CSS selector matching HTML elements that will be removed from the DOM, before sending it to GPT processing. This is useful to skip irrelevant page content and save on GPT input tokens. \n\nBy default, the Actor removes usually unwanted elements like scripts, styles and inline images. You can disable the removal by setting this value to some non-existent CSS selector like `dummy_keep_everything`.",
263 "default": "script, style, noscript, path, svg, xlink"
264 },
265 "pageFormatInRequest": {
266 "title": "Page format in request",
267 "enum": [
268 "HTML",
269 "Markdown"
270 ],
271 "type": "string",
272 "description": "In what format to send the content extracted from the page to the GPT. Markdown will take less space allowing for larger requests, while HTML may help include some information like attributes that may otherwise be omitted.",
273 "default": "Markdown"
274 },
275 "dynamicContentWaitSecs": {
276 "title": "Wait for dynamic content (seconds)",
277 "minimum": 0,
278 "maximum": 10,
279 "type": "integer",
280 "description": "The maximum time to wait for dynamic page content to load. The crawler will continue either if this time elapses, or if it detects the network became idle as there are no more requests for additional resources.",
281 "default": 0
282 },
283 "removeLinkUrls": {
284 "title": "Remove link URLs",
285 "type": "boolean",
286 "description": "Removes web link URLs while keeping the text content they display.\n- This helps reduce the total page content by eliminating unnecessary URLs before sending to GPT\n- Useful if you are hitting maximum input tokens limits",
287 "default": false
288 },
289 "useStructureOutput": {
290 "title": "Use JSON schema to format answer",
291 "type": "boolean",
292 "description": "If true, the answer will be transformed into a structured format based on the schema in the `jsonAnswer` attribute."
293 },
294 "schema": {
295 "title": "JSON schema format",
296 "type": "object",
297 "description": "Defines how the output will be stored in structured format using the [JSON Schema](https://json-schema.org/understanding-json-schema/). Keep in mind that it uses [function](https://platform.openai.com/docs/api-reference/chat/create#chat/create-functions), so by setting the description of the fields and the correct title, you can get better results."
298 },
299 "schemaDescription": {
300 "title": "Schema description",
301 "type": "string",
302 "description": "Description of the schema function. Use this to provide more context for the schema.\n\nBy default, the `instructions` field's value is used as the schema description, you can change it here."
303 },
304 "saveSnapshots": {
305 "title": "Save debug snapshots",
306 "type": "boolean",
307 "description": "For each page, store its HTML, screenshot, and parsed content (markdown/HTML as it was sent to ChatGPT), and add links to these to the output",
308 "default": true
309 }
310 }
311 },
312 "runsResponseSchema": {
313 "type": "object",
314 "properties": {
315 "data": {
316 "type": "object",
317 "properties": {
318 "id": {
319 "type": "string"
320 },
321 "actId": {
322 "type": "string"
323 },
324 "userId": {
325 "type": "string"
326 },
327 "startedAt": {
328 "type": "string",
329 "format": "date-time",
330 "example": "2025-01-08T00:00:00.000Z"
331 },
332 "finishedAt": {
333 "type": "string",
334 "format": "date-time",
335 "example": "2025-01-08T00:00:00.000Z"
336 },
337 "status": {
338 "type": "string",
339 "example": "READY"
340 },
341 "meta": {
342 "type": "object",
343 "properties": {
344 "origin": {
345 "type": "string",
346 "example": "API"
347 },
348 "userAgent": {
349 "type": "string"
350 }
351 }
352 },
353 "stats": {
354 "type": "object",
355 "properties": {
356 "inputBodyLen": {
357 "type": "integer",
358 "example": 2000
359 },
360 "rebootCount": {
361 "type": "integer",
362 "example": 0
363 },
364 "restartCount": {
365 "type": "integer",
366 "example": 0
367 },
368 "resurrectCount": {
369 "type": "integer",
370 "example": 0
371 },
372 "computeUnits": {
373 "type": "integer",
374 "example": 0
375 }
376 }
377 },
378 "options": {
379 "type": "object",
380 "properties": {
381 "build": {
382 "type": "string",
383 "example": "latest"
384 },
385 "timeoutSecs": {
386 "type": "integer",
387 "example": 300
388 },
389 "memoryMbytes": {
390 "type": "integer",
391 "example": 1024
392 },
393 "diskMbytes": {
394 "type": "integer",
395 "example": 2048
396 }
397 }
398 },
399 "buildId": {
400 "type": "string"
401 },
402 "defaultKeyValueStoreId": {
403 "type": "string"
404 },
405 "defaultDatasetId": {
406 "type": "string"
407 },
408 "defaultRequestQueueId": {
409 "type": "string"
410 },
411 "buildNumber": {
412 "type": "string",
413 "example": "1.0.0"
414 },
415 "containerUrl": {
416 "type": "string"
417 },
418 "usage": {
419 "type": "object",
420 "properties": {
421 "ACTOR_COMPUTE_UNITS": {
422 "type": "integer",
423 "example": 0
424 },
425 "DATASET_READS": {
426 "type": "integer",
427 "example": 0
428 },
429 "DATASET_WRITES": {
430 "type": "integer",
431 "example": 0
432 },
433 "KEY_VALUE_STORE_READS": {
434 "type": "integer",
435 "example": 0
436 },
437 "KEY_VALUE_STORE_WRITES": {
438 "type": "integer",
439 "example": 1
440 },
441 "KEY_VALUE_STORE_LISTS": {
442 "type": "integer",
443 "example": 0
444 },
445 "REQUEST_QUEUE_READS": {
446 "type": "integer",
447 "example": 0
448 },
449 "REQUEST_QUEUE_WRITES": {
450 "type": "integer",
451 "example": 0
452 },
453 "DATA_TRANSFER_INTERNAL_GBYTES": {
454 "type": "integer",
455 "example": 0
456 },
457 "DATA_TRANSFER_EXTERNAL_GBYTES": {
458 "type": "integer",
459 "example": 0
460 },
461 "PROXY_RESIDENTIAL_TRANSFER_GBYTES": {
462 "type": "integer",
463 "example": 0
464 },
465 "PROXY_SERPS": {
466 "type": "integer",
467 "example": 0
468 }
469 }
470 },
471 "usageTotalUsd": {
472 "type": "number",
473 "example": 0.00005
474 },
475 "usageUsd": {
476 "type": "object",
477 "properties": {
478 "ACTOR_COMPUTE_UNITS": {
479 "type": "integer",
480 "example": 0
481 },
482 "DATASET_READS": {
483 "type": "integer",
484 "example": 0
485 },
486 "DATASET_WRITES": {
487 "type": "integer",
488 "example": 0
489 },
490 "KEY_VALUE_STORE_READS": {
491 "type": "integer",
492 "example": 0
493 },
494 "KEY_VALUE_STORE_WRITES": {
495 "type": "number",
496 "example": 0.00005
497 },
498 "KEY_VALUE_STORE_LISTS": {
499 "type": "integer",
500 "example": 0
501 },
502 "REQUEST_QUEUE_READS": {
503 "type": "integer",
504 "example": 0
505 },
506 "REQUEST_QUEUE_WRITES": {
507 "type": "integer",
508 "example": 0
509 },
510 "DATA_TRANSFER_INTERNAL_GBYTES": {
511 "type": "integer",
512 "example": 0
513 },
514 "DATA_TRANSFER_EXTERNAL_GBYTES": {
515 "type": "integer",
516 "example": 0
517 },
518 "PROXY_RESIDENTIAL_TRANSFER_GBYTES": {
519 "type": "integer",
520 "example": 0
521 },
522 "PROXY_SERPS": {
523 "type": "integer",
524 "example": 0
525 }
526 }
527 }
528 }
529 }
530 }
531 }
532 }
533 }
534}
GPT Scraper OpenAPI definition
OpenAPI is a standard for designing and describing RESTful APIs, allowing developers to define API structure, endpoints, and data formats in a machine-readable way. It simplifies API development, integration, and documentation.
OpenAPI is effective when used with AI agents and GPTs because it standardizes how these systems interact with various APIs, enabling reliable integrations and efficient communication.
By defining machine-readable API specifications, OpenAPI allows AI models like GPTs to understand and use varied data sources, improving accuracy. This accelerates development, reduces errors, and provides context-aware responses, making OpenAPI a core component for AI applications.
You can download the OpenAPI definitions for GPT Scraper from the options below:
If you’d like to learn more about how OpenAPI powers GPTs, read our blog post.
You can also check out our other API clients:
Actor Metrics
144 monthly users
-
76 stars
>99% runs succeeded
1.6 days response time
Created in Mar 2023
Modified a month ago