
Reddit Scraper Lite
This Actor is paid per event

Pay-per-result, unlimited Reddit web scraper that crawls posts, comments, communities, and users without logging in. Limit scraping by the number of posts or items, and extract all data into a dataset in multiple formats.
You can access Reddit Scraper Lite programmatically from your own applications by using the Apify API. You can choose your preferred language below. To use the Apify API, you'll need an Apify account and your API token, which you can find under Integrations settings in the Apify Console.
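For example, here is a minimal sketch in Python of calling the synchronous run-sync-get-dataset-items endpoint defined below. It assumes you have the requests library installed and an Apify API token; the search term and limits are illustrative values, and proxy is the only required input field per the schema.

import requests

API_TOKEN = "<YOUR_APIFY_TOKEN>"  # found under Integrations settings in Apify Console
ACTOR_ID = "trudax~reddit-scraper-lite"

# Runs the Actor synchronously and returns its dataset items
# (see the run-sync-get-dataset-items path in the definition below).
url = f"https://api.apify.com/v2/acts/{ACTOR_ID}/run-sync-get-dataset-items"

# "proxy" is the only required field in the input schema; the other
# fields are illustrative and use values the schema documents.
run_input = {
    "searches": ["web scraping"],
    "searchPosts": True,
    "sort": "new",
    "maxItems": 10,
    "proxy": {
        "useApifyProxy": True,
        "apifyProxyGroups": ["RESIDENTIAL"],
    },
}

response = requests.post(url, params={"token": API_TOKEN}, json=run_input)
response.raise_for_status()

# The response body is the list of scraped dataset items.
for item in response.json():
    print(item)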
{
  "openapi": "3.0.1",
  "info": {
    "version": "4.16",
    "x-build-id": "wyEDaa2BurAl1Co5j"
  },
  "servers": [
    {
      "url": "https://api.apify.com/v2"
    }
  ],
  "paths": {
    "/acts/trudax~reddit-scraper-lite/run-sync-get-dataset-items": {
      "post": {
        "operationId": "run-sync-get-dataset-items-trudax-reddit-scraper-lite",
        "x-openai-isConsequential": false,
        "summary": "Executes an Actor, waits for its completion, and returns the Actor's dataset items in the response.",
        "tags": [
          "Run Actor"
        ],
        "requestBody": {
          "required": true,
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/inputSchema"
              }
            }
          }
        },
        "parameters": [
          {
            "name": "token",
            "in": "query",
            "required": true,
            "schema": {
              "type": "string"
            },
            "description": "Enter your Apify token here"
          }
        ],
        "responses": {
          "200": {
            "description": "OK"
          }
        }
      }
    },
    "/acts/trudax~reddit-scraper-lite/runs": {
      "post": {
        "operationId": "runs-sync-trudax-reddit-scraper-lite",
        "x-openai-isConsequential": false,
        "summary": "Executes an Actor and returns information about the initiated run in the response.",
        "tags": [
          "Run Actor"
        ],
        "requestBody": {
          "required": true,
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/inputSchema"
              }
            }
          }
        },
        "parameters": [
          {
            "name": "token",
            "in": "query",
            "required": true,
            "schema": {
              "type": "string"
            },
            "description": "Enter your Apify token here"
          }
        ],
        "responses": {
          "200": {
            "description": "OK",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/runsResponseSchema"
                }
              }
            }
          }
        }
      }
    },
    "/acts/trudax~reddit-scraper-lite/run-sync": {
      "post": {
        "operationId": "run-sync-trudax-reddit-scraper-lite",
        "x-openai-isConsequential": false,
        "summary": "Executes an Actor, waits for completion, and returns the OUTPUT from the Key-value store in the response.",
        "tags": [
          "Run Actor"
        ],
        "requestBody": {
          "required": true,
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/inputSchema"
              }
            }
          }
        },
        "parameters": [
          {
            "name": "token",
            "in": "query",
            "required": true,
            "schema": {
              "type": "string"
            },
            "description": "Enter your Apify token here"
          }
        ],
        "responses": {
          "200": {
            "description": "OK"
          }
        }
      }
    }
  },
  "components": {
    "schemas": {
      "inputSchema": {
        "type": "object",
        "required": [
          "proxy"
        ],
        "properties": {
          "startUrls": {
            "title": "Start URLs",
            "type": "array",
            "description": "If you already have URL(s) of page(s) you wish to scrape, you can set them here. If you want to use the search field below, remove all startUrls here.",
            "items": {
              "type": "object",
              "required": [
                "url"
              ],
              "properties": {
                "url": {
                  "type": "string",
                  "title": "URL of a web page",
                  "format": "uri"
                }
              }
            }
          },
          "skipComments": {
            "title": "Skip comments",
            "type": "boolean",
            "description": "This will skip scraping comments when going through posts",
            "default": false
          },
          "skipUserPosts": {
            "title": "Skip user posts",
            "type": "boolean",
            "description": "This will skip scraping user posts when going through user activity",
            "default": false
          },
          "skipCommunity": {
            "title": "Skip community",
            "type": "boolean",
            "description": "This will skip scraping community info but will still get community posts if they were not skipped.",
            "default": false
          },
          "searches": {
            "title": "Search Term",
            "type": "array",
            "description": "Here you can provide a search query which will be used to search Reddit's topics.",
            "items": {
              "type": "string"
            }
          },
          "searchPosts": {
            "title": "Search for posts",
            "type": "boolean",
            "description": "Will search for posts with the provided search",
            "default": true
          },
          "searchComments": {
            "title": "Search for comments",
            "type": "boolean",
            "description": "Will search for comments with the provided search",
            "default": false
          },
          "searchCommunities": {
            "title": "Search for communities",
            "type": "boolean",
            "description": "Will search for communities with the provided search",
            "default": false
          },
          "searchUsers": {
            "title": "Search for users",
            "type": "boolean",
            "description": "Will search for users with the provided search",
            "default": false
          },
          "sort": {
            "title": "Sort search",
            "enum": [
              "",
              "relevance",
              "hot",
              "top",
              "new",
              "rising",
              "comments"
            ],
            "type": "string",
            "description": "Sort search by Relevance, Hot, Top, New, Rising, or Comments",
            "default": "new"
          },
          "time": {
            "title": "Filter by date (Posts only)",
            "enum": [
              "all",
              "hour",
              "day",
              "week",
              "month",
              "year"
            ],
            "type": "string",
            "description": "Filter posts by all time or by the last hour, day, week, month, or year"
          },
          "includeNSFW": {
            "title": "Include NSFW content",
            "type": "boolean",
            "description": "You can choose to include or exclude NSFW content from your search",
            "default": true
          },
          "maxItems": {
            "title": "Maximum number of items to be saved",
            "type": "integer",
            "description": "The maximum number of items that will be saved in the dataset. If you are scraping Communities & Users, remember that each category inside a community is saved as a separate item.",
            "default": 10
          },
          "maxPostCount": {
            "title": "Limit of posts scraped inside a single page",
            "type": "integer",
            "description": "The maximum number of posts that will be scraped for each Posts Page or Communities & Users URL",
            "default": 10
          },
          "postDateLimit": {
            "title": "Post date limit",
            "type": "string",
            "description": "Use this value when you want to get only posts after a specific date"
          },
          "maxComments": {
            "title": "Limit of comments scraped inside a single page",
            "type": "integer",
            "description": "The maximum number of comments that will be scraped for each Comments Page. If you don't want to scrape comments, you can set this to zero.",
            "default": 10
          },
          "maxCommunitiesCount": {
            "title": "Limit of Communities pages scraped",
            "type": "integer",
            "description": "The maximum number of Communities pages that will be scraped if your search or startUrl is a Communities type.",
            "default": 2
          },
          "maxUserCount": {
            "title": "Limit of Users pages scraped",
            "type": "integer",
            "description": "The maximum number of Users pages that will be scraped.",
            "default": 2
          },
          "scrollTimeout": {
            "title": "Page scroll timeout (seconds)",
            "type": "integer",
            "description": "Set the timeout in seconds after which the page will stop scrolling down to load new items",
            "default": 40
          },
          "proxy": {
            "title": "Proxy configuration",
            "type": "object",
            "description": "Either use an Apify proxy or provide your own proxy servers.",
            "default": {
              "useApifyProxy": true,
              "apifyProxyGroups": [
                "RESIDENTIAL"
              ]
            }
          },
          "debugMode": {
            "title": "Debug Mode",
            "type": "boolean",
            "description": "Activate to see detailed logs",
            "default": false
          }
        }
      },
      "runsResponseSchema": {
        "type": "object",
        "properties": {
          "data": {
            "type": "object",
            "properties": {
              "id": {
                "type": "string"
              },
              "actId": {
                "type": "string"
              },
              "userId": {
                "type": "string"
              },
              "startedAt": {
                "type": "string",
                "format": "date-time",
                "example": "2025-01-08T00:00:00.000Z"
              },
              "finishedAt": {
                "type": "string",
                "format": "date-time",
                "example": "2025-01-08T00:00:00.000Z"
              },
              "status": {
                "type": "string",
                "example": "READY"
              },
              "meta": {
                "type": "object",
                "properties": {
                  "origin": {
                    "type": "string",
                    "example": "API"
                  },
                  "userAgent": {
                    "type": "string"
                  }
                }
              },
              "stats": {
                "type": "object",
                "properties": {
                  "inputBodyLen": {
                    "type": "integer",
                    "example": 2000
                  },
                  "rebootCount": {
                    "type": "integer",
                    "example": 0
                  },
                  "restartCount": {
                    "type": "integer",
                    "example": 0
                  },
                  "resurrectCount": {
                    "type": "integer",
                    "example": 0
                  },
                  "computeUnits": {
                    "type": "integer",
                    "example": 0
                  }
                }
              },
              "options": {
                "type": "object",
                "properties": {
                  "build": {
                    "type": "string",
                    "example": "latest"
                  },
                  "timeoutSecs": {
                    "type": "integer",
                    "example": 300
                  },
                  "memoryMbytes": {
                    "type": "integer",
                    "example": 1024
                  },
                  "diskMbytes": {
                    "type": "integer",
                    "example": 2048
                  }
                }
              },
              "buildId": {
                "type": "string"
              },
              "defaultKeyValueStoreId": {
                "type": "string"
              },
              "defaultDatasetId": {
                "type": "string"
              },
              "defaultRequestQueueId": {
                "type": "string"
              },
              "buildNumber": {
                "type": "string",
                "example": "1.0.0"
              },
              "containerUrl": {
                "type": "string"
              },
              "usage": {
                "type": "object",
                "properties": {
                  "ACTOR_COMPUTE_UNITS": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATASET_READS": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATASET_WRITES": {
                    "type": "integer",
                    "example": 0
                  },
                  "KEY_VALUE_STORE_READS": {
                    "type": "integer",
                    "example": 0
                  },
                  "KEY_VALUE_STORE_WRITES": {
                    "type": "integer",
                    "example": 1
                  },
                  "KEY_VALUE_STORE_LISTS": {
                    "type": "integer",
                    "example": 0
                  },
                  "REQUEST_QUEUE_READS": {
                    "type": "integer",
                    "example": 0
                  },
                  "REQUEST_QUEUE_WRITES": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATA_TRANSFER_INTERNAL_GBYTES": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATA_TRANSFER_EXTERNAL_GBYTES": {
                    "type": "integer",
                    "example": 0
                  },
                  "PROXY_RESIDENTIAL_TRANSFER_GBYTES": {
                    "type": "integer",
                    "example": 0
                  },
                  "PROXY_SERPS": {
                    "type": "integer",
                    "example": 0
                  }
                }
              },
              "usageTotalUsd": {
                "type": "number",
                "example": 0.00005
              },
              "usageUsd": {
                "type": "object",
                "properties": {
                  "ACTOR_COMPUTE_UNITS": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATASET_READS": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATASET_WRITES": {
                    "type": "integer",
                    "example": 0
                  },
                  "KEY_VALUE_STORE_READS": {
                    "type": "integer",
                    "example": 0
                  },
                  "KEY_VALUE_STORE_WRITES": {
                    "type": "number",
                    "example": 0.00005
                  },
                  "KEY_VALUE_STORE_LISTS": {
                    "type": "integer",
                    "example": 0
                  },
                  "REQUEST_QUEUE_READS": {
                    "type": "integer",
                    "example": 0
                  },
                  "REQUEST_QUEUE_WRITES": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATA_TRANSFER_INTERNAL_GBYTES": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATA_TRANSFER_EXTERNAL_GBYTES": {
                    "type": "integer",
                    "example": 0
                  },
                  "PROXY_RESIDENTIAL_TRANSFER_GBYTES": {
                    "type": "integer",
                    "example": 0
                  },
                  "PROXY_SERPS": {
                    "type": "integer",
                    "example": 0
                  }
                }
              }
            }
          }
        }
      }
    }
  }
}
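If you prefer not to block while the Actor runs, the asynchronous flow works as follows: start a run via the /runs endpoint above, poll the run until it reaches a terminal status, then fetch its dataset items. The id, status, and defaultDatasetId fields come from the runsResponseSchema; the polling and dataset endpoints used below are the general Apify API run and dataset endpoints, not part of the definition above. A minimal sketch in Python:

import time
import requests

API_TOKEN = "<YOUR_APIFY_TOKEN>"
ACTOR_ID = "trudax~reddit-scraper-lite"
BASE = "https://api.apify.com/v2"

# Start a run via the asynchronous /runs endpoint; the response body follows
# runsResponseSchema, so data.id and data.defaultDatasetId are available.
started = requests.post(
    f"{BASE}/acts/{ACTOR_ID}/runs",
    params={"token": API_TOKEN},
    json={"proxy": {"useApifyProxy": True, "apifyProxyGroups": ["RESIDENTIAL"]}},
).json()["data"]

run_id = started["id"]
dataset_id = started["defaultDatasetId"]

# Poll the run until it reaches a terminal status.
while True:
    run = requests.get(
        f"{BASE}/actor-runs/{run_id}", params={"token": API_TOKEN}
    ).json()["data"]
    if run["status"] in ("SUCCEEDED", "FAILED", "ABORTED", "TIMED-OUT"):
        break
    time.sleep(5)

# Fetch the scraped items from the run's default dataset.
items = requests.get(
    f"{BASE}/datasets/{dataset_id}/items",
    params={"token": API_TOKEN, "format": "json"},
).json()

print(f"Run finished with status {run['status']}; got {len(items)} items")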
Reddit Scraper Lite OpenAPI definition
OpenAPI is a standard for designing and describing RESTful APIs, allowing developers to define API structure, endpoints, and data formats in a machine-readable way. It simplifies API development, integration, and documentation.
OpenAPI is effective when used with AI agents and GPTs because it standardizes how these systems interact with various APIs, enabling reliable integrations and efficient communication.
By defining machine-readable API specifications, OpenAPI allows AI models like GPTs to understand and use varied data sources, improving accuracy. This accelerates development, reduces errors, and provides context-aware responses, making OpenAPI a core component for AI applications.
The OpenAPI definition for Reddit Scraper Lite is shown in full above and is available for download.
If you'd like to learn more about how OpenAPI powers GPTs, read our blog post.
You can also check out our other API clients.
Actor Metrics
437 monthly users
92 bookmarks
92% of runs succeeded
22-hour response time
Created in Jun 2020
Modified 3 days ago