Reddit Scraper
Pricing
from $0.60 / 1,000 posts
Reddit Scraper
Extract posts, comments, user profiles, and search results from Reddit. Pure HTTP, no API key required.
Reddit Scraper
Pricing
from $0.60 / 1,000 posts
Extract posts, comments, user profiles, and search results from Reddit. Pure HTTP, no API key required.
You can access the Reddit Scraper programmatically from your own applications by using the Apify API. You can also choose your preferred programming language below. To use the Apify API, you’ll need an Apify account and your API token, found in Integrations settings in Apify Console.
{ "openapi": "3.0.1", "info": { "version": "0.2", "x-build-id": "h6NMQn4zR7PGPe4ew" }, "servers": [ { "url": "https://api.apify.com/v2" } ], "paths": { "/acts/prodiger~reddit-scraper/run-sync-get-dataset-items": { "post": { "operationId": "run-sync-get-dataset-items-prodiger-reddit-scraper", "x-openai-isConsequential": false, "summary": "Executes an Actor, waits for its completion, and returns Actor's dataset items in response.", "tags": [ "Run Actor" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/inputSchema" } } } }, "parameters": [ { "name": "token", "in": "query", "required": true, "schema": { "type": "string" }, "description": "Enter your Apify token here" } ], "responses": { "200": { "description": "OK" } } } }, "/acts/prodiger~reddit-scraper/runs": { "post": { "operationId": "runs-sync-prodiger-reddit-scraper", "x-openai-isConsequential": false, "summary": "Executes an Actor and returns information about the initiated run in response.", "tags": [ "Run Actor" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/inputSchema" } } } }, "parameters": [ { "name": "token", "in": "query", "required": true, "schema": { "type": "string" }, "description": "Enter your Apify token here" } ], "responses": { "200": { "description": "OK", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/runsResponseSchema" } } } } } } }, "/acts/prodiger~reddit-scraper/run-sync": { "post": { "operationId": "run-sync-prodiger-reddit-scraper", "x-openai-isConsequential": false, "summary": "Executes an Actor, waits for completion, and returns the OUTPUT from Key-value store in response.", "tags": [ "Run Actor" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/inputSchema" } } } }, "parameters": [ { "name": "token", "in": "query", "required": true, "schema": { "type": "string" 
}, "description": "Enter your Apify token here" } ], "responses": { "200": { "description": "OK" } } } } }, "components": { "schemas": { "inputSchema": { "type": "object", "properties": { "urls": { "title": "Reddit URLs", "type": "array", "description": "Reddit URLs to scrape. Accepts subreddit listings (https://www.reddit.com/r/technology/), individual posts (https://www.reddit.com/r/x/comments/abc123/...), user profiles (https://www.reddit.com/user/spez/), and search URLs (https://www.reddit.com/search/?q=...). Shortcut forms 'r/technology' and bare 'technology' are accepted as subreddit names.", "items": { "type": "object", "required": [ "url" ], "properties": { "url": { "type": "string", "title": "URL of a web page", "format": "uri" } } } }, "searchQuery": { "title": "Search query", "type": "string", "description": "Optional. Search Reddit for this query. Combine with 'searchSubreddit' to limit search to a single subreddit. Either 'urls' or 'searchQuery' must be provided." }, "searchSubreddit": { "title": "Search subreddit (optional)", "type": "string", "description": "When 'searchQuery' is set, restrict search to this subreddit (without the r/ prefix)." }, "sort": { "title": "Sort order", "enum": [ "hot", "new", "top", "rising", "relevance" ], "type": "string", "description": "Listing sort order. 'relevance' applies to search only.", "default": "hot" }, "timeFilter": { "title": "Time filter", "enum": [ "hour", "day", "week", "month", "year", "all" ], "type": "string", "description": "Time window. Applies to sort=top and sort=relevance.", "default": "week" }, "maxPostsPerSource": { "title": "Max posts per source", "minimum": 0, "maximum": 100000, "type": "integer", "description": "Maximum number of posts to scrape per source (subreddit, user, search). 
Set to 0 for unlimited (capped by Reddit's ~1000-item-per-listing ceiling).", "default": 100 }, "includeComments": { "title": "Include comments", "type": "boolean", "description": "Also scrape comments for each post. Increases run time and cost. Required for 'jsonl-finetune' and 'rag-markdown' output formats to have content to bundle.", "default": false }, "maxCommentsPerPost": { "title": "Max comments per post", "minimum": 1, "maximum": 1000, "type": "integer", "description": "Maximum comments to scrape per post. Hard-capped at 1000 to bound proxy/compute cost.", "default": 100 }, "commentDepth": { "title": "Comment depth", "minimum": 1, "maximum": 10, "type": "integer", "description": "Maximum reply nesting depth (1 = top-level comments only).", "default": 3 }, "filterKeywords": { "title": "Filter keywords (optional)", "type": "array", "description": "Only keep posts whose title or body contains at least one of these terms (case-insensitive). Leave empty to keep all posts. Filtered posts are not charged.", "items": { "type": "string" } }, "outputFormat": { "title": "Output format", "enum": [ "default", "jsonl-finetune", "rag-markdown" ], "type": "string", "description": "default = standard JSON. jsonl-finetune = OpenAI chat-format records for LLM SFT. rag-markdown = self-contained markdown documents for vector-DB ingestion. AI formats bundle comments into the post record at no extra charge.", "default": "default" }, "maxRequestRetries": { "title": "Max request retries", "minimum": 1, "maximum": 10, "type": "integer", "description": "Number of retry attempts for failed requests (429, 5xx, network errors).", "default": 5 }, "proxyConfiguration": { "title": "Proxy configuration", "type": "object", "description": "Apify proxy. RESIDENTIAL is the default — Reddit aggressively blocks datacenter IP ranges in 2026, so DATACENTER will likely fail on sustained runs. 
Override only if you understand the trade-off.", "default": { "useApifyProxy": true, "apifyProxyGroups": [ "RESIDENTIAL" ] } } } }, "runsResponseSchema": { "type": "object", "properties": { "data": { "type": "object", "properties": { "id": { "type": "string" }, "actId": { "type": "string" }, "userId": { "type": "string" }, "startedAt": { "type": "string", "format": "date-time", "example": "2025-01-08T00:00:00.000Z" }, "finishedAt": { "type": "string", "format": "date-time", "example": "2025-01-08T00:00:00.000Z" }, "status": { "type": "string", "example": "READY" }, "meta": { "type": "object", "properties": { "origin": { "type": "string", "example": "API" }, "userAgent": { "type": "string" } } }, "stats": { "type": "object", "properties": { "inputBodyLen": { "type": "integer", "example": 2000 }, "rebootCount": { "type": "integer", "example": 0 }, "restartCount": { "type": "integer", "example": 0 }, "resurrectCount": { "type": "integer", "example": 0 }, "computeUnits": { "type": "integer", "example": 0 } } }, "options": { "type": "object", "properties": { "build": { "type": "string", "example": "latest" }, "timeoutSecs": { "type": "integer", "example": 300 }, "memoryMbytes": { "type": "integer", "example": 1024 }, "diskMbytes": { "type": "integer", "example": 2048 } } }, "buildId": { "type": "string" }, "defaultKeyValueStoreId": { "type": "string" }, "defaultDatasetId": { "type": "string" }, "defaultRequestQueueId": { "type": "string" }, "buildNumber": { "type": "string", "example": "1.0.0" }, "containerUrl": { "type": "string" }, "usage": { "type": "object", "properties": { "ACTOR_COMPUTE_UNITS": { "type": "integer", "example": 0 }, "DATASET_READS": { "type": "integer", "example": 0 }, "DATASET_WRITES": { "type": "integer", "example": 0 }, "KEY_VALUE_STORE_READS": { "type": "integer", "example": 0 }, "KEY_VALUE_STORE_WRITES": { "type": "integer", "example": 1 }, "KEY_VALUE_STORE_LISTS": { "type": "integer", "example": 0 }, "REQUEST_QUEUE_READS": { "type": "integer", 
"example": 0 }, "REQUEST_QUEUE_WRITES": { "type": "integer", "example": 0 }, "DATA_TRANSFER_INTERNAL_GBYTES": { "type": "integer", "example": 0 }, "DATA_TRANSFER_EXTERNAL_GBYTES": { "type": "integer", "example": 0 }, "PROXY_RESIDENTIAL_TRANSFER_GBYTES": { "type": "integer", "example": 0 }, "PROXY_SERPS": { "type": "integer", "example": 0 } } }, "usageTotalUsd": { "type": "number", "example": 0.00005 }, "usageUsd": { "type": "object", "properties": { "ACTOR_COMPUTE_UNITS": { "type": "integer", "example": 0 }, "DATASET_READS": { "type": "integer", "example": 0 }, "DATASET_WRITES": { "type": "integer", "example": 0 }, "KEY_VALUE_STORE_READS": { "type": "integer", "example": 0 }, "KEY_VALUE_STORE_WRITES": { "type": "number", "example": 0.00005 }, "KEY_VALUE_STORE_LISTS": { "type": "integer", "example": 0 }, "REQUEST_QUEUE_READS": { "type": "integer", "example": 0 }, "REQUEST_QUEUE_WRITES": { "type": "integer", "example": 0 }, "DATA_TRANSFER_INTERNAL_GBYTES": { "type": "integer", "example": 0 }, "DATA_TRANSFER_EXTERNAL_GBYTES": { "type": "integer", "example": 0 }, "PROXY_RESIDENTIAL_TRANSFER_GBYTES": { "type": "integer", "example": 0 }, "PROXY_SERPS": { "type": "integer", "example": 0 } } } } } } } } }}OpenAPI is a standard for designing and describing RESTful APIs, allowing developers to define API structure, endpoints, and data formats in a machine-readable way. It simplifies API development, integration, and documentation.
OpenAPI is effective when used with AI agents and GPTs because it standardizes how these systems interact with various APIs, enabling reliable integrations and efficient communication.
By defining machine-readable API specifications, OpenAPI allows AI models like GPTs to understand and use varied data sources, improving accuracy. This accelerates development, reduces errors, and provides context-aware responses, making OpenAPI a core component for AI applications.
You can download the OpenAPI definitions for Reddit Scraper from the options below:
If you’d like to learn more about how OpenAPI powers GPTs, read our blog post.
You can also check out our other API clients: