YouTube Transcriber
Pricing
from $5.00 / 1,000 caption transcripts
Go to Apify Store
YouTube Transcriber
Transcribe YouTube videos. Captions when available, OpenAI Whisper fallback (BYOK) for the rest. No YouTube account needed.
YouTube Transcriber
Pricing
from $5.00 / 1,000 caption transcripts
Transcribe YouTube videos. Captions when available, OpenAI Whisper fallback (BYOK) for the rest. No YouTube account needed.
You can access the YouTube Transcriber programmatically from your own applications by using the Apify API. You can also choose your preferred programming language for the code examples from the options below. To use the Apify API, you’ll need an Apify account and your API token, found in Integrations settings in Apify Console.
{ "openapi": "3.0.1", "info": { "title": "YouTube Transcriber", "version": "0.2", "x-build-id": "wmmcWxuPrAUco9Yj4" }, "servers": [ { "url": "https://api.apify.com/v2" } ], "paths": { "/acts/prodiger~youtube-transcriber/run-sync-get-dataset-items": { "post": { "operationId": "run-sync-get-dataset-items-prodiger-youtube-transcriber", "x-openai-isConsequential": false, "summary": "Executes an Actor, waits for its completion, and returns Actor's dataset items in response.", "tags": [ "Run Actor" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/inputSchema" } } } }, "parameters": [ { "name": "token", "in": "query", "required": true, "schema": { "type": "string" }, "description": "Enter your Apify token here" } ], "responses": { "200": { "description": "OK" } } } }, "/acts/prodiger~youtube-transcriber/runs": { "post": { "operationId": "runs-sync-prodiger-youtube-transcriber", "x-openai-isConsequential": false, "summary": "Executes an Actor and returns information about the initiated run in response.", "tags": [ "Run Actor" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/inputSchema" } } } }, "parameters": [ { "name": "token", "in": "query", "required": true, "schema": { "type": "string" }, "description": "Enter your Apify token here" } ], "responses": { "200": { "description": "OK", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/runsResponseSchema" } } } } } } }, "/acts/prodiger~youtube-transcriber/run-sync": { "post": { "operationId": "run-sync-prodiger-youtube-transcriber", "x-openai-isConsequential": false, "summary": "Executes an Actor, waits for completion, and returns the OUTPUT from Key-value store in response.", "tags": [ "Run Actor" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/inputSchema" } } } }, "parameters": [ { "name": "token", "in": "query", "required": 
true, "schema": { "type": "string" }, "description": "Enter your Apify token here" } ], "responses": { "200": { "description": "OK" } } } } }, "components": { "schemas": { "inputSchema": { "type": "object", "required": [ "videoUrls" ], "properties": { "videoUrls": { "title": "Video URLs", "type": "array", "description": "YouTube video URLs (any standard format) or bare 11-character video IDs. Each URL is canonicalized + validated against a strict hostname allowlist before any download.", "items": { "type": "string" } }, "preferredLanguage": { "title": "Preferred caption language", "type": "string", "description": "BCP-47 language code (e.g. 'en', 'es', 'fr'). When the requested caption language isn't available, the actor falls through to Whisper.", "default": "en" }, "transcriptMethod": { "title": "Transcript method", "enum": [ "auto", "captions", "whisper" ], "type": "string", "description": "auto = captions first, fall through to Whisper. captions = captions only. whisper = Whisper only.", "default": "auto" }, "openaiApiKey": { "title": "OpenAI API key (optional)", "type": "string", "description": "Your OpenAI API key. Optional — only needed when a video has no captions in your preferred language and you want Whisper to transcribe it. With no key: captions still work; videos without captions are skipped with a clear reason. The key is never logged or stored. Required only when transcriptMethod='whisper'." }, "whisperModel": { "title": "Whisper model", "enum": [ "whisper-1" ], "type": "string", "description": "OpenAI Whisper model. Only whisper-1 supports verbose_json segment timestamps required by the JSON output format.", "default": "whisper-1" }, "outputFormat": { "title": "Output format", "enum": [ "text", "json" ], "type": "string", "description": "text = plain transcript. 
json = array of {start, end, text} segments.", "default": "text" }, "includeTimestamps": { "title": "Include timestamps in text output", "type": "boolean", "description": "When outputFormat=text, prefix each segment with [HH:MM:SS]. Has no effect on JSON output (timestamps always present there).", "default": false }, "maxDurationMinutes": { "title": "Max video duration (minutes)", "minimum": 1, "maximum": 300, "type": "integer", "description": "Skip videos longer than this. Default 18 to keep audio under OpenAI Whisper's 25 MB hard limit even at higher bitrates.", "default": 18 }, "maxWhisperMinutesPerRun": { "title": "Max Whisper minutes per run", "minimum": 0, "maximum": 10000, "type": "integer", "description": "Cap on total minutes of audio sent to Whisper. Bounds your OpenAI bill per run. Set 0 for unlimited.", "default": 60 }, "proxyConfiguration": { "title": "Proxy configuration", "type": "object", "description": "Apify proxy. RESIDENTIAL is the default — YouTube blocks datacenter IPs aggressively in 2026.", "default": { "useApifyProxy": true, "apifyProxyGroups": [ "RESIDENTIAL" ] } } } }, "runsResponseSchema": { "type": "object", "properties": { "data": { "type": "object", "properties": { "id": { "type": "string" }, "actId": { "type": "string" }, "userId": { "type": "string" }, "startedAt": { "type": "string", "format": "date-time", "example": "2025-01-08T00:00:00.000Z" }, "finishedAt": { "type": "string", "format": "date-time", "example": "2025-01-08T00:00:00.000Z" }, "status": { "type": "string", "example": "READY" }, "meta": { "type": "object", "properties": { "origin": { "type": "string", "example": "API" }, "userAgent": { "type": "string" } } }, "stats": { "type": "object", "properties": { "inputBodyLen": { "type": "integer", "example": 2000 }, "rebootCount": { "type": "integer", "example": 0 }, "restartCount": { "type": "integer", "example": 0 }, "resurrectCount": { "type": "integer", "example": 0 }, "computeUnits": { "type": "integer", "example": 0 } 
} }, "options": { "type": "object", "properties": { "build": { "type": "string", "example": "latest" }, "timeoutSecs": { "type": "integer", "example": 300 }, "memoryMbytes": { "type": "integer", "example": 1024 }, "diskMbytes": { "type": "integer", "example": 2048 } } }, "buildId": { "type": "string" }, "defaultKeyValueStoreId": { "type": "string" }, "defaultDatasetId": { "type": "string" }, "defaultRequestQueueId": { "type": "string" }, "buildNumber": { "type": "string", "example": "1.0.0" }, "containerUrl": { "type": "string" }, "usage": { "type": "object", "properties": { "ACTOR_COMPUTE_UNITS": { "type": "integer", "example": 0 }, "DATASET_READS": { "type": "integer", "example": 0 }, "DATASET_WRITES": { "type": "integer", "example": 0 }, "KEY_VALUE_STORE_READS": { "type": "integer", "example": 0 }, "KEY_VALUE_STORE_WRITES": { "type": "integer", "example": 1 }, "KEY_VALUE_STORE_LISTS": { "type": "integer", "example": 0 }, "REQUEST_QUEUE_READS": { "type": "integer", "example": 0 }, "REQUEST_QUEUE_WRITES": { "type": "integer", "example": 0 }, "DATA_TRANSFER_INTERNAL_GBYTES": { "type": "integer", "example": 0 }, "DATA_TRANSFER_EXTERNAL_GBYTES": { "type": "integer", "example": 0 }, "PROXY_RESIDENTIAL_TRANSFER_GBYTES": { "type": "integer", "example": 0 }, "PROXY_SERPS": { "type": "integer", "example": 0 } } }, "usageTotalUsd": { "type": "number", "example": 0.00005 }, "usageUsd": { "type": "object", "properties": { "ACTOR_COMPUTE_UNITS": { "type": "integer", "example": 0 }, "DATASET_READS": { "type": "integer", "example": 0 }, "DATASET_WRITES": { "type": "integer", "example": 0 }, "KEY_VALUE_STORE_READS": { "type": "integer", "example": 0 }, "KEY_VALUE_STORE_WRITES": { "type": "number", "example": 0.00005 }, "KEY_VALUE_STORE_LISTS": { "type": "integer", "example": 0 }, "REQUEST_QUEUE_READS": { "type": "integer", "example": 0 }, "REQUEST_QUEUE_WRITES": { "type": "integer", "example": 0 }, "DATA_TRANSFER_INTERNAL_GBYTES": { "type": "integer", "example": 0 }, 
"DATA_TRANSFER_EXTERNAL_GBYTES": { "type": "integer", "example": 0 }, "PROXY_RESIDENTIAL_TRANSFER_GBYTES": { "type": "integer", "example": 0 }, "PROXY_SERPS": { "type": "integer", "example": 0 } } } } } } } } }}
OpenAPI is a standard for designing and describing RESTful APIs, allowing developers to define API structure, endpoints, and data formats in a machine-readable way. It simplifies API development, integration, and documentation.
OpenAPI is effective when used with AI agents and GPTs by standardizing how these systems interact with various APIs, for reliable integrations and efficient communication.
By defining machine-readable API specifications, OpenAPI allows AI models like GPTs to understand and use varied data sources, improving accuracy. This accelerates development, reduces errors, and provides context-aware responses, making OpenAPI a core component for AI applications.
You can download the OpenAPI definitions for YouTube Transcriber from the options below:
If you’d like to learn more about how OpenAPI powers GPTs, read our blog post.
You can also check out our other API clients: