Cold Email Personalizer
Pricing
from $0.01 / 1,000 results
Go to Apify Store
Cold Email Personalizer
Generate personalized cold emails (subject lines + email + follow-ups) from lead/company context.
Cold Email Personalizer
Pricing
from $0.01 / 1,000 results
Generate personalized cold emails (subject lines + email + follow-ups) from lead/company context.
You can access the Cold Email Personalizer programmatically from your own applications by using the Apify API. You can also choose your preferred programming language below. To use the Apify API, you’ll need an Apify account and your API token, found in Integrations settings in Apify Console.
{ "openapi": "3.0.1", "info": { "title": "Cold Email Personalizer API", "version": "0.1", "x-build-id": "KMREiF2KexaP4vKzV" }, "servers": [ { "url": "https://api.apify.com/v2" } ], "paths": { "/acts/amcllc~cold-email-personalizer/run-sync-get-dataset-items": { "post": { "operationId": "run-sync-get-dataset-items-amcllc-cold-email-personalizer", "x-openai-isConsequential": false, "summary": "Executes an Actor, waits for its completion, and returns Actor's dataset items in response.", "tags": [ "Run Actor" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/inputSchema" } } } }, "parameters": [ { "name": "token", "in": "query", "required": true, "schema": { "type": "string" }, "description": "Enter your Apify token here" } ], "responses": { "200": { "description": "OK" } } } }, "/acts/amcllc~cold-email-personalizer/runs": { "post": { "operationId": "runs-sync-amcllc-cold-email-personalizer", "x-openai-isConsequential": false, "summary": "Executes an Actor and returns information about the initiated run in response.", "tags": [ "Run Actor" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/inputSchema" } } } }, "parameters": [ { "name": "token", "in": "query", "required": true, "schema": { "type": "string" }, "description": "Enter your Apify token here" } ], "responses": { "200": { "description": "OK", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/runsResponseSchema" } } } } } } }, "/acts/amcllc~cold-email-personalizer/run-sync": { "post": { "operationId": "run-sync-amcllc-cold-email-personalizer", "x-openai-isConsequential": false, "summary": "Executes an Actor, waits for completion, and returns the OUTPUT from Key-value store in response.", "tags": [ "Run Actor" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/inputSchema" } } } }, "parameters": [ { "name": "token", "in": "query", 
"required": true, "schema": { "type": "string" }, "description": "Enter your Apify token here" } ], "responses": { "200": { "description": "OK" } } } } }, "components": { "schemas": { "inputSchema": { "type": "object", "properties": { "leads": { "title": "Leads", "type": "array", "description": "List of leads to generate cold email packs for.", "default": [ { "companyUrl": "https://example.com", "companyName": "Example Co", "contactName": "Alex", "contactRole": "Head of Marketing", "linkedinUrl": "https://linkedin.com/company/example", "notes": "Series A, hiring growth roles. Has pricing page." } ] }, "offer": { "title": "Your offer (what you sell)", "type": "string", "description": "One paragraph describing your product/service and target buyer.", "default": "We help B2B SaaS teams generate more qualified demos with cold outbound personalization and deliverability-safe sequences." }, "valueProps": { "title": "Value props", "type": "array", "description": "Optional bullet value props the model should use (2-6 is ideal).", "default": [ "Short time-to-value", "No long-term contract", "Works with your CRM" ], "items": { "type": "string" } }, "tone": { "title": "Tone", "enum": [ "friendly", "direct", "professional", "casual" ], "type": "string", "description": "Writing style for the emails.", "default": "direct" }, "cta": { "title": "CTA", "type": "string", "description": "Preferred call-to-action (e.g. ask for 10-min call, ask permission, etc).", "default": "Open to a quick 10-minute chat next week?" 
}, "senderName": { "title": "Sender name", "type": "string", "description": "Name that should be used in the email signature.", "default": "Your Name" }, "senderRole": { "title": "Sender role", "type": "string", "description": "Sender role/title used in the email signature.", "default": "Founder" }, "senderCompany": { "title": "Sender company", "type": "string", "description": "Company used in the email signature.", "default": "Your Company" }, "includeFollowUps": { "title": "Include follow-ups", "type": "boolean", "description": "Generate follow-up messages in addition to the initial email.", "default": true }, "maxLeads": { "title": "Max leads per run", "type": "integer", "description": "Hard cap to limit spend. Extra leads will be returned without AI output.", "default": 50 }, "aiConcurrency": { "title": "AI concurrency", "type": "integer", "description": "How many email packs to generate in parallel.", "default": 3 }, "aiEnrichmentEnabled": { "title": "AI generation enabled", "type": "boolean", "description": "If disabled, the Actor will only validate input and return a report.", "default": true }, "llmApiStyle": { "title": "LLM API style", "enum": [ "ollama", "openai" ], "type": "string", "description": "Which API format to use: 'ollama' (native /api/chat) or 'openai' (OpenAI-compatible /v1/chat/completions).", "default": "ollama" }, "llmBaseUrl": { "title": "LLM base URL", "type": "string", "description": "Base URL for the LLM API. For local Ollama: http://localhost:11434. For Ollama Cloud, set the Cloud base URL here or via LLM_BASE_URL env var.", "default": "http://localhost:11434" }, "llmApiKey": { "title": "LLM API key (optional)", "type": "string", "description": "API key for the LLM provider. Prefer setting LLM_API_KEY (or OLLAMA_API_KEY) as an Actor secret instead of passing it in input." }, "llmModel": { "title": "LLM model", "type": "string", "description": "Model used for generation. 
Manage this via LLM_MODEL/OLLAMA_MODEL env vars if you prefer.", "default": "llama3.1:8b" }, "llmChatCompletionsUrl": { "title": "LLM chat completions URL (advanced)", "type": "string", "description": "Optional full URL override for OpenAI-compatible chat completions (e.g., https://.../v1/chat/completions). Takes precedence over llmBaseUrl when llmApiStyle='openai'." }, "aiMaxOutputTokens": { "title": "AI max output tokens", "type": "integer", "description": "Maximum output tokens requested per lead.", "default": 900 }, "aiTemperature": { "title": "AI temperature", "type": "number", "description": "Lower values are more deterministic.", "default": 0.4 }, "apifyPpeEnabled": { "title": "Pay per event (PPE) enabled", "type": "boolean", "description": "Enable if monetized with Apify pay-per-event. Disable for local development.", "default": true }, "apifyPpeEventName": { "title": "PPE event name", "type": "string", "description": "Event to charge per generated email pack.", "default": "email-pack" }, "apifyPpeChargeOn": { "title": "PPE charge timing", "enum": [ "success", "attempt" ], "type": "string", "description": "When to charge: 'success' (default) or 'attempt'.", "default": "success" }, "apifyPpeStopOnLimit": { "title": "Stop when user limit reached", "type": "boolean", "description": "If enabled, stops further generations when the user's max cost limit is reached.", "default": true } } }, "runsResponseSchema": { "type": "object", "properties": { "data": { "type": "object", "properties": { "id": { "type": "string" }, "actId": { "type": "string" }, "userId": { "type": "string" }, "startedAt": { "type": "string", "format": "date-time", "example": "2025-01-08T00:00:00.000Z" }, "finishedAt": { "type": "string", "format": "date-time", "example": "2025-01-08T00:00:00.000Z" }, "status": { "type": "string", "example": "READY" }, "meta": { "type": "object", "properties": { "origin": { "type": "string", "example": "API" }, "userAgent": { "type": "string" } } }, "stats": { 
"type": "object", "properties": { "inputBodyLen": { "type": "integer", "example": 2000 }, "rebootCount": { "type": "integer", "example": 0 }, "restartCount": { "type": "integer", "example": 0 }, "resurrectCount": { "type": "integer", "example": 0 }, "computeUnits": { "type": "integer", "example": 0 } } }, "options": { "type": "object", "properties": { "build": { "type": "string", "example": "latest" }, "timeoutSecs": { "type": "integer", "example": 300 }, "memoryMbytes": { "type": "integer", "example": 1024 }, "diskMbytes": { "type": "integer", "example": 2048 } } }, "buildId": { "type": "string" }, "defaultKeyValueStoreId": { "type": "string" }, "defaultDatasetId": { "type": "string" }, "defaultRequestQueueId": { "type": "string" }, "buildNumber": { "type": "string", "example": "1.0.0" }, "containerUrl": { "type": "string" }, "usage": { "type": "object", "properties": { "ACTOR_COMPUTE_UNITS": { "type": "integer", "example": 0 }, "DATASET_READS": { "type": "integer", "example": 0 }, "DATASET_WRITES": { "type": "integer", "example": 0 }, "KEY_VALUE_STORE_READS": { "type": "integer", "example": 0 }, "KEY_VALUE_STORE_WRITES": { "type": "integer", "example": 1 }, "KEY_VALUE_STORE_LISTS": { "type": "integer", "example": 0 }, "REQUEST_QUEUE_READS": { "type": "integer", "example": 0 }, "REQUEST_QUEUE_WRITES": { "type": "integer", "example": 0 }, "DATA_TRANSFER_INTERNAL_GBYTES": { "type": "integer", "example": 0 }, "DATA_TRANSFER_EXTERNAL_GBYTES": { "type": "integer", "example": 0 }, "PROXY_RESIDENTIAL_TRANSFER_GBYTES": { "type": "integer", "example": 0 }, "PROXY_SERPS": { "type": "integer", "example": 0 } } }, "usageTotalUsd": { "type": "number", "example": 0.00005 }, "usageUsd": { "type": "object", "properties": { "ACTOR_COMPUTE_UNITS": { "type": "integer", "example": 0 }, "DATASET_READS": { "type": "integer", "example": 0 }, "DATASET_WRITES": { "type": "integer", "example": 0 }, "KEY_VALUE_STORE_READS": { "type": "integer", "example": 0 }, "KEY_VALUE_STORE_WRITES": { 
"type": "number", "example": 0.00005 }, "KEY_VALUE_STORE_LISTS": { "type": "integer", "example": 0 }, "REQUEST_QUEUE_READS": { "type": "integer", "example": 0 }, "REQUEST_QUEUE_WRITES": { "type": "integer", "example": 0 }, "DATA_TRANSFER_INTERNAL_GBYTES": { "type": "integer", "example": 0 }, "DATA_TRANSFER_EXTERNAL_GBYTES": { "type": "integer", "example": 0 }, "PROXY_RESIDENTIAL_TRANSFER_GBYTES": { "type": "integer", "example": 0 }, "PROXY_SERPS": { "type": "integer", "example": 0 } } } } } } } } }}
OpenAPI is a standard for designing and describing RESTful APIs, allowing developers to define API structure, endpoints, and data formats in a machine-readable way. It simplifies API development, integration, and documentation.
OpenAPI is effective when used with AI agents and GPTs by standardizing how these systems interact with various APIs, for reliable integrations and efficient communication.
By defining machine-readable API specifications, OpenAPI allows AI models like GPTs to understand and use varied data sources, improving accuracy. This accelerates development, reduces errors, and provides context-aware responses, making OpenAPI a core component for AI applications.
You can download the OpenAPI definitions for Cold Email Personalizer from the options below:
If you’d like to learn more about how OpenAPI powers GPTs, read our blog post.
You can also check out our other API clients: