
Open AI Completion
Deprecated
Pricing
Pay per usage
Go to Store

Open AI Completion
Deprecated
Provides a simple but powerful text-in, text-out interface to any OpenAI models. You input some text as a prompt, and the model will generate a text completion that attempts to match whatever context or pattern you gave it. https://beta.openai.com/docs/guides/completion
0.0 (0)
Pricing
Pay per usage
0
Total users
16
Monthly users
1
Last modified
4 years ago
.gitignore
# This file tells Git which files shouldn't be added to source control
Dockerfile
# First, specify the base Docker image.
# You can see the Docker images from Apify at https://hub.docker.com/r/apify/.
# You can also use any other image from Docker Hub.
FROM apify/actor-python:3.9

# Second, copy just requirements.txt into the actor image,
# since it should be the only file that affects "pip install" in the next step,
# in order to speed up the build
COPY requirements.txt ./

# Install the packages specified in requirements.txt,
# Print the installed Python version, pip version
# and all installed packages with their versions for debugging
RUN echo "Python version:" \
 && python --version \
 && echo "Pip version:" \
 && pip --version \
 && echo "Installing dependencies from requirements.txt:" \
 && pip install -r requirements.txt \
 && echo "All installed Python packages:" \
 && pip freeze

# Next, copy the remaining files and directories with the source code.
# Since we do this after installing the dependencies, quick build will be really fast
# for most source file changes.
COPY . ./

# Specify how to launch the source code of your actor.
# By default, the main.py file is run
CMD python3 main.py
INPUT_SCHEMA.json
{ "title": "Input schema for the example_python actor.", "type": "object", "schemaVersion": 1, "properties": { "api_key": { "title": "API key", "type": "string", "description": "Enter [OpenAI API Key](https://beta.openai.com/docs/developer-quickstart/your-api-keys)", "editor": "textfield" }, "prompt": { "title": "Prompt", "type": "array", "description": "Enter OpenAI prompt message", "editor": "stringList", "minItems": 1 }, "engine": { "title": "Engine", "type": "string", "description": "Select <a href='https://beta.openai.com/docs/api-reference/engines'>OpenAI engine</a>", "editor": "select", "default": "davinci", "prefill": "davinci", "enum": ["davinci","curie","babbage","ada","davinci-instruct-beta","curie-instruct-beta", "davinci-codex", "cushman-codex", "content-filter-alpha"], "enumTitles": ["Base: DaVinci","Base: Curie", "Base: Babbage", "Base: Ada", "Instruct: DaVinci", "Instruct: Curie", "Codex: DaVinci", "Codex: Cushman", "Content filter"] }, "temperature": { "title": "Temperature", "type": "string", "description": "What sampling temperature to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.<br />We generally recommend altering this or top_p but not both.", "editor": "textfield", "default": "0" }, "max_tokens": { "title": "Max. tokens", "type": "integer", "description": "The maximum number of tokens to generate in the completion.<br />The token count of your prompt plus max_tokens cannot exceed the model's context length. Most models have a context length of 2048 tokens (except davinci-codex, which supports 4096).", "default": 16, "maximum": 4096, "minimum": 1 }, "top_p": { "title": "Top p", "type": "string", "description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.<br />We generally recommend altering this or temperature but not both.", "editor": "textfield", "default": "1" }, "frequency_penalty": { "title": "Frequency penalty", "type": "string", "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.", "editor": "textfield", "default": "0" }, "presence_penalty": { "title": "Presence penalty", "type": "string", "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.", "editor": "textfield", "default": "0" }, "stop": { "title": "Stop sequence", "type": "array", "description": "Define one or more sequences that when generated force GPT-3 to stop.", "editor": "json", "prefill": ["\n"], "default": ["\n"] } }, "required": ["api_key","prompt"]}
apify.json
{ "name": "openai-completion-actor", "version": "0.0", "buildTag": "latest", "env": null}
main.py
"""Apify actor entry point: runs OpenAI text completions for a list of prompts.

Reads the actor input from the default key-value store, sends each prompt to
the OpenAI Completion endpoint with the shared generation parameters, then
stores the raw choices under the 'OUTPUT' record and pushes prompt/response
pairs to the default dataset.
"""
import json
import os

import openai
from apify_client import ApifyClient


if __name__ == '__main__':
    # Main Apify API client, configured from the environment the platform injects.
    client = ApifyClient(os.environ['APIFY_TOKEN'], api_url=os.environ['APIFY_API_BASE_URL'])

    # Sub-client for the actor's default key-value store (holds INPUT/OUTPUT records).
    default_kv_store_client = client.key_value_store(os.environ['APIFY_DEFAULT_KEY_VALUE_STORE_ID'])

    # Fetch the actor input record and echo it for debugging.
    actor_input = default_kv_store_client.get_record(os.environ['APIFY_INPUT_KEY'])['value']
    print('Actor input:')
    print(json.dumps(actor_input, indent=2))

    # Authenticate against OpenAI with the user-supplied key
    # (https://beta.openai.com/docs/developer-quickstart/your-api-keys).
    openai.api_key = actor_input["api_key"]

    completion_choices = []
    dataset_items = []

    # Run every prompt through the completion endpoint, reusing the same
    # generation parameters for each one. Numeric parameters arrive as
    # strings from the input schema, hence the float() conversions.
    for prompt_text in actor_input['prompt']:
        completion = openai.Completion.create(
            engine=actor_input['engine'],
            prompt=prompt_text,
            temperature=float(actor_input['temperature']),
            max_tokens=actor_input['max_tokens'],
            top_p=float(actor_input['top_p']),
            frequency_penalty=float(actor_input['frequency_penalty']),
            presence_penalty=float(actor_input['presence_penalty']),
            stop=actor_input['stop'],
        )
        first_choice = completion.choices[0]
        completion_choices.append(first_choice)
        dataset_items.append({
            "prompt": prompt_text,
            "response": first_choice.text,
        })

    # Store the raw first choices under the 'OUTPUT' key-value store record.
    default_kv_store_client.set_record('OUTPUT', completion_choices)

    # Push the prompt/response pairs to the actor's default dataset.
    default_dataset_client = client.dataset(os.environ['APIFY_DEFAULT_DATASET_ID'])
    default_dataset_client.push_items(dataset_items)
requirements.txt
# Add your dependencies here.
# See https://pip.pypa.io/en/latest/cli/pip_install/#requirements-file-format
# for how to format them
openai