OpenAI Completion
Deprecated
View all Actors
This Actor is unavailable because the developer has decided to deprecate it. Would you like to try a similar Actor instead?
See alternative Actors
OpenAI Completion
tkapler/openai-completion-actor
Provides a simple but powerful text-in, text-out interface to any OpenAI model. You input some text as a prompt, and the model will generate a text completion that attempts to match whatever context or pattern you gave it. https://beta.openai.com/docs/guides/completion
.gitignore
# This file tells Git which files shouldn't be added to source control
Dockerfile
# First, specify the base Docker image.
# You can see the Docker images from Apify at https://hub.docker.com/r/apify/.
# You can also use any other image from Docker Hub.
FROM apify/actor-python:3.9

# Second, copy just requirements.txt into the actor image,
# since it should be the only file that affects "pip install" in the next step,
# in order to speed up the build
COPY requirements.txt ./

# Install the packages specified in requirements.txt,
# Print the installed Python version, pip version
# and all installed packages with their versions for debugging
RUN echo "Python version:" \
 && python --version \
 && echo "Pip version:" \
 && pip --version \
 && echo "Installing dependencies from requirements.txt:" \
 && pip install -r requirements.txt \
 && echo "All installed Python packages:" \
 && pip freeze

# Next, copy the remaining files and directories with the source code.
# Since we do this after installing the dependencies, quick build will be really fast
# for most source file changes.
COPY . ./

# Specify how to launch the source code of your actor.
# By default, the main.py file is run.
# Exec form (JSON array) is used so Python runs as PID 1 and receives
# SIGTERM directly on actor abort/migration; the shell form would wrap
# the process in /bin/sh, which does not forward signals.
CMD ["python3", "main.py"]
INPUT_SCHEMA.json
{
  "title": "Input schema for the example_python actor.",
  "type": "object",
  "schemaVersion": 1,
  "properties": {
    "api_key": {
      "title": "API key",
      "type": "string",
      "description": "Enter [OpenAI API Key](https://beta.openai.com/docs/developer-quickstart/your-api-keys)",
      "editor": "textfield"
    },
    "prompt": {
      "title": "Prompt",
      "type": "array",
      "description": "Enter OpenAI prompt message",
      "editor": "stringList",
      "minItems": 1
    },
    "engine": {
      "title": "Engine",
      "type": "string",
      "description": "Select <a href='https://beta.openai.com/docs/api-reference/engines'>OpenAI engine</a>",
      "editor": "select",
      "default": "davinci",
      "prefill": "davinci",
      "enum": ["davinci", "curie", "babbage", "ada", "davinci-instruct-beta", "curie-instruct-beta", "davinci-codex", "cushman-codex", "content-filter-alpha"],
      "enumTitles": ["Base: DaVinci", "Base: Curie", "Base: Babbage", "Base: Ada", "Instruct: DaVinci", "Instruct: Curie", "Codex: DaVinci", "Codex: Cushman", "Content filter"]
    },
    "temperature": {
      "title": "Temperature",
      "type": "string",
      "description": "What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.<br />We generally recommend altering this or top_p but not both.",
      "editor": "textfield",
      "default": "0"
    },
    "max_tokens": {
      "title": "Max. tokens",
      "type": "integer",
      "description": "The maximum number of tokens to generate in the completion.<br />The token count of your prompt plus max_tokens cannot exceed the model's context length. Most models have a context length of 2048 tokens (except davinci-codex, which supports 4096).",
      "default": 16,
      "maximum": 4096,
      "minimum": 1
    },
    "top_p": {
      "title": "Top p",
      "type": "string",
      "description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.<br />We generally recommend altering this or temperature but not both.",
      "editor": "textfield",
      "default": "1"
    },
    "frequency_penalty": {
      "title": "Frequency penalty",
      "type": "string",
      "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
      "editor": "textfield",
      "default": "0"
    },
    "presence_penalty": {
      "title": "Presence penalty",
      "type": "string",
      "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
      "editor": "textfield",
      "default": "0"
    },
    "stop": {
      "title": "Stop sequence",
      "type": "array",
      "description": "Define one or more sequences that when generated force GPT-3 to stop.",
      "editor": "json",
      "prefill": ["\n"],
      "default": ["\n"]
    }
  },
  "required": ["api_key", "prompt"]
}
apify.json
{
  "name": "openai-completion-actor",
  "version": "0.0",
  "buildTag": "latest",
  "env": null
}
main.py
import json
import os

import openai

from apify_client import ApifyClient


# Run the main function of the script, if the script is executed directly
if __name__ == '__main__':
    # Initialize the main ApifyClient instance
    client = ApifyClient(os.environ['APIFY_TOKEN'], api_url=os.environ['APIFY_API_BASE_URL'])

    # Get the resource subclient for working with the default key-value store of the actor
    default_kv_store_client = client.key_value_store(os.environ['APIFY_DEFAULT_KEY_VALUE_STORE_ID'])

    # Get the value of the actor input and print it
    actor_input = default_kv_store_client.get_record(os.environ['APIFY_INPUT_KEY'])['value']
    print('Actor input:')
    print(json.dumps(actor_input, indent=2))

    # Get OpenAI API key (https://beta.openai.com/docs/developer-quickstart/your-api-keys).
    # 'api_key' and 'prompt' are the schema's required fields, so a hard lookup
    # (KeyError on absence) is the correct failure mode for them.
    openai.api_key = actor_input['api_key']

    responses = []
    dataset = []
    # Walk through all the prompts (using the same other parameters).
    # Optional fields fall back to the defaults declared in INPUT_SCHEMA.json,
    # because runs started via the API may omit them entirely.
    # temperature/top_p/penalties arrive as strings (the Apify input schema has
    # no float type), hence the float() conversions.
    for prompt in actor_input['prompt']:
        response = openai.Completion.create(
            engine=actor_input.get('engine', 'davinci'),
            prompt=prompt,
            temperature=float(actor_input.get('temperature', '0')),
            max_tokens=actor_input.get('max_tokens', 16),
            top_p=float(actor_input.get('top_p', '1')),
            frequency_penalty=float(actor_input.get('frequency_penalty', '0')),
            presence_penalty=float(actor_input.get('presence_penalty', '0')),
            stop=actor_input.get('stop', ['\n'])
        )
        responses.append(response.choices[0])
        dataset.append({
            "prompt": prompt,
            "response": response.choices[0].text
        })

    # Store the collected completion choices under the 'OUTPUT' key-value store record
    default_kv_store_client.set_record('OUTPUT', responses)

    # Get the resource subclient for working with the default dataset of the actor
    default_dataset_client = client.dataset(os.environ['APIFY_DEFAULT_DATASET_ID'])

    # Push responses to the default dataset
    default_dataset_client.push_items(dataset)
requirements.txt
# Add your dependencies here.
# See https://pip.pypa.io/en/latest/cli/pip_install/#requirements-file-format
# for how to format them
openai
Developer
Maintained by Community
Categories