Skip to content

Commit d57c942

Browse files
committed
Added a sample lambda function.
1 parent 35a6f97 commit d57c942

File tree

1 file changed

+48
-0
lines changed

1 file changed

+48
-0
lines changed

lambda_function.py

+48
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,48 @@
import datetime
import json
import os

import openai
5+
def query_completion(prompt: str, engine: str = 'text-davinci-003', temperature: float = 0.5, max_tokens: int = 1500, top_p: float = 1, frequency_penalty: float = 0.5, presence_penalty: float = 0.2) -> object:
    """
    Query the OpenAI GPT-3 Completions API.

    Parameters
    ----------
    prompt : str
        The text prompt to complete.
    engine : str
        Completions engine name (default ``text-davinci-003``).
    temperature : float
        Sampling temperature.
    max_tokens : int
        Upper bound on tokens requested for the completion; the effective
        budget is reduced so prompt + answer fit the context window.
    top_p, frequency_penalty, presence_penalty : float
        Standard Completions API sampling parameters.

    Returns
    -------
    object
        The raw API response object from ``openai.Completion.create``.
    """
    # Rough heuristic: ~1.6 tokens per whitespace-separated word.
    # TODO(review): a real tokenizer (e.g. tiktoken) would be more accurate.
    estimated_prompt_tokens = int(len(prompt.split()) * 1.6)
    # Keep prompt + completion within the model's 4096-token context window,
    # and never request a non-positive completion budget (the API rejects it).
    completion_budget = max(1, min(4096 - estimated_prompt_tokens, max_tokens))
    response = openai.Completion.create(
        engine=engine,
        prompt=prompt,
        temperature=temperature,
        max_tokens=completion_budget,
        top_p=top_p,
        frequency_penalty=frequency_penalty,
        presence_penalty=presence_penalty
    )
    return response
21+
22+
def lambda_handler(event, context):
    '''AWS Lambda entry point.

    Expects ``event['body']`` to be a JSON string containing:
      - prompt: text of an OpenAI prompt

    Returns an API-Gateway-style dict with ``statusCode``, ``headers``
    and the stripped completion text as ``body``.
    '''
    # Never hard-code secrets: read the key from the Lambda environment.
    # Falls back to the original placeholder so behaviour is unchanged
    # when OPENAI_API_KEY is not configured.
    openai.api_key = os.environ.get("OPENAI_API_KEY", "YOUR_KEY_HERE")

    print("Init:")
    print(datetime.datetime.now())
    print("Event:")
    print(event)

    # event['body'] is a JSON-encoded string (API Gateway proxy format);
    # a missing/invalid body or prompt raises and surfaces as a 500.
    body = json.loads(event['body'])
    prompt = body['prompt']

    response = query_completion(prompt)
    response_text = response['choices'][0]['text'].strip()

    return {
        "statusCode": 200,
        "headers": {},
        "body": response_text,
    }

0 commit comments

Comments
 (0)