This repository was archived by the owner on Oct 16, 2025. It is now read-only.
Merged
1 change: 0 additions & 1 deletion chat.py
@@ -34,7 +34,6 @@
 response = client.chat.completions.create(
     model=MODEL_NAME,
     temperature=0.7,
-    n=1,
     messages=[
         {"role": "system", "content": "You are a helpful assistant that makes lots of cat references and uses emojis."},
         {"role": "user", "content": "Write a haiku about a hungry cat who wants tuna"},
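Note that n defaults to 1 in the Chat Completions API, so removing it is behavior-preserving. A minimal sketch of the resulting call, assuming client and MODEL_NAME are set up earlier in the file (that setup is collapsed in the diff):

    response = client.chat.completions.create(
        model=MODEL_NAME,
        temperature=0.7,
        messages=[
            {"role": "system", "content": "You are a helpful assistant that makes lots of cat references and uses emojis."},
            {"role": "user", "content": "Write a haiku about a hungry cat who wants tuna"},
        ],
    )
    # Single choice, since n defaults to 1.
    print(response.choices[0].message.content)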
7 changes: 1 addition & 6 deletions chat_async.py
@@ -42,12 +42,7 @@ async def generate_response(location):
                 "content": f"Name a single place I should visit on my trip to {location} and describe in one sentence",
             },
         ],
-        temperature=1,
-        max_tokens=400,
-        top_p=0.95,
-        frequency_penalty=0,
-        presence_penalty=0,
-        stop=None,
+        temperature=0.7,
     )
     print("Got response for ", location)
     return response.choices[0].message.content
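For context, a hypothetical driver for generate_response (the surrounding code is collapsed in the diff): fanning several requests out concurrently is the point of the async variant.

    import asyncio

    async def main():
        # Hypothetical location list; any fan-out over awaitables works the same way.
        locations = ["Tokyo", "Lisbon", "Cape Town"]
        results = await asyncio.gather(*(generate_response(loc) for loc in locations))
        for location, answer in zip(locations, results):
            print(location, "->", answer)

    asyncio.run(main())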
7 changes: 1 addition & 6 deletions chat_history.py
@@ -40,12 +40,7 @@
 response = client.chat.completions.create(
     model=MODEL_NAME,
     messages=messages,
-    temperature=1,
-    max_tokens=400,
-    top_p=0.95,
-    frequency_penalty=0,
-    presence_penalty=0,
-    stop=None,
+    temperature=0.5,
 )
 bot_response = response.choices[0].message.content
 messages.append({"role": "assistant", "content": bot_response})
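The file's pattern, sketched under the assumption of a simple input loop (the loop itself is collapsed in the diff): each user turn and each assistant turn is appended to messages, so every call sees the full conversation so far.

    messages = [
        {"role": "system", "content": "You are a helpful assistant."},  # placeholder system prompt
    ]
    while True:
        question = input("\nYour question: ")
        messages.append({"role": "user", "content": question})
        response = client.chat.completions.create(
            model=MODEL_NAME,
            messages=messages,
            temperature=0.5,
        )
        bot_response = response.choices[0].message.content
        messages.append({"role": "assistant", "content": bot_response})
        print(bot_response)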
8 changes: 1 addition & 7 deletions chat_history_stream.py
@@ -40,13 +40,7 @@
 response = client.chat.completions.create(
     model=MODEL_NAME,
     messages=messages,
-    temperature=1,
-    max_tokens=400,
-    top_p=0.95,
-    frequency_penalty=0,
-    presence_penalty=0,
-    stop=None,
     stream=True,
+    temperature=0.7,
 )

 print("\nAnswer: ")
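With stream=True the call returns an iterator of chunks rather than a finished response, so the answer has to be accumulated before it can be appended to the history. A sketch of the consumption loop this file presumably uses (it is collapsed below the hunk):

    bot_response = ""
    for chunk in response:
        if chunk.choices and chunk.choices[0].delta.content:
            content = chunk.choices[0].delta.content
            print(content, end="", flush=True)
            bot_response += content
    messages.append({"role": "assistant", "content": bot_response})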
2 changes: 0 additions & 2 deletions chat_safety.py
@@ -32,8 +32,6 @@
 response = client.chat.completions.create(
     model=MODEL_NAME,
     temperature=0.7,
-    max_tokens=100,
-    n=1,
     messages=[
         {
             "role": "system",
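Both removals are low-risk: n=1 is already the API default, and dropping the tight max_tokens=100 cap means the safety demo's answer can no longer be cut off mid-sentence.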
6 changes: 2 additions & 4 deletions chat_stream.py
@@ -28,11 +28,9 @@
 MODEL_NAME = os.environ["OPENAI_MODEL"]


-completion = client.chat.completions.create(
+completion_stream = client.chat.completions.create(
     model=MODEL_NAME,
     temperature=0.7,
-    max_tokens=500,
-    n=1,
     messages=[
         {"role": "system", "content": "You are a helpful assistant that makes lots of cat references and uses emojis."},
         {"role": "user", "content": "please write a haiku about a hungry cat that wants tuna"},
@@ -41,7 +39,7 @@
 )

 print(f"Response from {API_HOST}: \n")
-for event in completion:
+for event in completion_stream:
     if event.choices:
         content = event.choices[0].delta.content
         if content:
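The rename from completion to completion_stream reflects the return type: with stream=True (presumably set in the collapsed lines of the call), create() yields chunk events whose text arrives in event.choices[0].delta.content rather than a single finished message. The loop's body, completed as a sketch since the diff truncates it:

    for event in completion_stream:
        if event.choices:
            content = event.choices[0].delta.content
            if content:
                print(content, end="", flush=True)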