Commit bbaf285

added spanish folder
1 parent 1b864fd commit bbaf285

16 files changed: 1006 additions, 0 deletions

spanish/chained_calls.py

Lines changed: 80 additions & 0 deletions
@@ -0,0 +1,80 @@
import os

import azure.identity
import openai
from dotenv import load_dotenv

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")

if API_HOST == "azure":
    token_provider = azure.identity.get_bearer_token_provider(
        azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
    )
    client = openai.AzureOpenAI(
        api_version=os.environ["AZURE_OPENAI_VERSION"],
        azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
        azure_ad_token_provider=token_provider,
    )
    MODEL_NAME = os.environ["AZURE_OPENAI_DEPLOYMENT"]
elif API_HOST == "ollama":
    client = openai.OpenAI(base_url=os.environ["OLLAMA_ENDPOINT"], api_key="nokeyneeded")
    MODEL_NAME = os.environ["OLLAMA_MODEL"]
elif API_HOST == "github":
    client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
    MODEL_NAME = os.environ["GITHUB_MODEL"]
else:
    client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
    MODEL_NAME = os.environ["OPENAI_MODEL"]

# Step 1: generate a one-paragraph explanation of how LLMs work
response = client.chat.completions.create(
    model=MODEL_NAME,
    temperature=0.7,
    messages=[{"role": "user", "content": "Explica cómo funcionan los LLM en un solo párrafo."}],
)
explanation = response.choices[0].message.content
print("Explicación: ", explanation)

# Step 2: ask the model, acting as an editor, for feedback on that explanation
response = client.chat.completions.create(
    model=MODEL_NAME,
    temperature=0.7,
    messages=[
        {
            "role": "user",
            "content": (
                "Eres un editor. Revisa la explicación y proporciona comentarios detallados sobre claridad, coherencia "
                "y cautivación (pero no la edites tú mismo):\n\n"
            )
            + explanation,
        }
    ],
)
feedback = response.choices[0].message.content
print("\n\nRetroalimentación: ", feedback)

# Step 3: revise the explanation using the feedback from step 2
response = client.chat.completions.create(
    model=MODEL_NAME,
    temperature=0.7,
    messages=[
        {
            "role": "user",
            "content": (
                "Revisa el artículo utilizando los siguientes comentarios, pero mantenlo a un solo párrafo."
                f"\nExplicación:\n{explanation}\n\nComentarios:\n{feedback}"
            ),
        }
    ],
)
final_article = response.choices[0].message.content
print("\n\nFinal Article: ", final_article)

spanish/chat.py

Lines changed: 50 additions & 0 deletions
@@ -0,0 +1,50 @@
import os

import azure.identity
import openai
from dotenv import load_dotenv

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")

if API_HOST == "azure":
    token_provider = azure.identity.get_bearer_token_provider(
        azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
    )
    client = openai.AzureOpenAI(
        api_version=os.environ["AZURE_OPENAI_VERSION"],
        azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
        azure_ad_token_provider=token_provider,
    )
    MODEL_NAME = os.environ["AZURE_OPENAI_DEPLOYMENT"]
elif API_HOST == "ollama":
    client = openai.OpenAI(base_url=os.environ["OLLAMA_ENDPOINT"], api_key="nokeyneeded")
    MODEL_NAME = os.environ["OLLAMA_MODEL"]
elif API_HOST == "github":
    client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
    MODEL_NAME = os.environ["GITHUB_MODEL"]
else:
    client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
    MODEL_NAME = os.environ["OPENAI_MODEL"]


response = client.chat.completions.create(
    model=MODEL_NAME,
    temperature=0.7,
    n=1,
    messages=[
        {"role": "system", "content": "Eres un asistente útil que hace muchas referencias a gatos y usa emojis."},
        {"role": "user", "content": "Escribe un haiku sobre un gato hambriento que quiere atún"},
    ],
)

print(f"Response from {API_HOST}: \n")
print(response.choices[0].message.content)

spanish/chat_async.py

Lines changed: 69 additions & 0 deletions
@@ -0,0 +1,69 @@
import asyncio
import os

import azure.identity
import openai
from dotenv import load_dotenv

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")

if API_HOST == "azure":
    token_provider = azure.identity.get_bearer_token_provider(
        azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
    )
    client = openai.AsyncAzureOpenAI(
        api_version=os.environ["AZURE_OPENAI_VERSION"],
        azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
        azure_ad_token_provider=token_provider,
    )
    MODEL_NAME = os.environ["AZURE_OPENAI_DEPLOYMENT"]
elif API_HOST == "ollama":
    client = openai.AsyncOpenAI(base_url=os.environ["OLLAMA_ENDPOINT"], api_key="nokeyneeded")
    MODEL_NAME = os.environ["OLLAMA_MODEL"]
elif API_HOST == "github":
    client = openai.AsyncOpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
    MODEL_NAME = os.environ["GITHUB_MODEL"]
else:
    client = openai.AsyncOpenAI(api_key=os.environ["OPENAI_KEY"])
    MODEL_NAME = os.environ["OPENAI_MODEL"]


async def generate_response(location):
    print("Generating response for", location)
    response = await client.chat.completions.create(
        model=MODEL_NAME,
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {
                "role": "user",
                "content": f"Name a single place I should visit on my trip to {location} and describe in one sentence",
            },
        ],
        temperature=1,
        max_tokens=400,
        top_p=0.95,
        frequency_penalty=0,
        presence_penalty=0,
        stop=None,
    )
    print("Got response for ", location)
    return response.choices[0].message.content


async def single():
    print(await generate_response("Tokyo"))


async def multiple():
    answers = await asyncio.gather(
        generate_response("Tokyo"),
        generate_response("Berkeley"),
        generate_response("Singapore"),
    )
    for answer in answers:
        print(answer, "\n")


asyncio.run(single())
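
As committed, the script exercises only single(); the multiple() coroutine is already wired for concurrent fan-out via asyncio.gather, so swapping the last line issues the three requests in parallel:

# Run the three location requests concurrently instead of one
asyncio.run(multiple())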

spanish/chat_history.py

Lines changed: 55 additions & 0 deletions
@@ -0,0 +1,55 @@
import os

import azure.identity
import openai
from dotenv import load_dotenv

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")

if API_HOST == "azure":
    token_provider = azure.identity.get_bearer_token_provider(
        azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
    )
    client = openai.AzureOpenAI(
        api_version=os.environ["AZURE_OPENAI_VERSION"],
        azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
        azure_ad_token_provider=token_provider,
    )
    MODEL_NAME = os.environ["AZURE_OPENAI_DEPLOYMENT"]
elif API_HOST == "ollama":
    client = openai.OpenAI(base_url=os.environ["OLLAMA_ENDPOINT"], api_key="nokeyneeded")
    MODEL_NAME = os.environ["OLLAMA_MODEL"]
elif API_HOST == "github":
    client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
    MODEL_NAME = os.environ["GITHUB_MODEL"]
else:
    client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
    MODEL_NAME = os.environ["OPENAI_MODEL"]


messages = [
    {"role": "system", "content": "I am a teaching assistant helping with Python questions for Berkeley CS 61A."},
]

while True:
    question = input("\nYour question: ")
    print("Sending question...")

    messages.append({"role": "user", "content": question})
    response = client.chat.completions.create(
        model=MODEL_NAME,
        messages=messages,
        temperature=1,
        max_tokens=400,
        top_p=0.95,
        frequency_penalty=0,
        presence_penalty=0,
        stop=None,
    )
    bot_response = response.choices[0].message.content
    messages.append({"role": "assistant", "content": bot_response})

    print("Answer: ")
    print(bot_response)

spanish/chat_history_stream.py

Lines changed: 61 additions & 0 deletions
@@ -0,0 +1,61 @@
import os

import azure.identity
import openai
from dotenv import load_dotenv

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")

if API_HOST == "azure":
    token_provider = azure.identity.get_bearer_token_provider(
        azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
    )
    client = openai.AzureOpenAI(
        api_version=os.environ["AZURE_OPENAI_VERSION"],
        azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
        azure_ad_token_provider=token_provider,
    )
    MODEL_NAME = os.environ["AZURE_OPENAI_DEPLOYMENT"]
elif API_HOST == "ollama":
    client = openai.OpenAI(base_url=os.environ["OLLAMA_ENDPOINT"], api_key="nokeyneeded")
    MODEL_NAME = os.environ["OLLAMA_MODEL"]
elif API_HOST == "github":
    client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
    MODEL_NAME = os.environ["GITHUB_MODEL"]
else:
    client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
    MODEL_NAME = os.environ["OPENAI_MODEL"]


messages = [
    {"role": "system", "content": "I am a large language model."},
]

while True:
    question = input("\nYour question: ")
    print("Sending question...")

    messages.append({"role": "user", "content": question})
    response = client.chat.completions.create(
        model=MODEL_NAME,
        messages=messages,
        temperature=1,
        max_tokens=400,
        top_p=0.95,
        frequency_penalty=0,
        presence_penalty=0,
        stop=None,
        stream=True,
    )

    print("\nAnswer: ")
    bot_response = ""
    for event in response:
        if event.choices and event.choices[0].delta.content:
            content = event.choices[0].delta.content
            print(content, end="", flush=True)
            bot_response += content
    print("\n")
    messages.append({"role": "assistant", "content": bot_response})

spanish/chat_langchain.py

Lines changed: 45 additions & 0 deletions
@@ -0,0 +1,45 @@
import os

import azure.identity
from dotenv import load_dotenv
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import AzureChatOpenAI, ChatOpenAI

# Setup the OpenAI client to use either Azure, OpenAI.com, or Ollama API
load_dotenv(override=True)
API_HOST = os.getenv("API_HOST")

if API_HOST == "azure":
    token_provider = azure.identity.get_bearer_token_provider(
        azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
    )
    llm = AzureChatOpenAI(
        azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
        azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT"],
        openai_api_version=os.environ["AZURE_OPENAI_VERSION"],
        azure_ad_token_provider=token_provider,
    )
elif API_HOST == "ollama":
    llm = ChatOpenAI(
        model_name=os.environ["OLLAMA_MODEL"],
        openai_api_base=os.environ["OLLAMA_ENDPOINT"],
        openai_api_key="nokeyneeded",  # Ollama needs no real key; matches the other scripts
    )
elif API_HOST == "github":
    llm = ChatOpenAI(
        model_name=os.environ["GITHUB_MODEL"],
        openai_api_base="https://models.inference.ai.azure.com",
        openai_api_key=os.environ["GITHUB_TOKEN"],
    )
else:
    llm = ChatOpenAI(model_name=os.environ["OPENAI_MODEL"], openai_api_key=os.environ["OPENAI_KEY"])


prompt = ChatPromptTemplate.from_messages(
    [("system", "Eres un asistente útil que hace muchas referencias a gatos y usa emojis."), ("user", "{input}")]
)
chain = prompt | llm
response = chain.invoke({"input": "escribe un haiku sobre un gato hambriento que quiere atún"})

print(f"Respuesta de {API_HOST}: \n")
print(response.content)
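
Since prompt | llm yields a reusable LangChain runnable, the same chain can be invoked again with a different input (hypothetical prompt, for illustration):

response = chain.invoke({"input": "escribe un poema corto sobre un gato dormilón"})
print(response.content)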
