
Commit 805cbe7

Authored Mar 3, 2025
Merge pull request #14 from pamelafox/chainedcalls
Change os.environ calls
2 parents (1ac52e2 + c734eb3) · commit 805cbe7

22 files changed, +51 -52 lines
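
The common change across these files replaces os.getenv(...) with os.environ[...] for settings the scripts cannot run without. A minimal sketch of the behavioral difference (the variable name is illustrative, not tied to any one file):

import os

# os.getenv returns None when the variable is unset, so a missing setting only
# surfaces later as a confusing downstream failure (e.g. api_key=None).
print(os.getenv("OPENAI_KEY"))

# os.environ[...] fails fast at startup with a KeyError that names the variable.
try:
    print(os.environ["OPENAI_KEY"])
except KeyError as missing:
    print(f"Missing required environment variable: {missing}")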
 

CHANGELOG.md (-13)
This file was deleted.

chained_calls.py (+2 -2)

@@ -35,8 +35,8 @@

else:

-    client = openai.OpenAI(api_key=os.getenv("OPENAI_KEY"))
-    MODEL_NAME = os.getenv("OPENAI_MODEL")
+    client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
+    MODEL_NAME = os.environ["OPENAI_MODEL"]


response = client.chat.completions.create(

chat.py (+4 -4)

@@ -30,13 +30,13 @@

elif API_HOST == "github":

-    client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.getenv("GITHUB_TOKEN"))
-    MODEL_NAME = os.getenv("GITHUB_MODEL")
+    client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
+    MODEL_NAME = os.environ["GITHUB_MODEL"]

else:

-    client = openai.OpenAI(api_key=os.getenv("OPENAI_KEY"))
-    MODEL_NAME = os.getenv("OPENAI_MODEL")
+    client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
+    MODEL_NAME = os.environ["OPENAI_MODEL"]


response = client.chat.completions.create(

chat_async.py (+2 -2)

@@ -29,8 +29,8 @@
    client = openai.AsyncOpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.getenv("GITHUB_TOKEN"))
    MODEL_NAME = os.getenv("GITHUB_MODEL")
else:
-    client = openai.AsyncOpenAI(api_key=os.getenv("OPENAI_KEY"))
-    MODEL_NAME = os.getenv("OPENAI_MODEL")
+    client = openai.AsyncOpenAI(api_key=os.environ["OPENAI_KEY"])
+    MODEL_NAME = os.environ["OPENAI_MODEL"]


async def generate_response(location):

chat_history.py (+2 -2)

@@ -28,8 +28,8 @@
    client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.getenv("GITHUB_TOKEN"))
    MODEL_NAME = os.getenv("GITHUB_MODEL")
else:
-    client = openai.OpenAI(api_key=os.getenv("OPENAI_KEY"))
-    MODEL_NAME = os.getenv("OPENAI_MODEL")
+    client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
+    MODEL_NAME = os.environ["OPENAI_MODEL"]


messages = [

chat_history_stream.py (+2 -2)

@@ -28,8 +28,8 @@
    client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.getenv("GITHUB_TOKEN"))
    MODEL_NAME = os.getenv("GITHUB_MODEL")
else:
-    client = openai.OpenAI(api_key=os.getenv("OPENAI_KEY"))
-    MODEL_NAME = os.getenv("OPENAI_MODEL")
+    client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
+    MODEL_NAME = os.environ["OPENAI_MODEL"]


messages = [

chat_langchain.py (+1 -1)

@@ -32,7 +32,7 @@
        openai_api_key=os.getenv("GITHUB_TOKEN"),
    )
else:
-    llm = ChatOpenAI(model_name=os.getenv("OPENAI_MODEL"), openai_api_key=os.getenv("OPENAI_KEY"))
+    llm = ChatOpenAI(model_name=os.environ["OPENAI_MODEL"], openai_api_key=os.environ["OPENAI_KEY"])


prompt = ChatPromptTemplate.from_messages(

chat_llamaindex.py (+1 -1)

@@ -35,7 +35,7 @@
        is_chat_model=True,
    )
else:
-    llm = OpenAI(model=os.getenv("OPENAI_MODEL"), api_key=os.getenv("OPENAI_KEY"))
+    llm = OpenAI(model=os.environ["OPENAI_MODEL"], api_key=os.environ["OPENAI_KEY"])

chat_msgs = [
    ChatMessage(

chat_pydanticai.py (+2 -2)

@@ -31,9 +31,9 @@
    model = OpenAIModel(os.environ["OPENAI_MODEL"], api_key=os.environ["OPENAI_KEY"])


-agent = Agent(model, system_prompt="Be concise: 1 sentence only.")
+agent = Agent(model, system_prompt="You are a helpful assistant that makes lots of cat references and uses emojis.")

-result = agent.run_sync("Where does 'hello world' come from?")
+result = agent.run_sync("Write a haiku about a hungry cat who wants tuna")

print(f"Response from {API_HOST}: \n")
print(result.data)

chat_safety.py (+2 -2)

@@ -28,8 +28,8 @@
    client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.getenv("GITHUB_TOKEN"))
    MODEL_NAME = os.getenv("GITHUB_MODEL")
else:
-    client = openai.OpenAI(api_key=os.getenv("OPENAI_KEY"))
-    MODEL_NAME = os.getenv("OPENAI_MODEL")
+    client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
+    MODEL_NAME = os.environ["OPENAI_MODEL"]

try:
    response = client.chat.completions.create(

chat_stream.py (+2 -2)

@@ -28,8 +28,8 @@
    client = openai.OpenAI(base_url="https://models.inference.ai.azure.com", api_key=os.getenv("GITHUB_TOKEN"))
    MODEL_NAME = os.getenv("GITHUB_MODEL")
else:
-    client = openai.OpenAI(api_key=os.getenv("OPENAI_KEY"))
-    MODEL_NAME = os.getenv("OPENAI_MODEL")
+    client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
+    MODEL_NAME = os.environ["OPENAI_MODEL"]


completion = client.chat.completions.create(

few_shot_examples.py (+2 -2)

@@ -35,8 +35,8 @@

else:

-    client = openai.OpenAI(api_key=os.getenv("OPENAI_KEY"))
-    MODEL_NAME = os.getenv("OPENAI_MODEL")
+    client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
+    MODEL_NAME = os.environ["OPENAI_MODEL"]


SYSTEM_MESSAGE = """

function_calling.py (+2 -2)

@@ -35,8 +35,8 @@

else:

-    client = openai.OpenAI(api_key=os.getenv("OPENAI_KEY"))
-    MODEL_NAME = os.getenv("OPENAI_MODEL")
+    client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
+    MODEL_NAME = os.environ["OPENAI_MODEL"]


tools = [

function_calling_call.py (+2 -2)

@@ -32,8 +32,8 @@
    MODEL_NAME = os.getenv("GITHUB_MODEL")

else:
-    client = openai.OpenAI(api_key=os.getenv("OPENAI_KEY"))
-    MODEL_NAME = os.getenv("OPENAI_MODEL")
+    client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
+    MODEL_NAME = os.environ["OPENAI_MODEL"]


def lookup_weather(city_name=None, zip_code=None):

function_calling_multiple.py (+2 -2)

@@ -35,8 +35,8 @@

else:

-    client = openai.OpenAI(api_key=os.getenv("OPENAI_KEY"))
-    MODEL_NAME = os.getenv("OPENAI_MODEL")
+    client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
+    MODEL_NAME = os.environ["OPENAI_MODEL"]


tools = [

http/.env.sample (+3 -2)

@@ -1,2 +1,3 @@
-AZURE_OPENAI_SERVICE=
-AUTH_TOKEN=
+SERVICE=
+DEPLOYMENT=
+TOKEN=
File renamed without changes.

http/chat_completion_ollama.http (+10)

@@ -0,0 +1,10 @@
+POST http://localhost:11434/v1/chat/completions
+Content-Type: application/json
+
+{
+    "model": "phi3.5:latest",
+    "messages": [{"role":"system","content":"You are an AI assistant that answers questions with short clear answers."},
+        {"role":"user","content":"How fast is the Prius V?"}],
+    "max_tokens": 800,
+    "temperature": 0.7
+}
File renamed without changes.
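
The new chat_completion_ollama.http request targets Ollama's OpenAI-compatible endpoint at http://localhost:11434/v1. For reference, a sketch of the same call through the openai Python package, assuming a local Ollama server with phi3.5 pulled (the api_key value is a placeholder; Ollama ignores it):

import openai

# Point the standard OpenAI client at the local Ollama server.
client = openai.OpenAI(base_url="http://localhost:11434/v1", api_key="nokeyneeded")

response = client.chat.completions.create(
    model="phi3.5:latest",
    messages=[
        {"role": "system", "content": "You are an AI assistant that answers questions with short clear answers."},
        {"role": "user", "content": "How fast is the Prius V?"},
    ],
    max_tokens=800,
    temperature=0.7,
)
print(response.choices[0].message.content)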

prompt_engineering.py (+7 -7)

@@ -35,19 +35,19 @@

else:

-    client = openai.OpenAI(api_key=os.getenv("OPENAI_KEY"))
-    MODEL_NAME = os.getenv("OPENAI_MODEL")
+    client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
+    MODEL_NAME = os.environ["OPENAI_MODEL"]


SYSTEM_MESSAGE = """
-I want you to act like Elmo from Sesame Street.
-I want you to respond and answer like Elmo using the tone, manner and vocabulary that Elmo would use.
-Do not write any explanations. Only answer like Elmo.
-You must know all of the knowledge of Elmo, and nothing more.
+I want you to act like Yoda from Star Wars.
+I want you to respond and answer like Yoda using the tone, manner and vocabulary that Yoda would use.
+Do not write any explanations. Only answer like Yoda.
+You must know all of the knowledge of Yoda, and nothing more.
"""

USER_MESSAGE = """
-Hi Elmo, how are you doing today?
+What is an LLM?
"""

response = client.chat.completions.create(

requirements.txt (+1)

@@ -6,3 +6,4 @@ langchain-openai
llama-index-llms-azure-openai
llama-index-llms-openai
llama-index-llms-openai-like
+pydantic-ai
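
With pydantic-ai added for the new chat_pydanticai.py demo, an existing environment can pick it up by reinstalling the pinned requirements (assuming pip inside an activated virtual environment):

python -m pip install -r requirements.txt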

retrieval_augmented_generation.py (+2 -2)

@@ -32,8 +32,8 @@
    MODEL_NAME = os.getenv("GITHUB_MODEL")

else:
-    client = openai.OpenAI(api_key=os.getenv("OPENAI_KEY"))
-    MODEL_NAME = os.getenv("OPENAI_MODEL")
+    client = openai.OpenAI(api_key=os.environ["OPENAI_KEY"])
+    MODEL_NAME = os.environ["OPENAI_MODEL"]


USER_MESSAGE = "how fast is the prius v?"
