|
3 | 3 | import os
|
4 | 4 | import openai
|
5 | 5 | from langchain.prompts import PromptTemplate
|
6 |
| -from langchain.llms import OpenAI |
7 | 6 | from langchain.llms.openai import AzureOpenAI
|
8 | 7 |
|
# Function-app entry point for the Azure Functions v2 Python programming
# model; the @app.* decorators below register routes on this instance.
# NOTE(review): relies on `import azure.functions as func` above this hunk.
app = func.FunctionApp()
|
10 | 9 |
|
| 10 | + |
@app.function_name(name='ask')
@app.route(route='ask', auth_level='anonymous', methods=['POST'])
def main(req):
    """HTTP trigger: answer a user prompt with an Azure OpenAI LLM.

    The prompt is taken from the query string (``?prompt=``) or, failing
    that, from the ``prompt`` key of the JSON POST body. The answer is
    produced by a LangChain ``LLMChain`` backed by ``AzureOpenAI``.

    Args:
        req: the incoming HTTP request (azure.functions.HttpRequest).

    Returns:
        The LLM completion text for the prompt.

    Raises:
        RuntimeError: if no prompt was supplied, or if the
            ``AZURE_OPENAI_KEY`` environment variable is not set.
    """
    # Prefer the query-string parameter; fall back to the JSON body.
    prompt = req.params.get('prompt')
    if not prompt:
        try:
            req_body = req.get_json()
        except ValueError:
            # Body was absent or not valid JSON; suppress the JSON
            # traceback — this is a deliberate input-validation error.
            raise RuntimeError("prompt data must be set in POST.") from None
        prompt = req_body.get('prompt')
        if not prompt:
            raise RuntimeError("prompt data must be set in POST.")

    # Fail fast BEFORE reading any config: the original performed this
    # check only after the key had already been fetched.
    if 'AZURE_OPENAI_KEY' not in os.environ:
        raise RuntimeError("No 'AZURE_OPENAI_KEY' env var set.")

    # init OpenAI: replace these with your own values, either in env vars
    AZURE_OPENAI_KEY = os.environ['AZURE_OPENAI_KEY']
    AZURE_OPENAI_ENDPOINT = os.environ.get("AZURE_OPENAI_ENDPOINT")
    AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.environ.get(
        "AZURE_OPENAI_CHATGPT_DEPLOYMENT") or "chat"

    # configure azure openai for langchain and/or llm
    openai.api_key = AZURE_OPENAI_KEY
    # endpoint looks like https://YOUR_RESOURCE_NAME.openai.azure.com/
    openai.api_base = AZURE_OPENAI_ENDPOINT
    openai.api_type = 'azure'

    # this may change in the future; for langchain, set this version in
    # environment variables using OPENAI_API_VERSION
    openai.api_version = '2023-05-15'

    logging.info('Using Langchain')

    llm = AzureOpenAI(
        deployment_name=AZURE_OPENAI_CHATGPT_DEPLOYMENT,
        temperature=0.3,
        openai_api_key=AZURE_OPENAI_KEY,
    )
    llm_prompt = PromptTemplate(
        input_variables=["human_prompt"],
        template="The following is a conversation with an AI assistant. " +
        "The assistant is helpful.\n\n" +
        "A:How can I help you today?\nHuman: {human_prompt}?",
    )
    # Imported lazily so the (heavy) chains module is only loaded on use.
    from langchain.chains import LLMChain
    chain = LLMChain(llm=llm, prompt=llm_prompt)
    return chain.run(prompt)
0 commit comments