diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml
index 21d08f8..8ab90d9 100644
--- a/.github/workflows/push.yml
+++ b/.github/workflows/push.yml
@@ -14,8 +14,8 @@ env:
   BOT_CURSOR: ${{ vars.BOT_CURSOR }}
   DYNAMODB_TABLE_NAME: ${{ vars.DYNAMODB_TABLE_NAME }}
   KNOWLEDGE_BASE_ID: ${{ vars.KNOWLEDGE_BASE_ID }}
-  MODEL_ID_IMAGE: ${{ vars.MODEL_ID_IMAGE }}
   MODEL_ID_TEXT: ${{ vars.MODEL_ID_TEXT }}
+  PERSONAL_MESSAGE: ${{ vars.PERSONAL_MESSAGE }}
   SYSTEM_MESSAGE: ${{ vars.SYSTEM_MESSAGE }}

   AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
@@ -56,10 +56,10 @@ jobs:
           echo "BOT_CURSOR=${BOT_CURSOR}" >> .env
           echo "DYNAMODB_TABLE_NAME=${DYNAMODB_TABLE_NAME}" >> .env
           echo "KNOWLEDGE_BASE_ID=${KNOWLEDGE_BASE_ID}" >> .env
-          echo "MODEL_ID_IMAGE=${MODEL_ID_IMAGE}" >> .env
           echo "MODEL_ID_TEXT=${MODEL_ID_TEXT}" >> .env
           echo "SLACK_BOT_TOKEN=${SLACK_BOT_TOKEN}" >> .env
           echo "SLACK_SIGNING_SECRET=${SLACK_SIGNING_SECRET}" >> .env
+          echo "PERSONAL_MESSAGE=${PERSONAL_MESSAGE}" >> .env
           echo "SYSTEM_MESSAGE=${SYSTEM_MESSAGE}" >> .env

       - name: Deploy to AWS Lambda 🚀
diff --git a/handler.py b/handler.py
index 565da86..912cbd6 100644
--- a/handler.py
+++ b/handler.py
@@ -39,6 +39,9 @@
 ALLOWED_CHANNEL_IDS = os.environ.get("ALLOWED_CHANNEL_IDS", "None")

 # Set up System messages
+PERSONAL_MESSAGE = os.environ.get(
+    "PERSONAL_MESSAGE", "당신은 친절하고 전문적인 AI 비서 입니다."
+)
 SYSTEM_MESSAGE = os.environ.get("SYSTEM_MESSAGE", "None")

 MAX_LEN_SLACK = int(os.environ.get("MAX_LEN_SLACK", 3000))
@@ -304,16 +307,14 @@ def conversation(say: Say, thread_ts, query, channel, client_msg_id):
     latest_ts = result["ts"]

     prompts = []
-    prompts.append(
-        "Human: You are a advisor AI system, and provides answers to questions by using fact based and statistical information when possible."
-    )
-    prompts.append(
-        "If you don't know the answer, just say that you don't know, don't try to make up an answer."
-    )
+    prompts.append("Human: {}".format(PERSONAL_MESSAGE))
+    prompts.append("답변을 모르면 모른다고 하세요. 답을 지어내려고 하지 마세요.")

     if SYSTEM_MESSAGE != "None":
         prompts.append(SYSTEM_MESSAGE)

+    prompts.append(" 태그로 감싸진 질문에 답변을 제공하세요.")
+
     try:
         # Get the knowledge base contexts
         if KNOWLEDGE_BASE_ID != "None":
@@ -322,7 +323,7 @@ def conversation(say: Say, thread_ts, query, channel, client_msg_id):
             contexts = invoke_knowledge_base(query)

             prompts.append(
-                "Use the following pieces of information to provide a concise answer to the question enclosed in tags."
+                " 에 정보가 제공 되면, 해당 정보를 사용하여 답변해 주세요."
             )
             prompts.append("")
             prompts.append("\n\n".join(contexts))
@@ -334,9 +335,12 @@ def conversation(say: Say, thread_ts, query, channel, client_msg_id):

         contexts = conversations_replies(channel, thread_ts, client_msg_id)

-        prompts.append("")
+        prompts.append(
+            " 에 정보가 제공 되면, 대화 기록을 참고하여 답변해 주세요."
+        )
+        prompts.append("")
         prompts.append("\n\n".join(contexts))
-        prompts.append("")
+        prompts.append("")

     # Add the question to the prompts
     prompts.append("")
@@ -345,7 +349,6 @@ def conversation(say: Say, thread_ts, query, channel, client_msg_id):
     prompts.append("")
     prompts.append("")

-    # prompts.append("The response should be specific and use statistics or numbers when possible.")
     prompts.append("Assistant:")

     # Combine the prompts
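For orientation, below is a minimal sketch of the prompt that conversation() assembles after this change. It is not the repository's exact code: the standalone build_prompt helper is invented for illustration, and the angle-bracket wrapper tags (<information>, <history>, <question>) do not appear verbatim in the diff above, so those tag names are assumptions inferred from the instruction strings, which refer to answering the question wrapped in tags, using information provided in a tagged block, and consulting the tagged conversation history. English glosses of the Korean prompt strings are given in the comments.

import os

# Default persona, as in the diff; roughly "You are a kind and professional AI assistant."
PERSONAL_MESSAGE = os.environ.get(
    "PERSONAL_MESSAGE", "당신은 친절하고 전문적인 AI 비서 입니다."
)
SYSTEM_MESSAGE = os.environ.get("SYSTEM_MESSAGE", "None")


def build_prompt(query, kb_contexts=None, history=None):
    """Assemble a Claude-style Human/Assistant prompt (illustrative only)."""
    prompts = [
        "Human: {}".format(PERSONAL_MESSAGE),
        # "If you don't know the answer, say you don't know. Don't make one up."
        "답변을 모르면 모른다고 하세요. 답을 지어내려고 하지 마세요.",
    ]
    if SYSTEM_MESSAGE != "None":
        prompts.append(SYSTEM_MESSAGE)

    # "Provide an answer to the question wrapped in <question> tags." (tag name assumed)
    prompts.append("<question> 태그로 감싸진 질문에 답변을 제공하세요.")

    if kb_contexts:
        # "If information is provided in <information>, use it to answer." (tag name assumed)
        prompts.append("<information> 에 정보가 제공 되면, 해당 정보를 사용하여 답변해 주세요.")
        prompts.append("<information>")
        prompts.append("\n\n".join(kb_contexts))
        prompts.append("</information>")
    elif history:
        # "If information is provided in <history>, consult the conversation log." (tag name assumed)
        prompts.append("<history> 에 정보가 제공 되면, 대화 기록을 참고하여 답변해 주세요.")
        prompts.append("<history>")
        prompts.append("\n\n".join(history))
        prompts.append("</history>")

    # The question itself, followed by the Assistant turn marker.
    prompts.append("<question>")
    prompts.append(query)
    prompts.append("</question>")
    prompts.append("")
    prompts.append("Assistant:")
    return "\n".join(prompts)


if __name__ == "__main__":
    print(build_prompt("How do I deploy this bot?", history=["Q: ...", "A: ..."]))

With this structure, PERSONAL_MESSAGE (a new GitHub Actions repository variable, written into .env and passed to the Lambda environment) sets the bot's persona on the Human line, while SYSTEM_MESSAGE remains optional and is only appended when it is not "None".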