Skip to content

Commit fcd5e8b

Browse files
committed
first CI
Just a little hack
1 parent 56434a7 commit fcd5e8b

10 files changed

+236
-1
lines changed

.DS_Store

6 KB
Binary file not shown.

.gitignore

+4
Original file line numberDiff line numberDiff line change
@@ -150,3 +150,7 @@ cython_debug/
150150
# and can be added to the global gitignore or merged into this file. For a more nuclear
151151
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
152152
#.idea/
153+
*.db
154+
*.py[cod]
155+
.web
156+
__pycache__/

README.md

+5-1
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,6 @@
11
# hello_reflex
2-
2+
3+
1. pip install -r requirements.txt
4+
2. Copy your .env file into the base folder
5+
3. cd app
6+
4. reflex run

app/app/__init__.py

Whitespace-only changes.

app/app/app.py

+84
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,84 @@
1+
import reflex as rx
2+
3+
from app import style
4+
from app.state import State
5+
6+
7+
8+
9+
def chat() -> rx.Component:
    """Render the conversation area.

    Currently shows the whole chat history as one text element; the
    per-message rendering below is kept (disabled) for later use.
    """
    # rx.foreach(
    #     State.chat_history,
    #     lambda messages: qa(messages[0], messages[1]),
    # )
    history_text = rx.text(State.chat_history, style=style.question_style)
    return rx.box(history_text)
17+
18+
19+
def action_bar() -> rx.Component:
    """Input row: a text field for the review plus an "Ask" button.

    The field is bound to State.question; the button triggers
    State.answer.
    """
    question_field = rx.input(
        value=State.question,
        placeholder="Put in a review...",
        on_change=State.set_question,
        style=style.input_style,
    )
    submit_button = rx.button(
        "Ask",
        on_click=State.answer,
        style=style.button_style,
    )
    return rx.hstack(question_field, submit_button)
33+
# Accent color for the file-upload UI (upload view not enabled yet; see
# State.handle_upload). Kept at module scope so a future upload() page can
# reuse it. The commented-out upload() scaffold that used this constant was
# removed as dead code — it remains recoverable from version control.
color = "rgb(107,99,246)"
69+
70+
def index() -> rx.Component:
    """Top-level page: the action bar stacked above the chat output."""
    return rx.container(action_bar(), chat())
75+
76+
77+
# Create the Reflex app, register the index page, and compile it.
# These statements run at import time and must stay in this order.
app = rx.App()
app.add_page(index)
# NOTE(review): app.compile() was deprecated/removed in newer Reflex
# releases — confirm the version pinned in requirements.txt still needs it.
app.compile()
80+
81+
82+
83+
84+

app/app/state.py

+108
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,108 @@
1+
# state.py
2+
import reflex as rx
3+
import os
4+
5+
from dotenv import load_dotenv
6+
7+
from langchain.prompts import PromptTemplate, FewShotPromptTemplate
8+
from langchain.memory import ConversationBufferMemory
9+
from ibm_watson_machine_learning.foundation_models import Model
10+
from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams
11+
from ibm_watson_machine_learning.foundation_models.extensions.langchain import WatsonxLLM
12+
from langchain.chains import LLMChain
13+
14+
15+
16+
class State(rx.State):
    """Reflex app state for the review-sentiment chat.

    Holds the question currently typed by the user and the rendered chat
    output, and sends reviews to a watsonx.ai llama-2 model to extract the
    emotions they express.
    """

    # The current question being asked.
    question: str

    # Rendered chat output. NOTE(review): this is annotated and used as a
    # single formatted string (see answer()), not the list of
    # (question, answer) tuples the original comment described.
    chat_history: str

    async def handle_upload(
        self, files: list[rx.UploadFile]
    ):
        """Handle the upload of file(s).

        Writes each uploaded file into the app's asset directory and
        records its filename.

        Args:
            files: The uploaded files.
        """
        for file in files:
            upload_data = await file.read()
            outfile = rx.get_asset_path(file.filename)

            # Save the file.
            with open(outfile, "wb") as file_object:
                file_object.write(upload_data)

            # Update the img var.
            # NOTE(review): `img` is never declared on this class, so this
            # append will fail at runtime — confirm and declare it as a
            # state var before enabling the upload UI.
            self.img.append(file.filename)
        #print(self.data)

    # config Watsonx.ai environment
    # NOTE: this block executes once, at class-definition (import) time.
    # The resulting names (api_key, creds, project_id, ...) become class
    # attributes and are read below via `self`. Importing this module
    # without a valid .env therefore raises immediately.
    load_dotenv()
    api_key = os.getenv("API_KEY", None)
    ibm_cloud_url = os.getenv("IBM_CLOUD_URL", None)
    project_id = os.getenv("PROJECT_ID", None)
    if api_key is None or ibm_cloud_url is None or project_id is None:
        # NOTE(review): message still says "notebook" although this runs in
        # an app — consider rewording.
        raise Exception("Ensure you copied the .env file that you created earlier into the same directory as this notebook")
    else:
        creds = {
            "url": ibm_cloud_url,
            "apikey": api_key
        }

    def send_to_watsonxai(self):
        """Send self.question to watsonx.ai and return the model's answer.

        Builds a llama-2-70b-chat model proxy with sampling parameters,
        wraps it as a LangChain LLM, and runs a one-shot prompt asking the
        model to list the emotions expressed in the review.

        Returns:
            The raw completion text from the LLM chain.
        """
        #assert not any(map(lambda prompt: len(prompt) < 1, prompts)), "make sure none of the prompts in the inputs prompts are empty"

        # Instantiate parameters for text generation
        model_params = {
            GenParams.DECODING_METHOD: 'sample',
            GenParams.MIN_NEW_TOKENS: 3,
            GenParams.MAX_NEW_TOKENS: 10,
            GenParams.RANDOM_SEED: 42,
            GenParams.TEMPERATURE: .1,
            GenParams.REPETITION_PENALTY: 2.0,
        }

        # Instantiate a model proxy object to send your requests.
        # creds/project_id are the class attributes set up above.
        model = Model(
            model_id="meta-llama/llama-2-70b-chat",
            params=model_params,
            credentials=self.creds,
            project_id=self.project_id)

        llm = WatsonxLLM(model)

        # One-shot prompt: a worked example, then the review to classify.
        template = """extract the emotions the reviewer expressed return answer as a comma separated list
Example: Review: I had relatives in the nursing home, which was understaffed, they have multiple needs, and they need a lot of care. They are understaffed and they need more than one staff to assist with caring for the residents. My client has sorosis of the liver, her mind is gone, patience is required to take care of your patient. Output: Sadness, Worry
Review text: '''{review}'''
Output:
"""

        prompt = PromptTemplate(
            input_variables=["review"], template=template
        )
        #memory = ConversationBufferMemory(memory_key="chat_history")

        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
            verbose=True,
            #memory=memory,
        )
        #print(memory.history)
        return(llm_chain.predict(review=self.question))

    def answer(self):
        """Classify the current question and store the result in chat_history."""
        # Our chatbot is not very smart right now...
        self.chat_history = ""
        self.chat_history = f"Review: {self.question} \n\n\n --------------------Sentiment: {self.send_to_watsonxai()}"

        # Clear the question input.
        self.question = ""
108+

app/app/style.py

+27
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
# style.py

# Shared visual constants for the chat UI.
shadow = "rgba(0, 0, 0, 0.15) 0px 2px 8px"
chat_margin = "20%"

# Base look applied to every chat bubble.
message_style = {
    "padding": "1em",
    "border_radius": "5px",
    "margin_y": "0.5em",
    "box_shadow": shadow,
    "max_width": "30em",
    "display": "inline-block",
}

# Question bubbles are pushed toward the right, answers toward the left,
# via opposing margins on top of the shared bubble style.
question_style = {**message_style, "bg": "#F5EFFE", "margin_left": chat_margin}
answer_style = {**message_style, "bg": "#DEEAFD", "margin_right": chat_margin}

# Action-bar widget styles.
input_style = {"border_width": "1px", "padding": "1em", "box_shadow": shadow}
button_style = {"bg": "#CEFFEE", "box_shadow": shadow}

app/assets/favicon.ico

14.7 KB
Binary file not shown.

app/rxconfig.py

+5
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
import reflex as rx
2+
3+
# Reflex project configuration: the app package is named "app"
# (matching the app/ directory that `reflex run` is started from).
config = rx.Config(
    app_name="app",
)

requirements.txt

+3
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
reflex
2+
ibm-watson-machine-learning >= 1.0.327
3+
langchain

0 commit comments

Comments
 (0)