Skip to content

Commit fc0339c

Browse files
committed
refactor: Update code to use prompt.content instead of prompt.prompt Fix installation library path
1 parent 41c071b commit fc0339c

File tree

8 files changed

+64
-20
lines changed

8 files changed

+64
-20
lines changed

โ€ŽREADME.md

Lines changed: 41 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -22,8 +22,14 @@ pip install git+https://github.com/unclecode/promptuner.git
2222

2323
### Usage
2424

25-
Here's a basic example of how to use promptuner:
25+
Here's a basic example of how to use promptuner. First make sure to set your Anthropic API key in the environment variable `ANTHROPIC_API_KEY`.
26+
```
27+
export ANTHROPIC_API_KEY=YOUR_ANTHROPIC_API_KEY
28+
```
29+
30+
I use Claude only for generating the prompt, which I found works better than other models; however, you may try other models as well — especially recent ones like `Llama3.1-70b` or `Llama3.1-405b`.
2631

32+
1. Create the Prompt
2733
```python
2834
from promptuner import Prompt
2935
from promptuner.decorators import *
@@ -50,9 +56,14 @@ prompt.train()
5056

5157
# Print the generated prompt template
5258
print("Generated Prompt Template:")
53-
print(prompt.prompt)
54-
prompt.save("data/email_analysis_prompt.json")
59+
print(prompt.content)
60+
prompt.save("email_analysis_prompt.json")
61+
```
62+
63+
2. Use the Prompt
64+
You may simply use the generated prompt: replace the variable placeholders with actual values and pass the prompt to your favorite model. Another way is to use the `promptuner` library to execute the prompt for you.
5565

66+
```python
5667
# Sample email content
5768
EMAIL_CONTENT = """
5869
@@ -71,10 +82,34 @@ John Doe
7182
Project Manager
7283
"""
7384

85+
prompt = Prompt.load("email_analysis_prompt.json")
7486
# Define class labels
7587
CLASS_LABELS = "Work-related, Personal, Spam, Urgent, Newsletter, Other"
7688

77-
# Use the prompt to analyze the email
89+
# First Method: Use the generated prompt directly
90+
new_prompt = prompt.content.replace("{{EMAIL_CONTENT}}", EMAIL_CONTENT)
91+
new_prompt = new_prompt.replace("{{CLASS_LABELS}}", CLASS_LABELS)
92+
import re

from openai import OpenAI
93+
client = OpenAI()
94+
95+
completion = client.chat.completions.create(
96+
model="gpt-4o",
97+
messages=[
98+
{"role": "system", "content": "You are a helpful assistant."},
99+
{"role": "user", "content": new_prompt}
100+
]
101+
)
102+
103+
answer = completion.choices[0].message.content
104+
tag = "analysis"
105+
pattern = f"<{tag}>(.*?)</{tag}>"
106+
match = re.search(pattern, answer, re.DOTALL)
107+
if match:
108+
result = match.group(1).strip()
109+
print("\nEmail Analysis Results:")
110+
print(result)
111+
112+
# Second Method: Use the promptuner library to execute the prompt
78113
response = prompt(
79114
variable_values={
80115
"EMAIL_CONTENT": EMAIL_CONTENT,
@@ -94,7 +129,9 @@ print("\nTags:")
94129
for tag, content in response['tags'].items():
95130
if tag != "analysis":
96131
print(f"<{tag}>\n{content}\n</{tag}>")
132+
97133
```
134+
98135
For more examples check the `docs/examples` folder.
99136

100137
## Stay Tuned

โ€Žpromptuner/__init__.py

Lines changed: 12 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ def __init__(self, task: str, variables: List[str] = None, metaprompt: str = "de
1616
self.task = task
1717
self.variables = variables or []
1818
self.decorators = []
19-
self.prompt = None
19+
self.content = None
2020
self.token_count = None
2121
self.model_name = model_name or MODEL_NAME
2222
self.answer_tag = kwargs.get("answer_tag", "result")
@@ -29,8 +29,12 @@ def __init__(self, task: str, variables: List[str] = None, metaprompt: str = "de
2929

3030
# Load metaprompt
3131
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
32+
# Load metaprompt
33+
package_dir = os.path.dirname(os.path.abspath(__file__))
34+
metaprompts_dir = os.path.join(package_dir, "metaprompts")
35+
metaprompt_file = os.path.join(metaprompts_dir, f"{metaprompt}.md")
3236
try:
33-
with open(os.path.join(__location__, "metaprompts", f"{metaprompt}.md"), "r") as file:
37+
with open(metaprompt_file, "r") as file:
3438
self.metaprompt = file.read()
3539
except FileNotFoundError:
3640
raise ValueError(f"Metaprompt file '{metaprompt}.md' not found")
@@ -78,16 +82,16 @@ def train(self, **kwargs):
7882
remove_empty_tags = lambda x: re.sub(r"<(\w+)></\1>$", "", x)
7983

8084
between_tags = extract_between_tags("Instructions", metaprompt_response)[0]
81-
self.prompt = remove_empty_tags(between_tags).strip()
85+
self.content = remove_empty_tags(between_tags).strip()
8286

8387
pattern = r"{(.*)}"
84-
self.variables = list(set(re.findall(pattern, self.prompt)))
88+
self.variables = list(set(re.findall(pattern, self.content)))
8589

8690
def run(self, variable_values: Dict[str, str], model_name: str = None, api_key: str = None, **kwargs) -> Union[str, Dict]:
8791
return self(variable_values, model_name, api_key, **kwargs)
8892

8993
def __call__(self, variable_values: Dict[str, str], model_name: str = None, api_key: str = None, **kwargs) -> Union[str, Dict]:
90-
if not self.prompt:
94+
if not self.content:
9195
raise ValueError("Prompt hasn't been trained yet. Call the train() method first.")
9296

9397
prompt = self.replace_variables(variable_values)
@@ -113,7 +117,7 @@ def __call__(self, variable_values: Dict[str, str], model_name: str = None, api_
113117
return {"answer": content, "tags": tags_contnet, "raw": response.choices[0].message.content}
114118

115119
def replace_variables(self, variable_values: Dict[str, str]) -> str:
116-
prompt_with_variables = self.prompt
120+
prompt_with_variables = self.content
117121
for variable, value in variable_values.items():
118122
if variable not in self.variables:
119123
continue
@@ -122,14 +126,14 @@ def replace_variables(self, variable_values: Dict[str, str]) -> str:
122126

123127
def save(self, path: str):
124128
with open(path, "w") as file:
125-
json.dump({"task": self.task, "prompt": self.prompt, "variables": self.variables}, file)
129+
json.dump({"task": self.task, "prompt": self.content, "variables": self.variables}, file)
126130

127131
@staticmethod
128132
def load(path: str) -> "Prompt":
129133
with open(path, "r") as file:
130134
data = json.load(file)
131135
prompt = Prompt(data["task"])
132-
prompt.prompt = data["prompt"]
136+
prompt.content = data["prompt"]
133137
prompt.variables = data["variables"]
134138
return prompt
135139

โ€Žpromptuner/docs/examples/email_classifier.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@
3737

3838
# Print the generated prompt template
3939
print("Generated Prompt Template:")
40-
print(prompt.prompt)
40+
print(prompt.content)
4141
prompt.save("data/email_analysis_prompt.json")
4242

4343
# Sample email content
@@ -90,4 +90,4 @@
9090
# Optionally, load the saved prompt
9191
# loaded_prompt = Prompt.load("data/email_analysis_prompt.json")
9292
# print("\nLoaded Prompt Template:")
93-
# print(loaded_prompt.prompt)
93+
# print(loaded_prompt.content)

โ€Žpromptuner/docs/examples/knowledge_graph.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@
3232

3333
# Print the generated prompt template
3434
print("Generated Prompt Template:")
35-
print(prompt.prompt)
35+
print(prompt.content)
3636
prompt.save("data/knowledge_graph_prompt.json")
3737

3838
# Sample passage

โ€Žpromptuner/docs/examples/summarizer.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818

1919
# Print the generated prompt template
2020
print("Generated Prompt Template:")
21-
print(prompt.prompt)
21+
print(prompt.content)
2222

2323
# Load a sample passage
2424
SAMPLE_PASSAGE = """
@@ -54,4 +54,4 @@
5454
# Optionally, load the saved prompt
5555
# loaded_prompt = Prompt.load("data/saved_prompt.json")
5656
# print("\nLoaded Prompt Template:")
57-
# print(loaded_prompt.prompt)
57+
# print(loaded_prompt.content)

โ€Žpromptuner/docs/quickstart.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818
# Create prompt
1919
promptuner = promptuner()
2020
prompt = promptuner(TASK, variables)
21-
print(prompt.prompt)
21+
print(prompt.content)
2222

2323
# Execute prompt
2424
print(prompt.replace_variables({"PASSAGE": SAMPLE_PASSAGE}))
@@ -32,4 +32,4 @@
3232
# Test saving and loading prompt
3333
prompt.save(__current__ / "prompt.json")
3434
prompt = Prompt.load(__current__ / "prompt.json")
35-
print(prompt.prompt)
35+
print(prompt.content)

โ€Žserver/api.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ async def generate_prompt(request: PromptRequest):
4848
# Train the prompt
4949
prompt.train()
5050

51-
return PromptResponse(prompt=prompt.prompt, token_count=prompt.token_count)
51+
return PromptResponse(prompt=prompt.content, token_count=prompt.token_count)
5252
except Exception as e:
5353
raise HTTPException(status_code=500, detail=str(e))
5454

โ€Žsetup.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,9 @@
3131
],
3232
packages=find_packages(),
3333
include_package_data=True,
34+
package_data={
35+
'promptuner': ['metaprompts/*.md'],
36+
},
3437
install_requires=required,
3538
entry_points={
3639
"console_scripts": [

0 commit comments

Comments
 (0)