Commit 63aa5d1

Pretty print result classes (#196)
These classes print a really long, unreadable output by default. This change trims it to the main useful info (i.e. the final output, the last agent, and an overview of the run).
2 parents 09d70c0 + 64e263b commit 63aa5d1
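
In practice, this means `print(result)` yields a short summary instead of the full dataclass repr. A minimal sketch, assuming a placeholder agent and an illustrative model reply (the summary layout is the one produced by the new `pretty_print_result`):

```python
from agents import Agent, Runner

# Placeholder agent; the name, instructions, and reply below are illustrative only.
agent = Agent(name="assistant", instructions="Reply concisely.")
result = Runner.run_sync(agent, input="Say hi")

# RunResult.__str__ now delegates to pretty_print_result(...):
print(result)
# RunResult:
# - Last agent: Agent(name="assistant", ...)
# - Final output (str):
#     Hi there!
# - 1 new item(s)
# - 1 raw response(s)
# - 0 input guardrail result(s)
# - 0 output guardrail result(s)
# (See `RunResult` for more details)
```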

File tree: 7 files changed (+338 −1 lines)

Makefile (+8)

@@ -18,6 +18,14 @@ mypy:
 tests:
     uv run pytest
 
+.PHONY: snapshots-fix
+snapshots-fix:
+    uv run pytest --inline-snapshot=fix
+
+.PHONY: snapshots-create
+snapshots-create:
+    uv run pytest --inline-snapshot=create
+
 .PHONY: old_version_tests
 old_version_tests:
     UV_PROJECT_ENVIRONMENT=.venv_39 uv run --python 3.9 -m pytest

pyproject.toml (+5 −1)

@@ -47,6 +47,7 @@ dev = [
     "mkdocstrings[python]>=0.28.0",
     "coverage>=7.6.12",
     "playwright==1.50.0",
+    "inline-snapshot>=0.20.7",
 ]
 [tool.uv.workspace]
 members = ["agents"]
@@ -116,4 +117,7 @@ filterwarnings = [
 ]
 markers = [
     "allow_call_model_methods: mark test as allowing calls to real model implementations",
-]
+]
+
+[tool.inline-snapshot]
+format-command="ruff format --stdin-filename {filename}"

src/agents/result.py (+7)

@@ -17,6 +17,7 @@
 from .logger import logger
 from .stream_events import StreamEvent
 from .tracing import Trace
+from .util._pretty_print import pretty_print_result, pretty_print_run_result_streaming
 
 if TYPE_CHECKING:
     from ._run_impl import QueueCompleteSentinel
@@ -89,6 +90,9 @@ def last_agent(self) -> Agent[Any]:
         """The last agent that was run."""
         return self._last_agent
 
+    def __str__(self) -> str:
+        return pretty_print_result(self)
+
 
 @dataclass
 class RunResultStreaming(RunResultBase):
@@ -216,3 +220,6 @@ def _cleanup_tasks(self):
 
         if self._output_guardrails_task and not self._output_guardrails_task.done():
             self._output_guardrails_task.cancel()
+
+    def __str__(self) -> str:
+        return pretty_print_run_result_streaming(self)
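
`RunResultStreaming` gets the same treatment through its own `__str__`. A hedged sketch, with a placeholder agent and the counters mirroring the streaming test snapshots later in this commit:

```python
import asyncio

from agents import Agent, Runner


async def main() -> None:
    agent = Agent(name="assistant", instructions="Reply concisely.")  # placeholder
    result = Runner.run_streamed(agent, input="Say hi")
    async for _ in result.stream_events():  # drain the stream so the run finishes
        pass
    print(result)  # delegates to pretty_print_run_result_streaming(...)
    # RunResultStreaming:
    # - Current agent: Agent(name="assistant", ...)
    # - Current turn: 1
    # - Max turns: 10
    # - Is complete: True
    # - Final output (str):
    #     ...
    # (remaining lines match the non-streaming summary)


asyncio.run(main())
```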

src/agents/util/_pretty_print.py (new file, +56)

from typing import TYPE_CHECKING

from pydantic import BaseModel

if TYPE_CHECKING:
    from ..result import RunResult, RunResultBase, RunResultStreaming


def _indent(text: str, indent_level: int) -> str:
    indent_string = "  " * indent_level
    return "\n".join(f"{indent_string}{line}" for line in text.splitlines())


def _final_output_str(result: "RunResultBase") -> str:
    if result.final_output is None:
        return "None"
    elif isinstance(result.final_output, str):
        return result.final_output
    elif isinstance(result.final_output, BaseModel):
        return result.final_output.model_dump_json(indent=2)
    else:
        return str(result.final_output)


def pretty_print_result(result: "RunResult") -> str:
    output = "RunResult:"
    output += f'\n- Last agent: Agent(name="{result.last_agent.name}", ...)'
    output += (
        f"\n- Final output ({type(result.final_output).__name__}):\n"
        f"{_indent(_final_output_str(result), 2)}"
    )
    output += f"\n- {len(result.new_items)} new item(s)"
    output += f"\n- {len(result.raw_responses)} raw response(s)"
    output += f"\n- {len(result.input_guardrail_results)} input guardrail result(s)"
    output += f"\n- {len(result.output_guardrail_results)} output guardrail result(s)"
    output += "\n(See `RunResult` for more details)"

    return output


def pretty_print_run_result_streaming(result: "RunResultStreaming") -> str:
    output = "RunResultStreaming:"
    output += f'\n- Current agent: Agent(name="{result.current_agent.name}", ...)'
    output += f"\n- Current turn: {result.current_turn}"
    output += f"\n- Max turns: {result.max_turns}"
    output += f"\n- Is complete: {result.is_complete}"
    output += (
        f"\n- Final output ({type(result.final_output).__name__}):\n"
        f"{_indent(_final_output_str(result), 2)}"
    )
    output += f"\n- {len(result.new_items)} new item(s)"
    output += f"\n- {len(result.raw_responses)} raw response(s)"
    output += f"\n- {len(result.input_guardrail_results)} input guardrail result(s)"
    output += f"\n- {len(result.output_guardrail_results)} output guardrail result(s)"
    output += "\n(See `RunResultStreaming` for more details)"
    return output
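
A note on structured outputs: `_final_output_str` renders any Pydantic `BaseModel` via `model_dump_json(indent=2)`, which is why the summaries show indented JSON. A standalone sketch with a made-up model (`Foo`, mirroring the one used in the tests):

```python
from pydantic import BaseModel


class Foo(BaseModel):
    bar: str


# This is the string that ends up under "- Final output (Foo):" in the summary
# (pretty_print_result then indents the whole block via _indent).
print(Foo(bar="Hi there").model_dump_json(indent=2))
# {
#   "bar": "Hi there"
# }
```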

tests/README.md (new file, +25)

# Tests

Before running any tests, make sure you have `uv` installed (and ideally run `make sync` after).

## Running tests

```
make tests
```

## Snapshots

We use [inline-snapshots](https://15r10nk.github.io/inline-snapshot/latest/) for some tests. If your code adds new snapshot tests or breaks existing ones, you can fix/create them. After fixing/creating snapshots, run `make tests` again to verify the tests pass.

### Fixing snapshots

```
make snapshots-fix
```

### Creating snapshots

```
make snapshots-create
```
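
For context on the workflow the README describes, here is a minimal hedged sketch of an inline-snapshot test (the test name and value are made up). `make snapshots-fix` (i.e. `pytest --inline-snapshot=fix`) rewrites the `snapshot(...)` argument in place whenever the asserted value changes, and the `format-command` added to `pyproject.toml` runs `ruff format` over the rewritten code:

```python
from inline_snapshot import snapshot


def test_greeting() -> None:
    greeting = "Hi there"
    # An empty snapshot() plus `make snapshots-create`
    # (pytest --inline-snapshot=create) fills this argument in;
    # afterwards the test is a plain equality assertion.
    assert greeting == snapshot("Hi there")
```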

tests/test_pretty_print.py (new file, +201)

import json

import pytest
from inline_snapshot import snapshot
from pydantic import BaseModel

from agents import Agent, Runner
from agents.agent_output import _WRAPPER_DICT_KEY
from agents.util._pretty_print import pretty_print_result, pretty_print_run_result_streaming
from tests.fake_model import FakeModel

from .test_responses import get_final_output_message, get_text_message


@pytest.mark.asyncio
async def test_pretty_result():
    model = FakeModel()
    model.set_next_output([get_text_message("Hi there")])

    agent = Agent(name="test_agent", model=model)
    result = await Runner.run(agent, input="Hello")

    assert pretty_print_result(result) == snapshot("""\
RunResult:
- Last agent: Agent(name="test_agent", ...)
- Final output (str):
    Hi there
- 1 new item(s)
- 1 raw response(s)
- 0 input guardrail result(s)
- 0 output guardrail result(s)
(See `RunResult` for more details)\
""")


@pytest.mark.asyncio
async def test_pretty_run_result_streaming():
    model = FakeModel()
    model.set_next_output([get_text_message("Hi there")])

    agent = Agent(name="test_agent", model=model)
    result = Runner.run_streamed(agent, input="Hello")
    async for _ in result.stream_events():
        pass

    assert pretty_print_run_result_streaming(result) == snapshot("""\
RunResultStreaming:
- Current agent: Agent(name="test_agent", ...)
- Current turn: 1
- Max turns: 10
- Is complete: True
- Final output (str):
    Hi there
- 1 new item(s)
- 1 raw response(s)
- 0 input guardrail result(s)
- 0 output guardrail result(s)
(See `RunResultStreaming` for more details)\
""")


class Foo(BaseModel):
    bar: str


@pytest.mark.asyncio
async def test_pretty_run_result_structured_output():
    model = FakeModel()
    model.set_next_output(
        [
            get_text_message("Test"),
            get_final_output_message(Foo(bar="Hi there").model_dump_json()),
        ]
    )

    agent = Agent(name="test_agent", model=model, output_type=Foo)
    result = await Runner.run(agent, input="Hello")

    assert pretty_print_result(result) == snapshot("""\
RunResult:
- Last agent: Agent(name="test_agent", ...)
- Final output (Foo):
    {
      "bar": "Hi there"
    }
- 2 new item(s)
- 1 raw response(s)
- 0 input guardrail result(s)
- 0 output guardrail result(s)
(See `RunResult` for more details)\
""")


@pytest.mark.asyncio
async def test_pretty_run_result_streaming_structured_output():
    model = FakeModel()
    model.set_next_output(
        [
            get_text_message("Test"),
            get_final_output_message(Foo(bar="Hi there").model_dump_json()),
        ]
    )

    agent = Agent(name="test_agent", model=model, output_type=Foo)
    result = Runner.run_streamed(agent, input="Hello")

    async for _ in result.stream_events():
        pass

    assert pretty_print_run_result_streaming(result) == snapshot("""\
RunResultStreaming:
- Current agent: Agent(name="test_agent", ...)
- Current turn: 1
- Max turns: 10
- Is complete: True
- Final output (Foo):
    {
      "bar": "Hi there"
    }
- 2 new item(s)
- 1 raw response(s)
- 0 input guardrail result(s)
- 0 output guardrail result(s)
(See `RunResultStreaming` for more details)\
""")


@pytest.mark.asyncio
async def test_pretty_run_result_list_structured_output():
    model = FakeModel()
    model.set_next_output(
        [
            get_text_message("Test"),
            get_final_output_message(
                json.dumps(
                    {
                        _WRAPPER_DICT_KEY: [
                            Foo(bar="Hi there").model_dump(),
                            Foo(bar="Hi there 2").model_dump(),
                        ]
                    }
                )
            ),
        ]
    )

    agent = Agent(name="test_agent", model=model, output_type=list[Foo])
    result = await Runner.run(agent, input="Hello")

    assert pretty_print_result(result) == snapshot("""\
RunResult:
- Last agent: Agent(name="test_agent", ...)
- Final output (list):
    [Foo(bar='Hi there'), Foo(bar='Hi there 2')]
- 2 new item(s)
- 1 raw response(s)
- 0 input guardrail result(s)
- 0 output guardrail result(s)
(See `RunResult` for more details)\
""")


@pytest.mark.asyncio
async def test_pretty_run_result_streaming_list_structured_output():
    model = FakeModel()
    model.set_next_output(
        [
            get_text_message("Test"),
            get_final_output_message(
                json.dumps(
                    {
                        _WRAPPER_DICT_KEY: [
                            Foo(bar="Test").model_dump(),
                            Foo(bar="Test 2").model_dump(),
                        ]
                    }
                )
            ),
        ]
    )

    agent = Agent(name="test_agent", model=model, output_type=list[Foo])
    result = Runner.run_streamed(agent, input="Hello")

    async for _ in result.stream_events():
        pass

    assert pretty_print_run_result_streaming(result) == snapshot("""\
RunResultStreaming:
- Current agent: Agent(name="test_agent", ...)
- Current turn: 1
- Max turns: 10
- Is complete: True
- Final output (list):
    [Foo(bar='Test'), Foo(bar='Test 2')]
- 2 new item(s)
- 1 raw response(s)
- 0 input guardrail result(s)
- 0 output guardrail result(s)
(See `RunResultStreaming` for more details)\
""")