
Commit 9f333c1

[FEATURE] Load Agentic Configurations from a Dictionary

1 parent 8c63d75, commit 9f333c1

39 files changed (+8901, -170 lines)

.gitignore

Lines changed: 1 addition & 0 deletions
@@ -1,3 +1,4 @@
+.DS_Store
 build
 __pycache__*
 .coverage*

pyproject.toml

Lines changed: 7 additions & 46 deletions
@@ -69,6 +69,8 @@ docs = [
 ]
 litellm = [
     "litellm>=1.73.1,<2.0.0",
+    # https://github.com/BerriAI/litellm/issues/13711
+    "openai<1.100.0",
 ]
 llamaapi = [
     "llama-api-client>=0.1.0,<1.0.0",
@@ -92,7 +94,9 @@ writer = [
 sagemaker = [
     "boto3>=1.26.0,<2.0.0",
     "botocore>=1.29.0,<2.0.0",
-    "boto3-stubs[sagemaker-runtime]>=1.26.0,<2.0.0"
+    "boto3-stubs[sagemaker-runtime]>=1.26.0,<2.0.0",
+    # uses OpenAI as part of the implementation
+    "openai>=1.68.0,<2.0.0",
 ]

 a2a = [
@@ -104,50 +108,7 @@ a2a = [
     "starlette>=0.46.2,<1.0.0",
 ]
 all = [
-    # anthropic
-    "anthropic>=0.21.0,<1.0.0",
-
-    # dev
-    "commitizen>=4.4.0,<5.0.0",
-    "hatch>=1.0.0,<2.0.0",
-    "moto>=5.1.0,<6.0.0",
-    "mypy>=1.15.0,<2.0.0",
-    "pre-commit>=3.2.0,<4.2.0",
-    "pytest>=8.0.0,<9.0.0",
-    "pytest-asyncio>=0.26.0,<0.27.0",
-    "pytest-cov>=4.1.0,<5.0.0",
-    "pytest-xdist>=3.0.0,<4.0.0",
-    "ruff>=0.4.4,<0.5.0",
-
-    # docs
-    "sphinx>=5.0.0,<6.0.0",
-    "sphinx-rtd-theme>=1.0.0,<2.0.0",
-    "sphinx-autodoc-typehints>=1.12.0,<2.0.0",
-
-    # litellm
-    "litellm>=1.72.6,<1.73.0",
-
-    # llama
-    "llama-api-client>=0.1.0,<1.0.0",
-
-    # mistral
-    "mistralai>=1.8.2",
-
-    # ollama
-    "ollama>=0.4.8,<1.0.0",
-
-    # openai
-    "openai>=1.68.0,<2.0.0",
-
-    # otel
-    "opentelemetry-exporter-otlp-proto-http>=1.30.0,<2.0.0",
-
-    # a2a
-    "a2a-sdk[sql]>=0.3.0,<0.4.0",
-    "uvicorn>=0.34.2,<1.0.0",
-    "httpx>=0.28.1,<1.0.0",
-    "fastapi>=0.115.12,<1.0.0",
-    "starlette>=0.46.2,<1.0.0",
+    "strands-agents[a2a,anthropic,dev,docs,litellm,llamaapi,mistral,ollama,openai,otel]",
 ]

 [tool.hatch.version]
@@ -159,7 +120,7 @@ features = ["anthropic", "litellm", "llamaapi", "ollama", "openai", "otel", "mis
 dependencies = [
     "mypy>=1.15.0,<2.0.0",
     "ruff>=0.11.6,<0.12.0",
-    "strands-agents @ {root:uri}"
+    "strands-agents @ {root:uri}",
 ]

 [tool.hatch.envs.hatch-static-analysis.scripts]

src/strands/agent/agent.py

Lines changed: 31 additions & 2 deletions
@@ -642,8 +642,11 @@ def _record_tool_execution(
             tool_result: The result returned by the tool.
             user_message_override: Optional custom message to include.
         """
+        # Filter tool input parameters to only include those defined in tool spec
+        filtered_input = self._filter_tool_parameters_for_recording(tool["name"], tool["input"])
+
         # Create user message describing the tool call
-        input_parameters = json.dumps(tool["input"], default=lambda o: f"<<non-serializable: {type(o).__qualname__}>>")
+        input_parameters = json.dumps(filtered_input, default=lambda o: f"<<non-serializable: {type(o).__qualname__}>>")

         user_msg_content: list[ContentBlock] = [
             {"text": (f"agent.tool.{tool['name']} direct tool call.\nInput parameters: {input_parameters}\n")}
@@ -653,14 +656,21 @@ def _record_tool_execution(
         if user_message_override:
             user_msg_content.insert(0, {"text": f"{user_message_override}\n"})

+        # Create filtered tool use for message history
+        filtered_tool: ToolUse = {
+            "toolUseId": tool["toolUseId"],
+            "name": tool["name"],
+            "input": filtered_input,
+        }
+
         # Create the message sequence
         user_msg: Message = {
             "role": "user",
             "content": user_msg_content,
         }
         tool_use_msg: Message = {
             "role": "assistant",
-            "content": [{"toolUse": tool}],
+            "content": [{"toolUse": filtered_tool}],
         }
         tool_result_msg: Message = {
             "role": "user",
@@ -717,6 +727,25 @@ def _end_agent_trace_span(

         self.tracer.end_agent_span(**trace_attributes)

+    def _filter_tool_parameters_for_recording(self, tool_name: str, input_params: dict[str, Any]) -> dict[str, Any]:
+        """Filter input parameters to only include those defined in the tool specification.
+
+        Args:
+            tool_name: Name of the tool to get specification for
+            input_params: Original input parameters
+
+        Returns:
+            Filtered parameters containing only those defined in tool spec
+        """
+        all_tools_config = self.tool_registry.get_all_tools_config()
+        tool_spec = all_tools_config.get(tool_name)
+
+        if not tool_spec or "inputSchema" not in tool_spec:
+            return input_params.copy()
+
+        properties = tool_spec["inputSchema"]["json"]["properties"]
+        return {k: v for k, v in input_params.items() if k in properties}
+
     def _append_message(self, message: Message) -> None:
         """Appends a message to the agent's list of messages and invokes the callbacks for the MessageCreatedEvent."""
         self.messages.append(message)
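
For reference, the filtering introduced above keeps only the parameters declared in a tool's input schema before a direct tool call is recorded in message history. Below is a minimal standalone sketch of that behavior, using plain dicts and a free function rather than the actual Agent/ToolRegistry API:

from typing import Any


def filter_tool_parameters(tool_spec: dict[str, Any] | None, input_params: dict[str, Any]) -> dict[str, Any]:
    """Drop any input keys not declared in the tool spec's JSON input schema."""
    if not tool_spec or "inputSchema" not in tool_spec:
        return input_params.copy()
    properties = tool_spec["inputSchema"]["json"]["properties"]
    return {k: v for k, v in input_params.items() if k in properties}


# Example: "extra_debug_flag" is not part of the spec, so it is dropped from the recorded input.
spec = {"inputSchema": {"json": {"properties": {"query": {"type": "string"}}}}}
print(filter_tool_parameters(spec, {"query": "weather in Paris", "extra_debug_flag": True}))
# -> {'query': 'weather in Paris'}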

src/strands/event_loop/streaming.py

Lines changed: 13 additions & 12 deletions
@@ -40,10 +40,12 @@ def remove_blank_messages_content_text(messages: Messages) -> Messages:
         # only modify assistant messages
         if "role" in message and message["role"] != "assistant":
            continue
-
         if "content" in message:
             content = message["content"]
             has_tool_use = any("toolUse" in item for item in content)
+            if len(content) == 0:
+                content.append({"text": "[blank text]"})
+                continue

             if has_tool_use:
                 # Remove blank 'text' items for assistant messages
@@ -194,16 +196,18 @@ def handle_content_block_stop(state: dict[str, Any]) -> dict[str, Any]:
         state["text"] = ""

     elif reasoning_text:
-        content.append(
-            {
-                "reasoningContent": {
-                    "reasoningText": {
-                        "text": state["reasoningText"],
-                        "signature": state["signature"],
-                    }
+        content_block: ContentBlock = {
+            "reasoningContent": {
+                "reasoningText": {
+                    "text": state["reasoningText"],
                 }
             }
-        )
+        }
+
+        if "signature" in state:
+            content_block["reasoningContent"]["reasoningText"]["signature"] = state["signature"]
+
+        content.append(content_block)
         state["reasoningText"] = ""

     return state
@@ -263,7 +267,6 @@ async def process_stream(chunks: AsyncIterable[StreamEvent]) -> AsyncGenerator[d
         "text": "",
         "current_tool_use": {},
         "reasoningText": "",
-        "signature": "",
     }
     state["content"] = state["message"]["content"]

@@ -272,7 +275,6 @@ async def process_stream(chunks: AsyncIterable[StreamEvent]) -> AsyncGenerator[d

     async for chunk in chunks:
         yield {"callback": {"event": chunk}}
-
         if "messageStart" in chunk:
             state["message"] = handle_message_start(chunk["messageStart"], state["message"])
         elif "contentBlockStart" in chunk:
@@ -312,7 +314,6 @@ async def stream_messages(
     logger.debug("model=<%s> | streaming messages", model)

     messages = remove_blank_messages_content_text(messages)
-
     chunks = model.stream(messages, tool_specs if tool_specs else None, system_prompt)

     async for event in process_stream(chunks):
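
The streaming change above builds the reasoning content block incrementally and only attaches a signature when one was actually streamed, instead of always writing a possibly-empty "signature" field. A small standalone sketch of that shape, using plain dicts rather than the library's ContentBlock type:

from typing import Any


def build_reasoning_block(state: dict[str, Any]) -> dict[str, Any]:
    """Assemble a reasoningContent block, adding the signature only when present in state."""
    block: dict[str, Any] = {
        "reasoningContent": {
            "reasoningText": {
                "text": state["reasoningText"],
            }
        }
    }
    if "signature" in state:
        block["reasoningContent"]["reasoningText"]["signature"] = state["signature"]
    return block


print(build_reasoning_block({"reasoningText": "step 1", "signature": "abc123"}))
print(build_reasoning_block({"reasoningText": "step 1"}))  # no signature key at all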

New file

Lines changed: 8 additions & 0 deletions

@@ -0,0 +1,8 @@
+"""Contains logic that loads agent configurations from YAML files."""
+
+from .agent import AgentConfigLoader
+from .graph import GraphConfigLoader
+from .swarm import SwarmConfigLoader
+from .tools import AgentAsToolWrapper, ToolConfigLoader
+
+__all__ = ["AgentConfigLoader", "ToolConfigLoader", "AgentAsToolWrapper", "GraphConfigLoader", "SwarmConfigLoader"]

New file

Lines changed: 25 additions & 0 deletions

@@ -0,0 +1,25 @@
+"""Agent configuration loader module."""
+
+from .agent_config_loader import AgentConfigLoader
+from .pydantic_factory import PydanticModelFactory
+from .schema_registry import SchemaRegistry
+from .structured_output_errors import (
+    ModelCreationError,
+    OutputValidationError,
+    SchemaImportError,
+    SchemaRegistryError,
+    SchemaValidationError,
+    StructuredOutputError,
+)
+
+__all__ = [
+    "AgentConfigLoader",
+    "PydanticModelFactory",
+    "SchemaRegistry",
+    "StructuredOutputError",
+    "SchemaValidationError",
+    "ModelCreationError",
+    "OutputValidationError",
+    "SchemaRegistryError",
+    "SchemaImportError",
+]
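
The module above also exports a family of structured-output errors. Their definitions are not part of this excerpt, so the sketch below is a stand-in hierarchy that assumes StructuredOutputError is the common base; it only illustrates how a caller could catch either one specific failure or the whole family:

# Stand-in sketch only; the real exception classes are defined elsewhere in the commit.
class StructuredOutputError(Exception):
    """Assumed common base for structured-output failures."""


class SchemaValidationError(StructuredOutputError):
    """Assumed: raised when a configured output schema fails validation."""


def check_schema(schema: dict) -> dict:
    # Toy validation: require a "properties" section.
    if "properties" not in schema:
        raise SchemaValidationError("schema has no 'properties' section")
    return schema


try:
    check_schema({})
except StructuredOutputError as exc:  # catches any error in the assumed hierarchy
    print(f"configuration rejected: {exc}")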
