Skip to content

Commit d224b26

Browse files
committed
feat: record usage metadata on compaction events
1 parent 4f3b733 commit d224b26

File tree

2 files changed

+37
-2
lines changed

2 files changed

+37
-2
lines changed

src/google/adk/apps/llm_event_summarizer.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -104,9 +104,12 @@ async def maybe_summarize_events(
104104
contents=[Content(role='user', parts=[Part(text=prompt)])],
105105
)
106106
summary_content = None
107+
usage_metadata = None
107108
async for llm_response in self._llm.generate_content_async(
108109
llm_request, stream=False
109110
):
111+
if llm_response.usage_metadata is not None:
112+
usage_metadata = llm_response.usage_metadata
110113
if llm_response.content:
111114
summary_content = llm_response.content
112115
break
@@ -132,4 +135,5 @@ async def maybe_summarize_events(
132135
author='user',
133136
actions=actions,
134137
invocation_id=Event.new_id(),
138+
usage_metadata=usage_metadata,
135139
)

tests/unittests/apps/test_llm_event_summarizer.py

Lines changed: 33 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@
2525
from google.genai.types import Content
2626
from google.genai.types import FunctionCall
2727
from google.genai.types import FunctionResponse
28+
from google.genai.types import GenerateContentResponseUsageMetadata
2829
from google.genai.types import Part
2930
import pytest
3031

@@ -57,7 +58,9 @@ async def test_maybe_compact_events_success(self):
5758
expected_prompt = self.compactor._DEFAULT_PROMPT_TEMPLATE.format(
5859
conversation_history=expected_conversation_history
5960
)
60-
mock_llm_response = Mock(content=Content(parts=[Part(text='Summary')]))
61+
mock_llm_response = Mock(
62+
content=Content(parts=[Part(text='Summary')]), usage_metadata=None
63+
)
6164

6265
async def async_gen():
6366
yield mock_llm_response
@@ -90,11 +93,39 @@ async def async_gen():
9093
self.assertEqual(llm_request.contents[0].parts[0].text, expected_prompt)
9194
self.assertFalse(kwargs['stream'])
9295

96+
async def test_maybe_compact_events_includes_usage_metadata(self):
97+
events = [
98+
self._create_event(1.0, 'Hello', 'user'),
99+
self._create_event(2.0, 'Hi there!', 'model'),
100+
]
101+
usage_metadata = GenerateContentResponseUsageMetadata(
102+
prompt_token_count=10,
103+
candidates_token_count=5,
104+
total_token_count=15,
105+
)
106+
mock_llm_response = Mock(
107+
content=Content(parts=[Part(text='Summary')]),
108+
usage_metadata=usage_metadata,
109+
)
110+
111+
async def async_gen():
112+
yield mock_llm_response
113+
114+
self.mock_llm.generate_content_async.return_value = async_gen()
115+
116+
compacted_event = await self.compactor.maybe_summarize_events(events=events)
117+
118+
self.assertIsNotNone(compacted_event)
119+
self.assertIsNotNone(compacted_event.usage_metadata)
120+
self.assertEqual(compacted_event.usage_metadata.prompt_token_count, 10)
121+
self.assertEqual(compacted_event.usage_metadata.candidates_token_count, 5)
122+
self.assertEqual(compacted_event.usage_metadata.total_token_count, 15)
123+
93124
async def test_maybe_compact_events_empty_llm_response(self):
94125
events = [
95126
self._create_event(1.0, 'Hello', 'user'),
96127
]
97-
mock_llm_response = Mock(content=None)
128+
mock_llm_response = Mock(content=None, usage_metadata=None)
98129

99130
async def async_gen():
100131
yield mock_llm_response

0 commit comments

Comments (0)