
Commit e7878c3

address review feedback
1 parent ea6ef5b commit e7878c3

File tree

5 files changed: +17 -8 lines changed

contributing/samples/hello_world_gemma3_ollama/agent.py

Lines changed: 2 additions & 2 deletions
@@ -72,8 +72,8 @@ async def check_prime(nums: list[int]) -> str:
     instruction="""
       You roll dice and answer questions about the outcome of the dice rolls.
       You can roll dice of different sizes.
-      You can use multiple tools in parallel by calling functions in parallel(in one request and in one round).
-      It is ok to discuss previous dice roles, and comment on the dice rolls.
+      You can use multiple tools in parallel by calling functions in parallel (in one request and in one round).
+      It is ok to discuss previous dice rolls, and comment on the dice rolls.
       When you are asked to roll a die, you must call the roll_die tool with the number of sides. Be sure to pass in an integer. Do not pass in a string.
       You should never roll a die on your own.
       When checking prime numbers, call the check_prime tool with a list of integers. Be sure to pass in a list of integers. You should never pass in a string.
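Note: the hunk header above confirms the tool signature async def check_prime(nums: list[int]) -> str: that this instruction refers to. As an illustrative sketch only (not the sample's actual implementation), a tool matching that signature could look like:

async def check_prime(nums: list[int]) -> str:
  """Illustrative sketch: report which of the given integers are prime."""
  primes = []
  for n in nums:
    # Trial division up to sqrt(n); n < 2 is never prime.
    if n >= 2 and all(n % d for d in range(2, int(n**0.5) + 1)):
      primes.append(n)
  if not primes:
    return 'No prime numbers found.'
  return f"{', '.join(str(p) for p in primes)} are prime numbers."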

contributing/samples/hello_world_gemma3_ollama/main.py

Lines changed: 5 additions & 5 deletions
@@ -41,7 +41,7 @@ async def main():
       artifact_service=artifact_service,
       session_service=session_service,
   )
-  session_11 = await session_service.create_session(
+  session_1 = await session_service.create_session(
       app_name=app_name, user_id=user_id_1
   )
 
@@ -61,12 +61,12 @@ async def run_prompt(session: Session, new_message: str):
   start_time = time.time()
   print('Start time:', start_time)
   print('------------------------------------')
-  await run_prompt(session_11, 'Hi, introduce yourself.')
+  await run_prompt(session_1, 'Hi, introduce yourself.')
   await run_prompt(
-      session_11, 'Roll a die with 100 sides and check if it is prime'
+      session_1, 'Roll a die with 100 sides and check if it is prime'
   )
-  await run_prompt(session_11, 'Roll it again.')
-  await run_prompt(session_11, 'What numbers did I get?')
+  await run_prompt(session_1, 'Roll it again.')
+  await run_prompt(session_1, 'What numbers did I get?')
   end_time = time.time()
   print('------------------------------------')
   print('End time:', end_time)

pyproject.toml

Lines changed: 2 additions & 1 deletion
@@ -113,6 +113,7 @@ test = [
   "a2a-sdk>=0.3.0,<0.4.0;python_version>='3.10'",
   "anthropic>=0.43.0", # For anthropic model tests
   "crewai[tools];python_version>='3.10'", # For CrewaiTool tests
+  "instructor>=1.11.3", # For instructor (Gemma3 parsing)
   "kubernetes>=29.0.0", # For GkeCodeExecutor
   "langchain-community>=0.3.17",
   "langgraph>=0.2.60, <0.4.8", # For LangGraphAgent
@@ -144,14 +145,14 @@ extensions = [
   "beautifulsoup4>=3.2.2", # For load_web_page tool.
   "crewai[tools];python_version>='3.10'", # For CrewaiTool
   "docker>=7.0.0", # For ContainerCodeExecutor
+  "instructor>=1.11.3", # For instructor (Gemma3 parsing)
   "kubernetes>=29.0.0", # For GkeCodeExecutor
   "langgraph>=0.2.60, <0.4.8", # For LangGraphAgent
   "litellm>=1.75.5", # For LiteLlm class. Currently has OpenAI limitations. TODO: once LiteLlm fix it
   "llama-index-readers-file>=0.4.0", # For retrieval using LlamaIndex.
   "llama-index-embeddings-google-genai>=0.3.0", # For files retrieval using LlamaIndex.
   "lxml>=5.3.0", # For load_web_page tool.
   "toolbox-core>=0.1.0", # For tools.toolbox_toolset.ToolboxToolset
-  "instructor>=1.11.3", # For instructor (Gemma3 parsing)
 ]
 
 otel-gcp = ["opentelemetry-instrumentation-google-genai>=0.3b0, <1.0.0"]

src/google/adk/models/gemma_llm.py

Lines changed: 7 additions & 0 deletions
@@ -243,7 +243,14 @@ def _extract_function_calls_from_response(llm_response: LlmResponse):
 
   try:
     import instructor
+  except ImportError as e:
+    logger.warning(
+        "The 'instructor' package is required for Gemma3 function calling but is not installed. "
+        "Text response will be returned. To enable function calling, run: pip install \"google-adk[extensions]\""
+    )
+    return
 
+  try:
     json_candidate = instructor.utils.extract_json_from_codeblock(response_text)
 
     if not json_candidate:
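The change above turns instructor into an optional dependency: when the import fails, the extractor logs a warning and returns early so the caller falls back to the plain text response instead of raising. A minimal, self-contained sketch of that guarded-import pattern (the helper name here is hypothetical; only instructor.utils.extract_json_from_codeblock is taken from the diff):

import logging

logger = logging.getLogger(__name__)


def extract_json_or_none(response_text: str):
  """Hypothetical helper mirroring the guarded optional import above."""
  try:
    import instructor  # Optional dependency, only needed for Gemma3 function-call parsing.
  except ImportError:
    logger.warning(
        "'instructor' is not installed; skipping function-call extraction "
        "and returning None so callers can fall back to plain text."
    )
    return None

  # Pull a JSON payload out of a fenced code block in the model output, if present.
  return instructor.utils.extract_json_from_codeblock(response_text)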

tests/unittests/models/test_gemma_llm.py

Lines changed: 1 addition & 0 deletions
@@ -122,6 +122,7 @@ async def test_gemma_gemini_preprocess_request(llm_request):
   assert llm_request.contents[0].parts[0].text == want_content_text
 
 
+@pytest.mark.asyncio
 async def test_gemma_gemini_preprocess_request_with_tools(
     llm_request_with_tools,
 ):
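On the test change: @pytest.mark.asyncio (from pytest-asyncio) is what makes pytest run a coroutine test on an event loop; without the marker (or asyncio_mode = "auto" in the project config), an async def test is collected but never awaited. A minimal sketch, assuming pytest-asyncio is installed:

import asyncio

import pytest


@pytest.mark.asyncio
async def test_marker_lets_pytest_await_this_coroutine():
  # pytest-asyncio supplies the event loop that awaits this test body.
  assert await asyncio.sleep(0) is None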

0 commit comments
