src/google/adk/flows/llm_flows/functions.py (15 additions, 0 deletions)
@@ -21,6 +21,7 @@
import copy
import functools
import inspect
import json
import logging
import threading
from typing import Any
@@ -942,6 +943,9 @@ def __build_response_event(
    tool_context: ToolContext,
    invocation_context: InvocationContext,
) -> Event:
  # Capture the raw result for display purposes before any normalization.
  display_result = function_result

  # The spec requires the result to be a dict.
  if not isinstance(function_result, dict):
    function_result = {'result': function_result}
@@ -956,6 +960,17 @@
      parts=[part_function_response],
  )

  # When summarization is skipped, ensure a displayable text part is added.
  if tool_context.actions.skip_summarization and 'error' not in function_result:
    if display_result is not None:
      if isinstance(display_result, str):
        result_text = display_result
      else:
        result_text = json.dumps(
            display_result, ensure_ascii=False, default=str
        )
      content.parts.append(types.Part.from_text(text=result_text))

  function_response_event = Event(
      invocation_id=invocation_context.invocation_id,
      author=invocation_context.agent.name,
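Not part of the diff, but the new branch above is easy to read in isolation: string results are appended as text untouched, and any other value is rendered with json.dumps, where default=str catches objects that are not natively JSON-serializable. A minimal standalone sketch of that fallback (the helper name _to_display_text is hypothetical, introduced only for illustration):

import json


def _to_display_text(display_result):
  # Mirrors the branch added in __build_response_event: strings pass through
  # untouched, any other value is rendered as JSON text.
  if isinstance(display_result, str):
    return display_result
  return json.dumps(display_result, ensure_ascii=False, default=str)


assert _to_display_text('tool_response_text') == 'tool_response_text'
assert _to_display_text({'value': 123}) == '{"value": 123}'

This is also why the tests below expect the exact strings 'tool_response_text' and '{"value": 123}' in the appended text part.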
tests/unittests/tools/test_agent_tool.py (95 additions, 0 deletions)
@@ -1164,3 +1164,98 @@ def test_empty_sequential_agent_falls_back_to_request(self):

    # Should fall back to 'request' parameter
    assert declaration.parameters.properties['request'].type == 'STRING'


@pytest.fixture
def setup_skip_summarization_runner():
  def _setup_runner(tool_agent_model_responses, tool_agent_output_schema=None):
    tool_agent_model = testing_utils.MockModel.create(
        responses=tool_agent_model_responses
    )
    tool_agent = Agent(
        name='tool_agent',
        model=tool_agent_model,
        output_schema=tool_agent_output_schema,
    )

    agent_tool = AgentTool(agent=tool_agent, skip_summarization=True)

    root_agent_model = testing_utils.MockModel.create(
        responses=[
            function_call_no_schema,
            'final_summary_text_that_should_not_be_reached',
        ]
    )

    root_agent = Agent(
        name='root_agent',
        model=root_agent_model,
        tools=[agent_tool],
    )
    return testing_utils.InMemoryRunner(root_agent)

  return _setup_runner


def test_agent_tool_skip_summarization_has_text_output(
    setup_skip_summarization_runner,
):
  """Tests that when skip_summarization is True, the final event contains text content."""
  runner = setup_skip_summarization_runner(
      tool_agent_model_responses=['tool_response_text']
  )
  events = runner.run('start')

  final_events = [e for e in events if e.is_final_response()]
  assert final_events
  last_event = final_events[-1]
  assert last_event.is_final_response()

  assert any(p.function_response for p in last_event.content.parts)

  assert [p.text for p in last_event.content.parts if p.text] == [
      'tool_response_text'
  ]


def test_agent_tool_skip_summarization_preserves_json_string_output(
    setup_skip_summarization_runner,
):
  """Tests that structured output string is preserved as text when skipping summarization."""
  runner = setup_skip_summarization_runner(
      tool_agent_model_responses=['{"field": "value"}']
  )
  events = runner.run('start')

  final_events = [e for e in events if e.is_final_response()]
  assert final_events
  last_event = final_events[-1]
  assert last_event.is_final_response()

  text_parts = [p.text for p in last_event.content.parts if p.text]

  # Check that the JSON string content is preserved exactly
  assert text_parts == ['{"field": "value"}']


def test_agent_tool_skip_summarization_handles_non_string_result(
    setup_skip_summarization_runner,
):
  """Tests that non-string (dict) output is correctly serialized as JSON text."""

  class CustomOutput(BaseModel):
    value: int

  runner = setup_skip_summarization_runner(
      tool_agent_model_responses=['{"value": 123}'],
      tool_agent_output_schema=CustomOutput,
  )
  events = runner.run('start')

  final_events = [e for e in events if e.is_final_response()]
  assert final_events
  last_event = final_events[-1]

  text_parts = [p.text for p in last_event.content.parts if p.text]

  assert text_parts == ['{"value": 123}']