Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions src/google/adk/flows/llm_flows/functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
import copy
import functools
import inspect
import json
import logging
import threading
from typing import Any
Expand Down Expand Up @@ -962,6 +963,20 @@ def __build_response_event(
parts=[part_function_response],
)

# When summarization is skipped, ensure a displayable text part is added.
if tool_context.actions.skip_summarization:
# If the tool returned a non-dict, it was wrapped in {'result': ...}.
# This unwraps the value for display; otherwise, it uses the original dict.
result_payload = function_result.get('result', function_result)
if isinstance(result_payload, str):
result_text = result_payload
else:
# Safely serialize non-string results to JSON for display.
result_text = json.dumps(
result_payload, ensure_ascii=False, default=str
)
content.parts.append(types.Part.from_text(text=result_text))

function_response_event = Event(
invocation_id=invocation_context.invocation_id,
author=invocation_context.agent.name,
Expand Down
80 changes: 80 additions & 0 deletions tests/unittests/tools/test_agent_tool.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@

from typing import Any
from typing import Optional
import pytest

from google.adk.agents.callback_context import CallbackContext
from google.adk.agents.invocation_context import InvocationContext
Expand Down Expand Up @@ -1164,3 +1165,82 @@ def test_empty_sequential_agent_falls_back_to_request(self):

# Should fall back to 'request' parameter
assert declaration.parameters.properties['request'].type == 'STRING'


@pytest.fixture
def setup_skip_summarization_runner():
  """Factory fixture for skip-summarization scenarios.

  Returns a callable that builds an InMemoryRunner whose root agent invokes
  an AgentTool configured with skip_summarization=True. The callable accepts
  the canned model responses for the inner tool agent and an optional output
  schema for that agent.
  """

  def _setup_runner(tool_agent_model_responses, tool_agent_output_schema=None):
    # Inner agent wrapped by the AgentTool; its mock model replays the
    # caller-supplied responses verbatim.
    inner_model = testing_utils.MockModel.create(
        responses=tool_agent_model_responses
    )
    inner_agent = Agent(
        name="tool_agent",
        model=inner_model,
        output_schema=tool_agent_output_schema,
    )

    wrapped_tool = AgentTool(agent=inner_agent, skip_summarization=True)

    # The root agent first emits the function call, then a summary turn that
    # must never be reached because summarization is skipped.
    outer_model = testing_utils.MockModel.create(
        responses=[
            function_call_no_schema,
            "final_summary_text_that_should_not_be_reached",
        ]
    )
    outer_agent = Agent(
        name="root_agent",
        model=outer_model,
        tools=[wrapped_tool],
    )
    return testing_utils.InMemoryRunner(outer_agent)

  return _setup_runner


def test_agent_tool_skip_summarization_has_text_output(setup_skip_summarization_runner):
  """Tests that when skip_summarization is True, the final event contains text content.

  The final event must keep the raw function_response part AND gain a
  displayable text part that mirrors the tool's raw string output.
  """
  runner = setup_skip_summarization_runner(
      tool_agent_model_responses=["tool_response_text"]
  )
  events = runner.run("start")

  # `final_events` is already filtered by is_final_response(), so its members
  # need no re-checking (the original re-asserted the same predicate).
  final_events = [e for e in events if e.is_final_response()]
  assert final_events
  last_event = final_events[-1]

  # The raw function response part is preserved...
  assert any(p.function_response for p in last_event.content.parts)
  # ...and a text part with the tool's output was appended for display.
  assert [p.text for p in last_event.content.parts if p.text] == [
      "tool_response_text"
  ]


def test_agent_tool_skip_summarization_preserves_json_string_output(setup_skip_summarization_runner):
  """Tests that structured output string is preserved as text when skipping summarization.

  A string result that happens to be JSON must be passed through verbatim,
  not re-serialized (which could reorder keys or alter whitespace).
  """
  runner = setup_skip_summarization_runner(
      tool_agent_model_responses=['{"field": "value"}']
  )
  events = runner.run("start")

  # `final_events` is already filtered by is_final_response(), so its members
  # need no re-checking (the original re-asserted the same predicate).
  final_events = [e for e in events if e.is_final_response()]
  assert final_events
  last_event = final_events[-1]

  text_parts = [p.text for p in last_event.content.parts if p.text]

  # Check that the JSON string content is preserved exactly.
  assert text_parts == ['{"field": "value"}']


def test_agent_tool_skip_summarization_handles_non_string_result(setup_skip_summarization_runner):
  """Tests that non-string (dict) output is correctly serialized as JSON text."""

  class CustomOutput(BaseModel):
    value: int

  runner = setup_skip_summarization_runner(
      tool_agent_model_responses=['{"value": 123}'],
      tool_agent_output_schema=CustomOutput,
  )
  produced_events = runner.run("start")

  finals = [evt for evt in produced_events if evt.is_final_response()]
  assert finals
  closing_event = finals[-1]

  rendered_text = [p.text for p in closing_event.content.parts if p.text]

  # With an output schema the tool result is a dict; the flow must serialize
  # it back into JSON text for display.
  assert rendered_text == ['{"value": 123}']