From b3ffc3c2764346187f6d5ba0337681ea610b5e99 Mon Sep 17 00:00:00 2001 From: Dharamendra Kumar Date: Sat, 28 Feb 2026 22:28:49 -0800 Subject: [PATCH 1/5] feat(strands-memory): add converter injection and optional restored-tool filtering --- .../memory/integrations/strands/__init__.py | 4 + .../integrations/strands/bedrock_converter.py | 2 + .../memory/integrations/strands/config.py | 3 + .../strands/converters/__init__.py | 7 + .../strands/converters/bedrock.py | 7 + .../integrations/strands/converters/openai.py | 173 ++++++++++++++++++ .../strands/converters/protocol.py | 28 +++ .../integrations/strands/session_manager.py | 39 +++- ...memory_session_manager_openai_converter.py | 111 +++++++++++ .../strands/test_openai_converter.py | 66 +++++++ 10 files changed, 436 insertions(+), 4 deletions(-) create mode 100644 src/bedrock_agentcore/memory/integrations/strands/converters/__init__.py create mode 100644 src/bedrock_agentcore/memory/integrations/strands/converters/bedrock.py create mode 100644 src/bedrock_agentcore/memory/integrations/strands/converters/openai.py create mode 100644 src/bedrock_agentcore/memory/integrations/strands/converters/protocol.py create mode 100644 tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager_openai_converter.py create mode 100644 tests/bedrock_agentcore/memory/integrations/strands/test_openai_converter.py diff --git a/src/bedrock_agentcore/memory/integrations/strands/__init__.py b/src/bedrock_agentcore/memory/integrations/strands/__init__.py index 9f162933..5f4c0bfb 100644 --- a/src/bedrock_agentcore/memory/integrations/strands/__init__.py +++ b/src/bedrock_agentcore/memory/integrations/strands/__init__.py @@ -1 +1,5 @@ """Strands integration for Bedrock AgentCore Memory.""" + +from .converters import BedrockConverseConverter, MemoryConverter, OpenAIConverseConverter + +__all__ = ["BedrockConverseConverter", "MemoryConverter", "OpenAIConverseConverter"] diff --git a/src/bedrock_agentcore/memory/integrations/strands/bedrock_converter.py b/src/bedrock_agentcore/memory/integrations/strands/bedrock_converter.py index b9c06c4b..28df16b0 100644 --- a/src/bedrock_agentcore/memory/integrations/strands/bedrock_converter.py +++ b/src/bedrock_agentcore/memory/integrations/strands/bedrock_converter.py @@ -8,6 +8,8 @@ logger = logging.getLogger(__name__) +# Bedrock AgentCore Data Plane conversational payload text limit is 9000 chars. +# Ref: https://docs.aws.amazon.com/cli/latest/reference/bedrock-agentcore/create-event.html CONVERSATIONAL_MAX_SIZE = 9000 diff --git a/src/bedrock_agentcore/memory/integrations/strands/config.py b/src/bedrock_agentcore/memory/integrations/strands/config.py index e41f531f..8e9c6663 100644 --- a/src/bedrock_agentcore/memory/integrations/strands/config.py +++ b/src/bedrock_agentcore/memory/integrations/strands/config.py @@ -33,6 +33,8 @@ class AgentCoreMemoryConfig(BaseModel): Default of 1 means immediate sending (no batching). Max 100. context_tag: XML tag name used to wrap retrieved memory context injected into messages. Default is "user_context". + filter_restored_tool_context: When True, strip historical toolUse/toolResult blocks from + restored messages before loading them into Strands runtime memory. Default is False. 
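+
+    Example (illustrative placeholder values; shows the new flag alongside the required fields):
+        config = AgentCoreMemoryConfig(
+            memory_id="my-memory-id",
+            session_id="my-session-id",
+            actor_id="my-actor-id",
+            filter_restored_tool_context=True,
+        )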
""" memory_id: str = Field(min_length=1) @@ -41,3 +43,4 @@ class AgentCoreMemoryConfig(BaseModel): retrieval_config: Optional[Dict[str, RetrievalConfig]] = None batch_size: int = Field(default=1, ge=1, le=100) context_tag: str = Field(default="user_context", min_length=1) + filter_restored_tool_context: bool = Field(default=False) diff --git a/src/bedrock_agentcore/memory/integrations/strands/converters/__init__.py b/src/bedrock_agentcore/memory/integrations/strands/converters/__init__.py new file mode 100644 index 00000000..7d2a04de --- /dev/null +++ b/src/bedrock_agentcore/memory/integrations/strands/converters/__init__.py @@ -0,0 +1,7 @@ +"""Converters for Strands <-> STM message formats.""" + +from .bedrock import BedrockConverseConverter +from .openai import OpenAIConverseConverter +from .protocol import MemoryConverter + +__all__ = ["BedrockConverseConverter", "OpenAIConverseConverter", "MemoryConverter"] diff --git a/src/bedrock_agentcore/memory/integrations/strands/converters/bedrock.py b/src/bedrock_agentcore/memory/integrations/strands/converters/bedrock.py new file mode 100644 index 00000000..e211b4a4 --- /dev/null +++ b/src/bedrock_agentcore/memory/integrations/strands/converters/bedrock.py @@ -0,0 +1,7 @@ +"""Strands-native message-shape converter adapter.""" + +from ..bedrock_converter import AgentCoreMemoryConverter + + +class BedrockConverseConverter(AgentCoreMemoryConverter): + """Alias adapter for the default Strands-native-shape converter.""" diff --git a/src/bedrock_agentcore/memory/integrations/strands/converters/openai.py b/src/bedrock_agentcore/memory/integrations/strands/converters/openai.py new file mode 100644 index 00000000..dd99ff3b --- /dev/null +++ b/src/bedrock_agentcore/memory/integrations/strands/converters/openai.py @@ -0,0 +1,173 @@ +"""OpenAI-format converter for AgentCore Memory. + +Converts between Strands SessionMessages (Strands-native message shape) and OpenAI message format +stored in AgentCore Memory STM events. 
+""" + +import json +import logging +from typing import Any, Tuple + +from strands.types.session import SessionMessage + +from .protocol import exceeds_conversational_limit + +logger = logging.getLogger(__name__) + + +def _bedrock_to_openai(message: dict) -> dict: + """Convert a Strands-native message dict to OpenAI message format.""" + role = message.get("role", "user") + content = message.get("content", []) + + if content and "toolResult" in content[0]: + tool_result = content[0]["toolResult"] + text_parts = [c.get("text", "") for c in tool_result.get("content", []) if "text" in c] + result = { + "role": "tool", + "tool_call_id": tool_result["toolUseId"], + "content": "\n".join(text_parts), + } + if "status" in tool_result: + result["status"] = tool_result["status"] + return result + + text_parts = [] + tool_calls = [] + for item in content: + if "text" in item: + text = item["text"].strip() + if text: + text_parts.append(text) + elif "toolUse" in item: + tu = item["toolUse"] + tool_calls.append( + { + "id": tu["toolUseId"], + "type": "function", + "function": { + "name": tu["name"], + "arguments": json.dumps(tu.get("input", {})), + }, + } + ) + + result: dict[str, Any] = {"role": role} + + if tool_calls: + result["content"] = "\n".join(text_parts) if text_parts else None + result["tool_calls"] = tool_calls + else: + result["content"] = "\n".join(text_parts) if text_parts else "" + + return result + + +def _openai_to_bedrock(openai_msg: dict) -> dict: + """Convert an OpenAI message dict to Strands-native message shape.""" + role = openai_msg.get("role", "user") + content_items: list[dict[str, Any]] = [] + + if role == "tool": + tool_result: dict[str, Any] = { + "toolUseId": openai_msg["tool_call_id"], + "content": [{"text": openai_msg.get("content", "")}], + } + if "status" in openai_msg: + tool_result["status"] = openai_msg["status"] + return { + "role": "user", + "content": [{"toolResult": tool_result}], + } + + if role == "system": + return { + "role": "user", + "content": [{"text": openai_msg.get("content", "")}], + } + + text_content = openai_msg.get("content") + if text_content and isinstance(text_content, str): + content_items.append({"text": text_content}) + + for tc in openai_msg.get("tool_calls", []): + fn = tc.get("function", {}) + args_str = fn.get("arguments", "{}") + try: + args = json.loads(args_str) + except (json.JSONDecodeError, ValueError): + args = {} + content_items.append( + { + "toolUse": { + "toolUseId": tc["id"], + "name": fn["name"], + "input": args, + } + } + ) + + bedrock_role = "assistant" if role == "assistant" else "user" + + return {"role": bedrock_role, "content": content_items} + + +class OpenAIConverseConverter: + """Converts between Strands SessionMessages and OpenAI message format in STM.""" + + @staticmethod + def message_to_payload(session_message: SessionMessage) -> list[Tuple[str, str]]: + """Convert a SessionMessage (Strands-native shape) to OpenAI-format STM payload.""" + message = session_message.message + content = message.get("content", []) + if not content: + return [] + + has_non_empty = any( + ("text" in item and item["text"].strip()) or "toolUse" in item or "toolResult" in item for item in content + ) + if not has_non_empty: + return [] + + openai_msg = _bedrock_to_openai(message) + role = openai_msg.get("role", "user") + return [(json.dumps(openai_msg), role)] + + @staticmethod + def events_to_messages(events: list[dict[str, Any]]) -> list[SessionMessage]: + """Convert STM events containing OpenAI-format messages to SessionMessages.""" + 
messages: list[SessionMessage] = [] + + for event in reversed(events): + for payload_item in event.get("payload", []): + openai_msg = None + + if "conversational" in payload_item: + conv = payload_item["conversational"] + try: + openai_msg = json.loads(conv["content"]["text"]) + except (json.JSONDecodeError, KeyError, ValueError): + logger.error("Failed to parse conversational payload as OpenAI message") + continue + + elif "blob" in payload_item: + try: + blob_data = json.loads(payload_item["blob"]) + if isinstance(blob_data, (tuple, list)) and len(blob_data) == 2: + openai_msg = json.loads(blob_data[0]) + except (json.JSONDecodeError, ValueError): + logger.error("Failed to parse blob payload: %s", payload_item) + continue + + if openai_msg and isinstance(openai_msg, dict): + bedrock_msg = _openai_to_bedrock(openai_msg) + if bedrock_msg.get("content"): + session_msg = SessionMessage(message=bedrock_msg, message_id=0) + messages.append(session_msg) + + return messages + + @staticmethod + def exceeds_conversational_limit(message: tuple[str, str]) -> bool: + """Check if message exceeds conversational payload size limit.""" + return exceeds_conversational_limit(message) diff --git a/src/bedrock_agentcore/memory/integrations/strands/converters/protocol.py b/src/bedrock_agentcore/memory/integrations/strands/converters/protocol.py new file mode 100644 index 00000000..d23b1157 --- /dev/null +++ b/src/bedrock_agentcore/memory/integrations/strands/converters/protocol.py @@ -0,0 +1,28 @@ +"""Shared protocol and utilities for memory converters.""" + +from typing import Any, Protocol, Tuple + +from strands.types.session import SessionMessage + +CONVERSATIONAL_MAX_SIZE = 9000 + + +class MemoryConverter(Protocol): + """Protocol for converting between Strands messages and STM event payloads.""" + + @staticmethod + def message_to_payload(session_message: SessionMessage) -> list[Tuple[str, str]]: + """Convert SessionMessage to STM event payload format.""" + + @staticmethod + def events_to_messages(events: list[dict[str, Any]]) -> list[SessionMessage]: + """Convert STM events to SessionMessages.""" + + @staticmethod + def exceeds_conversational_limit(message: tuple[str, str]) -> bool: + """Check if message exceeds conversational payload size limit.""" + + +def exceeds_conversational_limit(message: tuple[str, str]) -> bool: + """Check if message exceeds the conversational payload size limit.""" + return sum(len(text) for text in message) >= CONVERSATIONAL_MAX_SIZE diff --git a/src/bedrock_agentcore/memory/integrations/strands/session_manager.py b/src/bedrock_agentcore/memory/integrations/strands/session_manager.py index a40bcd86..f9d1c35d 100644 --- a/src/bedrock_agentcore/memory/integrations/strands/session_manager.py +++ b/src/bedrock_agentcore/memory/integrations/strands/session_manager.py @@ -22,7 +22,7 @@ from bedrock_agentcore.memory.client import MemoryClient from bedrock_agentcore.memory.models.filters import EventMetadataFilter, LeftExpression, OperatorType, RightExpression -from .bedrock_converter import AgentCoreMemoryConverter +from .converters import BedrockConverseConverter, MemoryConverter from .config import AgentCoreMemoryConfig, RetrievalConfig if TYPE_CHECKING: @@ -98,6 +98,7 @@ def _get_monotonic_timestamp(cls, desired_timestamp: Optional[datetime] = None) def __init__( self, agentcore_memory_config: AgentCoreMemoryConfig, + converter: Optional[type[MemoryConverter]] = None, region_name: Optional[str] = None, boto_session: Optional[boto3.Session] = None, boto_client_config: 
Optional[BotocoreConfig] = None, @@ -107,12 +108,15 @@ def __init__( Args: agentcore_memory_config (AgentCoreMemoryConfig): Configuration for AgentCore Memory integration. + converter (Optional[type[MemoryConverter]], optional): Converter used for message format transformation. + Defaults to BedrockConverseConverter. region_name (Optional[str], optional): AWS region for Bedrock AgentCore Memory. Defaults to None. boto_session (Optional[boto3.Session], optional): Optional boto3 session. Defaults to None. boto_client_config (Optional[BotocoreConfig], optional): Optional boto3 client configuration. Defaults to None. **kwargs (Any): Additional keyword arguments. """ + self.converter = converter or BedrockConverseConverter self.config = agentcore_memory_config self.memory_client = MemoryClient(region_name=region_name) session = boto_session or boto3.Session(region_name=region_name) @@ -417,11 +421,11 @@ def create_message( raise SessionException(f"Session ID mismatch: expected {self.config.session_id}, got {session_id}") # Convert and check size ONCE (not again at flush) - messages = AgentCoreMemoryConverter.message_to_payload(session_message) + messages = self.converter.message_to_payload(session_message) if not messages: return None - is_blob = AgentCoreMemoryConverter.exceeds_conversational_limit(messages[0]) + is_blob = self.converter.exceeds_conversational_limit(messages[0]) # Parse the original timestamp and use it as desired timestamp original_timestamp = datetime.fromisoformat(session_message.created_at.replace("Z", "+00:00")) @@ -545,7 +549,9 @@ def list_messages( session_id=session_id, max_results=max_results, ) - messages = AgentCoreMemoryConverter.events_to_messages(events) + messages = self.converter.events_to_messages(events) + if self.config.filter_restored_tool_context: + messages = self._filter_restored_tool_context(messages) if limit is not None: return messages[offset : offset + limit] else: @@ -555,6 +561,31 @@ def list_messages( logger.error("Failed to list messages from AgentCore Memory: %s", e) return [] + def _filter_restored_tool_context(self, messages: list[SessionMessage]) -> list[SessionMessage]: + """Strip historical toolUse/toolResult context from restored messages.""" + filtered_messages: list[SessionMessage] = [] + for session_message in messages: + message = session_message.to_message() + filtered_content = [ + content for content in message.get("content", []) if "toolUse" not in content and "toolResult" not in content + ] + + if not filtered_content: + continue + + filtered_message: Message = {"role": message["role"], "content": filtered_content} + filtered_messages.append( + SessionMessage( + message=filtered_message, + message_id=session_message.message_id, + redact_message=session_message.redact_message, + created_at=session_message.created_at, + updated_at=session_message.updated_at, + ) + ) + + return filtered_messages + # endregion SessionRepository interface implementation # region RepositorySessionManager overrides diff --git a/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager_openai_converter.py b/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager_openai_converter.py new file mode 100644 index 00000000..cdbbf4d5 --- /dev/null +++ b/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager_openai_converter.py @@ -0,0 +1,111 @@ +"""Session manager tests with OpenAI converter.""" + +from unittest.mock import Mock, patch + +from 
strands.types.session import Session, SessionMessage, SessionType + +from bedrock_agentcore.memory.integrations.strands.config import AgentCoreMemoryConfig +from bedrock_agentcore.memory.integrations.strands.converters import OpenAIConverseConverter +from bedrock_agentcore.memory.integrations.strands.session_manager import AgentCoreMemorySessionManager + + +def test_create_message_uses_tool_role_with_openai_converter(): + """When configured with OpenAI converter, create_event should receive TOOL role for tool outputs.""" + config = AgentCoreMemoryConfig(memory_id="test-memory-123", session_id="test-session-456", actor_id="test-actor-789") + + mock_memory_client = Mock() + mock_memory_client.create_event.return_value = {"eventId": "event_123"} + mock_memory_client.list_events.return_value = [] + mock_memory_client.gmcp_client = Mock() + mock_memory_client.gmdp_client = Mock() + + with ( + patch("bedrock_agentcore.memory.integrations.strands.session_manager.MemoryClient", return_value=mock_memory_client), + patch("boto3.Session") as mock_boto_session, + patch("strands.session.repository_session_manager.RepositorySessionManager.__init__", return_value=None), + ): + mock_session = Mock() + mock_session.region_name = "us-west-2" + mock_session.client.return_value = Mock() + mock_boto_session.return_value = mock_session + + manager = AgentCoreMemorySessionManager(config, converter=OpenAIConverseConverter) + manager.session_id = config.session_id + manager.session = Session(session_id=config.session_id, session_type=SessionType.AGENT) + + message = SessionMessage( + message={ + "role": "user", + "content": [ + { + "toolResult": { + "toolUseId": "call_777", + "content": [{"text": "ok"}], + "status": "success", + } + } + ], + }, + message_id=1, + created_at="2024-01-01T12:00:00Z", + ) + + manager.create_message(config.session_id, "agent-1", message) + + kwargs = mock_memory_client.create_event.call_args.kwargs + assert kwargs["messages"][0][1] == "tool" + + +def test_list_messages_filters_restored_tool_context(): + """Restored history should exclude toolUse/toolResult blocks.""" + config = AgentCoreMemoryConfig(memory_id="test-memory-123", session_id="test-session-456", actor_id="test-actor-789") + + mock_memory_client = Mock() + mock_memory_client.list_events.return_value = [{"payload": []}] + mock_memory_client.gmcp_client = Mock() + mock_memory_client.gmdp_client = Mock() + + with ( + patch("bedrock_agentcore.memory.integrations.strands.session_manager.MemoryClient", return_value=mock_memory_client), + patch("boto3.Session") as mock_boto_session, + patch("strands.session.repository_session_manager.RepositorySessionManager.__init__", return_value=None), + ): + mock_session = Mock() + mock_session.region_name = "us-west-2" + mock_session.client.return_value = Mock() + mock_boto_session.return_value = mock_session + + manager = AgentCoreMemorySessionManager(config, converter=OpenAIConverseConverter) + manager.session_id = config.session_id + manager.session = Session(session_id=config.session_id, session_type=SessionType.AGENT) + + manager.converter = Mock() + manager.converter.events_to_messages.return_value = [ + SessionMessage(message={"role": "user", "content": [{"text": "hello"}]}, message_id=0), + SessionMessage( + message={ + "role": "assistant", + "content": [ + {"text": "calling tool"}, + {"toolUse": {"toolUseId": "t1", "name": "foo", "input": {}}}, + ], + }, + message_id=1, + ), + SessionMessage( + message={ + "role": "user", + "content": [{"toolResult": {"toolUseId": "t1", "status": 
"success", "content": [{"text": "ok"}]}}], + }, + message_id=2, + ), + SessionMessage(message={"role": "assistant", "content": [{"text": "done"}]}, message_id=3), + ] + + messages = manager.list_messages(config.session_id, "agent-1") + + assert [m.message for m in messages] == [ + {"role": "user", "content": [{"text": "hello"}]}, + {"role": "assistant", "content": [{"text": "calling tool"}]}, + {"role": "assistant", "content": [{"text": "done"}]}, + ] diff --git a/tests/bedrock_agentcore/memory/integrations/strands/test_openai_converter.py b/tests/bedrock_agentcore/memory/integrations/strands/test_openai_converter.py new file mode 100644 index 00000000..a75bb41a --- /dev/null +++ b/tests/bedrock_agentcore/memory/integrations/strands/test_openai_converter.py @@ -0,0 +1,66 @@ +"""Tests for OpenAIConverseConverter.""" + +import json + +from strands.types.session import SessionMessage + +from bedrock_agentcore.memory.integrations.strands.converters import OpenAIConverseConverter + + +def test_tool_result_message_serializes_as_openai_tool_role(): + """toolResult messages should be saved with role='tool' for STM.""" + msg = SessionMessage( + message={ + "role": "user", + "content": [ + { + "toolResult": { + "toolUseId": "call_123", + "content": [{"text": "72°F and sunny"}], + "status": "success", + } + } + ], + }, + message_id=5, + ) + + result = OpenAIConverseConverter.message_to_payload(msg) + + assert len(result) == 1 + payload_json, role = result[0] + assert role == "tool" + + payload = json.loads(payload_json) + assert payload["role"] == "tool" + assert payload["tool_call_id"] == "call_123" + assert payload["content"] == "72°F and sunny" + + +def test_openai_tool_role_deserializes_to_strands_user_tool_result_message(): + """OpenAI role='tool' should restore as Strands toolResult content on user role.""" + events = [ + { + "payload": [ + { + "conversational": { + "content": { + "text": json.dumps( + { + "role": "tool", + "tool_call_id": "call_321", + "content": "done", + "status": "success", + } + ) + } + } + } + ] + } + ] + + messages = OpenAIConverseConverter.events_to_messages(events) + assert len(messages) == 1 + assert messages[0].message["role"] == "user" + assert messages[0].message["content"][0]["toolResult"]["toolUseId"] == "call_321" From 98f6241d54ce1ed34d8ef8972128d52ade207113 Mon Sep 17 00:00:00 2001 From: Dharamendra Kumar Date: Sat, 28 Feb 2026 22:45:30 -0800 Subject: [PATCH 2/5] fix: update conversational payload size limit to 100000 chars across converters and tests --- .../integrations/strands/bedrock_converter.py | 4 ++-- .../integrations/strands/converters/protocol.py | 2 +- .../test_agentcore_memory_session_manager.py | 15 +++++++++------ ...ore_memory_session_manager_openai_converter.py | 7 ++++++- 4 files changed, 18 insertions(+), 10 deletions(-) diff --git a/src/bedrock_agentcore/memory/integrations/strands/bedrock_converter.py b/src/bedrock_agentcore/memory/integrations/strands/bedrock_converter.py index 28df16b0..90a26617 100644 --- a/src/bedrock_agentcore/memory/integrations/strands/bedrock_converter.py +++ b/src/bedrock_agentcore/memory/integrations/strands/bedrock_converter.py @@ -8,9 +8,9 @@ logger = logging.getLogger(__name__) -# Bedrock AgentCore Data Plane conversational payload text limit is 9000 chars. +# Bedrock AgentCore Data Plane conversational payload text max is 100000 chars. 
# Ref: https://docs.aws.amazon.com/cli/latest/reference/bedrock-agentcore/create-event.html -CONVERSATIONAL_MAX_SIZE = 9000 +CONVERSATIONAL_MAX_SIZE = 100000 class AgentCoreMemoryConverter: diff --git a/src/bedrock_agentcore/memory/integrations/strands/converters/protocol.py b/src/bedrock_agentcore/memory/integrations/strands/converters/protocol.py index d23b1157..2ae5943e 100644 --- a/src/bedrock_agentcore/memory/integrations/strands/converters/protocol.py +++ b/src/bedrock_agentcore/memory/integrations/strands/converters/protocol.py @@ -4,7 +4,7 @@ from strands.types.session import SessionMessage -CONVERSATIONAL_MAX_SIZE = 9000 +CONVERSATIONAL_MAX_SIZE = 100000 class MemoryConverter(Protocol): diff --git a/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager.py b/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager.py index 19b3ec23..247a18a6 100644 --- a/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager.py +++ b/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager.py @@ -11,7 +11,10 @@ from strands.types.exceptions import SessionException from strands.types.session import Session, SessionAgent, SessionMessage, SessionType -from bedrock_agentcore.memory.integrations.strands.bedrock_converter import AgentCoreMemoryConverter +from bedrock_agentcore.memory.integrations.strands.bedrock_converter import ( + CONVERSATIONAL_MAX_SIZE, + AgentCoreMemoryConverter, +) from bedrock_agentcore.memory.integrations.strands.config import AgentCoreMemoryConfig, RetrievalConfig from bedrock_agentcore.memory.integrations.strands.session_manager import AgentCoreMemorySessionManager @@ -1599,7 +1602,7 @@ def track_blob_event(**kwargs): # Add multiple blob messages (>9KB each) for i in range(3): - large_text = f"blob_{i}_" + "x" * 10000 + large_text = f"blob_{i}_" + "x" * (CONVERSATIONAL_MAX_SIZE + 100) message = SessionMessage( message={"role": "user", "content": [{"text": large_text}]}, message_id=i, @@ -1643,7 +1646,7 @@ def track_blob_event(**kwargs): # Directly populate buffer with mixed messages base_time = datetime(2024, 1, 1, 12, 0, 0, tzinfo=timezone.utc) - blob_content = {"role": "user", "content": [{"text": "blob_A_" + "x" * 10000}]} + blob_content = {"role": "user", "content": [{"text": "blob_A_" + "x" * (CONVERSATIONAL_MAX_SIZE + 100)}]} batching_session_manager._message_buffer = [ # Session A: 2 conversational messages ("session-A", [("SessionA_conv_0", "user")], False, base_time), @@ -1843,8 +1846,8 @@ def test_blob_message_sent_via_gmdp_client(self, batching_session_manager, mock_ """Test large messages (blobs) are sent via gmdp_client.""" mock_memory_client.gmdp_client.create_event.return_value = {"event": {"eventId": "blob_event_123"}} - # Create a message that exceeds CONVERSATIONAL_MAX_SIZE (9000) - large_text = "x" * 10000 + # Create a message that exceeds CONVERSATIONAL_MAX_SIZE. 
+ large_text = "x" * (CONVERSATIONAL_MAX_SIZE + 100) message = SessionMessage( message={"role": "user", "content": [{"text": large_text}]}, message_id=1, @@ -1874,7 +1877,7 @@ def test_mixed_conversational_and_blob_messages(self, batching_session_manager, batching_session_manager.create_message("test-session-456", "test-agent", small_message) # Add large (blob) message - large_text = "x" * 10000 + large_text = "x" * (CONVERSATIONAL_MAX_SIZE + 100) large_message = SessionMessage( message={"role": "user", "content": [{"text": large_text}]}, message_id=2, diff --git a/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager_openai_converter.py b/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager_openai_converter.py index cdbbf4d5..dff6e800 100644 --- a/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager_openai_converter.py +++ b/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager_openai_converter.py @@ -58,7 +58,12 @@ def test_create_message_uses_tool_role_with_openai_converter(): def test_list_messages_filters_restored_tool_context(): """Restored history should exclude toolUse/toolResult blocks.""" - config = AgentCoreMemoryConfig(memory_id="test-memory-123", session_id="test-session-456", actor_id="test-actor-789") + config = AgentCoreMemoryConfig( + memory_id="test-memory-123", + session_id="test-session-456", + actor_id="test-actor-789", + filter_restored_tool_context=True, + ) mock_memory_client = Mock() mock_memory_client.list_events.return_value = [{"payload": []}] From 950792eec9b618a99488dbdc8dc9acafec9d2aaa Mon Sep 17 00:00:00 2001 From: Dharamendra Kumar Date: Sun, 1 Mar 2026 16:01:32 -0800 Subject: [PATCH 3/5] feat: introduce AutoConverseConverter for dynamic converter selection and enhance OpenAI converter handling - Added AutoConverseConverter to facilitate automatic converter selection based on agent model. - Updated AgentCoreMemorySessionManager to support auto converter mode. - Enhanced OpenAI converter to preserve reasoning content and handle oversized payloads. - Updated tests to validate new auto converter functionality and reasoning content preservation. 
--- .../strands/converters/__init__.py | 12 +- .../strands/converters/anthropic.py | 200 ++++++++++++++++++ .../integrations/strands/converters/auto.py | 85 ++++++++ .../integrations/strands/converters/gemini.py | 191 +++++++++++++++++ .../integrations/strands/converters/openai.py | 25 ++- .../integrations/strands/session_manager.py | 19 +- ...memory_session_manager_openai_converter.py | 111 +++++++++- .../strands/test_anthropic_converter.py | 37 ++++ .../strands/test_auto_converter.py | 75 +++++++ .../strands/test_gemini_converter.py | 48 +++++ .../strands/test_openai_converter.py | 59 ++++++ 11 files changed, 848 insertions(+), 14 deletions(-) create mode 100644 src/bedrock_agentcore/memory/integrations/strands/converters/anthropic.py create mode 100644 src/bedrock_agentcore/memory/integrations/strands/converters/auto.py create mode 100644 src/bedrock_agentcore/memory/integrations/strands/converters/gemini.py create mode 100644 tests/bedrock_agentcore/memory/integrations/strands/test_anthropic_converter.py create mode 100644 tests/bedrock_agentcore/memory/integrations/strands/test_auto_converter.py create mode 100644 tests/bedrock_agentcore/memory/integrations/strands/test_gemini_converter.py diff --git a/src/bedrock_agentcore/memory/integrations/strands/converters/__init__.py b/src/bedrock_agentcore/memory/integrations/strands/converters/__init__.py index 7d2a04de..fe096ddc 100644 --- a/src/bedrock_agentcore/memory/integrations/strands/converters/__init__.py +++ b/src/bedrock_agentcore/memory/integrations/strands/converters/__init__.py @@ -1,7 +1,17 @@ """Converters for Strands <-> STM message formats.""" +from .anthropic import AnthropicConverseConverter +from .auto import AutoConverseConverter from .bedrock import BedrockConverseConverter +from .gemini import GeminiConverseConverter from .openai import OpenAIConverseConverter from .protocol import MemoryConverter -__all__ = ["BedrockConverseConverter", "OpenAIConverseConverter", "MemoryConverter"] +__all__ = [ + "AnthropicConverseConverter", + "AutoConverseConverter", + "BedrockConverseConverter", + "GeminiConverseConverter", + "OpenAIConverseConverter", + "MemoryConverter", +] diff --git a/src/bedrock_agentcore/memory/integrations/strands/converters/anthropic.py b/src/bedrock_agentcore/memory/integrations/strands/converters/anthropic.py new file mode 100644 index 00000000..0c6ad8fc --- /dev/null +++ b/src/bedrock_agentcore/memory/integrations/strands/converters/anthropic.py @@ -0,0 +1,200 @@ +"""Anthropic-format converter for AgentCore Memory. 
+ +Converts Strands-native messages to/from an Anthropic-compatible message shape: +- text -> {"type":"text","text":...} +- toolUse -> {"type":"tool_use", ...} +- toolResult -> {"type":"tool_result", ...} +- reasoningContent -> {"type":"thinking", ...} +""" + +from __future__ import annotations + +import base64 +import json +import logging +from typing import Any, Tuple + +from strands.types.session import SessionMessage + +from .protocol import exceeds_conversational_limit + +logger = logging.getLogger(__name__) + + +class AnthropicConverseConverter: + """Converts between Strands SessionMessages and Anthropic-like STM payloads.""" + + @staticmethod + def message_to_payload(session_message: SessionMessage) -> list[Tuple[str, str]]: + """Convert a Strands SessionMessage into an Anthropic-style payload tuple.""" + message = session_message.message + content = message.get("content", []) + if not content: + return [] + + has_non_empty = any( + (isinstance(item.get("text"), str) and item["text"].strip()) + or "toolUse" in item + or "toolResult" in item + or "reasoningContent" in item + for item in content + if isinstance(item, dict) + ) + if not has_non_empty: + return [] + + formatted_content = [ + AnthropicConverseConverter._to_anthropic_block(item) for item in content if isinstance(item, dict) + ] + formatted_content = [item for item in formatted_content if item is not None] + if not formatted_content: + return [] + + # Anthropic APIs accept user/assistant roles. + role = message.get("role", "user") + if role not in {"user", "assistant"}: + role = "user" + + anthropic_msg = {"role": role, "content": formatted_content} + return [(json.dumps(anthropic_msg), role)] + + @staticmethod + def events_to_messages(events: list[dict[str, Any]]) -> list[SessionMessage]: + """Restore Strands SessionMessages from Anthropic-style payload events.""" + messages: list[SessionMessage] = [] + + for event in reversed(events): + for payload_item in event.get("payload", []): + msg_dict: dict[str, Any] | None = None + + if "conversational" in payload_item: + conv = payload_item["conversational"] + try: + msg_dict = json.loads(conv["content"]["text"]) + except (json.JSONDecodeError, KeyError, ValueError): + logger.error("Failed to parse conversational payload as Anthropic message") + continue + elif "blob" in payload_item: + try: + blob_data = json.loads(payload_item["blob"]) + if isinstance(blob_data, (tuple, list)) and len(blob_data) == 2: + msg_dict = json.loads(blob_data[0]) + except (json.JSONDecodeError, ValueError): + logger.error("Failed to parse blob payload: %s", payload_item) + continue + + if not (msg_dict and isinstance(msg_dict, dict)): + continue + if "content" not in msg_dict or not msg_dict["content"]: + continue + + role = msg_dict.get("role", "user") + if role not in {"user", "assistant"}: + role = "user" + strands_content = [ + AnthropicConverseConverter._from_anthropic_block(block) + for block in msg_dict["content"] + if isinstance(block, dict) + ] + strands_content = [item for item in strands_content if item is not None] + if not strands_content: + continue + messages.append(SessionMessage(message={"role": role, "content": strands_content}, message_id=0)) + + return messages + + @staticmethod + def exceeds_conversational_limit(message: tuple[str, str]) -> bool: + """Check if serialized message exceeds conversational payload size limit.""" + return exceeds_conversational_limit(message) + + @staticmethod + def _to_anthropic_block(item: dict[str, Any]) -> dict[str, Any] | None: + if "text" in 
item: + return {"type": "text", "text": item.get("text", "")} + + if "toolUse" in item: + tool_use = item["toolUse"] + return { + "type": "tool_use", + "id": tool_use.get("toolUseId"), + "name": tool_use.get("name"), + "input": tool_use.get("input", {}), + **( + {"reasoning_signature": tool_use.get("reasoningSignature")} + if tool_use.get("reasoningSignature") + else {} + ), + } + + if "toolResult" in item: + tool_result = item["toolResult"] + content_blocks: list[dict[str, Any]] = [] + for result_block in tool_result.get("content", []): + if "text" in result_block: + content_blocks.append({"type": "text", "text": result_block["text"]}) + elif "json" in result_block: + content_blocks.append({"type": "text", "text": json.dumps(result_block["json"])}) + return { + "type": "tool_result", + "tool_use_id": tool_result.get("toolUseId"), + "is_error": tool_result.get("status") == "error", + "content": content_blocks, + } + + if "reasoningContent" in item: + reasoning_text = item["reasoningContent"].get("reasoningText", {}) + return { + "type": "thinking", + "thinking": reasoning_text.get("text", ""), + **({"signature": reasoning_text.get("signature")} if reasoning_text.get("signature") else {}), + } + + return None + + @staticmethod + def _from_anthropic_block(block: dict[str, Any]) -> dict[str, Any] | None: + block_type = block.get("type") + if block_type == "text": + return {"text": block.get("text", "")} + + if block_type == "tool_use": + tool_use = { + "toolUseId": block.get("id", ""), + "name": block.get("name", ""), + "input": block.get("input", {}), + } + if block.get("reasoning_signature"): + tool_use["reasoningSignature"] = block["reasoning_signature"] + return {"toolUse": tool_use} + + if block_type == "tool_result": + content: list[dict[str, Any]] = [] + for content_block in block.get("content", []): + if isinstance(content_block, dict) and content_block.get("type") == "text": + content.append({"text": content_block.get("text", "")}) + return { + "toolResult": { + "toolUseId": block.get("tool_use_id", ""), + "status": "error" if block.get("is_error") else "success", + "content": content, + } + } + + if block_type == "thinking": + reasoning_text = {"text": block.get("thinking", "")} + if block.get("signature"): + reasoning_text["signature"] = block["signature"] + return {"reasoningContent": {"reasoningText": reasoning_text}} + + # Support a small compatibility path for binary thought signature if present. 
+ if block_type == "thinking_b64": + reasoning_text = {"text": block.get("thinking", "")} + if block.get("signature_b64"): + try: + reasoning_text["signature"] = base64.b64decode(block["signature_b64"]).decode("utf-8") + except Exception: + pass + return {"reasoningContent": {"reasoningText": reasoning_text}} + + return None diff --git a/src/bedrock_agentcore/memory/integrations/strands/converters/auto.py b/src/bedrock_agentcore/memory/integrations/strands/converters/auto.py new file mode 100644 index 00000000..b6e7bb38 --- /dev/null +++ b/src/bedrock_agentcore/memory/integrations/strands/converters/auto.py @@ -0,0 +1,85 @@ +"""Automatic converter selection for mixed-model sessions.""" + +from __future__ import annotations + +import logging +from typing import Any, Tuple + +from strands.types.session import SessionMessage + +from .anthropic import AnthropicConverseConverter +from .bedrock import BedrockConverseConverter +from .gemini import GeminiConverseConverter +from .openai import OpenAIConverseConverter +from .protocol import MemoryConverter, exceeds_conversational_limit + +logger = logging.getLogger(__name__) + + +class AutoConverseConverter: + """Auto-selects a write converter and can restore mixed payload formats.""" + + _write_converter: type[MemoryConverter] = BedrockConverseConverter + _read_converters: tuple[type[MemoryConverter], ...] = ( + BedrockConverseConverter, + OpenAIConverseConverter, + AnthropicConverseConverter, + GeminiConverseConverter, + ) + + @classmethod + def set_write_converter(cls, converter: type[MemoryConverter]) -> None: + cls._write_converter = converter + + @classmethod + def select_write_converter_for_model(cls, model: Any) -> type[MemoryConverter]: + """Pick a converter by model class/module name.""" + model_cls_name = model.__class__.__name__.lower() + model_mod = model.__class__.__module__.lower() + full = f"{model_mod}.{model_cls_name}" + + if "anthropic" in full: + return AnthropicConverseConverter + if "gemini" in full: + return GeminiConverseConverter + if "openai" in full: + return OpenAIConverseConverter + return BedrockConverseConverter + + @classmethod + def message_to_payload(cls, session_message: SessionMessage) -> list[Tuple[str, str]]: + return cls._write_converter.message_to_payload(session_message) + + @classmethod + def events_to_messages(cls, events: list[dict[str, Any]]) -> list[SessionMessage]: + """Decode each payload item using the first converter that succeeds. + + This allows sessions to remain readable after switching model providers. 
+ """ + restored: list[SessionMessage] = [] + + # Oldest to newest + for event in reversed(events): + payload_items = event.get("payload", []) + for payload_item in payload_items: + fake_event = {"payload": [payload_item]} + parsed = None + for converter in cls._read_converters: + try: + candidate = converter.events_to_messages([fake_event]) + except Exception: + continue + if candidate: + parsed = candidate + break + if parsed: + restored.extend(parsed) + else: + logger.debug("Skipping undecodable payload item: %s", payload_item.keys()) + + return restored + + @staticmethod + def exceeds_conversational_limit(message: tuple[str, str]) -> bool: + return exceeds_conversational_limit(message) + diff --git a/src/bedrock_agentcore/memory/integrations/strands/converters/gemini.py b/src/bedrock_agentcore/memory/integrations/strands/converters/gemini.py new file mode 100644 index 00000000..b2ed2d64 --- /dev/null +++ b/src/bedrock_agentcore/memory/integrations/strands/converters/gemini.py @@ -0,0 +1,191 @@ +"""Gemini-format converter for AgentCore Memory. + +Converts Strands-native messages to/from a Gemini-compatible shape: +- text -> {"text": ...} +- toolUse -> {"functionCall": {...}} +- toolResult -> {"functionResponse": {...}} +- reasoningContent -> {"thought": {"text": ..., "signature": ...}} +""" + +from __future__ import annotations + +import json +import logging +from typing import Any, Tuple + +from strands.types.session import SessionMessage + +from .protocol import exceeds_conversational_limit + +logger = logging.getLogger(__name__) + + +class GeminiConverseConverter: + """Converts between Strands SessionMessages and Gemini-like STM payloads.""" + + @staticmethod + def message_to_payload(session_message: SessionMessage) -> list[Tuple[str, str]]: + """Convert a Strands SessionMessage into a Gemini-style payload tuple.""" + message = session_message.message + content = message.get("content", []) + if not content: + return [] + + has_non_empty = any( + (isinstance(item.get("text"), str) and item["text"].strip()) + or "toolUse" in item + or "toolResult" in item + or "reasoningContent" in item + for item in content + if isinstance(item, dict) + ) + if not has_non_empty: + return [] + + formatted_parts = [GeminiConverseConverter._to_gemini_part(item) for item in content if isinstance(item, dict)] + formatted_parts = [item for item in formatted_parts if item is not None] + if not formatted_parts: + return [] + + role = message.get("role", "user") + if role not in {"user", "assistant"}: + role = "user" + + gemini_msg = {"role": "model" if role == "assistant" else "user", "parts": formatted_parts} + return [(json.dumps(gemini_msg), role)] + + @staticmethod + def events_to_messages(events: list[dict[str, Any]]) -> list[SessionMessage]: + """Restore Strands SessionMessages from Gemini-style payload events.""" + messages: list[SessionMessage] = [] + + for event in reversed(events): + for payload_item in event.get("payload", []): + msg_dict: dict[str, Any] | None = None + + if "conversational" in payload_item: + conv = payload_item["conversational"] + try: + msg_dict = json.loads(conv["content"]["text"]) + except (json.JSONDecodeError, KeyError, ValueError): + logger.error("Failed to parse conversational payload as Gemini message") + continue + elif "blob" in payload_item: + try: + blob_data = json.loads(payload_item["blob"]) + if isinstance(blob_data, (tuple, list)) and len(blob_data) == 2: + msg_dict = json.loads(blob_data[0]) + except (json.JSONDecodeError, ValueError): + logger.error("Failed to 
parse blob payload: %s", payload_item) + continue + + if not (msg_dict and isinstance(msg_dict, dict)): + continue + parts = msg_dict.get("parts") + if not parts: + continue + + role = msg_dict.get("role", "user") + role = "assistant" if role == "model" else "user" + if role not in {"user", "assistant"}: + role = "user" + strands_content = [ + GeminiConverseConverter._from_gemini_part(part) for part in parts if isinstance(part, dict) + ] + strands_content = [item for item in strands_content if item is not None] + if not strands_content: + continue + messages.append(SessionMessage(message={"role": role, "content": strands_content}, message_id=0)) + + return messages + + @staticmethod + def exceeds_conversational_limit(message: tuple[str, str]) -> bool: + """Check if serialized message exceeds conversational payload size limit.""" + return exceeds_conversational_limit(message) + + @staticmethod + def _to_gemini_part(item: dict[str, Any]) -> dict[str, Any] | None: + if "text" in item: + return {"text": item.get("text", "")} + + if "toolUse" in item: + tool_use = item["toolUse"] + part: dict[str, Any] = { + "functionCall": { + "id": tool_use.get("toolUseId", ""), + "name": tool_use.get("name", ""), + "args": tool_use.get("input", {}), + } + } + if tool_use.get("reasoningSignature"): + part["thoughtSignature"] = tool_use["reasoningSignature"] + return part + + if "toolResult" in item: + tool_result = item["toolResult"] + response_output: list[Any] = [] + for result_block in tool_result.get("content", []): + if "json" in result_block: + response_output.append(result_block["json"]) + elif "text" in result_block: + response_output.append({"text": result_block["text"]}) + return { + "functionResponse": { + "id": tool_result.get("toolUseId", ""), + "name": tool_result.get("toolUseId", ""), + "response": {"output": response_output}, + } + } + + if "reasoningContent" in item: + reasoning_text = item["reasoningContent"].get("reasoningText", {}) + thought: dict[str, Any] = {"text": reasoning_text.get("text", "")} + if reasoning_text.get("signature"): + thought["signature"] = reasoning_text["signature"] + return {"thought": thought} + + return None + + @staticmethod + def _from_gemini_part(part: dict[str, Any]) -> dict[str, Any] | None: + if "text" in part: + return {"text": part.get("text", "")} + + if "functionCall" in part: + fc = part["functionCall"] + tool_use = { + "toolUseId": fc.get("id", ""), + "name": fc.get("name", ""), + "input": fc.get("args", {}), + } + if part.get("thoughtSignature"): + tool_use["reasoningSignature"] = part["thoughtSignature"] + return {"toolUse": tool_use} + + if "functionResponse" in part: + fr = part["functionResponse"] + response = fr.get("response", {}) + output = response.get("output", []) + content: list[dict[str, Any]] = [] + for item in output: + if isinstance(item, dict) and "text" in item: + content.append({"text": item["text"]}) + else: + content.append({"json": item}) + return { + "toolResult": { + "toolUseId": fr.get("id", ""), + "status": "success", + "content": content, + } + } + + if "thought" in part: + thought = part["thought"] if isinstance(part["thought"], dict) else {} + reasoning_text = {"text": thought.get("text", "")} + if thought.get("signature"): + reasoning_text["signature"] = thought["signature"] + return {"reasoningContent": {"reasoningText": reasoning_text}} + + return None diff --git a/src/bedrock_agentcore/memory/integrations/strands/converters/openai.py b/src/bedrock_agentcore/memory/integrations/strands/converters/openai.py index 
dd99ff3b..9c4786ca 100644 --- a/src/bedrock_agentcore/memory/integrations/strands/converters/openai.py +++ b/src/bedrock_agentcore/memory/integrations/strands/converters/openai.py @@ -34,11 +34,18 @@ def _bedrock_to_openai(message: dict) -> dict: text_parts = [] tool_calls = [] + reasoning_blocks: list[dict[str, Any]] = [] for item in content: if "text" in item: - text = item["text"].strip() - if text: - text_parts.append(text) + text_value = item.get("text") + if isinstance(text_value, str): + text = text_value.strip() + if text: + text_parts.append(text) + elif "reasoningContent" in item: + # OpenAI message shape does not have a stable multi-turn reasoning block field. + # Preserve original block(s) in storage-only extension field for lossless restore. + reasoning_blocks.append(item) elif "toolUse" in item: tu = item["toolUse"] tool_calls.append( @@ -60,6 +67,9 @@ def _bedrock_to_openai(message: dict) -> dict: else: result["content"] = "\n".join(text_parts) if text_parts else "" + if reasoning_blocks: + result["_strands_reasoning_content"] = reasoning_blocks + return result @@ -107,6 +117,10 @@ def _openai_to_bedrock(openai_msg: dict) -> dict: } ) + for rc in openai_msg.get("_strands_reasoning_content", []): + if isinstance(rc, dict) and "reasoningContent" in rc: + content_items.append(rc) + bedrock_role = "assistant" if role == "assistant" else "user" return {"role": bedrock_role, "content": content_items} @@ -124,7 +138,10 @@ def message_to_payload(session_message: SessionMessage) -> list[Tuple[str, str]] return [] has_non_empty = any( - ("text" in item and item["text"].strip()) or "toolUse" in item or "toolResult" in item for item in content + (isinstance(item.get("text"), str) and item["text"].strip()) + or "toolUse" in item + or "toolResult" in item + for item in content ) if not has_non_empty: return [] diff --git a/src/bedrock_agentcore/memory/integrations/strands/session_manager.py b/src/bedrock_agentcore/memory/integrations/strands/session_manager.py index f9d1c35d..9fa1a23a 100644 --- a/src/bedrock_agentcore/memory/integrations/strands/session_manager.py +++ b/src/bedrock_agentcore/memory/integrations/strands/session_manager.py @@ -22,7 +22,7 @@ from bedrock_agentcore.memory.client import MemoryClient from bedrock_agentcore.memory.models.filters import EventMetadataFilter, LeftExpression, OperatorType, RightExpression -from .converters import BedrockConverseConverter, MemoryConverter +from .converters import AutoConverseConverter, BedrockConverseConverter, MemoryConverter from .config import AgentCoreMemoryConfig, RetrievalConfig if TYPE_CHECKING: @@ -98,7 +98,7 @@ def _get_monotonic_timestamp(cls, desired_timestamp: Optional[datetime] = None) def __init__( self, agentcore_memory_config: AgentCoreMemoryConfig, - converter: Optional[type[MemoryConverter]] = None, + converter: Optional[type[MemoryConverter] | str] = None, region_name: Optional[str] = None, boto_session: Optional[boto3.Session] = None, boto_client_config: Optional[BotocoreConfig] = None, @@ -116,7 +116,12 @@ def __init__( Defaults to None. **kwargs (Any): Additional keyword arguments. 
""" - self.converter = converter or BedrockConverseConverter + self._auto_converter_enabled = converter == "auto" + if self._auto_converter_enabled: + self.converter = AutoConverseConverter + AutoConverseConverter.set_write_converter(BedrockConverseConverter) + else: + self.converter = converter or BedrockConverseConverter self.config = agentcore_memory_config self.memory_client = MemoryClient(region_name=region_name) session = boto_session or boto3.Session(region_name=region_name) @@ -692,6 +697,11 @@ def register_hooks(self, registry: HookRegistry, **kwargs) -> None: @override def initialize(self, agent: "Agent", **kwargs: Any) -> None: + if self._auto_converter_enabled: + selected = AutoConverseConverter.select_write_converter_for_model(agent.model) + AutoConverseConverter.set_write_converter(selected) + logger.info("Auto converter selected %s for model %s", selected.__name__, agent.model.__class__.__name__) + if self.has_existing_agent: logger.warning( "An Agent already exists in session %s. We currently support one agent per session.", self.session_id @@ -713,7 +723,8 @@ def _flush_messages(self) -> list[dict[str, Any]]: Messages are batched by session_id - all conversational messages for the same session are combined into a single create_event() call to reduce API calls. - Blob messages (>9KB) are sent individually as they require a different API path. + Messages that exceed the conversational payload limit are sent as blob events individually + as they require a different API path. Returns: list[dict[str, Any]]: List of created event responses from AgentCore Memory. diff --git a/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager_openai_converter.py b/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager_openai_converter.py index dff6e800..4d5e5396 100644 --- a/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager_openai_converter.py +++ b/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager_openai_converter.py @@ -1,17 +1,22 @@ -"""Session manager tests with OpenAI converter.""" +"""Session manager tests with OpenAI/auto converter.""" +import json from unittest.mock import Mock, patch from strands.types.session import Session, SessionMessage, SessionType from bedrock_agentcore.memory.integrations.strands.config import AgentCoreMemoryConfig -from bedrock_agentcore.memory.integrations.strands.converters import OpenAIConverseConverter +from bedrock_agentcore.memory.integrations.strands.converters import AutoConverseConverter, OpenAIConverseConverter from bedrock_agentcore.memory.integrations.strands.session_manager import AgentCoreMemorySessionManager def test_create_message_uses_tool_role_with_openai_converter(): """When configured with OpenAI converter, create_event should receive TOOL role for tool outputs.""" - config = AgentCoreMemoryConfig(memory_id="test-memory-123", session_id="test-session-456", actor_id="test-actor-789") + config = AgentCoreMemoryConfig( + memory_id="test-memory-123", + session_id="test-session-456", + actor_id="test-actor-789", + ) mock_memory_client = Mock() mock_memory_client.create_event.return_value = {"eventId": "event_123"} @@ -20,7 +25,10 @@ def test_create_message_uses_tool_role_with_openai_converter(): mock_memory_client.gmdp_client = Mock() with ( - patch("bedrock_agentcore.memory.integrations.strands.session_manager.MemoryClient", return_value=mock_memory_client), + patch( + 
"bedrock_agentcore.memory.integrations.strands.session_manager.MemoryClient", + return_value=mock_memory_client, + ), patch("boto3.Session") as mock_boto_session, patch("strands.session.repository_session_manager.RepositorySessionManager.__init__", return_value=None), ): @@ -71,7 +79,10 @@ def test_list_messages_filters_restored_tool_context(): mock_memory_client.gmdp_client = Mock() with ( - patch("bedrock_agentcore.memory.integrations.strands.session_manager.MemoryClient", return_value=mock_memory_client), + patch( + "bedrock_agentcore.memory.integrations.strands.session_manager.MemoryClient", + return_value=mock_memory_client, + ), patch("boto3.Session") as mock_boto_session, patch("strands.session.repository_session_manager.RepositorySessionManager.__init__", return_value=None), ): @@ -114,3 +125,93 @@ def test_list_messages_filters_restored_tool_context(): {"role": "assistant", "content": [{"text": "calling tool"}]}, {"role": "assistant", "content": [{"text": "done"}]}, ] + + +def test_auto_converter_mode_selects_openai_for_openai_model(): + config = AgentCoreMemoryConfig( + memory_id="test-memory-123", + session_id="test-session-456", + actor_id="test-actor-789", + ) + + mock_memory_client = Mock() + mock_memory_client.list_events.return_value = [] + mock_memory_client.gmcp_client = Mock() + mock_memory_client.gmdp_client = Mock() + + with ( + patch( + "bedrock_agentcore.memory.integrations.strands.session_manager.MemoryClient", + return_value=mock_memory_client, + ), + patch("boto3.Session") as mock_boto_session, + patch("strands.session.repository_session_manager.RepositorySessionManager.__init__", return_value=None), + patch("strands.session.repository_session_manager.RepositorySessionManager.initialize", return_value=None), + ): + mock_session = Mock() + mock_session.region_name = "us-west-2" + mock_session.client.return_value = Mock() + mock_boto_session.return_value = mock_session + + manager = AgentCoreMemorySessionManager(config, converter="auto") + assert manager.converter is AutoConverseConverter + + mock_agent = Mock() + mock_agent.model = Mock() + mock_agent.model.__class__.__name__ = "OpenAIModel" + mock_agent.model.__class__.__module__ = "nflx_strands.models.openai" + + manager.initialize(mock_agent) + assert AutoConverseConverter._write_converter is OpenAIConverseConverter + + +def test_list_messages_with_auto_converter_restores_mixed_provider_history(): + config = AgentCoreMemoryConfig( + memory_id="test-memory-123", + session_id="test-session-456", + actor_id="test-actor-789", + ) + + openai_payload = json.dumps({"role": "assistant", "content": "hello from openai"}) + anthropic_payload = json.dumps( + {"role": "assistant", "content": [{"type": "thinking", "thinking": "trace", "signature": "sig"}]} + ) + gemini_payload = json.dumps({"role": "model", "parts": [{"text": "hello from gemini"}]}) + + mock_memory_client = Mock() + mock_memory_client.list_events.return_value = [ + {"payload": [{"conversational": {"content": {"text": openai_payload}}}]}, + {"payload": [{"conversational": {"content": {"text": anthropic_payload}}}]}, + {"payload": [{"conversational": {"content": {"text": gemini_payload}}}]}, + ] + mock_memory_client.gmcp_client = Mock() + mock_memory_client.gmdp_client = Mock() + + with ( + patch( + "bedrock_agentcore.memory.integrations.strands.session_manager.MemoryClient", + return_value=mock_memory_client, + ), + patch("boto3.Session") as mock_boto_session, + patch("strands.session.repository_session_manager.RepositorySessionManager.__init__", 
return_value=None), + ): + mock_session = Mock() + mock_session.region_name = "us-west-2" + mock_session.client.return_value = Mock() + mock_boto_session.return_value = mock_session + + manager = AgentCoreMemorySessionManager(config, converter="auto") + manager.session_id = config.session_id + manager.session = Session(session_id=config.session_id, session_type=SessionType.AGENT) + + messages = manager.list_messages(config.session_id, "agent-1") + assert len(messages) == 3 + assert any( + block.get("text") == "hello from openai" for msg in messages for block in msg.message.get("content", []) + ) + assert any( + "reasoningContent" in block for msg in messages for block in msg.message.get("content", []) + ) + assert any( + block.get("text") == "hello from gemini" for msg in messages for block in msg.message.get("content", []) + ) diff --git a/tests/bedrock_agentcore/memory/integrations/strands/test_anthropic_converter.py b/tests/bedrock_agentcore/memory/integrations/strands/test_anthropic_converter.py new file mode 100644 index 00000000..32632318 --- /dev/null +++ b/tests/bedrock_agentcore/memory/integrations/strands/test_anthropic_converter.py @@ -0,0 +1,37 @@ +"""Tests for AnthropicConverseConverter.""" + +import json + +from strands.types.session import SessionMessage + +from bedrock_agentcore.memory.integrations.strands.converters import AnthropicConverseConverter + + +def test_anthropic_converter_round_trips_tool_and_reasoning_blocks(): + msg = SessionMessage( + message={ + "role": "assistant", + "content": [ + {"text": "thinking and calling tool"}, + {"reasoningContent": {"reasoningText": {"text": "private chain", "signature": "sig"}}}, + {"toolUse": {"toolUseId": "call_1", "name": "search", "input": {"q": "x"}}}, + ], + }, + message_id=1, + ) + + payload_json, role = AnthropicConverseConverter.message_to_payload(msg)[0] + assert role == "assistant" + + payload = json.loads(payload_json) + assert payload["role"] == "assistant" + assert any(block.get("type") == "thinking" for block in payload["content"]) + assert any(block.get("type") == "tool_use" for block in payload["content"]) + + events = [{"payload": [{"conversational": {"content": {"text": payload_json}}}]}] + restored = AnthropicConverseConverter.events_to_messages(events) + + assert len(restored) == 1 + restored_content = restored[0].message["content"] + assert any("reasoningContent" in block for block in restored_content) + assert any("toolUse" in block for block in restored_content) diff --git a/tests/bedrock_agentcore/memory/integrations/strands/test_auto_converter.py b/tests/bedrock_agentcore/memory/integrations/strands/test_auto_converter.py new file mode 100644 index 00000000..efa6598e --- /dev/null +++ b/tests/bedrock_agentcore/memory/integrations/strands/test_auto_converter.py @@ -0,0 +1,75 @@ +"""Tests for AutoConverseConverter.""" + +import json + +from strands.types.session import SessionMessage + +from bedrock_agentcore.memory.integrations.strands.converters import ( + AnthropicConverseConverter, + AutoConverseConverter, + GeminiConverseConverter, + OpenAIConverseConverter, +) + + +class _OpenAIModel: + __module__ = "nflx_strands.models.openai" + + +class _AnthropicModel: + __module__ = "nflx_strands.models.anthropic" + + +class _GeminiModel: + __module__ = "nflx_strands.models.gemini" + + +def test_auto_converter_selects_provider_specific_writer(): + assert AutoConverseConverter.select_write_converter_for_model(_OpenAIModel()) is OpenAIConverseConverter + assert 
AutoConverseConverter.select_write_converter_for_model(_AnthropicModel()) is AnthropicConverseConverter + assert AutoConverseConverter.select_write_converter_for_model(_GeminiModel()) is GeminiConverseConverter + + +def test_auto_converter_restores_mixed_event_payloads(): + openai_payload = json.dumps({"role": "tool", "tool_call_id": "c1", "content": "ok"}) + anthropic_payload = json.dumps( + {"role": "assistant", "content": [{"type": "thinking", "thinking": "trace", "signature": "sig"}]} + ) + gemini_payload = json.dumps({"role": "model", "parts": [{"thought": {"text": "thought", "signature": "sig2"}}]}) + + events = [ + {"payload": [{"conversational": {"content": {"text": openai_payload}}}]}, + {"payload": [{"conversational": {"content": {"text": anthropic_payload}}}]}, + {"payload": [{"conversational": {"content": {"text": gemini_payload}}}]}, + ] + + messages = AutoConverseConverter.events_to_messages(events) + assert len(messages) == 3 + for message in messages: + assert message.message["role"] in {"user", "assistant"} + + all_content = [block for message in messages for block in message.message["content"]] + assert any("toolResult" in block for block in all_content) + assert sum(1 for block in all_content if "reasoningContent" in block) >= 2 + + +def test_auto_converter_uses_selected_writer_for_payload_shape(): + msg = SessionMessage(message={"role": "assistant", "content": [{"text": "hi"}]}, message_id=1) + AutoConverseConverter.set_write_converter(OpenAIConverseConverter) + payload_json, _ = AutoConverseConverter.message_to_payload(msg)[0] + parsed = json.loads(payload_json) + assert "role" in parsed and "content" in parsed + + +def test_auto_converter_skips_malformed_json_and_missing_required_fields(): + events = [ + {"payload": [{"conversational": {"content": {"text": "{bad-json"}}}]}, + {"payload": [{"conversational": {"content": {"text": json.dumps({"role": "assistant"})}}}]}, + {"payload": [{"conversational": {"content": {"text": json.dumps({"role": "assistant", "content": "ok"})}}}]}, + ] + + messages = AutoConverseConverter.events_to_messages(events) + + assert len(messages) == 1 + assert messages[0].message["role"] == "assistant" + assert messages[0].message["content"] == [{"text": "ok"}] diff --git a/tests/bedrock_agentcore/memory/integrations/strands/test_gemini_converter.py b/tests/bedrock_agentcore/memory/integrations/strands/test_gemini_converter.py new file mode 100644 index 00000000..aeae53f2 --- /dev/null +++ b/tests/bedrock_agentcore/memory/integrations/strands/test_gemini_converter.py @@ -0,0 +1,48 @@ +"""Tests for GeminiConverseConverter.""" + +import json + +from strands.types.session import SessionMessage + +from bedrock_agentcore.memory.integrations.strands.converters import GeminiConverseConverter + + +def test_gemini_converter_round_trips_tool_result_and_reasoning(): + msg = SessionMessage( + message={ + "role": "user", + "content": [ + {"text": "tool output follows"}, + {"toolResult": {"toolUseId": "call_2", "status": "success", "content": [{"text": "42"}]}}, + {"reasoningContent": {"reasoningText": {"text": "thought", "signature": "sig"}}}, + ], + }, + message_id=2, + ) + + payload_json, role = GeminiConverseConverter.message_to_payload(msg)[0] + assert role == "user" + + payload = json.loads(payload_json) + assert payload["role"] == "user" + assert any("functionResponse" in part for part in payload["parts"]) + assert any("thought" in part for part in payload["parts"]) + + events = [{"payload": [{"conversational": {"content": {"text": payload_json}}}]}] + 
restored = GeminiConverseConverter.events_to_messages(events) + + assert len(restored) == 1 + restored_content = restored[0].message["content"] + assert any("toolResult" in block for block in restored_content) + assert any("reasoningContent" in block for block in restored_content) + + +def test_gemini_converter_ignores_none_parts_on_restore(): + payload = json.dumps({"role": "model", "parts": [None, {"text": "kept"}]}) + events = [{"payload": [{"conversational": {"content": {"text": payload}}}]}] + + restored = GeminiConverseConverter.events_to_messages(events) + + assert len(restored) == 1 + assert restored[0].message["role"] == "assistant" + assert restored[0].message["content"] == [{"text": "kept"}] diff --git a/tests/bedrock_agentcore/memory/integrations/strands/test_openai_converter.py b/tests/bedrock_agentcore/memory/integrations/strands/test_openai_converter.py index a75bb41a..7b94db92 100644 --- a/tests/bedrock_agentcore/memory/integrations/strands/test_openai_converter.py +++ b/tests/bedrock_agentcore/memory/integrations/strands/test_openai_converter.py @@ -64,3 +64,62 @@ def test_openai_tool_role_deserializes_to_strands_user_tool_result_message(): assert len(messages) == 1 assert messages[0].message["role"] == "user" assert messages[0].message["content"][0]["toolResult"]["toolUseId"] == "call_321" + + +def test_reasoning_content_round_trips_in_openai_converter(): + """reasoningContent blocks should be preserved via storage extension field.""" + msg = SessionMessage( + message={ + "role": "assistant", + "content": [ + {"text": "answer"}, + {"reasoningContent": {"reasoningText": {"text": "chain", "signature": "abc"}}}, + ], + }, + message_id=6, + ) + + payload_json, _ = OpenAIConverseConverter.message_to_payload(msg)[0] + payload = json.loads(payload_json) + assert "_strands_reasoning_content" in payload + + events = [{"payload": [{"conversational": {"content": {"text": payload_json}}}]}] + restored = OpenAIConverseConverter.events_to_messages(events) + assert len(restored) == 1 + restored_content = restored[0].message["content"] + assert any("reasoningContent" in block for block in restored_content) + + +def test_openai_converter_round_trips_from_blob_payload(): + msg = SessionMessage( + message={ + "role": "assistant", + "content": [ + {"text": "hello from blob"}, + {"reasoningContent": {"reasoningText": {"text": "trace", "signature": "sig"}}}, + ], + }, + message_id=7, + ) + + payload_json, role = OpenAIConverseConverter.message_to_payload(msg)[0] + events = [{"payload": [{"blob": json.dumps((payload_json, role))}]}] + + restored = OpenAIConverseConverter.events_to_messages(events) + assert len(restored) == 1 + assert restored[0].message["role"] == "assistant" + assert any(block.get("text") == "hello from blob" for block in restored[0].message["content"]) + assert any("reasoningContent" in block for block in restored[0].message["content"]) + + +def test_openai_converter_returns_empty_for_empty_or_none_text_content(): + empty_msg = SessionMessage(message={"role": "assistant", "content": []}, message_id=8) + none_msg = SessionMessage(message={"role": "assistant", "content": [{"text": None}]}, message_id=9) + + assert OpenAIConverseConverter.message_to_payload(empty_msg) == [] + assert OpenAIConverseConverter.message_to_payload(none_msg) == [] + + +def test_openai_converter_detects_oversized_payload(): + oversized_payload = ("x" * 100000, "assistant") + assert OpenAIConverseConverter.exceeds_conversational_limit(oversized_payload) is True From 
2223eb0c71e940bbd4ae0ee8012fed5ad6c20039 Mon Sep 17 00:00:00 2001
From: Dharamendra Kumar
Date: Sun, 1 Mar 2026 16:53:26 -0800
Subject: [PATCH 4/5] fix: update model module paths in tests to reflect new structure

- Changed module paths for OpenAI, Anthropic, and Gemini models in test files
  to align with the updated project structure.
- Ensured consistency across tests for accurate model identification.
---
 ...est_agentcore_memory_session_manager_openai_converter.py | 2 +-
 .../memory/integrations/strands/test_auto_converter.py      | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager_openai_converter.py b/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager_openai_converter.py
index 4d5e5396..f7a8abd5 100644
--- a/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager_openai_converter.py
+++ b/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager_openai_converter.py
@@ -159,7 +159,7 @@ def test_auto_converter_mode_selects_openai_for_openai_model():
         mock_agent = Mock()
         mock_agent.model = Mock()
         mock_agent.model.__class__.__name__ = "OpenAIModel"
-        mock_agent.model.__class__.__module__ = "nflx_strands.models.openai"
+        mock_agent.model.__class__.__module__ = "strands.models.openai"
 
         manager.initialize(mock_agent)
         assert AutoConverseConverter._write_converter is OpenAIConverseConverter
diff --git a/tests/bedrock_agentcore/memory/integrations/strands/test_auto_converter.py b/tests/bedrock_agentcore/memory/integrations/strands/test_auto_converter.py
index efa6598e..e9d5d8e4 100644
--- a/tests/bedrock_agentcore/memory/integrations/strands/test_auto_converter.py
+++ b/tests/bedrock_agentcore/memory/integrations/strands/test_auto_converter.py
@@ -13,15 +13,15 @@
 
 
 class _OpenAIModel:
-    __module__ = "nflx_strands.models.openai"
+    __module__ = "strands.models.openai"
 
 
 class _AnthropicModel:
-    __module__ = "nflx_strands.models.anthropic"
+    __module__ = "strands.models.anthropic"
 
 
 class _GeminiModel:
-    __module__ = "nflx_strands.models.gemini"
+    __module__ = "strands.models.gemini"
 
 
 def test_auto_converter_selects_provider_specific_writer():

From bbbc7c02841f2248585e472dca6850c9ae3ea79f Mon Sep 17 00:00:00 2001
From: Dharamendra Kumar
Date: Wed, 4 Mar 2026 18:19:11 -0800
Subject: [PATCH 5/5] Remove nits and keep only the OpenAI converter

---
 .../memory/integrations/strands/__init__.py    |   4 +-
 .../strands/converters/__init__.py             |   8 -
 .../strands/converters/anthropic.py            | 200 ------------------
 .../integrations/strands/converters/auto.py    |  85 --------
 .../strands/converters/bedrock.py              |   7 -
 .../integrations/strands/converters/gemini.py  | 191 -----------------
 .../integrations/strands/session_manager.py    |  42 ++--
 ...memory_session_manager_openai_converter.py  |  96 +--------
 .../strands/test_anthropic_converter.py        |  37 ----
 .../strands/test_auto_converter.py             |  75 -------
 .../strands/test_bedrock_converter.py          |   2 +-
 .../strands/test_gemini_converter.py           |  48 -----
 12 files changed, 26 insertions(+), 769 deletions(-)
 delete mode 100644 src/bedrock_agentcore/memory/integrations/strands/converters/anthropic.py
 delete mode 100644 src/bedrock_agentcore/memory/integrations/strands/converters/auto.py
 delete mode 100644 src/bedrock_agentcore/memory/integrations/strands/converters/bedrock.py
 delete mode 100644 src/bedrock_agentcore/memory/integrations/strands/converters/gemini.py
 delete mode 100644
tests/bedrock_agentcore/memory/integrations/strands/test_anthropic_converter.py delete mode 100644 tests/bedrock_agentcore/memory/integrations/strands/test_auto_converter.py delete mode 100644 tests/bedrock_agentcore/memory/integrations/strands/test_gemini_converter.py diff --git a/src/bedrock_agentcore/memory/integrations/strands/__init__.py b/src/bedrock_agentcore/memory/integrations/strands/__init__.py index 5f4c0bfb..26a9d043 100644 --- a/src/bedrock_agentcore/memory/integrations/strands/__init__.py +++ b/src/bedrock_agentcore/memory/integrations/strands/__init__.py @@ -1,5 +1,5 @@ """Strands integration for Bedrock AgentCore Memory.""" -from .converters import BedrockConverseConverter, MemoryConverter, OpenAIConverseConverter +from .converters import MemoryConverter, OpenAIConverseConverter -__all__ = ["BedrockConverseConverter", "MemoryConverter", "OpenAIConverseConverter"] +__all__ = ["MemoryConverter", "OpenAIConverseConverter"] diff --git a/src/bedrock_agentcore/memory/integrations/strands/converters/__init__.py b/src/bedrock_agentcore/memory/integrations/strands/converters/__init__.py index fe096ddc..56d3093e 100644 --- a/src/bedrock_agentcore/memory/integrations/strands/converters/__init__.py +++ b/src/bedrock_agentcore/memory/integrations/strands/converters/__init__.py @@ -1,17 +1,9 @@ """Converters for Strands <-> STM message formats.""" -from .anthropic import AnthropicConverseConverter -from .auto import AutoConverseConverter -from .bedrock import BedrockConverseConverter -from .gemini import GeminiConverseConverter from .openai import OpenAIConverseConverter from .protocol import MemoryConverter __all__ = [ - "AnthropicConverseConverter", - "AutoConverseConverter", - "BedrockConverseConverter", - "GeminiConverseConverter", "OpenAIConverseConverter", "MemoryConverter", ] diff --git a/src/bedrock_agentcore/memory/integrations/strands/converters/anthropic.py b/src/bedrock_agentcore/memory/integrations/strands/converters/anthropic.py deleted file mode 100644 index 0c6ad8fc..00000000 --- a/src/bedrock_agentcore/memory/integrations/strands/converters/anthropic.py +++ /dev/null @@ -1,200 +0,0 @@ -"""Anthropic-format converter for AgentCore Memory. 
- -Converts Strands-native messages to/from an Anthropic-compatible message shape: -- text -> {"type":"text","text":...} -- toolUse -> {"type":"tool_use", ...} -- toolResult -> {"type":"tool_result", ...} -- reasoningContent -> {"type":"thinking", ...} -""" - -from __future__ import annotations - -import base64 -import json -import logging -from typing import Any, Tuple - -from strands.types.session import SessionMessage - -from .protocol import exceeds_conversational_limit - -logger = logging.getLogger(__name__) - - -class AnthropicConverseConverter: - """Converts between Strands SessionMessages and Anthropic-like STM payloads.""" - - @staticmethod - def message_to_payload(session_message: SessionMessage) -> list[Tuple[str, str]]: - """Convert a Strands SessionMessage into an Anthropic-style payload tuple.""" - message = session_message.message - content = message.get("content", []) - if not content: - return [] - - has_non_empty = any( - (isinstance(item.get("text"), str) and item["text"].strip()) - or "toolUse" in item - or "toolResult" in item - or "reasoningContent" in item - for item in content - if isinstance(item, dict) - ) - if not has_non_empty: - return [] - - formatted_content = [ - AnthropicConverseConverter._to_anthropic_block(item) for item in content if isinstance(item, dict) - ] - formatted_content = [item for item in formatted_content if item is not None] - if not formatted_content: - return [] - - # Anthropic APIs accept user/assistant roles. - role = message.get("role", "user") - if role not in {"user", "assistant"}: - role = "user" - - anthropic_msg = {"role": role, "content": formatted_content} - return [(json.dumps(anthropic_msg), role)] - - @staticmethod - def events_to_messages(events: list[dict[str, Any]]) -> list[SessionMessage]: - """Restore Strands SessionMessages from Anthropic-style payload events.""" - messages: list[SessionMessage] = [] - - for event in reversed(events): - for payload_item in event.get("payload", []): - msg_dict: dict[str, Any] | None = None - - if "conversational" in payload_item: - conv = payload_item["conversational"] - try: - msg_dict = json.loads(conv["content"]["text"]) - except (json.JSONDecodeError, KeyError, ValueError): - logger.error("Failed to parse conversational payload as Anthropic message") - continue - elif "blob" in payload_item: - try: - blob_data = json.loads(payload_item["blob"]) - if isinstance(blob_data, (tuple, list)) and len(blob_data) == 2: - msg_dict = json.loads(blob_data[0]) - except (json.JSONDecodeError, ValueError): - logger.error("Failed to parse blob payload: %s", payload_item) - continue - - if not (msg_dict and isinstance(msg_dict, dict)): - continue - if "content" not in msg_dict or not msg_dict["content"]: - continue - - role = msg_dict.get("role", "user") - if role not in {"user", "assistant"}: - role = "user" - strands_content = [ - AnthropicConverseConverter._from_anthropic_block(block) - for block in msg_dict["content"] - if isinstance(block, dict) - ] - strands_content = [item for item in strands_content if item is not None] - if not strands_content: - continue - messages.append(SessionMessage(message={"role": role, "content": strands_content}, message_id=0)) - - return messages - - @staticmethod - def exceeds_conversational_limit(message: tuple[str, str]) -> bool: - """Check if serialized message exceeds conversational payload size limit.""" - return exceeds_conversational_limit(message) - - @staticmethod - def _to_anthropic_block(item: dict[str, Any]) -> dict[str, Any] | None: - if "text" in 
item: - return {"type": "text", "text": item.get("text", "")} - - if "toolUse" in item: - tool_use = item["toolUse"] - return { - "type": "tool_use", - "id": tool_use.get("toolUseId"), - "name": tool_use.get("name"), - "input": tool_use.get("input", {}), - **( - {"reasoning_signature": tool_use.get("reasoningSignature")} - if tool_use.get("reasoningSignature") - else {} - ), - } - - if "toolResult" in item: - tool_result = item["toolResult"] - content_blocks: list[dict[str, Any]] = [] - for result_block in tool_result.get("content", []): - if "text" in result_block: - content_blocks.append({"type": "text", "text": result_block["text"]}) - elif "json" in result_block: - content_blocks.append({"type": "text", "text": json.dumps(result_block["json"])}) - return { - "type": "tool_result", - "tool_use_id": tool_result.get("toolUseId"), - "is_error": tool_result.get("status") == "error", - "content": content_blocks, - } - - if "reasoningContent" in item: - reasoning_text = item["reasoningContent"].get("reasoningText", {}) - return { - "type": "thinking", - "thinking": reasoning_text.get("text", ""), - **({"signature": reasoning_text.get("signature")} if reasoning_text.get("signature") else {}), - } - - return None - - @staticmethod - def _from_anthropic_block(block: dict[str, Any]) -> dict[str, Any] | None: - block_type = block.get("type") - if block_type == "text": - return {"text": block.get("text", "")} - - if block_type == "tool_use": - tool_use = { - "toolUseId": block.get("id", ""), - "name": block.get("name", ""), - "input": block.get("input", {}), - } - if block.get("reasoning_signature"): - tool_use["reasoningSignature"] = block["reasoning_signature"] - return {"toolUse": tool_use} - - if block_type == "tool_result": - content: list[dict[str, Any]] = [] - for content_block in block.get("content", []): - if isinstance(content_block, dict) and content_block.get("type") == "text": - content.append({"text": content_block.get("text", "")}) - return { - "toolResult": { - "toolUseId": block.get("tool_use_id", ""), - "status": "error" if block.get("is_error") else "success", - "content": content, - } - } - - if block_type == "thinking": - reasoning_text = {"text": block.get("thinking", "")} - if block.get("signature"): - reasoning_text["signature"] = block["signature"] - return {"reasoningContent": {"reasoningText": reasoning_text}} - - # Support a small compatibility path for binary thought signature if present. 
- if block_type == "thinking_b64": - reasoning_text = {"text": block.get("thinking", "")} - if block.get("signature_b64"): - try: - reasoning_text["signature"] = base64.b64decode(block["signature_b64"]).decode("utf-8") - except Exception: - pass - return {"reasoningContent": {"reasoningText": reasoning_text}} - - return None diff --git a/src/bedrock_agentcore/memory/integrations/strands/converters/auto.py b/src/bedrock_agentcore/memory/integrations/strands/converters/auto.py deleted file mode 100644 index b6e7bb38..00000000 --- a/src/bedrock_agentcore/memory/integrations/strands/converters/auto.py +++ /dev/null @@ -1,85 +0,0 @@ -"""Automatic converter selection for mixed-model sessions.""" - -from __future__ import annotations - -import logging -from typing import Any, Tuple - -from strands.types.session import SessionMessage - -from .anthropic import AnthropicConverseConverter -from .bedrock import BedrockConverseConverter -from .gemini import GeminiConverseConverter -from .openai import OpenAIConverseConverter -from .protocol import MemoryConverter, exceeds_conversational_limit - -logger = logging.getLogger(__name__) - - -class AutoConverseConverter: - """Auto-selects a write converter and can restore mixed payload formats.""" - - _write_converter: type[MemoryConverter] = BedrockConverseConverter - _read_converters: tuple[type[MemoryConverter], ...] = ( - BedrockConverseConverter, - OpenAIConverseConverter, - AnthropicConverseConverter, - GeminiConverseConverter, - ) - - @classmethod - def set_write_converter(cls, converter: type[MemoryConverter]) -> None: - cls._write_converter = converter - - @classmethod - def select_write_converter_for_model(cls, model: Any) -> type[MemoryConverter]: - """Pick a converter by model class/module name.""" - model_cls_name = model.__class__.__name__.lower() - model_mod = model.__class__.__module__.lower() - full = f"{model_mod}.{model_cls_name}" - - if "anthropic" in full: - return AnthropicConverseConverter - if "gemini" in full: - return GeminiConverseConverter - if "openai" in full: - return OpenAIConverseConverter - return BedrockConverseConverter - - @classmethod - def message_to_payload(cls, session_message: SessionMessage) -> list[Tuple[str, str]]: - return cls._write_converter.message_to_payload(session_message) - - @classmethod - def events_to_messages(cls, events: list[dict[str, Any]]) -> list[SessionMessage]: - """Decode each payload item using the first converter that succeeds. - - This allows sessions to remain readable after switching model providers. 
- """ - restored: list[SessionMessage] = [] - - # Oldest to newest - for event in reversed(events): - payload_items = event.get("payload", []) - for payload_item in payload_items: - fake_event = {"payload": [payload_item]} - parsed = None - for converter in cls._read_converters: - try: - candidate = converter.events_to_messages([fake_event]) - except Exception: - continue - if candidate: - parsed = candidate - break - if parsed: - restored.extend(parsed) - else: - logger.debug("Skipping undecodable payload item: %s", payload_item.keys()) - - return restored - - @staticmethod - def exceeds_conversational_limit(message: tuple[str, str]) -> bool: - return exceeds_conversational_limit(message) - diff --git a/src/bedrock_agentcore/memory/integrations/strands/converters/bedrock.py b/src/bedrock_agentcore/memory/integrations/strands/converters/bedrock.py deleted file mode 100644 index e211b4a4..00000000 --- a/src/bedrock_agentcore/memory/integrations/strands/converters/bedrock.py +++ /dev/null @@ -1,7 +0,0 @@ -"""Strands-native message-shape converter adapter.""" - -from ..bedrock_converter import AgentCoreMemoryConverter - - -class BedrockConverseConverter(AgentCoreMemoryConverter): - """Alias adapter for the default Strands-native-shape converter.""" diff --git a/src/bedrock_agentcore/memory/integrations/strands/converters/gemini.py b/src/bedrock_agentcore/memory/integrations/strands/converters/gemini.py deleted file mode 100644 index b2ed2d64..00000000 --- a/src/bedrock_agentcore/memory/integrations/strands/converters/gemini.py +++ /dev/null @@ -1,191 +0,0 @@ -"""Gemini-format converter for AgentCore Memory. - -Converts Strands-native messages to/from a Gemini-compatible shape: -- text -> {"text": ...} -- toolUse -> {"functionCall": {...}} -- toolResult -> {"functionResponse": {...}} -- reasoningContent -> {"thought": {"text": ..., "signature": ...}} -""" - -from __future__ import annotations - -import json -import logging -from typing import Any, Tuple - -from strands.types.session import SessionMessage - -from .protocol import exceeds_conversational_limit - -logger = logging.getLogger(__name__) - - -class GeminiConverseConverter: - """Converts between Strands SessionMessages and Gemini-like STM payloads.""" - - @staticmethod - def message_to_payload(session_message: SessionMessage) -> list[Tuple[str, str]]: - """Convert a Strands SessionMessage into a Gemini-style payload tuple.""" - message = session_message.message - content = message.get("content", []) - if not content: - return [] - - has_non_empty = any( - (isinstance(item.get("text"), str) and item["text"].strip()) - or "toolUse" in item - or "toolResult" in item - or "reasoningContent" in item - for item in content - if isinstance(item, dict) - ) - if not has_non_empty: - return [] - - formatted_parts = [GeminiConverseConverter._to_gemini_part(item) for item in content if isinstance(item, dict)] - formatted_parts = [item for item in formatted_parts if item is not None] - if not formatted_parts: - return [] - - role = message.get("role", "user") - if role not in {"user", "assistant"}: - role = "user" - - gemini_msg = {"role": "model" if role == "assistant" else "user", "parts": formatted_parts} - return [(json.dumps(gemini_msg), role)] - - @staticmethod - def events_to_messages(events: list[dict[str, Any]]) -> list[SessionMessage]: - """Restore Strands SessionMessages from Gemini-style payload events.""" - messages: list[SessionMessage] = [] - - for event in reversed(events): - for payload_item in event.get("payload", []): - msg_dict: 
dict[str, Any] | None = None - - if "conversational" in payload_item: - conv = payload_item["conversational"] - try: - msg_dict = json.loads(conv["content"]["text"]) - except (json.JSONDecodeError, KeyError, ValueError): - logger.error("Failed to parse conversational payload as Gemini message") - continue - elif "blob" in payload_item: - try: - blob_data = json.loads(payload_item["blob"]) - if isinstance(blob_data, (tuple, list)) and len(blob_data) == 2: - msg_dict = json.loads(blob_data[0]) - except (json.JSONDecodeError, ValueError): - logger.error("Failed to parse blob payload: %s", payload_item) - continue - - if not (msg_dict and isinstance(msg_dict, dict)): - continue - parts = msg_dict.get("parts") - if not parts: - continue - - role = msg_dict.get("role", "user") - role = "assistant" if role == "model" else "user" - if role not in {"user", "assistant"}: - role = "user" - strands_content = [ - GeminiConverseConverter._from_gemini_part(part) for part in parts if isinstance(part, dict) - ] - strands_content = [item for item in strands_content if item is not None] - if not strands_content: - continue - messages.append(SessionMessage(message={"role": role, "content": strands_content}, message_id=0)) - - return messages - - @staticmethod - def exceeds_conversational_limit(message: tuple[str, str]) -> bool: - """Check if serialized message exceeds conversational payload size limit.""" - return exceeds_conversational_limit(message) - - @staticmethod - def _to_gemini_part(item: dict[str, Any]) -> dict[str, Any] | None: - if "text" in item: - return {"text": item.get("text", "")} - - if "toolUse" in item: - tool_use = item["toolUse"] - part: dict[str, Any] = { - "functionCall": { - "id": tool_use.get("toolUseId", ""), - "name": tool_use.get("name", ""), - "args": tool_use.get("input", {}), - } - } - if tool_use.get("reasoningSignature"): - part["thoughtSignature"] = tool_use["reasoningSignature"] - return part - - if "toolResult" in item: - tool_result = item["toolResult"] - response_output: list[Any] = [] - for result_block in tool_result.get("content", []): - if "json" in result_block: - response_output.append(result_block["json"]) - elif "text" in result_block: - response_output.append({"text": result_block["text"]}) - return { - "functionResponse": { - "id": tool_result.get("toolUseId", ""), - "name": tool_result.get("toolUseId", ""), - "response": {"output": response_output}, - } - } - - if "reasoningContent" in item: - reasoning_text = item["reasoningContent"].get("reasoningText", {}) - thought: dict[str, Any] = {"text": reasoning_text.get("text", "")} - if reasoning_text.get("signature"): - thought["signature"] = reasoning_text["signature"] - return {"thought": thought} - - return None - - @staticmethod - def _from_gemini_part(part: dict[str, Any]) -> dict[str, Any] | None: - if "text" in part: - return {"text": part.get("text", "")} - - if "functionCall" in part: - fc = part["functionCall"] - tool_use = { - "toolUseId": fc.get("id", ""), - "name": fc.get("name", ""), - "input": fc.get("args", {}), - } - if part.get("thoughtSignature"): - tool_use["reasoningSignature"] = part["thoughtSignature"] - return {"toolUse": tool_use} - - if "functionResponse" in part: - fr = part["functionResponse"] - response = fr.get("response", {}) - output = response.get("output", []) - content: list[dict[str, Any]] = [] - for item in output: - if isinstance(item, dict) and "text" in item: - content.append({"text": item["text"]}) - else: - content.append({"json": item}) - return { - "toolResult": { - 
"toolUseId": fr.get("id", ""), - "status": "success", - "content": content, - } - } - - if "thought" in part: - thought = part["thought"] if isinstance(part["thought"], dict) else {} - reasoning_text = {"text": thought.get("text", "")} - if thought.get("signature"): - reasoning_text["signature"] = thought["signature"] - return {"reasoningContent": {"reasoningText": reasoning_text}} - - return None diff --git a/src/bedrock_agentcore/memory/integrations/strands/session_manager.py b/src/bedrock_agentcore/memory/integrations/strands/session_manager.py index 9fa1a23a..78a0da77 100644 --- a/src/bedrock_agentcore/memory/integrations/strands/session_manager.py +++ b/src/bedrock_agentcore/memory/integrations/strands/session_manager.py @@ -20,10 +20,16 @@ from typing_extensions import override from bedrock_agentcore.memory.client import MemoryClient -from bedrock_agentcore.memory.models.filters import EventMetadataFilter, LeftExpression, OperatorType, RightExpression - -from .converters import AutoConverseConverter, BedrockConverseConverter, MemoryConverter +from bedrock_agentcore.memory.models.filters import ( + EventMetadataFilter, + LeftExpression, + OperatorType, + RightExpression, +) + +from .bedrock_converter import AgentCoreMemoryConverter from .config import AgentCoreMemoryConfig, RetrievalConfig +from .converters import MemoryConverter if TYPE_CHECKING: from strands.agent.agent import Agent @@ -98,7 +104,7 @@ def _get_monotonic_timestamp(cls, desired_timestamp: Optional[datetime] = None) def __init__( self, agentcore_memory_config: AgentCoreMemoryConfig, - converter: Optional[type[MemoryConverter] | str] = None, + converter: Optional[type[MemoryConverter]] = None, region_name: Optional[str] = None, boto_session: Optional[boto3.Session] = None, boto_client_config: Optional[BotocoreConfig] = None, @@ -108,20 +114,15 @@ def __init__( Args: agentcore_memory_config (AgentCoreMemoryConfig): Configuration for AgentCore Memory integration. - converter (Optional[type[MemoryConverter]], optional): Converter used for message format transformation. - Defaults to BedrockConverseConverter. + converter (Optional[type[MemoryConverter]], optional): Optional custom converter. + If None, native Bedrock/Strands converter is used. region_name (Optional[str], optional): AWS region for Bedrock AgentCore Memory. Defaults to None. boto_session (Optional[boto3.Session], optional): Optional boto3 session. Defaults to None. boto_client_config (Optional[BotocoreConfig], optional): Optional boto3 client configuration. Defaults to None. **kwargs (Any): Additional keyword arguments. 
""" - self._auto_converter_enabled = converter == "auto" - if self._auto_converter_enabled: - self.converter = AutoConverseConverter - AutoConverseConverter.set_write_converter(BedrockConverseConverter) - else: - self.converter = converter or BedrockConverseConverter + self.converter = converter self.config = agentcore_memory_config self.memory_client = MemoryClient(region_name=region_name) session = boto_session or boto3.Session(region_name=region_name) @@ -426,11 +427,12 @@ def create_message( raise SessionException(f"Session ID mismatch: expected {self.config.session_id}, got {session_id}") # Convert and check size ONCE (not again at flush) - messages = self.converter.message_to_payload(session_message) + converter = self.converter or AgentCoreMemoryConverter + messages = converter.message_to_payload(session_message) if not messages: return None - is_blob = self.converter.exceeds_conversational_limit(messages[0]) + is_blob = converter.exceeds_conversational_limit(messages[0]) # Parse the original timestamp and use it as desired timestamp original_timestamp = datetime.fromisoformat(session_message.created_at.replace("Z", "+00:00")) @@ -554,7 +556,8 @@ def list_messages( session_id=session_id, max_results=max_results, ) - messages = self.converter.events_to_messages(events) + converter = self.converter or AgentCoreMemoryConverter + messages = converter.events_to_messages(events) if self.config.filter_restored_tool_context: messages = self._filter_restored_tool_context(messages) if limit is not None: @@ -572,7 +575,9 @@ def _filter_restored_tool_context(self, messages: list[SessionMessage]) -> list[ for session_message in messages: message = session_message.to_message() filtered_content = [ - content for content in message.get("content", []) if "toolUse" not in content and "toolResult" not in content + content + for content in message.get("content", []) + if "toolUse" not in content and "toolResult" not in content ] if not filtered_content: @@ -697,11 +702,6 @@ def register_hooks(self, registry: HookRegistry, **kwargs) -> None: @override def initialize(self, agent: "Agent", **kwargs: Any) -> None: - if self._auto_converter_enabled: - selected = AutoConverseConverter.select_write_converter_for_model(agent.model) - AutoConverseConverter.set_write_converter(selected) - logger.info("Auto converter selected %s for model %s", selected.__name__, agent.model.__class__.__name__) - if self.has_existing_agent: logger.warning( "An Agent already exists in session %s. 
We currently support one agent per session.", self.session_id diff --git a/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager_openai_converter.py b/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager_openai_converter.py index f7a8abd5..57a23d6f 100644 --- a/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager_openai_converter.py +++ b/tests/bedrock_agentcore/memory/integrations/strands/test_agentcore_memory_session_manager_openai_converter.py @@ -1,12 +1,10 @@ -"""Session manager tests with OpenAI/auto converter.""" - -import json +"""Session manager tests with OpenAI converter.""" from unittest.mock import Mock, patch from strands.types.session import Session, SessionMessage, SessionType from bedrock_agentcore.memory.integrations.strands.config import AgentCoreMemoryConfig -from bedrock_agentcore.memory.integrations.strands.converters import AutoConverseConverter, OpenAIConverseConverter +from bedrock_agentcore.memory.integrations.strands.converters import OpenAIConverseConverter from bedrock_agentcore.memory.integrations.strands.session_manager import AgentCoreMemorySessionManager @@ -125,93 +123,3 @@ def test_list_messages_filters_restored_tool_context(): {"role": "assistant", "content": [{"text": "calling tool"}]}, {"role": "assistant", "content": [{"text": "done"}]}, ] - - -def test_auto_converter_mode_selects_openai_for_openai_model(): - config = AgentCoreMemoryConfig( - memory_id="test-memory-123", - session_id="test-session-456", - actor_id="test-actor-789", - ) - - mock_memory_client = Mock() - mock_memory_client.list_events.return_value = [] - mock_memory_client.gmcp_client = Mock() - mock_memory_client.gmdp_client = Mock() - - with ( - patch( - "bedrock_agentcore.memory.integrations.strands.session_manager.MemoryClient", - return_value=mock_memory_client, - ), - patch("boto3.Session") as mock_boto_session, - patch("strands.session.repository_session_manager.RepositorySessionManager.__init__", return_value=None), - patch("strands.session.repository_session_manager.RepositorySessionManager.initialize", return_value=None), - ): - mock_session = Mock() - mock_session.region_name = "us-west-2" - mock_session.client.return_value = Mock() - mock_boto_session.return_value = mock_session - - manager = AgentCoreMemorySessionManager(config, converter="auto") - assert manager.converter is AutoConverseConverter - - mock_agent = Mock() - mock_agent.model = Mock() - mock_agent.model.__class__.__name__ = "OpenAIModel" - mock_agent.model.__class__.__module__ = "strands.models.openai" - - manager.initialize(mock_agent) - assert AutoConverseConverter._write_converter is OpenAIConverseConverter - - -def test_list_messages_with_auto_converter_restores_mixed_provider_history(): - config = AgentCoreMemoryConfig( - memory_id="test-memory-123", - session_id="test-session-456", - actor_id="test-actor-789", - ) - - openai_payload = json.dumps({"role": "assistant", "content": "hello from openai"}) - anthropic_payload = json.dumps( - {"role": "assistant", "content": [{"type": "thinking", "thinking": "trace", "signature": "sig"}]} - ) - gemini_payload = json.dumps({"role": "model", "parts": [{"text": "hello from gemini"}]}) - - mock_memory_client = Mock() - mock_memory_client.list_events.return_value = [ - {"payload": [{"conversational": {"content": {"text": openai_payload}}}]}, - {"payload": [{"conversational": {"content": {"text": anthropic_payload}}}]}, - {"payload": [{"conversational": 
{"content": {"text": gemini_payload}}}]}, - ] - mock_memory_client.gmcp_client = Mock() - mock_memory_client.gmdp_client = Mock() - - with ( - patch( - "bedrock_agentcore.memory.integrations.strands.session_manager.MemoryClient", - return_value=mock_memory_client, - ), - patch("boto3.Session") as mock_boto_session, - patch("strands.session.repository_session_manager.RepositorySessionManager.__init__", return_value=None), - ): - mock_session = Mock() - mock_session.region_name = "us-west-2" - mock_session.client.return_value = Mock() - mock_boto_session.return_value = mock_session - - manager = AgentCoreMemorySessionManager(config, converter="auto") - manager.session_id = config.session_id - manager.session = Session(session_id=config.session_id, session_type=SessionType.AGENT) - - messages = manager.list_messages(config.session_id, "agent-1") - assert len(messages) == 3 - assert any( - block.get("text") == "hello from openai" for msg in messages for block in msg.message.get("content", []) - ) - assert any( - "reasoningContent" in block for msg in messages for block in msg.message.get("content", []) - ) - assert any( - block.get("text") == "hello from gemini" for msg in messages for block in msg.message.get("content", []) - ) diff --git a/tests/bedrock_agentcore/memory/integrations/strands/test_anthropic_converter.py b/tests/bedrock_agentcore/memory/integrations/strands/test_anthropic_converter.py deleted file mode 100644 index 32632318..00000000 --- a/tests/bedrock_agentcore/memory/integrations/strands/test_anthropic_converter.py +++ /dev/null @@ -1,37 +0,0 @@ -"""Tests for AnthropicConverseConverter.""" - -import json - -from strands.types.session import SessionMessage - -from bedrock_agentcore.memory.integrations.strands.converters import AnthropicConverseConverter - - -def test_anthropic_converter_round_trips_tool_and_reasoning_blocks(): - msg = SessionMessage( - message={ - "role": "assistant", - "content": [ - {"text": "thinking and calling tool"}, - {"reasoningContent": {"reasoningText": {"text": "private chain", "signature": "sig"}}}, - {"toolUse": {"toolUseId": "call_1", "name": "search", "input": {"q": "x"}}}, - ], - }, - message_id=1, - ) - - payload_json, role = AnthropicConverseConverter.message_to_payload(msg)[0] - assert role == "assistant" - - payload = json.loads(payload_json) - assert payload["role"] == "assistant" - assert any(block.get("type") == "thinking" for block in payload["content"]) - assert any(block.get("type") == "tool_use" for block in payload["content"]) - - events = [{"payload": [{"conversational": {"content": {"text": payload_json}}}]}] - restored = AnthropicConverseConverter.events_to_messages(events) - - assert len(restored) == 1 - restored_content = restored[0].message["content"] - assert any("reasoningContent" in block for block in restored_content) - assert any("toolUse" in block for block in restored_content) diff --git a/tests/bedrock_agentcore/memory/integrations/strands/test_auto_converter.py b/tests/bedrock_agentcore/memory/integrations/strands/test_auto_converter.py deleted file mode 100644 index e9d5d8e4..00000000 --- a/tests/bedrock_agentcore/memory/integrations/strands/test_auto_converter.py +++ /dev/null @@ -1,75 +0,0 @@ -"""Tests for AutoConverseConverter.""" - -import json - -from strands.types.session import SessionMessage - -from bedrock_agentcore.memory.integrations.strands.converters import ( - AnthropicConverseConverter, - AutoConverseConverter, - GeminiConverseConverter, - OpenAIConverseConverter, -) - - -class _OpenAIModel: - 
__module__ = "strands.models.openai" - - -class _AnthropicModel: - __module__ = "strands.models.anthropic" - - -class _GeminiModel: - __module__ = "strands.models.gemini" - - -def test_auto_converter_selects_provider_specific_writer(): - assert AutoConverseConverter.select_write_converter_for_model(_OpenAIModel()) is OpenAIConverseConverter - assert AutoConverseConverter.select_write_converter_for_model(_AnthropicModel()) is AnthropicConverseConverter - assert AutoConverseConverter.select_write_converter_for_model(_GeminiModel()) is GeminiConverseConverter - - -def test_auto_converter_restores_mixed_event_payloads(): - openai_payload = json.dumps({"role": "tool", "tool_call_id": "c1", "content": "ok"}) - anthropic_payload = json.dumps( - {"role": "assistant", "content": [{"type": "thinking", "thinking": "trace", "signature": "sig"}]} - ) - gemini_payload = json.dumps({"role": "model", "parts": [{"thought": {"text": "thought", "signature": "sig2"}}]}) - - events = [ - {"payload": [{"conversational": {"content": {"text": openai_payload}}}]}, - {"payload": [{"conversational": {"content": {"text": anthropic_payload}}}]}, - {"payload": [{"conversational": {"content": {"text": gemini_payload}}}]}, - ] - - messages = AutoConverseConverter.events_to_messages(events) - assert len(messages) == 3 - for message in messages: - assert message.message["role"] in {"user", "assistant"} - - all_content = [block for message in messages for block in message.message["content"]] - assert any("toolResult" in block for block in all_content) - assert sum(1 for block in all_content if "reasoningContent" in block) >= 2 - - -def test_auto_converter_uses_selected_writer_for_payload_shape(): - msg = SessionMessage(message={"role": "assistant", "content": [{"text": "hi"}]}, message_id=1) - AutoConverseConverter.set_write_converter(OpenAIConverseConverter) - payload_json, _ = AutoConverseConverter.message_to_payload(msg)[0] - parsed = json.loads(payload_json) - assert "role" in parsed and "content" in parsed - - -def test_auto_converter_skips_malformed_json_and_missing_required_fields(): - events = [ - {"payload": [{"conversational": {"content": {"text": "{bad-json"}}}]}, - {"payload": [{"conversational": {"content": {"text": json.dumps({"role": "assistant"})}}}]}, - {"payload": [{"conversational": {"content": {"text": json.dumps({"role": "assistant", "content": "ok"})}}}]}, - ] - - messages = AutoConverseConverter.events_to_messages(events) - - assert len(messages) == 1 - assert messages[0].message["role"] == "assistant" - assert messages[0].message["content"] == [{"text": "ok"}] diff --git a/tests/bedrock_agentcore/memory/integrations/strands/test_bedrock_converter.py b/tests/bedrock_agentcore/memory/integrations/strands/test_bedrock_converter.py index c44175f4..a1f66bd8 100644 --- a/tests/bedrock_agentcore/memory/integrations/strands/test_bedrock_converter.py +++ b/tests/bedrock_agentcore/memory/integrations/strands/test_bedrock_converter.py @@ -107,7 +107,7 @@ def test_exceeds_conversational_limit_false(self): def test_exceeds_conversational_limit_true(self): """Test message over conversational limit.""" - long_text = "x" * 5000 + long_text = "x" * 60000 message = (long_text, long_text) result = AgentCoreMemoryConverter.exceeds_conversational_limit(message) assert result is True diff --git a/tests/bedrock_agentcore/memory/integrations/strands/test_gemini_converter.py b/tests/bedrock_agentcore/memory/integrations/strands/test_gemini_converter.py deleted file mode 100644 index aeae53f2..00000000 --- 
a/tests/bedrock_agentcore/memory/integrations/strands/test_gemini_converter.py +++ /dev/null @@ -1,48 +0,0 @@ -"""Tests for GeminiConverseConverter.""" - -import json - -from strands.types.session import SessionMessage - -from bedrock_agentcore.memory.integrations.strands.converters import GeminiConverseConverter - - -def test_gemini_converter_round_trips_tool_result_and_reasoning(): - msg = SessionMessage( - message={ - "role": "user", - "content": [ - {"text": "tool output follows"}, - {"toolResult": {"toolUseId": "call_2", "status": "success", "content": [{"text": "42"}]}}, - {"reasoningContent": {"reasoningText": {"text": "thought", "signature": "sig"}}}, - ], - }, - message_id=2, - ) - - payload_json, role = GeminiConverseConverter.message_to_payload(msg)[0] - assert role == "user" - - payload = json.loads(payload_json) - assert payload["role"] == "user" - assert any("functionResponse" in part for part in payload["parts"]) - assert any("thought" in part for part in payload["parts"]) - - events = [{"payload": [{"conversational": {"content": {"text": payload_json}}}]}] - restored = GeminiConverseConverter.events_to_messages(events) - - assert len(restored) == 1 - restored_content = restored[0].message["content"] - assert any("toolResult" in block for block in restored_content) - assert any("reasoningContent" in block for block in restored_content) - - -def test_gemini_converter_ignores_none_parts_on_restore(): - payload = json.dumps({"role": "model", "parts": [None, {"text": "kept"}]}) - events = [{"payload": [{"conversational": {"content": {"text": payload}}}]}] - - restored = GeminiConverseConverter.events_to_messages(events) - - assert len(restored) == 1 - assert restored[0].message["role"] == "assistant" - assert restored[0].message["content"] == [{"text": "kept"}]
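Reviewer note (not part of the patch series): after PATCH 5/5, the session manager takes an optional converter class and falls back to the native AgentCoreMemoryConverter when none is given. A minimal usage sketch of that end state follows; the memory/session/actor IDs and region are placeholders, and wiring the manager into a Strands Agent is assumed to follow the usual Strands session-manager setup rather than anything introduced here.

# Illustrative sketch only -- IDs and region below are placeholders, not values from this patch.
from bedrock_agentcore.memory.integrations.strands.config import AgentCoreMemoryConfig
from bedrock_agentcore.memory.integrations.strands.converters import OpenAIConverseConverter
from bedrock_agentcore.memory.integrations.strands.session_manager import AgentCoreMemorySessionManager

config = AgentCoreMemoryConfig(
    memory_id="my-memory-id",           # placeholder
    session_id="my-session-id",         # placeholder
    actor_id="my-actor-id",             # placeholder
    filter_restored_tool_context=True,  # optionally strip historical toolUse/toolResult blocks on restore
)

# converter=None (the default) uses the native AgentCoreMemoryConverter;
# passing OpenAIConverseConverter stores events in OpenAI message format instead.
manager = AgentCoreMemorySessionManager(
    config,
    converter=OpenAIConverseConverter,
    region_name="us-west-2",  # placeholder region
)
# The manager would then be attached to a Strands Agent as its session manager
# (standard Strands wiring, assumed here and not shown in this patch).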