diff --git a/README.md b/README.md
index dd03c101..47b0615a 100644
--- a/README.md
+++ b/README.md
@@ -277,6 +277,17 @@ export AZURE_OPENAI_API_KEY="your_api_key_here"
export AZURE_OPENAI_API_BASE="your_api_base_here"
```
+If you want to use [MiniMax](https://www.minimax.io/) models, export your MiniMax API key:
+```bash
+export MINIMAX_API_KEY="your_api_key_here"
+```
+Then set `llm_type` to `minimax` (or a specific model name like `MiniMax-M2.7`) in your task config file:
+```yaml
+llm_type: minimax
+model: MiniMax-M2.7
+```
+Available MiniMax models: `MiniMax-M2.7` (1M context), `MiniMax-M2.5` (1M context), and `MiniMax-M2.5-highspeed` (204K context).
+
## Simulation
### Framework Required Modules
diff --git a/agentverse/llms/__init__.py b/agentverse/llms/__init__.py
index f5bcda3f..406cc637 100644
--- a/agentverse/llms/__init__.py
+++ b/agentverse/llms/__init__.py
@@ -39,3 +39,4 @@
from .base import BaseLLM, BaseChatModel, BaseCompletionModel, LLMResult
from .openai import OpenAIChat
+from .minimax import MiniMaxChat
diff --git a/agentverse/llms/minimax.py b/agentverse/llms/minimax.py
new file mode 100644
index 00000000..5f91edf0
--- /dev/null
+++ b/agentverse/llms/minimax.py
@@ -0,0 +1,310 @@
+import ast
+import json
+import logging
+import os
+import re
+from typing import Dict, List, Optional, Union
+
+from pydantic import Field
+
+from agentverse.llms.base import LLMResult
+from agentverse.logging import logger
+from agentverse.message import Message
+
+from . import llm_registry
+from .base import BaseChatModel, BaseModelArgs
+from .utils.jsonrepair import JsonRepair
+
+try:
+    from openai import OpenAI, AsyncOpenAI
+    from openai import OpenAIError
+
+    # Record success too: previously the flag was assigned only in the
+    # failure branch, so a successful import left the name undefined and
+    # any later `if is_openai_available:` check would raise NameError.
+    is_openai_available = True
+except ImportError:
+    is_openai_available = False
+    logger.warn(
+        "openai package is not installed. Please install it via `pip install openai`"
+    )
+
+# Credentials are read from the environment once, at import time; tests that
+# need a key must patch the module-level constant, not just os.environ.
+MINIMAX_API_KEY = os.environ.get("MINIMAX_API_KEY")
+MINIMAX_BASE_URL = os.environ.get(
+    "MINIMAX_BASE_URL", "https://api.minimax.io/v1"
+)
+
+# MiniMax model token limits (context-window sizes, used by send_token_limit)
+MINIMAX_TOKEN_LIMITS = {
+    "MiniMax-M2.7": 1000000,
+    "MiniMax-M2.5": 1000000,
+    "MiniMax-M2.5-highspeed": 204800,
+}
+
+# MiniMax model pricing (per 1K tokens, USD)
+# NOTE(review): prices are hard-coded snapshots; confirm against MiniMax's
+# current pricing page before relying on get_spend() for billing.
+MINIMAX_INPUT_COST = {
+    "MiniMax-M2.7": 0.0008,
+    "MiniMax-M2.5": 0.0005,
+    "MiniMax-M2.5-highspeed": 0.0003,
+}
+
+MINIMAX_OUTPUT_COST = {
+    "MiniMax-M2.7": 0.0032,
+    "MiniMax-M2.5": 0.002,
+    "MiniMax-M2.5-highspeed": 0.0012,
+}
+
+
+def _strip_think_tags(content: str) -> str:
+    """Strip <think>...</think> reasoning blocks from MiniMax M2.5+ responses.
+
+    The literal tag strings were garbled out of the original patch, leaving a
+    degenerate check (`"" in content`) and a regex that matched nothing
+    useful; this restores the intended tag-stripping behavior (non-greedy,
+    DOTALL so multi-line reasoning blocks are removed, trailing whitespace
+    after each block collapsed). None/empty input is returned unchanged.
+    """
+    if content and "<think>" in content:
+        return re.sub(
+            r"<think>.*?</think>\s*", "", content, flags=re.DOTALL
+        ).strip()
+    return content
+
+
+class MiniMaxChatArgs(BaseModelArgs):
+    """Request parameters forwarded verbatim to the MiniMax chat endpoint."""
+
+    model: str = Field(default="MiniMax-M2.7")
+    max_tokens: int = Field(default=2048)
+    temperature: float = Field(default=0.7)  # MiniMaxChat clamps to [0.0, 1.0]
+    top_p: float = Field(default=1.0)
+    n: int = Field(default=1)
+    stop: Optional[Union[str, List]] = Field(default=None)
+
+
+@llm_registry.register("minimax")
+@llm_registry.register("MiniMax-M2.7")
+@llm_registry.register("MiniMax-M2.5")
+@llm_registry.register("MiniMax-M2.5-highspeed")
+class MiniMaxChat(BaseChatModel):
+    """Chat model client for the MiniMax OpenAI-compatible endpoint.
+
+    Credentials come from MINIMAX_API_KEY / MINIMAX_BASE_URL (read at import
+    time). Token usage is accumulated per instance so get_spend() can report
+    an approximate USD cost. Sync and async paths share the same response
+    post-processing so function-call validation and <think>-tag stripping
+    behave identically in both.
+    """
+
+    args: MiniMaxChatArgs = Field(default_factory=MiniMaxChatArgs)
+    client_args: Optional[Dict] = Field(
+        default={"api_key": MINIMAX_API_KEY, "base_url": MINIMAX_BASE_URL}
+    )
+
+    # Cumulative usage across every call made through this instance.
+    total_prompt_tokens: int = 0
+    total_completion_tokens: int = 0
+
+    def __init__(self, max_retry: int = 3, **kwargs):
+        """Merge kwargs over MiniMaxChatArgs defaults; warn on leftovers."""
+        args = MiniMaxChatArgs().dict()
+        client_args = {
+            "api_key": MINIMAX_API_KEY,
+            "base_url": MINIMAX_BASE_URL,
+        }
+        for k, v in args.items():
+            args[k] = kwargs.pop(k, v)
+        if len(kwargs) > 0:
+            logger.warn(f"Unused arguments: {kwargs}")
+        # Clamp temperature to MiniMax range [0.0, 1.0]
+        args["temperature"] = max(0.0, min(1.0, args["temperature"]))
+        super().__init__(args=args, max_retry=max_retry, client_args=client_args)
+
+    @classmethod
+    def send_token_limit(cls, model: str) -> int:
+        """Return *model*'s context-window size (conservative 204800 default)."""
+        return MINIMAX_TOKEN_LIMITS.get(model, 204800)
+
+    @staticmethod
+    def _parse_function_call(function_call, functions):
+        """Validate and parse a function_call returned by the API.
+
+        Strips a spurious ``function.``/``functions.`` prefix from the name
+        (prefix-only: the original async code used str.replace, which would
+        also mangle occurrences in the middle of the name), verifies the name
+        is among the offered *functions*, and parses the JSON arguments.
+        Raises ValueError on bad name or unparseable arguments so the retry
+        machinery in the base class can re-ask the model.
+        """
+        function_name = function_call.name
+        for prefix in ("function.", "functions."):
+            if function_name.startswith(prefix):
+                function_name = function_name[len(prefix):]
+                break
+        if not any(f["name"] == function_name for f in functions):
+            logger.warn(
+                f"The returned function name {function_name} is not in the list of valid functions. Retrying..."
+            )
+            raise ValueError(
+                f"The returned function name {function_name} is not in the list of valid functions."
+            )
+        raw = function_call.arguments
+        try:
+            # json.loads handles true/false/null, which ast.literal_eval
+            # rejects; literal_eval covers Python-style quoting; JsonRepair
+            # is the last resort for malformed output.
+            arguments = json.loads(raw)
+        except json.decoder.JSONDecodeError:
+            try:
+                arguments = ast.literal_eval(raw)
+            except Exception:
+                try:
+                    arguments = json.loads(JsonRepair(raw).repair())
+                except Exception:
+                    logger.warn(
+                        "The returned argument in function call is not valid json. Retrying..."
+                    )
+                    raise ValueError(
+                        "The returned argument in function call is not valid json."
+                    )
+        return function_name, arguments
+
+    def _build_result(self, response, functions) -> LLMResult:
+        """Log the reply, record usage, and convert *response* to LLMResult."""
+        message = response.choices[0].message
+        logger.log_prompt(
+            [
+                {
+                    "role": "assistant",
+                    "content": message.content,
+                }
+            ]
+        )
+        self.collect_metrics(response)
+        usage = {
+            "send_tokens": response.usage.prompt_tokens,
+            "recv_tokens": response.usage.completion_tokens,
+            "total_tokens": response.usage.total_tokens,
+        }
+        if functions and message.function_call is not None:
+            function_name, arguments = self._parse_function_call(
+                message.function_call, functions
+            )
+            return LLMResult(
+                content=_strip_think_tags(message.content or ""),
+                function_name=function_name,
+                function_arguments=arguments,
+                **usage,
+            )
+        return LLMResult(
+            content=_strip_think_tags(message.content or ""),
+            **usage,
+        )
+
+    def generate_response(
+        self,
+        prepend_prompt: str = "",
+        history: Optional[List[dict]] = None,
+        append_prompt: str = "",
+        functions: Optional[List[dict]] = None,
+    ) -> LLMResult:
+        """Synchronously query MiniMax.
+
+        Returns an LLMResult with the think-tag-stripped content and, when
+        the model elects to call one of *functions*, the validated function
+        name and parsed arguments. Raises ValueError on an invalid function
+        call; API errors propagate unchanged. (Mutable-default arguments
+        replaced with None sentinels; behavior for callers is identical.)
+        """
+        functions = functions or []
+        messages = self.construct_messages(
+            prepend_prompt, history or [], append_prompt
+        )
+        logger.log_prompt(messages)
+
+        minimax_client = OpenAI(
+            api_key=self.client_args["api_key"],
+            base_url=self.client_args["base_url"],
+        )
+        if functions:
+            response = minimax_client.chat.completions.create(
+                messages=messages,
+                functions=functions,
+                **self.args.dict(),
+            )
+        else:
+            response = minimax_client.chat.completions.create(
+                messages=messages,
+                **self.args.dict(),
+            )
+        return self._build_result(response, functions)
+
+    async def agenerate_response(
+        self,
+        prepend_prompt: str = "",
+        history: Optional[List[dict]] = None,
+        append_prompt: str = "",
+        functions: Optional[List[dict]] = None,
+    ) -> LLMResult:
+        """Async counterpart of generate_response with identical semantics.
+
+        (The original async function-call branch dropped the message content
+        and the sync branch skipped name validation; both now go through the
+        shared _build_result/_parse_function_call helpers.)
+        """
+        functions = functions or []
+        messages = self.construct_messages(
+            prepend_prompt, history or [], append_prompt
+        )
+        logger.log_prompt(messages)
+
+        async_minimax_client = AsyncOpenAI(
+            api_key=self.client_args["api_key"],
+            base_url=self.client_args["base_url"],
+        )
+        if functions:
+            response = await async_minimax_client.chat.completions.create(
+                messages=messages,
+                functions=functions,
+                **self.args.dict(),
+            )
+        else:
+            response = await async_minimax_client.chat.completions.create(
+                messages=messages,
+                **self.args.dict(),
+            )
+        return self._build_result(response, functions)
+
+    def construct_messages(
+        self, prepend_prompt: str, history: List[dict], append_prompt: str
+    ):
+        """Assemble [system?, *history, user?] chat messages, skipping empties."""
+        messages = []
+        if prepend_prompt != "":
+            messages.append({"role": "system", "content": prepend_prompt})
+        if len(history) > 0:
+            messages += history
+        if append_prompt != "":
+            messages.append({"role": "user", "content": append_prompt})
+        return messages
+
+    def collect_metrics(self, response):
+        """Accumulate prompt/completion token counts from *response*."""
+        self.total_prompt_tokens += response.usage.prompt_tokens
+        self.total_completion_tokens += response.usage.completion_tokens
+
+    def get_spend(self) -> float:
+        """Approximate USD spend so far from the per-1K-token price tables."""
+        model = self.args.model
+        input_cost = MINIMAX_INPUT_COST.get(model, 0.0)
+        output_cost = MINIMAX_OUTPUT_COST.get(model, 0.0)
+        return (
+            self.total_prompt_tokens * input_cost / 1000.0
+            + self.total_completion_tokens * output_cost / 1000.0
+        )
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_minimax_integration.py b/tests/test_minimax_integration.py
new file mode 100644
index 00000000..1fde5d1d
--- /dev/null
+++ b/tests/test_minimax_integration.py
@@ -0,0 +1,65 @@
+"""Integration tests for MiniMax LLM provider.
+
+These tests require a valid MINIMAX_API_KEY environment variable.
+Skip with: pytest -m "not integration"
+"""
+import os
+
+import pytest
+
+# AgentVerse requires OPENAI_API_KEY for module import chain
+os.environ.setdefault("OPENAI_API_KEY", "sk-test-placeholder")
+
+# Skip the whole module when no real MiniMax credentials are present;
+# every test here hits the live API and consumes paid tokens.
+pytestmark = pytest.mark.skipif(
+    not os.environ.get("MINIMAX_API_KEY"),
+    reason="MINIMAX_API_KEY not set",
+)
+
+
+@pytest.fixture
+def minimax_chat():
+    """Create a MiniMaxChat instance with real credentials."""
+    # Import inside the fixture so collection succeeds even if the
+    # agentverse package (or one of its dependencies) is unavailable.
+    from agentverse.llms.minimax import MiniMaxChat
+
+    # Cheapest model, low temperature, small output cap to keep cost down.
+    return MiniMaxChat(model="MiniMax-M2.5-highspeed", temperature=0.1, max_tokens=128)
+
+
+@pytest.mark.integration
+def test_basic_generation(minimax_chat):
+    """Test basic text generation with MiniMax API."""
+    result = minimax_chat.generate_response(
+        prepend_prompt="You are a helpful assistant. Reply in one short sentence.",
+        append_prompt="What is 2 + 2?",
+    )
+    assert result.content
+    # NOTE(review): assumes the model answers with the digit "4" rather than
+    # the word "four" — potentially flaky; confirm acceptable for CI.
+    assert "4" in result.content
+    assert result.send_tokens > 0
+    assert result.recv_tokens > 0
+
+
+@pytest.mark.integration
+def test_generation_with_history(minimax_chat):
+    """Test generation with conversation history."""
+    history = [
+        {"role": "user", "content": "My name is Alice."},
+        {"role": "assistant", "content": "Nice to meet you, Alice!"},
+    ]
+    result = minimax_chat.generate_response(
+        prepend_prompt="You are a helpful assistant.",
+        history=history,
+        append_prompt="What is my name?",
+    )
+    assert result.content
+    # NOTE(review): relies on the model echoing the name from history verbatim.
+    assert "Alice" in result.content
+
+
+@pytest.mark.integration
+@pytest.mark.asyncio
+async def test_async_generation(minimax_chat):
+    """Test async text generation with MiniMax API."""
+    # Requires the pytest-asyncio plugin for the asyncio marker to take effect.
+    result = await minimax_chat.agenerate_response(
+        prepend_prompt="You are a helpful assistant. Reply in one short sentence.",
+        append_prompt="What is the capital of France?",
+    )
+    assert result.content
+    assert "Paris" in result.content
diff --git a/tests/test_minimax_unit.py b/tests/test_minimax_unit.py
new file mode 100644
index 00000000..c5bdde1c
--- /dev/null
+++ b/tests/test_minimax_unit.py
@@ -0,0 +1,427 @@
+"""Unit tests for MiniMax LLM provider in AgentVerse."""
+import ast
+import json
+import os
+import re
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+
+# AgentVerse requires OPENAI_API_KEY for module import chain
+os.environ.setdefault("OPENAI_API_KEY", "sk-test-placeholder")
+
+from agentverse.llms.minimax import (
+ MINIMAX_INPUT_COST,
+ MINIMAX_OUTPUT_COST,
+ MINIMAX_TOKEN_LIMITS,
+ MiniMaxChat,
+ MiniMaxChatArgs,
+ _strip_think_tags,
+)
+from agentverse.llms.base import LLMResult
+
+
+class TestStripThinkTags:
+    """Tests for _strip_think_tags utility.
+
+    The literal <think>/</think> tag strings were garbled out of the original
+    patch (leaving e.g. "internal reasoningFinal answer"); restored here to
+    match the utility's documented purpose.
+    """
+
+    def test_no_think_tags(self):
+        assert _strip_think_tags("Hello world") == "Hello world"
+
+    def test_empty_string(self):
+        assert _strip_think_tags("") == ""
+
+    def test_none_input(self):
+        assert _strip_think_tags(None) is None
+
+    def test_single_think_tag(self):
+        text = "<think>internal reasoning</think>Final answer"
+        assert _strip_think_tags(text) == "Final answer"
+
+    def test_think_tag_with_newlines(self):
+        text = "<think>\nstep 1\nstep 2\n</think>\nHere is the result"
+        result = _strip_think_tags(text)
+        assert "think" not in result
+        assert "Here is the result" in result
+
+    def test_multiple_think_tags(self):
+        text = "<think>first</think>middle<think>second</think>end"
+        result = _strip_think_tags(text)
+        assert result == "middleend"
+
+
+class TestMiniMaxChatArgs:
+ """Tests for MiniMaxChatArgs defaults."""
+
+ def test_default_model(self):
+ args = MiniMaxChatArgs()
+ assert args.model == "MiniMax-M2.7"
+
+ def test_default_temperature(self):
+ args = MiniMaxChatArgs()
+ assert args.temperature == 0.7
+
+ def test_default_max_tokens(self):
+ args = MiniMaxChatArgs()
+ assert args.max_tokens == 2048
+
+ def test_custom_model(self):
+ args = MiniMaxChatArgs(model="MiniMax-M2.5")
+ assert args.model == "MiniMax-M2.5"
+
+ def test_custom_temperature(self):
+ args = MiniMaxChatArgs(temperature=0.5)
+ assert args.temperature == 0.5
+
+
+class TestMiniMaxChatInit:
+    """Tests for MiniMaxChat initialization."""
+
+    # Both patches are used: MINIMAX_API_KEY is read from the environment at
+    # module-import time, so patching the module-level constant is what the
+    # constructor actually observes; the env patch keeps the two consistent.
+    @patch.dict(os.environ, {"MINIMAX_API_KEY": "test-key"}, clear=False)
+    def test_default_init(self):
+        with patch("agentverse.llms.minimax.MINIMAX_API_KEY", "test-key"):
+            chat = MiniMaxChat()
+            assert chat.args.model == "MiniMax-M2.7"
+            assert chat.args.temperature == 0.7
+            assert chat.total_prompt_tokens == 0
+            assert chat.total_completion_tokens == 0
+
+    @patch.dict(os.environ, {"MINIMAX_API_KEY": "test-key"}, clear=False)
+    def test_custom_model_init(self):
+        with patch("agentverse.llms.minimax.MINIMAX_API_KEY", "test-key"):
+            chat = MiniMaxChat(model="MiniMax-M2.5-highspeed")
+            assert chat.args.model == "MiniMax-M2.5-highspeed"
+
+    # Out-of-range temperatures are clamped to [0.0, 1.0] in __init__.
+    @patch.dict(os.environ, {"MINIMAX_API_KEY": "test-key"}, clear=False)
+    def test_temperature_clamping_high(self):
+        with patch("agentverse.llms.minimax.MINIMAX_API_KEY", "test-key"):
+            chat = MiniMaxChat(temperature=2.0)
+            assert chat.args.temperature == 1.0
+
+    @patch.dict(os.environ, {"MINIMAX_API_KEY": "test-key"}, clear=False)
+    def test_temperature_clamping_low(self):
+        with patch("agentverse.llms.minimax.MINIMAX_API_KEY", "test-key"):
+            chat = MiniMaxChat(temperature=-0.5)
+            assert chat.args.temperature == 0.0
+
+    # 0.0 is a valid boundary value and must not be rejected or bumped.
+    @patch.dict(os.environ, {"MINIMAX_API_KEY": "test-key"}, clear=False)
+    def test_temperature_zero_accepted(self):
+        with patch("agentverse.llms.minimax.MINIMAX_API_KEY", "test-key"):
+            chat = MiniMaxChat(temperature=0.0)
+            assert chat.args.temperature == 0.0
+
+
+class TestMiniMaxChatTokenLimits:
+    """send_token_limit returns each model's context size, defaulting to 204800."""
+
+    def test_m27_limit(self):
+        assert MiniMaxChat.send_token_limit("MiniMax-M2.7") == 1_000_000
+
+    def test_m25_limit(self):
+        assert MiniMaxChat.send_token_limit("MiniMax-M2.5") == 1_000_000
+
+    def test_m25_highspeed_limit(self):
+        assert MiniMaxChat.send_token_limit("MiniMax-M2.5-highspeed") == 204_800
+
+    def test_unknown_model_default(self):
+        # Unrecognized names fall back to the conservative 204800 limit.
+        assert MiniMaxChat.send_token_limit("unknown-model") == 204_800
+
+
+class TestMiniMaxChatMessages:
+    """Tests for construct_messages."""
+
+    @patch.dict(os.environ, {"MINIMAX_API_KEY": "test-key"}, clear=False)
+    def test_system_and_user_messages(self):
+        with patch("agentverse.llms.minimax.MINIMAX_API_KEY", "test-key"):
+            chat = MiniMaxChat()
+            messages = chat.construct_messages(
+                prepend_prompt="You are a helper.",
+                history=[],
+                append_prompt="Hello!",
+            )
+            # prepend_prompt maps to the system role, append_prompt to user.
+            assert len(messages) == 2
+            assert messages[0]["role"] == "system"
+            assert messages[0]["content"] == "You are a helper."
+            assert messages[1]["role"] == "user"
+            assert messages[1]["content"] == "Hello!"
+
+    @patch.dict(os.environ, {"MINIMAX_API_KEY": "test-key"}, clear=False)
+    def test_with_history(self):
+        with patch("agentverse.llms.minimax.MINIMAX_API_KEY", "test-key"):
+            chat = MiniMaxChat()
+            history = [
+                {"role": "user", "content": "Hi"},
+                {"role": "assistant", "content": "Hello!"},
+            ]
+            messages = chat.construct_messages(
+                prepend_prompt="System",
+                history=history,
+                append_prompt="How are you?",
+            )
+            # system + two history entries + trailing user message
+            assert len(messages) == 4
+
+    @patch.dict(os.environ, {"MINIMAX_API_KEY": "test-key"}, clear=False)
+    def test_empty_prompts(self):
+        with patch("agentverse.llms.minimax.MINIMAX_API_KEY", "test-key"):
+            chat = MiniMaxChat()
+            messages = chat.construct_messages(
+                prepend_prompt="",
+                history=[],
+                append_prompt="",
+            )
+            # Empty strings are skipped entirely, yielding no messages.
+            assert len(messages) == 0
+
+
+class TestMiniMaxChatSpend:
+    """Tests for cost tracking."""
+
+    @patch.dict(os.environ, {"MINIMAX_API_KEY": "test-key"}, clear=False)
+    def test_zero_spend(self):
+        with patch("agentverse.llms.minimax.MINIMAX_API_KEY", "test-key"):
+            chat = MiniMaxChat()
+            assert chat.get_spend() == 0.0
+
+    @patch.dict(os.environ, {"MINIMAX_API_KEY": "test-key"}, clear=False)
+    def test_spend_calculation(self):
+        with patch("agentverse.llms.minimax.MINIMAX_API_KEY", "test-key"):
+            chat = MiniMaxChat(model="MiniMax-M2.7")
+            chat.total_prompt_tokens = 1000
+            chat.total_completion_tokens = 500
+            # get_spend() prices are per 1K tokens, hence the /1000.0 scaling.
+            expected = (
+                1000 * MINIMAX_INPUT_COST["MiniMax-M2.7"] / 1000.0
+                + 500 * MINIMAX_OUTPUT_COST["MiniMax-M2.7"] / 1000.0
+            )
+            assert chat.get_spend() == expected
+
+
+class TestMiniMaxChatGenerate:
+    """Tests for generate_response with mocked OpenAI client."""
+
+    def _mock_response(self, content="Hello!", prompt_tokens=10, completion_tokens=5):
+        """Build a MagicMock mimicking an OpenAI chat-completions response."""
+        response = MagicMock()
+        response.choices = [MagicMock()]
+        response.choices[0].message.content = content
+        response.choices[0].message.function_call = None
+        response.usage.prompt_tokens = prompt_tokens
+        response.usage.completion_tokens = completion_tokens
+        response.usage.total_tokens = prompt_tokens + completion_tokens
+        return response
+
+    @patch("agentverse.llms.minimax.OpenAI")
+    @patch("agentverse.llms.minimax.MINIMAX_API_KEY", "test-key")
+    def test_generate_basic(self, mock_openai_cls):
+        mock_client = MagicMock()
+        mock_openai_cls.return_value = mock_client
+        mock_client.chat.completions.create.return_value = self._mock_response(
+            "Test response"
+        )
+
+        chat = MiniMaxChat()
+        result = chat.generate_response(
+            prepend_prompt="System",
+            append_prompt="Hello",
+        )
+
+        assert isinstance(result, LLMResult)
+        assert result.content == "Test response"
+        assert result.send_tokens == 10
+        assert result.recv_tokens == 5
+
+    @patch("agentverse.llms.minimax.OpenAI")
+    @patch("agentverse.llms.minimax.MINIMAX_API_KEY", "test-key")
+    def test_generate_strips_think_tags(self, mock_openai_cls):
+        mock_client = MagicMock()
+        mock_openai_cls.return_value = mock_client
+        # Restored the garbled <think>...</think> literal: the reasoning
+        # block must be stripped from the returned content.
+        mock_client.chat.completions.create.return_value = self._mock_response(
+            "<think>reasoning here</think>Clean output"
+        )
+
+        chat = MiniMaxChat()
+        result = chat.generate_response(append_prompt="Test")
+
+        assert result.content == "Clean output"
+
+    @patch("agentverse.llms.minimax.OpenAI")
+    @patch("agentverse.llms.minimax.MINIMAX_API_KEY", "test-key")
+    def test_generate_with_functions(self, mock_openai_cls):
+        mock_client = MagicMock()
+        mock_openai_cls.return_value = mock_client
+        response = self._mock_response()
+        response.choices[0].message.function_call = MagicMock()
+        response.choices[0].message.function_call.name = "get_weather"
+        response.choices[0].message.function_call.arguments = '{"city": "Beijing"}'
+        mock_client.chat.completions.create.return_value = response
+
+        chat = MiniMaxChat()
+        functions = [{"name": "get_weather", "parameters": {}}]
+        result = chat.generate_response(
+            append_prompt="Weather?", functions=functions
+        )
+
+        assert result.function_name == "get_weather"
+        assert result.function_arguments == {"city": "Beijing"}
+
+    @patch("agentverse.llms.minimax.OpenAI")
+    @patch("agentverse.llms.minimax.MINIMAX_API_KEY", "test-key")
+    def test_generate_tracks_metrics(self, mock_openai_cls):
+        mock_client = MagicMock()
+        mock_openai_cls.return_value = mock_client
+        mock_client.chat.completions.create.return_value = self._mock_response(
+            prompt_tokens=100, completion_tokens=50
+        )
+
+        chat = MiniMaxChat()
+        chat.generate_response(append_prompt="Test")
+
+        assert chat.total_prompt_tokens == 100
+        assert chat.total_completion_tokens == 50
+
+
+class TestMiniMaxChatAsyncGenerate:
+    """Tests for agenerate_response with mocked AsyncOpenAI client."""
+
+    def _mock_response(self, content="Hello!", prompt_tokens=10, completion_tokens=5):
+        """Build a MagicMock mimicking an OpenAI chat-completions response."""
+        response = MagicMock()
+        response.choices = [MagicMock()]
+        response.choices[0].message.content = content
+        response.choices[0].message.function_call = None
+        response.usage.prompt_tokens = prompt_tokens
+        response.usage.completion_tokens = completion_tokens
+        response.usage.total_tokens = prompt_tokens + completion_tokens
+        return response
+
+    @patch("agentverse.llms.minimax.AsyncOpenAI")
+    @patch("agentverse.llms.minimax.MINIMAX_API_KEY", "test-key")
+    @pytest.mark.asyncio
+    async def test_agenerate_basic(self, mock_async_cls):
+        mock_client = MagicMock()
+        mock_async_cls.return_value = mock_client
+        mock_client.chat.completions.create = AsyncMock(
+            return_value=self._mock_response("Async response")
+        )
+
+        chat = MiniMaxChat()
+        result = await chat.agenerate_response(append_prompt="Hello")
+
+        assert isinstance(result, LLMResult)
+        assert result.content == "Async response"
+
+    @patch("agentverse.llms.minimax.AsyncOpenAI")
+    @patch("agentverse.llms.minimax.MINIMAX_API_KEY", "test-key")
+    @pytest.mark.asyncio
+    async def test_agenerate_strips_think_tags(self, mock_async_cls):
+        mock_client = MagicMock()
+        mock_async_cls.return_value = mock_client
+        # Restored the garbled <think>...</think> literal so the test
+        # actually exercises reasoning-block stripping.
+        mock_client.chat.completions.create = AsyncMock(
+            return_value=self._mock_response(
+                "<think>thinking...</think>The answer is 42"
+            )
+        )
+
+        chat = MiniMaxChat()
+        result = await chat.agenerate_response(append_prompt="What?")
+
+        assert result.content == "The answer is 42"
+
+    @patch("agentverse.llms.minimax.AsyncOpenAI")
+    @patch("agentverse.llms.minimax.MINIMAX_API_KEY", "test-key")
+    @pytest.mark.asyncio
+    async def test_agenerate_with_valid_function(self, mock_async_cls):
+        mock_client = MagicMock()
+        mock_async_cls.return_value = mock_client
+        response = self._mock_response()
+        response.choices[0].message.function_call = MagicMock()
+        response.choices[0].message.function_call.name = "search"
+        response.choices[0].message.function_call.arguments = '{"query": "test"}'
+        mock_client.chat.completions.create = AsyncMock(return_value=response)
+
+        chat = MiniMaxChat()
+        functions = [{"name": "search", "parameters": {}}]
+        result = await chat.agenerate_response(
+            append_prompt="Search", functions=functions
+        )
+
+        assert result.function_name == "search"
+        assert result.function_arguments == {"query": "test"}
+
+    @patch("agentverse.llms.minimax.AsyncOpenAI")
+    @patch("agentverse.llms.minimax.MINIMAX_API_KEY", "test-key")
+    @pytest.mark.asyncio
+    async def test_agenerate_invalid_function_raises(self, mock_async_cls):
+        mock_client = MagicMock()
+        mock_async_cls.return_value = mock_client
+        response = self._mock_response()
+        response.choices[0].message.function_call = MagicMock()
+        response.choices[0].message.function_call.name = "invalid_func"
+        response.choices[0].message.function_call.arguments = "{}"
+        mock_client.chat.completions.create = AsyncMock(return_value=response)
+
+        chat = MiniMaxChat()
+        functions = [{"name": "search", "parameters": {}}]
+        with pytest.raises(ValueError, match="not in the list of valid functions"):
+            await chat.agenerate_response(
+                append_prompt="Search", functions=functions
+            )
+
+
+class TestMiniMaxRegistry:
+    """Tests for LLM registry integration."""
+
+    # NOTE(review): assumes llm_registry exposes its mapping as `.entries`
+    # and a `.build(name, **kwargs)` factory — confirm against the Registry
+    # implementation in agentverse.
+    def test_minimax_registered(self):
+        from agentverse.llms import llm_registry
+
+        assert "minimax" in llm_registry.entries
+
+    def test_m27_registered(self):
+        from agentverse.llms import llm_registry
+
+        assert "MiniMax-M2.7" in llm_registry.entries
+
+    def test_m25_registered(self):
+        from agentverse.llms import llm_registry
+
+        assert "MiniMax-M2.5" in llm_registry.entries
+
+    def test_m25_highspeed_registered(self):
+        from agentverse.llms import llm_registry
+
+        assert "MiniMax-M2.5-highspeed" in llm_registry.entries
+
+    @patch("agentverse.llms.minimax.MINIMAX_API_KEY", "test-key")
+    def test_registry_build(self):
+        from agentverse.llms import llm_registry
+
+        # Building by the generic "minimax" key yields the default model.
+        chat = llm_registry.build("minimax")
+        assert isinstance(chat, MiniMaxChat)
+        assert chat.args.model == "MiniMax-M2.7"
+
+    @patch("agentverse.llms.minimax.MINIMAX_API_KEY", "test-key")
+    def test_registry_build_specific_model(self):
+        from agentverse.llms import llm_registry
+
+        chat = llm_registry.build("MiniMax-M2.5-highspeed")
+        assert isinstance(chat, MiniMaxChat)
+
+
+class TestMiniMaxConstants:
+    """Module-level limit/pricing tables must cover every supported model."""
+
+    _MODELS = ("MiniMax-M2.7", "MiniMax-M2.5", "MiniMax-M2.5-highspeed")
+
+    def test_token_limits_all_models(self):
+        for name in self._MODELS:
+            assert name in MINIMAX_TOKEN_LIMITS
+
+    def test_input_cost_all_models(self):
+        for name in self._MODELS:
+            assert name in MINIMAX_INPUT_COST
+
+    def test_output_cost_all_models(self):
+        for name in self._MODELS:
+            assert name in MINIMAX_OUTPUT_COST
+
+    def test_costs_are_positive(self):
+        for model, cost in MINIMAX_INPUT_COST.items():
+            assert cost > 0, f"Input cost for {model} should be positive"
+        for model, cost in MINIMAX_OUTPUT_COST.items():
+            assert cost > 0, f"Output cost for {model} should be positive"