
Commit 2db5be0

committed
hg
1 parent e2b5a43 commit 2db5be0

2 files changed: +17 −8 lines changed

optillm/mars/agent.py

Lines changed: 16 additions & 7 deletions
@@ -27,18 +27,18 @@ def __init__(self, agent_id: int, client, model: str, config: Dict[str, Any]):
         self.temperature = self._assign_temperature()
 
     def _assign_temperature(self) -> float:
-        """Assign temperature based on agent ID for diversity"""
-        temperatures = [0.3, 0.5, 0.7, 0.9, 1.0]
+        """Assign temperature based on agent ID for 3-agent configuration"""
+        temperatures = [0.3, 0.6, 1.0]  # Low, Medium, High reasoning effort
         return temperatures[self.agent_id % len(temperatures)]
 
     def _get_reasoning_effort(self) -> str:
         """Get reasoning effort level based on agent temperature"""
         if self.temperature <= 0.4:
-            return "low"  # 20% reasoning budget
-        elif self.temperature <= 0.7:
-            return "medium"  # 50% reasoning budget
+            return "low"  # 8k thinking tokens
+        elif self.temperature <= 0.8:
+            return "medium"  # 16k thinking tokens
         else:
-            return "high"  # 80% reasoning budget
+            return "high"  # 32k thinking tokens
 
     def generate_solution(self, problem: str, request_id: str = None) -> Tuple[AgentSolution, int]:
         """Generate a solution for the given problem using reasoning API"""
@@ -52,10 +52,19 @@ def generate_solution(self, problem: str, request_id: str = None) -> Tuple[AgentSolution, int]:
         )
 
         # Configure reasoning parameters for OpenRouter
+        reasoning_effort = self._get_reasoning_effort()
         reasoning_config = {
-            "effort": self._get_reasoning_effort()
+            "effort": reasoning_effort
         }
 
+        # Add specific token budgets for 3-agent configuration
+        if reasoning_effort == "low":
+            reasoning_config["max_tokens"] = 8000   # Agent 0: 8k thinking tokens
+        elif reasoning_effort == "medium":
+            reasoning_config["max_tokens"] = 16000  # Agent 1: 16k thinking tokens
+        else:  # high
+            reasoning_config["max_tokens"] = 32000  # Agent 2: 32k thinking tokens
+
         try:
             # Make API call with reasoning via extra_body for OpenRouter compatibility
             response = self.client.chat.completions.create(
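
Taken together, the agent.py changes map each of the three agents to a temperature, an effort level, and a fixed thinking-token budget that is forwarded to OpenRouter. The sketch below restates that mapping outside the class and shows one plausible way the resulting reasoning config reaches the API through the OpenAI client's extra_body; the client setup, model id, prompt, and the "reasoning" key name are illustrative assumptions, since the diff truncates before the full create() call.

# Sketch only: the 3-agent temperature -> effort -> thinking-budget mapping from
# this diff, passed to OpenRouter via the OpenAI-compatible client's extra_body.
from openai import OpenAI

client = OpenAI(
    base_url="https://openrouter.ai/api/v1",  # assumption: OpenRouter endpoint
    api_key="sk-or-...",                      # placeholder API key
)

TEMPERATURES = [0.3, 0.6, 1.0]                       # Agent 0, 1, 2
BUDGETS = {"low": 8000, "medium": 16000, "high": 32000}

def reasoning_for_agent(agent_id: int) -> dict:
    """Mirror the agent.py mapping: pick a temperature, derive effort and budget."""
    temperature = TEMPERATURES[agent_id % len(TEMPERATURES)]
    if temperature <= 0.4:
        effort = "low"
    elif temperature <= 0.8:
        effort = "medium"
    else:
        effort = "high"
    return {"effort": effort, "max_tokens": BUDGETS[effort]}

response = client.chat.completions.create(
    model="example/reasoning-model",                     # placeholder model id
    messages=[{"role": "user", "content": "Solve: ..."}],
    temperature=TEMPERATURES[1],
    extra_body={"reasoning": reasoning_for_agent(1)},    # Agent 1: medium, 16k thinking tokens
)
print(response.choices[0].message.content)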

optillm/mars/mars.py

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@
     'min_verified_solutions': 1,
     'thinking_budget_initial': 10000,
     'thinking_budget_max': 32000,
-    'max_response_tokens': 65536,
+    'max_response_tokens': 64000,
     'max_verification_attempts': 10,
     'early_termination': True,
     'use_reasoning_api': True
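
The mars.py change only lowers the default response budget from 65536 to 64000 tokens. For readers embedding MARS elsewhere, the sketch below shows a common way a defaults dict like this is combined with caller overrides; build_config is a hypothetical helper, and only the key names and values are taken from the diff.

# Minimal sketch, not the repository's code: merging MARS-style defaults with
# per-call overrides. Key names and values mirror the hunk above.
DEFAULT_CONFIG = {
    'min_verified_solutions': 1,
    'thinking_budget_initial': 10000,
    'thinking_budget_max': 32000,
    'max_response_tokens': 64000,   # value introduced by this commit (was 65536)
    'max_verification_attempts': 10,
    'early_termination': True,
    'use_reasoning_api': True,
}

def build_config(overrides=None):
    """Return a copy of the defaults with any caller overrides applied on top."""
    config = dict(DEFAULT_CONFIG)
    config.update(overrides or {})
    return config

# Example: run with a smaller response budget but keep every other default.
cfg = build_config({'max_response_tokens': 16000})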
