@@ -75,7 +75,7 @@ def generate_solution(self, problem: str, request_id: str = None) -> Tuple[Agent
                 {"role": "system", "content": MATHEMATICAL_SYSTEM_PROMPT},
                 {"role": "user", "content": exploration_prompt}
             ],
-            max_tokens=max_tokens,
+            max_tokens=reasoning_tokens + 8000,
             temperature=self.temperature,
             timeout=300,  # 5 minute timeout for complex problems
             extra_body={
@@ -145,7 +145,7 @@ def verify_solution(self, problem: str, solution: str, verifier_id: int, solutio
                 {"role": "system", "content": MATHEMATICAL_SYSTEM_PROMPT},
                 {"role": "user", "content": verification_prompt}
             ],
-            max_tokens=verification_max_tokens,
+            max_tokens=verification_reasoning_tokens + 8000,
             temperature=0.1,  # Low temperature for consistent verification
             timeout=180,
             extra_body={
@@ -206,7 +206,7 @@ def improve_solution(self, problem: str, current_solution: str, feedback: str, i
                 {"role": "system", "content": MATHEMATICAL_SYSTEM_PROMPT},
                 {"role": "user", "content": improvement_prompt}
             ],
-            max_tokens=max_tokens,
+            max_tokens=improvement_reasoning_tokens + 8000,
             temperature=self.temperature * 0.8,  # Slightly lower temperature for improvement
             timeout=300,
             extra_body={
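All three hunks apply the same budgeting rule: each request's max_tokens becomes its per-stage reasoning budget plus a fixed 8000-token margin for the written-out answer, instead of a shared max_tokens value. A minimal sketch of that pattern follows, using the standard OpenAI Python client; reasoning_tokens, the 8000 margin, and MATHEMATICAL_SYSTEM_PROMPT mirror names in the diff, while the model name, the call_model helper, and the prompt text are placeholders and not part of this PR (the diff's extra_body is omitted here).

# Illustrative sketch of the token-budgeting pattern, not the PR's actual code.
from openai import OpenAI

client = OpenAI()  # assumes an OpenAI-compatible endpoint is configured via environment

MATHEMATICAL_SYSTEM_PROMPT = "You are a careful mathematical problem solver."  # placeholder text

def call_model(prompt: str, reasoning_tokens: int, temperature: float = 0.7) -> str:
    """One chat call whose max_tokens is derived from the per-stage reasoning budget."""
    response = client.chat.completions.create(
        model="gpt-4o",  # placeholder model name
        messages=[
            {"role": "system", "content": MATHEMATICAL_SYSTEM_PROMPT},
            {"role": "user", "content": prompt},
        ],
        max_tokens=reasoning_tokens + 8000,  # reasoning budget + fixed answer margin
        temperature=temperature,
        timeout=300,
    )
    return response.choices[0].message.content

Generation, verification, and improvement can then pass different reasoning budgets (and temperatures) through one helper like this, which is what the three hunks do with reasoning_tokens, verification_reasoning_tokens, and improvement_reasoning_tokens.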