Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
33 changes: 33 additions & 0 deletions src/api/transform/__tests__/openai-format.spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -112,6 +112,39 @@ describe("convertToOpenAiMessages", () => {
})
})

it("should strip null values from tool call arguments to prevent Jinja template errors", () => {
	// An assistant turn whose tool_use input mixes a null `mode` and a
	// real string `mode` inside the follow_up suggestions.
	const sourceMessages: Anthropic.Messages.MessageParam[] = [
		{
			role: "assistant",
			content: [
				{
					type: "tool_use",
					id: "followup-123",
					name: "ask_followup_question",
					input: {
						question: "Pick one",
						follow_up: [
							{ text: "Option A", mode: null },
							{ text: "Option B", mode: "code" },
						],
					},
				},
			],
		},
	]

	const converted = convertToOpenAiMessages(sourceMessages)
	const assistant = converted[0] as OpenAI.Chat.ChatCompletionAssistantMessageParam
	const call = assistant.tool_calls![0] as any
	const parsedArgs = JSON.parse(call.function.arguments)

	// A null `mode` must be dropped during serialization — absent from the
	// resulting JSON entirely — while an explicit string `mode` survives.
	expect(parsedArgs.follow_up[0]).toEqual({ text: "Option A" })
	expect(parsedArgs.follow_up[0].mode).toBeUndefined()
	expect(parsedArgs.follow_up[1]).toEqual({ text: "Option B", mode: "code" })
})

it("should handle user messages with tool results (no normalization without normalizeToolCallId)", () => {
const anthropicMessages: Anthropic.Messages.MessageParam[] = [
{
Expand Down
9 changes: 7 additions & 2 deletions src/api/transform/openai-format.ts
Original file line number Diff line number Diff line change
Expand Up @@ -467,8 +467,13 @@ export function convertToOpenAiMessages(
type: "function",
function: {
name: toolMessage.name,
// json string
arguments: JSON.stringify(toolMessage.input),
// Serialize as JSON, stripping null values to prevent Jinja template
// errors on local models (e.g. "Cannot convert value of type
// Optional<Any> to Jinja Value"). Null in tool args typically means
// "not provided" and should be omitted instead.
arguments: JSON.stringify(toolMessage.input, (_key, value) =>
value === null ? undefined : value,
),
},
}))

Expand Down
13 changes: 9 additions & 4 deletions src/core/prompts/tools/native-tools/ask_followup_question.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ Parameters:
- follow_up: (required) A list of 2-4 suggested answers. Suggestions must be complete, actionable answers without placeholders. Optionally include mode to switch modes (code/architect/etc.)

Example: Asking for file path
{ "question": "What is the path to the frontend-config.json file?", "follow_up": [{ "text": "./src/frontend-config.json", "mode": null }, { "text": "./config/frontend-config.json", "mode": null }, { "text": "./frontend-config.json", "mode": null }] }
{ "question": "What is the path to the frontend-config.json file?", "follow_up": [{ "text": "./src/frontend-config.json" }, { "text": "./config/frontend-config.json" }, { "text": "./frontend-config.json" }] }

Example: Asking with mode switch
{ "question": "Would you like me to implement this feature?", "follow_up": [{ "text": "Yes, implement it now", "mode": "code" }, { "text": "No, just plan it out", "mode": "architect" }] }`
Expand All @@ -25,7 +25,12 @@ export default {
function: {
name: "ask_followup_question",
description: ASK_FOLLOWUP_QUESTION_DESCRIPTION,
strict: true,
// Note: strict mode is intentionally disabled for this tool.
// With strict: true, OpenAI requires ALL properties to be in the 'required' array,
// which forces the LLM to always provide explicit values (even null) for optional params.
// Local models using Jinja chat templates cannot handle null values in tool call arguments,
// causing "Cannot convert value of type Optional<Any> to Jinja Value" errors.
// By disabling strict mode, the LLM can omit the optional `mode` parameter entirely.
parameters: {
type: "object",
properties: {
Expand All @@ -44,11 +49,11 @@ export default {
description: FOLLOW_UP_TEXT_DESCRIPTION,
},
mode: {
type: ["string", "null"],
type: "string",
description: FOLLOW_UP_MODE_DESCRIPTION,
},
},
required: ["text", "mode"],
required: ["text"],
additionalProperties: false,
},
minItems: 1,
Expand Down
12 changes: 10 additions & 2 deletions src/core/tools/AskFollowupQuestionTool.ts
Original file line number Diff line number Diff line change
Expand Up @@ -39,10 +39,18 @@ export class AskFollowupQuestionTool extends BaseTool<"ask_followup_question"> {
return
}

// Transform follow_up suggestions to the format expected by task.ask
// Transform follow_up suggestions to the format expected by task.ask.
// Omit `mode` when it's null/undefined to avoid Jinja template errors
// on local models that can't handle null values in tool call arguments.
const follow_up_json = {
question,
suggest: follow_up.map((s) => ({ answer: s.text, mode: s.mode })),
suggest: follow_up.map((s) => {
const suggestion: { answer: string; mode?: string } = { answer: s.text }
if (s.mode != null) {
suggestion.mode = s.mode
}
return suggestion
}),
}

task.consecutiveMistakeCount = 0
Expand Down
31 changes: 31 additions & 0 deletions src/core/tools/__tests__/askFollowupQuestionTool.spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,37 @@ describe("askFollowupQuestionTool", () => {
)
})

it("should strip null mode values from suggestions to prevent Jinja template errors", async () => {
	// A tool invocation whose nativeArgs carry one suggestion with an
	// explicit null mode and one with a concrete mode string.
	const toolUse: ToolUse = {
		type: "tool_use",
		name: "ask_followup_question",
		params: {
			question: "What would you like to do?",
		},
		nativeArgs: {
			question: "What would you like to do?",
			follow_up: [
				{ text: "Option A", mode: null as any },
				{ text: "Option B", mode: "code" },
			],
		},
		partial: false,
	}

	await askFollowupQuestionTool.handle(mockCline, toolUse as ToolUse<"ask_followup_question">, {
		askApproval: vi.fn(),
		handleError: vi.fn(),
		pushToolResult: mockPushToolResult,
	})

	// The serialized ask payload must omit the null mode while keeping "code".
	expect(mockCline.ask).toHaveBeenCalledWith(
		"followup",
		expect.stringContaining('"suggest":[{"answer":"Option A"},{"answer":"Option B","mode":"code"}]'),
		false,
	)
})

it("should handle mixed suggestions with and without mode attributes", async () => {
const block: ToolUse = {
type: "tool_use",
Expand Down
Loading