Skip to content

Commit cf05e0d

Browse files
committed
feat(chat): expose finishReason on turn-complete events
Surface the AI SDK's FinishReason on TurnCompleteEvent and BeforeTurnCompleteEvent. Gives hooks a clean signal for distinguishing a normal turn end from one paused on a pending tool call (HITL flows like ask_user). The value is undefined for manual pipeChat() calls or aborted streams.
1 parent dd94073 commit cf05e0d

File tree

3 files changed

+58
-1
lines changed

3 files changed

+58
-1
lines changed
Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
"@trigger.dev/sdk": patch
3+
---
4+
5+
Expose `finishReason` on `TurnCompleteEvent` and `BeforeTurnCompleteEvent`. Surfaces the AI SDK's `FinishReason` (`"stop" | "tool-calls" | "length" | ...`) so hooks can distinguish a normal turn end from one paused on a pending tool call (HITL flows like `ask_user`). The value is undefined for manual `pipeChat()` calls or aborted streams.

packages/trigger-sdk/src/v3/ai.ts

Lines changed: 27 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@ import {
1717
type TaskWithSchema,
1818
} from "@trigger.dev/core/v3";
1919
import type {
20+
FinishReason,
2021
ModelMessage,
2122
ToolSet,
2223
UIMessage,
@@ -2211,6 +2212,22 @@ export type TurnCompleteEvent<TClientData = unknown, TUIM extends UIMessage = UI
22112212
usage?: LanguageModelUsage;
22122213
/** Cumulative token usage across all turns in this run (including this turn). */
22132214
totalUsage: LanguageModelUsage;
2215+
/**
2216+
* Why the LLM stopped generating this turn:
2217+
* - `"stop"` — model generated a stop sequence (normal completion)
2218+
* - `"tool-calls"` — model stopped on one or more tool calls. If any tool
2219+
* has no `execute` function (e.g. an `ask_user` HITL tool), the turn is
2220+
* paused awaiting user input; inspect `responseMessage.parts` for tool
2221+
* parts in `input-available` state to distinguish.
2222+
* - `"length"` — max tokens reached
2223+
* - `"content-filter"` — content filter stopped the model
2224+
* - `"error"` — model errored
2225+
* - `"other"` — provider-specific reason
2226+
*
2227+
* Undefined if the underlying stream didn't provide a finish reason (e.g.
2228+
* manual `pipeChat()` or an aborted stream).
2229+
*/
2230+
finishReason?: FinishReason;
22142231
};
22152232

22162233
/**
@@ -3582,6 +3599,7 @@ function chatAgent<
35823599

35833600
// Captured by the onFinish callback below — works even on abort/stop.
35843601
let capturedResponseMessage: TUIMessage | undefined;
3602+
let capturedFinishReason: FinishReason | undefined;
35853603

35863604
// Promise that resolves when the AI SDK's onFinish fires.
35873605
// On abort, the stream's cancel() handler calls onFinish
@@ -3647,8 +3665,15 @@ function chatAgent<
36473665
// messageId. Without this, the frontend and backend generate IDs
36483666
// independently and they won't match for ID-based dedup.
36493667
generateMessageId: resolvedOptions.generateMessageId ?? generateMessageId,
3650-
onFinish: ({ responseMessage }: { responseMessage: UIMessage }) => {
3668+
onFinish: ({
3669+
responseMessage,
3670+
finishReason,
3671+
}: {
3672+
responseMessage: UIMessage;
3673+
finishReason?: FinishReason;
3674+
}) => {
36513675
capturedResponseMessage = responseMessage as TUIMessage;
3676+
capturedFinishReason = finishReason;
36523677
resolveOnFinish!();
36533678
},
36543679
});
@@ -4012,6 +4037,7 @@ function chatAgent<
40124037
preloaded,
40134038
usage: turnUsage,
40144039
totalUsage: cumulativeUsage,
4040+
finishReason: capturedFinishReason,
40154041
};
40164042

40174043
// Fire onBeforeTurnComplete — stream is still open so the hook

packages/trigger-sdk/test/mockChatAgent.test.ts

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -236,6 +236,32 @@ describe("mockChatAgent", () => {
236236
}
237237
});
238238

239+
it("exposes finishReason on the onTurnComplete event", async () => {
240+
const model = new MockLanguageModelV3({
241+
doStream: async () => ({ stream: textStream("hi") }),
242+
});
243+
244+
let seenReason: string | undefined;
245+
const agent = chat.agent({
246+
id: "mockChatAgent.finish-reason",
247+
onTurnComplete: async ({ finishReason }) => {
248+
seenReason = finishReason;
249+
},
250+
run: async ({ messages, signal }) => {
251+
return streamText({ model, messages, abortSignal: signal });
252+
},
253+
});
254+
255+
const harness = mockChatAgent(agent, { chatId: "test-finish-reason" });
256+
try {
257+
await harness.sendMessage(userMessage("hello"));
258+
await new Promise((r) => setTimeout(r, 20));
259+
expect(seenReason).toBe("stop");
260+
} finally {
261+
await harness.close();
262+
}
263+
});
264+
239265
it("seeds locals before run() via setupLocals (DI pattern)", async () => {
240266
type FakeDb = { findUser(id: string): Promise<{ id: string; name: string }> };
241267
const dbKey = locals.create<FakeDb>("test-db");

0 commit comments

Comments
 (0)