@@ -872,6 +872,13 @@ const chatPrepareMessagesKey =
872872/** @internal Flag set by `chat.requestUpgrade()` to exit the loop after the current turn. */
873873const chatUpgradeRequestedKey = locals . create < boolean > ( "chat.upgradeRequested" ) ;
874874
/**
 * @internal Flag set by `chat.endRun()` to exit the loop after the current
 * turn completes, without any upgrade semantics. Checked at the same
 * post-turn / pre-wait sites as `chatUpgradeRequestedKey`, so the two exit
 * paths stay in lockstep; only the upgrade flag carries upgrade semantics.
 */
const chatEndRunRequestedKey = locals.create<boolean>("chat.endRunRequested");
881+
875882/**
876883 * Event passed to `summarize` callbacks.
877884 */
@@ -4160,7 +4167,11 @@ function chatAgent<
41604167
41614168 // chat.requestUpgrade() was called — exit the loop so the
41624169 // transport triggers a new run on the latest version.
4163- if ( locals . get ( chatUpgradeRequestedKey ) ) {
4170+ // chat.endRun() — same exit, no upgrade semantics.
4171+ if (
4172+ locals . get ( chatUpgradeRequestedKey ) ||
4173+ locals . get ( chatEndRunRequestedKey )
4174+ ) {
41644175 return "exit" ;
41654176 }
41664177
@@ -4277,8 +4288,11 @@ function chatAgent<
42774288 // Best-effort — if stream write fails, let the run continue anyway
42784289 }
42794290
4280- // chat.requestUpgrade() — exit after error turn too
4281- if ( locals . get ( chatUpgradeRequestedKey ) ) {
4291+ // chat.requestUpgrade() / chat.endRun() — exit after error turn too
4292+ if (
4293+ locals . get ( chatUpgradeRequestedKey ) ||
4294+ locals . get ( chatEndRunRequestedKey )
4295+ ) {
42824296 return ;
42834297 }
42844298
@@ -4861,6 +4875,36 @@ function requestUpgrade(): void {
48614875 locals . set ( chatUpgradeRequestedKey , true ) ;
48624876}
48634877
/**
 * Exit the run after the current turn completes, without waiting for the
 * next message. Unlike {@link requestUpgrade}, no upgrade-required signal
 * is sent to the client — the turn finishes normally, `onTurnComplete`
 * fires, and the loop exits instead of going idle.
 *
 * Call from `run()`, `chat.defer()`, `onBeforeTurnComplete`, or
 * `onTurnComplete` to end the run on your own terms (budget exhausted,
 * task complete, goal achieved, etc.).
 *
 * The next user message on the same `chatId` starts a fresh run via the
 * normal continuation mechanism.
 *
 * Implementation note: this only sets a run-scoped local; the loop reads
 * `chatEndRunRequestedKey` at its post-turn / pre-wait checkpoints, so the
 * exit is deferred until the current turn has fully completed.
 *
 * @example
 * ```ts
 * chat.agent({
 *   id: "one-shot-agent",
 *   run: async ({ messages, signal }) => {
 *     const result = streamText({ model: openai("gpt-4o"), messages, abortSignal: signal });
 *     // Single-response agent — exit after this turn.
 *     chat.endRun();
 *     return result;
 *   },
 * });
 * ```
 */
function endRun(): void {
  locals.set(chatEndRunRequestedKey, true);
}
4907+
48644908// ---------------------------------------------------------------------------
48654909// Per-turn deferred work
48664910// ---------------------------------------------------------------------------
@@ -5474,8 +5518,11 @@ function createChatSession(
54745518
54755519 // Subsequent turns: wait for the next message
54765520 if ( turn > 0 ) {
5477- // chat.requestUpgrade() — exit before waiting
5478- if ( locals . get ( chatUpgradeRequestedKey ) ) {
5521+ // chat.requestUpgrade() / chat.endRun() — exit before waiting
5522+ if (
5523+ locals . get ( chatUpgradeRequestedKey ) ||
5524+ locals . get ( chatEndRunRequestedKey )
5525+ ) {
54795526 stop . cleanup ( ) ;
54805527 return { done : true , value : undefined } ;
54815528 }
@@ -6108,6 +6155,8 @@ export const chat = {
61086155 isStopped,
61096156 /** Request that the run exits after the current turn so the next message starts on the latest version. See {@link requestUpgrade}. */
61106157 requestUpgrade,
6158+ /** Exit the run after the current turn completes, without any upgrade signal. See {@link endRun}. */
6159+ endRun,
61116160 /** Clean up aborted parts from a UIMessage. See {@link cleanupAbortedParts}. */
61126161 cleanupAbortedParts,
61136162 /** Register background work that runs in parallel with streaming. See {@link chatDefer}. */
0 commit comments