From a4d01302d0a9c26a7e4d9c21638df3630596aad1 Mon Sep 17 00:00:00 2001 From: MarioCadenas Date: Tue, 21 Apr 2026 19:42:39 +0200 Subject: [PATCH] feat(appkit): shared agent types and LLM adapter implementations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Foundation layer for the agents feature. Adds the portable type surface that every downstream layer builds on, plus three LLM adapter implementations so the agents plugin (later PR) can target whatever the user has. ### Shared agent types `packages/shared/src/agent.ts` — no behavior, just the type vocabulary: `AgentAdapter`, `AgentEvent`, `AgentInput`, `AgentRunContext`, `AgentToolDefinition`, `Message`, `Thread`, `ThreadStore`, `ToolAnnotations`, `ToolCall`, `ToolProvider`, `ResponseStreamEvent`. Exported from the shared barrel. ### Adapters - `packages/appkit/src/agents/databricks.ts` — `DatabricksAdapter`: streams OpenAI-compatible completions against a Databricks Model Serving endpoint (raw fetch + SSE, no vendor SDKs). - `packages/appkit/src/agents/vercel-ai.ts` — `VercelAIAdapter`: wraps any Vercel AI SDK `streamText` call. Maps Vercel SDK events to AppKit `AgentEvent`s and tool calls. - `packages/appkit/src/agents/langchain.ts` — `LangChainAdapter`: wraps any LangChain `Runnable` (AgentExecutor, compiled LangGraph, etc.). Subscribes to `streamEvents(v2)` and maps to `AgentEvent`s. Each adapter is self-contained and independently testable. ### Package plumbing - Subpath exports `@databricks/appkit/agents/{databricks,vercel-ai,langchain}` so consumers pick only the adapter they want. - `@langchain/core` and `ai` declared as optional peer dependencies. - `@ai-sdk/openai`, `@langchain/core`, `ai` added as devDeps for tests. - `tsdown.config.ts` emits the three adapter entry points alongside the main bundle. 
### Test plan - 24 adapter tests (Databricks: 16, Vercel AI: 4, LangChain: 4) passing - Full appkit vitest suite: 1154 tests passing - Typecheck clean - Build clean, publint clean Signed-off-by: MarioCadenas ### MVP polish - **LangChain adapter `callId` correlation fix.** The previous implementation emitted `tool_call` with the LLM-provided `tc.id ?? tc.name` and `tool_result` with LangChain's internal `event.run_id` — these never matched, breaking every Responses-API client that pairs calls by `call_id`. The adapter now records a `run_id → callId` mapping at `on_tool_start` (matching the accumulated tool_call by name) and resolves it at `on_tool_end`. A deterministic `lc_<name>_<idx>_<n>` fallback id prevents collisions when the same tool is called multiple times in one turn without a model-provided id. Adds three regression tests covering happy-path correlation, duplicate-name disambiguation, and the no-accumulator-match fallback. - **Adapter docstring cleanup.** The four `@example` blocks in `databricks.ts`, `langchain.ts`, and `vercel-ai.ts` referenced a fictional `appkit.agent.registerAgent("assistant", adapter)` API that has never existed. Replaced with real usage via `createApp({ plugins: [agents({ agents: { assistant: createAgent({ model: adapter }) } })] })`. 
Signed-off-by: MarioCadenas --- knip.json | 9 +- packages/appkit/package.json | 35 +- packages/appkit/src/agents/databricks.ts | 775 ++++++++++++++++++ packages/appkit/src/agents/langchain.ts | 292 +++++++ .../src/agents/tests/databricks.test.ts | 486 +++++++++++ .../appkit/src/agents/tests/langchain.test.ts | 366 +++++++++ .../appkit/src/agents/tests/vercel-ai.test.ts | 190 +++++ packages/appkit/src/agents/vercel-ai.ts | 138 ++++ packages/appkit/tsdown.config.ts | 7 +- packages/shared/src/agent.ts | 212 +++++ packages/shared/src/index.ts | 1 + pnpm-lock.yaml | 161 ++++ 12 files changed, 2668 insertions(+), 4 deletions(-) create mode 100644 packages/appkit/src/agents/databricks.ts create mode 100644 packages/appkit/src/agents/langchain.ts create mode 100644 packages/appkit/src/agents/tests/databricks.test.ts create mode 100644 packages/appkit/src/agents/tests/langchain.test.ts create mode 100644 packages/appkit/src/agents/tests/vercel-ai.test.ts create mode 100644 packages/appkit/src/agents/vercel-ai.ts create mode 100644 packages/shared/src/agent.ts diff --git a/knip.json b/knip.json index b777d8c2..13a43187 100644 --- a/knip.json +++ b/knip.json @@ -7,7 +7,9 @@ "docs" ], "workspaces": { - "packages/appkit": {}, + "packages/appkit": { + "ignoreDependencies": ["@langchain/core", "ai"] + }, "packages/appkit-ui": { "ignoreDependencies": ["tailwindcss", "tw-animate-css"] } @@ -17,6 +19,11 @@ "**/*.example.tsx", "**/*.css", "packages/appkit/src/plugins/vector-search/**", + "packages/appkit/src/plugin/index.ts", + "packages/appkit/src/plugins/agents/index.ts", + "packages/appkit/src/plugins/agents/tools/index.ts", + "packages/appkit/src/plugins/agents/from-plugin.ts", + "packages/appkit/src/plugins/agents/load-agents.ts", "template/**", "tools/**", "docs/**" diff --git a/packages/appkit/package.json b/packages/appkit/package.json index 146be5a9..27a14e66 100644 --- a/packages/appkit/package.json +++ b/packages/appkit/package.json @@ -29,6 +29,18 @@ "development": 
"./src/index.ts", "default": "./dist/index.js" }, + "./agents/vercel-ai": { + "development": "./src/agents/vercel-ai.ts", + "default": "./dist/agents/vercel-ai.js" + }, + "./agents/langchain": { + "development": "./src/agents/langchain.ts", + "default": "./dist/agents/langchain.js" + }, + "./agents/databricks": { + "development": "./src/agents/databricks.ts", + "default": "./dist/agents/databricks.js" + }, "./type-generator": { "types": "./dist/type-generator/index.d.ts", "development": "./src/type-generator/index.ts", @@ -77,14 +89,30 @@ "semver": "7.7.3", "shared": "workspace:*", "vite": "npm:rolldown-vite@7.1.14", - "ws": "8.18.3" + "ws": "8.18.3", + "zod": "^4.0.0" + }, + "peerDependencies": { + "@langchain/core": ">=0.3.0", + "ai": ">=4.0.0" + }, + "peerDependenciesMeta": { + "ai": { + "optional": true + }, + "@langchain/core": { + "optional": true + } }, "devDependencies": { + "@ai-sdk/openai": "4.0.0-beta.27", + "@langchain/core": "^1.1.39", "@types/express": "4.17.25", "@types/json-schema": "7.0.15", "@types/pg": "8.16.0", "@types/ws": "8.18.1", - "@vitejs/plugin-react": "5.1.1" + "@vitejs/plugin-react": "5.1.1", + "ai": "7.0.0-beta.76" }, "overrides": { "vite": "npm:rolldown-vite@7.1.14" @@ -93,6 +121,9 @@ "publishConfig": { "exports": { ".": "./dist/index.js", + "./agents/vercel-ai": "./dist/agents/vercel-ai.js", + "./agents/langchain": "./dist/agents/langchain.js", + "./agents/databricks": "./dist/agents/databricks.js", "./dist/shared/src/plugin": "./dist/shared/src/plugin.d.ts", "./type-generator": "./dist/type-generator/index.js", "./package.json": "./package.json" diff --git a/packages/appkit/src/agents/databricks.ts b/packages/appkit/src/agents/databricks.ts new file mode 100644 index 00000000..6cc98ca4 --- /dev/null +++ b/packages/appkit/src/agents/databricks.ts @@ -0,0 +1,775 @@ +import type { + AgentAdapter, + AgentEvent, + AgentInput, + AgentRunContext, + AgentToolDefinition, +} from "shared"; +import { stream as servingStream } from 
"../connectors/serving/client"; + +/** + * Transport shim: given an OpenAI-compatible request body, returns the raw + * SSE byte stream from the serving endpoint. Injected at construction time so + * callers can swap in the workspace SDK (factory paths), a bare `fetch` + * (the raw constructor), or a test fake. + */ +type StreamBody = ( + body: Record, + signal?: AbortSignal, +) => Promise>; + +/** + * Escape-hatch options: provide an `endpointUrl` + `authenticate()` and the + * adapter uses a bare `fetch()` to call it. Useful for tests and for pointing + * the adapter at non-workspace endpoints (reverse proxies, mocks). + */ +interface RawFetchAdapterOptions { + endpointUrl: string; + authenticate: () => Promise>; + maxSteps?: number; + maxTokens?: number; +} + +/** + * Preferred options: caller provides the transport function directly. + * The `fromServingEndpoint` / `fromModelServing` factories use this to route + * through `connectors/serving/stream`, which centralises URL encoding, auth + * via the SDK's `apiClient.request`, and any future retries/telemetry. + */ +interface StreamBodyAdapterOptions { + streamBody: StreamBody; + maxSteps?: number; + maxTokens?: number; +} + +type DatabricksAdapterOptions = + | RawFetchAdapterOptions + | StreamBodyAdapterOptions; + +function isStreamBodyOptions( + o: DatabricksAdapterOptions, +): o is StreamBodyAdapterOptions { + return "streamBody" in o; +} + +/** + * Minimal structural shape consumed by `connectors/serving/stream`. We avoid + * importing the concrete `WorkspaceClient` type to keep the adapter free of a + * compile-time dependency on the SDK. 
+ */ +interface WorkspaceClientLike { + apiClient: { + request(options: Record): Promise; + }; +} + +interface ServingEndpointOptions { + workspaceClient: WorkspaceClientLike; + endpointName: string; + maxSteps?: number; + maxTokens?: number; +} + +interface ModelServingOptions { + maxSteps?: number; + maxTokens?: number; + workspaceClient?: WorkspaceClientLike; +} + +/** + * Structural shape for {@link createDatabricksModel}. The Vercel AI helper + * builds its own `fetch` override and so needs the workspace config surface + * (host, authenticate, ensureResolved) rather than the `apiClient` used by + * the adapter factories. + */ +interface WorkspaceConfig { + host?: string; + authenticate(headers: Headers): Promise; + ensureResolved(): Promise; +} + +interface VercelDatabricksModelOptions { + workspaceClient: { config: WorkspaceConfig }; + endpointName: string; +} + +interface OpenAIMessage { + role: "system" | "user" | "assistant" | "tool"; + content: string | null; + tool_calls?: OpenAIToolCall[]; + tool_call_id?: string; +} + +interface OpenAIToolCall { + id: string; + type: "function"; + function: { name: string; arguments: string }; +} + +interface OpenAITool { + type: "function"; + function: { + name: string; + description: string; + parameters: unknown; + }; +} + +interface DeltaToolCall { + index: number; + id?: string; + type?: string; + function?: { name?: string; arguments?: string }; +} + +/** + * Adapter that talks directly to Databricks Model Serving `/invocations` endpoint. + * + * No dependency on the Vercel AI SDK or LangChain. Uses raw `fetch()` to POST + * OpenAI-compatible payloads and parses the SSE stream itself. Calls + * `authenticate()` per-request so tokens are always fresh. + * + * Handles both structured `tool_calls` responses and text-based tool call + * fallback parsing for models that output tool calls as text. 
+ * + * @example Using the factory (recommended) + * ```ts + * import { createApp, createAgent, agents } from "@databricks/appkit"; + * import { DatabricksAdapter } from "@databricks/appkit/agents/databricks"; + * import { WorkspaceClient } from "@databricks/sdk-experimental"; + * + * const adapter = DatabricksAdapter.fromServingEndpoint({ + * workspaceClient: new WorkspaceClient({}), + * endpointName: "my-endpoint", + * }); + * + * await createApp({ + * plugins: [ + * agents({ + * agents: { + * assistant: createAgent({ + * instructions: "You are a helpful assistant.", + * model: adapter, + * }), + * }, + * }), + * ], + * }); + * ``` + * + * @example Using the raw constructor + * ```ts + * const adapter = new DatabricksAdapter({ + * endpointUrl: "https://host/serving-endpoints/my-endpoint/invocations", + * authenticate: async () => ({ Authorization: `Bearer ${token}` }), + * }); + * ``` + */ +export class DatabricksAdapter implements AgentAdapter { + private streamBody: StreamBody; + private maxSteps: number; + private maxTokens: number; + + constructor(options: DatabricksAdapterOptions) { + this.maxSteps = options.maxSteps ?? 10; + this.maxTokens = options.maxTokens ?? 4096; + + if (isStreamBodyOptions(options)) { + this.streamBody = options.streamBody; + } else { + const { endpointUrl, authenticate } = options; + this.streamBody = async (body, signal) => { + const authHeaders = await authenticate(); + const response = await fetch(endpointUrl, { + method: "POST", + headers: { + "Content-Type": "application/json", + ...authHeaders, + }, + body: JSON.stringify(body), + signal, + }); + if (!response.ok) { + const errorText = await response.text().catch(() => "Unknown error"); + throw new Error( + `Databricks API error (${response.status}): ${errorText}`, + ); + } + if (!response.body) throw new Error("No response body"); + return response.body; + }; + } + } + + /** + * Creates a DatabricksAdapter for a Databricks Model Serving endpoint. 
+ * + * Routes through the shared `connectors/serving/stream` helper, which + * delegates to the SDK's `apiClient.request({ raw: true })`. That gives the + * adapter centralised URL encoding + authentication with the rest of the + * serving surface — no bespoke `fetch()` + `authenticate()` plumbing. + */ + static async fromServingEndpoint( + options: ServingEndpointOptions, + ): Promise { + const { workspaceClient, endpointName, maxSteps, maxTokens } = options; + return new DatabricksAdapter({ + streamBody: (body) => + // Cast through the structural shape: the connector types + // `workspaceClient` as the SDK's concrete `WorkspaceClient`, but we + // only need `apiClient.request`. + servingStream( + workspaceClient as unknown as Parameters[0], + endpointName, + body, + ), + maxSteps, + maxTokens, + }); + } + + /** + * Creates a DatabricksAdapter from a Model Serving endpoint name. + * Auto-creates a WorkspaceClient internally. Reads the endpoint name + * from the argument or the `DATABRICKS_AGENT_ENDPOINT` env var. + * + * @example + * ```ts + * // Reads endpoint from DATABRICKS_AGENT_ENDPOINT env var + * const adapter = await DatabricksAdapter.fromModelServing(); + * + * // Explicit endpoint + * const adapter = await DatabricksAdapter.fromModelServing("my-endpoint"); + * + * // With options + * const adapter = await DatabricksAdapter.fromModelServing("my-endpoint", { + * maxSteps: 5, + * maxTokens: 2048, + * }); + * ``` + */ + static async fromModelServing( + endpointName?: string, + options?: ModelServingOptions, + ): Promise { + const resolvedEndpoint = + endpointName ?? process.env.DATABRICKS_AGENT_ENDPOINT; + + if (!resolvedEndpoint) { + throw new Error( + "No endpoint name provided and DATABRICKS_AGENT_ENDPOINT env var is not set. 
" + + "Pass an endpoint name or set the environment variable.", + ); + } + + let workspaceClient: WorkspaceClientLike | undefined = + options?.workspaceClient; + if (!workspaceClient) { + const sdk = await import("@databricks/sdk-experimental"); + workspaceClient = new sdk.WorkspaceClient( + {}, + ) as unknown as WorkspaceClientLike; + } + + return DatabricksAdapter.fromServingEndpoint({ + workspaceClient, + endpointName: resolvedEndpoint, + maxSteps: options?.maxSteps, + maxTokens: options?.maxTokens, + }); + } + + async *run( + input: AgentInput, + context: AgentRunContext, + ): AsyncGenerator { + // Databricks API requires tool names to match [a-zA-Z0-9_-]. + // Our tool names use dots (e.g. "analytics.query"), so we swap dots + // for double-underscores in the wire format and map back on receipt. + const nameToWire = new Map(); + const wireToName = new Map(); + for (const tool of input.tools) { + const wire = tool.name.replace(/\./g, "__"); + nameToWire.set(tool.name, wire); + wireToName.set(wire, tool.name); + } + + const tools = this.buildTools(input.tools, nameToWire); + const messages = this.buildMessages(input.messages); + + yield { type: "status", status: "running" }; + + for (let step = 0; step < this.maxSteps; step++) { + if (context.signal?.aborted) break; + + const { text, toolCalls } = yield* this.streamCompletion( + messages, + tools, + context, + ); + + if (toolCalls.length === 0) { + const parsed = parseTextToolCalls(text); + if (parsed.length > 0) { + yield* this.executeToolCalls(parsed, messages, context); + continue; + } + break; + } + + messages.push({ + role: "assistant", + content: text || null, + tool_calls: toolCalls, + }); + + for (const tc of toolCalls) { + const wireName = tc.function.name; + const originalName = wireToName.get(wireName) ?? 
wireName; + let args: unknown; + try { + args = JSON.parse(tc.function.arguments); + } catch { + args = {}; + } + + yield { type: "tool_call", callId: tc.id, name: originalName, args }; + + try { + const result = await context.executeTool(originalName, args); + const resultStr = + typeof result === "string" ? result : JSON.stringify(result); + + yield { type: "tool_result", callId: tc.id, result }; + + messages.push({ + role: "tool", + content: resultStr, + tool_call_id: tc.id, + }); + } catch (error) { + const errMsg = + error instanceof Error ? error.message : "Tool execution failed"; + + yield { + type: "tool_result", + callId: tc.id, + result: null, + error: errMsg, + }; + + messages.push({ + role: "tool", + content: JSON.stringify({ error: errMsg }), + tool_call_id: tc.id, + }); + } + } + } + } + + private async *streamCompletion( + messages: OpenAIMessage[], + tools: OpenAITool[], + context: AgentRunContext, + ): AsyncGenerator< + AgentEvent, + { text: string; toolCalls: OpenAIToolCall[] }, + unknown + > { + const body: Record = { + messages, + stream: true, + max_tokens: this.maxTokens, + }; + + if (tools.length > 0) { + body.tools = tools; + } + + const responseBody = await this.streamBody(body, context.signal); + const reader = responseBody.getReader(); + + const decoder = new TextDecoder(); + let buffer = ""; + let fullText = ""; + const toolCallAccumulator = new Map< + number, + { id: string; name: string; arguments: string } + >(); + + try { + while (true) { + if (context.signal?.aborted) break; + + const { done, value } = await reader.read(); + if (done) break; + + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split("\n"); + buffer = lines.pop() ?? 
""; + + for (const line of lines) { + const trimmed = line.trim(); + if (!trimmed.startsWith("data: ")) continue; + const data = trimmed.slice(6); + if (data === "[DONE]") continue; + + let parsed: any; + try { + parsed = JSON.parse(data); + } catch { + continue; + } + + const delta = parsed.choices?.[0]?.delta; + if (!delta) continue; + + if (delta.content) { + fullText += delta.content; + yield { type: "message_delta" as const, content: delta.content }; + } + + if (delta.tool_calls) { + for (const tc of delta.tool_calls as DeltaToolCall[]) { + const existing = toolCallAccumulator.get(tc.index); + if (existing) { + if (tc.function?.arguments) { + existing.arguments += tc.function.arguments; + } + } else { + toolCallAccumulator.set(tc.index, { + id: tc.id ?? `call_${tc.index}`, + name: tc.function?.name ?? "", + arguments: tc.function?.arguments ?? "", + }); + } + } + } + } + } + } finally { + reader.releaseLock(); + } + + const toolCalls: OpenAIToolCall[] = Array.from( + toolCallAccumulator.values(), + ).map((tc) => ({ + id: tc.id, + type: "function" as const, + function: { name: tc.name, arguments: tc.arguments || "{}" }, + })); + + return { text: fullText, toolCalls }; + } + + private async *executeToolCalls( + calls: Array<{ name: string; args: unknown }>, + messages: OpenAIMessage[], + context: AgentRunContext, + ): AsyncGenerator { + const toolCallObjs: OpenAIToolCall[] = calls.map((c, i) => ({ + id: `text_call_${i}`, + type: "function" as const, + function: { + name: c.name, + arguments: JSON.stringify(c.args), + }, + })); + + messages.push({ + role: "assistant", + content: null, + tool_calls: toolCallObjs, + }); + + for (const tc of toolCallObjs) { + const name = tc.function.name; + let args: unknown; + try { + args = JSON.parse(tc.function.arguments); + } catch { + args = {}; + } + + yield { type: "tool_call", callId: tc.id, name, args }; + + try { + const result = await context.executeTool(name, args); + const resultStr = + typeof result === "string" ? 
result : JSON.stringify(result); + + yield { type: "tool_result", callId: tc.id, result }; + + messages.push({ + role: "tool", + content: resultStr, + tool_call_id: tc.id, + }); + } catch (error) { + const errMsg = + error instanceof Error ? error.message : "Tool execution failed"; + + yield { + type: "tool_result", + callId: tc.id, + result: null, + error: errMsg, + }; + + messages.push({ + role: "tool", + content: JSON.stringify({ error: errMsg }), + tool_call_id: tc.id, + }); + } + } + } + + private buildMessages(messages: AgentInput["messages"]): OpenAIMessage[] { + return messages.map((m) => ({ + role: m.role as OpenAIMessage["role"], + content: m.content, + })); + } + + private buildTools( + definitions: AgentToolDefinition[], + nameToWire: Map, + ): OpenAITool[] { + return definitions.map((def) => ({ + type: "function" as const, + function: { + name: nameToWire.get(def.name) ?? def.name, + description: def.description, + parameters: def.parameters, + }, + })); + } +} + +// --------------------------------------------------------------------------- +// Vercel AI SDK helper +// --------------------------------------------------------------------------- + +/** + * Creates a Vercel AI-compatible model backed by a Databricks Model Serving endpoint. + * + * Use with `VercelAIAdapter` to get the Vercel AI SDK ecosystem (useChat, etc.) + * while targeting a Databricks `/invocations` endpoint. + * + * Handles URL rewriting (`/chat/completions` -> `/invocations`), per-request + * auth refresh, and tool name sanitization (dots -> double-underscores). + * + * Requires the `ai` and `@ai-sdk/openai` packages as peer dependencies. 
+ * + * @example + * ```ts + * import { createApp, createAgent, agents } from "@databricks/appkit"; + * import { createDatabricksModel } from "@databricks/appkit/agents/databricks"; + * import { VercelAIAdapter } from "@databricks/appkit/agents/vercel-ai"; + * import { WorkspaceClient } from "@databricks/sdk-experimental"; + * + * const model = await createDatabricksModel({ + * workspaceClient: new WorkspaceClient({}), + * endpointName: "my-endpoint", + * }); + * + * await createApp({ + * plugins: [ + * agents({ + * agents: { + * assistant: createAgent({ + * instructions: "You are a helpful assistant.", + * model: new VercelAIAdapter({ model }), + * }), + * }, + * }), + * ], + * }); + * ``` + */ +export async function createDatabricksModel( + options: VercelDatabricksModelOptions, +): Promise { + let createOpenAI: any; + try { + const mod = await import("@ai-sdk/openai"); + createOpenAI = mod.createOpenAI; + } catch { + throw new Error( + "createDatabricksModel requires '@ai-sdk/openai' as a dependency. 
Install it with: npm install @ai-sdk/openai ai", + ); + } + + const config = options.workspaceClient.config; + await config.ensureResolved(); + + const baseURL = `${config.host}/serving-endpoints/${options.endpointName}`; + + const provider = createOpenAI({ + baseURL, + apiKey: "databricks", + fetch: async (url: string | URL | Request, init?: RequestInit) => { + const rewritten = String(url).replace( + "/chat/completions", + "/invocations", + ); + + const headers = new Headers(init?.headers); + await config.authenticate(headers); + + let body = init?.body; + if (typeof body === "string") { + body = rewriteToolNamesOutbound(body); + } + + const response = await globalThis.fetch(rewritten, { + ...init, + headers, + body, + }); + + if ( + !response.body || + !response.headers.get("content-type")?.includes("text/event-stream") + ) { + return response; + } + + const transformed = response.body.pipeThrough( + createToolNameRewriteStream(), + ); + + return new Response(transformed, { + status: response.status, + statusText: response.statusText, + headers: response.headers, + }); + }, + }); + + return provider(options.endpointName); +} + +/** + * Rewrites tool names in outbound request body (dots -> double-underscores). + */ +function rewriteToolNamesOutbound(body: string): string { + try { + const parsed = JSON.parse(body); + if (parsed.tools) { + for (const tool of parsed.tools) { + if (tool.function?.name) { + tool.function.name = tool.function.name.replace(/\./g, "__"); + } + } + } + return JSON.stringify(parsed); + } catch { + return body; + } +} + +/** + * Creates a TransformStream that rewrites tool names in SSE response chunks + * (double-underscores -> dots). 
+ */ +function createToolNameRewriteStream(): TransformStream< + Uint8Array, + Uint8Array +> { + const decoder = new TextDecoder(); + const encoder = new TextEncoder(); + + return new TransformStream({ + transform(chunk, controller) { + const text = decoder.decode(chunk, { stream: true }); + const rewritten = text.replace( + /"name"\s*:\s*"([a-zA-Z0-9_-]+)"/g, + (match, name: string) => { + if (name.includes("__")) { + return match.replace(name, name.replace(/__/g, ".")); + } + return match; + }, + ); + controller.enqueue(encoder.encode(rewritten)); + }, + }); +} + +// --------------------------------------------------------------------------- +// Text-based tool call parsing (fallback) +// --------------------------------------------------------------------------- + +/** + * Parses text-based tool calls from model output. + * + * Handles two formats: + * 1. Llama native: `[{"name": "tool_name", "parameters": {"arg": "val"}}]` + * 2. Python-style: `[tool_name(arg1='val1', arg2='val2')]` + */ +export function parseTextToolCalls( + text: string, +): Array<{ name: string; args: unknown }> { + const trimmed = text.trim(); + + const jsonResult = tryParseLlamaJsonToolCalls(trimmed); + if (jsonResult.length > 0) return jsonResult; + + const pyResult = tryParsePythonStyleToolCalls(trimmed); + if (pyResult.length > 0) return pyResult; + + return []; +} + +function tryParseLlamaJsonToolCalls( + text: string, +): Array<{ name: string; args: unknown }> { + const match = text.match(/\[\s*\{[\s\S]*\}\s*\]/); + if (!match) return []; + + try { + const parsed = JSON.parse(match[0]); + if (!Array.isArray(parsed)) return []; + + return parsed + .filter( + (item: any) => + typeof item === "object" && + item !== null && + typeof item.name === "string", + ) + .map((item: any) => ({ + name: item.name, + args: item.parameters ?? item.arguments ?? item.args ?? 
{}, + })); + } catch { + return []; + } +} + +function tryParsePythonStyleToolCalls( + text: string, +): Array<{ name: string; args: unknown }> { + const pattern = /\[?([a-zA-Z_][\w.]*)\(([^)]*)\)\]?/g; + const results: Array<{ name: string; args: unknown }> = []; + + for (const match of text.matchAll(pattern)) { + const name = match[1]; + const argsStr = match[2]; + + const args: Record = {}; + const argPattern = /(\w+)\s*=\s*(?:'([^']*)'|"([^"]*)"|(\S+))/g; + for (const argMatch of argsStr.matchAll(argPattern)) { + const key = argMatch[1]; + const value = argMatch[2] ?? argMatch[3] ?? argMatch[4]; + args[key] = value; + } + + results.push({ name, args }); + } + + return results; +} diff --git a/packages/appkit/src/agents/langchain.ts b/packages/appkit/src/agents/langchain.ts new file mode 100644 index 00000000..77961bcf --- /dev/null +++ b/packages/appkit/src/agents/langchain.ts @@ -0,0 +1,292 @@ +import type { + AgentAdapter, + AgentEvent, + AgentInput, + AgentRunContext, + AgentToolDefinition, +} from "shared"; + +/** + * Adapter bridging LangChain/LangGraph to the AppKit agent protocol. + * + * Accepts any LangChain `Runnable` (e.g. AgentExecutor, compiled LangGraph) + * and maps `streamEvents` v2 to `AgentEvent`. + * + * Requires `@langchain/core` as an optional peer dependency. 
+ * + * @example + * ```ts + * import { createApp, createAgent, agents } from "@databricks/appkit"; + * import { LangChainAdapter } from "@databricks/appkit/agents/langchain"; + * import { ChatOpenAI } from "@langchain/openai"; + * import { createReactAgent } from "@langchain/langgraph/prebuilt"; + * + * const model = new ChatOpenAI({ model: "gpt-4o" }); + * const agentExecutor = createReactAgent({ llm: model, tools: [] }); + * + * await createApp({ + * plugins: [ + * agents({ + * agents: { + * assistant: createAgent({ + * instructions: "You are a helpful assistant.", + * model: new LangChainAdapter({ runnable: agentExecutor }), + * }), + * }, + * }), + * ], + * }); + * ``` + */ +export class LangChainAdapter implements AgentAdapter { + private runnable: any; + + constructor(options: { runnable: any }) { + this.runnable = options.runnable; + } + + async *run( + input: AgentInput, + context: AgentRunContext, + ): AsyncGenerator { + const lcTools = await import("@langchain/core/tools"); + const DynamicStructuredTool = lcTools.DynamicStructuredTool; + const zodModule: any = await import("zod"); + const z = zodModule.z; + + const tools = this.buildTools( + input.tools, + context, + DynamicStructuredTool, + z, + ); + + const messages = input.messages.map((m) => ({ + role: m.role, + content: m.content, + })); + + yield { type: "status", status: "running" }; + + const runnableWithTools = + tools.length > 0 && typeof this.runnable.bindTools === "function" + ? this.runnable.bindTools(tools) + : this.runnable; + + const stream = await runnableWithTools.streamEvents( + { messages }, + { + version: "v2", + signal: input.signal, + }, + ); + + // Tool-call chunks from `on_chat_model_stream` come in fragments keyed by + // the model's `index`. We accumulate them and flush on `on_tool_start`. 
+ const toolCallAccumulator = new Map< + number, + { id: string; name: string; arguments: string } + >(); + // LangChain's `on_tool_end` reports the tool via `event.run_id` (its own + // internal identifier), not the model-provided tool_call id. To keep the + // `call_id` on `tool_call` and `tool_result` matching (so clients can + // correlate them via the Responses API `call_id` field), we record the + // mapping from `run_id` to the original model `tc.id` at `on_tool_start` + // and look it up at `on_tool_end`. + const runIdToCallId = new Map(); + // Counter for fallback callIds when the model does not provide `tc.id`. + let fallbackIdx = 0; + + for await (const event of stream) { + if (context.signal?.aborted) break; + + switch (event.event) { + case "on_chat_model_stream": { + const chunk = event.data?.chunk; + if (chunk?.content && typeof chunk.content === "string") { + yield { type: "message_delta", content: chunk.content }; + } + if (chunk?.tool_call_chunks) { + for (const tc of chunk.tool_call_chunks) { + const idx = tc.index ?? 0; + const existing = toolCallAccumulator.get(idx); + if (existing) { + if (tc.args) existing.arguments += tc.args; + // Later chunks for the same tool call may carry the id/name + // that the first chunk lacked. + if (tc.id && !existing.id.startsWith("lc_")) + existing.id = tc.id; + if (tc.name && !existing.name) existing.name = tc.name; + } else if (tc.name || tc.id) { + toolCallAccumulator.set(idx, { + // Use a deterministic fallback that cannot collide if the + // same tool is called twice in one turn without a model id. + id: + tc.id ?? `lc_${tc.name ?? "tool"}_${idx}_${++fallbackIdx}`, + name: tc.name ?? "", + arguments: tc.args ?? "", + }); + } + } + } + break; + } + + case "on_tool_start": { + // Find the accumulated tool_call that matches this tool invocation + // by name so we can record the run_id → callId mapping and yield + // the `tool_call` event with a callId that will match the + // subsequent `tool_result`. 
+ const toolName = event.name; + let matched: { id: string; name: string; arguments: string } | null = + null; + let matchedKey: number | null = null; + for (const [key, tc] of toolCallAccumulator) { + if (tc.name === toolName) { + matched = tc; + matchedKey = key; + break; + } + } + + if (matched) { + const runId = event.run_id; + if (typeof runId === "string" && runId.length > 0) { + runIdToCallId.set(runId, matched.id); + } + let args: unknown; + try { + args = JSON.parse(matched.arguments || "{}"); + } catch { + args = {}; + } + yield { + type: "tool_call" as const, + callId: matched.id, + name: matched.name, + args, + }; + if (matchedKey !== null) toolCallAccumulator.delete(matchedKey); + } else { + // Fallback: no accumulated tool_call matched this name. Emit a + // tool_call anyway with run_id as the correlating key so the + // client at least sees a call/result pair. + const runId = event.run_id ?? `lc_${toolName}_${++fallbackIdx}`; + runIdToCallId.set(runId, runId); + yield { + type: "tool_call" as const, + callId: runId, + name: toolName ?? "", + args: event.data?.input ?? {}, + }; + } + break; + } + + case "on_tool_end": { + const output = event.data?.output; + const runId = event.run_id; + const callId = + (typeof runId === "string" && runIdToCallId.get(runId)) || runId; + if (typeof runId === "string") runIdToCallId.delete(runId); + yield { + type: "tool_result", + callId, + result: output?.content ?? output, + }; + break; + } + + case "on_chain_end": { + const output = event.data?.output; + if (output?.content && typeof output.content === "string") { + yield { type: "message", content: output.content }; + } + break; + } + } + } + } + + /** + * Converts AgentToolDefinitions into LangChain DynamicStructuredTool instances. + * + * JSON Schema properties are mapped to Zod schemas using a lightweight + * recursive converter for the subset of JSON Schema types that tools use. 
+ */ + private buildTools( + definitions: AgentToolDefinition[], + context: AgentRunContext, + DynamicStructuredTool: any, + z: any, + ): any[] { + return definitions.map( + (def) => + new DynamicStructuredTool({ + name: def.name, + description: def.description, + schema: jsonSchemaToZod(def.parameters, z), + func: async (args: unknown) => { + try { + const result = await context.executeTool(def.name, args); + return typeof result === "string" + ? result + : JSON.stringify(result); + } catch (error) { + return `Error: ${error instanceof Error ? error.message : "Tool execution failed"}`; + } + }, + }), + ); + } +} + +/** + * Lightweight JSON Schema (subset) to Zod converter. + * Handles the types commonly used in tool parameters. + */ +function jsonSchemaToZod(schema: any, z: any): any { + if (!schema) return z.object({}); + + switch (schema.type) { + case "object": { + const shape: Record = {}; + const properties = schema.properties ?? {}; + const required = new Set(schema.required ?? []); + + for (const [key, prop] of Object.entries(properties)) { + let field = jsonSchemaToZod(prop, z); + if (!required.has(key)) { + field = field.optional(); + } + if ((prop as any).description) { + field = field.describe((prop as any).description); + } + shape[key] = field; + } + return z.object(shape); + } + + case "array": + return z.array(jsonSchemaToZod(schema.items ?? 
{}, z)); + + case "string": { + let s = z.string(); + if (schema.enum) s = z.enum(schema.enum); + return s; + } + + case "number": + case "integer": + return z.number(); + + case "boolean": + return z.boolean(); + + case "null": + return z.null(); + + default: + return z.any(); + } +} diff --git a/packages/appkit/src/agents/tests/databricks.test.ts b/packages/appkit/src/agents/tests/databricks.test.ts new file mode 100644 index 00000000..8a835094 --- /dev/null +++ b/packages/appkit/src/agents/tests/databricks.test.ts @@ -0,0 +1,486 @@ +import type { AgentEvent, AgentToolDefinition, Message } from "shared"; +import { afterEach, beforeEach, describe, expect, test, vi } from "vitest"; +import { DatabricksAdapter, parseTextToolCalls } from "../databricks"; + +const mockAuthenticate = vi + .fn() + .mockResolvedValue({ Authorization: "Bearer test-token" }); + +function sseChunk(data: string): string { + return `data: ${data}\n\n`; +} + +function textDelta(content: string): string { + return sseChunk( + JSON.stringify({ + choices: [{ delta: { content } }], + }), + ); +} + +function toolCallDelta( + index: number, + id: string | undefined, + name: string | undefined, + args: string, +): string { + return sseChunk( + JSON.stringify({ + choices: [ + { + delta: { + tool_calls: [ + { + index, + ...(id && { id }), + ...(name && { type: "function" }), + function: { + ...(name && { name }), + arguments: args, + }, + }, + ], + }, + }, + ], + }), + ); +} + +function createReadableStream(chunks: string[]): ReadableStream { + const encoder = new TextEncoder(); + let i = 0; + return new ReadableStream({ + pull(controller) { + if (i < chunks.length) { + controller.enqueue(encoder.encode(chunks[i])); + i++; + } else { + controller.close(); + } + }, + }); +} + +function mockFetch(chunks: string[]): typeof globalThis.fetch { + return vi.fn().mockResolvedValue({ + ok: true, + body: createReadableStream(chunks), + text: () => Promise.resolve(""), + }); +} + +function createTestMessages(): 
Message[] { + return [{ id: "1", role: "user", content: "Hello", createdAt: new Date() }]; +} + +function createTestTools(): AgentToolDefinition[] { + return [ + { + name: "analytics.query", + description: "Run SQL", + parameters: { + type: "object", + properties: { query: { type: "string" } }, + required: ["query"], + }, + }, + ]; +} + +function createAdapter(overrides?: { + endpointUrl?: string; + authenticate?: () => Promise>; + maxSteps?: number; + maxTokens?: number; +}) { + return new DatabricksAdapter({ + endpointUrl: + "https://test.databricks.com/serving-endpoints/my-endpoint/invocations", + authenticate: mockAuthenticate, + ...overrides, + }); +} + +describe("DatabricksAdapter", () => { + const originalFetch = globalThis.fetch; + + afterEach(() => { + globalThis.fetch = originalFetch; + mockAuthenticate.mockClear(); + }); + + test("streams text deltas from the model", async () => { + globalThis.fetch = mockFetch([ + textDelta("Hello"), + textDelta(" world"), + sseChunk("[DONE]"), + ]); + + const adapter = createAdapter(); + const events: AgentEvent[] = []; + + for await (const event of adapter.run( + { messages: createTestMessages(), tools: [], threadId: "t1" }, + { executeTool: vi.fn() }, + )) { + events.push(event); + } + + expect(events[0]).toEqual({ type: "status", status: "running" }); + expect(events[1]).toEqual({ type: "message_delta", content: "Hello" }); + expect(events[2]).toEqual({ type: "message_delta", content: " world" }); + }); + + test("calls authenticate() per request for fresh headers", async () => { + globalThis.fetch = mockFetch([textDelta("Hi"), sseChunk("[DONE]")]); + + const adapter = createAdapter(); + + for await (const _ of adapter.run( + { messages: createTestMessages(), tools: [], threadId: "t1" }, + { executeTool: vi.fn() }, + )) { + // drain + } + + expect(mockAuthenticate).toHaveBeenCalledTimes(1); + + const [, init] = (globalThis.fetch as any).mock.calls[0]; + expect(init.headers.Authorization).toBe("Bearer test-token"); + 
}); + + test("handles structured tool calls and executes them", async () => { + const executeTool = vi.fn().mockResolvedValue([{ trip_id: 1 }]); + + let callCount = 0; + globalThis.fetch = vi.fn().mockImplementation(() => { + callCount++; + if (callCount === 1) { + return Promise.resolve({ + ok: true, + body: createReadableStream([ + toolCallDelta(0, "call_1", "analytics__query", ""), + toolCallDelta(0, undefined, undefined, '{"query":'), + toolCallDelta(0, undefined, undefined, '"SELECT 1"}'), + sseChunk("[DONE]"), + ]), + }); + } + return Promise.resolve({ + ok: true, + body: createReadableStream([ + textDelta("Here are the results"), + sseChunk("[DONE]"), + ]), + }); + }); + + const adapter = createAdapter(); + const events: AgentEvent[] = []; + + for await (const event of adapter.run( + { + messages: createTestMessages(), + tools: createTestTools(), + threadId: "t1", + }, + { executeTool }, + )) { + events.push(event); + } + + expect(events).toContainEqual({ + type: "tool_call", + callId: "call_1", + name: "analytics.query", + args: { query: "SELECT 1" }, + }); + + expect(executeTool).toHaveBeenCalledWith("analytics.query", { + query: "SELECT 1", + }); + + expect(events).toContainEqual( + expect.objectContaining({ + type: "tool_result", + callId: "call_1", + result: [{ trip_id: 1 }], + }), + ); + + expect(events).toContainEqual({ + type: "message_delta", + content: "Here are the results", + }); + + // authenticate() called once per streamCompletion + expect(mockAuthenticate).toHaveBeenCalledTimes(2); + }); + + test("respects maxSteps limit", async () => { + globalThis.fetch = vi.fn().mockImplementation(() => + Promise.resolve({ + ok: true, + body: createReadableStream([ + toolCallDelta( + 0, + "call_loop", + "analytics__query", + '{"query":"SELECT 1"}', + ), + sseChunk("[DONE]"), + ]), + }), + ); + + const adapter = createAdapter({ maxSteps: 2 }); + const events: AgentEvent[] = []; + + for await (const event of adapter.run( + { + messages: createTestMessages(), 
+ tools: createTestTools(), + threadId: "t1", + }, + { executeTool: vi.fn().mockResolvedValue("ok") }, + )) { + events.push(event); + } + + expect(globalThis.fetch).toHaveBeenCalledTimes(2); + }); + + test("sends correct request to endpoint URL", async () => { + globalThis.fetch = mockFetch([textDelta("Hi"), sseChunk("[DONE]")]); + + const adapter = createAdapter(); + + for await (const _ of adapter.run( + { + messages: createTestMessages(), + tools: createTestTools(), + threadId: "t1", + }, + { executeTool: vi.fn() }, + )) { + // drain + } + + const [url, init] = (globalThis.fetch as any).mock.calls[0]; + expect(url).toBe( + "https://test.databricks.com/serving-endpoints/my-endpoint/invocations", + ); + + const body = JSON.parse(init.body); + expect(body.stream).toBe(true); + expect(body.tools).toHaveLength(1); + expect(body.tools[0].function.name).toBe("analytics__query"); + expect(body.messages[0]).toEqual({ + role: "user", + content: "Hello", + }); + }); + + test("throws on non-ok response", async () => { + globalThis.fetch = vi.fn().mockResolvedValue({ + ok: false, + status: 401, + text: () => Promise.resolve("Unauthorized"), + }); + + const adapter = createAdapter(); + + await expect(async () => { + for await (const _ of adapter.run( + { messages: createTestMessages(), tools: [], threadId: "t1" }, + { executeTool: vi.fn() }, + )) { + // drain + } + }).rejects.toThrow("Databricks API error (401): Unauthorized"); + }); +}); + +describe("DatabricksAdapter.fromServingEndpoint", () => { + test("routes tool-free chat through apiClient.request with a streaming payload", async () => { + const apiClient = { + request: vi.fn().mockResolvedValue({ + contents: createReadableStream([textDelta("Hi"), sseChunk("[DONE]")]), + }), + }; + + const adapter = await DatabricksAdapter.fromServingEndpoint({ + workspaceClient: { apiClient }, + endpointName: "my-model", + }); + + for await (const _ of adapter.run( + { messages: createTestMessages(), tools: [], threadId: "t1" }, + { 
executeTool: vi.fn() }, + )) { + // drain + } + + expect(apiClient.request).toHaveBeenCalledTimes(1); + const [requestArgs] = apiClient.request.mock.calls[0]; + expect(requestArgs.path).toBe("/serving-endpoints/my-model/invocations"); + expect(requestArgs.method).toBe("POST"); + expect(requestArgs.raw).toBe(true); + expect(requestArgs.payload.stream).toBe(true); + // Auth + url encoding are the connector's (and the SDK's) concerns — the + // adapter no longer reaches into the workspace config. + }); + + test("URL-encodes endpoint names with special characters", async () => { + const apiClient = { + request: vi.fn().mockResolvedValue({ + contents: createReadableStream([textDelta("Hi"), sseChunk("[DONE]")]), + }), + }; + + const adapter = await DatabricksAdapter.fromServingEndpoint({ + workspaceClient: { apiClient }, + endpointName: "my model/with spaces", + }); + + for await (const _ of adapter.run( + { messages: createTestMessages(), tools: [], threadId: "t1" }, + { executeTool: vi.fn() }, + )) { + // drain + } + + const [requestArgs] = apiClient.request.mock.calls[0]; + expect(requestArgs.path).toBe( + "/serving-endpoints/my%20model%2Fwith%20spaces/invocations", + ); + }); +}); + +describe("DatabricksAdapter.fromModelServing", () => { + const originalEnv = process.env; + + beforeEach(() => { + process.env = { ...originalEnv }; + }); + + afterEach(() => { + process.env = originalEnv; + }); + + test("reads endpoint from DATABRICKS_AGENT_ENDPOINT env var", async () => { + process.env.DATABRICKS_AGENT_ENDPOINT = "my-model"; + + vi.mock("@databricks/sdk-experimental", () => ({ + WorkspaceClient: vi.fn().mockImplementation(() => ({ + apiClient: { request: vi.fn() }, + })), + })); + + const adapter = await DatabricksAdapter.fromModelServing(); + expect(adapter).toBeInstanceOf(DatabricksAdapter); + }); + + test("throws when no endpoint name and no env var", async () => { + delete process.env.DATABRICKS_AGENT_ENDPOINT; + + await 
expect(DatabricksAdapter.fromModelServing()).rejects.toThrow( + "No endpoint name provided", + ); + }); + + test("explicit endpoint name takes precedence over env var", async () => { + process.env.DATABRICKS_AGENT_ENDPOINT = "env-model"; + + const apiClient = { + request: vi.fn().mockResolvedValue({ + contents: createReadableStream([textDelta("Hi"), sseChunk("[DONE]")]), + }), + }; + + const adapter = await DatabricksAdapter.fromModelServing("explicit-model", { + workspaceClient: { apiClient }, + }); + + expect(adapter).toBeInstanceOf(DatabricksAdapter); + + for await (const _ of adapter.run( + { messages: createTestMessages(), tools: [], threadId: "t1" }, + { executeTool: vi.fn() }, + )) { + // drain + } + + const [requestArgs] = apiClient.request.mock.calls[0]; + expect(requestArgs.path).toBe( + "/serving-endpoints/explicit-model/invocations", + ); + }); +}); + +describe("parseTextToolCalls", () => { + test("parses Llama JSON format", () => { + const text = + '[{"name": "analytics.query", "parameters": {"query": "SELECT 1"}}]'; + const result = parseTextToolCalls(text); + + expect(result).toEqual([ + { name: "analytics.query", args: { query: "SELECT 1" } }, + ]); + }); + + test("parses multiple Llama JSON tool calls", () => { + const text = + '[{"name": "analytics.query", "parameters": {"query": "SELECT 1"}}, {"name": "files.uploads.list", "parameters": {}}]'; + const result = parseTextToolCalls(text); + + expect(result).toHaveLength(2); + expect(result[0].name).toBe("analytics.query"); + expect(result[1].name).toBe("files.uploads.list"); + }); + + test("parses Python-style tool calls", () => { + const text = + "[analytics.query(query='SELECT * FROM trips ORDER BY date DESC LIMIT 10')]"; + const result = parseTextToolCalls(text); + + expect(result).toEqual([ + { + name: "analytics.query", + args: { + query: "SELECT * FROM trips ORDER BY date DESC LIMIT 10", + }, + }, + ]); + }); + + test("parses Python-style with multiple args", () => { + const text = + 
"[files.uploads.read(path='/data/file.csv', encoding='utf-8')]"; + const result = parseTextToolCalls(text); + + expect(result).toEqual([ + { + name: "files.uploads.read", + args: { path: "/data/file.csv", encoding: "utf-8" }, + }, + ]); + }); + + test("returns empty array for plain text", () => { + expect(parseTextToolCalls("Hello, how can I help?")).toEqual([]); + expect(parseTextToolCalls("")).toEqual([]); + expect(parseTextToolCalls("The answer is 42")).toEqual([]); + }); + + test("handles Llama format with 'arguments' key", () => { + const text = + '[{"name": "lakebase.query", "arguments": {"text": "SELECT 1"}}]'; + const result = parseTextToolCalls(text); + + expect(result).toEqual([ + { name: "lakebase.query", args: { text: "SELECT 1" } }, + ]); + }); +}); diff --git a/packages/appkit/src/agents/tests/langchain.test.ts b/packages/appkit/src/agents/tests/langchain.test.ts new file mode 100644 index 00000000..3bf1a471 --- /dev/null +++ b/packages/appkit/src/agents/tests/langchain.test.ts @@ -0,0 +1,366 @@ +import type { AgentEvent, AgentToolDefinition, Message } from "shared"; +import { describe, expect, test, vi } from "vitest"; +import { LangChainAdapter } from "../langchain"; + +vi.mock("@langchain/core/tools", () => ({ + DynamicStructuredTool: vi.fn().mockImplementation((config: any) => ({ + name: config.name, + description: config.description, + schema: config.schema, + func: config.func, + })), +})); + +vi.mock("zod", () => { + const createChainable = (base: Record = {}): any => { + const obj: any = { ...base }; + obj.optional = () => createChainable({ ...obj, _optional: true }); + obj.describe = (d: string) => createChainable({ ...obj, _description: d }); + return obj; + }; + + return { + z: { + object: (shape: any) => createChainable({ type: "object", shape }), + string: () => createChainable({ type: "string" }), + number: () => createChainable({ type: "number" }), + boolean: () => createChainable({ type: "boolean" }), + array: (item: any) => 
createChainable({ type: "array", item }), + enum: (vals: any) => createChainable({ type: "enum", values: vals }), + any: () => createChainable({ type: "any" }), + null: () => createChainable({ type: "null" }), + }, + }; +}); + +function createTestMessages(): Message[] { + return [{ id: "1", role: "user", content: "Hello", createdAt: new Date() }]; +} + +function createTestTools(): AgentToolDefinition[] { + return [ + { + name: "lakebase.query", + description: "Run SQL", + parameters: { + type: "object", + properties: { + text: { type: "string", description: "SQL query" }, + values: { type: "array", items: {} }, + }, + required: ["text"], + }, + }, + ]; +} + +describe("LangChainAdapter", () => { + test("yields status running on start and maps chat_model_stream", async () => { + async function* mockStreamEvents() { + yield { + event: "on_chat_model_stream", + data: { chunk: { content: "Hello" } }, + }; + yield { + event: "on_chat_model_stream", + data: { chunk: { content: " world" } }, + }; + } + + const mockRunnable = { + bindTools: vi.fn().mockReturnValue({ + streamEvents: vi.fn().mockResolvedValue(mockStreamEvents()), + }), + }; + + const adapter = new LangChainAdapter({ runnable: mockRunnable }); + const events: AgentEvent[] = []; + + for await (const event of adapter.run( + { + messages: createTestMessages(), + tools: createTestTools(), + threadId: "t1", + }, + { executeTool: vi.fn() }, + )) { + events.push(event); + } + + expect(events[0]).toEqual({ type: "status", status: "running" }); + expect(events[1]).toEqual({ type: "message_delta", content: "Hello" }); + expect(events[2]).toEqual({ type: "message_delta", content: " world" }); + }); + + test("maps on_tool_end events to tool_result", async () => { + async function* mockStreamEvents() { + yield { + event: "on_tool_end", + run_id: "run-1", + data: { output: { content: "42 rows" } }, + }; + } + + const mockRunnable = { + bindTools: vi.fn().mockReturnValue({ + streamEvents: 
vi.fn().mockResolvedValue(mockStreamEvents()), + }), + }; + + const adapter = new LangChainAdapter({ runnable: mockRunnable }); + const events: AgentEvent[] = []; + + for await (const event of adapter.run( + { + messages: createTestMessages(), + tools: createTestTools(), + threadId: "t1", + }, + { executeTool: vi.fn() }, + )) { + events.push(event); + } + + expect(events).toContainEqual({ + type: "tool_result", + callId: "run-1", + result: "42 rows", + }); + }); + + test("calls bindTools when tools are provided", async () => { + const streamEvents = vi.fn().mockResolvedValue((async function* () {})()); + const bindTools = vi.fn().mockReturnValue({ streamEvents }); + + const adapter = new LangChainAdapter({ + runnable: { bindTools }, + }); + + for await (const _ of adapter.run( + { + messages: createTestMessages(), + tools: createTestTools(), + threadId: "t1", + }, + { executeTool: vi.fn() }, + )) { + // drain + } + + expect(bindTools).toHaveBeenCalledTimes(1); + expect(bindTools.mock.calls[0][0]).toHaveLength(1); + expect(bindTools.mock.calls[0][0][0].name).toBe("lakebase.query"); + }); + + test("does not call bindTools when no tools provided", async () => { + const streamEvents = vi.fn().mockResolvedValue((async function* () {})()); + const bindTools = vi.fn().mockReturnValue({ streamEvents }); + + const adapter = new LangChainAdapter({ + runnable: { bindTools, streamEvents }, + }); + + for await (const _ of adapter.run( + { + messages: createTestMessages(), + tools: [], + threadId: "t1", + }, + { executeTool: vi.fn() }, + )) { + // drain + } + + expect(bindTools).not.toHaveBeenCalled(); + }); + + test("callId on tool_call and tool_result match across on_tool_start / on_tool_end", async () => { + // Simulates the realistic LangChain stream: first the chat-model emits a + // tool_call_chunk carrying the model-provided id and name; then + // on_tool_start fires with LangChain's own run_id; then on_tool_end + // fires with the same run_id. 
The adapter must yield tool_call with + // callId = model id, then tool_result with the SAME callId. + async function* mockStreamEvents() { + yield { + event: "on_chat_model_stream", + data: { + chunk: { + content: "", + tool_call_chunks: [ + { + index: 0, + id: "call_abc123", + name: "lakebase.query", + args: '{"text":"SELECT 1"}', + }, + ], + }, + }, + }; + yield { + event: "on_tool_start", + name: "lakebase.query", + run_id: "run-uuid-xyz", + data: { input: { text: "SELECT 1" } }, + }; + yield { + event: "on_tool_end", + name: "lakebase.query", + run_id: "run-uuid-xyz", + data: { output: { content: "42 rows" } }, + }; + } + + const mockRunnable = { + bindTools: vi.fn().mockReturnValue({ + streamEvents: vi.fn().mockResolvedValue(mockStreamEvents()), + }), + }; + + const adapter = new LangChainAdapter({ runnable: mockRunnable }); + const events: AgentEvent[] = []; + + for await (const event of adapter.run( + { + messages: createTestMessages(), + tools: createTestTools(), + threadId: "t1", + }, + { executeTool: vi.fn() }, + )) { + events.push(event); + } + + const toolCall = events.find((e) => e.type === "tool_call"); + const toolResult = events.find((e) => e.type === "tool_result"); + expect(toolCall).toBeDefined(); + expect(toolResult).toBeDefined(); + // Critical invariant: same callId on the pair, using the model-provided id + // (not LangChain's internal run_id). + expect(toolCall && "callId" in toolCall ? toolCall.callId : undefined).toBe( + "call_abc123", + ); + expect( + toolResult && "callId" in toolResult ? toolResult.callId : undefined, + ).toBe("call_abc123"); + }); + + test("same tool invoked twice in one turn gets unique callIds when model omits id", async () => { + async function* mockStreamEvents() { + yield { + event: "on_chat_model_stream", + data: { + chunk: { + tool_call_chunks: [ + // Two calls to the same tool, no model-provided id on either. 
+ { index: 0, name: "search", args: '{"q":"a"}' }, + { index: 1, name: "search", args: '{"q":"b"}' }, + ], + }, + }, + }; + yield { + event: "on_tool_start", + name: "search", + run_id: "run-1", + data: { input: { q: "a" } }, + }; + yield { + event: "on_tool_end", + name: "search", + run_id: "run-1", + data: { output: { content: "A-result" } }, + }; + yield { + event: "on_tool_start", + name: "search", + run_id: "run-2", + data: { input: { q: "b" } }, + }; + yield { + event: "on_tool_end", + name: "search", + run_id: "run-2", + data: { output: { content: "B-result" } }, + }; + } + + const mockRunnable = { + bindTools: vi.fn().mockReturnValue({ + streamEvents: vi.fn().mockResolvedValue(mockStreamEvents()), + }), + }; + + const adapter = new LangChainAdapter({ runnable: mockRunnable }); + const events: AgentEvent[] = []; + for await (const ev of adapter.run( + { + messages: createTestMessages(), + tools: createTestTools(), + threadId: "t1", + }, + { executeTool: vi.fn() }, + )) { + events.push(ev); + } + + const calls = events.filter( + (e): e is AgentEvent & { type: "tool_call"; callId: string } => + e.type === "tool_call", + ); + const results = events.filter( + (e): e is AgentEvent & { type: "tool_result"; callId: string } => + e.type === "tool_result", + ); + expect(calls).toHaveLength(2); + expect(results).toHaveLength(2); + expect(calls[0].callId).not.toBe(calls[1].callId); + // Each result correlates with its call. 
+ expect(results[0].callId).toBe(calls[0].callId); + expect(results[1].callId).toBe(calls[1].callId); + }); + + test("falls back to run_id as callId when on_tool_start has no accumulated match", async () => { + async function* mockStreamEvents() { + yield { + event: "on_tool_start", + name: "orphan_tool", + run_id: "run-orphan", + data: { input: { x: 1 } }, + }; + yield { + event: "on_tool_end", + run_id: "run-orphan", + data: { output: { content: "ok" } }, + }; + } + + const mockRunnable = { + bindTools: vi.fn().mockReturnValue({ + streamEvents: vi.fn().mockResolvedValue(mockStreamEvents()), + }), + }; + + const adapter = new LangChainAdapter({ runnable: mockRunnable }); + const events: AgentEvent[] = []; + for await (const ev of adapter.run( + { + messages: createTestMessages(), + tools: createTestTools(), + threadId: "t1", + }, + { executeTool: vi.fn() }, + )) { + events.push(ev); + } + + const toolCall = events.find((e) => e.type === "tool_call"); + const toolResult = events.find((e) => e.type === "tool_result"); + expect(toolCall && "callId" in toolCall ? toolCall.callId : undefined).toBe( + "run-orphan", + ); + expect( + toolResult && "callId" in toolResult ? 
toolResult.callId : undefined, + ).toBe("run-orphan"); + }); +}); diff --git a/packages/appkit/src/agents/tests/vercel-ai.test.ts b/packages/appkit/src/agents/tests/vercel-ai.test.ts new file mode 100644 index 00000000..7280c9aa --- /dev/null +++ b/packages/appkit/src/agents/tests/vercel-ai.test.ts @@ -0,0 +1,190 @@ +import type { AgentEvent, AgentToolDefinition, Message } from "shared"; +import { describe, expect, test, vi } from "vitest"; +import { VercelAIAdapter } from "../vercel-ai"; + +vi.mock("ai", () => ({ + streamText: vi.fn(), + jsonSchema: vi.fn((schema: any) => schema), +})); + +function createTestMessages(): Message[] { + return [ + { + id: "1", + role: "user", + content: "Hello", + createdAt: new Date(), + }, + ]; +} + +function createTestTools(): AgentToolDefinition[] { + return [ + { + name: "analytics.query", + description: "Run SQL", + parameters: { + type: "object", + properties: { + query: { type: "string" }, + }, + required: ["query"], + }, + }, + ]; +} + +describe("VercelAIAdapter", () => { + test("yields status running on start", async () => { + const { streamText } = await import("ai"); + + async function* mockStream() { + yield { type: "text-delta", textDelta: "Hi" }; + } + + (streamText as any).mockReturnValue({ + fullStream: mockStream(), + }); + + const adapter = new VercelAIAdapter({ model: {} }); + const events: AgentEvent[] = []; + + const stream = adapter.run( + { + messages: createTestMessages(), + tools: createTestTools(), + threadId: "t1", + }, + { + executeTool: vi.fn(), + }, + ); + + for await (const event of stream) { + events.push(event); + } + + expect(events[0]).toEqual({ type: "status", status: "running" }); + expect(events[1]).toEqual({ type: "message_delta", content: "Hi" }); + }); + + test("maps tool-call and tool-result events", async () => { + const { streamText } = await import("ai"); + + async function* mockStream() { + yield { + type: "tool-call", + toolCallId: "c1", + toolName: "analytics.query", + args: { query: 
"SELECT 1" }, + }; + yield { + type: "tool-result", + toolCallId: "c1", + result: [{ value: 1 }], + }; + } + + (streamText as any).mockReturnValue({ + fullStream: mockStream(), + }); + + const adapter = new VercelAIAdapter({ model: {} }); + const events: AgentEvent[] = []; + + for await (const event of adapter.run( + { + messages: createTestMessages(), + tools: createTestTools(), + threadId: "t1", + }, + { executeTool: vi.fn() }, + )) { + events.push(event); + } + + expect(events).toContainEqual({ + type: "tool_call", + callId: "c1", + name: "analytics.query", + args: { query: "SELECT 1" }, + }); + + expect(events).toContainEqual({ + type: "tool_result", + callId: "c1", + result: [{ value: 1 }], + }); + }); + + test("maps error events", async () => { + const { streamText } = await import("ai"); + + async function* mockStream() { + yield { type: "error", error: "API rate limited" }; + } + + (streamText as any).mockReturnValue({ + fullStream: mockStream(), + }); + + const adapter = new VercelAIAdapter({ model: {} }); + const events: AgentEvent[] = []; + + for await (const event of adapter.run( + { + messages: createTestMessages(), + tools: [], + threadId: "t1", + }, + { executeTool: vi.fn() }, + )) { + events.push(event); + } + + expect(events).toContainEqual({ + type: "status", + status: "error", + error: "API rate limited", + }); + }); + + test("builds tools with execute functions that delegate to executeTool", async () => { + const { streamText } = await import("ai"); + + let capturedTools: Record = {}; + + (streamText as any).mockImplementation((opts: any) => { + capturedTools = opts.tools; + return { + fullStream: (async function* () {})(), + }; + }); + + const executeTool = vi.fn().mockResolvedValue({ count: 42 }); + const adapter = new VercelAIAdapter({ model: {} }); + + // Consume the stream to trigger streamText + for await (const _ of adapter.run( + { + messages: createTestMessages(), + tools: createTestTools(), + threadId: "t1", + }, + { executeTool }, + 
)) { + // drain + } + + expect(capturedTools["analytics.query"]).toBeDefined(); + expect(capturedTools["analytics.query"].description).toBe("Run SQL"); + + const result = await capturedTools["analytics.query"].execute({ + query: "SELECT 1", + }); + expect(executeTool).toHaveBeenCalledWith("analytics.query", { + query: "SELECT 1", + }); + expect(result).toEqual({ count: 42 }); + }); +}); diff --git a/packages/appkit/src/agents/vercel-ai.ts b/packages/appkit/src/agents/vercel-ai.ts new file mode 100644 index 00000000..ea77771a --- /dev/null +++ b/packages/appkit/src/agents/vercel-ai.ts @@ -0,0 +1,138 @@ +import type { + AgentAdapter, + AgentEvent, + AgentInput, + AgentRunContext, + AgentToolDefinition, +} from "shared"; + +/** + * Adapter bridging the Vercel AI SDK (`ai` package) to the AppKit agent protocol. + * + * Converts `AgentToolDefinition[]` to Vercel AI tool format and maps + * `streamText().fullStream` events to `AgentEvent`. + * + * Requires `ai` as an optional peer dependency. 
+ * + * @example + * ```ts + * import { createApp, createAgent, agents } from "@databricks/appkit"; + * import { VercelAIAdapter } from "@databricks/appkit/agents/vercel-ai"; + * import { openai } from "@ai-sdk/openai"; + * + * await createApp({ + * plugins: [ + * agents({ + * agents: { + * assistant: createAgent({ + * instructions: "You are a helpful assistant.", + * model: new VercelAIAdapter({ model: openai("gpt-4o") }), + * }), + * }, + * }), + * ], + * }); + * ``` + */ +export class VercelAIAdapter implements AgentAdapter { + private model: any; + + constructor(options: { model: any }) { + this.model = options.model; + } + + async *run( + input: AgentInput, + context: AgentRunContext, + ): AsyncGenerator { + const { streamText } = await import("ai"); + const { jsonSchema } = await import("ai"); + + const tools = this.buildTools(input.tools, context, jsonSchema); + + const messages = input.messages.map((m) => ({ + role: m.role as "user" | "assistant" | "system", + content: m.content, + })); + + yield { type: "status", status: "running" }; + + const result = streamText({ + model: this.model, + messages, + tools, + maxSteps: 10 as any, + abortSignal: input.signal, + } as any); + + for await (const part of (result as any).fullStream) { + if (context.signal?.aborted) break; + + switch (part.type) { + case "text-delta": + yield { type: "message_delta", content: part.textDelta }; + break; + + case "tool-call": + yield { + type: "tool_call", + callId: part.toolCallId, + name: part.toolName, + args: part.args, + }; + break; + + case "tool-result": + yield { + type: "tool_result", + callId: part.toolCallId, + result: part.result, + }; + break; + + case "reasoning": + if (part.textDelta) { + yield { type: "thinking", content: part.textDelta }; + } + break; + + case "error": + yield { + type: "status", + status: "error", + error: String(part.error), + }; + break; + } + } + } + + private buildTools( + definitions: AgentToolDefinition[], + context: AgentRunContext, + 
jsonSchema: any, + ): Record { + const tools: Record = {}; + + for (const def of definitions) { + tools[def.name] = { + description: def.description, + parameters: jsonSchema(def.parameters), + execute: async (args: unknown) => { + try { + return await context.executeTool(def.name, args); + } catch (error) { + return { + error: + error instanceof Error + ? error.message + : "Tool execution failed", + }; + } + }, + }; + } + + return tools; + } +} diff --git a/packages/appkit/tsdown.config.ts b/packages/appkit/tsdown.config.ts index 97698714..0e6a4b6b 100644 --- a/packages/appkit/tsdown.config.ts +++ b/packages/appkit/tsdown.config.ts @@ -4,7 +4,12 @@ export default defineConfig([ { publint: true, name: "@databricks/appkit", - entry: "src/index.ts", + entry: [ + "src/index.ts", + "src/agents/vercel-ai.ts", + "src/agents/langchain.ts", + "src/agents/databricks.ts", + ], outDir: "dist", hash: false, format: "esm", diff --git a/packages/shared/src/agent.ts b/packages/shared/src/agent.ts new file mode 100644 index 00000000..c4f76b29 --- /dev/null +++ b/packages/shared/src/agent.ts @@ -0,0 +1,212 @@ +import type { JSONSchema7 } from "json-schema"; + +// --------------------------------------------------------------------------- +// Tool definitions +// --------------------------------------------------------------------------- + +export interface ToolAnnotations { + readOnly?: boolean; + destructive?: boolean; + idempotent?: boolean; + requiresUserContext?: boolean; +} + +export interface AgentToolDefinition { + name: string; + description: string; + parameters: JSONSchema7; + annotations?: ToolAnnotations; +} + +export interface ToolProvider { + getAgentTools(): AgentToolDefinition[]; + executeAgentTool( + name: string, + args: unknown, + signal?: AbortSignal, + ): Promise; +} + +// --------------------------------------------------------------------------- +// Messages & threads +// --------------------------------------------------------------------------- + +export 
interface Message { + id: string; + role: "user" | "assistant" | "system" | "tool"; + content: string; + toolCallId?: string; + toolCalls?: ToolCall[]; + createdAt: Date; +} + +export interface ToolCall { + id: string; + name: string; + args: unknown; +} + +export interface Thread { + id: string; + userId: string; + messages: Message[]; + createdAt: Date; + updatedAt: Date; +} + +// --------------------------------------------------------------------------- +// Thread store +// --------------------------------------------------------------------------- + +export interface ThreadStore { + create(userId: string): Promise; + get(threadId: string, userId: string): Promise; + list(userId: string): Promise; + addMessage(threadId: string, userId: string, message: Message): Promise; + delete(threadId: string, userId: string): Promise; +} + +// --------------------------------------------------------------------------- +// Agent events (SSE protocol) +// --------------------------------------------------------------------------- + +export type AgentEvent = + | { type: "message_delta"; content: string } + | { type: "message"; content: string } + | { type: "tool_call"; callId: string; name: string; args: unknown } + | { + type: "tool_result"; + callId: string; + result: unknown; + error?: string; + } + | { type: "thinking"; content: string } + | { + type: "status"; + status: "running" | "waiting" | "complete" | "error"; + error?: string; + } + | { type: "metadata"; data: Record }; + +// --------------------------------------------------------------------------- +// Responses API types (OpenAI-compatible wire format for HTTP boundary) +// Self-contained — no openai package dependency. 
+// --------------------------------------------------------------------------- + +export interface OutputTextContent { + type: "output_text"; + text: string; +} + +export interface ResponseOutputMessage { + type: "message"; + id: string; + status: "in_progress" | "completed"; + role: "assistant"; + content: OutputTextContent[]; +} + +export interface ResponseFunctionToolCall { + type: "function_call"; + id: string; + call_id: string; + name: string; + arguments: string; +} + +export interface ResponseFunctionCallOutput { + type: "function_call_output"; + id: string; + call_id: string; + output: string; +} + +export type ResponseOutputItem = + | ResponseOutputMessage + | ResponseFunctionToolCall + | ResponseFunctionCallOutput; + +export interface ResponseOutputItemAddedEvent { + type: "response.output_item.added"; + output_index: number; + item: ResponseOutputItem; + sequence_number: number; +} + +export interface ResponseOutputItemDoneEvent { + type: "response.output_item.done"; + output_index: number; + item: ResponseOutputItem; + sequence_number: number; +} + +export interface ResponseTextDeltaEvent { + type: "response.output_text.delta"; + item_id: string; + output_index: number; + content_index: number; + delta: string; + sequence_number: number; +} + +export interface ResponseCompletedEvent { + type: "response.completed"; + sequence_number: number; + response: Record; +} + +export interface ResponseErrorEvent { + type: "error"; + error: string; + sequence_number: number; +} + +export interface ResponseFailedEvent { + type: "response.failed"; + sequence_number: number; +} + +export interface AppKitThinkingEvent { + type: "appkit.thinking"; + content: string; + sequence_number: number; +} + +export interface AppKitMetadataEvent { + type: "appkit.metadata"; + data: Record; + sequence_number: number; +} + +export type ResponseStreamEvent = + | ResponseOutputItemAddedEvent + | ResponseOutputItemDoneEvent + | ResponseTextDeltaEvent + | ResponseCompletedEvent + | 
ResponseErrorEvent + | ResponseFailedEvent + | AppKitThinkingEvent + | AppKitMetadataEvent; + +// --------------------------------------------------------------------------- +// Adapter contract +// --------------------------------------------------------------------------- + +export interface AgentInput { + messages: Message[]; + tools: AgentToolDefinition[]; + threadId: string; + signal?: AbortSignal; +} + +export interface AgentRunContext { + executeTool: (name: string, args: unknown) => Promise; + signal?: AbortSignal; +} + +export interface AgentAdapter { + run( + input: AgentInput, + context: AgentRunContext, + ): AsyncGenerator; +} diff --git a/packages/shared/src/index.ts b/packages/shared/src/index.ts index 627d70d6..9829729a 100644 --- a/packages/shared/src/index.ts +++ b/packages/shared/src/index.ts @@ -1,3 +1,4 @@ +export * from "./agent"; export * from "./cache"; export * from "./execute"; export * from "./genie"; diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 9ca11b81..16079b1d 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -326,7 +326,16 @@ importers: ws: specifier: 8.18.3 version: 8.18.3(bufferutil@4.0.9) + zod: + specifier: ^4.0.0 + version: 4.1.13 devDependencies: + '@ai-sdk/openai': + specifier: 4.0.0-beta.27 + version: 4.0.0-beta.27(zod@4.1.13) + '@langchain/core': + specifier: ^1.1.39 + version: 1.1.39(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.208.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.6.0(@opentelemetry/api@1.9.0))(ws@8.18.3(bufferutil@4.0.9)) '@types/express': specifier: 4.17.25 version: 4.17.25 @@ -342,6 +351,9 @@ importers: '@vitejs/plugin-react': specifier: 5.1.1 version: 5.1.1(rolldown-vite@7.1.14(@types/node@25.2.3)(esbuild@0.25.10)(jiti@2.6.1)(terser@5.44.1)(tsx@4.20.6)(yaml@2.8.2)) + ai: + specifier: 7.0.0-beta.76 + version: 7.0.0-beta.76(zod@4.1.13) packages/appkit-ui: dependencies: @@ -561,16 +573,38 @@ packages: peerDependencies: zod: ^3.25.76 || ^4.1.8 + 
'@ai-sdk/gateway@4.0.0-beta.43': + resolution: {integrity: sha512-EGQe4If6jt1ZhENmwZn8UAeHbEc7DRiK7ff7dwgfNthwso2hdzLbgXzuTO+W/op+oDFQK1pKiAz5RrPsVQWiew==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + + '@ai-sdk/openai@4.0.0-beta.27': + resolution: {integrity: sha512-7DpXCE4pcc4pVzuEc0whMrQN6Whi14Qsqjx97mLPGjpS6Lff48Zcn2322IFpWuhVJ10hIM1kEZNxUYvXt1O/yg==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/provider-utils@3.0.19': resolution: {integrity: sha512-W41Wc9/jbUVXVwCN/7bWa4IKe8MtxO3EyA0Hfhx6grnmiYlCvpI8neSYWFE0zScXJkgA/YK3BRybzgyiXuu6JA==} engines: {node: '>=18'} peerDependencies: zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/provider-utils@5.0.0-beta.16': + resolution: {integrity: sha512-CyMV5go6libw5WaZ4m7nO0uRLTENxbIODiDrTXJNwxYIBR8p5aCGaxt9oj3prbvNkTt0Srh/Gyw+n2pR9hQ5Pg==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/provider@2.0.0': resolution: {integrity: sha512-6o7Y2SeO9vFKB8lArHXehNuusnpddKPk7xqL7T2/b+OvXMRIXUO1rR4wcv1hAFUAT9avGZshty3Wlua/XA7TvA==} engines: {node: '>=18'} + '@ai-sdk/provider@4.0.0-beta.10': + resolution: {integrity: sha512-E2O/LCWjqOxAUfpykQR4xLmcGXySIu6L+wYJjav2xiHu38otPq0qIexgH9ZKulBvBWkrtJ3fxz0kzHDlCBkwng==} + engines: {node: '>=18'} + '@ai-sdk/react@2.0.115': resolution: {integrity: sha512-Etu7gWSEi2dmXss1PoR5CAZGwGShXsF9+Pon1eRO6EmatjYaBMhq1CfHPyYhGzWrint8jJIK2VaAhiMef29qZw==} engines: {node: '>=18'} @@ -1520,6 +1554,9 @@ packages: resolution: {integrity: sha512-hAs5PPKPCQ3/Nha+1fo4A4/gL85fIfxZwHPehsjCJ+BhQH2/yw6/xReuaPA/RfNQr6iz1PcD7BZcE3ctyyl3EA==} cpu: [x64] + '@cfworker/json-schema@4.1.1': + resolution: {integrity: sha512-gAmrUZSGtKc3AiBL71iNWxDsyUC5uMaKKGdvzYsBoTW/xi42JQHl7eKV2OYzCUqvc+D2RCcf7EXY2iCyFIk6og==} + '@chevrotain/cst-dts-gen@11.0.3': resolution: {integrity: sha512-BvIKpRLeS/8UbfxXxgC33xOumsacaeCKAjAeLyOn7Pcp95HiRbrpl14S+9vaZLolnbssPIUuiUd8IvgkRyt6NQ==} @@ -2646,6 +2683,10 @@ packages: '@kwsites/file-exists@1.1.1': resolution: 
{integrity: sha512-m9/5YGR18lIwxSFDwfE3oA7bWuq9kdau6ugN4H2rJeyhFQZcG9AgSHkQtSD15a8WvTgfz9aikZMrKPHvbpqFiw==} + '@langchain/core@1.1.39': + resolution: {integrity: sha512-DP9c7TREy6iA7HnywstmUAsNyJNYTFpRg2yBfQ+6H0l1HnvQzei9GsQ36GeOLxgRaD3vm9K8urCcawSC7yQpCw==} + engines: {node: '>=20'} + '@leichtgewicht/ip-codec@2.0.5': resolution: {integrity: sha512-Vo+PSpZG2/fmgmiNzYK9qWRh8h/CHrwD0mo1h1DzL4yzHNSfWYujGTYsWGreD000gcgmZ7K4Ys6Tx9TxtsKdDw==} @@ -5166,6 +5207,10 @@ packages: resolution: {integrity: sha512-fnYhv671l+eTTp48gB4zEsTW/YtRgRPnkI2nT7x6qw5rkI1Lq2hTmQIpHPgyThI0znLK+vX2n9XxKdXZ7BUbbw==} engines: {node: '>= 20'} + '@vercel/oidc@3.2.0': + resolution: {integrity: sha512-UycprH3T6n3jH0k44NHMa7pnFHGu/N05MjojYr+Mc6I7obkoLIJujSWwin1pCvdy/eOxrI/l3uDLQsmcrOb4ug==} + engines: {node: '>= 20'} + '@vitejs/plugin-react@5.0.4': resolution: {integrity: sha512-La0KD0vGkVkSk6K+piWDKRUyg8Rl5iAIKRMH0vMJI0Eg47bq1eOxmoObAaQG37WMW9MSyk7Cs8EIWwJC1PtzKA==} engines: {node: ^20.19.0 || >=22.12.0} @@ -5321,6 +5366,12 @@ packages: peerDependencies: zod: ^3.25.76 || ^4.1.8 + ai@7.0.0-beta.76: + resolution: {integrity: sha512-yJMCqsnfUi8jnFOvxmXhjMZd0YVSCLk1E5PZpqmGWynvo3uADt1XADYYYRcj0I9Q2wsL4HbCLAKe01I8aswzJg==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + ajv-formats@2.1.1: resolution: {integrity: sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==} peerDependencies: @@ -6453,6 +6504,10 @@ packages: supports-color: optional: true + decamelize@1.2.0: + resolution: {integrity: sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==} + engines: {node: '>=0.10.0'} + decimal.js-light@2.5.1: resolution: {integrity: sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg==} @@ -8109,6 +8164,9 @@ packages: joi@17.13.3: resolution: {integrity: sha512-otDA4ldcIx+ZXsKHWmp0YizCweVRZG96J10b0FevjfuncLO1oX59THoAmHkNubYJ+9gWsYsp5k8v4ib6oDv1fA==} + 
js-tiktoken@1.0.21: + resolution: {integrity: sha512-biOj/6M5qdgx5TKjDnFT1ymSpM5tbd3ylwDtrQvFQSu0Z7bBYko2dF+W/aUkXUPuk6IVpRxk/3Q2sHOzGlS36g==} + js-tokens@4.0.0: resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} @@ -8238,6 +8296,26 @@ packages: resolution: {integrity: sha512-QJv/h939gDpvT+9SiLVlY7tZC3xB2qK57v0J04Sh9wpMb6MP1q8gB21L3WIo8T5P1MSMg3Ep14L7KkDCFG3y4w==} engines: {node: '>=16.0.0'} + langsmith@0.5.18: + resolution: {integrity: sha512-3zuZUWffTHQ+73EAwnodADtf534VNEZUpXr9jC12qyG8/IQuJET7PRsCpTb9wX2lmBspakwLUpqpj3tNm/0bVA==} + peerDependencies: + '@opentelemetry/api': '*' + '@opentelemetry/exporter-trace-otlp-proto': '*' + '@opentelemetry/sdk-trace-base': '*' + openai: '*' + ws: '>=7' + peerDependenciesMeta: + '@opentelemetry/api': + optional: true + '@opentelemetry/exporter-trace-otlp-proto': + optional: true + '@opentelemetry/sdk-trace-base': + optional: true + openai: + optional: true + ws: + optional: true + latest-version@7.0.0: resolution: {integrity: sha512-KvNT4XqAMzdcL6ka6Tl3i2lYeFDgXNCuIX+xNx6ZMVR1dFq+idXd9FLKNMOIx0t9mJ9/HudyX4oZWXZQ0UJHeg==} engines: {node: '>=14.16'} @@ -8910,6 +8988,10 @@ packages: resolution: {integrity: sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==} hasBin: true + mustache@4.2.0: + resolution: {integrity: sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==} + hasBin: true + mute-stream@2.0.0: resolution: {integrity: sha512-WWdIxpyjEn+FhQJQQv9aQAYlHoNVdzIzUySNV1gHUPDSdZJ3yZn7pAAbQcV7B56Mvu881q9FZV+0Vx2xC44VWA==} engines: {node: ^18.17.0 || >=20.5.0} @@ -11463,6 +11545,10 @@ packages: resolution: {integrity: sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==} engines: {node: '>= 0.4.0'} + uuid@10.0.0: + resolution: {integrity: sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ==} + 
hasBin: true + uuid@11.1.0: resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==} hasBin: true @@ -11937,6 +12023,19 @@ snapshots: '@vercel/oidc': 3.0.5 zod: 4.1.13 + '@ai-sdk/gateway@4.0.0-beta.43(zod@4.1.13)': + dependencies: + '@ai-sdk/provider': 4.0.0-beta.10 + '@ai-sdk/provider-utils': 5.0.0-beta.16(zod@4.1.13) + '@vercel/oidc': 3.2.0 + zod: 4.1.13 + + '@ai-sdk/openai@4.0.0-beta.27(zod@4.1.13)': + dependencies: + '@ai-sdk/provider': 4.0.0-beta.10 + '@ai-sdk/provider-utils': 5.0.0-beta.16(zod@4.1.13) + zod: 4.1.13 + '@ai-sdk/provider-utils@3.0.19(zod@4.1.13)': dependencies: '@ai-sdk/provider': 2.0.0 @@ -11944,10 +12043,21 @@ snapshots: eventsource-parser: 3.0.6 zod: 4.1.13 + '@ai-sdk/provider-utils@5.0.0-beta.16(zod@4.1.13)': + dependencies: + '@ai-sdk/provider': 4.0.0-beta.10 + '@standard-schema/spec': 1.1.0 + eventsource-parser: 3.0.6 + zod: 4.1.13 + '@ai-sdk/provider@2.0.0': dependencies: json-schema: 0.4.0 + '@ai-sdk/provider@4.0.0-beta.10': + dependencies: + json-schema: 0.4.0 + '@ai-sdk/react@2.0.115(react@19.2.0)(zod@4.1.13)': dependencies: '@ai-sdk/provider-utils': 3.0.19(zod@4.1.13) @@ -13078,6 +13188,8 @@ snapshots: '@cdxgen/cdxgen-plugins-bin@2.0.2': optional: true + '@cfworker/json-schema@4.1.1': {} + '@chevrotain/cst-dts-gen@11.0.3': dependencies: '@chevrotain/gast': 11.0.3 @@ -14858,6 +14970,26 @@ snapshots: transitivePeerDependencies: - supports-color + '@langchain/core@1.1.39(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.208.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.6.0(@opentelemetry/api@1.9.0))(ws@8.18.3(bufferutil@4.0.9))': + dependencies: + '@cfworker/json-schema': 4.1.1 + '@standard-schema/spec': 1.1.0 + ansi-styles: 5.2.0 + camelcase: 6.3.0 + decamelize: 1.2.0 + js-tiktoken: 1.0.21 + langsmith: 
0.5.18(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.208.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.6.0(@opentelemetry/api@1.9.0))(ws@8.18.3(bufferutil@4.0.9)) + mustache: 4.2.0 + p-queue: 6.6.2 + uuid: 11.1.0 + zod: 4.1.13 + transitivePeerDependencies: + - '@opentelemetry/api' + - '@opentelemetry/exporter-trace-otlp-proto' + - '@opentelemetry/sdk-trace-base' + - openai + - ws + '@leichtgewicht/ip-codec@2.0.5': {} '@mdx-js/mdx@3.1.1': @@ -17555,6 +17687,8 @@ snapshots: '@vercel/oidc@3.0.5': {} + '@vercel/oidc@3.2.0': {} + '@vitejs/plugin-react@5.0.4(vite@7.2.4(@types/node@24.7.2)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.20.6)(yaml@2.8.2))': dependencies: '@babel/core': 7.28.5 @@ -17779,6 +17913,13 @@ snapshots: '@opentelemetry/api': 1.9.0 zod: 4.1.13 + ai@7.0.0-beta.76(zod@4.1.13): + dependencies: + '@ai-sdk/gateway': 4.0.0-beta.43(zod@4.1.13) + '@ai-sdk/provider': 4.0.0-beta.10 + '@ai-sdk/provider-utils': 5.0.0-beta.16(zod@4.1.13) + zod: 4.1.13 + ajv-formats@2.1.1(ajv@8.17.1): optionalDependencies: ajv: 8.17.1 @@ -19053,6 +19194,8 @@ snapshots: dependencies: ms: 2.1.3 + decamelize@1.2.0: {} + decimal.js-light@2.5.1: {} decimal.js@10.6.0: {} @@ -20873,6 +21016,10 @@ snapshots: '@sideway/formula': 3.0.1 '@sideway/pinpoint': 2.0.0 + js-tiktoken@1.0.21: + dependencies: + base64-js: 1.5.1 + js-tokens@4.0.0: {} js-tokens@9.0.1: {} @@ -21027,6 +21174,16 @@ snapshots: vscode-languageserver-textdocument: 1.0.12 vscode-uri: 3.0.8 + langsmith@0.5.18(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.208.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.6.0(@opentelemetry/api@1.9.0))(ws@8.18.3(bufferutil@4.0.9)): + dependencies: + p-queue: 6.6.2 + uuid: 10.0.0 + optionalDependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/exporter-trace-otlp-proto': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.6.0(@opentelemetry/api@1.9.0) + ws: 8.18.3(bufferutil@4.0.9) + 
latest-version@7.0.0: dependencies: package-json: 8.1.1 @@ -21964,6 +22121,8 @@ snapshots: dns-packet: 5.6.1 thunky: 1.1.0 + mustache@4.2.0: {} + mute-stream@2.0.0: {} nanoid@3.3.11: {} @@ -24753,6 +24912,8 @@ snapshots: utils-merge@1.0.1: {} + uuid@10.0.0: {} + uuid@11.1.0: {} uuid@13.0.0: {}