From 9293f19b574eb23c52316ad1e9569dd03bd788ef Mon Sep 17 00:00:00 2001
From: Boris Yankov
Date: Fri, 6 Dec 2024 01:02:17 +0200
Subject: [PATCH 1/2] refactor: extract model configuration into a separate file

Behavior is kept exactly the same, but the model in use can now be
changed simply by editing or replacing model.ts.

This approach is flexible: the Vercel AI SDK supports many models out
of the box, and switching providers takes only a few lines of code.
---
 packages/cali/src/cli.ts   | 10 ++--------
 packages/cali/src/model.ts | 13 +++++++++++++
 2 files changed, 15 insertions(+), 8 deletions(-)
 create mode 100755 packages/cali/src/model.ts

diff --git a/packages/cali/src/cli.ts b/packages/cali/src/cli.ts
index 840c6dd..9b5ac89 100755
--- a/packages/cali/src/cli.ts
+++ b/packages/cali/src/cli.ts
@@ -2,7 +2,6 @@
 
 import 'dotenv/config'
 
-import { createOpenAI } from '@ai-sdk/openai'
 import { confirm, outro, select, spinner, text } from '@clack/prompts'
 import { CoreMessage, generateText } from 'ai'
 import * as tools from 'cali-tools'
@@ -12,7 +11,6 @@ import { retro } from 'gradient-string'
 import { z } from 'zod'
 
 import { reactNativePrompt } from './prompt.js'
-import { getApiKey } from './utils.js'
 
 const MessageSchema = z.union([
   z.object({ type: z.literal('select'), content: z.string(), options: z.array(z.string()) }),
@@ -49,11 +47,7 @@ console.log(
 
 console.log()
 
-const AI_MODEL = process.env.AI_MODEL || 'gpt-4o'
-
-const openai = createOpenAI({
-  apiKey: await getApiKey('OpenAI', 'OPENAI_API_KEY'),
-})
+import model from './model.js'
 
 async function startSession(): Promise<string> {
   const question = await text({
@@ -88,7 +82,7 @@ while (true) {
   s.start(chalk.gray('Thinking...'))
 
   const response = await generateText({
-    model: openai(AI_MODEL),
+    model,
     system: reactNativePrompt,
     tools,
     maxSteps: 10,
diff --git a/packages/cali/src/model.ts b/packages/cali/src/model.ts
new file mode 100755
index 0000000..16d3c49
--- /dev/null
+++ b/packages/cali/src/model.ts
@@ -0,0 +1,13 @@
+import { createOpenAI } from '@ai-sdk/openai'
+
+import { getApiKey } from './utils.js'
+
+const AI_MODEL = process.env.AI_MODEL || 'gpt-4o'
+
+const openai = createOpenAI({
+  apiKey: await getApiKey('OpenAI', 'OPENAI_API_KEY'),
+})
+
+const model = openai(AI_MODEL)
+
+export default model

From caa808afa926012ab3d9d229492f6f4a6186447f Mon Sep 17 00:00:00 2001
From: Boris Yankov
Date: Fri, 6 Dec 2024 02:27:58 +0200
Subject: [PATCH 2/2] experimental: use a local model through Ollama

---
 packages/cali/package.json        |  1 +
 packages/cali/src/cli.ts          |  2 +-
 packages/cali/src/model-ollama.ts | 13 +++++++++++++
 3 files changed, 15 insertions(+), 1 deletion(-)
 create mode 100755 packages/cali/src/model-ollama.ts

diff --git a/packages/cali/package.json b/packages/cali/package.json
index 541ee19..3f460ae 100644
--- a/packages/cali/package.json
+++ b/packages/cali/package.json
@@ -18,6 +18,7 @@
     "dedent": "^1.5.3",
     "dotenv": "^16.4.5",
     "gradient-string": "^3.0.0",
+    "ollama-ai-provider": "^1.0.0",
     "zod": "^3.23.8"
   },
   "bugs": {
diff --git a/packages/cali/src/cli.ts b/packages/cali/src/cli.ts
index 9b5ac89..08fbf53 100755
--- a/packages/cali/src/cli.ts
+++ b/packages/cali/src/cli.ts
@@ -47,7 +47,7 @@ console.log(
 
 console.log()
 
-import model from './model.js'
+import model from './model-ollama.js'
 
 async function startSession(): Promise<string> {
   const question = await text({
diff --git a/packages/cali/src/model-ollama.ts b/packages/cali/src/model-ollama.ts
new file mode 100755
index 0000000..c70d554
--- /dev/null
+++ b/packages/cali/src/model-ollama.ts
@@ -0,0 +1,13 @@
+// To make this work:
+// 1. Download Ollama and install it: https://ollama.com/
+// 2. Run `ollama run llama3.2`
+
+import { createOllama } from 'ollama-ai-provider'
+
+const ollama = createOllama({
+  baseURL: 'http://localhost:11434/api',
+})
+
+const model = ollama('llama3.2:latest')
+
+export default model
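
A quick sketch of the "few lines of code" claim from PATCH 1/2: a
hypothetical model-anthropic.ts (assuming the @ai-sdk/anthropic package
is installed and the existing getApiKey helper is pointed at an
ANTHROPIC_API_KEY variable) would differ from model.ts only in the
provider factory and the model name:

    // Hypothetical model-anthropic.ts (not part of these patches).
    // Assumes `npm install @ai-sdk/anthropic` and an ANTHROPIC_API_KEY
    // entry resolved by the existing getApiKey helper.
    import { createAnthropic } from '@ai-sdk/anthropic'

    import { getApiKey } from './utils.js'

    const anthropic = createAnthropic({
      apiKey: await getApiKey('Anthropic', 'ANTHROPIC_API_KEY'),
    })

    const model = anthropic('claude-3-5-sonnet-20241022')

    export default model

Pointing cli.ts at this file is the same one-line import change used
for the Ollama variant in PATCH 2/2.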