Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@ db.sqlite*

.agents/
.claude/
.augment/
.cursor/
.playwright-mcp/
.antigravity/
Expand Down
17 changes: 8 additions & 9 deletions app/extension/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -20,23 +20,22 @@
"url": "https://github.com/lcomplete/huntly"
},
"dependencies": {
"@ai-sdk/anthropic": "^1.2.12",
"@ai-sdk/azure": "^1.3.24",
"@ai-sdk/deepseek": "^0.1.8",
"@ai-sdk/google": "^1.2.20",
"@ai-sdk/groq": "^1.1.14",
"@ai-sdk/openai": "^1.3.22",
"@ai-sdk/anthropic": "^3.0.58",
"@ai-sdk/azure": "^3.0.42",
"@ai-sdk/deepseek": "^2.0.24",
"@ai-sdk/google": "^3.0.43",
"@ai-sdk/groq": "^3.0.29",
"@ai-sdk/openai": "^3.0.41",
"@emotion/react": "^11.10.6",
"@emotion/styled": "^11.10.6",
"@mozilla/readability": "^0.6.0",
"@mui/icons-material": "^5.11.11",
"@mui/material": "^5.11.14",
"@types/turndown": "^5.0.6",
"ai": "^6.0.127",
"ai": "^6.0.116",
"defuddle": "^0.6.6",
"formik": "^2.2.9",
"html2canvas": "^1.4.1",
"ollama-ai-provider": "^1.2.0",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react-markdown": "^10.1.0",
Expand Down Expand Up @@ -67,6 +66,6 @@
"style-loader": "^3.3.1",
"tailwindcss": "^3.3.2",
"ts-jest": "^27.0.5",
"typescript": "^4.4.3 "
"typescript": "^5.9.3"
}
}
16 changes: 16 additions & 0 deletions app/extension/src/__tests__/providers.test.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
import {
getOpenAICompatibleBaseUrl,
getOllamaBaseUrl,
getOllamaOpenAIBaseUrl,
isDashScopeCompatibleBaseUrl,
usesRawOpenAICompatibleStream,
} from "../ai/openAICompatibleProviders";
Expand Down Expand Up @@ -90,4 +92,18 @@ describe("providers helpers", () => {
})
).toBe("https://example.com/v1");
});

it("normalizes Ollama base urls for model and chat endpoints", () => {
expect(getOllamaBaseUrl("http://localhost:11434/v1")).toBe(
"http://localhost:11434"
);
expect(getOllamaBaseUrl(undefined)).toBe("http://localhost:11434");

expect(getOllamaOpenAIBaseUrl("http://localhost:11434")).toBe(
"http://localhost:11434/v1"
);
expect(getOllamaOpenAIBaseUrl("http://localhost:11434/v1")).toBe(
"http://localhost:11434/v1"
);
});
});
20 changes: 20 additions & 0 deletions app/extension/src/ai/openAICompatibleProviders.ts
Original file line number Diff line number Diff line change
Expand Up @@ -34,3 +34,23 @@ export function getOpenAICompatibleBaseUrl(
): string | undefined {
return config.baseUrl || PROVIDER_REGISTRY[config.type]?.defaultBaseUrl || undefined;
}

/**
 * Removes every trailing "/" character from a url string.
 * Returns the input unchanged when it has no trailing slash; an
 * all-slash or empty input yields the empty string.
 */
function trimTrailingSlash(url: string): string {
  let end = url.length;
  while (end > 0 && url.charAt(end - 1) === "/") {
    end -= 1;
  }
  return url.slice(0, end);
}

/**
 * Normalizes an Ollama base url for the native REST API (e.g. /api/tags).
 * Falls back to the registry default when no url is configured, drops
 * trailing slashes, and strips an OpenAI-style "/v1" suffix so callers
 * always receive the bare host url.
 */
export function getOllamaBaseUrl(baseUrl?: string): string {
  const OPENAI_SUFFIX = "/v1";
  // "||" (not "??") on purpose: an empty string should also fall back
  // to the registry default, matching how unset configs are stored.
  const trimmed = trimTrailingSlash(
    baseUrl || PROVIDER_REGISTRY.ollama.defaultBaseUrl
  );
  if (trimmed.endsWith(OPENAI_SUFFIX)) {
    return trimmed.slice(0, trimmed.length - OPENAI_SUFFIX.length);
  }
  return trimmed;
}

/**
 * Builds the OpenAI-compatible Ollama endpoint url by appending "/v1"
 * to the normalized native base url. Idempotent for inputs that already
 * end in "/v1", because getOllamaBaseUrl strips that suffix first.
 */
export function getOllamaOpenAIBaseUrl(baseUrl?: string): string {
  return `${getOllamaBaseUrl(baseUrl)}/v1`;
}
45 changes: 23 additions & 22 deletions app/extension/src/ai/providers.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,19 +4,22 @@ import { createGoogleGenerativeAI } from '@ai-sdk/google';
import { createDeepSeek } from '@ai-sdk/deepseek';
import { createGroq } from '@ai-sdk/groq';
import { createAzure } from '@ai-sdk/azure';
import { ollama, createOllama } from 'ollama-ai-provider';
import { generateText, LanguageModelV1 } from 'ai';
import { generateText, LanguageModel } from 'ai';
import {
AIProviderConfig,
ConnectionTestResult,
PROVIDER_REGISTRY,
} from './types';
import { getOpenAICompatibleBaseUrl } from './openAICompatibleProviders';
import {
getOpenAICompatibleBaseUrl,
getOllamaBaseUrl,
getOllamaOpenAIBaseUrl,
} from './openAICompatibleProviders';

export function createProviderModel(
config: AIProviderConfig,
modelId?: string
): LanguageModelV1 | null {
): LanguageModel | null {
const model = modelId || config.enabledModels[0];
if (!model) return null;

Expand All @@ -28,7 +31,7 @@ export function createProviderModel(
apiKey: config.apiKey,
baseURL: getOpenAICompatibleBaseUrl(config),
});
return provider(model) as LanguageModelV1;
return provider(model) as LanguageModel;
}

case 'anthropic': {
Expand All @@ -39,40 +42,38 @@ export function createProviderModel(
'anthropic-dangerous-direct-browser-access': 'true',
},
});
return provider(model) as LanguageModelV1;
return provider(model) as LanguageModel;
}

case 'google': {
const provider = createGoogleGenerativeAI({
apiKey: config.apiKey,
baseURL: config.baseUrl || undefined,
});
return provider(model) as LanguageModelV1;
return provider(model) as LanguageModel;
}

case 'deepseek': {
const provider = createDeepSeek({
apiKey: config.apiKey,
baseURL: config.baseUrl || undefined,
});
return provider(model) as LanguageModelV1;
return provider(model) as LanguageModel;
}

case 'groq': {
const provider = createGroq({
apiKey: config.apiKey,
});
return provider(model) as LanguageModelV1;
return provider(model) as LanguageModel;
}

case 'ollama': {
if (config.baseUrl) {
const provider = createOllama({
baseURL: config.baseUrl,
});
return provider(model) as LanguageModelV1;
}
return ollama(model) as LanguageModelV1;
const provider = createOpenAI({
apiKey: config.apiKey || 'ollama',
baseURL: getOllamaOpenAIBaseUrl(config.baseUrl),
});
return provider(model) as LanguageModel;
}

case 'azure-openai':
Expand All @@ -82,7 +83,7 @@ export function createProviderModel(
apiKey: config.apiKey,
baseURL: config.baseUrl,
});
return provider(model) as LanguageModelV1;
return provider(model) as LanguageModel;
}

case 'qwen': {
Expand All @@ -91,7 +92,7 @@ export function createProviderModel(
apiKey: config.apiKey,
baseURL: getOpenAICompatibleBaseUrl(config),
});
return provider(model) as LanguageModelV1;
return provider(model) as LanguageModel;
}

case 'zhipu': {
Expand All @@ -100,7 +101,7 @@ export function createProviderModel(
apiKey: config.apiKey,
baseURL: getOpenAICompatibleBaseUrl(config),
});
return provider(model) as LanguageModelV1;
return provider(model) as LanguageModel;
}

case 'minimax': {
Expand All @@ -109,7 +110,7 @@ export function createProviderModel(
apiKey: config.apiKey,
baseURL: getOpenAICompatibleBaseUrl(config),
});
return provider(model) as LanguageModelV1;
return provider(model) as LanguageModel;
}

case 'huntly-server': {
Expand Down Expand Up @@ -180,7 +181,7 @@ export async function testProviderConnection(
const result = await generateText({
model,
prompt: 'Say "OK" in one word.',
maxTokens: 5,
maxOutputTokens: 5,
});

// Check if we got a valid response - result.text can be empty string for some models
Expand Down Expand Up @@ -223,7 +224,7 @@ export async function testProviderConnection(

export async function fetchOllamaModels(baseUrl?: string): Promise<string[]> {
try {
const url = (baseUrl || 'http://localhost:11434') + '/api/tags';
const url = getOllamaBaseUrl(baseUrl) + '/api/tags';
const response = await fetch(url);
if (!response.ok) {
return [];
Expand Down
5 changes: 3 additions & 2 deletions app/extension/src/ai/streamingPreview.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ export interface StreamingPreviewState {

/**
 * Minimal shape of a streamed AI chunk consumed by the preview logic.
 * Both delta fields are optional: consumers read `text` first and fall
 * back to `textDelta` — presumably to cover both newer and older AI SDK
 * stream-part shapes; confirm against the SDK version in use.
 */
export interface StreamingPreviewChunk {
  // Stream part discriminator (e.g. "text-delta", "reasoning-delta").
  type: string;
  // Delta payload field checked first by applyStreamingPreviewChunk.
  text?: string;
  // Fallback delta payload field, kept for compatibility.
  textDelta?: string;
}

Expand All @@ -31,7 +32,7 @@ export function applyStreamingPreviewChunk(
options: StreamingPreviewOptions = {}
): StreamingPreviewState {
const includeReasoning = options.includeReasoning ?? true;
const textDelta = chunk.textDelta || "";
const textDelta = chunk.text || chunk.textDelta || "";
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

streamText().fullStream chunks are documented to use type: "text" (with text) for text deltas; applyStreamingPreviewChunk currently only treats "text-delta" as response text, so "text" chunks would be ignored and the preview may never show the actual answer.

Severity: high

Fix This in Augment

🤖 Was this useful? React with 👍 or 👎, or 🚀 if it prevented an incident/outage.

if (!textDelta) {
return state;
}
Expand All @@ -47,7 +48,7 @@ export function applyStreamingPreviewChunk(
};
}

if (chunk.type === "reasoning") {
if (chunk.type === "reasoning" || chunk.type === "reasoning-delta") {
if (!includeReasoning) {
return state;
}
Expand Down
11 changes: 6 additions & 5 deletions app/extension/src/background.ts
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@ const vercelAIAbortControllers = new Map<string, AbortController>();
const badgeCache = new Map<number, string>();
const SAVED_BADGE_TEXT = "✓";
const SAVED_BADGE_BG = "#15803D";
const AI_MAX_OUTPUT_TOKENS = 20000;

function arrayBufferToBase64(buffer: ArrayBuffer): string {
const bytes = new Uint8Array(buffer);
Expand Down Expand Up @@ -379,7 +380,7 @@ async function startProcessingWithVercelAI(task: any) {
modelId,
systemPrompt,
userPrompt,
maxTokens: 8000,
maxTokens: AI_MAX_OUTPUT_TOKENS,
requestBodyExtras: getThinkingModeOptions(Boolean(thinkingModeEnabled)),
abortSignal: abortController.signal,
onDelta: ({ contentDelta, reasoningDelta }) => {
Expand Down Expand Up @@ -420,11 +421,11 @@ async function startProcessingWithVercelAI(task: any) {
}

// Use streamText for streaming response with abort signal
const result = await streamText({
const result = streamText({
model,
system: systemPrompt,
prompt: userPrompt,
maxTokens: 8000,
maxOutputTokens: AI_MAX_OUTPUT_TOKENS,
abortSignal: abortController.signal,
});

Expand All @@ -448,8 +449,8 @@ async function startProcessingWithVercelAI(task: any) {
try {
sendStreamingPreviewUpdate(
streamState,
chunk.type === "text-delta" || chunk.type === "reasoning"
? chunk.textDelta
chunk.type === "text-delta" || chunk.type === "reasoning-delta"
? chunk.text
: ""
);
} catch (error) {
Expand Down
42 changes: 37 additions & 5 deletions app/extension/src/components/ArticlePreview.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,12 @@ import React, { useState, useEffect, useCallback, useRef, forwardRef, useImperat
import ReactMarkdown from "react-markdown";
import remarkGfm from "remark-gfm";
import TurndownService from "turndown";
import { ContentParserType, readSyncStorageSettings } from "../storage";
import {
ContentParserType,
readSyncStorageSettings,
getThinkingModeEnabled,
saveThinkingModeEnabled,
} from "../storage";
import { PageOperateResult } from "../model/pageOperateResult";
import { parseDocument } from "../parser/contentParser";
import AIToolbar, {
Expand Down Expand Up @@ -222,7 +227,7 @@ export const ArticlePreview: React.FC<ArticlePreviewProps> = ({
} | null>(null);
const [serverConfigured, setServerConfigured] = useState(false);
const [thinkingModeEnabled, setThinkingModeEnabled] = useState(
initialThinkingModeEnabled
initialThinkingModeEnabled ?? false
);

// Refs for export functionality
Expand All @@ -236,6 +241,35 @@ export const ArticlePreview: React.FC<ArticlePreviewProps> = ({
});
}, []);

// Resolve the thinking-mode toggle: an explicit boolean prop wins;
// otherwise fall back to the value persisted via getThinkingModeEnabled.
// `cancelled` guards against calling setState after unmount while the
// async storage read is still in flight.
useEffect(() => {
  let cancelled = false;

  if (typeof initialThinkingModeEnabled === "boolean") {
    // Caller supplied the value — no storage read needed.
    setThinkingModeEnabled(initialThinkingModeEnabled);
    return () => {
      cancelled = true;
    };
  }

  getThinkingModeEnabled().then((savedThinkingModeEnabled) => {
    // Only apply the persisted value if the effect is still live.
    if (!cancelled) {
      setThinkingModeEnabled(savedThinkingModeEnabled);
    }
  });

  return () => {
    cancelled = true;
  };
}, [initialThinkingModeEnabled]);

// Flip the thinking-mode flag and persist the new value. The functional
// updater keeps the callback stable (empty deps), and `void` marks the
// storage write as intentionally fire-and-forget.
const handleThinkingModeToggle = useCallback(() => {
  setThinkingModeEnabled((prev) => {
    const next = !prev;
    void saveThinkingModeEnabled(next);
    return next;
  });
}, []);

// Get shadow container for MUI Menu components
const shadowContainer = useShadowContainer();

Expand Down Expand Up @@ -500,9 +534,7 @@ export const ArticlePreview: React.FC<ArticlePreviewProps> = ({
initialSelectedModel={autoSelectedModel}
showThinkingToggle={true}
thinkingModeEnabled={thinkingModeEnabled}
onThinkingModeToggle={() =>
setThinkingModeEnabled((prev) => !prev)
}
onThinkingModeToggle={handleThinkingModeToggle}
/>

{/* Right section: Export group, Edit button, Parser selector and Close button */}
Expand Down
Loading
Loading