Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions packages/types/src/global-settings.ts
Original file line number Diff line number Diff line change
Expand Up @@ -278,6 +278,7 @@ export const SECRET_STATE_KEYS = [
"sambaNovaApiKey",
"zaiApiKey",
"fireworksApiKey",
"perplexityApiKey",
"vercelAiGatewayApiKey",
"basetenApiKey",
] as const
Expand Down
14 changes: 14 additions & 0 deletions packages/types/src/provider-settings.ts
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ import {
moonshotModels,
openAiCodexModels,
openAiNativeModels,
perplexityModels,
qwenCodeModels,
sambaNovaModels,
vertexModels,
Expand Down Expand Up @@ -122,6 +123,7 @@ export const providerNames = [
"minimax",
"openai-codex",
"openai-native",
"perplexity",
"qwen-code",
"roo",
"sambanova",
Expand Down Expand Up @@ -376,6 +378,10 @@ const fireworksSchema = apiModelIdProviderModelSchema.extend({
fireworksApiKey: z.string().optional(),
})

const perplexitySchema = apiModelIdProviderModelSchema.extend({
perplexityApiKey: z.string().optional(),
})

const qwenCodeSchema = apiModelIdProviderModelSchema.extend({
qwenCodeOauthPath: z.string().optional(),
})
Expand Down Expand Up @@ -425,6 +431,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv
sambaNovaSchema.merge(z.object({ apiProvider: z.literal("sambanova") })),
zaiSchema.merge(z.object({ apiProvider: z.literal("zai") })),
fireworksSchema.merge(z.object({ apiProvider: z.literal("fireworks") })),
perplexitySchema.merge(z.object({ apiProvider: z.literal("perplexity") })),
qwenCodeSchema.merge(z.object({ apiProvider: z.literal("qwen-code") })),
rooSchema.merge(z.object({ apiProvider: z.literal("roo") })),
vercelAiGatewaySchema.merge(z.object({ apiProvider: z.literal("vercel-ai-gateway") })),
Expand Down Expand Up @@ -459,6 +466,7 @@ export const providerSettingsSchema = z.object({
...sambaNovaSchema.shape,
...zaiSchema.shape,
...fireworksSchema.shape,
...perplexitySchema.shape,
...qwenCodeSchema.shape,
...rooSchema.shape,
...vercelAiGatewaySchema.shape,
Expand Down Expand Up @@ -535,6 +543,7 @@ export const modelIdKeysByProvider: Record<TypicalProvider, ModelIdKey> = {
sambanova: "apiModelId",
zai: "apiModelId",
fireworks: "apiModelId",
perplexity: "apiModelId",
roo: "apiModelId",
"vercel-ai-gateway": "vercelAiGatewayModelId",
}
Expand Down Expand Up @@ -596,6 +605,11 @@ export const MODELS_BY_PROVIDER: Record<
label: "Fireworks",
models: Object.keys(fireworksModels),
},
perplexity: {
id: "perplexity",
label: "Perplexity",
models: Object.keys(perplexityModels),
},
gemini: {
id: "gemini",
label: "Google Gemini",
Expand Down
4 changes: 4 additions & 0 deletions packages/types/src/providers/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ export * from "./openai.js"
export * from "./openai-codex.js"
export * from "./openai-codex-rate-limits.js"
export * from "./openrouter.js"
export * from "./perplexity.js"
export * from "./poe.js"
export * from "./qwen-code.js"
export * from "./requesty.js"
Expand All @@ -37,6 +38,7 @@ import { mistralDefaultModelId } from "./mistral.js"
import { moonshotDefaultModelId } from "./moonshot.js"
import { openAiCodexDefaultModelId } from "./openai-codex.js"
import { openRouterDefaultModelId } from "./openrouter.js"
import { perplexityDefaultModelId } from "./perplexity.js"
import { poeDefaultModelId } from "./poe.js"
import { qwenCodeDefaultModelId } from "./qwen-code.js"
import { requestyDefaultModelId } from "./requesty.js"
Expand Down Expand Up @@ -105,6 +107,8 @@ export function getProviderDefaultModelId(
return sambaNovaDefaultModelId
case "fireworks":
return fireworksDefaultModelId
case "perplexity":
return perplexityDefaultModelId
case "roo":
return rooDefaultModelId
case "qwen-code":
Expand Down
50 changes: 50 additions & 0 deletions packages/types/src/providers/perplexity.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
import type { ModelInfo } from "../model.js"

// Perplexity
// https://docs.perplexity.ai/docs/getting-started
// https://docs.perplexity.ai/guides/pricing

// Capabilities shared by every Sonar model: 8k max output tokens, 128k context
// window, text-only input, and no prompt caching.
const sonarBase = {
	maxTokens: 8192,
	contextWindow: 128_000,
	supportsImages: false,
	supportsPromptCache: false,
} as const

/**
 * Catalog of Perplexity Sonar models.
 * Input/output rates follow the pricing guide linked above — presumably USD per
 * million tokens, matching the convention of the other provider catalogs (TODO confirm).
 */
export const perplexityModels = {
	sonar: {
		...sonarBase,
		inputPrice: 1.0,
		outputPrice: 1.0,
		description:
			"Lightweight, cost-effective model with built-in web search grounding. Best for quick lookups and short answers.",
	},
	"sonar-pro": {
		...sonarBase,
		inputPrice: 3.0,
		outputPrice: 15.0,
		description:
			"Perplexity's flagship model with built-in web search grounding. Best for complex queries that benefit from up-to-date information.",
	},
	"sonar-reasoning": {
		...sonarBase,
		inputPrice: 1.0,
		outputPrice: 5.0,
		description: "Reasoning model with chain-of-thought and built-in web search grounding.",
	},
	"sonar-reasoning-pro": {
		...sonarBase,
		inputPrice: 2.0,
		outputPrice: 8.0,
		description:
			"Reasoning model with extended chain-of-thought reasoning and built-in web search grounding for complex multi-step problems.",
	},
} as const satisfies Record<string, ModelInfo>

// Union of the catalog's model ids, e.g. "sonar" | "sonar-pro" | …
export type PerplexityModelId = keyof typeof perplexityModels

// Model used when the caller has not selected one explicitly.
export const perplexityDefaultModelId: PerplexityModelId = "sonar-pro"
3 changes: 3 additions & 0 deletions src/api/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ import {
SambaNovaHandler,
ZAiHandler,
FireworksHandler,
PerplexityHandler,
RooHandler,
VercelAiGatewayHandler,
MiniMaxHandler,
Expand Down Expand Up @@ -167,6 +168,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
return new ZAiHandler(options)
case "fireworks":
return new FireworksHandler(options)
case "perplexity":
return new PerplexityHandler(options)
case "roo":
// Never throw exceptions from provider constructors
// The provider-proxy server will handle authentication and return appropriate error codes
Expand Down
195 changes: 195 additions & 0 deletions src/api/providers/__tests__/perplexity.spec.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,195 @@
// npx vitest run api/providers/__tests__/perplexity.spec.ts

import { Anthropic } from "@anthropic-ai/sdk"
import OpenAI from "openai"

import { type PerplexityModelId, perplexityDefaultModelId, perplexityModels } from "@roo-code/types"

import { PerplexityHandler, resolvePerplexityApiKey } from "../perplexity"

// Shared spy for the stubbed client's chat.completions.create; individual tests
// override it with mockImplementationOnce when they need a custom stream.
const mockCreate = vi.fn()

// Replace the OpenAI SDK with a stub whose create() delegates to mockCreate.
// vi.mock is hoisted above the imports by vitest, so the handler under test
// always constructs this stub client.
vi.mock("openai", () => ({
	default: vi.fn(() => ({
		chat: {
			completions: {
				create: mockCreate,
			},
		},
	})),
}))

describe("PerplexityHandler", () => {
	let handler: PerplexityHandler
	// Snapshot of process.env taken once, so tests that mutate env vars can be
	// undone in afterEach.
	const originalEnv = { ...process.env }

	beforeEach(() => {
		vi.clearAllMocks()
		// Default stream: one content chunk followed by a usage-only chunk,
		// mimicking an OpenAI-compatible streaming response.
		mockCreate.mockImplementation(async () => ({
			[Symbol.asyncIterator]: async function* () {
				yield {
					choices: [{ delta: { content: "Test response" }, index: 0 }],
					usage: null,
				}
				yield {
					choices: [{ delta: {}, index: 0 }],
					usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 },
				}
			},
		}))
		handler = new PerplexityHandler({ perplexityApiKey: "test-key" })
	})

	afterEach(() => {
		vi.restoreAllMocks()
		// Undo any env-var mutations made by individual tests.
		process.env = { ...originalEnv }
	})

	it("should use the correct Perplexity base URL", () => {
		new PerplexityHandler({ perplexityApiKey: "test-perplexity-api-key" })
		expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ baseURL: "https://api.perplexity.ai" }))
	})

	it("should use the provided API key from settings", () => {
		const perplexityApiKey = "test-perplexity-api-key"
		new PerplexityHandler({ perplexityApiKey })
		expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: perplexityApiKey }))
	})

	// Key resolution order exercised by the next four tests:
	// settings key > PERPLEXITY_API_KEY > PPLX_API_KEY.
	it("should fall back to PERPLEXITY_API_KEY env var when no settings key is provided", () => {
		delete process.env.PPLX_API_KEY
		process.env.PERPLEXITY_API_KEY = "env-perplexity-key"
		new PerplexityHandler({})
		expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: "env-perplexity-key" }))
	})

	it("should fall back to PPLX_API_KEY env var as a secondary fallback", () => {
		delete process.env.PERPLEXITY_API_KEY
		process.env.PPLX_API_KEY = "pplx-fallback-key"
		new PerplexityHandler({})
		expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: "pplx-fallback-key" }))
	})

	it("should prefer explicit settings API key over env vars", () => {
		process.env.PERPLEXITY_API_KEY = "env-key"
		process.env.PPLX_API_KEY = "pplx-key"
		new PerplexityHandler({ perplexityApiKey: "explicit-key" })
		expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: "explicit-key" }))
	})

	it("should throw when no API key is configured (settings or env vars)", () => {
		delete process.env.PERPLEXITY_API_KEY
		delete process.env.PPLX_API_KEY
		expect(() => new PerplexityHandler({})).toThrow("API key is required")
	})

	it("resolvePerplexityApiKey should return undefined when nothing is set", () => {
		delete process.env.PERPLEXITY_API_KEY
		delete process.env.PPLX_API_KEY
		// Empty string must also be treated as "not configured".
		expect(resolvePerplexityApiKey()).toBeUndefined()
		expect(resolvePerplexityApiKey("")).toBeUndefined()
	})

	it("should return default sonar-pro model when no model is specified", () => {
		const model = handler.getModel()
		expect(model.id).toBe(perplexityDefaultModelId)
		expect(model.id).toBe("sonar-pro")
		expect(model.info).toEqual(expect.objectContaining(perplexityModels[perplexityDefaultModelId]))
	})

	it("should return sonar-reasoning-pro model when configured", () => {
		const testModelId: PerplexityModelId = "sonar-reasoning-pro"
		const handlerWithModel = new PerplexityHandler({
			apiModelId: testModelId,
			perplexityApiKey: "test-key",
		})
		const model = handlerWithModel.getModel()
		expect(model.id).toBe(testModelId)
		// Values mirror the sonar-reasoning-pro entry in perplexityModels.
		expect(model.info).toEqual(
			expect.objectContaining({
				maxTokens: 8192,
				contextWindow: 128_000,
				supportsImages: false,
				supportsPromptCache: false,
				inputPrice: 2.0,
				outputPrice: 8.0,
			}),
		)
	})

	it("should fall back to default model when an unknown model id is provided", () => {
		const handlerWithModel = new PerplexityHandler({
			apiModelId: "not-a-real-model",
			perplexityApiKey: "test-key",
		})
		const model = handlerWithModel.getModel()
		expect(model.id).toBe(perplexityDefaultModelId)
	})

	it("should expose all four Sonar models with 128k context", () => {
		const expectedIds: PerplexityModelId[] = ["sonar", "sonar-pro", "sonar-reasoning", "sonar-reasoning-pro"]
		for (const id of expectedIds) {
			expect(perplexityModels[id]).toBeDefined()
			expect(perplexityModels[id].contextWindow).toBe(128_000)
		}
	})

	it("createMessage should yield text content from stream", async () => {
		const testContent = "Streamed content from Perplexity"
		// Hand-rolled async iterator: one content chunk, then completion.
		mockCreate.mockImplementationOnce(() => ({
			[Symbol.asyncIterator]: () => ({
				next: vi
					.fn()
					.mockResolvedValueOnce({
						done: false,
						value: { choices: [{ delta: { content: testContent } }] },
					})
					.mockResolvedValueOnce({ done: true }),
			}),
		}))

		const stream = handler.createMessage("system prompt", [])
		const firstChunk = await stream.next()
		expect(firstChunk.done).toBe(false)
		expect(firstChunk.value).toEqual({ type: "text", text: testContent })
	})

	it("createMessage should pass the configured model id to the upstream client", async () => {
		const modelId: PerplexityModelId = "sonar-reasoning"
		const handlerWithModel = new PerplexityHandler({
			apiModelId: modelId,
			perplexityApiKey: "test-key",
		})

		// Empty stream — this test only inspects the request arguments.
		mockCreate.mockImplementationOnce(() => ({
			[Symbol.asyncIterator]: () => ({
				async next() {
					return { done: true }
				},
			}),
		}))

		const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "hi" }]
		const generator = handlerWithModel.createMessage("system", messages)
		await generator.next()

		// Second positional argument (request options) is expected to be undefined.
		expect(mockCreate).toHaveBeenCalledWith(
			expect.objectContaining({
				model: modelId,
				stream: true,
				stream_options: { include_usage: true },
				messages: expect.arrayContaining([{ role: "system", content: "system" }]),
			}),
			undefined,
		)
	})

	it("createMessage should propagate upstream errors", async () => {
		mockCreate.mockImplementationOnce(() => {
			throw new Error("upstream 401")
		})

		const generator = handler.createMessage("system", [{ role: "user", content: "hi" }])
		await expect(generator.next()).rejects.toThrow(/upstream 401/)
	})
})
1 change: 1 addition & 0 deletions src/api/providers/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ export { VsCodeLmHandler } from "./vscode-lm"
export { XAIHandler } from "./xai"
export { ZAiHandler } from "./zai"
export { FireworksHandler } from "./fireworks"
export { PerplexityHandler } from "./perplexity"
export { RooHandler } from "./roo"
export { VercelAiGatewayHandler } from "./vercel-ai-gateway"
export { MiniMaxHandler } from "./minimax"
Expand Down
Loading
Loading