diff --git a/assets/scripts/components/conversational-search/index.js b/assets/scripts/components/conversational-search/index.js index 64a9ea7e936..355edc06962 100644 --- a/assets/scripts/components/conversational-search/index.js +++ b/assets/scripts/components/conversational-search/index.js @@ -1,63 +1,68 @@ import { getConfig } from '../../helpers/getConfig'; -import { initializeFeatureFlags, getBooleanFlag, isDatadogEmployee, fetchDatadogUserStatus } from 'scripts/helpers/feature-flags'; +import { initializeFeatureFlags, getBooleanFlag, fetchDatadogUserStatus } from 'scripts/helpers/feature-flags'; import { logAction, logError } from './logger'; import { parseMarkdown, inlineRefChips, extractSources, renderMessageWithSources } from './markdown'; import { attachTooltips, buildSourceCards, showSourceTooltip, closeAllSourceTooltips, repositionTooltip } from './sources'; import { addMessageActions, injectCodeCopyButtons } from './actions'; -import { streamConversation, fetchConversation, resetTypesenseClient } from './streaming'; import { streamDocsAiChat } from './docsai-client'; import { pickQuestions } from './suggested-questions'; const { env } = document.documentElement.dataset; const docsConfig = getConfig(env); -const typesenseConfig = docsConfig.typesense; const docsAiConfig = docsConfig.docsAi; -// --- Providers & feature flags --------------------------------------------------- +const DOCS_AI_ENABLED_FLAG_KEY = 'docs-ai-enabled'; -const PROVIDER_INTERNAL = 'internal'; -const PROVIDER_TYPESENSE = 'typesense'; +// Optimistic render: default on so the UI mounts at DOMContentLoaded — no +// layout shift. If the flag resolves to false we tear the UI down. Right +// tradeoff for a default-on permanent kill switch: degraded UX during a rare +// incident is fine; everyday layout shift is not. 
+let IS_DOCS_AI_ENABLED = true; -let IS_CONVERSATIONAL_SEARCH_ENABLED = false; -const CONVERSATIONAL_SEARCH_FLAG_KEY = 'docs_conversational_search'; - -// When true, disables the internal docs-ai provider and uses the external Typesense provider. -// Default: false (internal docs-ai is the primary provider). -const USE_EXTERNAL_PROVIDER_FLAG_KEY = 'docs-ai-use-external-provider'; - -// Only relevant when provider is typesense. -const DISABLE_STREAMING_FLAG_KEY = 'docs-ai-disable-streaming'; - -const EXTERNAL_CONV_MODEL_DOCS_STABLE = 'docs-ai-conv-model-v1-stable'; const INTERNAL_CONVERSATION_ID_PREFIX = 'dd_docsai_'; const RENDER_THROTTLE = 50; +// Auto-rotated client-side messages shown while we wait for the first server `thinking` event. +// Aligned with the mapped server messages below so the user sees consistent copy either way. const LOADING_MESSAGES = [ - 'Searching documentation…', - 'Reviewing relevant pages…', - 'Analyzing content…', - 'Generating answer…' + 'Understanding your question…', + 'Searching the docs…', + 'Reading the most relevant pages…', + 'Drafting your response…' ]; +// Maps backend `thinking` event content to user-facing copy. +// `null` hides the event entirely (used for internal optimization steps the user shouldn't see). +// Unknown keys fall through to the original message so new backend stages still surface something. +const THINKING_MESSAGES = { + 'Rewriting query...': 'Understanding your question…', + 'Searching documentation...': 'Searching the docs…', + 'Reviewing relevant pages': 'Reading the most relevant pages…', + 'Generating answer...': 'Drafting your response…', + 'Something went wrong. Please try again.': 'Something went wrong. Try asking again.' 
+}; + +function mapThinkingMessage(serverMessage) { + if (Object.prototype.hasOwnProperty.call(THINKING_MESSAGES, serverMessage)) { + return THINKING_MESSAGES[serverMessage]; + } + return serverMessage; +} + let isDatadogUser = false; initializeFeatureFlags().then(async (client) => { - IS_CONVERSATIONAL_SEARCH_ENABLED = getBooleanFlag(client, CONVERSATIONAL_SEARCH_FLAG_KEY); - - if (!IS_CONVERSATIONAL_SEARCH_ENABLED && isDatadogEmployee()) { - IS_CONVERSATIONAL_SEARCH_ENABLED = true; - } - + IS_DOCS_AI_ENABLED = getBooleanFlag(client, DOCS_AI_ENABLED_FLAG_KEY, true); isDatadogUser = await fetchDatadogUserStatus(); - if (IS_CONVERSATIONAL_SEARCH_ENABLED) { - document.body.classList.add('conv-search-enabled'); + if (IS_DOCS_AI_ENABLED) { logAction('Conversational Search Impression', { conversational_search: { action: 'impression', page: window.location.pathname } }); - initConversationalSearch(); + } else { + teardownConversationalSearch(); } }); @@ -74,9 +79,6 @@ class ConversationalSearch { this.hasLoggedFirstOpen = false; this.isSuggestionQuery = false; - this.provider = PROVIDER_INTERNAL; - this.selectedModelId = EXTERNAL_CONV_MODEL_DOCS_STABLE; - this.typesenseStreamingDisabled = true; // Rewrites the initial user query for better retrieval before answering. // Only applied on the first message (no history). Follow-ups use history context instead. 
this.shouldRewriteQuery = true; @@ -87,14 +89,11 @@ class ConversationalSearch { if (!this.createElements()) return; this.bindEvents(); - this.resolveFlags(); this.ready = true; } get ctx() { return { - provider: this.provider, - selectedModelId: this.selectedModelId, conversationId: this.conversationId, isDatadogUser }; @@ -111,17 +110,6 @@ class ConversationalSearch { return `${INTERNAL_CONVERSATION_ID_PREFIX}${Date.now()}_${Math.random().toString(36).slice(2, 11)}`; } - resolveFlags() { - initializeFeatureFlags().then((client) => { - const useTypesense = getBooleanFlag(client, USE_EXTERNAL_PROVIDER_FLAG_KEY); - this.provider = useTypesense ? PROVIDER_TYPESENSE : PROVIDER_INTERNAL; - - if (useTypesense) { - this.typesenseStreamingDisabled = getBooleanFlag(client, DISABLE_STREAMING_FLAG_KEY); - } - }).catch(() => {}); - } - // --- DOM setup --------------------------------------------------------------- createElements() { @@ -325,7 +313,7 @@ class ConversationalSearch { open(trigger = 'entry_button') { if (!this.hasLoggedFirstOpen) { this.log('Conversational Search Open', { - conversational_search: { action: 'open_first_time', trigger, provider: this.provider } + conversational_search: { action: 'open_first_time', trigger } }); this.hasLoggedFirstOpen = true; } @@ -475,19 +463,16 @@ class ConversationalSearch { this.log('Conversational Search Interaction', { conversational_search: { action, - provider: this.provider, ...(this.conversationId && { conversation_id: this.conversationId }), ...extra } }); } - logResponse({ query, answer, startTime, streaming = true }) { + logResponse({ query, answer, startTime }) { this.log('Conversational Search Response', { conversational_search: { action: 'response_received', - provider: this.provider, - streaming, response_length: answer.length, ...(this.conversationId && { conversation_id: this.conversationId }), latency_ms: Date.now() - startTime @@ -510,19 +495,12 @@ class ConversationalSearch { this.addMessage('user', query); 
try { - if (this.provider === PROVIDER_INTERNAL) { - await this.runInternalStream(query); - } else if (this.typesenseStreamingDisabled) { - await this.runTypesenseFetch(query); - } else { - await this.runTypesenseStream(query); - } + await this.runInternalStream(query); } catch (error) { if (error.name === 'AbortError' && this.userCancelledRequest) { this.addResponseContainer().textContent = 'Request cancelled.'; } else { this.logErr('Conversational Search Response Error', error); - if (this.provider === PROVIDER_TYPESENSE) resetTypesenseClient(); this.addResponseContainer().textContent = 'Sorry, something went wrong. Please try again.'; } } finally { @@ -535,7 +513,7 @@ class ConversationalSearch { } } - // --- Internal docs-ai provider ----------------------------------------------- + // --- Docs AI streaming ------------------------------------------------------- async runInternalStream(query) { this.abortController = new AbortController(); @@ -563,7 +541,8 @@ class ConversationalSearch { rewriteQuery: isFirstMessage && this.shouldRewriteQuery && !isSuggestion, signal: this.abortController.signal, onThinking: (message) => { - loadingIndicator.updateStatus(message); + const mapped = mapThinkingMessage(message); + if (mapped) loadingIndicator.updateStatus(mapped); }, onToken: (_token, fullMessage) => { if (!responseContainer) { @@ -602,112 +581,9 @@ class ConversationalSearch { } } - // --- Typesense provider (streaming) ------------------------------------------ - - async runTypesenseStream(query) { - this.abortController = new AbortController(); - const startTime = Date.now(); - const loadingIndicator = this.showLoadingIndicator(); - - let responseContainer = null; - let accumulatedMessage = ''; - let lastRenderTime = 0; - - let response; - try { - response = await streamConversation({ - typesenseConfig, - query, - modelId: this.selectedModelId, - conversationId: this.conversationId, - signal: this.abortController.signal, - onChunk: (chunk) => { - if 
(chunk?.conversation_id && !this.conversationId) { - this.conversationId = chunk.conversation_id; - } - - if (chunk?.message !== undefined) { - if (!responseContainer) { - loadingIndicator.stop(); - responseContainer = this.addResponseContainer(); - } - - accumulatedMessage += chunk.message; - - const now = Date.now(); - if (now - lastRenderTime > RENDER_THROTTLE) { - const { displayMarkdown, sources } = extractSources(accumulatedMessage); - responseContainer.innerHTML = inlineRefChips(parseMarkdown(displayMarkdown)); - if (sources.length > 0) { - responseContainer.appendChild(buildSourceCards(sources)); - } - lastRenderTime = now; - this.scrollToBottom(); - } - } - }, - onError: (error) => { - this.logErr('Typesense Streaming Error', error); - } - }); - } finally { - loadingIndicator.stop(); - } - - const finalConversationId = response?.results?.[0]?.conversation?.conversation_id; - if (finalConversationId) { - this.conversationId = finalConversationId; - } - - if (!responseContainer) { - responseContainer = this.addResponseContainer(); - } - - if (accumulatedMessage) { - this.finalizeResponse(responseContainer, query, accumulatedMessage, startTime); - } else { - responseContainer.textContent = 'No response received. 
Please try again.'; - } - } - - // --- Typesense provider (non-streaming) -------------------------------------- - - async runTypesenseFetch(query) { - this.abortController = new AbortController(); - const startTime = Date.now(); - const loadingIndicator = this.showLoadingIndicator(); - - let response; - try { - response = await fetchConversation({ - typesenseConfig, - query, - modelId: this.selectedModelId, - conversationId: this.conversationId, - signal: this.abortController.signal - }); - } finally { - loadingIndicator.stop(); - } - - const conversation = response?.conversation || response?.results?.[0]?.conversation; - if (conversation?.conversation_id) { - this.conversationId = conversation.conversation_id; - } - - const answer = conversation?.answer || ''; - const responseContainer = this.addResponseContainer(); - - if (answer) { - this.finalizeResponse(responseContainer, query, answer, startTime, false); - } else { - responseContainer.textContent = 'No response received. Please try again.'; - } - } - // --- Shared finalization ----------------------------------------------------- - finalizeResponse(container, query, answer, startTime, streaming = true) { + finalizeResponse(container, query, answer, startTime) { container.innerHTML = renderMessageWithSources(answer, { attachTooltips, buildSourceCards @@ -715,7 +591,7 @@ class ConversationalSearch { injectCodeCopyButtons(container, this.ctx); addMessageActions(container.parentElement, query, answer, this.ctx); this.scrollToBottom(); - this.logResponse({ query, answer, startTime, streaming }); + this.logResponse({ query, answer, startTime }); } } @@ -724,7 +600,8 @@ class ConversationalSearch { let conversationalSearchInstance = null; function initConversationalSearch() { - if (!IS_CONVERSATIONAL_SEARCH_ENABLED || conversationalSearchInstance) return; + if (!IS_DOCS_AI_ENABLED || conversationalSearchInstance) return; + const instance = new ConversationalSearch(); if (instance.ready) conversationalSearchInstance 
= instance; @@ -738,12 +615,24 @@ function initConversationalSearch() { } } +function teardownConversationalSearch() { + const inst = conversationalSearchInstance; + if (inst) { + inst.floatButton?.remove(); + inst.overlay?.remove(); + inst.sidebar?.remove(); + conversationalSearchInstance = null; + } + document.querySelector('.home-ai-btn')?.remove(); + document.querySelector('.home-ai-divider')?.remove(); +} + // The minimum length of the query to auto-submit the conversation. // to avoid submitting short queries that are not meaningful. const AUTO_SUBMIT_MIN_LENGTH = 10; function askDocsAI(query, options = {}) { - if (!IS_CONVERSATIONAL_SEARCH_ENABLED || !conversationalSearchInstance) return; + if (!IS_DOCS_AI_ENABLED || !conversationalSearchInstance) return; const trimmed = (query || '').trim(); const inst = conversationalSearchInstance; @@ -767,4 +656,4 @@ if (document.readyState === 'loading') { initConversationalSearch(); } -export { ConversationalSearch, askDocsAI, CONVERSATIONAL_SEARCH_FLAG_KEY }; +export { ConversationalSearch, askDocsAI, DOCS_AI_ENABLED_FLAG_KEY }; diff --git a/assets/scripts/components/conversational-search/logger.js b/assets/scripts/components/conversational-search/logger.js index b35b35fc9dd..a421d6f0b0d 100644 --- a/assets/scripts/components/conversational-search/logger.js +++ b/assets/scripts/components/conversational-search/logger.js @@ -1,7 +1,6 @@ -export function logAction(message, data, { selectedModelId, isDatadogUser } = {}) { +export function logAction(message, data, { isDatadogUser } = {}) { const conversationalSearchData = { docs_ai: true, - model_id: selectedModelId, is_datadog_user: isDatadogUser ?? 
false, ...(data?.conversational_search || {}) }; @@ -19,10 +18,9 @@ export function logAction(message, data, { selectedModelId, isDatadogUser } = {} } } -export function logError(message, error, { selectedModelId, conversationId, isDatadogUser } = {}) { +export function logError(message, error, { conversationId, isDatadogUser } = {}) { const errorData = { docs_ai: true, - model_id: selectedModelId, conversation_id: conversationId, is_datadog_user: isDatadogUser ?? false, error_message: error?.message || String(error), diff --git a/assets/scripts/components/conversational-search/streaming.js b/assets/scripts/components/conversational-search/streaming.js deleted file mode 100644 index f5dc391520c..00000000000 --- a/assets/scripts/components/conversational-search/streaming.js +++ /dev/null @@ -1,109 +0,0 @@ -import Typesense from 'typesense'; - -let client = null; - -export function resetTypesenseClient() { - client = null; -} - -export function getTypesenseClient(typesenseConfig) { - if (client) return client; - - client = new Typesense.Client({ - nearestNode: { host: `${typesenseConfig.host}.a1.typesense.net`, port: 443, protocol: 'https' }, - nodes: [ - { host: `${typesenseConfig.host}-1.a1.typesense.net`, port: 443, protocol: 'https' }, - { host: `${typesenseConfig.host}-2.a1.typesense.net`, port: 443, protocol: 'https' }, - { host: `${typesenseConfig.host}-3.a1.typesense.net`, port: 443, protocol: 'https' } - ], - apiKey: typesenseConfig.public_key, - connectionTimeoutSeconds: 45 - }); - - return client; -} - -/** - * Strips Typesense query syntax that breaks hybrid (keyword + vector) search: - * - "double quotes" trigger exact-phrase mode; when 0 keyword hits match, - * Typesense skips vector search entirely and returns 0 results. - * - -dash prefix triggers token exclusion, which is not meaningful for - * a conversational AI query. 
- */ -export function sanitizeQuery(raw) { - return raw - .replace(/"/g, '') - .replace(/(^|\s)-/g, '$1') - .trim(); -} - -export async function streamConversation({ - typesenseConfig, - query, - modelId, - conversationId, - signal, - onChunk, - onError -}) { - const tsClient = getTypesenseClient(typesenseConfig); - - const searchBody = { - searches: [ - { - collection: typesenseConfig.docsIndex, - preset: 'docs_ai_search_preset' - } - ] - }; - - const commonSearchParams = { - conversation: true, - conversation_model_id: modelId, - q: sanitizeQuery(query), - conversation_stream: true - }; - - if (conversationId) { - commonSearchParams.conversation_id = conversationId; - } - - return tsClient.apiCall.post('/multi_search', searchBody, commonSearchParams, {}, { - abortSignal: signal, - isStreamingRequest: true, - streamConfig: { onChunk, onError } - }); -} - -export async function fetchConversation({ - typesenseConfig, - query, - modelId, - conversationId, - signal -}) { - const tsClient = getTypesenseClient(typesenseConfig); - - const searchBody = { - searches: [ - { - collection: typesenseConfig.docsIndex, - preset: 'docs_ai_search_preset' - } - ] - }; - - const commonSearchParams = { - conversation: true, - conversation_model_id: modelId, - q: sanitizeQuery(query) - }; - - if (conversationId) { - commonSearchParams.conversation_id = conversationId; - } - - return tsClient.apiCall.post('/multi_search', searchBody, commonSearchParams, {}, { - abortSignal: signal - }); -} diff --git a/assets/scripts/components/instantsearch/searchbarHits.js b/assets/scripts/components/instantsearch/searchbarHits.js index 2c2033a98b2..90302fdc024 100644 --- a/assets/scripts/components/instantsearch/searchbarHits.js +++ b/assets/scripts/components/instantsearch/searchbarHits.js @@ -1,14 +1,15 @@ import { getHitData, getSnippetForDisplay } from './getHitData'; import { bodyClassContains } from '../../helpers/helpers'; -import { CONVERSATIONAL_SEARCH_FLAG_KEY } from 
'../../components/conversational-search'; +import { DOCS_AI_ENABLED_FLAG_KEY } from '../../components/conversational-search'; import connectHits from 'instantsearch.js/es/connectors/hits/connectHits'; -import { initializeFeatureFlags, getBooleanFlag, isDatadogEmployee } from '../../helpers/feature-flags'; +import { initializeFeatureFlags, getBooleanFlag } from '../../helpers/feature-flags'; -let IS_CONVERSATIONAL_SEARCH_ENABLED = false; +// Optimistic render: assume on so the AI suggestion shows on first paint. +let IS_DOCS_AI_ENABLED = true; const ASK_AI_ICON_SRC = '/images/svg-icons/spark-ai.svg'; initializeFeatureFlags().then((client) => { - IS_CONVERSATIONAL_SEARCH_ENABLED = getBooleanFlag(client, CONVERSATIONAL_SEARCH_FLAG_KEY) || isDatadogEmployee(); + IS_DOCS_AI_ENABLED = getBooleanFlag(client, DOCS_AI_ENABLED_FLAG_KEY, true); }); const logDocsAIEvent = (message, payload) => { @@ -92,7 +93,7 @@ const updateNoHitsState = (container, numHits) => { // Kick off flag init once, update UI when ready const ensureConvSearchFlag = (state) => { - if (!IS_CONVERSATIONAL_SEARCH_ENABLED || !state?.isDocsContainer) return; + if (!IS_DOCS_AI_ENABLED || !state?.isDocsContainer) return; const aiList = state.container.querySelector('#ais-Hits-ai-list'); renderAskAISuggestion(aiList, state.query); updateNoHitsState(state.container, state.numHits); @@ -195,7 +196,7 @@ const renderHits = (renderOptions, isFirstRender) => { // Add AI suggestion first (only for docs container) if (isDocsContainer) { const aiList = container.querySelector('#ais-Hits-ai-list'); - if (IS_CONVERSATIONAL_SEARCH_ENABLED) { + if (IS_DOCS_AI_ENABLED) { renderAskAISuggestion(aiList, currentQuery); } else if (aiList) { aiList.replaceChildren(); diff --git a/assets/scripts/helpers/feature-flags.js b/assets/scripts/helpers/feature-flags.js index 7207847836a..ffb8507aeac 100644 --- a/assets/scripts/helpers/feature-flags.js +++ b/assets/scripts/helpers/feature-flags.js @@ -4,52 +4,14 @@ import { getConfig } 
from './getConfig'; const rawEnv = document.documentElement.dataset?.env || 'preview'; // TODO: Remove this once Feature Flag Rollout is complete -const env = rawEnv === 'development' ? 'preview' : rawEnv; +const env = rawEnv === 'development' ? 'preview' : rawEnv; const config = getConfig(env); -// Singleton Promise -let initializationPromise = null; - -const getRumTargetingKey = () => { - // Safety check for ad-blockers or missing global - if (typeof window === 'undefined' || !window.DD_RUM) return undefined; - - try { - const rumUser = window.DD_RUM.getUser && window.DD_RUM.getUser(); - const context = window.DD_RUM.getInternalContext && window.DD_RUM.getInternalContext(); - return rumUser?.device_id || context?.session_id; - } catch (e) { - return undefined; - } -}; - -const DD_INTERNAL_PARAM = 'dd_internal'; -const DD_INTERNAL_STORAGE_KEY = 'docs_dd_internal'; - -export const isDatadogEmployee = () => { - try { - - if (localStorage.getItem(DD_INTERNAL_STORAGE_KEY) === '1') return true; - - const params = new URLSearchParams(window.location.search); - if (params.get(DD_INTERNAL_PARAM) === '1') { - localStorage.setItem(DD_INTERNAL_STORAGE_KEY, '1'); - return true; - } - } catch { - console.error('Error checking if user is Datadog employee'); - } - return false; -}; - let datadogUserPromise = null; - export const fetchDatadogUserStatus = () => { if (datadogUserPromise) return datadogUserPromise; - const locateUrl = 'https://www.datadoghq.com/locate' - - datadogUserPromise = fetch(locateUrl, { credentials: 'include' }) + datadogUserPromise = fetch('https://www.datadoghq.com/locate', { credentials: 'include' }) .then((res) => res.json()) .then((data) => !!data.user_status) .catch(() => false); @@ -57,62 +19,32 @@ export const fetchDatadogUserStatus = () => { return datadogUserPromise; }; +let clientPromise = null; export const initializeFeatureFlags = () => { - if (initializationPromise) return initializationPromise; + if (clientPromise) return clientPromise; - 
initializationPromise = (async () => { + clientPromise = (async () => { if (!config?.ddClientToken || !config?.ddApplicationId) { console.error('[Flags] Missing Datadog config'); return null; } - - const client = OpenFeature.getClient(); - - const provider = new DatadogProvider({ - applicationId: config.ddApplicationId, - clientToken: config.ddClientToken, - env - }); - - await OpenFeature.setProviderAndWait(provider); - - // Blocking: weighted rollouts need the RUM targeting key in context. - await enrichContextWithRum(); - - return client; - })().catch((error) => { - const isAdBlockerLikely = typeof window !== 'undefined' && (!window.DD_RUM || error?.message?.includes('Unexpected end of JSON input')); - const contextMsg = isAdBlockerLikely ? ' (Likely blocked by an ad blocker or privacy extension)' : ''; - - console.warn(`[Flags] Initialization failed${contextMsg}:`, error); - initializationPromise = null; - return null; - }); - - return initializationPromise; -}; - -// Polls briefly for RUM targeting key while fetching /locate in parallel. -// RUM is either ready within ~500ms or blocked (ad-blocker). No point waiting longer. -const enrichContextWithRum = async () => { - const locatePromise = fetchDatadogUserStatus(); - - let targetingKey = getRumTargetingKey(); - if (!targetingKey) { - for (let i = 0; i < 5; i++) { - await new Promise((resolve) => setTimeout(resolve, 100)); - targetingKey = getRumTargetingKey(); - if (targetingKey) break; + try { + await OpenFeature.setProviderAndWait(new DatadogProvider({ + applicationId: config.ddApplicationId, + clientToken: config.ddClientToken, + env, + // Sends exposure events to the FF dashboard. 
+ enableExposureLogging: true + })); + return OpenFeature.getClient(); + } catch (error) { + console.warn('[Flags] Initialization failed:', error); + return null; } - } - - const isDatadogUser = await locatePromise; - - const context = { isDatadogUser }; - if (targetingKey) context.targetingKey = targetingKey; + })(); - await OpenFeature.setContext(context); + return clientPromise; }; -export const getBooleanFlag = (client, key, defaultValue = false) => +export const getBooleanFlag = (client, key, defaultValue = false) => client?.getBooleanValue(key, defaultValue) ?? defaultValue; diff --git a/assets/styles/components/conversational-search/_home-ask-ai.scss b/assets/styles/components/conversational-search/_home-ask-ai.scss index 5ad582033df..76056bb2095 100644 --- a/assets/styles/components/conversational-search/_home-ask-ai.scss +++ b/assets/styles/components/conversational-search/_home-ask-ai.scss @@ -29,23 +29,19 @@ } .home-ai-divider { - display: none; + display: inline; color: rgba(255, 255, 255, 0.6); font-size: 13px; font-weight: 400; - .conv-search-enabled & { - display: inline; - - @media (max-width: 991px) { - display: none; - } + @media (max-width: 991px) { + display: none; } } .home-ai-btn { $home-ai-main: #b325b5; - display: none; + display: inline-flex; align-items: center; justify-content: center; gap: 6px; @@ -65,10 +61,6 @@ white-space: nowrap; transition: background 0.15s ease, border-color 0.15s ease, box-shadow 0.15s ease; - .conv-search-enabled & { - display: inline-flex; - } - svg, img { flex-shrink: 0; width: 15px; diff --git a/config/_default/menus/main.en.yaml b/config/_default/menus/main.en.yaml index 5f23163fd9e..bce66139ced 100644 --- a/config/_default/menus/main.en.yaml +++ b/config/_default/menus/main.en.yaml @@ -2956,7 +2956,12 @@ menu: url: actions/connections/aws_integration/ parent: actions_connections identifier: actions_aws_integration - weight: 101 + weight: 102 + - name: Google Workspace + url: 
actions/connections/google_workspace/ + parent: actions_connections + identifier: actions_google_workspace + weight: 103 - name: Private Actions url: actions/private_actions/ parent: action_catalog @@ -5361,6 +5366,11 @@ menu: parent: llm_obs_external_evaluations identifier: llm_obs_deepeval_evaluations weight: 40301 + - name: Pydantic Evaluations + url: llm_observability/evaluations/pydantic_evaluations + parent: llm_obs_external_evaluations + identifier: llm_obs_pydantic_evaluations + weight: 40302 - name: Annotation Queues url: llm_observability/evaluations/annotation_queues parent: llm_obs_evaluations diff --git a/content/en/actions/connections/google_workspace.md b/content/en/actions/connections/google_workspace.md new file mode 100644 index 00000000000..17f180188ae --- /dev/null +++ b/content/en/actions/connections/google_workspace.md @@ -0,0 +1,163 @@ +--- +title: Google Workspace +description: Connect Datadog Actions to Google Workspace services using OAuth 2.0 to automate tasks in Gmail, Calendar, Drive, Docs, Sheets, Forms, and Chat. +disable_toc: false +further_reading: +- link: "/actions/connections/" + tag: "Documentation" + text: "Find out more about connection credentials" +--- + +Use a Google Workspace connection to authenticate Datadog Actions against Google Workspace APIs. This allows you to interact with Gmail, Google Calendar, Google Drive, Google Docs, Google Sheets, Google Forms, and Google Chat in your workflows and apps. + +## Prerequisites + +You must have an OAuth 2.0 client in Google Cloud to create a connection in Datadog. If you don't already have a Google Cloud project, follow Google's [Create a Google Cloud project][1] guide. + +### Enable the required APIs + +Enable the Google APIs you plan to use in your workflows and apps: + +1. Navigate to [**APIs & Services** > **Library**][2] in the Google Cloud Console. +1. Search for and enable the APIs for the Google Workspace services you intend to use, listed in the following table. 
+ +| Google Workspace service | API to enable | +|---|---| +| Gmail | Gmail API | +| Google Calendar | Google Calendar API | +| Google Drive | Google Drive API | +| Google Docs | Google Docs API | +| Google Sheets | Google Sheets API | +| Google Forms | Google Forms API | +| Google Chat | Google Chat API | + +### Configure the OAuth consent screen + +1. Navigate to [**APIs & Services** > **OAuth consent screen**][3] in the Google Cloud Console. +1. Select a user type: + - **Internal**: Limits access to users in your Google Workspace organization. Recommended for most enterprise use cases. + - **External**: Allows any Google account to authorize the app. Requires [app verification][4] for production use. +1. Fill in the required app information fields and click **Save and Continue**. +1. Under **Scopes**, add the OAuth scopes required for the actions you intend to use. See the [scopes reference](#scopes-reference). +1. Complete the remaining steps and click **Back to Dashboard**. + +### Create OAuth 2.0 credentials + +1. Navigate to [**APIs & Services** > **Credentials**][5] in the Google Cloud Console. +1. Click **Create Credentials** > **OAuth client ID**. +1. For **Application type**, select **Web application**. +1. Under **Authorized JavaScript origins**, add the Datadog origin URL. Under **Authorized redirect URIs**, add the Datadog OAuth callback URL. Both URLs are displayed in the Datadog connection creation dialog when you select **Google Workspace**. +1. Click **Create**. +1. Copy the **Client ID** and **Client Secret**—you need these when creating the connection in Datadog. + +## Create the connection in Datadog + +1. From the [Action Catalog page][6], click the {{< ui >}}Connections{{< /ui >}} tab. +1. Click {{< ui >}}New Connection{{< /ui >}}. +1. Select the {{< ui >}}Google Workspace{{< /ui >}} icon. +1. Enter a {{< ui >}}Connection Name{{< /ui >}}. +1. 
Enter the {{< ui >}}Client ID{{< /ui >}} and {{< ui >}}Client Secret{{< /ui >}} from your Google Cloud OAuth 2.0 credentials. +1. Select the {{< ui >}}Scopes{{< /ui >}} required for the actions you plan to use. See the [scopes reference](#scopes-reference). +1. Leave the {{< ui >}}Authorize URL{{< /ui >}} and {{< ui >}}Token URL{{< /ui >}} fields as their pre-populated default values unless you have a specific reason to change them. +1. Click {{< ui >}}Create{{< /ui >}}. +1. In the authorization window that opens, sign in with the Google account you want to use and grant the requested permissions. + +## Scopes reference + +Select only the scopes required by the actions you intend to use. + +### Gmail + +| Scope label | Scope value | Description | +|---|---|---| +| Gmail: Full Access | `https://mail.google.com/` | Full read and write access to all Gmail resources | +| Gmail: Read, Compose, Send, and Permanently Delete Threads | `https://www.googleapis.com/auth/gmail.modify` | All access except deleting permanently | +| Gmail: Read Only | `https://www.googleapis.com/auth/gmail.readonly` | View email messages and settings | +| Gmail: Send Only | `https://www.googleapis.com/auth/gmail.send` | Send email on behalf of the user | +| Gmail: Compose and Send | `https://www.googleapis.com/auth/gmail.compose` | Manage drafts and send email | +| Gmail: Manage Labels | `https://www.googleapis.com/auth/gmail.labels` | Create, read, update, and delete labels | +| Gmail: View Metadata Only | `https://www.googleapis.com/auth/gmail.metadata` | View email metadata such as labels and headers | + +### Google Calendar + +| Scope label | Scope value | Description | +|---|---|---| +| Calendar: Full Access | `https://www.googleapis.com/auth/calendar` | Full read and write access to calendars | +| Calendar: Read Only | `https://www.googleapis.com/auth/calendar.readonly` | View calendars | +| Calendar: Manage Events | `https://www.googleapis.com/auth/calendar.events` | View and edit events 
on all calendars | +| Calendar: View Events | `https://www.googleapis.com/auth/calendar.events.readonly` | View events on all calendars | + +### Google Drive + +| Scope label | Scope value | Description | +|---|---|---| +| Drive: Full Access | `https://www.googleapis.com/auth/drive` | Full read and write access to all Drive files | +| Drive: Read Only | `https://www.googleapis.com/auth/drive.readonly` | View files in Drive | +| Drive: Access Files Created by This App | `https://www.googleapis.com/auth/drive.file` | Access only files created or opened by this app | +| Drive: App Data Folder | `https://www.googleapis.com/auth/drive.appdata` | Access the app-specific data folder | +| Drive: Manage Metadata | `https://www.googleapis.com/auth/drive.metadata` | View and manage metadata of files | +| Drive: View Metadata | `https://www.googleapis.com/auth/drive.metadata.readonly` | View metadata of files | + +### Google Docs + +| Scope label | Scope value | Description | +|---|---|---| +| Docs: Full Access | `https://www.googleapis.com/auth/documents` | View and manage documents in Google Docs | +| Docs: Read Only | `https://www.googleapis.com/auth/documents.readonly` | View documents in Google Docs | + +### Google Sheets + +| Scope label | Scope value | Description | +|---|---|---| +| Sheets: Full Access | `https://www.googleapis.com/auth/spreadsheets` | View and manage spreadsheets in Google Sheets | +| Sheets: Read Only | `https://www.googleapis.com/auth/spreadsheets.readonly` | View spreadsheets in Google Sheets | + +### Google Forms + +| Scope label | Scope value | Description | +|---|---|---| +| Forms: Create and Edit Forms | `https://www.googleapis.com/auth/forms.body` | View and manage form definitions | +| Forms: View Forms | `https://www.googleapis.com/auth/forms.body.readonly` | View form definitions | +| Forms: View Responses | `https://www.googleapis.com/auth/forms.responses.readonly` | View form responses | + +### Google Chat + +| Scope label | Scope value | 
Description | +|---|---|---| +| Chat: View, Compose, and Delete Messages | `https://www.googleapis.com/auth/chat.messages` | View, create, update, and delete messages | +| Chat: Compose and Send Messages | `https://www.googleapis.com/auth/chat.messages.create` | Create and send messages in spaces | +| Chat: View Messages | `https://www.googleapis.com/auth/chat.messages.readonly` | View messages and reactions | +| Chat: Manage Spaces | `https://www.googleapis.com/auth/chat.spaces` | Create, view, update, and delete spaces | +| Chat: Create Spaces | `https://www.googleapis.com/auth/chat.spaces.create` | Create spaces | +| Chat: View Spaces | `https://www.googleapis.com/auth/chat.spaces.readonly` | View spaces | + +### User identity + +| Scope label | Scope value | Description | +|---|---|---| +| User Info: Email Address | `https://www.googleapis.com/auth/userinfo.email` | View the user's email address | +| User Info: Basic Profile | `https://www.googleapis.com/auth/userinfo.profile` | View the user's basic profile information | +| OpenID Connect | `openid` | Authenticate using OpenID Connect | + +### Google Workspace Admin + +| Scope label | Scope value | Description | +|---|---|---| +| Admin: Manage Users | `https://www.googleapis.com/auth/admin.directory.user` | Create, read, update, and delete users | +| Admin: View Users | `https://www.googleapis.com/auth/admin.directory.user.readonly` | View users in the directory | +| Admin: Manage Groups | `https://www.googleapis.com/auth/admin.directory.group` | Create, read, update, and delete groups | +| Admin: View Groups | `https://www.googleapis.com/auth/admin.directory.group.readonly` | View groups in the directory | + +## Further reading + +{{< partial name="whats-next/whats-next.html" >}} + +
Do you have questions or feedback? Join the **#workflows** or **#app-builder** channel on the [Datadog Community Slack][7]. + +[1]: https://developers.google.com/workspace/guides/create-project +[2]: https://console.cloud.google.com/apis/library +[3]: https://console.cloud.google.com/apis/credentials/consent +[4]: https://developers.google.com/identity/protocols/oauth2/production-readiness/restricted-scope-verification +[5]: https://console.cloud.google.com/apis/credentials +[6]: https://app.datadoghq.com/actions/action-catalog +[7]: https://chat.datadoghq.com/ diff --git a/content/en/actions/connections/http.md b/content/en/actions/connections/http.md index 16c751f1d34..9840b45084d 100644 --- a/content/en/actions/connections/http.md +++ b/content/en/actions/connections/http.md @@ -52,7 +52,7 @@ If you need to authenticate your request, use the action's {{< ui >}}Connection{ 1. In the {{< ui >}}Connection{{< /ui >}} section, click the plus icon ({{< ui >}}+{{< /ui >}}). 1. Select {{< ui >}}Azure{{< /ui >}}. 1. Enter a {{< ui >}}Connection Name{{< /ui >}}, {{< ui >}}Tenant ID{{< /ui >}}, {{< ui >}}Client ID{{< /ui >}}, and {{< ui >}}Client Secret{{< /ui >}}. -1. Optionally, enter the {{< ui >}}Custom Scope{{< /ui >}} to be requested from Microsoft when acquiring an OAuth 2 access token. A resource's scope is constructed using the identifier URI for the resource and `.default`, separated by a forward slash (`/`). For example, `{identifierURI}/.default`. For more information, see [the Microsoft documentation on .default scope][3]. +1. Optionally, enter the {{< ui >}}Custom Scope{{< /ui >}} to be requested from Microsoft when acquiring an OAuth 2.0 access token. A resource's scope is constructed using the identifier URI for the resource and `.default`, separated by a forward slash (`/`). For example, `{identifierURI}/.default`. For more information, see [the Microsoft documentation on .default scope][3]. 1. Click {{< ui >}}Create{{< /ui >}}. 
### Create an HTTP token authentication connection diff --git a/content/en/getting_started/agent/_index.md b/content/en/getting_started/agent/_index.md index c212e8d639b..ae70be1c083 100644 --- a/content/en/getting_started/agent/_index.md +++ b/content/en/getting_started/agent/_index.md @@ -32,7 +32,7 @@ This guide introduces the Datadog Agent and covers: The Datadog Agent is software that runs on your hosts. It collects events and metrics from hosts and sends them to Datadog, where you can analyze your monitoring and performance data. The Agent can run on: -- Local hosts (Windows, macOS) +- Local hosts (Windows, Linux, macOS) - Containerized environments (Docker, Kubernetes) - On-premises data centers diff --git a/content/en/llm_observability/evaluations/deepeval_evaluations.md b/content/en/llm_observability/evaluations/deepeval_evaluations.md index 62358d3577a..af2bc25a7cc 100644 --- a/content/en/llm_observability/evaluations/deepeval_evaluations.md +++ b/content/en/llm_observability/evaluations/deepeval_evaluations.md @@ -1,6 +1,10 @@ --- title: DeepEval Evaluations description: Use DeepEval evaluations with LLM Observability Experiments. +further_reading: +- link: "/llm_observability/evaluations/external_evaluations" + tag: "Documentation" + text: "Submit Evaluations" --- ## Overview diff --git a/content/en/llm_observability/evaluations/pydantic_evaluations.md b/content/en/llm_observability/evaluations/pydantic_evaluations.md new file mode 100644 index 00000000000..e39352dad42 --- /dev/null +++ b/content/en/llm_observability/evaluations/pydantic_evaluations.md @@ -0,0 +1,145 @@ +--- +title: Pydantic Evaluations +description: Use Pydantic evaluations with LLM Observability Experiments. +further_reading: +- link: "/llm_observability/evaluations/external_evaluations" + tag: "Documentation" + text: "Submit Evaluations" +--- + +## Overview + +Pydantic is an open source framework that provides ready-to-use evaluations and allows for customizable LLM evaluations. 
For more information, see [Pydantic's documentation][3]. + +You can use LLM Observability to run Pydantic evaluations and scalar Pydantic report evaluations in [Experiments][1]. Pydantic evaluation results appear as evaluator results tied to each instance in an [LLM Observability dataset][5]. Pydantic report evaluations run on an entire LLM Observability dataset and report one scalar result for the dataset. + +## Setup + +1. Set up an [LLM Observability Experiment][2] and an [LLM Observability Dataset][4]. +2. Provide a Pydantic evaluator to the `evaluators` parameter in an LLMObs `Experiment` as demonstrated in the following code sample. (Optional) Provide a Pydantic report evaluator to the `summary_evaluators` parameter in an LLMObs `Experiment`. **Note**: Only Pydantic report evaluators that return a `ScalarResult` are supported. + +```python + +from dataclasses import dataclass +from typing import Any, Dict, Optional + +from pydantic_evals.evaluators import ( + EqualsExpected, + EvaluationReason, + Evaluator, + EvaluatorContext, + EvaluatorOutput, + LLMJudge, + ReportEvaluator, + ReportEvaluatorContext, +) +from pydantic_evals.reporting.analyses import ScalarResult + +from ddtrace.llmobs import LLMObs + + +LLMObs.enable( + api_key="", # defaults to DD_API_KEY environment variable + app_key="", # defaults to DD_APP_KEY environment variable + site="datadoghq.com", # defaults to DD_SITE environment variable + project_name="" # defaults to DD_LLMOBS_PROJECT_NAME environment variable, or "default-project" if the environment variable is not set +) + + +# this can be any Pydantic evaluator +@dataclass +class ComprehensiveCheck(Evaluator): + def evaluate(self, ctx: EvaluatorContext) -> EvaluatorOutput: + format_valid = self._check_format(ctx.output) + + to_return = { + 'valid_format': EvaluationReason( + value=format_valid, + reason='Valid JSON format' if format_valid else 'Invalid JSON format', + ), + 'quality_score': self._score_quality(ctx.output), + 'category': self._classify(ctx.output), + } + return to_return + + def _check_format(self, 
output: str) -> bool: + return output.startswith('{') and output.endswith('}') + + def _score_quality(self, output: str) -> float: + return len(output) / 100.0 + + def _classify(self, output: str) -> str: + return 'short' if len(output) < 50 else 'long' + +# This can be any Pydantic ReportEvaluator that returns ScalarResult +class TotalCasesEvaluator(ReportEvaluator): + def evaluate(self, ctx: ReportEvaluatorContext) -> ScalarResult: + return ScalarResult( + title='Total', + value=len(ctx.report.cases), + unit='cases', + ) + +dataset = LLMObs.create_dataset( + dataset_name="capitals-of-the-world", + project_name="capitals-project", # optional, defaults to project_name used in LLMObs.enable + description="Questions about world capitals", + records=[ + { + "input_data": { + "question": "What is the capital of China?" + }, # required, JSON or string + "expected_output": "Beijing", # optional, JSON or string + "metadata": {"difficulty": "easy"}, # optional, JSON + }, + { + "input_data": { + "question": "Which city serves as the capital of South Africa?" + }, + "expected_output": "Pretoria", + "metadata": {"difficulty": "medium"}, + }, + ], +) + +def my_task(input_data: Dict[str, Any], config: Optional[Dict[str, Any]] = None, metadata: Optional[Dict[str, Any]] = None) -> str: + question = input_data['question'] + # Your LLM or processing logic here + return "Beijing" if "China" in question else "Unknown" + + +llm_judge = LLMJudge( + rubric='Response provides the same answer as expected, possibly with explanation', + include_input=True, + include_expected_output=True, +) + +experiment = LLMObs.experiment( + name="", + task=my_task, + dataset=dataset, + evaluators=[EqualsExpected(), ComprehensiveCheck(), llm_judge], + summary_evaluators=[TotalCasesEvaluator()], + description="", +) + + +results = experiment.run(jobs=4, raise_errors=True) + +print(f"View experiment: {experiment.url}") +``` + +For a working example, see [Datadog's Pydantic demo in GitHub][6]. 
+ +### Usage +After you run an experiment with a Pydantic evaluation, you can view the Pydantic evaluation results per instance in the corresponding experiment run in Datadog. In the following experiment, two Pydantic evaluations (a custom Pydantic evaluator with the name "ComprehensiveCheck" and a built-in evaluator with the name "EqualsExpected") and one Pydantic report evaluator (a custom Pydantic report evaluator with the name "TotalCasesEvaluator") were run: + +{{< img src="llm_observability/pydantic-experiment-result.png" alt="An LLM Observability experiment with a Pydantic evaluator." style="width:100%;" >}} + +## Further reading + +{{< partial name="whats-next/whats-next.html" >}} + +[1]: /llm_observability/experiments +[2]: /llm_observability/experiments/setup#create-an-experiment +[3]: https://ai.pydantic.dev/evals/ +[4]: /llm_observability/experiments/setup#create-a-dataset +[5]: /llm_observability/experiments/datasets +[6]: https://github.com/DataDog/llm-observability/blob/main/experiments/eval-integrations/2-pydantic-demo.py diff --git a/content/en/llm_observability/guide/evaluation_developer_guide.md b/content/en/llm_observability/guide/evaluation_developer_guide.md index cdd0b467538..00b0bfc3585 100644 --- a/content/en/llm_observability/guide/evaluation_developer_guide.md +++ b/content/en/llm_observability/guide/evaluation_developer_guide.md @@ -49,7 +49,7 @@ The typical flow: ## Building evaluators -There are two ways to define an evaluator using LLM Observability: class-based and function-based. In addition to these evaluators, LLM Observability has integrations with open source evaluation frameworks, such as [DeepEval][6], that can be used in LLM Observability Experiments. +There are two ways to define an evaluator using LLM Observability: class-based and function-based. 
In addition to these evaluators, LLM Observability has integrations with open source evaluation frameworks, such as [DeepEval][6] and [Pydantic][8], that can be used in LLM Observability Experiments. | | Class-based | Function-based | |---|---|---| @@ -701,3 +701,4 @@ When submitting evaluations for [OpenTelemetry-instrumented spans][3], include t [5]: /llm_observability/evaluations/custom_llm_as_a_judge_evaluations [6]: /llm_observability/evaluations/deepeval_evaluations/ [7]: /llm_observability/evaluations/custom_llm_as_a_judge_evaluations#configure-the-prompt +[8]: /llm_observability/evaluations/pydantic_evaluations \ No newline at end of file diff --git a/content/en/opentelemetry/setup/otlp_ingest/_index.md b/content/en/opentelemetry/setup/otlp_ingest/_index.md index 2be16c506db..2b0f067b285 100644 --- a/content/en/opentelemetry/setup/otlp_ingest/_index.md +++ b/content/en/opentelemetry/setup/otlp_ingest/_index.md @@ -21,7 +21,8 @@ You might prefer this option if you're looking for a straightforward setup and w - [OTLP logs intake endpoint][3] - [OTLP metrics intake endpoint][4] -OTLP traces intake endpoint (in Preview): To request access, contact your Customer Success Manager. +- [Instrumenting for LLM Observability][5] +- OTLP traces intake endpoint (in Preview): To request access, contact your Customer Success Manager. 
## Further reading @@ -31,3 +32,4 @@ You might prefer this option if you're looking for a straightforward setup and w [2]: /opentelemetry/setup/collector_exporter/ [3]: /opentelemetry/setup/intake_endpoint/otlp_logs [4]: /opentelemetry/setup/intake_endpoint/otlp_metrics +[5]: /llm_observability/instrumentation/otel_instrumentation/?tab=python#setup diff --git a/static/images/llm_observability/pydantic-experiment-result.png b/static/images/llm_observability/pydantic-experiment-result.png new file mode 100644 index 00000000000..acd2e9786bd Binary files /dev/null and b/static/images/llm_observability/pydantic-experiment-result.png differ diff --git a/typesense.config.json b/typesense.config.json index 34672b72e11..47244f5b366 100644 --- a/typesense.config.json +++ b/typesense.config.json @@ -275,19 +275,6 @@ "group_max_candidates": 1000, "exclude_fields": "embedding" } - }, - { - "name": "docs_ai_search_preset", - "search_parameters": { - "collection": "docs_alias", - "query_by": "embedding,title,section_header,content", - "vector_query": "embedding:([], alpha: 0.7)", - "per_page": 20, - "filter_by": "language:en", - "rerank_hybrid_matches": true, - "prefix": false, - "exclude_fields": "embedding, id, objectID, distinct_base_url, relpermalink, language, order, rank, tags, type" - } } ] },