From 45233d40a6c0ce436169e68080dc05d0c854e7c2 Mon Sep 17 00:00:00 2001 From: "releaser-ai-plugin[bot]" <273148615+releaser-ai-plugin[bot]@users.noreply.github.com> Date: Thu, 16 Apr 2026 12:20:09 +0000 Subject: [PATCH] chore: sync skills (agent-skills-v0.48.0, context-mill@v1.10.1) --- .claude-plugin/plugin.json | 2 +- .codex-plugin/plugin.json | 2 +- .cursor-plugin/plugin.json | 2 +- gemini-extension.json | 2 +- skills/.sync-manifest | 1 + skills/instrument-feature-flags/SKILL.md | 2 +- .../references/android.md | 2 +- .../references/api.md | 2 +- .../references/best-practices.md | 224 ++++++++++++------ .../references/dotnet.md | 2 +- .../references/elixir.md | 2 +- .../references/flutter.md | 2 +- .../instrument-feature-flags/references/go.md | 2 +- .../references/ios.md | 2 +- .../references/java.md | 2 +- .../references/nodejs.md | 2 +- .../references/php.md | 2 +- .../references/python.md | 2 +- .../references/react-native.md | 2 +- .../references/react.md | 2 +- .../references/ruby.md | 2 +- .../references/rust.md | 2 +- .../references/web.md | 2 +- skills/instrument-integration/SKILL.md | 2 +- ...XAMPLE-nuxt-3.6.md => EXAMPLE-nuxt-3-6.md} | 4 +- .../references/EXAMPLE-nuxt-4.md | 2 +- .../instrument-integration/references/js.md | 2 +- .../references/posthog-js.md | 76 +++++- .../references/posthog-node.md | 2 +- .../references/python.md | 2 +- .../references/anthropic.md | 148 +++++------- .../references/autogen.md | 73 +++--- .../references/azure-openai.md | 126 +++++----- .../references/cerebras.md | 151 ++++++------ .../references/cohere.md | 151 ++++++------ .../references/deepseek.md | 151 ++++++------ .../references/fireworks-ai.md | 151 ++++++------ .../references/groq.md | 151 ++++++------ .../references/helicone.md | 151 ++++++------ .../references/hugging-face.md | 151 ++++++------ .../references/instructor.md | 154 ++++++------ .../references/langchain.md | 148 +++++------- .../references/langgraph.md | 121 +++++----- .../references/llamaindex.md 
| 73 +++--- .../references/manual-capture.md | 32 ++- .../references/mastra.md | 106 ++++----- .../references/mirascope.md | 82 +++---- .../references/mistral.md | 151 ++++++------ .../references/ollama.md | 153 ++++++------ .../references/openai.md | 167 ++++++------- .../references/openrouter.md | 157 ++++++------ .../references/perplexity.md | 151 ++++++------ .../references/portkey.md | 153 ++++++------ .../references/pydantic-ai.md | 86 +++---- .../references/semantic-kernel.md | 85 ++++--- .../references/smolagents.md | 82 +++---- .../references/together-ai.md | 151 ++++++------ .../references/vercel-ai.md | 21 +- .../references/xai.md | 151 ++++++------ skills/instrument-logs/references/datadog.md | 2 +- skills/instrument-logs/references/go.md | 2 +- skills/instrument-logs/references/java.md | 2 +- skills/instrument-logs/references/nextjs.md | 2 +- skills/instrument-logs/references/nodejs.md | 2 +- skills/instrument-logs/references/other.md | 6 +- skills/instrument-logs/references/python.md | 2 +- .../instrument-logs/references/start-here.md | 2 +- .../references/troubleshooting.md | 2 +- skills/instrument-product-analytics/SKILL.md | 2 +- ...XAMPLE-nuxt-3.6.md => EXAMPLE-nuxt-3-6.md} | 4 +- .../references/EXAMPLE-nuxt-4.md | 2 +- .../references/python.md | 2 +- skills/managing-subscriptions/SKILL.md | 162 +++++++++++++ 73 files changed, 2103 insertions(+), 2076 deletions(-) rename skills/instrument-integration/references/{EXAMPLE-nuxt-3.6.md => EXAMPLE-nuxt-3-6.md} (99%) rename skills/instrument-product-analytics/references/{EXAMPLE-nuxt-3.6.md => EXAMPLE-nuxt-3-6.md} (99%) create mode 100644 skills/managing-subscriptions/SKILL.md diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json index e1655c1..660f0c1 100644 --- a/.claude-plugin/plugin.json +++ b/.claude-plugin/plugin.json @@ -1,7 +1,7 @@ { "name": "posthog", "description": "Access PostHog analytics, feature flags, experiments, error tracking, and insights directly from Claude Code. 
Optionally capture Claude Code sessions to PostHog LLM Analytics.", - "version": "1.1.3", + "version": "1.1.4", "author": { "name": "PostHog", "email": "hey@posthog.com", diff --git a/.codex-plugin/plugin.json b/.codex-plugin/plugin.json index 5560224..a9c8562 100644 --- a/.codex-plugin/plugin.json +++ b/.codex-plugin/plugin.json @@ -1,6 +1,6 @@ { "name": "posthog", - "version": "1.0.5", + "version": "1.0.6", "description": "Access PostHog analytics, feature flags, experiments, error tracking, and insights directly from Codex", "author": { "name": "PostHog", diff --git a/.cursor-plugin/plugin.json b/.cursor-plugin/plugin.json index 0bac77b..7105d1b 100644 --- a/.cursor-plugin/plugin.json +++ b/.cursor-plugin/plugin.json @@ -1,7 +1,7 @@ { "name": "posthog", "displayName": "PostHog", - "version": "1.0.5", + "version": "1.0.6", "description": "Access PostHog analytics, feature flags, experiments, error tracking, and insights directly from Cursor", "author": { "name": "PostHog", diff --git a/gemini-extension.json b/gemini-extension.json index 9bd10f0..5326d2c 100644 --- a/gemini-extension.json +++ b/gemini-extension.json @@ -1,6 +1,6 @@ { "name": "posthog", - "version": "1.0.4", + "version": "1.0.5", "description": "Access PostHog analytics, feature flags, experiments, error tracking, and insights directly from Gemini CLI", "mcpServers": { "posthog": { diff --git a/skills/.sync-manifest b/skills/.sync-manifest index 9403ea4..dd19b33 100644 --- a/skills/.sync-manifest +++ b/skills/.sync-manifest @@ -11,6 +11,7 @@ instrument-integration instrument-llm-analytics instrument-logs instrument-product-analytics +managing-subscriptions query-examples signals skills-store diff --git a/skills/instrument-feature-flags/SKILL.md b/skills/instrument-feature-flags/SKILL.md index e47ac3d..c39174f 100644 --- a/skills/instrument-feature-flags/SKILL.md +++ b/skills/instrument-feature-flags/SKILL.md @@ -65,7 +65,7 @@ STEP 6: Set up environment variables. 
- `references/api.md` - API feature flags installation - docs - `references/next-js.md` - Next.js - docs - `references/adding-feature-flag-code.md` - Adding feature flag code - docs -- `references/best-practices.md` - Feature flag best practices - docs +- `references/best-practices.md` - Best practices for production-ready flags - docs Each platform reference contains SDK-specific installation, flag evaluation, and code examples. Find the one matching the user's stack. If unlisted, use the API reference as a fallback. diff --git a/skills/instrument-feature-flags/references/android.md b/skills/instrument-feature-flags/references/android.md index 05dfcfe..8eb67b1 100644 --- a/skills/instrument-feature-flags/references/android.md +++ b/skills/instrument-feature-flags/references/android.md @@ -1,4 +1,4 @@ -# Android feature flags installation - Docs +# Android Feature Flags installation - Docs 1. 1 diff --git a/skills/instrument-feature-flags/references/api.md b/skills/instrument-feature-flags/references/api.md index 6c156b2..9d4bbaa 100644 --- a/skills/instrument-feature-flags/references/api.md +++ b/skills/instrument-feature-flags/references/api.md @@ -1,4 +1,4 @@ -# API feature flags installation - Docs +# API Feature Flags installation - Docs 1. 1 diff --git a/skills/instrument-feature-flags/references/best-practices.md b/skills/instrument-feature-flags/references/best-practices.md index 1bb4add..282496c 100644 --- a/skills/instrument-feature-flags/references/best-practices.md +++ b/skills/instrument-feature-flags/references/best-practices.md @@ -1,135 +1,223 @@ -# Feature flag best practices - Docs +# Best practices for production-ready flags - Docs -## 1\. Use a reverse proxy +## Checklist -Ad blockers have the potential to disable your feature flags, which can lead to bad experiences, such as users seeing the wrong version of your app, or missing a new feature rollout. 
+- [Call `identify()` before evaluating flags](#resolve-identity-before-evaluating-flags) – the hash uses the wrong ID otherwise. This is the most common input problem. +- [Evaluate flags server-side with local evaluation](#server-side-local-evaluation-is-the-recommended-default) – explicit inputs, your data right there, no workarounds. +- [Bootstrap client-side flags](#have-the-value-before-you-need-it) – client-side evaluation is async. [Bootstrap](/docs/feature-flags/bootstrapping.md) to eliminate the gap. +- [Handle `undefined` explicitly](#undefined-is-not-false) – it means "not evaluated yet," not `false`. +- [Evaluate once, record the result](#evaluate-once-not-continuously) – a flag is a one-time signal. Re-evaluate only on meaningful state changes. +- [Evaluate where the data lives](#evaluate-where-the-data-lives) – if the data is on your server, evaluate there. +- [Choose evaluation context deliberately](#choose-a-flag-type-intentionally) – "server and client" is the default for compatibility, not because it's the right choice for your flag. +- [Clean up flags that have done their job](#clean-up-flags-that-have-done-their-job) – a flag at 100% is done. Remove it or archive it. +- [Disable client-side evaluation for server-side flags](#disable-client-side-evaluation-for-server-side-flags) – don't let the client SDK re-evaluate what your server already decided. +- [Use a reverse proxy](#use-a-reverse-proxy) – prevent ad blockers from disabling your flags. +- [Call your flag in as few places as possible](#call-your-flag-in-as-few-places-as-possible) – wrap in a single function if used in multiple places. +- [Name flags clearly](#name-flags-clearly) – descriptive names, types, positive language. +- [Roll out progressively](#roll-out-progressively) – start small, monitor, then increase. -To avoid this, deploy a reverse proxy, which enables you to make requests and send events to PostHog Cloud using your own domain. 
+**The mental model:** [Flags are pure functions](#flags-are-pure-functions) – same flag key + same distinct ID = same result. Always. [Unexpected results are almost always input problems](#unexpected-results-are-almost-always-input-problems) – if the result changed, an input changed. -This means that requests are less likely to be intercepted by tracking blockers, and your feature flags are more likely to work as intended. You'll also capture more usage data. +--- -PostHog offers a free [managed reverse proxy](/docs/advanced/proxy/managed-reverse-proxy.md), or you can run your own. See our [reverse proxy docs](/docs/advanced/proxy.md) for more. +## Flags are pure functions -## 2\. Call your flag in as few places as possible +A flag hashes two things – the **flag key** and the **distinct ID** – and returns a deterministic result. Same inputs, same output. Every time. -It should be easy to understand how feature flags affect your code. The more locations a flag is in, the more likely it is to cause problems. For example, a developer could remove the flag in one place but forget to remove it in another. +PostHog AI -If you expect to use a feature flag in multiple places, it's a good idea to wrap the flag in a single function or method. For example: +``` +hash("my-experiment", "user-123") → 0.31 → always 0.31 +``` -JavaScript +On top of that, PostHog layers property targeting (does this user match?), rollout percentage (is their position below the threshold?), and variant assignment. But the foundation is the hash: **same flag key + same distinct ID = same result.** + +**Technically** + +"Pure function" means deterministic given a stable flag definition. The definition (rollout %, targeting rules, variants) is external state. Given the same definition, evaluation is fully deterministic on `flag_key` + `distinct_id`. 
Some features like [experience continuity](#dont-rely-on-flag-persistence-to-fix-identity-gaps) add persistence layers that introduce side effects on the server, but from your perspective as the caller, the model holds: same inputs, same output. + +### How the hash works + +PostHog uses SHA-1: PostHog AI -```javascript -function useBetaFeature() { - return posthog.isFeatureEnabled('beta-feature') -} +``` +hash_key = "{flag_key}.{distinct_id}" +position = parseInt(sha1(hash_key).slice(0, 15), 16) / LONG_SCALE → float in [0, 1] +in_rollout = position <= rollout_percentage / 100 ``` -## 3\. Targeting +For variants, a second hash with salt `"variant"` maps to variant ranges independently. The flag key is included so the same user gets independent assignments across different flags. -PostHog evaluates flags based on the user's distinct ID, having different IDs can cause the same user to receive different flag values across different sessions, devices, and platforms. By [identifying](/docs/getting-started/identify-users.md) them, you can ensure consistent flag values. +If the flag has property targeting, PostHog first checks whether the person matches the conditions. If they don't match, the hash never runs – the flag returns `false`. -The same applies to identifying [groups](/docs/product-analytics/group-analytics.md) for group-level flags. +## Unexpected results are almost always input problems -For flags targeting anonymous users, such as signup flows or landing page experiments, consider using [device bucketing](/docs/feature-flags/device-bucketing.md) instead. This evaluates the flag based on the device ID, ensuring a consistent experience on the device even after the user logs in. +If you evaluate the same flag with the same distinct ID a million times, you will get the same result a million times. It's how the math works. The hash is deterministic. It doesn't drift, it doesn't have off days, and it doesn't return different values on Tuesdays. -## 4\. 
Use server-side local evaluation for faster flags +So when a flag returns something you didn't expect, **the flag is fine, the problem is in the inputs passed to the flag.** Something about the identity, the properties, or the flag definition wasn't what you assumed. Find what changed, and you've found the problem. -Evaluating feature flags requires making a request to PostHog for each flag. However, you can improve performance by evaluating flags locally. Instead of making a request for each flag, PostHog will periodically request and store feature flag definitions locally, enabling you to evaluate flags without making additional requests. +If you keep running into flag issues and they're not [incidents](https://status.posthog.com), the conversation isn't about PostHog's flag behavior – it's about how your application coordinates the data that flags depend on. That's an engineering conversation about identity flows, property syncing, and evaluation architecture. No single config tweak fixes it. -Evaluate flags locally when possible, since this enables you to resolve flags faster and with fewer API calls. See our docs on [local evaluation](/docs/feature-flags/local-evaluation.md) for more details. +We're here to help with that – this guide, [PostHog AI](/docs/feature-flags/manage-flags-ai.md), and [professional services](https://posthog.com/professional-services) all exist for exactly this. But the starting point is always the same: **look at the inputs.** -## 5\. Bootstrap flags on the client to make them available immediately +When something goes wrong, in order of likelihood: -Since there is a delay between initializing PostHog and fetching feature flags, feature flags are not always available immediately. This makes them unusable if you want to do something like redirecting a user to a different page based on a feature flag. +1. **Input problems** (most common). Wrong distinct ID, missing properties, changed flag definition. 
PostHog gives you tools to get the coordination right – [bootstrapping](/docs/feature-flags/bootstrapping.md), [property overrides](/docs/feature-flags/property-overrides.md), [server-side evaluation](/docs/feature-flags/local-evaluation.md). +2. **Output problems.** The flag returned the right value but your code misread it – `undefined` treated as `false`, no handling for the loading gap, evaluating repeatedly instead of recording the result. +3. **Actual incidents.** Check [status.posthog.com](https://status.posthog.com). If nothing there, it's #1 or #2. And even here: with [server-side local evaluation](/docs/feature-flags/local-evaluation.md), the SDK evaluates against cached flag definitions locally. PostHog being unreachable doesn't affect flags that are already cached. Add per-flag safe defaults and even a cold start during an outage returns usable values. An incident only breaks your flags if your implementation depends on PostHog being available at request time – which is itself an implementation gap you can close. -To have your feature flags available immediately, you can initialize PostHog with precomputed values until it has had a chance to fetch them. This is called bootstrapping. +## Resolve identity before evaluating flags -See our docs on [bootstrapping](/docs/feature-flags/bootstrapping.md) for more details on how to do this. +Identity is the most common input problem. The hash takes two inputs: the flag key (stable) and the distinct ID (your responsibility). If the distinct ID is wrong at the moment of evaluation, the hash produces a valid but incorrect result. The flag is working perfectly – it just answered a question about the wrong person. -## 6\. Naming tips +If you call `identify()` after a flag has already been evaluated, the flag likely used the anonymous ID. The hash produced one result. After `identify()`, the distinct ID changes, the hash changes, and the next evaluation returns a different variant. 
You see a "flip" – but it's because the input changed. -Good naming conventions for your flags makes them easier to understand and maintain. Below are tips for naming your flags: +Call [`identify()`](/docs/product-analytics/identify.md) before any flag evaluation in auth flows. If you can't guarantee that timing, [bootstrap](/docs/feature-flags/bootstrapping.md) with the stable ID at init so the distinct ID is correct from the first millisecond. See [keeping flag evaluations stable](/docs/feature-flags/stable-identity-for-flags.md) for the full picture. -- **Use descriptive names.** For example, `is_v2_billing_dashboard_enabled` is much clearer than `is_dashboard_enabled`. +**SPA-specific timing.** In single-page applications, `identify()` and event captures often fire from different components during the same navigation in unpredictable order. The SDK updates the `distinct_id` synchronously when `identify()` runs, but if `capture()` was called first in the same execution frame, that event uses the anonymous ID. The fix: call `identify()` before the navigation that mounts post-auth components – in Vue, in `beforeEach` before `next()`; in React, before `navigate()`, not in a `useEffect` inside the target route. -- **Use name "types".** This helps organize them and makes their purpose clear. Types might include experiments, releases, and permissions. For example, instead of `new-billing`, they would be `new-billing-experiment` or `new-billing-release`. +### Don't rely on flag persistence to fix identity gaps -- **Name flags to reflect their return type.** For example, `is_premium_user` for a boolean, `enabled_integrations` for an array, or `selected_theme` for a single string. 
+If you've enabled [experience continuity](/docs/feature-flags/creating-feature-flags.md#persisting-feature-flags-across-authentication-steps-optional) (flag persistence across authentication), consider what that's telling you: the distinct ID is changing during your session, and you need PostHog to paper over it. -- **Use positive language for boolean flags.** For example, `is_premium_user` instead of `is_not_premium_user`. This helps avoid double negatives when checking the flag value (e.g. `if !is_not_premium_user` is confusing). +That comes at a cost. Experience continuity couples flag evaluation with database writes – every evaluation reads and writes to the DB to persist the result. This mixes two concerns (evaluation and storage) that should be separate, and it's the source of [known bugs](https://github.com/PostHog/posthog-js/issues/2623) where values can still change after `identify()`. It also means no support for [local evaluation](/docs/feature-flags/local-evaluation.md) and slower flag responses. -## 7\. Roll out progressively +The better fix is to make persistence unnecessary. Use [device bucketing](/docs/feature-flags/device-bucketing.md) for single-device consistency, or design your identity flow so the distinct ID [never changes](/docs/feature-flags/stable-identity-for-flags.md). If you need experience continuity today, treat it as a migration path toward proper [identity resolution](/docs/product-analytics/identity-resolution.md), not a permanent solution. The identity gap it papers over is the root cause of the most common flag issues – closing that gap eliminates the need for persistence entirely. -When testing a change behind a feature flag, it is best to roll it out to a small group of users and increase that group over time. This is also known as a [phased rollout](/tutorials/phased-rollout.md). It enables you to identify any potential issues ahead of the full release. 
+## Evaluation architecture -For example, at PostHog we often roll out the flag to just the responsible developer. It then moves on to the internal team, then beta users, and finally into a full rollout. This enables us to [test in production](/product-engineers/testing-in-production.md), get multiple rounds of feedback, identify issues, and polish the feature before the full release. +How you evaluate flags – where, when, and how often – determines the complexity of your implementation. Most workarounds exist because the evaluation happens in the wrong place or at the wrong time. -## 8\. Clean up after yourself +### Evaluate once, not continuously -Leaving flags in your code for too long can confuse future developers and create technical debt, especially if it's already rolled out and integrated. Be sure to remove stale flags once they are completely rolled out or no longer needed. +A flag is a one-time signal, not a continuous dependency. Evaluate it once, record the result, serve from that recording. Re-evaluate only when something meaningful changes. -When you have many flags to clean up, use [bulk delete](/docs/feature-flags/creating-feature-flags.md#deleting-feature-flags-in-bulk) to select and delete multiple flags at once. Select flags using checkboxes (shift-click to select a range), or filter by name or status and use "select all matching" to select all flags that match your criteria. PostHog validates that flags aren't used by experiments, early access features, or other dependent flags before deletion. +Re-evaluating on every request creates cost, latency, and the conditions for "flipping" – you're giving the system repeated chances to return a different answer when inputs shift. That's not a bug. That's the pure function doing its job with different inputs. -## 9\. Fallback to working code +- **Feature rollouts** – Evaluate when your user's state changes (upgrades, joins a cohort). Between triggers, your app already knows the answer. 
+- **Experiments** – One exposure per user. Evaluate once, record the variant, deliver that experience. If a user flips variants, the app re-asked a question it already had the answer to. -It's possible that a feature flag will return an [unexpected value](/docs/feature-flags/common-questions.md#my-feature-flag-called-events-show-none-empty-string-or-false-instead-of-my-variant-names). For example, if the flag is disabled or failed to load due to a network error. +### Evaluate where the data lives -In this case, its best to check that the feature flag returns a valid expected value before using it. If it isn't, fallback to working code. +If you target a flag on `plan_type: "pro"`, your app originally told PostHog this person is Pro. Evaluate the flag from the same place that has that knowledge – your server. PostHog does the distribution math; your app provides the targeting data. -## 10\. Use dependencies for complex rollouts +If you evaluate client-side instead, the SDK needs to fetch that property from PostHog's servers – a round-trip to look up what you originally sent it. Any flag check before that completes evaluates against incomplete data. -For sophisticated feature rollouts, consider using [feature flag dependencies](/docs/feature-flags/dependencies.md) where one flag's activation depends on another flag's state. This is useful for: +If you must evaluate client-side, use [`setPersonPropertiesForFlags()`](/docs/feature-flags/property-overrides.md#manual-overrides-with-setpersonpropertiesforflags) to set properties locally before evaluation. This avoids the round-trip when you already have the data in the browser. -- Enabling complex features only after foundational components are active -- Running experiments only on users with specific features enabled -- Creating safety mechanisms where critical flags must be enabled first +Property targeting is fine – just understand that the further the evaluation is from the data, the more async complexity you take on. 
-When using dependencies, keep the dependency chains simple and avoid circular dependencies. +### Server-side local evaluation is the recommended default -## 11\. Reducing your bill +[Server-side local evaluation](/docs/feature-flags/local-evaluation.md) is where the pure function model is fully legible: -We aim to be significantly cheaper than our competitors. To help you reduce your bill, we've created a [dedicated guide](/docs/feature-flags/cutting-costs.md) to estimating and reducing your feature flag costs. +- **All inputs are explicit.** You pass the distinct ID and properties directly. When something's wrong, you log what you passed. +- **Your data is right there.** User plan, account type, permissions – it's in your database at request time. No syncing, no fetching. +- **No workarounds needed.** Client-side evaluation often requires `setPersonPropertiesForFlags()`, `onFeatureFlags()`, and bootstrap to bridge the gap between where the data lives and where the flag evaluates. Server-side eliminates the gap. -## 12\. Consistent flag evaluations across frontend and backend +Client-side evaluation is right when you need properties only available in the browser, real-time flag changes, or have no server. But you're trading explicit inputs for implicit ones, and every workaround bridges that gap. -For feature flags with flag persistence enabled and used across both your frontend and backend, you will need to do one of the following to ensure the evaluation result of the flag is consistent between both environments: +### Have the value before you need it -1. Identify the user on the frontend and use the same identified distinct ID when evaluating the flag on the backend. +Client-side flag evaluation is async – the SDK needs to fetch values from PostHog. Any flag check before that completes returns `undefined`, not `false`. + +**[Bootstrap](/docs/feature-flags/bootstrapping.md) is the fix.** Evaluate flags server-side and pass values to the client at init. 
The value exists before the page renders – no gap, no flicker. + +If you can't bootstrap, use `onFeatureFlags()` to wait. This means you will need a loading state (spinner, skeleton) until flags arrive – it prevents showing the wrong variant but doesn't prevent a delay. + +### `undefined` is not "flag is off" nor `false` + +`posthog.getFeatureFlag()` returns `undefined` before flags load. That means "not evaluated yet," not "flag is off." JavaScript PostHog AI ```javascript -// Frontend: Identify the user -posthog.identify('user123') -// Backend: Use the same distinct ID -const flagValue = await posthog.getFeatureFlag('my-flag', 'user123') +// Returns undefined before flags load – not false +if (posthog.getFeatureFlag('my-experiment') === 'test') { + // Never runs during the loading gap +} ``` -2. If you are unable to call identify on the frontend, and only have access to the anonymous distinct ID when evaluating the flag on the backend, you can include the anonymous distinct ID as a person property override in the `getFeatureFlag` call. +Handle it with [bootstrap](/docs/feature-flags/bootstrapping.md) (preferred) or `onFeatureFlags()` (adds a loading state). You can check the current identity with `posthog.get_distinct_id()`. + +The "not loaded yet" return value varies across SDKs – some return `undefined`/`nil`/`None`, others return `false` or a `defaultValue` you provide. Don't assume that a falsy return means the flag is off. Check your SDK's documentation for the exact return type of `getFeatureFlag()` and `isFeatureEnabled()` when flags haven't loaded, and handle that state explicitly. If your goal is to programmatically check whether a flag exists at all, use the [Feature Flags API](/docs/api/feature-flags.md) to query flag definitions directly. + +## Flag hygiene + +Flags are infrastructure. Like any infrastructure, they accumulate cost when left unattended. These are operational practices that keep your flag system clean and efficient. 
+ +### Choose a flag type intentionally + +Every flag in PostHog is configured as client-side, server-side, or both via [evaluation contexts](/docs/feature-flags/evaluation-contexts.md). New flags default to "server and client" – this exists for backwards compatibility (it's how all flags worked before we added evaluation contexts) and to avoid blocking users who haven't thought about their implementation yet. It's a safe starting point, not a recommendation. + +If all your flags are set to both, that usually means the decision was never revisited after creation – and you're paying for client-side evaluation on flags that only need to exist on your server. + +Pick the context based on where the flag is actually consumed. Server-side flags that drive backend logic don't need client SDKs fetching and evaluating them. Client-side flags for UI variations don't need server-side evaluation. "Both" is valid when a flag genuinely needs to be evaluated in both contexts – but it should be a deliberate choice, not the default you never changed. + +### Clean up flags that have done their job + +A flag set to 100% of all users with no property targeting is a flag that has finished its job. It's always returning the same value – the rollout is complete, the experiment concluded, the feature is live. But the flag is still being evaluated on every request, still included in every SDK payload, and still adding to your bill. + +Remove the flag and hardcode the winning path. If you're not ready to remove it from code, at least archive it in PostHog so it stops being evaluated. Stale flags are the most common source of unnecessary flag evaluation cost. See [cutting costs](/docs/feature-flags/cutting-costs.md) for more. + +**An idea worth considering:** design your flag code paths with an escape hatch you control outside of PostHog. 
For example, a "gate flag" that your server reads once every 30 seconds (not per user) – when it's `true`, the feature is fully rolled out and your code skips the per-user flag evaluation entirely. This means you stop paying for per-user evaluations the moment a rollout is complete, even before you remove the flag from code. And you can dial it back by setting the gate flag to `false`. This is also another application of "evaluate once, not continuously" – if you cache flag results (even for stale flags), your per-evaluation costs drop to near zero while you wait for the code cleanup. + +### Disable client-side evaluation for server-side flags + +If a flag is evaluated server-side and the result is passed to your frontend through your own application logic, the client SDK doesn't need to evaluate it independently. But unless you explicitly disable the flag on the client, the SDK will still fetch and evaluate it – duplicating work your server already did. + +This is the practical extension of "evaluate once, not continuously." Your server evaluates, your application propagates the result, and the client consumes it as application state rather than re-asking PostHog. Disable flags in the client SDK that your server already handles to eliminate redundant evaluation and reduce payload size. + +### Use a reverse proxy + +Ad blockers can disable your Feature Flags, leading to users seeing the wrong version of your app or missing a rollout. Deploy a [reverse proxy](/docs/advanced/proxy.md) so requests go through your own domain. PostHog offers a free [managed reverse proxy](/docs/advanced/proxy/managed-reverse-proxy.md), or you can run your own. + +### Call your flag in as few places as possible + +The more locations a flag appears in your code, the more likely it is to cause problems – a developer removes it in one place but forgets another. 
If you use a flag in multiple places, wrap it in a single function: JavaScript PostHog AI ```javascript -// Frontend: Get the anonymous ID (before identify is called) -const anonId = posthog.getAnonymousId() -// Backend: Pass the anonymous ID as a person property override -const flagValue = await posthog.getFeatureFlag( - 'my-flag', - 'user123', // identified distinct ID - { - personProperties: { - $anon_distinct_id: anonId - } - } -) +function useBetaFeature() { + return posthog.isFeatureEnabled('beta-feature') +} ``` +### Name flags clearly + +Good naming makes flags easier to understand and maintain: + +- **Use descriptive names.** `is_v2_billing_dashboard_enabled` is clearer than `is_dashboard_enabled`. +- **Use name types.** Suffix with the purpose: `new-billing-experiment`, `new-billing-release`. +- **Reflect the return type.** `is_premium_user` for a boolean, `selected_theme` for a string. +- **Use positive language for booleans.** `is_premium_user` instead of `is_not_premium_user` – avoids double negatives. + +### Roll out progressively + +Start at 5-10% of users, monitor metrics, then gradually increase. This is a [phased rollout](/tutorials/phased-rollout.md). At PostHog, we typically roll out to the developer first, then the internal team, then beta users, then everyone. + +### Use dependencies for complex rollouts + +[Feature flag dependencies](/docs/feature-flags/dependencies.md) let one flag's activation depend on another flag's state – useful for enabling complex features only after foundational components are active, or running Experiments only on users with specific features enabled. Keep dependency chains simple and avoid circular dependencies. + +### Reducing your bill + +Stale flags are the most common source of unnecessary cost. Beyond cleaning up flags, see our [dedicated guide to cutting costs](/docs/feature-flags/cutting-costs.md) for estimating and reducing your feature flag bill. 
+ +## Further reading + +- [Identity resolution](/docs/product-analytics/identity-resolution.md) – how PostHog resolves who a user is +- [Keeping flag evaluations stable](/docs/feature-flags/stable-identity-for-flags.md) – preventing the hash input from changing across auth transitions +- [Local evaluation](/docs/feature-flags/local-evaluation.md) – server-side evaluation for explicit input control +- [Bootstrapping](/docs/feature-flags/bootstrapping.md) – having flag values before the page renders + ### Community questions Ask a question diff --git a/skills/instrument-feature-flags/references/dotnet.md b/skills/instrument-feature-flags/references/dotnet.md index ad5b69f..33e9ab8 100644 --- a/skills/instrument-feature-flags/references/dotnet.md +++ b/skills/instrument-feature-flags/references/dotnet.md @@ -1,4 +1,4 @@ -# .NET feature flags installation - Docs +# .NET Feature Flags installation - Docs The `PostHog` package supports any .NET platform that targets .NET Standard 2.1 or .NET 8+, including MAUI, Blazor, and console applications. The `PostHog.AspNetCore` package provides additional conveniences for ASP.NET Core applications such as streamlined registration, request-scoped caching, and integration with [.NET Feature Management](https://learn.microsoft.com/en-us/azure/azure-app-configuration/feature-management-dotnet-reference). diff --git a/skills/instrument-feature-flags/references/elixir.md b/skills/instrument-feature-flags/references/elixir.md index ac75276..c427127 100644 --- a/skills/instrument-feature-flags/references/elixir.md +++ b/skills/instrument-feature-flags/references/elixir.md @@ -1,4 +1,4 @@ -# Elixir feature flags installation - Docs +# Elixir Feature Flags installation - Docs > This library was built by the community but it's being maintained by the PostHog core team since v1.0.0. Thank you to [Nick Kezhaya](https://github.com/nkezhaya) for building it originally. 
Thank you to [Alex Martsinovich](https://github.com/martosaur) for contributing v2.0.0. diff --git a/skills/instrument-feature-flags/references/flutter.md b/skills/instrument-feature-flags/references/flutter.md index 46075df..949e208 100644 --- a/skills/instrument-feature-flags/references/flutter.md +++ b/skills/instrument-feature-flags/references/flutter.md @@ -1,4 +1,4 @@ -# Flutter feature flags installation - Docs +# Flutter Feature Flags installation - Docs 1. 1 diff --git a/skills/instrument-feature-flags/references/go.md b/skills/instrument-feature-flags/references/go.md index c567a03..8388ae8 100644 --- a/skills/instrument-feature-flags/references/go.md +++ b/skills/instrument-feature-flags/references/go.md @@ -1,4 +1,4 @@ -# Go feature flags installation - Docs +# Go Feature Flags installation - Docs 1. 1 diff --git a/skills/instrument-feature-flags/references/ios.md b/skills/instrument-feature-flags/references/ios.md index 2d71760..7443d8a 100644 --- a/skills/instrument-feature-flags/references/ios.md +++ b/skills/instrument-feature-flags/references/ios.md @@ -1,4 +1,4 @@ -# iOS feature flags installation - Docs +# iOS Feature Flags installation - Docs 1. 1 diff --git a/skills/instrument-feature-flags/references/java.md b/skills/instrument-feature-flags/references/java.md index b70bfa6..986b656 100644 --- a/skills/instrument-feature-flags/references/java.md +++ b/skills/instrument-feature-flags/references/java.md @@ -1,4 +1,4 @@ -# Java feature flags installation - Docs +# Java Feature Flags installation - Docs The best way to install the PostHog Java SDK is with a build system like Gradle or Maven. This ensures you can easily upgrade to the latest versions. 
diff --git a/skills/instrument-feature-flags/references/nodejs.md b/skills/instrument-feature-flags/references/nodejs.md index 788295e..ed920bb 100644 --- a/skills/instrument-feature-flags/references/nodejs.md +++ b/skills/instrument-feature-flags/references/nodejs.md @@ -1,4 +1,4 @@ -# Node.js feature flags installation - Docs +# Node.js Feature Flags installation - Docs 1. 1 diff --git a/skills/instrument-feature-flags/references/php.md b/skills/instrument-feature-flags/references/php.md index acf0ed4..7498c7b 100644 --- a/skills/instrument-feature-flags/references/php.md +++ b/skills/instrument-feature-flags/references/php.md @@ -1,4 +1,4 @@ -# PHP feature flags installation - Docs +# PHP Feature Flags installation - Docs 1. 1 diff --git a/skills/instrument-feature-flags/references/python.md b/skills/instrument-feature-flags/references/python.md index e7f631d..77b82fa 100644 --- a/skills/instrument-feature-flags/references/python.md +++ b/skills/instrument-feature-flags/references/python.md @@ -1,4 +1,4 @@ -# Python feature flags installation - Docs +# Python Feature Flags installation - Docs 1. 1 diff --git a/skills/instrument-feature-flags/references/react-native.md b/skills/instrument-feature-flags/references/react-native.md index 6d69595..ba03d99 100644 --- a/skills/instrument-feature-flags/references/react-native.md +++ b/skills/instrument-feature-flags/references/react-native.md @@ -1,4 +1,4 @@ -# React Native feature flags installation - Docs +# React Native Feature Flags installation - Docs 1. 1 diff --git a/skills/instrument-feature-flags/references/react.md b/skills/instrument-feature-flags/references/react.md index 97e8857..5e0244d 100644 --- a/skills/instrument-feature-flags/references/react.md +++ b/skills/instrument-feature-flags/references/react.md @@ -1,4 +1,4 @@ -# React feature flags installation - Docs +# React Feature Flags installation - Docs 1. 
1 diff --git a/skills/instrument-feature-flags/references/ruby.md b/skills/instrument-feature-flags/references/ruby.md index 282b610..a02bc6c 100644 --- a/skills/instrument-feature-flags/references/ruby.md +++ b/skills/instrument-feature-flags/references/ruby.md @@ -1,4 +1,4 @@ -# Ruby feature flags installation - Docs +# Ruby Feature Flags installation - Docs 1. 1 diff --git a/skills/instrument-feature-flags/references/rust.md b/skills/instrument-feature-flags/references/rust.md index 9cc1afd..99fb2c7 100644 --- a/skills/instrument-feature-flags/references/rust.md +++ b/skills/instrument-feature-flags/references/rust.md @@ -1,4 +1,4 @@ -# Rust feature flags installation - Docs +# Rust Feature Flags installation - Docs Install the `posthog-rs` crate by adding it to your `Cargo.toml`. diff --git a/skills/instrument-feature-flags/references/web.md b/skills/instrument-feature-flags/references/web.md index 0e009da..47ecad4 100644 --- a/skills/instrument-feature-flags/references/web.md +++ b/skills/instrument-feature-flags/references/web.md @@ -1,4 +1,4 @@ -# Web feature flags installation - Docs +# Web Feature Flags installation - Docs 1. 1 diff --git a/skills/instrument-integration/SKILL.md b/skills/instrument-integration/SKILL.md index 474e619..614f08e 100644 --- a/skills/instrument-integration/SKILL.md +++ b/skills/instrument-integration/SKILL.md @@ -57,7 +57,7 @@ STEP 7: Verify and clean up. 
- `references/EXAMPLE-react-react-router-7-data.md` - react-react-router-7-data example project code - `references/EXAMPLE-react-react-router-7-declarative.md` - react-react-router-7-declarative example project code - `references/EXAMPLE-react-vite.md` - react-vite example project code -- `references/EXAMPLE-nuxt-3.6.md` - nuxt-3.6 example project code +- `references/EXAMPLE-nuxt-3-6.md` - nuxt-3-6 example project code - `references/EXAMPLE-nuxt-4.md` - nuxt-4 example project code - `references/EXAMPLE-vue-3.md` - vue-3 example project code - `references/EXAMPLE-react-tanstack-router-file-based.md` - react-tanstack-router-file-based example project code diff --git a/skills/instrument-integration/references/EXAMPLE-nuxt-3.6.md b/skills/instrument-integration/references/EXAMPLE-nuxt-3-6.md similarity index 99% rename from skills/instrument-integration/references/EXAMPLE-nuxt-3.6.md rename to skills/instrument-integration/references/EXAMPLE-nuxt-3-6.md index 07717a9..fee7d00 100644 --- a/skills/instrument-integration/references/EXAMPLE-nuxt-3.6.md +++ b/skills/instrument-integration/references/EXAMPLE-nuxt-3-6.md @@ -1,7 +1,7 @@ -# PostHog nuxt-3.6 Example Project +# PostHog nuxt-3-6 Example Project Repository: https://github.com/PostHog/context-mill -Path: basics/nuxt-3.6 +Path: basics/nuxt-3-6 --- diff --git a/skills/instrument-integration/references/EXAMPLE-nuxt-4.md b/skills/instrument-integration/references/EXAMPLE-nuxt-4.md index 5c35df1..1cbed60 100644 --- a/skills/instrument-integration/references/EXAMPLE-nuxt-4.md +++ b/skills/instrument-integration/references/EXAMPLE-nuxt-4.md @@ -13,7 +13,7 @@ This is a [Nuxt 4](https://nuxt.com) example demonstrating PostHog integration w Nuxt 4 supports the `@posthog/nuxt` package, which provides automatic PostHog integration with built-in error tracking, source map uploads, and simplified configuration. This is the recommended approach for Nuxt 4+. 
-For Nuxt 3.0 - 3.6, you must use the `posthog-js` and `posthog-node` packages directly instead. See the [Nuxt 3.6 example](../nuxt-3.6) for that approach. +For Nuxt 3.0 - 3.6, you must use the `posthog-js` and `posthog-node` packages directly instead. See the [Nuxt 3.6 example](../nuxt-3-6) for that approach. ## Features diff --git a/skills/instrument-integration/references/js.md b/skills/instrument-integration/references/js.md index 3dbff8a..50a7ad3 100644 --- a/skills/instrument-integration/references/js.md +++ b/skills/instrument-integration/references/js.md @@ -230,7 +230,7 @@ The `defaults` is a date, such as `2026-01-30`, for a configuration snapshot use > > See our guide on [identifying users](/docs/getting-started/identify-users.md) for how to set this up. -Once you've installed PostHog, see our [features doc](/docs/libraries/js/features.md) for more information about what you can do with it. +Once you've installed PostHog, see our [features doc](/docs/libraries/js/features.md) for more information about what you can do with it. You can also install the [PostHog VS Code extension](/docs/vscode-extension.md) to see live analytics, flag status, and session replay links inline in your code. ### Track across marketing website & app diff --git a/skills/instrument-integration/references/posthog-js.md b/skills/instrument-integration/references/posthog-js.md index c5485c6..ea09b29 100644 --- a/skills/instrument-integration/references/posthog-js.md +++ b/skills/instrument-integration/references/posthog-js.md @@ -1,6 +1,6 @@ # PostHog JavaScript Web SDK -**SDK Version:** 1.364.6 +**SDK Version:** 1.369.0 Posthog-js allows you to automatically capture usage and send events to PostHog. @@ -11,6 +11,7 @@ Posthog-js allows you to automatically capture usage and send events to PostHog. 
- Capture - Surveys - Error tracking +- Logs - LLM analytics - Privacy - Session replay @@ -43,6 +44,24 @@ posthog.PostHog(); --- +#### clearIdentity() + +**Release Tag:** public + +Clear HMAC-based identity verification, reverting to anonymous mode. + +### Returns + +- `void` + +### Examples + +```ts +posthog.clearIdentity() +``` + +--- + #### get_explicit_consent_status() **Release Tag:** public @@ -120,6 +139,33 @@ posthog.push(['register', { a: 'b' }]); --- +#### setIdentity() + +**Release Tag:** public + +Set HMAC-based identity verification. + +**Notes:** + +When set, products like conversations use server-verified identity (distinct_id + HMAC hash) instead of anonymous session identifiers. The hash should be computed server-side as HMAC-SHA256 of the distinct_id using the project's API secret. + +### Parameters + +- **`distinctId`** (`string`) - The verified user distinct_id +- **`hash`** (`string`) - HMAC-SHA256 of distinctId using the project API secret + +### Returns + +- `void` + +### Examples + +```ts +posthog.setIdentity('user_123', 'a1b2c3d4e5f6...') +``` + +--- + ### Identification methods #### alias() @@ -1131,6 +1177,34 @@ posthog.stopExceptionAutocapture() --- +### Logs methods + +#### captureLog() + +**Release Tag:** public + +Capture a log entry and send it to the PostHog logs endpoint. 
+ +### Parameters + +- **`options`** (`CaptureLogOptions`) - The log entry options + +### Returns + +- `void` + +### Examples + +```ts +posthog.captureLog({ + body: 'checkout completed', + level: 'info', + attributes: { order_id: 'ord_789', amount_cents: 4999 }, +}) +``` + +--- + ### LLM analytics methods #### captureTraceFeedback() diff --git a/skills/instrument-integration/references/posthog-node.md b/skills/instrument-integration/references/posthog-node.md index 1e8d98b..8511a58 100644 --- a/skills/instrument-integration/references/posthog-node.md +++ b/skills/instrument-integration/references/posthog-node.md @@ -1,6 +1,6 @@ # PostHog Node.js SDK -**SDK Version:** 5.28.11 +**SDK Version:** 5.29.2 PostHog Node.js SDK allows you to capture events and send them to PostHog from your Node.js applications. diff --git a/skills/instrument-integration/references/python.md b/skills/instrument-integration/references/python.md index 11dc3b2..1db6612 100644 --- a/skills/instrument-integration/references/python.md +++ b/skills/instrument-integration/references/python.md @@ -740,7 +740,7 @@ Python PostHog AI ```python -posthog.debug = True # + +posthog.debug = True ``` ## Disabling requests during tests diff --git a/skills/instrument-llm-analytics/references/anthropic.md b/skills/instrument-llm-analytics/references/anthropic.md index c55ae2a..0d5b1fd 100644 --- a/skills/instrument-llm-analytics/references/anthropic.md +++ b/skills/instrument-llm-analytics/references/anthropic.md @@ -2,120 +2,109 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics works best with our Python and Node SDKs. + **Full working examples** - PostHog AI - - ### Python - - ```bash - pip install posthog - ``` - - ### Node - - ```bash - npm install @posthog/ai posthog-node - ``` - -2. 
2 - - ## Install the Anthropic SDK - - Required + See the complete [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-anthropic) and [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-anthropic) examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Node.js wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-anthropic) and [Python wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-anthropic) examples. - Install the Anthropic SDK. The PostHog SDK instruments your LLM calls by wrapping the Anthropic client. The PostHog SDK **does not** proxy your calls. + Install the OpenTelemetry SDK, the Anthropic instrumentation, and the Anthropic SDK. PostHog AI ### Python ```bash - pip install anthropic + pip install anthropic opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-anthropic ``` ### Node ```bash - npm install @anthropic-ai/sdk + npm install @anthropic-ai/sdk @posthog/ai @opentelemetry/sdk-node @opentelemetry/resources @traceloop/instrumentation-anthropic ``` - **Proxy note** - - These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the background to send the data. You can also use LLM analytics with other SDKs or our API, but you will need to capture the data in the right format. See the schema in the [manual capture section](/docs/llm-analytics/installation/manual-capture.md) for more details. - -3. 3 +2. 2 - ## Initialize PostHog and the Anthropic wrapper + ## Set up OpenTelemetry tracing Required - Initialize PostHog with your project token and host from [your project settings](https://app.posthog.com/settings/project), then pass it to our Anthropic wrapper. + Configure OpenTelemetry to auto-instrument Anthropic SDK calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. 
PostHog AI ### Python ```python - from posthog.ai.anthropic import Anthropic - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - client = Anthropic( - api_key="sk-ant-api...", # Replace with your Anthropic API key - posthog_client=posthog # This is an optional parameter. If it is not provided, a default client will be used. + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.anthropic import AnthropicInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) ) + trace.set_tracer_provider(provider) + AnthropicInstrumentor().instrument() ``` ### Node ```typescript - import { Anthropic } from '@posthog/ai' - import { PostHog } from 'posthog-node' - const phClient = new PostHog( - '', - { host: 'https://us.i.posthog.com' } - ) - const client = new Anthropic({ - apiKey: 'sk-ant-api...', // Replace with your Anthropic API key - posthog: phClient + import { NodeSDK } from '@opentelemetry/sdk-node' + import { resourceFromAttributes } from '@opentelemetry/resources' + import { PostHogSpanProcessor } from '@posthog/ai/otel' + import { AnthropicInstrumentation } from '@traceloop/instrumentation-anthropic' + const sdk = new NodeSDK({ + resource: resourceFromAttributes({ + 'service.name': 'my-app', + 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog + foo: 'bar', // custom properties are passed through + }), + spanProcessors: [ + new PostHogSpanProcessor({ + apiKey: '', + host: 'https://us.i.posthog.com', + }), + ], 
+ instrumentations: [new AnthropicInstrumentation()], }) + sdk.start() ``` - > **Note:** This also works with the `AsyncAnthropic` client as well as `AnthropicBedrock`, `AnthropicVertex`, and the async versions of those. - -4. 4 +3. 3 - ## Call Anthropic LLMs + ## Call Anthropic Required - Now, when you use the Anthropic SDK to call LLMs, PostHog automatically captures an `$ai_generation` event. You can enrich the event with additional data such as the trace ID, distinct ID, custom properties, groups, and privacy mode options. + Now, when you use the Anthropic SDK to call LLMs, PostHog automatically captures `$ai_generation` events via the OpenTelemetry instrumentation. PostHog AI ### Python ```python + import anthropic + client = anthropic.Anthropic(api_key="sk-ant-api...") response = client.messages.create( - model="claude-3-opus-20240229", + model="claude-sonnet-4-20250514", + max_tokens=1024, messages=[ - { - "role": "user", - "content": "Tell me a fun fact about hedgehogs" - } + {"role": "user", "content": "Tell me a fun fact about hedgehogs"} ], - posthog_distinct_id="user_123", # optional - posthog_trace_id="trace_123", # optional - posthog_properties={"conversation_id": "abc123", "paid": True}, # optional - posthog_groups={"company": "company_id_in_your_db"}, # optional - posthog_privacy_mode=False # optional ) print(response.content[0].text) ``` @@ -123,30 +112,19 @@ ### Node ```typescript + import Anthropic from '@anthropic-ai/sdk' + const client = new Anthropic({ apiKey: 'sk-ant-api...' 
}) const response = await client.messages.create({ - model: "claude-3-5-sonnet-latest", - messages: [ - { - role: "user", - content: "Tell me a fun fact about hedgehogs" - } - ], - posthogDistinctId: "user_123", // optional - posthogTraceId: "trace_123", // optional - posthogProperties: { conversationId: "abc123", paid: true }, // optional - posthogGroups: { company: "company_id_in_your_db" }, // optional - posthogPrivacyMode: false // optional + model: 'claude-sonnet-4-20250514', + max_tokens: 1024, + messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }], }) console.log(response.content[0].text) - phClient.shutdown() ``` - > **Notes:** - > - > - This also works when message streams are used (e.g. `stream=True` or `client.messages.stream(...)`). - > - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request. - > - > See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. + > **Note:** This also works with the `AsyncAnthropic` client as well as `AnthropicBedrock`, `AnthropicVertex`, and the async versions of those. + + > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. You can expect captured `$ai_generation` events to have the following properties: @@ -163,7 +141,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -175,7 +153,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 
4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/autogen.md b/skills/instrument-llm-analytics/references/autogen.md index b158ed8..7b0ca91 100644 --- a/skills/instrument-llm-analytics/references/autogen.md +++ b/skills/instrument-llm-analytics/references/autogen.md @@ -2,69 +2,66 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - Setting up analytics starts with installing the PostHog SDK. The AutoGen integration uses PostHog's OpenAI wrapper since AutoGen uses OpenAI under the hood. + **Full working examples** + + See the complete [Python example](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-autogen) on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Python wrapper example](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-autogen). + + Install the OpenTelemetry SDK, the OpenAI instrumentation, and AutoGen. ```bash - pip install posthog + pip install autogen-agentchat "autogen-ext[openai]" openai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-openai-v2 ``` 2. 2 - ## Install AutoGen + ## Set up OpenTelemetry tracing Required - Install AutoGen with the OpenAI extension. PostHog instruments your LLM calls by wrapping the OpenAI client that AutoGen uses internally. + Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. 
- ```bash - pip install "autogen-agentchat" "autogen-ext[openai]" + ```python + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) + ) + trace.set_tracer_provider(provider) + OpenAIInstrumentor().instrument() ``` 3. 3 - ## Initialize PostHog and AutoGen + ## Run your agents Required - Initialize PostHog with your project token and host from [your project settings](https://app.posthog.com/settings/project), then create a PostHog OpenAI wrapper and pass it to AutoGen's `OpenAIChatCompletionClient`. + Use AutoGen as normal. PostHog automatically captures an `$ai_generation` event for each LLM call made through the OpenAI SDK that AutoGen uses internally. ```python import asyncio - from posthog.ai.openai import OpenAI - from posthog import Posthog from autogen_agentchat.agents import AssistantAgent from autogen_ext.models.openai import OpenAIChatCompletionClient - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - openai_client = OpenAI( - api_key="your_openai_api_key", - posthog_client=posthog, - ) model_client = OpenAIChatCompletionClient( model="gpt-4o", - openai_client=openai_client, + api_key="your_openai_api_key", ) - ``` - - **How this works** - - AutoGen's `OpenAIChatCompletionClient` accepts a custom OpenAI client via the `openai_client` parameter. PostHog's `OpenAI` wrapper is a proper subclass of `openai.OpenAI`, so it works directly. 
PostHog captures `$ai_generation` events automatically without proxying your calls. - -4. 4 - - ## Run your agents - - Required - - Use AutoGen as normal. PostHog automatically captures an `$ai_generation` event for each LLM call made through the wrapped OpenAI client. - - ```python agent = AssistantAgent("assistant", model_client=model_client) async def main(): result = await agent.run(task="Say 'Hello World!'") @@ -73,6 +70,8 @@ asyncio.run(main()) ``` + > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. + You can expect captured `$ai_generation` events to have the following properties: | Property | Description | @@ -88,7 +87,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -100,7 +99,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/azure-openai.md b/skills/instrument-llm-analytics/references/azure-openai.md index ac566e9..12c8b74 100644 --- a/skills/instrument-llm-analytics/references/azure-openai.md +++ b/skills/instrument-llm-analytics/references/azure-openai.md @@ -2,102 +2,112 @@ 1. 1 - ## Install the SDKs + ## Install dependencies Required - Setting up analytics starts with installing the PostHog and OpenAI SDKs. + **Full working examples** + + See the complete [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-azure-openai) and [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-azure-openai) examples on GitHub. 
If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Node.js wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-azure-openai) and [Python wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-azure-openai) examples. + + Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK. PostHog AI ### Python ```bash - pip install posthog openai + pip install openai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-openai-v2 ``` ### Node ```bash - npm install @posthog/ai posthog-node openai + npm install openai @posthog/ai @opentelemetry/sdk-node @opentelemetry/resources @opentelemetry/instrumentation-openai ``` 2. 2 - ## Initialize PostHog and Azure OpenAI client + ## Set up OpenTelemetry tracing Required - We call Azure OpenAI through PostHog's AzureOpenAI wrapper to capture all the details of the call. Initialize PostHog with your PostHog project token and host from [your project settings](https://app.posthog.com/settings/project), then pass the PostHog client along with your Azure OpenAI config (the API key, API version, and endpoint) to our AzureOpenAI wrapper. + Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. 
PostHog AI ### Python ```python - from posthog.ai.openai import AzureOpenAI - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - client = AzureOpenAI( - api_key="", - api_version="2024-10-21", - azure_endpoint="https://.openai.azure.com", - posthog_client=posthog + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) ) + trace.set_tracer_provider(provider) + OpenAIInstrumentor().instrument() ``` ### Node ```typescript - import { AzureOpenAI } from '@posthog/ai' - import { PostHog } from 'posthog-node' - const phClient = new PostHog( - '', - { host: 'https://us.i.posthog.com' } - ); - const client = new AzureOpenAI({ - apiKey: '', - apiVersion: '2024-10-21', - endpoint: 'https://.openai.azure.com', - posthog: phClient, - }); - // ... your code here ... 
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent - phClient.shutdown() + import { NodeSDK } from '@opentelemetry/sdk-node' + import { resourceFromAttributes } from '@opentelemetry/resources' + import { PostHogSpanProcessor } from '@posthog/ai/otel' + import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai' + const sdk = new NodeSDK({ + resource: resourceFromAttributes({ + 'service.name': 'my-app', + 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog + foo: 'bar', // custom properties are passed through + }), + spanProcessors: [ + new PostHogSpanProcessor({ + apiKey: '', + host: 'https://us.i.posthog.com', + }), + ], + instrumentations: [new OpenAIInstrumentation()], + }) + sdk.start() ``` - > **Note:** This also works with the `AsyncAzureOpenAI` client. - - **Proxy note** - - These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the background to send the data. You can also use LLM analytics with other SDKs or our API, but you will need to capture the data in the right format. See the schema in the [manual capture section](/docs/llm-analytics/installation/manual-capture.md) for more details. - 3. 3 ## Call Azure OpenAI Required - Now, when you call Azure OpenAI, PostHog automatically captures an `$ai_generation` event. You can also capture or modify additional properties with the distinct ID, trace ID, properties, groups, and privacy mode parameters. + Now, when you call Azure OpenAI, PostHog automatically captures `$ai_generation` events via the OpenTelemetry instrumentation. 
PostHog AI ### Python ```python + import openai + client = openai.AzureOpenAI( + api_key="", + api_version="2024-10-21", + azure_endpoint="https://.openai.azure.com", + ) response = client.chat.completions.create( model="", messages=[ {"role": "user", "content": "Tell me a fun fact about hedgehogs"} ], - posthog_distinct_id="user_123", # optional - posthog_trace_id="trace_123", # optional - posthog_properties={"conversation_id": "abc123", "paid": True}, # optional - posthog_groups={"company": "company_id_in_your_db"}, # optional - posthog_privacy_mode=False # optional ) print(response.choices[0].message.content) ``` @@ -105,24 +115,20 @@ ### Node ```typescript - const completion = await client.chat.completions.create({ - model: "", - messages: [{ role: "user", content: "Tell me a fun fact about hedgehogs" }], - posthogDistinctId: "user_123", // optional - posthogTraceId: "trace_123", // optional - posthogProperties: { conversation_id: "abc123", paid: true }, // optional - posthogGroups: { company: "company_id_in_your_db" }, // optional - posthogPrivacyMode: false // optional - }); - console.log(completion.choices[0].message.content) + import { AzureOpenAI } from 'openai' + const client = new AzureOpenAI({ + apiKey: '', + apiVersion: '2024-10-21', + endpoint: 'https://.openai.azure.com', + }) + const response = await client.chat.completions.create({ + model: '', + messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }], + }) + console.log(response.choices[0].message.content) ``` - > **Notes:** - > - > - This works with responses where `stream=True`. - > - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request. - > - > See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. + > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. 
See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. You can expect captured `$ai_generation` events to have the following properties: diff --git a/skills/instrument-llm-analytics/references/cerebras.md b/skills/instrument-llm-analytics/references/cerebras.md index 7012a19..370cf3f 100644 --- a/skills/instrument-llm-analytics/references/cerebras.md +++ b/skills/instrument-llm-analytics/references/cerebras.md @@ -2,122 +2,112 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics works best with our Python and Node SDKs. + **Full working examples** - PostHog AI - - ### Python - - ```bash - pip install posthog - ``` - - ### Node - - ```bash - npm install @posthog/ai posthog-node - ``` - -2. 2 - - ## Install the OpenAI SDK - - Required + See the complete [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-cerebras) and [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-cerebras) examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Node.js wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-cerebras) and [Python wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-cerebras) examples. - Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI client. The PostHog SDK **does not** proxy your calls. + Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK. PostHog AI ### Python ```bash - pip install openai + pip install openai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-openai-v2 ``` ### Node ```bash - npm install openai + npm install openai @posthog/ai @opentelemetry/sdk-node @opentelemetry/resources @opentelemetry/instrumentation-openai ``` -3. 3 +2. 
2 - ## Initialize PostHog and OpenAI client + ## Set up OpenTelemetry tracing Required - We call Cerebras through the OpenAI client and generate a response. We'll use PostHog's OpenAI provider to capture all the details of the call. Initialize PostHog with your PostHog project token and host from [your project settings](https://app.posthog.com/settings/project), then pass the PostHog client along with the Cerebras config (the base URL and API key) to our OpenAI wrapper. + Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. PostHog AI ### Python ```python - from posthog.ai.openai import OpenAI - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - client = OpenAI( - base_url="https://api.cerebras.ai/v1", - api_key="", - posthog_client=posthog + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) ) + trace.set_tracer_provider(provider) + OpenAIInstrumentor().instrument() ``` ### Node ```typescript - import { OpenAI } from '@posthog/ai' - import { PostHog } from 'posthog-node' - const phClient = new PostHog( - '', - { host: 'https://us.i.posthog.com' } - ); - const openai = new OpenAI({ - baseURL: 'https://api.cerebras.ai/v1', - apiKey: '', - posthog: phClient, - }); - // ... your code here ... 
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent - phClient.shutdown() + import { NodeSDK } from '@opentelemetry/sdk-node' + import { resourceFromAttributes } from '@opentelemetry/resources' + import { PostHogSpanProcessor } from '@posthog/ai/otel' + import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai' + const sdk = new NodeSDK({ + resource: resourceFromAttributes({ + 'service.name': 'my-app', + 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog + foo: 'bar', // custom properties are passed through + }), + spanProcessors: [ + new PostHogSpanProcessor({ + apiKey: '', + host: 'https://us.i.posthog.com', + }), + ], + instrumentations: [new OpenAIInstrumentation()], + }) + sdk.start() ``` - > **Note:** This also works with the `AsyncOpenAI` client. - - **Proxy note** - - These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the background to send the data. You can also use LLM analytics with other SDKs or our API, but you will need to capture the data in the right format. See the schema in the [manual capture section](/docs/llm-analytics/installation/manual-capture.md) for more details. - -4. 4 +3. 3 ## Call Cerebras Required - Now, when you call Cerebras with the OpenAI SDK, PostHog automatically captures an `$ai_generation` event. You can also capture or modify additional properties with the distinct ID, trace ID, properties, groups, and privacy mode parameters. + Now, when you use the OpenAI SDK to call Cerebras, PostHog automatically captures `$ai_generation` events via the OpenTelemetry instrumentation. 
PostHog AI ### Python ```python + import openai + client = openai.OpenAI( + base_url="https://api.cerebras.ai/v1", + api_key="", + ) response = client.chat.completions.create( model="llama-3.3-70b", + max_completion_tokens=1024, messages=[ {"role": "user", "content": "Tell me a fun fact about hedgehogs"} ], - posthog_distinct_id="user_123", # optional - posthog_trace_id="trace_123", # optional - posthog_properties={"conversation_id": "abc123", "paid": True}, # optional - posthog_groups={"company": "company_id_in_your_db"}, # optional - posthog_privacy_mode=False # optional ) print(response.choices[0].message.content) ``` @@ -125,25 +115,20 @@ ### Node ```typescript - const completion = await openai.chat.completions.create({ - model: "llama-3.3-70b", - messages: [{ role: "user", content: "Tell me a fun fact about hedgehogs" }], - posthogDistinctId: "user_123", // optional - posthogTraceId: "trace_123", // optional - posthogProperties: { conversation_id: "abc123", paid: true }, // optional - posthogGroups: { company: "company_id_in_your_db" }, // optional - posthogPrivacyMode: false // optional - }); - console.log(completion.choices[0].message.content) + import OpenAI from 'openai' + const client = new OpenAI({ + baseURL: 'https://api.cerebras.ai/v1', + apiKey: '', + }) + const response = await client.chat.completions.create({ + model: 'llama-3.3-70b', + max_completion_tokens: 1024, + messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }], + }) + console.log(response.choices[0].message.content) ``` - > **Notes:** - > - > - We also support the old `chat.completions` API. - > - This works with responses where `stream=True`. - > - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request. - > - > See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. + > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. 
See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. You can expect captured `$ai_generation` events to have the following properties: @@ -160,7 +145,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -172,7 +157,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/cohere.md b/skills/instrument-llm-analytics/references/cohere.md index 8cd1220..e9f8ce2 100644 --- a/skills/instrument-llm-analytics/references/cohere.md +++ b/skills/instrument-llm-analytics/references/cohere.md @@ -2,122 +2,112 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics works best with our Python and Node SDKs. + **Full working examples** - PostHog AI - - ### Python - - ```bash - pip install posthog - ``` - - ### Node - - ```bash - npm install @posthog/ai posthog-node - ``` - -2. 2 - - ## Install the OpenAI SDK - - Required + See the complete [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-cohere) and [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-cohere) examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Node.js wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-cohere) and [Python wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-cohere) examples. - Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI client. 
The PostHog SDK **does not** proxy your calls. + Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK. PostHog AI ### Python ```bash - pip install openai + pip install openai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-openai-v2 ``` ### Node ```bash - npm install openai + npm install openai @posthog/ai @opentelemetry/sdk-node @opentelemetry/resources @opentelemetry/instrumentation-openai ``` -3. 3 +2. 2 - ## Initialize PostHog and OpenAI client + ## Set up OpenTelemetry tracing Required - We call Cohere through the OpenAI-compatible API and generate a response. We'll use PostHog's OpenAI provider to capture all the details of the call. Initialize PostHog with your PostHog project token and host from [your project settings](https://app.posthog.com/settings/project), then pass the PostHog client along with the Cohere config (the base URL and API key) to our OpenAI wrapper. + Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. 
PostHog AI ### Python ```python - from posthog.ai.openai import OpenAI - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - client = OpenAI( - base_url="https://api.cohere.ai/compatibility/v1", - api_key="", - posthog_client=posthog + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) ) + trace.set_tracer_provider(provider) + OpenAIInstrumentor().instrument() ``` ### Node ```typescript - import { OpenAI } from '@posthog/ai' - import { PostHog } from 'posthog-node' - const phClient = new PostHog( - '', - { host: 'https://us.i.posthog.com' } - ); - const openai = new OpenAI({ - baseURL: 'https://api.cohere.ai/compatibility/v1', - apiKey: '', - posthog: phClient, - }); - // ... your code here ... 
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent - phClient.shutdown() + import { NodeSDK } from '@opentelemetry/sdk-node' + import { resourceFromAttributes } from '@opentelemetry/resources' + import { PostHogSpanProcessor } from '@posthog/ai/otel' + import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai' + const sdk = new NodeSDK({ + resource: resourceFromAttributes({ + 'service.name': 'my-app', + 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog + foo: 'bar', // custom properties are passed through + }), + spanProcessors: [ + new PostHogSpanProcessor({ + apiKey: '', + host: 'https://us.i.posthog.com', + }), + ], + instrumentations: [new OpenAIInstrumentation()], + }) + sdk.start() ``` - > **Note:** This also works with the `AsyncOpenAI` client. - - **Proxy note** - - These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the background to send the data. You can also use LLM analytics with other SDKs or our API, but you will need to capture the data in the right format. See the schema in the [manual capture section](/docs/llm-analytics/installation/manual-capture.md) for more details. - -4. 4 +3. 3 ## Call Cohere Required - Now, when you call Cohere with the OpenAI SDK, PostHog automatically captures an `$ai_generation` event. You can also capture or modify additional properties with the distinct ID, trace ID, properties, groups, and privacy mode parameters. + Now, when you use the OpenAI SDK to call Cohere, PostHog automatically captures `$ai_generation` events via the OpenTelemetry instrumentation. 
PostHog AI ### Python ```python + import openai + client = openai.OpenAI( + base_url="https://api.cohere.ai/compatibility/v1", + api_key="", + ) response = client.chat.completions.create( model="command-a-03-2025", + max_completion_tokens=1024, messages=[ {"role": "user", "content": "Tell me a fun fact about hedgehogs"} ], - posthog_distinct_id="user_123", # optional - posthog_trace_id="trace_123", # optional - posthog_properties={"conversation_id": "abc123", "paid": True}, # optional - posthog_groups={"company": "company_id_in_your_db"}, # optional - posthog_privacy_mode=False # optional ) print(response.choices[0].message.content) ``` @@ -125,25 +115,20 @@ ### Node ```typescript - const completion = await openai.chat.completions.create({ - model: "command-a-03-2025", - messages: [{ role: "user", content: "Tell me a fun fact about hedgehogs" }], - posthogDistinctId: "user_123", // optional - posthogTraceId: "trace_123", // optional - posthogProperties: { conversation_id: "abc123", paid: true }, // optional - posthogGroups: { company: "company_id_in_your_db" }, // optional - posthogPrivacyMode: false // optional - }); - console.log(completion.choices[0].message.content) + import OpenAI from 'openai' + const client = new OpenAI({ + baseURL: 'https://api.cohere.ai/compatibility/v1', + apiKey: '', + }) + const response = await client.chat.completions.create({ + model: 'command-a-03-2025', + max_completion_tokens: 1024, + messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }], + }) + console.log(response.choices[0].message.content) ``` - > **Notes:** - > - > - We also support the old `chat.completions` API. - > - This works with responses where `stream=True`. - > - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request. - > - > See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. 
+ > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. You can expect captured `$ai_generation` events to have the following properties: @@ -160,7 +145,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -172,7 +157,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/deepseek.md b/skills/instrument-llm-analytics/references/deepseek.md index e12d208..4df49f8 100644 --- a/skills/instrument-llm-analytics/references/deepseek.md +++ b/skills/instrument-llm-analytics/references/deepseek.md @@ -2,122 +2,112 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics works best with our Python and Node SDKs. + **Full working examples** - PostHog AI - - ### Python - - ```bash - pip install posthog - ``` - - ### Node - - ```bash - npm install @posthog/ai posthog-node - ``` - -2. 2 - - ## Install the OpenAI SDK - - Required + See the complete [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-deepseek) and [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-deepseek) examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Node.js wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-deepseek) and [Python wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-deepseek) examples. 
- Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI client. The PostHog SDK **does not** proxy your calls. + Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK. PostHog AI ### Python ```bash - pip install openai + pip install openai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-openai-v2 ``` ### Node ```bash - npm install openai + npm install openai @posthog/ai @opentelemetry/sdk-node @opentelemetry/resources @opentelemetry/instrumentation-openai ``` -3. 3 +2. 2 - ## Initialize PostHog and OpenAI client + ## Set up OpenTelemetry tracing Required - We call DeepSeek through the OpenAI client and generate a response. We'll use PostHog's OpenAI provider to capture all the details of the call. Initialize PostHog with your PostHog project token and host from [your project settings](https://app.posthog.com/settings/project), then pass the PostHog client along with the DeepSeek config (the base URL and API key) to our OpenAI wrapper. + Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. 
PostHog AI ### Python ```python - from posthog.ai.openai import OpenAI - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - client = OpenAI( - base_url="https://api.deepseek.com", - api_key="", - posthog_client=posthog + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) ) + trace.set_tracer_provider(provider) + OpenAIInstrumentor().instrument() ``` ### Node ```typescript - import { OpenAI } from '@posthog/ai' - import { PostHog } from 'posthog-node' - const phClient = new PostHog( - '', - { host: 'https://us.i.posthog.com' } - ); - const openai = new OpenAI({ - baseURL: 'https://api.deepseek.com', - apiKey: '', - posthog: phClient, - }); - // ... your code here ... 
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent - phClient.shutdown() + import { NodeSDK } from '@opentelemetry/sdk-node' + import { resourceFromAttributes } from '@opentelemetry/resources' + import { PostHogSpanProcessor } from '@posthog/ai/otel' + import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai' + const sdk = new NodeSDK({ + resource: resourceFromAttributes({ + 'service.name': 'my-app', + 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog + foo: 'bar', // custom properties are passed through + }), + spanProcessors: [ + new PostHogSpanProcessor({ + apiKey: '', + host: 'https://us.i.posthog.com', + }), + ], + instrumentations: [new OpenAIInstrumentation()], + }) + sdk.start() ``` - > **Note:** This also works with the `AsyncOpenAI` client. - - **Proxy note** - - These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the background to send the data. You can also use LLM analytics with other SDKs or our API, but you will need to capture the data in the right format. See the schema in the [manual capture section](/docs/llm-analytics/installation/manual-capture.md) for more details. - -4. 4 +3. 3 ## Call DeepSeek Required - Now, when you call DeepSeek with the OpenAI SDK, PostHog automatically captures an `$ai_generation` event. You can also capture or modify additional properties with the distinct ID, trace ID, properties, groups, and privacy mode parameters. + Now, when you use the OpenAI SDK to call DeepSeek, PostHog automatically captures `$ai_generation` events via the OpenTelemetry instrumentation. 
PostHog AI ### Python ```python + import openai + client = openai.OpenAI( + base_url="https://api.deepseek.com", + api_key="", + ) response = client.chat.completions.create( model="deepseek-chat", + max_completion_tokens=1024, messages=[ {"role": "user", "content": "Tell me a fun fact about hedgehogs"} ], - posthog_distinct_id="user_123", # optional - posthog_trace_id="trace_123", # optional - posthog_properties={"conversation_id": "abc123", "paid": True}, # optional - posthog_groups={"company": "company_id_in_your_db"}, # optional - posthog_privacy_mode=False # optional ) print(response.choices[0].message.content) ``` @@ -125,25 +115,20 @@ ### Node ```typescript - const completion = await openai.chat.completions.create({ - model: "deepseek-chat", - messages: [{ role: "user", content: "Tell me a fun fact about hedgehogs" }], - posthogDistinctId: "user_123", // optional - posthogTraceId: "trace_123", // optional - posthogProperties: { conversation_id: "abc123", paid: true }, // optional - posthogGroups: { company: "company_id_in_your_db" }, // optional - posthogPrivacyMode: false // optional - }); - console.log(completion.choices[0].message.content) + import OpenAI from 'openai' + const client = new OpenAI({ + baseURL: 'https://api.deepseek.com', + apiKey: '', + }) + const response = await client.chat.completions.create({ + model: 'deepseek-chat', + max_completion_tokens: 1024, + messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }], + }) + console.log(response.choices[0].message.content) ``` - > **Notes:** - > - > - We also support the old `chat.completions` API. - > - This works with responses where `stream=True`. - > - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request. - > - > See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. + > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. 
See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. You can expect captured `$ai_generation` events to have the following properties: @@ -160,7 +145,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -172,7 +157,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/fireworks-ai.md b/skills/instrument-llm-analytics/references/fireworks-ai.md index f0d8219..11b552e 100644 --- a/skills/instrument-llm-analytics/references/fireworks-ai.md +++ b/skills/instrument-llm-analytics/references/fireworks-ai.md @@ -2,122 +2,112 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics works best with our Python and Node SDKs. + **Full working examples** - PostHog AI - - ### Python - - ```bash - pip install posthog - ``` - - ### Node - - ```bash - npm install @posthog/ai posthog-node - ``` - -2. 2 - - ## Install the OpenAI SDK - - Required + See the complete [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-fireworks-ai) and [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-fireworks-ai) examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Node.js wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-fireworks-ai) and [Python wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-fireworks-ai) examples. - Install the OpenAI SDK. 
The PostHog SDK instruments your LLM calls by wrapping the OpenAI client. The PostHog SDK **does not** proxy your calls. + Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK. PostHog AI ### Python ```bash - pip install openai + pip install openai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-openai-v2 ``` ### Node ```bash - npm install openai + npm install openai @posthog/ai @opentelemetry/sdk-node @opentelemetry/resources @opentelemetry/instrumentation-openai ``` -3. 3 +2. 2 - ## Initialize PostHog and OpenAI client + ## Set up OpenTelemetry tracing Required - We call Fireworks AI through the OpenAI client and generate a response. We'll use PostHog's OpenAI provider to capture all the details of the call. Initialize PostHog with your PostHog project token and host from [your project settings](https://app.posthog.com/settings/project), then pass the PostHog client along with the Fireworks AI config (the base URL and API key) to our OpenAI wrapper. + Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. 
PostHog AI ### Python ```python - from posthog.ai.openai import OpenAI - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - client = OpenAI( - base_url="https://api.fireworks.ai/inference/v1", - api_key="", - posthog_client=posthog + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) ) + trace.set_tracer_provider(provider) + OpenAIInstrumentor().instrument() ``` ### Node ```typescript - import { OpenAI } from '@posthog/ai' - import { PostHog } from 'posthog-node' - const phClient = new PostHog( - '', - { host: 'https://us.i.posthog.com' } - ); - const openai = new OpenAI({ - baseURL: 'https://api.fireworks.ai/inference/v1', - apiKey: '', - posthog: phClient, - }); - // ... your code here ... 
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent - phClient.shutdown() + import { NodeSDK } from '@opentelemetry/sdk-node' + import { resourceFromAttributes } from '@opentelemetry/resources' + import { PostHogSpanProcessor } from '@posthog/ai/otel' + import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai' + const sdk = new NodeSDK({ + resource: resourceFromAttributes({ + 'service.name': 'my-app', + 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog + foo: 'bar', // custom properties are passed through + }), + spanProcessors: [ + new PostHogSpanProcessor({ + apiKey: '', + host: 'https://us.i.posthog.com', + }), + ], + instrumentations: [new OpenAIInstrumentation()], + }) + sdk.start() ``` - > **Note:** This also works with the `AsyncOpenAI` client. - - **Proxy note** - - These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the background to send the data. You can also use LLM analytics with other SDKs or our API, but you will need to capture the data in the right format. See the schema in the [manual capture section](/docs/llm-analytics/installation/manual-capture.md) for more details. - -4. 4 +3. 3 ## Call Fireworks AI Required - Now, when you call Fireworks AI with the OpenAI SDK, PostHog automatically captures an `$ai_generation` event. You can also capture or modify additional properties with the distinct ID, trace ID, properties, groups, and privacy mode parameters. + Now, when you use the OpenAI SDK to call Fireworks AI, PostHog automatically captures `$ai_generation` events via the OpenTelemetry instrumentation. 
PostHog AI ### Python ```python + import openai + client = openai.OpenAI( + base_url="https://api.fireworks.ai/inference/v1", + api_key="", + ) response = client.chat.completions.create( model="accounts/fireworks/models/llama-v3p3-70b-instruct", + max_completion_tokens=1024, messages=[ {"role": "user", "content": "Tell me a fun fact about hedgehogs"} ], - posthog_distinct_id="user_123", # optional - posthog_trace_id="trace_123", # optional - posthog_properties={"conversation_id": "abc123", "paid": True}, # optional - posthog_groups={"company": "company_id_in_your_db"}, # optional - posthog_privacy_mode=False # optional ) print(response.choices[0].message.content) ``` @@ -125,25 +115,20 @@ ### Node ```typescript - const completion = await openai.chat.completions.create({ - model: "accounts/fireworks/models/llama-v3p3-70b-instruct", - messages: [{ role: "user", content: "Tell me a fun fact about hedgehogs" }], - posthogDistinctId: "user_123", // optional - posthogTraceId: "trace_123", // optional - posthogProperties: { conversation_id: "abc123", paid: true }, // optional - posthogGroups: { company: "company_id_in_your_db" }, // optional - posthogPrivacyMode: false // optional - }); - console.log(completion.choices[0].message.content) + import OpenAI from 'openai' + const client = new OpenAI({ + baseURL: 'https://api.fireworks.ai/inference/v1', + apiKey: '', + }) + const response = await client.chat.completions.create({ + model: 'accounts/fireworks/models/llama-v3p3-70b-instruct', + max_completion_tokens: 1024, + messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }], + }) + console.log(response.choices[0].message.content) ``` - > **Notes:** - > - > - We also support the old `chat.completions` API. - > - This works with responses where `stream=True`. - > - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request. 
- > - > See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. + > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. You can expect captured `$ai_generation` events to have the following properties: @@ -160,7 +145,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -172,7 +157,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/groq.md b/skills/instrument-llm-analytics/references/groq.md index 99cf7d4..1993669 100644 --- a/skills/instrument-llm-analytics/references/groq.md +++ b/skills/instrument-llm-analytics/references/groq.md @@ -2,122 +2,112 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics works best with our Python and Node SDKs. + **Full working examples** - PostHog AI - - ### Python - - ```bash - pip install posthog - ``` - - ### Node - - ```bash - npm install @posthog/ai posthog-node - ``` - -2. 2 - - ## Install the OpenAI SDK - - Required + See the complete [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-groq) and [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-groq) examples on GitHub. 
If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Node.js wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-groq) and [Python wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-groq) examples. - Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI client. The PostHog SDK **does not** proxy your calls. + Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK. PostHog AI ### Python ```bash - pip install openai + pip install openai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-openai-v2 ``` ### Node ```bash - npm install openai + npm install openai @posthog/ai @opentelemetry/sdk-node @opentelemetry/resources @opentelemetry/instrumentation-openai ``` -3. 3 +2. 2 - ## Initialize PostHog and OpenAI client + ## Set up OpenTelemetry tracing Required - We call Groq through the OpenAI client and generate a response. We'll use PostHog's OpenAI provider to capture all the details of the call. Initialize PostHog with your PostHog project token and host from [your project settings](https://app.posthog.com/settings/project), then pass the PostHog client along with the Groq config (the base URL and API key) to our OpenAI wrapper. + Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. 
PostHog AI ### Python ```python - from posthog.ai.openai import OpenAI - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - client = OpenAI( - base_url="https://api.groq.com/openai/v1", - api_key="", - posthog_client=posthog + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) ) + trace.set_tracer_provider(provider) + OpenAIInstrumentor().instrument() ``` ### Node ```typescript - import { OpenAI } from '@posthog/ai' - import { PostHog } from 'posthog-node' - const phClient = new PostHog( - '', - { host: 'https://us.i.posthog.com' } - ); - const openai = new OpenAI({ - baseURL: 'https://api.groq.com/openai/v1', - apiKey: '', - posthog: phClient, - }); - // ... your code here ... 
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent - phClient.shutdown() + import { NodeSDK } from '@opentelemetry/sdk-node' + import { resourceFromAttributes } from '@opentelemetry/resources' + import { PostHogSpanProcessor } from '@posthog/ai/otel' + import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai' + const sdk = new NodeSDK({ + resource: resourceFromAttributes({ + 'service.name': 'my-app', + 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog + foo: 'bar', // custom properties are passed through + }), + spanProcessors: [ + new PostHogSpanProcessor({ + apiKey: '', + host: 'https://us.i.posthog.com', + }), + ], + instrumentations: [new OpenAIInstrumentation()], + }) + sdk.start() ``` - > **Note:** This also works with the `AsyncOpenAI` client. - - **Proxy note** - - These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the background to send the data. You can also use LLM analytics with other SDKs or our API, but you will need to capture the data in the right format. See the schema in the [manual capture section](/docs/llm-analytics/installation/manual-capture.md) for more details. - -4. 4 +3. 3 ## Call Groq Required - Now, when you call Groq with the OpenAI SDK, PostHog automatically captures an `$ai_generation` event. You can also capture or modify additional properties with the distinct ID, trace ID, properties, groups, and privacy mode parameters. + Now, when you use the OpenAI SDK to call Groq, PostHog automatically captures `$ai_generation` events via the OpenTelemetry instrumentation. 
PostHog AI ### Python ```python + import openai + client = openai.OpenAI( + base_url="https://api.groq.com/openai/v1", + api_key="", + ) response = client.chat.completions.create( model="llama-3.3-70b-versatile", + max_completion_tokens=1024, messages=[ {"role": "user", "content": "Tell me a fun fact about hedgehogs"} ], - posthog_distinct_id="user_123", # optional - posthog_trace_id="trace_123", # optional - posthog_properties={"conversation_id": "abc123", "paid": True}, # optional - posthog_groups={"company": "company_id_in_your_db"}, # optional - posthog_privacy_mode=False # optional ) print(response.choices[0].message.content) ``` @@ -125,25 +115,20 @@ ### Node ```typescript - const completion = await openai.chat.completions.create({ - model: "llama-3.3-70b-versatile", - messages: [{ role: "user", content: "Tell me a fun fact about hedgehogs" }], - posthogDistinctId: "user_123", // optional - posthogTraceId: "trace_123", // optional - posthogProperties: { conversation_id: "abc123", paid: true }, // optional - posthogGroups: { company: "company_id_in_your_db" }, // optional - posthogPrivacyMode: false // optional - }); - console.log(completion.choices[0].message.content) + import OpenAI from 'openai' + const client = new OpenAI({ + baseURL: 'https://api.groq.com/openai/v1', + apiKey: '', + }) + const response = await client.chat.completions.create({ + model: 'llama-3.3-70b-versatile', + max_completion_tokens: 1024, + messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }], + }) + console.log(response.choices[0].message.content) ``` - > **Notes:** - > - > - We also support the old `chat.completions` API. - > - This works with responses where `stream=True`. - > - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request. - > - > See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. 
+ > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. You can expect captured `$ai_generation` events to have the following properties: @@ -160,7 +145,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -172,7 +157,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/helicone.md b/skills/instrument-llm-analytics/references/helicone.md index 8292d0a..04700bf 100644 --- a/skills/instrument-llm-analytics/references/helicone.md +++ b/skills/instrument-llm-analytics/references/helicone.md @@ -2,126 +2,115 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required + **Full working examples** + + See the complete [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-helicone) and [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-helicone) examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Node.js wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-helicone) and [Python wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-helicone) examples. + **About Helicone** Helicone is an open-source AI gateway that provides access to 100+ LLM providers through an OpenAI-compatible interface. The Helicone API key handles authentication and routing to your chosen model provider. - Setting up analytics starts with installing the PostHog SDK for your language. 
LLM analytics works best with our Python and Node SDKs. + Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK. PostHog AI ### Python ```bash - pip install posthog + pip install openai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-openai-v2 ``` ### Node ```bash - npm install @posthog/ai posthog-node + npm install openai @posthog/ai @opentelemetry/sdk-node @opentelemetry/resources @opentelemetry/instrumentation-openai ``` 2. 2 - ## Install the OpenAI SDK - - Required - - Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI client. The PostHog SDK **does not** proxy your calls. - - PostHog AI - - ### Python - - ```bash - pip install openai - ``` - - ### Node - - ```bash - npm install openai - ``` - -3. 3 - - ## Initialize PostHog and OpenAI client + ## Set up OpenTelemetry tracing Required - We call Helicone through the OpenAI client and generate a response. We'll use PostHog's OpenAI provider to capture all the details of the call. Initialize PostHog with your PostHog project token and host from [your project settings](https://app.posthog.com/settings/project), then pass the PostHog client along with the Helicone config (the base URL and API key) to our OpenAI wrapper. + Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. 
PostHog AI ### Python ```python - from posthog.ai.openai import OpenAI - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - client = OpenAI( - base_url="https://ai-gateway.helicone.ai/", - api_key="", - posthog_client=posthog + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) ) + trace.set_tracer_provider(provider) + OpenAIInstrumentor().instrument() ``` ### Node ```typescript - import { OpenAI } from '@posthog/ai' - import { PostHog } from 'posthog-node' - const phClient = new PostHog( - '', - { host: 'https://us.i.posthog.com' } - ); - const openai = new OpenAI({ - baseURL: 'https://ai-gateway.helicone.ai/', - apiKey: '', - posthog: phClient, - }); - // ... your code here ... 
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent - phClient.shutdown() + import { NodeSDK } from '@opentelemetry/sdk-node' + import { resourceFromAttributes } from '@opentelemetry/resources' + import { PostHogSpanProcessor } from '@posthog/ai/otel' + import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai' + const sdk = new NodeSDK({ + resource: resourceFromAttributes({ + 'service.name': 'my-app', + 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog + foo: 'bar', // custom properties are passed through + }), + spanProcessors: [ + new PostHogSpanProcessor({ + apiKey: '', + host: 'https://us.i.posthog.com', + }), + ], + instrumentations: [new OpenAIInstrumentation()], + }) + sdk.start() ``` - > **Note:** This also works with the `AsyncOpenAI` client. - - **Proxy note** - - These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the background to send the data. You can also use LLM analytics with other SDKs or our API, but you will need to capture the data in the right format. See the schema in the [manual capture section](/docs/llm-analytics/installation/manual-capture.md) for more details. - -4. 4 +3. 3 ## Call Helicone Required - Now, when you call Helicone with the OpenAI SDK, PostHog automatically captures an `$ai_generation` event. You can also capture or modify additional properties with the distinct ID, trace ID, properties, groups, and privacy mode parameters. + Now, when you call Helicone with the OpenAI SDK, PostHog automatically captures `$ai_generation` events via the OpenTelemetry instrumentation. 
PostHog AI ### Python ```python + import openai + client = openai.OpenAI( + base_url="https://ai-gateway.helicone.ai/", + api_key="", + ) response = client.chat.completions.create( model="gpt-5-mini", messages=[ {"role": "user", "content": "Tell me a fun fact about hedgehogs"} ], - posthog_distinct_id="user_123", # optional - posthog_trace_id="trace_123", # optional - posthog_properties={"conversation_id": "abc123", "paid": True}, # optional - posthog_groups={"company": "company_id_in_your_db"}, # optional - posthog_privacy_mode=False # optional ) print(response.choices[0].message.content) ``` @@ -129,25 +118,19 @@ ### Node ```typescript - const completion = await openai.chat.completions.create({ - model: "gpt-5-mini", - messages: [{ role: "user", content: "Tell me a fun fact about hedgehogs" }], - posthogDistinctId: "user_123", // optional - posthogTraceId: "trace_123", // optional - posthogProperties: { conversation_id: "abc123", paid: true }, // optional - posthogGroups: { company: "company_id_in_your_db" }, // optional - posthogPrivacyMode: false // optional - }); - console.log(completion.choices[0].message.content) + import OpenAI from 'openai' + const client = new OpenAI({ + baseURL: 'https://ai-gateway.helicone.ai/', + apiKey: '', + }) + const response = await client.chat.completions.create({ + model: 'gpt-5-mini', + messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }], + }) + console.log(response.choices[0].message.content) ``` - > **Notes:** - > - > - We also support the old `chat.completions` API. - > - This works with responses where `stream=True`. - > - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request. - > - > See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. + > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. 
See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. You can expect captured `$ai_generation` events to have the following properties: @@ -164,7 +147,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -176,7 +159,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/hugging-face.md b/skills/instrument-llm-analytics/references/hugging-face.md index 4889a8c..0f491b3 100644 --- a/skills/instrument-llm-analytics/references/hugging-face.md +++ b/skills/instrument-llm-analytics/references/hugging-face.md @@ -2,122 +2,112 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics works best with our Python and Node SDKs. + **Full working examples** - PostHog AI - - ### Python - - ```bash - pip install posthog - ``` - - ### Node - - ```bash - npm install @posthog/ai posthog-node - ``` - -2. 2 - - ## Install the OpenAI SDK - - Required + See the complete [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-hugging-face) and [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-hugging-face) examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Node.js wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-hugging-face) and [Python wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-hugging-face) examples. - Install the OpenAI SDK. 
The PostHog SDK instruments your LLM calls by wrapping the OpenAI client. The PostHog SDK **does not** proxy your calls. + Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK. PostHog AI ### Python ```bash - pip install openai + pip install openai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-openai-v2 ``` ### Node ```bash - npm install openai + npm install openai @posthog/ai @opentelemetry/sdk-node @opentelemetry/resources @opentelemetry/instrumentation-openai ``` -3. 3 +2. 2 - ## Initialize PostHog and OpenAI client + ## Set up OpenTelemetry tracing Required - We call Hugging Face Inference API through the OpenAI-compatible endpoint and generate a response. We'll use PostHog's OpenAI provider to capture all the details of the call. Initialize PostHog with your PostHog project token and host from [your project settings](https://app.posthog.com/settings/project), then pass the PostHog client along with the Hugging Face config (the base URL and API key) to our OpenAI wrapper. + Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. 
PostHog AI ### Python ```python - from posthog.ai.openai import OpenAI - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - client = OpenAI( - base_url="https://router.huggingface.co/v1/", - api_key="", - posthog_client=posthog + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) ) + trace.set_tracer_provider(provider) + OpenAIInstrumentor().instrument() ``` ### Node ```typescript - import { OpenAI } from '@posthog/ai' - import { PostHog } from 'posthog-node' - const phClient = new PostHog( - '', - { host: 'https://us.i.posthog.com' } - ); - const openai = new OpenAI({ - baseURL: 'https://router.huggingface.co/v1/', - apiKey: '', - posthog: phClient, - }); - // ... your code here ... 
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent - phClient.shutdown() + import { NodeSDK } from '@opentelemetry/sdk-node' + import { resourceFromAttributes } from '@opentelemetry/resources' + import { PostHogSpanProcessor } from '@posthog/ai/otel' + import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai' + const sdk = new NodeSDK({ + resource: resourceFromAttributes({ + 'service.name': 'my-app', + 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog + foo: 'bar', // custom properties are passed through + }), + spanProcessors: [ + new PostHogSpanProcessor({ + apiKey: '', + host: 'https://us.i.posthog.com', + }), + ], + instrumentations: [new OpenAIInstrumentation()], + }) + sdk.start() ``` - > **Note:** This also works with the `AsyncOpenAI` client. - - **Proxy note** - - These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the background to send the data. You can also use LLM analytics with other SDKs or our API, but you will need to capture the data in the right format. See the schema in the [manual capture section](/docs/llm-analytics/installation/manual-capture.md) for more details. - -4. 4 +3. 3 ## Call Hugging Face Required - Now, when you call Hugging Face with the OpenAI SDK, PostHog automatically captures an `$ai_generation` event. You can also capture or modify additional properties with the distinct ID, trace ID, properties, groups, and privacy mode parameters. + Now, when you use the OpenAI SDK to call Hugging Face, PostHog automatically captures `$ai_generation` events via the OpenTelemetry instrumentation. 
PostHog AI ### Python ```python + import openai + client = openai.OpenAI( + base_url="https://router.huggingface.co/v1/", + api_key="", + ) response = client.chat.completions.create( model="meta-llama/Llama-3.3-70B-Instruct", + max_completion_tokens=1024, messages=[ {"role": "user", "content": "Tell me a fun fact about hedgehogs"} ], - posthog_distinct_id="user_123", # optional - posthog_trace_id="trace_123", # optional - posthog_properties={"conversation_id": "abc123", "paid": True}, # optional - posthog_groups={"company": "company_id_in_your_db"}, # optional - posthog_privacy_mode=False # optional ) print(response.choices[0].message.content) ``` @@ -125,25 +115,20 @@ ### Node ```typescript - const completion = await openai.chat.completions.create({ - model: "meta-llama/Llama-3.3-70B-Instruct", - messages: [{ role: "user", content: "Tell me a fun fact about hedgehogs" }], - posthogDistinctId: "user_123", // optional - posthogTraceId: "trace_123", // optional - posthogProperties: { conversation_id: "abc123", paid: true }, // optional - posthogGroups: { company: "company_id_in_your_db" }, // optional - posthogPrivacyMode: false // optional - }); - console.log(completion.choices[0].message.content) + import OpenAI from 'openai' + const client = new OpenAI({ + baseURL: 'https://router.huggingface.co/v1/', + apiKey: '', + }) + const response = await client.chat.completions.create({ + model: 'meta-llama/Llama-3.3-70B-Instruct', + max_completion_tokens: 1024, + messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }], + }) + console.log(response.choices[0].message.content) ``` - > **Notes:** - > - > - We also support the old `chat.completions` API. - > - This works with responses where `stream=True`. - > - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request. - > - > See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. 
+ > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. You can expect captured `$ai_generation` events to have the following properties: @@ -160,7 +145,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -172,7 +157,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/instructor.md b/skills/instrument-llm-analytics/references/instructor.md index 0b32352..5d83d3d 100644 --- a/skills/instrument-llm-analytics/references/instructor.md +++ b/skills/instrument-llm-analytics/references/instructor.md @@ -2,148 +2,138 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics works best with our Python and Node SDKs. + **Full working examples** - PostHog AI - - ### Python - - ```bash - pip install posthog - ``` - - ### Node - - ```bash - npm install @posthog/ai posthog-node - ``` + See the complete [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-instructor) and [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-instructor) examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Node.js wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-instructor) and [Python wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-instructor) examples. -2. 
2 - - ## Install Instructor and OpenAI SDKs - - Required - - Install Instructor and the OpenAI SDK. PostHog instruments your LLM calls by wrapping the OpenAI client, which Instructor uses under the hood. + Install the OpenTelemetry SDK, the OpenAI instrumentation, and Instructor. PostHog AI ### Python ```bash - pip install instructor openai + pip install instructor openai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-openai-v2 ``` ### Node ```bash - npm install @instructor-ai/instructor openai zod@3 + npm install @instructor-ai/instructor openai zod @posthog/ai @opentelemetry/sdk-node @opentelemetry/resources @opentelemetry/instrumentation-openai ``` -3. 3 +2. 2 - ## Initialize PostHog and Instructor + ## Set up OpenTelemetry tracing Required - Initialize PostHog with your project token and host from [your project settings](https://app.posthog.com/settings/project), then create a PostHog OpenAI wrapper and pass it to Instructor. + Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. 
PostHog AI ### Python ```python - import instructor - from pydantic import BaseModel - from posthog.ai.openai import OpenAI - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - openai_client = OpenAI( - api_key="your_openai_api_key", - posthog_client=posthog + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) ) - client = instructor.from_openai(openai_client) + trace.set_tracer_provider(provider) + OpenAIInstrumentor().instrument() ``` ### Node ```typescript - import Instructor from '@instructor-ai/instructor' - import { OpenAI } from '@posthog/ai' - import { PostHog } from 'posthog-node' - import { z } from 'zod' - const phClient = new PostHog( - '', - { host: 'https://us.i.posthog.com' } - ); - const openai = new OpenAI({ - apiKey: 'your_openai_api_key', - posthog: phClient, - }); - const client = Instructor({ client: openai, mode: 'TOOLS' }) + import { NodeSDK } from '@opentelemetry/sdk-node' + import { resourceFromAttributes } from '@opentelemetry/resources' + import { PostHogSpanProcessor } from '@posthog/ai/otel' + import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai' + const sdk = new NodeSDK({ + resource: resourceFromAttributes({ + 'service.name': 'my-app', + 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog + foo: 'bar', // custom properties are passed through + }), + spanProcessors: [ + new 
PostHogSpanProcessor({ + apiKey: '', + host: 'https://us.i.posthog.com', + }), + ], + instrumentations: [new OpenAIInstrumentation()], + }) + sdk.start() ``` - **How this works** - - PostHog's `OpenAI` wrapper is a proper subclass of `openai.OpenAI`, so it works directly with `instructor.from_openai()`. PostHog captures `$ai_generation` events automatically without proxying your calls. - -4. 4 +3. 3 - ## Use Instructor with structured outputs + ## Extract structured data Required - Now use Instructor to extract structured data from LLM responses. PostHog automatically captures an `$ai_generation` event for each call. + Use Instructor to extract structured data from LLM responses. PostHog automatically captures an `$ai_generation` event for each call made through the OpenAI SDK that Instructor wraps. PostHog AI ### Python ```python - class UserInfo(BaseModel): + import instructor + import openai + from pydantic import BaseModel + class User(BaseModel): name: str age: int + client = instructor.from_openai(openai.OpenAI(api_key="your_openai_api_key")) user = client.chat.completions.create( - model="gpt-5-mini", - response_model=UserInfo, - messages=[ - {"role": "user", "content": "John Doe is 30 years old."} - ], - posthog_distinct_id="user_123", - posthog_trace_id="trace_123", - posthog_properties={"conversation_id": "abc123"}, + model="gpt-4o-mini", + response_model=User, + messages=[{"role": "user", "content": "Extract: John is 30 years old"}], ) - print(f"{user.name} is {user.age} years old") + print(user) ``` ### Node ```typescript - const UserInfo = z.object({ + import OpenAI from 'openai' + import Instructor from '@instructor-ai/instructor' + import { z } from 'zod' + const oai = new OpenAI({ apiKey: 'your_openai_api_key' }) + const client = Instructor({ client: oai, mode: 'TOOLS' }) + const UserSchema = z.object({ name: z.string(), age: z.number(), }) const user = await client.chat.completions.create({ - model: 'gpt-5-mini', - response_model: { schema: 
UserInfo, name: 'UserInfo' }, - messages: [ - { role: 'user', content: 'John Doe is 30 years old.' } - ], - posthogDistinctId: 'user_123', - posthogTraceId: 'trace_123', - posthogProperties: { conversation_id: 'abc123' }, + model: 'gpt-4o-mini', + response_model: { schema: UserSchema, name: 'User' }, + messages: [{ role: 'user', content: 'Extract: John is 30 years old' }], }) - console.log(`${user.name} is ${user.age} years old`) - phClient.shutdown() + console.log(user) ``` + > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. + You can expect captured `$ai_generation` events to have the following properties: | Property | Description | @@ -159,7 +149,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -171,7 +161,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/langchain.md b/skills/instrument-llm-analytics/references/langchain.md index ea52236..b25232b 100644 --- a/skills/instrument-llm-analytics/references/langchain.md +++ b/skills/instrument-llm-analytics/references/langchain.md @@ -2,154 +2,130 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics works best with our Python and Node SDKs. + **Full working examples** - PostHog AI - - ### Python - - ```bash - pip install posthog - ``` - - ### Node - - ```bash - npm install @posthog/ai posthog-node - ``` - -2. 
2 - - ## Install LangChain and OpenAI SDKs - - Required + See the complete [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-langchain) and [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-langchain) examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Node.js wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-langchain) and [Python wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-langchain) examples. - Install LangChain. The PostHog SDK instruments your LLM calls by wrapping LangChain. The PostHog SDK **does not** proxy your calls. + Install the OpenTelemetry SDK, the LangChain instrumentation, and LangChain with OpenAI. PostHog AI ### Python ```bash - pip install langchain openai langchain-openai + pip install langchain langchain-core langchain-openai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-langchain ``` ### Node ```bash - npm install langchain @langchain/core @langchain/openai @posthog/ai + npm install langchain @langchain/core @langchain/openai @posthog/ai @opentelemetry/sdk-node @opentelemetry/resources @traceloop/instrumentation-langchain ``` - **Proxy note** - - These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the background to send the data. You can also use LLM analytics with other SDKs or our API, but you will need to capture the data in the right format. See the schema in the [manual capture section](/docs/llm-analytics/installation/manual-capture.md) for more details. - -3. 3 +2. 2 - ## Initialize PostHog and LangChain + ## Set up OpenTelemetry tracing Required - Initialize PostHog with your project token and host from [your project settings](https://app.posthog.com/settings/project), then pass it to the LangChain `CallbackHandler` wrapper. 
Optionally, you can provide a user distinct ID, trace ID, PostHog properties, [groups](/docs/product-analytics/group-analytics.md), and privacy mode. + Configure OpenTelemetry to auto-instrument LangChain calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. PostHog AI ### Python ```python - from posthog.ai.langchain import CallbackHandler - from langchain_openai import ChatOpenAI - from langchain_core.prompts import ChatPromptTemplate - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - callback_handler = CallbackHandler( - client=posthog, # This is an optional parameter. If it is not provided, a default client will be used. - distinct_id="user_123", # optional - trace_id="trace_456", # optional - properties={"conversation_id": "abc123"}, # optional - groups={"company": "company_id_in_your_db"}, # optional - privacy_mode=False # optional + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.langchain import LangchainInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) ) + trace.set_tracer_provider(provider) + LangchainInstrumentor().instrument() ``` ### Node ```typescript - import { PostHog } from 'posthog-node'; - import { LangChainCallbackHandler } from '@posthog/ai'; - import { ChatOpenAI } from '@langchain/openai'; - import { ChatPromptTemplate } from '@langchain/core/prompts'; - const phClient = new PostHog( - '', - { host: 'https://us.i.posthog.com' } - ); - const 
callbackHandler = new LangChainCallbackHandler({ - client: phClient, - distinctId: 'user_123', // optional - traceId: 'trace_456', // optional - properties: { conversationId: 'abc123' }, // optional - groups: { company: 'company_id_in_your_db' }, // optional - privacyMode: false, // optional - debug: false // optional - when true, logs all events to console - }); + import { NodeSDK } from '@opentelemetry/sdk-node' + import { resourceFromAttributes } from '@opentelemetry/resources' + import { PostHogSpanProcessor } from '@posthog/ai/otel' + import { LangChainInstrumentation } from '@traceloop/instrumentation-langchain' + const sdk = new NodeSDK({ + resource: resourceFromAttributes({ + 'service.name': 'my-app', + 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog + foo: 'bar', // custom properties are passed through + }), + spanProcessors: [ + new PostHogSpanProcessor({ + apiKey: '', + host: 'https://us.i.posthog.com', + }), + ], + instrumentations: [new LangChainInstrumentation()], + }) + sdk.start() ``` - > **Note:** If you want to capture LLM events anonymously, **don't** pass a distinct ID to the `CallbackHandler`. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. - -4. 4 +3. 3 ## Call LangChain Required - When you invoke your chain, pass the `callback_handler` in the `config` as part of your `callbacks`: + Use LangChain as normal. The OpenTelemetry instrumentation automatically captures `$ai_generation` events for each LLM call — no callback handlers needed. 
PostHog AI ### Python ```python + from langchain_openai import ChatOpenAI + from langchain_core.prompts import ChatPromptTemplate prompt = ChatPromptTemplate.from_messages([ ("system", "You are a helpful assistant."), ("user", "{input}") ]) model = ChatOpenAI(openai_api_key="your_openai_api_key") chain = prompt | model - # Execute the chain with the callback handler - response = chain.invoke( - {"input": "Tell me a joke about programming"}, - config={"callbacks": [callback_handler]} - ) + response = chain.invoke({"input": "Tell me a joke about programming"}) print(response.content) ``` ### Node ```typescript + import { ChatOpenAI } from '@langchain/openai' + import { ChatPromptTemplate } from '@langchain/core/prompts' const prompt = ChatPromptTemplate.fromMessages([ ["system", "You are a helpful assistant."], ["user", "{input}"] - ]); - const model = new ChatOpenAI({ - apiKey: "your_openai_api_key" - }); - const chain = prompt.pipe(model); - // Execute the chain with the callback handler - const response = await chain.invoke( - { input: "Tell me a joke about programming" }, - { callbacks: [callbackHandler] } - ); - console.log(response.content); - phClient.shutdown(); + ]) + const model = new ChatOpenAI({ apiKey: "your_openai_api_key" }) + const chain = prompt.pipe(model) + const response = await chain.invoke({ input: "Tell me a joke about programming" }) + console.log(response.content) ``` + > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. + PostHog automatically captures an `$ai_generation` event along with these properties: | Property | Description | @@ -167,7 +143,7 @@ It also automatically creates a trace hierarchy based on how LangChain components are nested. -5. ## Verify traces and generations +4. 
## Verify traces and generations Recommended @@ -179,7 +155,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/langgraph.md b/skills/instrument-llm-analytics/references/langgraph.md index f5960d8..fbdd452 100644 --- a/skills/instrument-llm-analytics/references/langgraph.md +++ b/skills/instrument-llm-analytics/references/langgraph.md @@ -2,107 +2,95 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics works best with our Python and Node SDKs. + **Full working examples** - PostHog AI - - ### Python - - ```bash - pip install posthog - ``` - - ### Node - - ```bash - npm install @posthog/ai posthog-node - ``` - -2. 2 + See the complete [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-langgraph) and [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-langgraph) examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Node.js wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-langgraph) and [Python wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-langgraph) examples. - ## Install LangGraph - - Required - - Install LangGraph and LangChain. PostHog instruments your LLM calls through LangChain-compatible callback handlers that LangGraph supports. + Install the OpenTelemetry SDK, the LangChain instrumentation, and LangGraph with OpenAI. 
PostHog AI ### Python ```bash - pip install langgraph langchain-openai + pip install langgraph langchain-core langchain-openai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-langchain ``` ### Node ```bash - npm install @langchain/langgraph @langchain/openai @langchain/core + npm install @langchain/langgraph @langchain/openai @langchain/core zod @posthog/ai @opentelemetry/sdk-node @opentelemetry/resources @traceloop/instrumentation-langchain ``` -3. 3 +2. 2 - ## Initialize PostHog + ## Set up OpenTelemetry tracing Required - Initialize PostHog with your project token and host from [your project settings](https://app.posthog.com/settings/project), then create a LangChain `CallbackHandler`. + Configure OpenTelemetry to auto-instrument LangChain calls and export traces to PostHog. LangGraph is built on LangChain, so the same instrumentation captures all LLM calls. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. PostHog AI ### Python ```python - from posthog.ai.langchain import CallbackHandler - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - callback_handler = CallbackHandler( - client=posthog, - distinct_id="user_123", # optional - trace_id="trace_456", # optional - properties={"conversation_id": "abc123"}, # optional - groups={"company": "company_id_in_your_db"}, # optional - privacy_mode=False # optional + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.langchain import LangchainInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( 
+ api_key="", + host="https://us.i.posthog.com", + ) ) + trace.set_tracer_provider(provider) + LangchainInstrumentor().instrument() ``` ### Node ```typescript - import { PostHog } from 'posthog-node'; - import { LangChainCallbackHandler } from '@posthog/ai'; - const phClient = new PostHog( - '', - { host: 'https://us.i.posthog.com' } - ); - const callbackHandler = new LangChainCallbackHandler({ - client: phClient, - distinctId: 'user_123', // optional - traceId: 'trace_456', // optional - properties: { conversationId: 'abc123' }, // optional - groups: { company: 'company_id_in_your_db' }, // optional - privacyMode: false, // optional - }); + import { NodeSDK } from '@opentelemetry/sdk-node' + import { resourceFromAttributes } from '@opentelemetry/resources' + import { PostHogSpanProcessor } from '@posthog/ai/otel' + import { LangChainInstrumentation } from '@traceloop/instrumentation-langchain' + const sdk = new NodeSDK({ + resource: resourceFromAttributes({ + 'service.name': 'my-app', + 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog + foo: 'bar', // custom properties are passed through + }), + spanProcessors: [ + new PostHogSpanProcessor({ + apiKey: '', + host: 'https://us.i.posthog.com', + }), + ], + instrumentations: [new LangChainInstrumentation()], + }) + sdk.start() ``` - **How this works** - - LangGraph is built on LangChain, so it supports LangChain-compatible callback handlers. PostHog's `CallbackHandler` captures `$ai_generation` events and trace hierarchy automatically without proxying your calls. - -4. 4 +3. 3 ## Run your graph Required - Pass the `callback_handler` in the `config` when invoking your LangGraph graph. PostHog automatically captures generation events for each LLM call. + Use LangGraph as normal. The OpenTelemetry instrumentation automatically captures `$ai_generation` events for each LLM call — no callback handlers needed. 
PostHog AI @@ -119,8 +107,7 @@ model = ChatOpenAI(api_key="your_openai_api_key") agent = create_react_agent(model, tools=[get_weather]) result = agent.invoke( - {"messages": [{"role": "user", "content": "What's the weather in Paris?"}]}, - config={"callbacks": [callback_handler]} + {"messages": [{"role": "user", "content": "What's the weather in Paris?"}]} ) print(result["messages"][-1].content) ``` @@ -145,13 +132,13 @@ const model = new ChatOpenAI({ apiKey: 'your_openai_api_key' }); const agent = createReactAgent({ llm: model, tools: [getWeather] }); const result = await agent.invoke( - { messages: [{ role: 'user', content: "What's the weather in Paris?" }] }, - { callbacks: [callbackHandler] } + { messages: [{ role: 'user', content: "What's the weather in Paris?" }] } ); console.log(result.messages[result.messages.length - 1].content); - phClient.shutdown(); ``` + > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. + PostHog automatically captures `$ai_generation` events and creates a trace hierarchy based on how LangGraph components are nested. You can expect captured events to have the following properties: | Property | Description | @@ -167,7 +154,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -179,7 +166,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 
4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/llamaindex.md b/skills/instrument-llm-analytics/references/llamaindex.md index 559dc6f..706c38b 100644 --- a/skills/instrument-llm-analytics/references/llamaindex.md +++ b/skills/instrument-llm-analytics/references/llamaindex.md @@ -2,69 +2,62 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - Setting up analytics starts with installing the PostHog SDK. The LlamaIndex integration uses PostHog's OpenAI wrapper. + **Full working examples** - ```bash - pip install posthog - ``` - -2. 2 + See the complete [Python example](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-llamaindex) on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Python wrapper example](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-llamaindex). - ## Install LlamaIndex - - Required - - Install LlamaIndex with the OpenAI integration. PostHog instruments your LLM calls by wrapping the OpenAI client that LlamaIndex uses. + Install LlamaIndex, OpenAI, and the OpenTelemetry SDK with the LlamaIndex instrumentation. ```bash - pip install llama-index llama-index-llms-openai + pip install llama-index llama-index-llms-openai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-llamaindex ``` -3. 3 +2. 2 - ## Initialize PostHog and LlamaIndex + ## Set up OpenTelemetry tracing Required - Initialize PostHog with your project token and host from [your project settings](https://app.posthog.com/settings/project), then create a PostHog OpenAI wrapper and pass it to LlamaIndex's `OpenAI` LLM class. + Configure OpenTelemetry to auto-instrument LlamaIndex calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. 
```python - from llama_index.llms.openai import OpenAI as LlamaOpenAI - from posthog.ai.openai import OpenAI - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - openai_client = OpenAI( - api_key="your_openai_api_key", - posthog_client=posthog + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.llamaindex import LlamaIndexInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) ) - llm = LlamaOpenAI( - model="gpt-5-mini", - api_key="your_openai_api_key", - ) - llm._client = openai_client + trace.set_tracer_provider(provider) + LlamaIndexInstrumentor().instrument() ``` - **How this works** - - PostHog's `OpenAI` wrapper is a proper subclass of `openai.OpenAI`, so it can replace the internal client used by LlamaIndex's OpenAI LLM. PostHog captures `$ai_generation` events automatically without proxying your calls. **Note:** This approach accesses an internal attribute (`_client`) which may change in future LlamaIndex versions. Check for updates if you encounter issues after upgrading LlamaIndex. - -4. 4 +3. 3 ## Query with LlamaIndex Required - Use LlamaIndex as normal. PostHog automatically captures an `$ai_generation` event for each LLM call made through the wrapped client. + Use LlamaIndex as normal. The OpenTelemetry instrumentation automatically captures `$ai_generation` events for each LLM call. 
```python + from llama_index.llms.openai import OpenAI from llama_index.core import VectorStoreIndex, SimpleDirectoryReader + llm = OpenAI(model="gpt-4o-mini", api_key="your_openai_api_key") # Load your documents documents = SimpleDirectoryReader("data").load_data() # Create an index @@ -75,6 +68,8 @@ print(response) ``` + > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. + You can expect captured `$ai_generation` events to have the following properties: | Property | Description | @@ -90,7 +85,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -102,7 +97,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/manual-capture.md b/skills/instrument-llm-analytics/references/manual-capture.md index 49b8bb9..9c6c014 100644 --- a/skills/instrument-llm-analytics/references/manual-capture.md +++ b/skills/instrument-llm-analytics/references/manual-capture.md @@ -229,7 +229,9 @@ ]); ``` - ### Event Properties +2. 2 + + ## Event properties Each event type has specific properties. See the tabs below for detailed property documentation for each event type. @@ -388,6 +390,34 @@ | $ai_output_cost_usd | (Optional) Cost in USD for output tokens (usually 0 for embeddings) | | $ai_total_cost_usd | (Optional) Total cost in USD | +3. ## Verify traces and generations + + Recommended + + *Confirm LLM events are being sent to PostHog* + + Let's make sure LLM events are being captured and sent to PostHog. 
Under **LLM analytics**, you should see rows of data appear in the **Traces** and **Generations** tabs. + + ![LLM generations in PostHog](https://res.cloudinary.com/dmukukwp6/image/upload/SCR_20250807_syne_ecd0801880.png)![LLM generations in PostHog](https://res.cloudinary.com/dmukukwp6/image/upload/SCR_20250807_syjm_5baab36590.png) + + [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) + +4. 3 + + ## Next steps + + Recommended + + Now that you're capturing AI conversations, continue with the resources below to learn what else LLM Analytics enables within the PostHog platform. + + | Resource | Description | + | --- | --- | + | [Basics](/docs/llm-analytics/basics.md) | Learn the basics of how LLM calls become events in PostHog. | + | [Generations](/docs/llm-analytics/generations.md) | Read about the $ai_generation event and its properties. | + | [Traces](/docs/llm-analytics/traces.md) | Explore the trace hierarchy and how to use it to debug LLM calls. | + | [Spans](/docs/llm-analytics/spans.md) | Review spans and their role in representing individual operations. | + | [Analyze LLM performance](/docs/llm-analytics/dashboard.md) | Learn how to create dashboards to analyze LLM performance. | + ### Community questions Ask a question diff --git a/skills/instrument-llm-analytics/references/mastra.md b/skills/instrument-llm-analytics/references/mastra.md index 26cd094..82bed04 100644 --- a/skills/instrument-llm-analytics/references/mastra.md +++ b/skills/instrument-llm-analytics/references/mastra.md @@ -2,83 +2,83 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - Setting up analytics starts with installing the PostHog SDK. + **Full working examples** - ```bash - npm install @posthog/ai posthog-node - ``` - -2. 2 - - ## Install Mastra - - Required + See the complete [Node.js example](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-mastra) on GitHub. 
If you're using the PostHog SDK wrapper instead, see the [Node.js wrapper example](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-mastra). - Install Mastra and a model provider SDK. Mastra uses the Vercel AI SDK under the hood, so you can use any Vercel AI-compatible model provider. + Install Mastra with the official `@mastra/posthog` exporter. Mastra's observability system sends traces to PostHog as `$ai_generation` events automatically. ```bash - npm install @mastra/core @ai-sdk/openai + npm install @mastra/core @mastra/observability @mastra/posthog ``` - **Proxy note** - - These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the background to send the data. You can also use LLM analytics with other SDKs or our API, but you will need to capture the data in the right format. See the schema in the [manual capture section](/docs/llm-analytics/installation/manual-capture.md) for more details. - -3. 3 +2. 2 - ## Initialize PostHog and wrap your model + ## Configure Mastra with the PostHog exporter Required - Initialize PostHog with your project token and host from [your project settings](https://app.posthog.com/settings/project), then use `withTracing` from `@posthog/ai` to wrap the model you pass to your Mastra agent. + Initialize Mastra with an `Observability` config that uses the `PosthogExporter`. Pass your PostHog project token and host from [your project settings](https://app.posthog.com/settings/project). 
```typescript - import { Agent } from "@mastra/core/agent"; - import { PostHog } from "posthog-node"; - import { withTracing } from "@posthog/ai"; - import { createOpenAI } from "@ai-sdk/openai"; - const phClient = new PostHog( - '', - { host: 'https://us.i.posthog.com' } - ); - const openaiClient = createOpenAI({ - apiKey: 'your_openai_api_key', - compatibility: 'strict' - }); - const agent = new Agent({ - name: "my-agent", - instructions: "You are a helpful assistant.", - model: withTracing(openaiClient("gpt-4o"), phClient, { - posthogDistinctId: "user_123", // optional - posthogTraceId: "trace_123", // optional - posthogProperties: { conversationId: "abc123" }, // optional - posthogPrivacyMode: false, // optional - posthogGroups: { company: "companyIdInYourDb" }, // optional + import { Mastra } from '@mastra/core' + import { Agent } from '@mastra/core/agent' + import { Observability } from '@mastra/observability' + import { PosthogExporter } from '@mastra/posthog' + const weatherAgent = new Agent({ + id: 'weather-agent', + name: 'Weather Agent', + instructions: 'You are a helpful assistant with access to weather data.', + model: { id: 'openai/gpt-4o-mini' }, + }) + const mastra = new Mastra({ + agents: { weatherAgent }, + observability: new Observability({ + configs: { + posthog: { + serviceName: 'my-app', + exporters: [ + new PosthogExporter({ + apiKey: '', + host: 'https://us.i.posthog.com', + defaultDistinctId: 'user_123', // fallback if no userId in metadata + }), + ], + }, + }, }), - }); + }) ``` - You can enrich LLM events with additional data by passing parameters such as the trace ID, distinct ID, custom properties, groups, and privacy mode options. - -4. 4 +3. 3 - ## Use your Mastra agent + ## Run your agent Required - Now, when your Mastra agent makes LLM calls, PostHog automatically captures an `$ai_generation` event for each one. + Use Mastra as normal. 
The `PosthogExporter` automatically captures `$ai_generation` events for each LLM call, including token usage, cost, latency, and the full conversation. + + Pass `tracingOptions.metadata` to `generate()` to attach per-request metadata. The `userId` field maps to PostHog's distinct ID, `sessionId` maps to `$ai_session_id`, and any other keys are passed through as custom event properties. ```typescript - const result = await agent.generate("What is the capital of France?"); - console.log(result.text); - phClient.shutdown(); + const agent = mastra.getAgent('weatherAgent') + const result = await agent.generate("What's the weather in Dublin?", { + tracingOptions: { + metadata: { + userId: 'user_123', // becomes distinct_id + sessionId: 'session_abc', // becomes $ai_session_id + conversation_id: 'abc-123', // custom property + }, + }, + }) + console.log(result.text) ``` - > **Note:** If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. + > **Note:** If you want to capture LLM events anonymously, omit `userId` from `tracingOptions.metadata` and don't set `defaultDistinctId`. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. You can expect captured `$ai_generation` events to have the following properties: @@ -95,7 +95,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -107,7 +107,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 
4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/mirascope.md b/skills/instrument-llm-analytics/references/mirascope.md index ab41bd4..fc481af 100644 --- a/skills/instrument-llm-analytics/references/mirascope.md +++ b/skills/instrument-llm-analytics/references/mirascope.md @@ -2,75 +2,69 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - Setting up analytics starts with installing the PostHog SDK. The Mirascope integration uses PostHog's OpenAI wrapper since Mirascope supports passing a custom OpenAI client. + **Full working examples** - ```bash - pip install posthog - ``` - -2. 2 + See the complete [Python example](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-mirascope) on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Python wrapper example](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-mirascope). - ## Install Mirascope - - Required - - Install Mirascope with OpenAI support. PostHog instruments your LLM calls by wrapping the OpenAI client that Mirascope uses under the hood. + Install the OpenTelemetry SDK, the OpenAI instrumentation, and Mirascope. ```bash - pip install mirascope openai + pip install "mirascope[openai]" opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-openai-v2 ``` -3. 3 +2. 2 - ## Initialize PostHog and Mirascope + ## Set up OpenTelemetry tracing Required - Initialize PostHog with your project token and host from [your project settings](https://app.posthog.com/settings/project), then create a PostHog OpenAI wrapper and pass it to Mirascope's `@call` decorator via the `client` parameter. + Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. 
```python - from mirascope.llm import call - from posthog.ai.openai import OpenAI - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - openai_client = OpenAI( - api_key="your_openai_api_key", - posthog_client=posthog + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) ) + trace.set_tracer_provider(provider) + OpenAIInstrumentor().instrument() ``` - **How this works** - - Mirascope's `@call` decorator accepts a `client` parameter for passing a custom OpenAI client. PostHog's `OpenAI` wrapper is a proper subclass of `openai.OpenAI`, so it works directly. PostHog captures `$ai_generation` events automatically without proxying your calls. - -4. 4 +3. 3 - ## Make your first call + ## Call your LLMs Required - Use Mirascope as normal, passing the wrapped client to the call decorator. PostHog automatically captures an `$ai_generation` event for each LLM call. + Use Mirascope as normal. PostHog automatically captures an `$ai_generation` event for each LLM call made through the OpenAI SDK that Mirascope uses internally. ```python - @call(model="openai/gpt-5-mini", client=openai_client) - def recommend_book(genre: str): - return f"Recommend a {genre} book." 
- response = recommend_book( - "fantasy", - posthog_distinct_id="user_123", - posthog_trace_id="trace_123", - posthog_properties={"conversation_id": "abc123"}, - ) + from mirascope.core import openai, prompt_template + @openai.call("gpt-4o-mini") + @prompt_template("Tell me a fun fact about {topic}") + def fun_fact(topic: str): ... + response = fun_fact("hedgehogs") print(response.content) ``` + > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. + You can expect captured `$ai_generation` events to have the following properties: | Property | Description | @@ -86,7 +80,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -98,7 +92,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/mistral.md b/skills/instrument-llm-analytics/references/mistral.md index 22b8ee8..0e53b96 100644 --- a/skills/instrument-llm-analytics/references/mistral.md +++ b/skills/instrument-llm-analytics/references/mistral.md @@ -2,122 +2,112 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics works best with our Python and Node SDKs. + **Full working examples** - PostHog AI - - ### Python - - ```bash - pip install posthog - ``` - - ### Node - - ```bash - npm install @posthog/ai posthog-node - ``` - -2. 
2 - - ## Install the OpenAI SDK - - Required + See the complete [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-mistral) and [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-mistral) examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Node.js wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-mistral) and [Python wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-mistral) examples. - Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI client. The PostHog SDK **does not** proxy your calls. + Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK. PostHog AI ### Python ```bash - pip install openai + pip install openai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-openai-v2 ``` ### Node ```bash - npm install openai + npm install openai @posthog/ai @opentelemetry/sdk-node @opentelemetry/resources @opentelemetry/instrumentation-openai ``` -3. 3 +2. 2 - ## Initialize PostHog and OpenAI client + ## Set up OpenTelemetry tracing Required - We call Mistral through the OpenAI client and generate a response. We'll use PostHog's OpenAI provider to capture all the details of the call. Initialize PostHog with your PostHog project token and host from [your project settings](https://app.posthog.com/settings/project), then pass the PostHog client along with the Mistral config (the base URL and API key) to our OpenAI wrapper. + Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. 
PostHog AI ### Python ```python - from posthog.ai.openai import OpenAI - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - client = OpenAI( - base_url="https://api.mistral.ai/v1", - api_key="", - posthog_client=posthog + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) ) + trace.set_tracer_provider(provider) + OpenAIInstrumentor().instrument() ``` ### Node ```typescript - import { OpenAI } from '@posthog/ai' - import { PostHog } from 'posthog-node' - const phClient = new PostHog( - '', - { host: 'https://us.i.posthog.com' } - ); - const openai = new OpenAI({ - baseURL: 'https://api.mistral.ai/v1', - apiKey: '', - posthog: phClient, - }); - // ... your code here ... 
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent - phClient.shutdown() + import { NodeSDK } from '@opentelemetry/sdk-node' + import { resourceFromAttributes } from '@opentelemetry/resources' + import { PostHogSpanProcessor } from '@posthog/ai/otel' + import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai' + const sdk = new NodeSDK({ + resource: resourceFromAttributes({ + 'service.name': 'my-app', + 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog + foo: 'bar', // custom properties are passed through + }), + spanProcessors: [ + new PostHogSpanProcessor({ + apiKey: '', + host: 'https://us.i.posthog.com', + }), + ], + instrumentations: [new OpenAIInstrumentation()], + }) + sdk.start() ``` - > **Note:** This also works with the `AsyncOpenAI` client. - - **Proxy note** - - These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the background to send the data. You can also use LLM analytics with other SDKs or our API, but you will need to capture the data in the right format. See the schema in the [manual capture section](/docs/llm-analytics/installation/manual-capture.md) for more details. - -4. 4 +3. 3 ## Call Mistral Required - Now, when you call Mistral with the OpenAI SDK, PostHog automatically captures an `$ai_generation` event. You can also capture or modify additional properties with the distinct ID, trace ID, properties, groups, and privacy mode parameters. + Now, when you use the OpenAI SDK to call Mistral, PostHog automatically captures `$ai_generation` events via the OpenTelemetry instrumentation. 
PostHog AI ### Python ```python + import openai + client = openai.OpenAI( + base_url="https://api.mistral.ai/v1", + api_key="", + ) response = client.chat.completions.create( model="mistral-large-latest", + max_completion_tokens=1024, messages=[ {"role": "user", "content": "Tell me a fun fact about hedgehogs"} ], - posthog_distinct_id="user_123", # optional - posthog_trace_id="trace_123", # optional - posthog_properties={"conversation_id": "abc123", "paid": True}, # optional - posthog_groups={"company": "company_id_in_your_db"}, # optional - posthog_privacy_mode=False # optional ) print(response.choices[0].message.content) ``` @@ -125,25 +115,20 @@ ### Node ```typescript - const completion = await openai.chat.completions.create({ - model: "mistral-large-latest", - messages: [{ role: "user", content: "Tell me a fun fact about hedgehogs" }], - posthogDistinctId: "user_123", // optional - posthogTraceId: "trace_123", // optional - posthogProperties: { conversation_id: "abc123", paid: true }, // optional - posthogGroups: { company: "company_id_in_your_db" }, // optional - posthogPrivacyMode: false // optional - }); - console.log(completion.choices[0].message.content) + import OpenAI from 'openai' + const client = new OpenAI({ + baseURL: 'https://api.mistral.ai/v1', + apiKey: '', + }) + const response = await client.chat.completions.create({ + model: 'mistral-large-latest', + max_completion_tokens: 1024, + messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }], + }) + console.log(response.choices[0].message.content) ``` - > **Notes:** - > - > - We also support the old `chat.completions` API. - > - This works with responses where `stream=True`. - > - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request. - > - > See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. 
+ > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. You can expect captured `$ai_generation` events to have the following properties: @@ -160,7 +145,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -172,7 +157,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/ollama.md b/skills/instrument-llm-analytics/references/ollama.md index ffea610..4e3ee7f 100644 --- a/skills/instrument-llm-analytics/references/ollama.md +++ b/skills/instrument-llm-analytics/references/ollama.md @@ -2,126 +2,112 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - **Note** + **Full working examples** - **Note:** Make sure Ollama is running locally before making API calls. You can start it with `ollama serve`. + See the complete [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-ollama) and [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-ollama) examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Node.js wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-ollama) and [Python wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-ollama) examples. - Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics works best with our Python and Node SDKs. + Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK. 
PostHog AI ### Python ```bash - pip install posthog + pip install openai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-openai-v2 ``` ### Node ```bash - npm install @posthog/ai posthog-node + npm install openai @posthog/ai @opentelemetry/sdk-node @opentelemetry/resources @opentelemetry/instrumentation-openai ``` 2. 2 - ## Install the OpenAI SDK + ## Set up OpenTelemetry tracing Required - Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI client. The PostHog SDK **does not** proxy your calls. - - PostHog AI - - ### Python - - ```bash - pip install openai - ``` - - ### Node - - ```bash - npm install openai - ``` - -3. 3 - - ## Initialize PostHog and OpenAI client - - Required - - We call Ollama through the OpenAI client and generate a response. We'll use PostHog's OpenAI provider to capture all the details of the call. Initialize PostHog with your PostHog project token and host from [your project settings](https://app.posthog.com/settings/project), then pass the PostHog client along with the Ollama config (the base URL) to our OpenAI wrapper. + Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. 
PostHog AI ### Python ```python - from posthog.ai.openai import OpenAI - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - client = OpenAI( - base_url="http://localhost:11434/v1", - api_key="ollama", - posthog_client=posthog + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) ) + trace.set_tracer_provider(provider) + OpenAIInstrumentor().instrument() ``` ### Node ```typescript - import { OpenAI } from '@posthog/ai' - import { PostHog } from 'posthog-node' - const phClient = new PostHog( - '', - { host: 'https://us.i.posthog.com' } - ); - const openai = new OpenAI({ - baseURL: 'http://localhost:11434/v1', - apiKey: 'ollama', - posthog: phClient, - }); - // ... your code here ... 
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent - phClient.shutdown() + import { NodeSDK } from '@opentelemetry/sdk-node' + import { resourceFromAttributes } from '@opentelemetry/resources' + import { PostHogSpanProcessor } from '@posthog/ai/otel' + import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai' + const sdk = new NodeSDK({ + resource: resourceFromAttributes({ + 'service.name': 'my-app', + 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog + foo: 'bar', // custom properties are passed through + }), + spanProcessors: [ + new PostHogSpanProcessor({ + apiKey: '', + host: 'https://us.i.posthog.com', + }), + ], + instrumentations: [new OpenAIInstrumentation()], + }) + sdk.start() ``` - > **Note:** This also works with the `AsyncOpenAI` client. - - **Proxy note** - - These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the background to send the data. You can also use LLM analytics with other SDKs or our API, but you will need to capture the data in the right format. See the schema in the [manual capture section](/docs/llm-analytics/installation/manual-capture.md) for more details. - -4. 4 +3. 3 ## Call Ollama Required - Now, when you call Ollama with the OpenAI SDK, PostHog automatically captures an `$ai_generation` event. You can also capture or modify additional properties with the distinct ID, trace ID, properties, groups, and privacy mode parameters. + Now, when you use the OpenAI SDK to call Ollama, PostHog automatically captures `$ai_generation` events via the OpenTelemetry instrumentation. 
PostHog AI ### Python ```python + import openai + client = openai.OpenAI( + base_url="http://localhost:11434/v1", + api_key="ollama", + ) response = client.chat.completions.create( model="llama3.2", + max_completion_tokens=1024, messages=[ {"role": "user", "content": "Tell me a fun fact about hedgehogs"} ], - posthog_distinct_id="user_123", # optional - posthog_trace_id="trace_123", # optional - posthog_properties={"conversation_id": "abc123", "paid": True}, # optional - posthog_groups={"company": "company_id_in_your_db"}, # optional - posthog_privacy_mode=False # optional ) print(response.choices[0].message.content) ``` @@ -129,25 +115,20 @@ ### Node ```typescript - const completion = await openai.chat.completions.create({ - model: "llama3.2", - messages: [{ role: "user", content: "Tell me a fun fact about hedgehogs" }], - posthogDistinctId: "user_123", // optional - posthogTraceId: "trace_123", // optional - posthogProperties: { conversation_id: "abc123", paid: true }, // optional - posthogGroups: { company: "company_id_in_your_db" }, // optional - posthogPrivacyMode: false // optional - }); - console.log(completion.choices[0].message.content) + import OpenAI from 'openai' + const client = new OpenAI({ + baseURL: 'http://localhost:11434/v1', + apiKey: 'ollama', + }) + const response = await client.chat.completions.create({ + model: 'llama3.2', + max_completion_tokens: 1024, + messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }], + }) + console.log(response.choices[0].message.content) ``` - > **Notes:** - > - > - We also support the old `chat.completions` API. - > - This works with responses where `stream=True`. - > - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request. - > - > See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. + > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. 
See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. You can expect captured `$ai_generation` events to have the following properties: @@ -164,7 +145,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -176,7 +157,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/openai.md b/skills/instrument-llm-analytics/references/openai.md index aa0448b..b72607e 100644 --- a/skills/instrument-llm-analytics/references/openai.md +++ b/skills/instrument-llm-analytics/references/openai.md @@ -2,120 +2,110 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics works best with our Python and Node SDKs. + **Full working examples** - PostHog AI - - ### Python - - ```bash - pip install posthog - ``` - - ### Node - - ```bash - npm install @posthog/ai posthog-node - ``` - -2. 2 - - ## Install the OpenAI SDK - - Required + See the complete [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-openai) and [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-openai) examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Node.js wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-openai) and [Python wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-openai) examples. - Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI client. 
The PostHog SDK **does not** proxy your calls. + Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK. PostHog AI ### Python ```bash - pip install openai + pip install openai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-openai-v2 ``` ### Node ```bash - npm install openai + npm install openai @posthog/ai @opentelemetry/sdk-node @opentelemetry/resources @opentelemetry/instrumentation-openai ``` -3. 3 +2. 2 - ## Initialize PostHog and OpenAI client + ## Set up OpenTelemetry tracing Required - Initialize PostHog with your project token and host from [your project settings](https://app.posthog.com/settings/project), then pass it to our OpenAI wrapper. + Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. PostHog AI ### Python ```python - from posthog.ai.openai import OpenAI - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - client = OpenAI( - api_key="your_openai_api_key", - posthog_client=posthog # This is an optional parameter. If it is not provided, a default client will be used. 
+ from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) ) + trace.set_tracer_provider(provider) + OpenAIInstrumentor().instrument() ``` ### Node ```typescript - import { OpenAI } from '@posthog/ai' - import { PostHog } from 'posthog-node' - const phClient = new PostHog( - '', - { host: 'https://us.i.posthog.com' } - ); - const openai = new OpenAI({ - apiKey: 'your_openai_api_key', - posthog: phClient, - }); - // ... your code here ... - // IMPORTANT: Shutdown the client when you're done to ensure all events are sent - phClient.shutdown() + import { NodeSDK } from '@opentelemetry/sdk-node' + import { resourceFromAttributes } from '@opentelemetry/resources' + import { PostHogSpanProcessor } from '@posthog/ai/otel' + import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai' + const sdk = new NodeSDK({ + resource: resourceFromAttributes({ + 'service.name': 'my-app', + 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog + foo: 'bar', // custom properties are passed through + }), + spanProcessors: [ + new PostHogSpanProcessor({ + apiKey: '', + host: 'https://us.i.posthog.com', + }), + ], + instrumentations: [new OpenAIInstrumentation()], + }) + sdk.start() ``` - > **Note:** This also works with the `AsyncOpenAI` client. - - **Proxy note** - - These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the background to send the data. 
You can also use LLM analytics with other SDKs or our API, but you will need to capture the data in the right format. See the schema in the [manual capture section](/docs/llm-analytics/installation/manual-capture.md) for more details. - -4. 4 +3. 3 ## Call OpenAI LLMs Required - Now, when you use the OpenAI SDK to call LLMs, PostHog automatically captures an `$ai_generation` event. You can enrich the event with additional data such as the trace ID, distinct ID, custom properties, groups, and privacy mode options. + Now, when you use the OpenAI SDK to call OpenAI, PostHog automatically captures `$ai_generation` events via the OpenTelemetry instrumentation. PostHog AI ### Python ```python + import openai + client = openai.OpenAI( + api_key="your_openai_api_key", + ) response = client.responses.create( model="gpt-5-mini", input=[ {"role": "user", "content": "Tell me a fun fact about hedgehogs"} ], - posthog_distinct_id="user_123", # optional - posthog_trace_id="trace_123", # optional - posthog_properties={"conversation_id": "abc123", "paid": True}, # optional - posthog_groups={"company": "company_id_in_your_db"}, # optional - posthog_privacy_mode=False # optional ) print(response.output_text) ``` @@ -123,49 +113,50 @@ ### Node ```typescript - const completion = await openai.responses.create({ - model: "gpt-5-mini", - input: [{ role: "user", content: "Tell me a fun fact about hedgehogs" }], - posthogDistinctId: "user_123", // optional - posthogTraceId: "trace_123", // optional - posthogProperties: { conversation_id: "abc123", paid: true }, // optional - posthogGroups: { company: "company_id_in_your_db" }, // optional - posthogPrivacyMode: false // optional - }); - console.log(completion.output_text) + import OpenAI from 'openai' + const client = new OpenAI({ + apiKey: 'your_openai_api_key', + }) + const response = await client.responses.create({ + model: 'gpt-5-mini', + input: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }], + }) + 
console.log(response.output_text) ``` - > **Notes:** - > - > - We also support the old `chat.completions` API. - > - This works with responses where `stream=True`. - > - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request. - > - > See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. + > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. You can expect captured `$ai_generation` events to have the following properties: -5. 5 +4. 4 ## Capture embeddings Optional - PostHog can also capture embedding generations as `$ai_embedding` events. Just make sure to use the same `posthog.ai.openai` client to do so: + PostHog can also capture embedding generations as `$ai_embedding` events. The OpenTelemetry instrumentation automatically captures these when you use the embeddings API: + + PostHog AI + + ### Python ```python response = client.embeddings.create( input="The quick brown fox", model="text-embedding-3-small", - posthog_distinct_id="user_123", # optional - posthog_trace_id="trace_123", # optional - posthog_properties={"key": "value"} # optional - posthog_groups={"company": "company_id_in_your_db"} # optional - posthog_privacy_mode=False # optional ) ``` -6. ## Verify traces and generations + ### Node + + ```typescript + const response = await client.embeddings.create({ + input: 'The quick brown fox', + model: 'text-embedding-3-small', + }) + ``` + +5. ## Verify traces and generations Recommended @@ -177,7 +168,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -7. 6 +6. 
5 ## Next steps diff --git a/skills/instrument-llm-analytics/references/openrouter.md b/skills/instrument-llm-analytics/references/openrouter.md index 41ff517..a548874 100644 --- a/skills/instrument-llm-analytics/references/openrouter.md +++ b/skills/instrument-llm-analytics/references/openrouter.md @@ -2,126 +2,112 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - **Alternative: OpenRouter Broadcast** + **Full working examples** - OpenRouter also offers a native [Broadcast feature](https://openrouter.ai/docs/guides/features/broadcast/posthog) that can automatically send LLM analytics data to PostHog without requiring SDK instrumentation. This is a simpler option if you don't need the additional customization that our SDK provides. + See the complete [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-openrouter) and [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-openrouter) examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Node.js wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-openrouter) and [Python wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-openrouter) examples. - Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics works best with our Python and Node SDKs. + Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK. PostHog AI ### Python ```bash - pip install posthog + pip install openai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-openai-v2 ``` ### Node ```bash - npm install @posthog/ai posthog-node + npm install openai @posthog/ai @opentelemetry/sdk-node @opentelemetry/resources @opentelemetry/instrumentation-openai ``` 2. 2 - ## Install the OpenAI SDK + ## Set up OpenTelemetry tracing Required - Install the OpenAI SDK. 
The PostHog SDK instruments your LLM calls by wrapping the OpenAI client. The PostHog SDK **does not** proxy your calls. - - PostHog AI - - ### Python - - ```bash - pip install openai - ``` - - ### Node - - ```bash - npm install openai - ``` - -3. 3 - - ## Initialize PostHog and OpenAI client - - Required - - We call OpenRouter through the OpenAI client and generate a response. We'll use PostHog's OpenAI provider to capture all the details of the call. Initialize PostHog with your PostHog project token and host from [your project settings](https://app.posthog.com/settings/project), then pass the PostHog client along with the OpenRouter config (the base URL and API key) to our OpenAI wrapper. + Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. PostHog AI ### Python ```python - from posthog.ai.openai import OpenAI - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - client = OpenAI( - base_url="https://openrouter.ai/api/v1", - api_key="", - posthog_client=posthog # This is an optional parameter. If it is not provided, a default client will be used. 
+ from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) ) + trace.set_tracer_provider(provider) + OpenAIInstrumentor().instrument() ``` ### Node ```typescript - import { OpenAI } from '@posthog/ai' - import { PostHog } from 'posthog-node' - const phClient = new PostHog( - '', - { host: 'https://us.i.posthog.com' } - ); - const openai = new OpenAI({ - baseURL: 'https://openrouter.ai/api/v1', - apiKey: '', - posthog: phClient, - }); - // ... your code here ... - // IMPORTANT: Shutdown the client when you're done to ensure all events are sent - phClient.shutdown() + import { NodeSDK } from '@opentelemetry/sdk-node' + import { resourceFromAttributes } from '@opentelemetry/resources' + import { PostHogSpanProcessor } from '@posthog/ai/otel' + import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai' + const sdk = new NodeSDK({ + resource: resourceFromAttributes({ + 'service.name': 'my-app', + 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog + foo: 'bar', // custom properties are passed through + }), + spanProcessors: [ + new PostHogSpanProcessor({ + apiKey: '', + host: 'https://us.i.posthog.com', + }), + ], + instrumentations: [new OpenAIInstrumentation()], + }) + sdk.start() ``` - > **Note:** This also works with the `AsyncOpenAI` client. - - **Proxy note** - - These SDKs **do not** proxy your calls. 
They only fire off an async call to PostHog in the background to send the data. You can also use LLM analytics with other SDKs or our API, but you will need to capture the data in the right format. See the schema in the [manual capture section](/docs/llm-analytics/installation/manual-capture.md) for more details. - -4. 4 +3. 3 ## Call OpenRouter Required - Now, when you call OpenRouter with the OpenAI SDK, PostHog automatically captures an `$ai_generation` event. You can also capture or modify additional properties with the distinct ID, trace ID, properties, groups, and privacy mode parameters. + Now, when you use the OpenAI SDK to call OpenRouter, PostHog automatically captures `$ai_generation` events via the OpenTelemetry instrumentation. PostHog AI ### Python ```python - response = client.responses.create( + import openai + client = openai.OpenAI( + base_url="https://openrouter.ai/api/v1", + api_key="", + ) + response = client.chat.completions.create( model="gpt-5-mini", - input=[ + max_completion_tokens=1024, + messages=[ {"role": "user", "content": "Tell me a fun fact about hedgehogs"} ], - posthog_distinct_id="user_123", # optional - posthog_trace_id="trace_123", # optional - posthog_properties={"conversation_id": "abc123", "paid": True}, # optional - posthog_groups={"company": "company_id_in_your_db"}, # optional - posthog_privacy_mode=False # optional ) print(response.choices[0].message.content) ``` @@ -129,25 +115,20 @@ ### Node ```typescript - const completion = await openai.responses.create({ - model: "gpt-5-mini", - input: [{ role: "user", content: "Tell me a fun fact about hedgehogs" }], - posthogDistinctId: "user_123", // optional - posthogTraceId: "trace_123", // optional - posthogProperties: { conversation_id: "abc123", paid: true }, // optional - posthogGroups: { company: "company_id_in_your_db" }, // optional - posthogPrivacyMode: false // optional - }); - console.log(completion.choices[0].message.content) + import OpenAI from 'openai' + const 
client = new OpenAI({ + baseURL: 'https://openrouter.ai/api/v1', + apiKey: '', + }) + const response = await client.chat.completions.create({ + model: 'gpt-5-mini', + max_completion_tokens: 1024, + messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }], + }) + console.log(response.choices[0].message.content) ``` - > **Notes:** - > - > - We also support the old `chat.completions` API. - > - This works with responses where `stream=True`. - > - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request. - > - > See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. + > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. You can expect captured `$ai_generation` events to have the following properties: @@ -164,7 +145,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -176,7 +157,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/perplexity.md b/skills/instrument-llm-analytics/references/perplexity.md index 10248e8..6d460e2 100644 --- a/skills/instrument-llm-analytics/references/perplexity.md +++ b/skills/instrument-llm-analytics/references/perplexity.md @@ -2,122 +2,112 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics works best with our Python and Node SDKs. 
+ **Full working examples** - PostHog AI - - ### Python - - ```bash - pip install posthog - ``` - - ### Node - - ```bash - npm install @posthog/ai posthog-node - ``` - -2. 2 - - ## Install the OpenAI SDK - - Required + See the complete [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-perplexity) and [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-perplexity) examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Node.js wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-perplexity) and [Python wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-perplexity) examples. - Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI client. The PostHog SDK **does not** proxy your calls. + Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK. PostHog AI ### Python ```bash - pip install openai + pip install openai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-openai-v2 ``` ### Node ```bash - npm install openai + npm install openai @posthog/ai @opentelemetry/sdk-node @opentelemetry/resources @opentelemetry/instrumentation-openai ``` -3. 3 +2. 2 - ## Initialize PostHog and OpenAI client + ## Set up OpenTelemetry tracing Required - We call Perplexity through the OpenAI client and generate a response. We'll use PostHog's OpenAI provider to capture all the details of the call. Initialize PostHog with your PostHog project token and host from [your project settings](https://app.posthog.com/settings/project), then pass the PostHog client along with the Perplexity config (the base URL and API key) to our OpenAI wrapper. + Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. 
PostHog AI ### Python ```python - from posthog.ai.openai import OpenAI - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - client = OpenAI( - base_url="https://api.perplexity.ai", - api_key="", - posthog_client=posthog + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) ) + trace.set_tracer_provider(provider) + OpenAIInstrumentor().instrument() ``` ### Node ```typescript - import { OpenAI } from '@posthog/ai' - import { PostHog } from 'posthog-node' - const phClient = new PostHog( - '', - { host: 'https://us.i.posthog.com' } - ); - const openai = new OpenAI({ - baseURL: 'https://api.perplexity.ai', - apiKey: '', - posthog: phClient, - }); - // ... your code here ... 
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent - phClient.shutdown() + import { NodeSDK } from '@opentelemetry/sdk-node' + import { resourceFromAttributes } from '@opentelemetry/resources' + import { PostHogSpanProcessor } from '@posthog/ai/otel' + import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai' + const sdk = new NodeSDK({ + resource: resourceFromAttributes({ + 'service.name': 'my-app', + 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog + foo: 'bar', // custom properties are passed through + }), + spanProcessors: [ + new PostHogSpanProcessor({ + apiKey: '', + host: 'https://us.i.posthog.com', + }), + ], + instrumentations: [new OpenAIInstrumentation()], + }) + sdk.start() ``` - > **Note:** This also works with the `AsyncOpenAI` client. - - **Proxy note** - - These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the background to send the data. You can also use LLM analytics with other SDKs or our API, but you will need to capture the data in the right format. See the schema in the [manual capture section](/docs/llm-analytics/installation/manual-capture.md) for more details. - -4. 4 +3. 3 ## Call Perplexity Required - Now, when you call Perplexity with the OpenAI SDK, PostHog automatically captures an `$ai_generation` event. You can also capture or modify additional properties with the distinct ID, trace ID, properties, groups, and privacy mode parameters. + Now, when you use the OpenAI SDK to call Perplexity, PostHog automatically captures `$ai_generation` events via the OpenTelemetry instrumentation. 
PostHog AI ### Python ```python + import openai + client = openai.OpenAI( + base_url="https://api.perplexity.ai", + api_key="", + ) response = client.chat.completions.create( model="sonar", + max_completion_tokens=1024, messages=[ {"role": "user", "content": "Tell me a fun fact about hedgehogs"} ], - posthog_distinct_id="user_123", # optional - posthog_trace_id="trace_123", # optional - posthog_properties={"conversation_id": "abc123", "paid": True}, # optional - posthog_groups={"company": "company_id_in_your_db"}, # optional - posthog_privacy_mode=False # optional ) print(response.choices[0].message.content) ``` @@ -125,25 +115,20 @@ ### Node ```typescript - const completion = await openai.chat.completions.create({ - model: "sonar", - messages: [{ role: "user", content: "Tell me a fun fact about hedgehogs" }], - posthogDistinctId: "user_123", // optional - posthogTraceId: "trace_123", // optional - posthogProperties: { conversation_id: "abc123", paid: true }, // optional - posthogGroups: { company: "company_id_in_your_db" }, // optional - posthogPrivacyMode: false // optional - }); - console.log(completion.choices[0].message.content) + import OpenAI from 'openai' + const client = new OpenAI({ + baseURL: 'https://api.perplexity.ai', + apiKey: '', + }) + const response = await client.chat.completions.create({ + model: 'sonar', + max_completion_tokens: 1024, + messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }], + }) + console.log(response.choices[0].message.content) ``` - > **Notes:** - > - > - We also support the old `chat.completions` API. - > - This works with responses where `stream=True`. - > - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request. - > - > See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. + > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. 
See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. You can expect captured `$ai_generation` events to have the following properties: @@ -160,7 +145,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -172,7 +157,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/portkey.md b/skills/instrument-llm-analytics/references/portkey.md index 29c5360..681ac2c 100644 --- a/skills/instrument-llm-analytics/references/portkey.md +++ b/skills/instrument-llm-analytics/references/portkey.md @@ -2,128 +2,116 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required + **Full working examples** + + See the complete [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-portkey) and [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-portkey) examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Node.js wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-portkey) and [Python wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-portkey) examples. + **About Portkey** Portkey acts as an AI gateway that routes requests to 250+ LLM providers. The model string format (`@integration-slug/model`) determines which provider to use, where the slug is the name you chose when setting up the integration in Portkey. - Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics works best with our Python and Node SDKs. 
+ Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK. PostHog AI ### Python ```bash - pip install posthog + pip install openai portkey-ai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-openai-v2 ``` ### Node ```bash - npm install @posthog/ai posthog-node + npm install openai portkey-ai @posthog/ai @opentelemetry/sdk-node @opentelemetry/resources @opentelemetry/instrumentation-openai ``` 2. 2 - ## Install the OpenAI and Portkey SDKs + ## Set up OpenTelemetry tracing Required - Install the OpenAI and Portkey SDKs. The PostHog SDK instruments your LLM calls by wrapping the OpenAI client. The PostHog SDK **does not** proxy your calls. + Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. PostHog AI ### Python - ```bash - pip install openai portkey-ai + ```python + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) + ) + trace.set_tracer_provider(provider) + OpenAIInstrumentor().instrument() ``` ### Node - ```bash - npm install openai portkey-ai + ```typescript + import { NodeSDK } from '@opentelemetry/sdk-node' + import { resourceFromAttributes } from '@opentelemetry/resources' + import { PostHogSpanProcessor } from '@posthog/ai/otel' + import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai' + const sdk = new NodeSDK({ + 
resource: resourceFromAttributes({ + 'service.name': 'my-app', + 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog + foo: 'bar', // custom properties are passed through + }), + spanProcessors: [ + new PostHogSpanProcessor({ + apiKey: '', + host: 'https://us.i.posthog.com', + }), + ], + instrumentations: [new OpenAIInstrumentation()], + }) + sdk.start() ``` 3. 3 - ## Initialize PostHog and Portkey-routed client + ## Call Portkey Required - Initialize PostHog with your project token and host from [your project settings](https://app.posthog.com/settings/project), then pass it along with the Portkey gateway URL and your Portkey API key to our OpenAI wrapper. + Now, when you call Portkey with the OpenAI SDK, PostHog automatically captures `$ai_generation` events via the OpenTelemetry instrumentation. PostHog AI ### Python ```python - from posthog.ai.openai import OpenAI - from posthog import Posthog + import openai from portkey_ai import PORTKEY_GATEWAY_URL - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - client = OpenAI( + client = openai.OpenAI( base_url=PORTKEY_GATEWAY_URL, api_key="", - posthog_client=posthog ) - ``` - - ### Node - - ```typescript - import { OpenAI } from '@posthog/ai' - import { PostHog } from 'posthog-node' - import { PORTKEY_GATEWAY_URL } from 'portkey-ai' - const phClient = new PostHog( - '', - { host: 'https://us.i.posthog.com' } - ); - const openai = new OpenAI({ - baseURL: PORTKEY_GATEWAY_URL, - apiKey: '', - posthog: phClient, - }); - // ... your code here ... - // IMPORTANT: Shutdown the client when you're done to ensure all events are sent - phClient.shutdown() - ``` - - > **Note:** This also works with the `AsyncOpenAI` client. - - **Proxy note** - - These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the background to send the data. You can also use LLM analytics with other SDKs or our API, but you will need to capture the data in the right format. 
See the schema in the [manual capture section](/docs/llm-analytics/installation/manual-capture.md) for more details. - -4. 4 - - ## Call Portkey - - Required - - Now, when you call Portkey with the OpenAI SDK, PostHog automatically captures an `$ai_generation` event. You can also capture or modify additional properties with the distinct ID, trace ID, properties, groups, and privacy mode parameters. - - PostHog AI - - ### Python - - ```python response = client.chat.completions.create( model="@/gpt-5-mini", messages=[ {"role": "user", "content": "Tell me a fun fact about hedgehogs"} ], - posthog_distinct_id="user_123", # optional - posthog_trace_id="trace_123", # optional - posthog_properties={"conversation_id": "abc123", "paid": True}, # optional - posthog_groups={"company": "company_id_in_your_db"}, # optional - posthog_privacy_mode=False # optional ) print(response.choices[0].message.content) ``` @@ -131,25 +119,20 @@ ### Node ```typescript - const completion = await openai.chat.completions.create({ - model: "@/gpt-5-mini", - messages: [{ role: "user", content: "Tell me a fun fact about hedgehogs" }], - posthogDistinctId: "user_123", // optional - posthogTraceId: "trace_123", // optional - posthogProperties: { conversation_id: "abc123", paid: true }, // optional - posthogGroups: { company: "company_id_in_your_db" }, // optional - posthogPrivacyMode: false // optional - }); - console.log(completion.choices[0].message.content) + import OpenAI from 'openai' + import { PORTKEY_GATEWAY_URL } from 'portkey-ai' + const client = new OpenAI({ + baseURL: PORTKEY_GATEWAY_URL, + apiKey: '', + }) + const response = await client.chat.completions.create({ + model: '@/gpt-5-mini', + messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }], + }) + console.log(response.choices[0].message.content) ``` - > **Notes:** - > - > - This works with responses where `stream=True`. 
- > - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request. - > - The `@` prefix is the name you chose when setting up the integration in your [Portkey dashboard](https://app.portkey.ai/). - > - > See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. + > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. You can expect captured `$ai_generation` events to have the following properties: @@ -166,7 +149,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -178,7 +161,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/pydantic-ai.md b/skills/instrument-llm-analytics/references/pydantic-ai.md index c10f842..7b85e1d 100644 --- a/skills/instrument-llm-analytics/references/pydantic-ai.md +++ b/skills/instrument-llm-analytics/references/pydantic-ai.md @@ -2,81 +2,71 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - Setting up analytics starts with installing the PostHog SDK. The Pydantic AI integration uses PostHog's OpenAI wrapper. + **Full working examples** - ```bash - pip install posthog - ``` + See the complete [Python example](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-pydantic-ai) on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Python wrapper example](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-pydantic-ai). -2. 
2 - - ## Install Pydantic AI - - Required - - Install Pydantic AI with OpenAI support. PostHog instruments your LLM calls by wrapping the OpenAI client that Pydantic AI uses. + Install the OpenTelemetry SDK and Pydantic AI. ```bash - pip install 'pydantic-ai[openai]' + pip install "pydantic-ai[openai]" opentelemetry-sdk posthog[otel] ``` -3. 3 +2. 2 - ## Initialize PostHog and Pydantic AI + ## Set up OpenTelemetry tracing Required - Initialize PostHog with your project token and host from [your project settings](https://app.posthog.com/settings/project), then create a PostHog `AsyncOpenAI` wrapper, pass it to an `OpenAIProvider`, and use that with Pydantic AI's `OpenAIChatModel`. + Configure OpenTelemetry to export traces to PostHog and enable Pydantic AI's built-in OTel instrumentation. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. ```python + import os + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor from pydantic_ai import Agent - from pydantic_ai.models.openai import OpenAIChatModel - from pydantic_ai.providers.openai import OpenAIProvider - from posthog.ai.openai import AsyncOpenAI - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - openai_client = AsyncOpenAI( - api_key="your_openai_api_key", - posthog_client=posthog - ) - provider = OpenAIProvider(openai_client=openai_client) - model = OpenAIChatModel( - "gpt-5-mini", - provider=provider + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) ) + trace.set_tracer_provider(provider) + # 
Enable automatic OTel instrumentation for all Pydantic AI agents + Agent.instrument_all() ``` - **How this works** - - PostHog's `AsyncOpenAI` wrapper is a proper subclass of `openai.AsyncOpenAI`, so it works directly as the client for Pydantic AI's `OpenAIProvider`. PostHog captures `$ai_generation` events automatically without proxying your calls. - -4. 4 +3. 3 ## Run your agent Required - Create a Pydantic AI agent with the model and run it. PostHog automatically captures an `$ai_generation` event for each LLM call. + Create a Pydantic AI agent and run it. PostHog automatically captures an `$ai_generation` event for each LLM call via the OTel instrumentation. ```python - agent = Agent( - model, - system_prompt="You are a helpful assistant.", - ) - result = agent.run_sync( - "Tell me a fun fact about hedgehogs.", - # Pass PostHog metadata via the OpenAI client's extra params - ) + from pydantic_ai import Agent + from pydantic_ai.models.openai import OpenAIModel + model = OpenAIModel("gpt-4o-mini") + agent = Agent(model, system_prompt="You are a helpful assistant.") + result = agent.run_sync("Tell me a fun fact about hedgehogs.") print(result.output) ``` + > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. + You can expect captured `$ai_generation` events to have the following properties: | Property | Description | @@ -92,7 +82,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -104,7 +94,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 
4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/semantic-kernel.md b/skills/instrument-llm-analytics/references/semantic-kernel.md index 771aaa7..e82e898 100644 --- a/skills/instrument-llm-analytics/references/semantic-kernel.md +++ b/skills/instrument-llm-analytics/references/semantic-kernel.md @@ -2,78 +2,77 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - Setting up analytics starts with installing the PostHog SDK. The Semantic Kernel integration uses PostHog's OpenAI wrapper. + **Full working examples** - ```bash - pip install posthog - ``` - -2. 2 - - ## Install Semantic Kernel + See the complete [Python example](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-semantic-kernel) on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Python wrapper example](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-semantic-kernel). - Required - - Install Semantic Kernel with OpenAI support. PostHog instruments your LLM calls by wrapping the OpenAI client that Semantic Kernel uses under the hood. + Install the OpenTelemetry SDK, the OpenAI instrumentation, and Semantic Kernel. ```bash - pip install semantic-kernel + pip install semantic-kernel openai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-openai-v2 ``` -3. 3 +2. 2 - ## Initialize PostHog and Semantic Kernel + ## Set up OpenTelemetry tracing Required - Initialize PostHog with your project token and host from [your project settings](https://app.posthog.com/settings/project), then create a PostHog `AsyncOpenAI` wrapper and pass it to Semantic Kernel's `OpenAIChatCompletion` service. + Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. 
```python - from semantic_kernel import Kernel - from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion - from posthog.ai.openai import AsyncOpenAI - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - openai_client = AsyncOpenAI( - api_key="your_openai_api_key", - posthog_client=posthog - ) - kernel = Kernel() - kernel.add_service( - OpenAIChatCompletion( - ai_model_id="gpt-5-mini", - async_client=openai_client, + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", ) ) + trace.set_tracer_provider(provider) + OpenAIInstrumentor().instrument() ``` - **How this works** - - PostHog's `AsyncOpenAI` wrapper is a proper subclass of `openai.AsyncOpenAI`, so it works directly as the `async_client` parameter in Semantic Kernel's `OpenAIChatCompletion`. PostHog captures `$ai_generation` events automatically without proxying your calls. - -4. 4 +3. 3 - ## Run your kernel function + ## Run your kernel Required - Use Semantic Kernel as normal. PostHog automatically captures an `$ai_generation` event for each LLM call made through the wrapped client. + Use Semantic Kernel as normal. PostHog automatically captures an `$ai_generation` event for each LLM call made through the OpenAI SDK that Semantic Kernel uses internally. 
```python import asyncio + from semantic_kernel import Kernel + from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion async def main(): - result = await kernel.invoke_prompt("Tell me a fun fact about hedgehogs.") + kernel = Kernel() + kernel.add_service( + OpenAIChatCompletion( + ai_model_id="gpt-4o-mini", + api_key="your_openai_api_key", + ) + ) + result = await kernel.invoke_prompt("Tell me a fun fact about hedgehogs") print(result) asyncio.run(main()) ``` + > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. + You can expect captured `$ai_generation` events to have the following properties: | Property | Description | @@ -89,7 +88,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -101,7 +100,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/smolagents.md b/skills/instrument-llm-analytics/references/smolagents.md index 7eaa5b4..3b7c20c 100644 --- a/skills/instrument-llm-analytics/references/smolagents.md +++ b/skills/instrument-llm-analytics/references/smolagents.md @@ -2,77 +2,69 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - Setting up analytics starts with installing the PostHog SDK. The smolagents integration uses PostHog's OpenAI wrapper. + **Full working examples** - ```bash - pip install posthog - ``` + See the complete [Python example](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-smolagents) on GitHub. 
If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Python wrapper example](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-smolagents). -2. 2 - - ## Install smolagents and OpenAI - - Required - - Install smolagents and the OpenAI SDK. PostHog instruments your LLM calls by wrapping the OpenAI client, which you can pass to smolagents' `OpenAIServerModel`. + Install the OpenTelemetry SDK, the OpenAI instrumentation, and smolagents. ```bash - pip install smolagents openai + pip install smolagents openai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-openai-v2 ``` -3. 3 +2. 2 - ## Initialize PostHog and smolagents + ## Set up OpenTelemetry tracing Required - Initialize PostHog with your project token and host from [your project settings](https://app.posthog.com/settings/project), then create a PostHog OpenAI wrapper and pass it to smolagents' `OpenAIServerModel`. + Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. 
```python - from smolagents import CodeAgent, OpenAIServerModel - from posthog.ai.openai import OpenAI - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - openai_client = OpenAI( - api_key="your_openai_api_key", - posthog_client=posthog - ) - model = OpenAIServerModel( - model_id="gpt-5-mini", - client=openai_client, + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) ) + trace.set_tracer_provider(provider) + OpenAIInstrumentor().instrument() ``` - **How this works** - - PostHog's `OpenAI` wrapper is a drop-in replacement for `openai.OpenAI`. By passing it as the `client` to `OpenAIServerModel`, all LLM calls made by smolagents are automatically captured as `$ai_generation` events. - -4. 4 +3. 3 ## Run your agent Required - Use smolagents as normal. PostHog automatically captures an `$ai_generation` event for each LLM call made through the wrapped OpenAI client. + Use smolagents as normal. PostHog automatically captures an `$ai_generation` event for each LLM call made through the OpenAI SDK that smolagents uses internally. ```python - agent = CodeAgent( - tools=[], - model=model, - ) - result = agent.run( - "What is a fun fact about hedgehogs?" 
- ) + import os + from smolagents import CodeAgent, LiteLLMModel + model = LiteLLMModel(model_id="gpt-4o-mini", api_key=os.environ["OPENAI_API_KEY"]) + agent = CodeAgent(tools=[], model=model) + result = agent.run("Tell me a fun fact about hedgehogs") print(result) ``` + > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. + You can expect captured `$ai_generation` events to have the following properties: | Property | Description | @@ -88,7 +80,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -100,7 +92,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/together-ai.md b/skills/instrument-llm-analytics/references/together-ai.md index 1a10a4e..6956523 100644 --- a/skills/instrument-llm-analytics/references/together-ai.md +++ b/skills/instrument-llm-analytics/references/together-ai.md @@ -2,122 +2,112 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics works best with our Python and Node SDKs. + **Full working examples** - PostHog AI - - ### Python - - ```bash - pip install posthog - ``` - - ### Node - - ```bash - npm install @posthog/ai posthog-node - ``` - -2. 
2 - - ## Install the OpenAI SDK - - Required + See the complete [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-together-ai) and [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-together-ai) examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Node.js wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-together-ai) and [Python wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-together-ai) examples. - Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI client. The PostHog SDK **does not** proxy your calls. + Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK. PostHog AI ### Python ```bash - pip install openai + pip install openai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-openai-v2 ``` ### Node ```bash - npm install openai + npm install openai @posthog/ai @opentelemetry/sdk-node @opentelemetry/resources @opentelemetry/instrumentation-openai ``` -3. 3 +2. 2 - ## Initialize PostHog and OpenAI client + ## Set up OpenTelemetry tracing Required - We call Together AI through the OpenAI client and generate a response. We'll use PostHog's OpenAI provider to capture all the details of the call. Initialize PostHog with your PostHog project token and host from [your project settings](https://app.posthog.com/settings/project), then pass the PostHog client along with the Together AI config (the base URL and API key) to our OpenAI wrapper. + Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. 
PostHog AI ### Python ```python - from posthog.ai.openai import OpenAI - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - client = OpenAI( - base_url="https://api.together.xyz/v1", - api_key="", - posthog_client=posthog + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) ) + trace.set_tracer_provider(provider) + OpenAIInstrumentor().instrument() ``` ### Node ```typescript - import { OpenAI } from '@posthog/ai' - import { PostHog } from 'posthog-node' - const phClient = new PostHog( - '', - { host: 'https://us.i.posthog.com' } - ); - const openai = new OpenAI({ - baseURL: 'https://api.together.xyz/v1', - apiKey: '', - posthog: phClient, - }); - // ... your code here ... 
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent - phClient.shutdown() + import { NodeSDK } from '@opentelemetry/sdk-node' + import { resourceFromAttributes } from '@opentelemetry/resources' + import { PostHogSpanProcessor } from '@posthog/ai/otel' + import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai' + const sdk = new NodeSDK({ + resource: resourceFromAttributes({ + 'service.name': 'my-app', + 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog + foo: 'bar', // custom properties are passed through + }), + spanProcessors: [ + new PostHogSpanProcessor({ + apiKey: '', + host: 'https://us.i.posthog.com', + }), + ], + instrumentations: [new OpenAIInstrumentation()], + }) + sdk.start() ``` - > **Note:** This also works with the `AsyncOpenAI` client. - - **Proxy note** - - These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the background to send the data. You can also use LLM analytics with other SDKs or our API, but you will need to capture the data in the right format. See the schema in the [manual capture section](/docs/llm-analytics/installation/manual-capture.md) for more details. - -4. 4 +3. 3 ## Call Together AI Required - Now, when you call Together AI with the OpenAI SDK, PostHog automatically captures an `$ai_generation` event. You can also capture or modify additional properties with the distinct ID, trace ID, properties, groups, and privacy mode parameters. + Now, when you use the OpenAI SDK to call Together AI, PostHog automatically captures `$ai_generation` events via the OpenTelemetry instrumentation. 
PostHog AI ### Python ```python + import openai + client = openai.OpenAI( + base_url="https://api.together.xyz/v1", + api_key="", + ) response = client.chat.completions.create( model="meta-llama/Llama-3.3-70B-Instruct-Turbo", + max_completion_tokens=1024, messages=[ {"role": "user", "content": "Tell me a fun fact about hedgehogs"} ], - posthog_distinct_id="user_123", # optional - posthog_trace_id="trace_123", # optional - posthog_properties={"conversation_id": "abc123", "paid": True}, # optional - posthog_groups={"company": "company_id_in_your_db"}, # optional - posthog_privacy_mode=False # optional ) print(response.choices[0].message.content) ``` @@ -125,25 +115,20 @@ ### Node ```typescript - const completion = await openai.chat.completions.create({ - model: "meta-llama/Llama-3.3-70B-Instruct-Turbo", - messages: [{ role: "user", content: "Tell me a fun fact about hedgehogs" }], - posthogDistinctId: "user_123", // optional - posthogTraceId: "trace_123", // optional - posthogProperties: { conversation_id: "abc123", paid: true }, // optional - posthogGroups: { company: "company_id_in_your_db" }, // optional - posthogPrivacyMode: false // optional - }); - console.log(completion.choices[0].message.content) + import OpenAI from 'openai' + const client = new OpenAI({ + baseURL: 'https://api.together.xyz/v1', + apiKey: '', + }) + const response = await client.chat.completions.create({ + model: 'meta-llama/Llama-3.3-70B-Instruct-Turbo', + max_completion_tokens: 1024, + messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }], + }) + console.log(response.choices[0].message.content) ``` - > **Notes:** - > - > - We also support the old `chat.completions` API. - > - This works with responses where `stream=True`. - > - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request. - > - > See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. 
+ > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. You can expect captured `$ai_generation` events to have the following properties: @@ -160,7 +145,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -172,7 +157,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 4 ## Next steps diff --git a/skills/instrument-llm-analytics/references/vercel-ai.md b/skills/instrument-llm-analytics/references/vercel-ai.md index 5230c10..7b592b4 100644 --- a/skills/instrument-llm-analytics/references/vercel-ai.md +++ b/skills/instrument-llm-analytics/references/vercel-ai.md @@ -12,30 +12,28 @@ npm install @posthog/ai @ai-sdk/openai ai @opentelemetry/sdk-node @opentelemetry/resources ``` - **No proxy** - - These SDKs **do not** proxy your calls. They only send analytics data to PostHog in the background. - 2. 2 ## Set up the OpenTelemetry exporter Required - Initialize the OpenTelemetry SDK with PostHog's `PostHogTraceExporter`. This sends `gen_ai.*` spans directly to PostHog's OTLP ingestion endpoint. PostHog converts these into `$ai_generation` events automatically. + Initialize the OpenTelemetry SDK with PostHog's `PostHogSpanProcessor`. This sends `gen_ai.*` spans directly to PostHog's OTLP ingestion endpoint. PostHog converts these into `$ai_generation` events automatically. 
```typescript import { NodeSDK } from '@opentelemetry/sdk-node' import { resourceFromAttributes } from '@opentelemetry/resources' - import { PostHogTraceExporter } from '@posthog/ai/otel' + import { PostHogSpanProcessor } from '@posthog/ai/otel' const sdk = new NodeSDK({ resource: resourceFromAttributes({ - 'service.name': 'my-ai-app', - }), - traceExporter: new PostHogTraceExporter({ - apiKey: '', - host: 'https://us.i.posthog.com', + 'service.name': 'my-app', }), + spanProcessors: [ + new PostHogSpanProcessor({ + apiKey: '', + host: 'https://us.i.posthog.com', + }), + ], }) sdk.start() ``` @@ -63,7 +61,6 @@ }, }) console.log(result.text) - await sdk.shutdown() ``` > **Note:** If you want to capture LLM events anonymously, omit the `posthog_distinct_id` metadata field. See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. diff --git a/skills/instrument-llm-analytics/references/xai.md b/skills/instrument-llm-analytics/references/xai.md index 7bdd3ad..19b723f 100644 --- a/skills/instrument-llm-analytics/references/xai.md +++ b/skills/instrument-llm-analytics/references/xai.md @@ -2,122 +2,112 @@ 1. 1 - ## Install the PostHog SDK + ## Install dependencies Required - Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics works best with our Python and Node SDKs. + **Full working examples** - PostHog AI - - ### Python - - ```bash - pip install posthog - ``` - - ### Node - - ```bash - npm install @posthog/ai posthog-node - ``` - -2. 2 - - ## Install the OpenAI SDK - - Required + See the complete [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-xai) and [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-xai) examples on GitHub. 
If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Node.js wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-xai) and [Python wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-xai) examples. - Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI client. The PostHog SDK **does not** proxy your calls. + Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK. PostHog AI ### Python ```bash - pip install openai + pip install openai opentelemetry-sdk posthog[otel] opentelemetry-instrumentation-openai-v2 ``` ### Node ```bash - npm install openai + npm install openai @posthog/ai @opentelemetry/sdk-node @opentelemetry/resources @opentelemetry/instrumentation-openai ``` -3. 3 +2. 2 - ## Initialize PostHog and OpenAI client + ## Set up OpenTelemetry tracing Required - We call xAI through the OpenAI-compatible API and generate a response. We'll use PostHog's OpenAI provider to capture all the details of the call. Initialize PostHog with your PostHog project token and host from [your project settings](https://app.posthog.com/settings/project), then pass the PostHog client along with the xAI config (the base URL and API key) to our OpenAI wrapper. + Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically. 
PostHog AI ### Python ```python - from posthog.ai.openai import OpenAI - from posthog import Posthog - posthog = Posthog( - "", - host="https://us.i.posthog.com" - ) - client = OpenAI( - base_url="https://api.x.ai/v1", - api_key="", - posthog_client=posthog + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.resources import Resource, SERVICE_NAME + from posthog.ai.otel import PostHogSpanProcessor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + resource = Resource(attributes={ + SERVICE_NAME: "my-app", + "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog + "foo": "bar", # custom properties are passed through + }) + provider = TracerProvider(resource=resource) + provider.add_span_processor( + PostHogSpanProcessor( + api_key="", + host="https://us.i.posthog.com", + ) ) + trace.set_tracer_provider(provider) + OpenAIInstrumentor().instrument() ``` ### Node ```typescript - import { OpenAI } from '@posthog/ai' - import { PostHog } from 'posthog-node' - const phClient = new PostHog( - '', - { host: 'https://us.i.posthog.com' } - ); - const openai = new OpenAI({ - baseURL: 'https://api.x.ai/v1', - apiKey: '', - posthog: phClient, - }); - // ... your code here ... 
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent - phClient.shutdown() + import { NodeSDK } from '@opentelemetry/sdk-node' + import { resourceFromAttributes } from '@opentelemetry/resources' + import { PostHogSpanProcessor } from '@posthog/ai/otel' + import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai' + const sdk = new NodeSDK({ + resource: resourceFromAttributes({ + 'service.name': 'my-app', + 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog + foo: 'bar', // custom properties are passed through + }), + spanProcessors: [ + new PostHogSpanProcessor({ + apiKey: '', + host: 'https://us.i.posthog.com', + }), + ], + instrumentations: [new OpenAIInstrumentation()], + }) + sdk.start() ``` - > **Note:** This also works with the `AsyncOpenAI` client. - - **Proxy note** - - These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the background to send the data. You can also use LLM analytics with other SDKs or our API, but you will need to capture the data in the right format. See the schema in the [manual capture section](/docs/llm-analytics/installation/manual-capture.md) for more details. - -4. 4 +3. 3 ## Call xAI Required - Now, when you call xAI with the OpenAI SDK, PostHog automatically captures an `$ai_generation` event. You can also capture or modify additional properties with the distinct ID, trace ID, properties, groups, and privacy mode parameters. + Now, when you use the OpenAI SDK to call xAI, PostHog automatically captures `$ai_generation` events via the OpenTelemetry instrumentation. 
PostHog AI ### Python ```python + import openai + client = openai.OpenAI( + base_url="https://api.x.ai/v1", + api_key="", + ) response = client.chat.completions.create( model="grok-3", + max_completion_tokens=1024, messages=[ {"role": "user", "content": "Tell me a fun fact about hedgehogs"} ], - posthog_distinct_id="user_123", # optional - posthog_trace_id="trace_123", # optional - posthog_properties={"conversation_id": "abc123", "paid": True}, # optional - posthog_groups={"company": "company_id_in_your_db"}, # optional - posthog_privacy_mode=False # optional ) print(response.choices[0].message.content) ``` @@ -125,25 +115,20 @@ ### Node ```typescript - const completion = await openai.chat.completions.create({ - model: "grok-3", - messages: [{ role: "user", content: "Tell me a fun fact about hedgehogs" }], - posthogDistinctId: "user_123", // optional - posthogTraceId: "trace_123", // optional - posthogProperties: { conversation_id: "abc123", paid: true }, // optional - posthogGroups: { company: "company_id_in_your_db" }, // optional - posthogPrivacyMode: false // optional - }); - console.log(completion.choices[0].message.content) + import OpenAI from 'openai' + const client = new OpenAI({ + baseURL: 'https://api.x.ai/v1', + apiKey: '', + }) + const response = await client.chat.completions.create({ + model: 'grok-3', + max_completion_tokens: 1024, + messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }], + }) + console.log(response.choices[0].message.content) ``` - > **Notes:** - > - > - We also support the old `chat.completions` API. - > - This works with responses where `stream=True`. - > - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request. - > - > See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. + > **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id` resource attribute. 
See our docs on [anonymous vs identified events](/docs/data/anonymous-vs-identified-events.md) to learn more. You can expect captured `$ai_generation` events to have the following properties: @@ -160,7 +145,7 @@ | $ai_total_cost_usd | The total cost in USD (input + output) | | [[...]](/docs/llm-analytics/generations.md#event-properties) | See [full list](/docs/llm-analytics/generations.md#event-properties) of properties | -5. ## Verify traces and generations +4. ## Verify traces and generations Recommended @@ -172,7 +157,7 @@ [Check for LLM events in PostHog](https://app.posthog.com/llm-analytics/generations) -6. 5 +5. 4 ## Next steps diff --git a/skills/instrument-logs/references/datadog.md b/skills/instrument-logs/references/datadog.md index 2619abb..dd8f5be 100644 --- a/skills/instrument-logs/references/datadog.md +++ b/skills/instrument-logs/references/datadog.md @@ -75,7 +75,7 @@ If you're already using Datadog to collect logs, you can forward them to PostHog 1. Restart the Datadog Agent (or your log forwarder) to apply the configuration 2. Generate some log entries in your application - 3. Check the PostHog logs interface for your log entries + 3. Check the PostHog Logs interface for your log entries 4. Verify the logs appear in your project [View your logs in PostHog](https://app.posthog.com/logs) diff --git a/skills/instrument-logs/references/go.md b/skills/instrument-logs/references/go.md index 8c22b0f..d110fb5 100644 --- a/skills/instrument-logs/references/go.md +++ b/skills/instrument-logs/references/go.md @@ -125,7 +125,7 @@ Once everything is configured, test that logs are flowing into PostHog: 1. Send a test log from your application - 2. Check the PostHog logs interface for your log entries + 2. Check the PostHog Logs interface for your log entries 3. 
Verify the logs appear in your project [View your logs in PostHog](https://app.posthog.com/logs) diff --git a/skills/instrument-logs/references/java.md b/skills/instrument-logs/references/java.md index 36c8fbf..93135bc 100644 --- a/skills/instrument-logs/references/java.md +++ b/skills/instrument-logs/references/java.md @@ -117,7 +117,7 @@ Once everything is configured, test that logs are flowing into PostHog: 1. Send a test log from your application - 2. Check the PostHog logs interface for your log entries + 2. Check the PostHog Logs interface for your log entries 3. Verify the logs appear in your project [View your logs in PostHog](https://app.posthog.com/logs) diff --git a/skills/instrument-logs/references/nextjs.md b/skills/instrument-logs/references/nextjs.md index be6ac5e..4c02360 100644 --- a/skills/instrument-logs/references/nextjs.md +++ b/skills/instrument-logs/references/nextjs.md @@ -153,7 +153,7 @@ Once everything is configured, test that logs are flowing into PostHog: 1. Send a test log from your application - 2. Check the PostHog logs interface for your log entries + 2. Check the PostHog Logs interface for your log entries 3. Verify the logs appear in your project [View your logs in PostHog](https://app.posthog.com/logs) diff --git a/skills/instrument-logs/references/nodejs.md b/skills/instrument-logs/references/nodejs.md index a5d2ca0..789f9ac 100644 --- a/skills/instrument-logs/references/nodejs.md +++ b/skills/instrument-logs/references/nodejs.md @@ -105,7 +105,7 @@ Once everything is configured, test that logs are flowing into PostHog: 1. Send a test log from your application - 2. Check the PostHog logs interface for your log entries + 2. Check the PostHog Logs interface for your log entries 3. 
Verify the logs appear in your project [View your logs in PostHog](https://app.posthog.com/logs) diff --git a/skills/instrument-logs/references/other.md b/skills/instrument-logs/references/other.md index d145599..1ba5d9a 100644 --- a/skills/instrument-logs/references/other.md +++ b/skills/instrument-logs/references/other.md @@ -1,6 +1,6 @@ # Other languages logs installation - Docs -PostHog logs works with any OpenTelemetry-compatible client. Check the [OpenTelemetry documentation](https://opentelemetry.io/docs/) for your specific language or framework. +PostHog Logs works with any OpenTelemetry-compatible client. Check the [OpenTelemetry documentation](https://opentelemetry.io/docs/) for your specific language or framework. 1. 1 @@ -11,7 +11,7 @@ PostHog logs works with any OpenTele The key requirements are: - Use OTLP (OpenTelemetry Protocol) for log export over HTTP - - Send logs to your PostHog logs endpoint (see configuration step below) + - Send logs to your PostHog Logs endpoint (see configuration step below) - Include your project token in the Authorization header or as a `?token=` query parameter Find the OpenTelemetry SDK for your language in the [official registry](https://opentelemetry.io/ecosystem/registry/). @@ -69,7 +69,7 @@ PostHog logs works with any OpenTele Once everything is configured, test that logs are flowing into PostHog: 1. Send a test log from your application - 2. Check the PostHog logs interface for your log entries + 2. Check the PostHog Logs interface for your log entries 3.
Verify the logs appear in your project [View your logs in PostHog](https://app.posthog.com/logs) diff --git a/skills/instrument-logs/references/python.md b/skills/instrument-logs/references/python.md index cc5c50b..84f0f26 100644 --- a/skills/instrument-logs/references/python.md +++ b/skills/instrument-logs/references/python.md @@ -104,7 +104,7 @@ Once everything is configured, test that logs are flowing into PostHog: 1. Send a test log from your application - 2. Check the PostHog logs interface for your log entries + 2. Check the PostHog Logs interface for your log entries 3. Verify the logs appear in your project [View your logs in PostHog](https://app.posthog.com/logs) diff --git a/skills/instrument-logs/references/start-here.md b/skills/instrument-logs/references/start-here.md index b67d9aa..5dd9447 100644 --- a/skills/instrument-logs/references/start-here.md +++ b/skills/instrument-logs/references/start-here.md @@ -4,7 +4,7 @@ ## Install and configure logging -PostHog logs is a powerful logging solution that works with the OpenTelemetry Protocol (OTLP). You don't need any vendor specific SDKs – just use standard OpenTelemetry libraries. +PostHog Logs is a powerful logging solution that works with the OpenTelemetry Protocol (OTLP). You don't need any vendor-specific SDKs – just use standard OpenTelemetry libraries. Install and configure your logging client to send logs to PostHog: diff --git a/skills/instrument-logs/references/troubleshooting.md b/skills/instrument-logs/references/troubleshooting.md index 6baaead..4ee5bc6 100644 --- a/skills/instrument-logs/references/troubleshooting.md +++ b/skills/instrument-logs/references/troubleshooting.md @@ -19,7 +19,7 @@ Ask PostHog AI ## Connection issues -**Problem**: Cannot connect to the PostHog logs endpoint. +**Problem**: Cannot connect to the PostHog Logs endpoint. 
**Solutions**: diff --git a/skills/instrument-product-analytics/SKILL.md b/skills/instrument-product-analytics/SKILL.md index 8045a6c..79b06f1 100644 --- a/skills/instrument-product-analytics/SKILL.md +++ b/skills/instrument-product-analytics/SKILL.md @@ -71,7 +71,7 @@ STEP 10: Verify and clean up. - `references/EXAMPLE-react-react-router-7-framework.md` - react-react-router-7-framework example project code - `references/EXAMPLE-react-react-router-7-data.md` - react-react-router-7-data example project code - `references/EXAMPLE-react-react-router-7-declarative.md` - react-react-router-7-declarative example project code -- `references/EXAMPLE-nuxt-3.6.md` - nuxt-3.6 example project code +- `references/EXAMPLE-nuxt-3-6.md` - nuxt-3-6 example project code - `references/EXAMPLE-nuxt-4.md` - nuxt-4 example project code - `references/EXAMPLE-vue-3.md` - vue-3 example project code - `references/EXAMPLE-react-tanstack-router-file-based.md` - react-tanstack-router-file-based example project code diff --git a/skills/instrument-product-analytics/references/EXAMPLE-nuxt-3.6.md b/skills/instrument-product-analytics/references/EXAMPLE-nuxt-3-6.md similarity index 99% rename from skills/instrument-product-analytics/references/EXAMPLE-nuxt-3.6.md rename to skills/instrument-product-analytics/references/EXAMPLE-nuxt-3-6.md index 07717a9..fee7d00 100644 --- a/skills/instrument-product-analytics/references/EXAMPLE-nuxt-3.6.md +++ b/skills/instrument-product-analytics/references/EXAMPLE-nuxt-3-6.md @@ -1,7 +1,7 @@ -# PostHog nuxt-3.6 Example Project +# PostHog nuxt-3-6 Example Project Repository: https://github.com/PostHog/context-mill -Path: basics/nuxt-3.6 +Path: basics/nuxt-3-6 --- diff --git a/skills/instrument-product-analytics/references/EXAMPLE-nuxt-4.md b/skills/instrument-product-analytics/references/EXAMPLE-nuxt-4.md index 5c35df1..1cbed60 100644 --- a/skills/instrument-product-analytics/references/EXAMPLE-nuxt-4.md +++ 
b/skills/instrument-product-analytics/references/EXAMPLE-nuxt-4.md @@ -13,7 +13,7 @@ This is a [Nuxt 4](https://nuxt.com) example demonstrating PostHog integration w Nuxt 4 supports the `@posthog/nuxt` package, which provides automatic PostHog integration with built-in error tracking, source map uploads, and simplified configuration. This is the recommended approach for Nuxt 4+. -For Nuxt 3.0 - 3.6, you must use the `posthog-js` and `posthog-node` packages directly instead. See the [Nuxt 3.6 example](../nuxt-3.6) for that approach. +For Nuxt 3.0 - 3.6, you must use the `posthog-js` and `posthog-node` packages directly instead. See the [Nuxt 3.6 example](../nuxt-3-6) for that approach. ## Features diff --git a/skills/instrument-product-analytics/references/python.md b/skills/instrument-product-analytics/references/python.md index 11dc3b2..1db6612 100644 --- a/skills/instrument-product-analytics/references/python.md +++ b/skills/instrument-product-analytics/references/python.md @@ -740,7 +740,7 @@ Python PostHog AI ```python -posthog.debug = True # + +posthog.debug = True ``` ## Disabling requests during tests diff --git a/skills/managing-subscriptions/SKILL.md b/skills/managing-subscriptions/SKILL.md new file mode 100644 index 0000000..554dbdd --- /dev/null +++ b/skills/managing-subscriptions/SKILL.md @@ -0,0 +1,162 @@ +--- +name: managing-subscriptions +description: 'Manage PostHog subscriptions — scheduled email, Slack, or webhook deliveries of insight or dashboard snapshots. Use when the user wants to subscribe to an insight or dashboard, check existing subscriptions, change delivery frequency, add or remove recipients, or stop receiving updates.' +--- + +# Managing subscriptions + +This skill guides you through managing PostHog subscriptions. +Subscriptions deliver scheduled snapshots of insights or dashboards via email, Slack, or webhook. 
+ +## When to use this skill + +Use this skill when the user: + +- Wants to "track", "follow", "subscribe to", or "get updates" about an insight or dashboard +- Asks for "daily updates", "weekly reports", or "send me this every morning" +- Wants to know what subscriptions they have +- Asks to stop, pause, or unsubscribe from something +- Wants to change who receives an update or how often + +## Subscriptions vs alerts + +Subscriptions and alerts serve different purposes: + +- **Subscriptions** deliver a snapshot on a fixed schedule (daily, weekly, etc.) regardless of the data +- **Alerts** fire only when a condition is met (threshold crossed, anomaly detected) + +If the user says "notify me when this drops below 100", use alerts. +If the user says "send me this every morning", use subscriptions. + +## Workflow + +### Listing existing subscriptions + +Before creating a new subscription, check if one already exists. + +Use `subscriptions-list` with optional filters: + +- Filter by insight: pass the `insight` query parameter with the insight ID +- Filter by dashboard: pass the `dashboard` query parameter with the dashboard ID +- Filter by channel: pass `target_type` as `email`, `slack`, or `webhook` + +### Creating a subscription + +#### Step 1: Ask the user how they want to receive it + +**Always ask the user whether they want email or Slack delivery** before creating a subscription. +Do not assume a channel — ask explicitly: + +> Would you like to receive this via **email** or **Slack**? + +If the user says Slack, you must verify the integration is available (see step 2). +If the user doesn't have a preference, suggest email as the simplest option. + +#### Step 2: Verify channel availability + +**Email** requires no setup — it works out of the box. You just need the user's email address. +Get it from the user context or from `org-members-list`. + +**Slack** requires a connected Slack integration. Before creating a Slack subscription: + +1. 
Call `integrations-list` and look for an integration where `kind` is `"slack"` +2. If a Slack integration exists, note its `id` — you'll need it as `integration_id` +3. If **no Slack integration exists**, tell the user: + > Slack isn't connected to this project yet. You can set it up in + > [Project settings > Integrations](/settings/integrations). + > In the meantime, would you like to receive this via email instead? + +Slack setup requires an OAuth flow in the browser — it cannot be done via MCP. + +**Webhook** requires the user to provide a URL. Verify it looks like a valid URL before submitting. + +#### Step 3: Identify the target + +Get the insight ID or dashboard ID. If the user provides a URL like `/project/2/insights/pKxzopBG`, +fetch the insight first with `insight-get` to get the numeric ID. + +#### Step 4: Determine delivery settings from the user's request + +| User says | Parameters | +| --------------------------------------- | ------------------------------------------------------------------------- | +| "every day" / "daily" / "every morning" | `frequency: "daily"` | +| "every week" / "weekly" | `frequency: "weekly"` | +| "every Monday" | `frequency: "weekly"`, `byweekday: ["monday"]` | +| "every month" / "monthly" | `frequency: "monthly"` | +| "twice a week" | `frequency: "weekly"`, `interval: 1`, `byweekday: ["monday", "thursday"]` | + +#### Step 5: Create with `subscriptions-create` + +For an insight subscription via email: + +```json +{ + "insight": 12345, + "target_type": "email", + "target_value": "user@example.com", + "frequency": "daily", + "start_date": "2025-01-01T09:00:00Z" +} +``` + +For a dashboard subscription (requires selecting which insights to include, max 6): + +```json +{ + "dashboard": 67, + "dashboard_export_insights": [101, 102, 103], + "target_type": "email", + "target_value": "user@example.com", + "frequency": "weekly", + "byweekday": ["monday"], + "start_date": "2025-01-01T09:00:00Z" +} +``` + +For Slack delivery, include 
the `integration_id` from step 2: + +```json +{ + "insight": 12345, + "target_type": "slack", + "target_value": "#general", + "integration_id": 789, + "frequency": "daily", + "start_date": "2025-01-01T09:00:00Z" +} +``` + +### Updating a subscription + +Use `subscriptions-partial-update` with the subscription ID. Common updates: + +- **Change frequency**: `{"frequency": "weekly", "byweekday": ["monday"]}` +- **Add recipients**: Update `target_value` with the full comma-separated list +- **Change channel**: Update `target_type` and `target_value` together + +### Deactivating a subscription + +Subscriptions are soft-deleted. Use `subscriptions-partial-update`: + +```json +{ + "id": 456, + "deleted": true +} +``` + +## Defaults + +When the user doesn't specify details: + +- **Frequency**: `"daily"` +- **Channel**: email to the current user +- **Start date**: now (ISO 8601) +- **Title**: auto-generated from the insight/dashboard name if not specified + +## Error handling + +- **Duplicate check**: If a subscription already exists for the same insight/dashboard and channel, inform the user and offer to update it rather than creating a duplicate +- **Slack not connected**: If a Slack subscription is requested but no Slack integration exists, explain that Slack must be connected in [Project settings > Integrations](/settings/integrations) first, then offer email as an alternative. Do not attempt to create the subscription — it will fail with a validation error +- **Slack integration wrong team**: The Slack integration must belong to the same PostHog team. If `integrations-list` returns Slack integrations but creation still fails, the integration may be misconfigured +- **Dashboard insights**: Dashboard subscriptions require at least 1 and at most 6 insights selected via `dashboard_export_insights`. If the user doesn't specify which insights, fetch the dashboard with `dashboard-get` and select the first 6 insights from its tiles