Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions packages/ai-providers/server-ai-langchain/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,9 @@ dependencies = [
"langchain>=1.0.0",
]

[project.optional-dependencies]
graph = ["langgraph>=0.1.0"]

[project.urls]
Homepage = "https://docs.launchdarkly.com/sdk/ai/python"
Repository = "https://github.com/launchdarkly/python-server-sdk-ai"
Expand All @@ -36,6 +39,7 @@ dev = [
"mypy==1.18.2",
"pycodestyle>=2.11.0",
"isort>=5.12.0",
"langgraph>=0.1.0",
]

[build-system]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -74,11 +74,19 @@ def handle_traversal(node: AgentGraphNode, ctx: dict) -> None:
if node_config.model:
lc_model = create_langchain_model(node_config)
tool_defs = node_config.model.get_parameter('tools') or []
tool_fns = [
tools_ref[t.get('name', '')]
for t in tool_defs
if t.get('name', '') in tools_ref
]
tool_fns = []
for t in tool_defs:
config_key = t.get('name', '')
if config_key not in tools_ref:
continue
from langchain_core.tools import StructuredTool
tool_fns.append(
StructuredTool.from_function(
func=tools_ref[config_key],
name=config_key,
description=t.get('description', ''),
)
)
model = lc_model.bind_tools(tool_fns) if tool_fns else lc_model

def invoke(state: WorkflowState) -> WorkflowState:
Expand Down
4 changes: 4 additions & 0 deletions packages/ai-providers/server-ai-openai/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,9 @@ dependencies = [
"openai>=1.0.0",
]

[project.optional-dependencies]
agents = ["openai-agents>=0.0.1"]

[project.urls]
Homepage = "https://docs.launchdarkly.com/sdk/ai/python"
Repository = "https://github.com/launchdarkly/python-server-sdk-ai"
Expand All @@ -35,6 +38,7 @@ dev = [
"mypy==1.18.2",
"pycodestyle>=2.11.0",
"isort>=5.12.0",
"openai-agents>=0.0.1",
]

[build-system]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
from ldai_openai.openai_helper import (
NATIVE_OPENAI_TOOLS,
get_ai_usage_from_response,
get_tool_calls_from_run_items,
)


Expand Down Expand Up @@ -69,6 +70,7 @@ async def run(self, input: Any) -> AgentGraphResult:
root_agent = self._build_agents(path, state)
result = await Runner.run(root_agent, str(input))
self._flush_final_segment(state, tracker, result)
self._track_tool_calls(result, tracker)

duration = (time.perf_counter_ns() - start_ns) // 1_000_000

Expand Down Expand Up @@ -139,6 +141,17 @@ def _flush_final_segment(
config_tracker.track_duration(int(duration_ms), graph_key=gk)
config_tracker.track_success(graph_key=gk)

def _track_tool_calls(self, result: Any, tracker: Any) -> None:
    """Attribute every tool call in the run result to the node that issued it.

    Walks the (agent_name, tool_name) pairs extracted from the run's
    new_items and forwards each to the owning node's config tracker.

    :param result: The RunResult returned by Runner.run
    :param tracker: The graph-level tracker, or None; only its graph_key is read
    """
    graph_key = None if tracker is None else tracker.graph_key
    for agent_name, tool_name in get_tool_calls_from_run_items(result.new_items):
        node = self._graph.get_node(agent_name)
        if node is None:
            # Tool call came from an agent this graph doesn't manage.
            continue
        node_tracker = node.get_config().tracker
        if node_tracker is not None:
            node_tracker.track_tool_call(tool_name, graph_key=graph_key)

def _handle_handoff(
self,
run_ctx: Any,
Expand Down Expand Up @@ -207,12 +220,10 @@ def _build_agents(self, path: List[str], state: _RunState) -> Any:
Agent,
FunctionTool,
Handoff,
RunContextWrapper,
Tool,
handoff,
)
from agents.extensions.handoff_prompt import RECOMMENDED_PROMPT_PREFIX
from agents.tool_context import ToolContext
except ImportError as exc:
raise ImportError(
"openai-agents is required for OpenAIAgentGraphRunner. "
Expand Down Expand Up @@ -269,18 +280,12 @@ def _make_tool(
description: str,
params_schema: dict,
) -> FunctionTool:
def wrapped(tool_ctx: ToolContext, tool_args: str) -> Any:
def wrapped(tool_ctx: Any, tool_args: str) -> Any:
import json
try:
args = json.loads(tool_args)
except Exception:
args = {}
path.append(name)
if config_tracker is not None:
config_tracker.track_tool_call(
name,
graph_key=tracker.graph_key if tracker is not None else None,
)
return fn(**args)

return FunctionTool(
Expand All @@ -301,6 +306,7 @@ def wrapped(tool_ctx: ToolContext, tool_args: str) -> Any:

return Agent(
name=node_config.key,
model=model.name,
instructions=f'{RECOMMENDED_PROMPT_PREFIX} {node_config.instructions or ""}',
handoffs=list(agent_handoffs),
tools=list(agent_tools),
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from typing import Any, Dict, Iterable, List, Optional, cast
from typing import Any, Dict, Iterable, List, Optional, Tuple, cast

from ldai import LDMessage
from ldai.providers.types import LDAIMetrics
Expand All @@ -8,17 +8,9 @@

def _build_native_tool_map() -> Dict[str, Any]:
try:
from agents import (
CodeInterpreterTool,
FileSearchTool,
ImageGenerationTool,
WebSearchTool,
)
from agents import WebSearchTool
return {
'web_search_tool': lambda _: WebSearchTool(),
'file_search_tool': lambda _: FileSearchTool(),
'code_interpreter': lambda _: CodeInterpreterTool(),
'image_generation': lambda _: ImageGenerationTool(),
}
except ImportError:
return {}
Expand Down Expand Up @@ -80,3 +72,46 @@ def get_ai_metrics_from_response(response: Any) -> LDAIMetrics:
:return: LDAIMetrics with success status and token usage
"""
return LDAIMetrics(success=True, usage=get_ai_usage_from_response(response))


# Native tool raw_item type names don't always match the LD config key convention.
# Maps the raw_item 'type' value emitted by openai-agents to the LD config key
# used when tracking the tool call; types not listed here pass through unchanged
# (see get_tool_calls_from_run_items, which uses .get(raw_type, raw_type)).
_NATIVE_TOOL_TYPE_TO_CONFIG_KEY = {
    'web_search': 'web_search_tool',
}


def get_tool_calls_from_run_items(new_items: List[Any]) -> List[Tuple[str, str]]:
    """
    Extract (agent_name, tool_name) pairs from RunResult.new_items.

    Covers both custom FunctionTools (tracked by their config key) and native
    hosted tools (web search, file search, code interpreter, image generation).

    :param new_items: The new_items list from a RunResult
    :return: List of (agent_name, tool_name) tuples
    """
    try:
        from agents.items import ToolCallItem
        from openai.types.responses import ResponseFunctionToolCall
    except ImportError:
        # Optional dependencies missing: there can be no tool-call items.
        return []

    pairs: List[Tuple[str, str]] = []
    for item in (i for i in new_items if isinstance(i, ToolCallItem)):
        agent_name = getattr(item.agent, 'name', None)
        if not agent_name:
            # Can't attribute the call without an agent name.
            continue
        raw = item.raw_item
        if isinstance(raw, ResponseFunctionToolCall):
            # Custom FunctionTools are registered as 'tool_{config_key}'
            tool_name = raw.name.removeprefix('tool_')
        else:
            raw_type = getattr(raw, 'type', None)
            if not raw_type and isinstance(raw, dict):
                raw_type = raw.get('type')
            if not raw_type:
                continue
            # Translate native type names to LD config keys where they differ.
            tool_name = _NATIVE_TOOL_TYPE_TO_CONFIG_KEY.get(raw_type, raw_type)
        if tool_name:
            pairs.append((agent_name, tool_name))
    return pairs
Loading