Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
from ldai_langchain.langchain_agent_runner import LangChainAgentRunner
from ldai_langchain.langchain_helper import (
convert_messages_to_langchain,
create_langchain_model,
Expand All @@ -16,6 +17,7 @@
'__version__',
'LangChainRunnerFactory',
'LangChainModelRunner',
'LangChainAgentRunner',
'convert_messages_to_langchain',
'create_langchain_model',
'get_ai_metrics_from_response',
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
"""LangChain agent runner for LaunchDarkly AI SDK."""

from typing import Any

from ldai import log
from ldai.providers import AgentResult, AgentRunner
from ldai.providers.types import LDAIMetrics

from ldai_langchain.langchain_helper import sum_token_usage_from_messages


class LangChainAgentRunner(AgentRunner):
    """
    AgentRunner implementation for LangChain.

    Wraps a compiled LangChain agent graph (from ``langchain.agents.create_agent``)
    and delegates execution to it. Tool calling and loop management are handled
    internally by the graph.
    Returned by LangChainRunnerFactory.create_agent(config, tools).
    """

    def __init__(self, agent: Any):
        # Compiled agent graph; treated as opaque beyond ainvoke().
        self._agent = agent

    async def run(self, input: Any) -> AgentResult:
        """
        Run the agent with the given input string.

        Delegates to the compiled LangChain agent, which handles
        the tool-calling loop internally. Failures are never raised;
        they are reported via metrics with ``success=False``.

        :param input: The user prompt or input to the agent
        :return: AgentResult with output, raw response, and aggregated metrics
        """
        try:
            result = await self._agent.ainvoke({
                "messages": [{"role": "user", "content": str(input)}]
            })
            messages = result.get("messages", [])
            # Fix: previously only plain-string content produced output;
            # list-of-content-blocks responses silently yielded "".
            output = self._extract_text(messages[-1]) if messages else ""
            return AgentResult(
                output=output,
                raw=result,
                metrics=LDAIMetrics(
                    success=True,
                    usage=sum_token_usage_from_messages(messages),
                ),
            )
        except Exception as error:
            # Deliberate best-effort boundary: log and surface failure in metrics.
            log.warning(f"LangChain agent run failed: {error}")
            return AgentResult(
                output="",
                raw=None,
                metrics=LDAIMetrics(success=False, usage=None),
            )

    @staticmethod
    def _extract_text(message: Any) -> str:
        """
        Extract the text of a LangChain message's ``content``.

        Handles both the plain-string form and the list-of-content-blocks
        form used for multimodal responses, where text blocks are plain
        strings or dicts shaped like ``{"type": "text", "text": "..."}``.
        Returns "" when no text can be found.
        """
        content = getattr(message, 'content', None)
        if isinstance(content, str):
            return content
        if isinstance(content, list):
            parts = []
            for block in content:
                if isinstance(block, str):
                    parts.append(block)
                elif isinstance(block, dict) and block.get('type') == 'text':
                    parts.append(block.get('text', ''))
            return "".join(parts)
        return ""

    def get_agent(self) -> Any:
        """Return the underlying compiled LangChain agent."""
        return self._agent
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,9 @@

from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage
from ldai import LDMessage
from ldai import LDMessage, log
from ldai.models import AIConfigKind
from ldai.providers import ToolRegistry
from ldai.providers.types import LDAIMetrics
from ldai.tracker import TokenUsage

Expand Down Expand Up @@ -50,12 +51,18 @@ def convert_messages_to_langchain(
return result


def create_langchain_model(ai_config: AIConfigKind) -> BaseChatModel:
def create_langchain_model(ai_config: AIConfigKind, tool_registry: Optional[ToolRegistry] = None) -> BaseChatModel:
"""
Create a LangChain BaseChatModel from a LaunchDarkly AI configuration.

If the config includes tool definitions and a tool_registry is provided, tools found
in the registry are bound to the model. Tools not found in the registry are skipped
with a warning. Built-in provider tools (e.g. code_interpreter) are not supported
via LangChain's bind_tools abstraction and are skipped with a warning.

:param ai_config: The LaunchDarkly AI configuration
:return: A configured LangChain BaseChatModel
:param tool_registry: Optional registry mapping tool names to callable implementations
:return: A configured LangChain BaseChatModel, with tools bound if applicable
"""
from langchain.chat_models import init_chat_model

Expand All @@ -66,19 +73,119 @@ def create_langchain_model(ai_config: AIConfigKind) -> BaseChatModel:
model_name = model_dict.get('name', '')
provider = provider_dict.get('name', '')
parameters = dict(model_dict.get('parameters') or {})
tool_definitions = parameters.pop('tools', []) or []
mapped_provider = map_provider(provider)

# Bedrock requires the foundation provider (e.g. Bedrock:Anthropic) passed in
# parameters separately from model_provider, which is used for LangChain routing.
if mapped_provider == 'bedrock_converse' and 'provider' not in parameters:
parameters['provider'] = provider.removeprefix('bedrock:')

return init_chat_model(
model = init_chat_model(
model_name,
model_provider=mapped_provider,
**parameters,
)

if tool_definitions and tool_registry is not None:
bindable = _resolve_tools_for_langchain(tool_definitions, tool_registry)
if bindable:
model = model.bind_tools(bindable)

return model


def _iter_function_tools(
    tool_definitions: List[Dict[str, Any]],
    tool_registry: ToolRegistry,
) -> Iterator[Tuple[str, Dict[str, Any]]]:
    """
    Yield ``(name, definition)`` pairs for function tools that have a callable
    implementation in the registry.

    Shared filtering used by both ``_resolve_tools_for_langchain`` and
    ``build_structured_tools`` (previously duplicated in each). Skips:
    non-dict entries, built-in provider tools (``type`` other than
    ``'function'``, with a warning), unnamed tools, and tools missing from
    the registry (with a warning).
    """
    for td in tool_definitions:
        if not isinstance(td, dict):
            continue

        tool_type = td.get('type')
        if tool_type and tool_type != 'function':
            log.warning(
                f"Built-in tool '{tool_type}' is not reliably supported via LangChain's "
                "bind_tools abstraction and will be skipped. Use a provider-specific runner "
                "to use built-in provider tools."
            )
            continue

        name = td.get('name')
        if not name:
            continue

        if name not in tool_registry:
            log.warning(f"Tool '{name}' is defined in the AI config but was not found in the tool registry; skipping.")
            continue

        yield name, td


def _resolve_tools_for_langchain(
    tool_definitions: List[Dict[str, Any]],
    tool_registry: ToolRegistry,
) -> List[Dict[str, Any]]:
    """
    Match LD tool definitions against a registry, returning function-calling tool dicts
    for tools that have a callable implementation. Built-in provider tools and tools
    missing from the registry are skipped with a warning.
    """
    return [
        {
            'type': 'function',
            'function': {
                'name': name,
                'description': td.get('description', ''),
                # Default to an empty object schema when the config omits parameters.
                'parameters': td.get('parameters', {'type': 'object', 'properties': {}}),
            },
        }
        for name, td in _iter_function_tools(tool_definitions, tool_registry)
    ]


def build_structured_tools(ai_config: AIConfigKind, tool_registry: ToolRegistry) -> List[Any]:
    """
    Build a list of LangChain StructuredTool instances from LD tool definitions and a registry.

    Tools found in the registry are wrapped as StructuredTool with the name and description
    from the LD config. Built-in provider tools and tools missing from the registry are
    skipped with a warning.

    :param ai_config: The LaunchDarkly AI configuration
    :param tool_registry: Registry mapping tool names to callable implementations
    :return: List of StructuredTool instances ready to pass to langchain.agents.create_agent
    """
    from langchain_core.tools import StructuredTool

    config_dict = ai_config.to_dict()
    model_dict = config_dict.get('model') or {}
    parameters = dict(model_dict.get('parameters') or {})
    tool_definitions = parameters.pop('tools', []) or []

    return [
        StructuredTool.from_function(
            func=tool_registry[name],
            name=name,
            description=td.get('description', ''),
        )
        for name, td in _iter_function_tools(tool_definitions, tool_registry)
    ]
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Duplicated tool-filtering logic in two functions

Low Severity

build_structured_tools and _resolve_tools_for_langchain contain nearly identical tool-filtering logic: checking isinstance(td, dict), checking the type field, extracting name, verifying registry membership, and logging the same warnings. The only difference is the output format (dicts vs StructuredTool). Extracting the shared filtering into a common helper would reduce duplication and the risk of inconsistent updates.

Additional Locations (1)
Fix in Cursor Fix in Web



def get_ai_usage_from_response(response: Any) -> Optional[TokenUsage]:
"""
Expand All @@ -88,11 +195,11 @@ def get_ai_usage_from_response(response: Any) -> Optional[TokenUsage]:
:return: TokenUsage or None if unavailable
"""
if hasattr(response, 'usage_metadata') and response.usage_metadata:
return TokenUsage(
total=response.usage_metadata.get('total_tokens', 0),
input=response.usage_metadata.get('input_tokens', 0),
output=response.usage_metadata.get('output_tokens', 0),
)
total = response.usage_metadata.get('total_tokens', 0)
inp = response.usage_metadata.get('input_tokens', 0)
out = response.usage_metadata.get('output_tokens', 0)
if total or inp or out:
return TokenUsage(total=total, input=inp, output=out)
if hasattr(response, 'response_metadata') and response.response_metadata:
token_usage = (
response.response_metadata.get('tokenUsage')
Expand Down
Original file line number Diff line number Diff line change
@@ -1,7 +1,14 @@
from typing import Any, Optional

from langchain.agents import create_agent as lc_create_agent
from ldai.models import AIConfigKind
from ldai.providers import AIProvider
from ldai.providers import AIProvider, ToolRegistry

from ldai_langchain.langchain_helper import create_langchain_model
from ldai_langchain.langchain_agent_runner import LangChainAgentRunner
from ldai_langchain.langchain_helper import (
build_structured_tools,
create_langchain_model,
)
from ldai_langchain.langchain_model_runner import LangChainModelRunner


Expand All @@ -17,3 +24,22 @@ def create_model(self, config: AIConfigKind) -> LangChainModelRunner:
"""
llm = create_langchain_model(config)
return LangChainModelRunner(llm)

def create_agent(self, config: Any, tools: Optional[ToolRegistry] = None) -> LangChainAgentRunner:
"""
Create a configured LangChainAgentRunner for the given AI agent config.

:param config: The LaunchDarkly AI agent configuration
:param tools: ToolRegistry mapping tool names to callables
:return: LangChainAgentRunner ready to run the agent
"""
instructions = (config.instructions or '') if hasattr(config, 'instructions') else ''
llm = create_langchain_model(config)
lc_tools = build_structured_tools(config, tools or {})

agent = lc_create_agent(
llm,
tools=lc_tools or None,
system_prompt=instructions or None,
)
return LangChainAgentRunner(agent)
Loading
Loading