Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -1,10 +1,14 @@
"""LaunchDarkly AI SDK - LangChain Connector."""

from ldai_langchain.langchain_helper import LangChainHelper
from ldai_langchain.langchain_model_runner import LangChainModelRunner
from ldai_langchain.langchain_runner_factory import LangChainRunnerFactory

__version__ = "0.1.0"

__all__ = [
'__version__',
'LangChainRunnerFactory',
'LangChainHelper',
'LangChainModelRunner',
]
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
"""Shared LangChain utilities for the LaunchDarkly AI SDK."""

from typing import Any, Dict, List, Optional, Union

from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage
from ldai import LDMessage
from ldai.models import AIConfigKind
from ldai.providers.types import LDAIMetrics
from ldai.tracker import TokenUsage


class LangChainHelper:
    """
    Namespace of shared utilities used by the LangChain-based runners
    (model, agent, agent graph).

    Every method is a ``@staticmethod``; the class is a namespace and is
    never instantiated.
    """

    @staticmethod
    def map_provider(ld_provider_name: str) -> str:
        """
        Translate a LaunchDarkly provider name into the name LangChain expects.

        :param ld_provider_name: LaunchDarkly provider name
        :return: LangChain-compatible provider name
        """
        normalized = ld_provider_name.lower()
        # Only providers whose names differ between the two systems need an
        # entry here; everything else passes through lowercased.
        overrides: Dict[str, str] = {'gemini': 'google-genai'}
        return overrides.get(normalized, normalized)

    @staticmethod
    def convert_messages(
        messages: List[LDMessage],
    ) -> List[Union[HumanMessage, SystemMessage, AIMessage]]:
        """
        Convert LaunchDarkly messages into LangChain message objects.

        :param messages: List of LDMessage objects
        :return: List of LangChain message objects
        :raises ValueError: If an unsupported message role is encountered
        """
        # Dispatch table: LD role name -> LangChain message class.
        role_to_class = {
            'system': SystemMessage,
            'user': HumanMessage,
            'assistant': AIMessage,
        }
        converted: List[Union[HumanMessage, SystemMessage, AIMessage]] = []
        for msg in messages:
            message_cls = role_to_class.get(msg.role)
            if message_cls is None:
                raise ValueError(f'Unsupported message role: {msg.role}')
            converted.append(message_cls(content=msg.content))
        return converted

    @staticmethod
    def create_langchain_model(ai_config: AIConfigKind) -> BaseChatModel:
        """
        Build a LangChain BaseChatModel from a LaunchDarkly AI configuration.

        :param ai_config: The LaunchDarkly AI configuration
        :return: A configured LangChain BaseChatModel
        """
        # Imported lazily so the `langchain` package is only required when a
        # model is actually created.
        from langchain.chat_models import init_chat_model

        config_dict = ai_config.to_dict()
        model_section = config_dict.get('model') or {}
        provider_section = config_dict.get('provider') or {}

        return init_chat_model(
            model_section.get('name', ''),
            model_provider=LangChainHelper.map_provider(provider_section.get('name', '')),
            **(model_section.get('parameters') or {}),
        )

    @staticmethod
    def get_ai_metrics_from_response(response: Any) -> LDAIMetrics:
        """
        Extract LaunchDarkly AI metrics from a LangChain response.

        :param response: The response from a LangChain model (BaseMessage or similar)
        :return: LDAIMetrics with success status and token usage
        """
        usage: Optional[TokenUsage] = None
        metadata = getattr(response, 'response_metadata', None)
        if metadata:
            # Providers disagree on the casing of the usage key, and on the
            # casing of the count fields inside it — accept both spellings.
            token_usage = metadata.get('tokenUsage') or metadata.get('token_usage')
            if token_usage:
                usage = TokenUsage(
                    total=token_usage.get('totalTokens', 0) or token_usage.get('total_tokens', 0),
                    input=token_usage.get('promptTokens', 0) or token_usage.get('prompt_tokens', 0),
                    output=token_usage.get('completionTokens', 0) or token_usage.get('completion_tokens', 0),
                )
        return LDAIMetrics(success=True, usage=usage)
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
"""LangChain model runner for LaunchDarkly AI SDK."""

from typing import Any, Dict, List

from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import BaseMessage
from ldai import LDMessage, log
from ldai.providers.model_runner import ModelRunner
from ldai.providers.types import LDAIMetrics, ModelResponse, StructuredResponse
from ldai.tracker import TokenUsage
from ldai_langchain.langchain_helper import LangChainHelper


class LangChainModelRunner(ModelRunner):
    """
    ModelRunner implementation for LangChain.

    Wraps a fully-configured BaseChatModel; instances are produced by
    LangChainConnector.create_model(config).
    """

    def __init__(self, llm: BaseChatModel):
        self._llm = llm

    def get_llm(self) -> BaseChatModel:
        """
        Return the underlying LangChain BaseChatModel.

        :return: The BaseChatModel instance
        """
        return self._llm

    async def invoke_model(self, messages: List[LDMessage]) -> ModelResponse:
        """
        Invoke the LangChain model with an array of messages.

        Never raises: any failure is reported through the returned metrics
        (`success=False`) with an empty assistant message.

        :param messages: Array of LDMessage objects representing the conversation
        :return: ModelResponse containing the model's response and metrics
        """
        try:
            lc_messages = LangChainHelper.convert_messages(messages)
            reply: BaseMessage = await self._llm.ainvoke(lc_messages)
            metrics = LangChainHelper.get_ai_metrics_from_response(reply)

            if isinstance(reply.content, str):
                text = reply.content
            else:
                text = ''
                log.warning(
                    f'Multimodal response not supported, expecting a string. '
                    f'Content type: {type(reply.content)}, Content: {reply.content}'
                )
                # Keep whatever token usage was extracted, but flag the call
                # as unsuccessful since the content was dropped.
                metrics = LDAIMetrics(success=False, usage=metrics.usage)

            return ModelResponse(
                message=LDMessage(role='assistant', content=text),
                metrics=metrics,
            )
        except Exception as error:
            log.warning(f'LangChain model invocation failed: {error}')
            return ModelResponse(
                message=LDMessage(role='assistant', content=''),
                metrics=LDAIMetrics(success=False, usage=None),
            )

    async def invoke_structured_model(
        self,
        messages: List[LDMessage],
        response_structure: Dict[str, Any],
    ) -> StructuredResponse:
        """
        Invoke the LangChain model with structured output support.

        Never raises: failures and non-dict results are reported through the
        returned metrics (`success=False`) with empty data.

        :param messages: Array of LDMessage objects representing the conversation
        :param response_structure: Dictionary defining the output structure
        :return: StructuredResponse containing the structured data
        """
        try:
            lc_messages = LangChainHelper.convert_messages(messages)
            structured = self._llm.with_structured_output(response_structure)
            result = await structured.ainvoke(lc_messages)

            # Token accounting is not available on the structured path, so
            # usage is reported as zero either way.
            if isinstance(result, dict):
                return StructuredResponse(
                    data=result,
                    raw_response=str(result),
                    metrics=LDAIMetrics(success=True, usage=TokenUsage(total=0, input=0, output=0)),
                )

            log.warning(f'Structured output did not return a dict. Got: {type(result)}')
            return StructuredResponse(
                data={},
                raw_response='',
                metrics=LDAIMetrics(success=False, usage=TokenUsage(total=0, input=0, output=0)),
            )
        except Exception as error:
            log.warning(f'LangChain structured model invocation failed: {error}')
            return StructuredResponse(
                data={},
                raw_response='',
                metrics=LDAIMetrics(success=False, usage=TokenUsage(total=0, input=0, output=0)),
            )

Loading
Loading