From 83d303033f8390a0723055cf22a8062a9490ea12 Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Sat, 22 Nov 2025 00:10:08 +0100 Subject: [PATCH 01/12] feat: REL-10772 Added core SDK files including models, providers, and metrics tracking. --- .readthedocs.yml | 2 +- Makefile | 14 +- docs/conf.py | 2 +- packages/sdk/server-ai/LICENSE.txt | 13 + packages/sdk/server-ai/README.md | 41 ++ packages/sdk/server-ai/pyproject.toml | 69 +++ packages/sdk/server-ai/src/ldai/__init__.py | 47 ++ .../sdk/server-ai/src/ldai/chat/__init__.py | 5 + .../server-ai/src/ldai/chat/tracked_chat.py | 184 ++++++ packages/sdk/server-ai/src/ldai/client.py | 573 ++++++++++++++++++ .../sdk/server-ai/src/ldai/judge/__init__.py | 6 + .../sdk/server-ai/src/ldai/judge/ai_judge.py | 220 +++++++ .../ldai/judge/evaluation_schema_builder.py | 74 +++ .../sdk/server-ai/src/ldai/judge/types.py | 44 ++ .../server-ai/src/ldai/metrics/__init__.py | 12 + .../src/ldai/metrics/feedback_kind.py | 12 + .../sdk/server-ai/src/ldai/metrics/metrics.py | 70 +++ .../server-ai/src/ldai/metrics/token_usage.py | 18 + packages/sdk/server-ai/src/ldai/models.py | 361 +++++++++++ .../src/ldai/providers/ai_provider.py | 89 +++ .../src/ldai/providers/ai_provider_factory.py | 160 +++++ .../sdk/server-ai/src/ldai/providers/types.py | 27 + packages/sdk/server-ai/src/ldai/tracker.py | 347 +++++++++++ packages/sdk/server-ai/tests/__init__.py | 2 + packages/sdk/server-ai/tests/test_agents.py | 342 +++++++++++ .../sdk/server-ai/tests/test_model_config.py | 330 ++++++++++ packages/sdk/server-ai/tests/test_tracker.py | 445 ++++++++++++++ release-please-config.json | 2 +- 28 files changed, 3501 insertions(+), 10 deletions(-) create mode 100644 packages/sdk/server-ai/LICENSE.txt create mode 100644 packages/sdk/server-ai/README.md create mode 100644 packages/sdk/server-ai/pyproject.toml create mode 100644 packages/sdk/server-ai/src/ldai/__init__.py create mode 100644 packages/sdk/server-ai/src/ldai/chat/__init__.py create mode 100644 packages/sdk/server-ai/src/ldai/chat/tracked_chat.py create mode 100644 packages/sdk/server-ai/src/ldai/client.py create mode 100644 packages/sdk/server-ai/src/ldai/judge/__init__.py create mode 100644 packages/sdk/server-ai/src/ldai/judge/ai_judge.py create mode 100644 packages/sdk/server-ai/src/ldai/judge/evaluation_schema_builder.py create mode 100644 packages/sdk/server-ai/src/ldai/judge/types.py create mode 100644 packages/sdk/server-ai/src/ldai/metrics/__init__.py create mode 100644 packages/sdk/server-ai/src/ldai/metrics/feedback_kind.py create mode 100644 packages/sdk/server-ai/src/ldai/metrics/metrics.py create mode 100644 packages/sdk/server-ai/src/ldai/metrics/token_usage.py create mode 100644 packages/sdk/server-ai/src/ldai/models.py create mode 100644 packages/sdk/server-ai/src/ldai/providers/ai_provider.py create mode 100644 packages/sdk/server-ai/src/ldai/providers/ai_provider_factory.py create mode 100644 packages/sdk/server-ai/src/ldai/providers/types.py create mode 100644 packages/sdk/server-ai/src/ldai/tracker.py create mode 100644 packages/sdk/server-ai/tests/__init__.py create mode 100644 packages/sdk/server-ai/tests/test_agents.py create mode 100644 packages/sdk/server-ai/tests/test_model_config.py create mode 100644 packages/sdk/server-ai/tests/test_tracker.py diff --git a/.readthedocs.yml b/.readthedocs.yml index 7c99e8d..e4ba44f 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -8,7 +8,7 @@ build: post_create_environment: - python -m pip install poetry post_install: - - 
VIRTUAL_ENV=$READTHEDOCS_VIRTUALENV_PATH poetry install --with docs + - cd packages/sdk/server-ai && VIRTUAL_ENV=$READTHEDOCS_VIRTUALENV_PATH poetry install --with docs sphinx: builder: html diff --git a/Makefile b/Makefile index 791925c..c2e9986 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,7 @@ help: #! Show this help message .PHONY: install install: - @poetry install + @cd packages/sdk/server-ai && poetry install # # Quality control checks @@ -24,14 +24,14 @@ install: .PHONY: test test: #! Run unit tests test: install - @poetry run pytest $(PYTEST_FLAGS) + @cd packages/sdk/server-ai && poetry run pytest $(PYTEST_FLAGS) .PHONY: lint lint: #! Run type analysis and linting checks lint: install - @poetry run mypy ldai - @poetry run isort --check --atomic ldai - @poetry run pycodestyle ldai + @cd packages/sdk/server-ai && poetry run mypy src/ldai + @cd packages/sdk/server-ai && poetry run isort --check --atomic src/ldai + @cd packages/sdk/server-ai && poetry run pycodestyle src/ldai # # Documentation generation @@ -39,6 +39,6 @@ lint: install .PHONY: docs docs: #! Generate sphinx-based documentation - @poetry install --with docs + @cd packages/sdk/server-ai && poetry install --with docs @cd docs - @poetry run $(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + @cd packages/sdk/server-ai && poetry run $(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/conf.py b/docs/conf.py index 20ca389..126bb48 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -19,7 +19,7 @@ import os import sys -sys.path.insert(0, os.path.abspath('..')) +sys.path.insert(0, os.path.abspath('../packages/sdk/server-ai/src')) import ldai diff --git a/packages/sdk/server-ai/LICENSE.txt b/packages/sdk/server-ai/LICENSE.txt new file mode 100644 index 0000000..50add35 --- /dev/null +++ b/packages/sdk/server-ai/LICENSE.txt @@ -0,0 +1,13 @@ +Copyright 2024 Catamorphic, Co. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/packages/sdk/server-ai/README.md b/packages/sdk/server-ai/README.md new file mode 100644 index 0000000..0411abe --- /dev/null +++ b/packages/sdk/server-ai/README.md @@ -0,0 +1,41 @@ +# LaunchDarkly Server-side AI library for Python + +## LaunchDarkly overview + +[LaunchDarkly](https://www.launchdarkly.com) is a feature management platform that serves trillions of feature flags daily to help teams build better software, faster. [Get started](https://docs.launchdarkly.com/home/getting-started) using LaunchDarkly today! + +[![Twitter Follow](https://img.shields.io/twitter/follow/launchdarkly.svg?style=social&label=Follow&maxAge=2592000)](https://twitter.com/intent/follow?screen_name=launchdarkly) + +## Supported Python versions + +This version of the library has a minimum Python version of 3.9. + +## Getting started + +Refer to the [SDK reference guide](https://docs.launchdarkly.com/sdk/ai/python) for instructions on getting started with using the SDK. 
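For illustration, here is a minimal sketch of initializing the AI client and retrieving a completion configuration. The SDK key and the AI Config key (`my-ai-config`) are placeholders; see the reference guide for authoritative usage.

```python
from ldclient import Config, Context
from ldclient.client import LDClient

from ldai import LDAIClient
from ldai.models import AICompletionConfigDefault, ModelConfig

ld_client = LDClient(Config("your-sdk-key"))  # hypothetical SDK key
ai_client = LDAIClient(ld_client)

context = Context.builder("user-key").build()
config = ai_client.completion_config(
    "my-ai-config",  # hypothetical AI Config key
    context,
    AICompletionConfigDefault(enabled=False, model=ModelConfig("gpt-4")),
)

if config.enabled and config.model:
    print(f"Serving model: {config.model.name}")
```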
+ +## Learn more + +Read our [documentation](http://docs.launchdarkly.com) for in-depth instructions on configuring and using LaunchDarkly. You can also head straight to the [reference guide for the python SDK](http://docs.launchdarkly.com/docs/python-sdk-ai-reference). + +## Contributing + +We encourage pull requests and other contributions from the community. Check out our [contributing guidelines](CONTRIBUTING.md) for instructions on how to contribute to this library. + +## Verifying library build provenance with the SLSA framework + +LaunchDarkly uses the [SLSA framework](https://slsa.dev/spec/v1.0/about) (Supply-chain Levels for Software Artifacts) to help developers make their supply chain more secure by ensuring the authenticity and build integrity of our published library packages. To learn more, see the [provenance guide](PROVENANCE.md). + +## About LaunchDarkly + +- LaunchDarkly is a continuous delivery platform that provides feature flags as a service and allows developers to iterate quickly and safely. We allow you to easily flag your features and manage them from the LaunchDarkly dashboard. With LaunchDarkly, you can: + - Roll out a new feature to a subset of your users (like a group of users who opt-in to a beta tester group), gathering feedback and bug reports from real-world use cases. + - Gradually roll out a feature to an increasing percentage of users, and track the effect that the feature has on key metrics (for instance, how likely is a user to complete a purchase if they have feature A versus feature B?). + - Turn off a feature that you realize is causing performance problems in production, without needing to re-deploy, or even restart the application with a changed configuration file. + - Grant access to certain features based on user attributes, like payment plan (eg: users on the ‘gold’ plan get access to more features than users in the ‘silver’ plan). Disable parts of your application to facilitate maintenance, without taking everything offline. +- LaunchDarkly provides feature flag SDKs for a wide variety of languages and technologies. Read [our documentation](https://docs.launchdarkly.com/sdk) for a complete list. 
+- Explore LaunchDarkly + - [launchdarkly.com](https://www.launchdarkly.com/ "LaunchDarkly Main Website") for more information + - [docs.launchdarkly.com](https://docs.launchdarkly.com/ "LaunchDarkly Documentation") for our documentation and SDK reference guides + - [apidocs.launchdarkly.com](https://apidocs.launchdarkly.com/ "LaunchDarkly API Documentation") for our API documentation + - [blog.launchdarkly.com](https://blog.launchdarkly.com/ "LaunchDarkly Blog Documentation") for the latest product updates diff --git a/packages/sdk/server-ai/pyproject.toml b/packages/sdk/server-ai/pyproject.toml new file mode 100644 index 0000000..d1cc9d5 --- /dev/null +++ b/packages/sdk/server-ai/pyproject.toml @@ -0,0 +1,69 @@ +[tool.poetry] +name = "launchdarkly-server-sdk-ai" +version = "0.10.1" +description = "LaunchDarkly SDK for AI" +authors = ["LaunchDarkly "] +license = "Apache-2.0" +readme = "README.md" +homepage = "https://docs.launchdarkly.com/sdk/ai/python" +repository = "https://github.com/launchdarkly/python-server-sdk-ai" +documentation = "https://launchdarkly-python-sdk-ai.readthedocs.io/en/latest/" +classifiers = [ + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Topic :: Software Development", + "Topic :: Software Development :: Libraries", +] +packages = [ { include = "ldai", from = "src" } ] + +[tool.poetry.dependencies] +python = ">=3.9,<4" +launchdarkly-server-sdk = ">=9.4.0" +chevron = "=0.14.0" + + +[tool.poetry.group.dev.dependencies] +pytest = ">=2.8" +pytest-cov = ">=2.4.0" +pytest-mypy = "==1.0.1" +pytest-asyncio = ">=0.21.0" +mypy = "==1.18.2" +pycodestyle = "^2.12.1" +isort = ">=5.13.2,<7.0.0" + + +[tool.poetry.group.docs] +optional = true + +[tool.poetry.group.docs.dependencies] +sphinx = ">=6,<8" +sphinx-rtd-theme = ">=1.3,<4.0" +certifi = ">=2018.4.16" +expiringdict = ">=1.1.4" +pyrfc3339 = ">=1.0" +jsonpickle = ">1.4.1" +semver = ">=2.7.9" +urllib3 = ">=1.26.0" +jinja2 = "3.1.6" + +[tool.mypy] +python_version = "3.9" +ignore_missing_imports = true +install_types = true +non_interactive = true + + +[tool.pytest.ini_options] +addopts = ["-ra"] + + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/packages/sdk/server-ai/src/ldai/__init__.py b/packages/sdk/server-ai/src/ldai/__init__.py new file mode 100644 index 0000000..33f0466 --- /dev/null +++ b/packages/sdk/server-ai/src/ldai/__init__.py @@ -0,0 +1,47 @@ +__version__ = "0.10.1" # x-release-please-version + +# Extend __path__ to support namespace packages at the ldai level +# This allows provider packages (like launchdarkly-server-sdk-ai-langchain) +# to extend ldai.providers.* even though ldai itself has an __init__.py +import sys +from pkgutil import extend_path + +__path__ = extend_path(__path__, __name__) + +# Export chat +from ldai.chat import TrackedChat +# Export main client +from ldai.client import LDAIClient +# Export judge +from ldai.judge import AIJudge, EvalScore, JudgeResponse +# Export models for convenience +from ldai.models import ( # Deprecated aliases for backward compatibility + AIAgentConfig, AIAgentConfigDefault, AIAgentConfigRequest, AIAgents, + AICompletionConfig, AICompletionConfigDefault, AIConfig, 
AIJudgeConfig, + AIJudgeConfigDefault, JudgeConfiguration, LDAIAgent, LDAIAgentConfig, + LDAIAgentDefaults, LDMessage, ModelConfig, ProviderConfig) + +__all__ = [ + 'LDAIClient', + 'AIAgentConfig', + 'AIAgentConfigDefault', + 'AIAgentConfigRequest', + 'AIAgents', + 'AICompletionConfig', + 'AICompletionConfigDefault', + 'AIJudgeConfig', + 'AIJudgeConfigDefault', + 'AIJudge', + 'TrackedChat', + 'EvalScore', + 'JudgeConfiguration', + 'JudgeResponse', + 'LDMessage', + 'ModelConfig', + 'ProviderConfig', + # Deprecated exports + 'AIConfig', + 'LDAIAgent', + 'LDAIAgentConfig', + 'LDAIAgentDefaults', +] diff --git a/packages/sdk/server-ai/src/ldai/chat/__init__.py b/packages/sdk/server-ai/src/ldai/chat/__init__.py new file mode 100644 index 0000000..265a1b3 --- /dev/null +++ b/packages/sdk/server-ai/src/ldai/chat/__init__.py @@ -0,0 +1,5 @@ +"""Chat module for LaunchDarkly AI SDK.""" + +from ldai.chat.tracked_chat import TrackedChat + +__all__ = ['TrackedChat'] diff --git a/packages/sdk/server-ai/src/ldai/chat/tracked_chat.py b/packages/sdk/server-ai/src/ldai/chat/tracked_chat.py new file mode 100644 index 0000000..841f8ed --- /dev/null +++ b/packages/sdk/server-ai/src/ldai/chat/tracked_chat.py @@ -0,0 +1,184 @@ +"""TrackedChat implementation for managing AI chat conversations.""" + +import asyncio +import logging +from typing import Dict, List, Optional + +from ldai.judge import AIJudge +from ldai.judge.types import JudgeResponse +from ldai.models import AICompletionConfig, LDMessage +from ldai.providers.ai_provider import AIProvider +from ldai.providers.types import ChatResponse +from ldai.tracker import LDAIConfigTracker + + +class TrackedChat: + """ + Concrete implementation of TrackedChat that provides chat functionality + by delegating to an AIProvider implementation. + + This class handles conversation management and tracking, while delegating + the actual model invocation to the provider. + """ + + def __init__( + self, + ai_config: AICompletionConfig, + tracker: LDAIConfigTracker, + provider: AIProvider, + judges: Optional[Dict[str, AIJudge]] = None, + ): + """ + Initialize the TrackedChat. + + :param ai_config: The completion AI configuration + :param tracker: The tracker for the completion configuration + :param provider: The AI provider to use for chat + :param judges: Optional dictionary of judge instances keyed by their configuration keys + """ + self._ai_config = ai_config + self._tracker = tracker + self._provider = provider + self._judges = judges or {} + self._logger = logging.getLogger(f'{__name__}.{self.__class__.__name__}') + self._messages: List[LDMessage] = [] + + async def invoke(self, prompt: str) -> ChatResponse: + """ + Invoke the chat model with a prompt string. + + This method handles conversation management and tracking, delegating to the provider's invoke_model method. 
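        Example (illustrative; assumes a chat created via
        :meth:`ldai.client.LDAIClient.create_chat`)::

            response = await chat.invoke("I need help with my order")
            print(response.message.content)
            # The conversation history now contains the user prompt and the reply
            print(len(chat.get_messages()))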
+ + :param prompt: The user prompt to send to the chat model + :return: ChatResponse containing the model's response and metrics + """ + # Convert prompt string to LDMessage with role 'user' and add to conversation history + user_message: LDMessage = LDMessage(role='user', content=prompt) + self._messages.append(user_message) + + # Prepend config messages to conversation history for model invocation + config_messages = self._ai_config.messages or [] + all_messages = config_messages + self._messages + + # Delegate to provider-specific implementation with tracking + response = await self._tracker.track_metrics_of( + lambda result: result.metrics, + lambda: self._provider.invoke_model(all_messages), + ) + + # Start judge evaluations as async tasks (don't await them) + judge_config = self._ai_config.judge_configuration + if judge_config and judge_config.judges and len(judge_config.judges) > 0: + evaluation_tasks = self._start_judge_evaluations(self._messages, response) + response.evaluations = evaluation_tasks + + # Add the response message to conversation history + self._messages.append(response.message) + return response + + def _start_judge_evaluations( + self, + messages: List[LDMessage], + response: ChatResponse, + ) -> List[asyncio.Task[Optional[JudgeResponse]]]: + """ + Start judge evaluations as async tasks without awaiting them. + + Returns a list of async tasks that can be awaited later. + + :param messages: Array of messages representing the conversation history + :param response: The AI response to be evaluated + :return: List of async tasks that will return judge evaluation results + """ + if not self._ai_config.judge_configuration or not self._ai_config.judge_configuration.judges: + return [] + + judge_configs = self._ai_config.judge_configuration.judges + + # Start all judge evaluations as tasks + async def evaluate_judge(judge_config): + judge = self._judges.get(judge_config.key) + if not judge: + self._logger.warning( + f"Judge configuration is not enabled: {judge_config.key}", + ) + return None + + eval_result = await judge.evaluate_messages( + messages, response, judge_config.sampling_rate + ) + + if eval_result and eval_result.success: + self._tracker.track_eval_scores(eval_result.evals) + + return eval_result + + # Create tasks for each judge evaluation + tasks = [ + asyncio.create_task(evaluate_judge(judge_config)) + for judge_config in judge_configs + ] + + return tasks + + def get_config(self) -> AICompletionConfig: + """ + Get the underlying AI configuration used to initialize this TrackedChat. + + :return: The AI completion configuration + """ + return self._ai_config + + def get_tracker(self) -> LDAIConfigTracker: + """ + Get the underlying AI configuration tracker used to initialize this TrackedChat. + + :return: The tracker instance + """ + return self._tracker + + def get_provider(self) -> AIProvider: + """ + Get the underlying AI provider instance. + + This provides direct access to the provider for advanced use cases. + + :return: The AI provider instance + """ + return self._provider + + def get_judges(self) -> Dict[str, AIJudge]: + """ + Get the judges associated with this TrackedChat. + + Returns a dictionary of judge instances keyed by their configuration keys. + + :return: Dictionary of judge instances + """ + return self._judges + + def append_messages(self, messages: List[LDMessage]) -> None: + """ + Append messages to the conversation history. 
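        For example, to inject prior context before the next ``invoke`` call
        (illustrative)::

            chat.append_messages([
                LDMessage(role='user', content='My order number is 1234.'),
                LDMessage(role='assistant', content='Thanks, noted.'),
            ])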
+ + Adds messages to the conversation history without invoking the model, + which is useful for managing multi-turn conversations or injecting context. + + :param messages: Array of messages to append to the conversation history + """ + self._messages.extend(messages) + + def get_messages(self, include_config_messages: bool = False) -> List[LDMessage]: + """ + Get all messages in the conversation history. + + :param include_config_messages: Whether to include the config messages from the AIConfig. + Defaults to False. + :return: Array of messages. When include_config_messages is True, returns both config + messages and conversation history with config messages prepended. When False, + returns only the conversation history messages. + """ + if include_config_messages: + config_messages = self._ai_config.messages or [] + return config_messages + self._messages + return list(self._messages) diff --git a/packages/sdk/server-ai/src/ldai/client.py b/packages/sdk/server-ai/src/ldai/client.py new file mode 100644 index 0000000..2b314cf --- /dev/null +++ b/packages/sdk/server-ai/src/ldai/client.py @@ -0,0 +1,573 @@ +import asyncio +import logging +from typing import Any, Dict, List, Optional, Tuple + +import chevron +from ldclient import Context +from ldclient.client import LDClient + +from ldai.chat import TrackedChat +from ldai.judge import AIJudge +from ldai.models import (AIAgentConfig, AIAgentConfigDefault, + AIAgentConfigRequest, AIAgents, AICompletionConfig, + AICompletionConfigDefault, AIJudgeConfig, + AIJudgeConfigDefault, JudgeConfiguration, LDMessage, + ModelConfig, ProviderConfig) +from ldai.providers.ai_provider_factory import (AIProviderFactory, + SupportedAIProvider) +from ldai.tracker import LDAIConfigTracker + + +class LDAIClient: + """The LaunchDarkly AI SDK client object.""" + + def __init__(self, client: LDClient): + self._client = client + self._logger = logging.getLogger('ldclient.ai') + + def completion_config( + self, + key: str, + context: Context, + default_value: AICompletionConfigDefault, + variables: Optional[Dict[str, Any]] = None, + ) -> AICompletionConfig: + """ + Get the value of a completion configuration. + + :param key: The key of the completion configuration. + :param context: The context to evaluate the completion configuration in. + :param default_value: The default value of the completion configuration. + :param variables: Additional variables for the completion configuration. + :return: The completion configuration with a tracker used for gathering metrics. + """ + self._client.track('$ld:ai:config:function:single', context, key, 1) + + model, provider, messages, instructions, tracker, enabled, judge_configuration = self.__evaluate( + key, context, default_value.to_dict(), variables + ) + + config = AICompletionConfig( + enabled=enabled, + model=model, + messages=messages, + provider=provider, + tracker=tracker, + judge_configuration=judge_configuration, + ) + + return config + + def config( + self, + key: str, + context: Context, + default_value: AICompletionConfigDefault, + variables: Optional[Dict[str, Any]] = None, + ) -> AICompletionConfig: + """ + Get the value of a model configuration. + + .. deprecated:: Use :meth:`completion_config` instead. This method will be removed in a future version. + + :param key: The key of the model configuration. + :param context: The context to evaluate the model configuration in. + :param default_value: The default value of the model configuration. + :param variables: Additional variables for the model configuration. 
+ :return: The value of the model configuration along with a tracker used for gathering metrics. + """ + return self.completion_config(key, context, default_value, variables) + + def judge_config( + self, + key: str, + context: Context, + default_value: AIJudgeConfigDefault, + variables: Optional[Dict[str, Any]] = None, + ) -> AIJudgeConfig: + """ + Get the value of a judge configuration. + + :param key: The key of the judge configuration. + :param context: The context to evaluate the judge configuration in. + :param default_value: The default value of the judge configuration. + :param variables: Additional variables for the judge configuration. + :return: The judge configuration with a tracker used for gathering metrics. + """ + self._client.track('$ld:ai:judge:function:single', context, key, 1) + + model, provider, messages, instructions, tracker, enabled, judge_configuration = self.__evaluate( + key, context, default_value.to_dict(), variables + ) + + # Extract evaluation_metric_keys from the variation + variation = self._client.variation(key, context, default_value.to_dict()) + evaluation_metric_keys = variation.get('evaluationMetricKeys', default_value.evaluation_metric_keys or []) + + config = AIJudgeConfig( + enabled=enabled, + evaluation_metric_keys=evaluation_metric_keys, + model=model, + messages=messages, + provider=provider, + tracker=tracker, + ) + + return config + + async def create_judge( + self, + key: str, + context: Context, + default_value: AIJudgeConfigDefault, + variables: Optional[Dict[str, Any]] = None, + default_ai_provider: Optional[SupportedAIProvider] = None, + ) -> Optional[AIJudge]: + """ + Creates and returns a new Judge instance for AI evaluation. + + :param key: The key identifying the AI judge configuration to use + :param context: Standard Context used when evaluating flags + :param default_value: A default value representing a standard AI config result + :param variables: Dictionary of values for instruction interpolation. + The variables `message_history` and `response_to_evaluate` are reserved for the judge and will be ignored. + :param default_ai_provider: Optional default AI provider to use. 
+ :return: Judge instance or None if disabled/unsupported + + Example:: + + judge = client.create_judge( + "relevance-judge", + context, + AIJudgeConfigDefault( + enabled=True, + model=ModelConfig("gpt-4"), + provider=ProviderConfig("openai"), + evaluation_metric_keys=['$ld:ai:judge:relevance'], + messages=[LDMessage(role='system', content='You are a relevance judge.')] + ), + variables={'metric': "relevance"} + ) + + if judge: + result = await judge.evaluate("User question", "AI response") + if result and result.evals: + relevance_eval = result.evals.get('$ld:ai:judge:relevance') + if relevance_eval: + print('Relevance score:', relevance_eval.score) + """ + self._client.track('$ld:ai:judge:function:createJudge', context, key, 1) + + try: + # Overwrite reserved variables to ensure they remain as placeholders for judge evaluation + extended_variables = dict(variables) if variables else {} + + # Warn if reserved variables are provided + if variables: + if 'message_history' in variables: + self._logger.warning( + 'Variable "message_history" is reserved for judge evaluation and will be overwritten' + ) + if 'response_to_evaluate' in variables: + self._logger.warning( + 'Variable "response_to_evaluate" is reserved for judge evaluation and will be overwritten' + ) + + extended_variables['message_history'] = '{{message_history}}' + extended_variables['response_to_evaluate'] = '{{response_to_evaluate}}' + + judge_config = self.judge_config(key, context, default_value, extended_variables) + + if not judge_config.enabled or not judge_config.tracker: + return None + + # Create AI provider for the judge + provider = await AIProviderFactory.create(judge_config, default_ai_provider) + if not provider: + return None + + return AIJudge(judge_config, judge_config.tracker, provider) + except Exception as error: + self._logger.error(f'Failed to create judge: {error}') + return None + + async def _initialize_judges( + self, + judge_configs: List[JudgeConfiguration.Judge], + context: Context, + variables: Optional[Dict[str, Any]] = None, + default_ai_provider: Optional[SupportedAIProvider] = None, + ) -> Dict[str, AIJudge]: + """ + Initialize judges from judge configurations. + + :param judge_configs: List of judge configurations + :param context: Standard Context used when evaluating flags + :param variables: Dictionary of values for instruction interpolation + :param default_ai_provider: Optional default AI provider to use + :return: Dictionary of judge instances keyed by their configuration keys + """ + judges: Dict[str, AIJudge] = {} + + async def create_judge_for_config(judge_key: str): + judge = await self.create_judge( + judge_key, + context, + AIJudgeConfigDefault(enabled=False), + variables, + default_ai_provider, + ) + return judge_key, judge + + judge_promises = [ + create_judge_for_config(judge_config.key) + for judge_config in judge_configs + ] + + results = await asyncio.gather(*judge_promises, return_exceptions=True) + + for result in results: + if isinstance(result, Exception): + continue + judge_key, judge = result # type: ignore[misc] + if judge: + judges[judge_key] = judge + + return judges + + async def create_chat( + self, + key: str, + context: Context, + default_value: AICompletionConfigDefault, + variables: Optional[Dict[str, Any]] = None, + default_ai_provider: Optional[SupportedAIProvider] = None, + ) -> Optional[TrackedChat]: + """ + Creates and returns a new TrackedChat instance for AI chat conversations. 
+ + :param key: The key identifying the AI completion configuration to use + :param context: Standard Context used when evaluating flags + :param default_value: A default value representing a standard AI config result + :param variables: Dictionary of values for instruction interpolation + :param default_ai_provider: Optional default AI provider to use + :return: TrackedChat instance or None if disabled/unsupported + + Example:: + + chat = await client.create_chat( + "customer-support-chat", + context, + AICompletionConfigDefault( + enabled=True, + model=ModelConfig("gpt-4"), + provider=ProviderConfig("openai"), + messages=[LDMessage(role='system', content='You are a helpful assistant.')] + ), + variables={'customerName': 'John'} + ) + + if chat: + response = await chat.invoke("I need help with my order") + print(response.message.content) + + # Access conversation history + messages = chat.get_messages() + print(f"Conversation has {len(messages)} messages") + """ + self._client.track('$ld:ai:config:function:createChat', context, key, 1) + if self._logger: + self._logger.debug(f"Creating chat for key: {key}") + config = self.completion_config(key, context, default_value, variables) + + if not config.enabled or not config.tracker: + return None + + provider = await AIProviderFactory.create(config, default_ai_provider) + if not provider: + return None + + judges = {} + if config.judge_configuration and config.judge_configuration.judges: + judges = await self._initialize_judges( + config.judge_configuration.judges, + context, + variables, + default_ai_provider, + ) + + return TrackedChat(config, config.tracker, provider, judges) + + def agent_config( + self, + key: str, + context: Context, + default_value: AIAgentConfigDefault, + variables: Optional[Dict[str, Any]] = None, + ) -> AIAgentConfig: + """ + Retrieve a single AI Config agent. + + This method retrieves a single agent configuration with instructions + dynamically interpolated using the provided variables and context data. + + Example:: + + agent = client.agent_config( + 'research_agent', + context, + AIAgentConfigDefault( + enabled=True, + model=ModelConfig('gpt-4'), + instructions="You are a research assistant specializing in {{topic}}." + ), + variables={'topic': 'climate change'} + ) + + if agent.enabled: + research_result = agent.instructions # Interpolated instructions + agent.tracker.track_success() + + :param key: The agent configuration key. + :param context: The context to evaluate the agent configuration in. + :param default_value: Default agent values. + :param variables: Variables for interpolation. + :return: Configured AIAgentConfig instance. + """ + # Track single agent usage + self._client.track( + "$ld:ai:agent:function:single", + context, + key, + 1 + ) + + return self.__evaluate_agent(key, context, default_value, variables) + + def agent( + self, + config: AIAgentConfigRequest, + context: Context, + ) -> AIAgentConfig: + """ + Retrieve a single AI Config agent. + + .. deprecated:: Use :meth:`agent_config` instead. This method will be removed in a future version. + + :param config: The agent configuration to use. + :param context: The context to evaluate the agent configuration in. + :return: Configured AIAgentConfig instance. + """ + return self.agent_config(config.key, context, config.default_value, config.variables) + + def agent_configs( + self, + agent_configs: List[AIAgentConfigRequest], + context: Context, + ) -> AIAgents: + """ + Retrieve multiple AI agent configurations. 
+ + This method allows you to retrieve multiple agent configurations in a single call, + with each agent having its own default configuration and variables for instruction + interpolation. + + Example:: + + agents = client.agent_configs([ + AIAgentConfigRequest( + key='research_agent', + default_value=AIAgentConfigDefault( + enabled=True, + instructions='You are a research assistant.' + ), + variables={'topic': 'climate change'} + ), + AIAgentConfigRequest( + key='writing_agent', + default_value=AIAgentConfigDefault( + enabled=True, + instructions='You are a writing assistant.' + ), + variables={'style': 'academic'} + ) + ], context) + + research_result = agents["research_agent"].instructions + agents["research_agent"].tracker.track_success() + + :param agent_configs: List of agent configurations to retrieve. + :param context: The context to evaluate the agent configurations in. + :return: Dictionary mapping agent keys to their AIAgentConfig configurations. + """ + # Track multiple agents usage + agent_count = len(agent_configs) + config_keys = [config.key for config in agent_configs] + self._client.track( + "$ld:ai:agent:function:multiple", + context, + {"configKeys": config_keys}, + agent_count + ) + + result: AIAgents = {} + + for config in agent_configs: + agent = self.__evaluate_agent( + config.key, + context, + config.default_value, + config.variables + ) + result[config.key] = agent + + return result + + def agents( + self, + agent_configs: List[AIAgentConfigRequest], + context: Context, + ) -> AIAgents: + """ + Retrieve multiple AI agent configurations. + + .. deprecated:: Use :meth:`agent_configs` instead. This method will be removed in a future version. + + :param agent_configs: List of agent configurations to retrieve. + :param context: The context to evaluate the agent configurations in. + :return: Dictionary mapping agent keys to their AIAgentConfig configurations. + """ + return self.agent_configs(agent_configs, context) + + def __evaluate( + self, + key: str, + context: Context, + default_dict: Dict[str, Any], + variables: Optional[Dict[str, Any]] = None, + ) -> Tuple[Optional[ModelConfig], Optional[ProviderConfig], Optional[List[LDMessage]], Optional[str], LDAIConfigTracker, bool, Optional[Any]]: + """ + Internal method to evaluate a configuration and extract components. + + :param key: The configuration key. + :param context: The evaluation context. + :param default_dict: Default configuration as dictionary. + :param variables: Variables for interpolation. + :return: Tuple of (model, provider, messages, instructions, tracker, enabled). 
+ """ + variation = self._client.variation(key, context, default_dict) + + all_variables = {} + if variables: + all_variables.update(variables) + all_variables['ldctx'] = context.to_dict() + + # Extract messages + messages = None + if 'messages' in variation and isinstance(variation['messages'], list) and all( + isinstance(entry, dict) for entry in variation['messages'] + ): + messages = [ + LDMessage( + role=entry['role'], + content=self.__interpolate_template( + entry['content'], all_variables + ), + ) + for entry in variation['messages'] + ] + + # Extract instructions + instructions = None + if 'instructions' in variation and isinstance(variation['instructions'], str): + instructions = self.__interpolate_template(variation['instructions'], all_variables) + + # Extract provider config + provider_config = None + if 'provider' in variation and isinstance(variation['provider'], dict): + provider = variation['provider'] + provider_config = ProviderConfig(provider.get('name', '')) + + # Extract model config + model = None + if 'model' in variation and isinstance(variation['model'], dict): + parameters = variation['model'].get('parameters', None) + custom = variation['model'].get('custom', None) + model = ModelConfig( + name=variation['model']['name'], + parameters=parameters, + custom=custom + ) + + # Create tracker + tracker = LDAIConfigTracker( + self._client, + variation.get('_ldMeta', {}).get('variationKey', ''), + key, + int(variation.get('_ldMeta', {}).get('version', 1)), + model.name if model else '', + provider_config.name if provider_config else '', + context, + ) + + enabled = variation.get('_ldMeta', {}).get('enabled', False) + + # Extract judge configuration + judge_configuration = None + if 'judgeConfiguration' in variation and isinstance(variation['judgeConfiguration'], dict): + judge_config = variation['judgeConfiguration'] + if 'judges' in judge_config and isinstance(judge_config['judges'], list): + judges = [ + JudgeConfiguration.Judge( + key=judge['key'], + sampling_rate=judge['samplingRate'] + ) + for judge in judge_config['judges'] + if isinstance(judge, dict) and 'key' in judge and 'samplingRate' in judge + ] + if judges: + judge_configuration = JudgeConfiguration(judges=judges) + + return model, provider_config, messages, instructions, tracker, enabled, judge_configuration + + def __evaluate_agent( + self, + key: str, + context: Context, + default_value: AIAgentConfigDefault, + variables: Optional[Dict[str, Any]] = None, + ) -> AIAgentConfig: + """ + Internal method to evaluate an agent configuration. + + :param key: The agent configuration key. + :param context: The evaluation context. + :param default_value: Default agent values. + :param variables: Variables for interpolation. + :return: Configured AIAgentConfig instance. + """ + model, provider, messages, instructions, tracker, enabled, judge_configuration = self.__evaluate( + key, context, default_value.to_dict(), variables + ) + + # For agents, prioritize instructions over messages + final_instructions = instructions if instructions is not None else default_value.instructions + + return AIAgentConfig( + enabled=enabled, + model=model or default_value.model, + provider=provider or default_value.provider, + instructions=final_instructions, + tracker=tracker, + judge_configuration=judge_configuration or default_value.judge_configuration, + ) + + def __interpolate_template(self, template: str, variables: Dict[str, Any]) -> str: + """ + Interpolate the template with the given variables using Mustache format. 
+ + :param template: The template string. + :param variables: The variables to interpolate into the template. + :return: The interpolated string. + """ + return chevron.render(template, variables) diff --git a/packages/sdk/server-ai/src/ldai/judge/__init__.py b/packages/sdk/server-ai/src/ldai/judge/__init__.py new file mode 100644 index 0000000..fc31e0d --- /dev/null +++ b/packages/sdk/server-ai/src/ldai/judge/__init__.py @@ -0,0 +1,6 @@ +"""Judge module for LaunchDarkly AI SDK.""" + +from ldai.judge.ai_judge import AIJudge +from ldai.judge.types import EvalScore, JudgeResponse + +__all__ = ['AIJudge', 'EvalScore', 'JudgeResponse'] diff --git a/packages/sdk/server-ai/src/ldai/judge/ai_judge.py b/packages/sdk/server-ai/src/ldai/judge/ai_judge.py new file mode 100644 index 0000000..d5bb061 --- /dev/null +++ b/packages/sdk/server-ai/src/ldai/judge/ai_judge.py @@ -0,0 +1,220 @@ +"""Judge implementation for AI evaluation.""" + +import logging +import random +from typing import Any, Dict, List, Optional + +import chevron + +from ldai.judge.evaluation_schema_builder import EvaluationSchemaBuilder +from ldai.judge.types import EvalScore, JudgeResponse +from ldai.models import AIJudgeConfig, LDMessage +from ldai.providers.ai_provider import AIProvider +from ldai.providers.types import ChatResponse, StructuredResponse +from ldai.tracker import LDAIConfigTracker + + +class AIJudge: + """ + Judge implementation that handles evaluation functionality and conversation management. + + According to the AIEval spec, judges are AI Configs with mode: "judge" that evaluate + other AI Configs using structured output. + """ + + def __init__( + self, + ai_config: AIJudgeConfig, + ai_config_tracker: LDAIConfigTracker, + ai_provider: AIProvider, + ): + """ + Initialize the Judge. + + :param ai_config: The judge AI configuration + :param ai_config_tracker: The tracker for the judge configuration + :param ai_provider: The AI provider to use for evaluation + """ + self._ai_config = ai_config + self._ai_config_tracker = ai_config_tracker + self._ai_provider = ai_provider + self._logger = logging.getLogger(f'{__name__}.{self.__class__.__name__}') + self._evaluation_response_structure = EvaluationSchemaBuilder.build( + ai_config.evaluation_metric_keys + ) + + async def evaluate( + self, + input_text: str, + output_text: str, + sampling_rate: float = 1.0, + ) -> Optional[JudgeResponse]: + """ + Evaluates an AI response using the judge's configuration. 
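        Example (illustrative; assumes a judge created via
        :meth:`ldai.client.LDAIClient.create_judge`)::

            result = await judge.evaluate(
                'What is the capital of France?',
                'The capital of France is Paris.',
            )
            if result and result.success:
                for metric_key, eval_score in result.evals.items():
                    print(metric_key, eval_score.score, eval_score.reasoning)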
+ + :param input_text: The input prompt or question that was provided to the AI + :param output_text: The AI-generated response to be evaluated + :param sampling_rate: Sampling rate (0-1) to determine if evaluation should be processed (defaults to 1) + :return: Evaluation results or None if not sampled + """ + try: + if not self._ai_config.evaluation_metric_keys or len(self._ai_config.evaluation_metric_keys) == 0: + self._logger.warning( + 'Judge configuration is missing required evaluationMetricKeys' + ) + return None + + if not self._ai_config.messages: + self._logger.warning('Judge configuration must include messages') + return None + + if random.random() > sampling_rate: + self._logger.debug(f'Judge evaluation skipped due to sampling rate: {sampling_rate}') + return None + + messages = self._construct_evaluation_messages(input_text, output_text) + + # Track metrics of the structured model invocation + response = await self._ai_config_tracker.track_metrics_of( + lambda result: result.metrics, + lambda: self._ai_provider.invoke_structured_model(messages, self._evaluation_response_structure) + ) + + success = response.metrics.success + + evals = self._parse_evaluation_response(response.data) + + if len(evals) != len(self._ai_config.evaluation_metric_keys): + self._logger.warning('Judge evaluation did not return all evaluations') + success = False + + return JudgeResponse( + evals=evals, + success=success, + ) + except Exception as error: + self._logger.error(f'Judge evaluation failed: {error}') + return JudgeResponse( + evals={}, + success=False, + error=str(error), + ) + + async def evaluate_messages( + self, + messages: List[LDMessage], + response: ChatResponse, + sampling_ratio: float = 1.0, + ) -> Optional[JudgeResponse]: + """ + Evaluates an AI response from chat messages and response. + + :param messages: Array of messages representing the conversation history + :param response: The AI response to be evaluated + :param sampling_ratio: Sampling ratio (0-1) to determine if evaluation should be processed (defaults to 1) + :return: Evaluation results or None if not sampled + """ + input_text = '\r\n'.join([msg.content for msg in messages]) if messages else '' + output_text = response.message.content + + return await self.evaluate(input_text, output_text, sampling_ratio) + + def get_ai_config(self) -> AIJudgeConfig: + """ + Returns the AI Config used by this judge. + + :return: The judge AI configuration + """ + return self._ai_config + + def get_tracker(self) -> LDAIConfigTracker: + """ + Returns the tracker associated with this judge. + + :return: The tracker for the judge configuration + """ + return self._ai_config_tracker + + def get_provider(self) -> AIProvider: + """ + Returns the AI provider used by this judge. + + :return: The AI provider + """ + return self._ai_provider + + def _construct_evaluation_messages(self, input_text: str, output_text: str) -> List[LDMessage]: + """ + Constructs evaluation messages by combining judge's config messages with input/output. 
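        Example (illustrative; ``{{message_history}}`` and ``{{response_to_evaluate}}``
        are the reserved placeholders expected in the judge's configured messages)::

            messages = judge._construct_evaluation_messages(
                'User: What is 2 + 2?',   # substituted for {{message_history}}
                '2 + 2 equals 4.',        # substituted for {{response_to_evaluate}}
            )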
+ + :param input_text: The input text + :param output_text: The output text to evaluate + :return: List of messages for evaluation + """ + if not self._ai_config.messages: + return [] + + messages: List[LDMessage] = [] + for msg in self._ai_config.messages: + # Interpolate message content with reserved variables + content = self._interpolate_message(msg.content, { + 'message_history': input_text, + 'response_to_evaluate': output_text, + }) + messages.append(LDMessage(role=msg.role, content=content)) + + return messages + + def _interpolate_message(self, content: str, variables: Dict[str, str]) -> str: + """ + Interpolates message content with variables using Mustache templating. + + :param content: The message content template + :param variables: Variables to interpolate + :return: Interpolated message content + """ + # Use chevron (Mustache) for templating, with no escaping + return chevron.render(content, variables) + + def _parse_evaluation_response(self, data: Dict[str, Any]) -> Dict[str, EvalScore]: + """ + Parses the structured evaluation response from the AI provider. + + :param data: The structured response data + :return: Dictionary of evaluation scores keyed by metric key + """ + results: Dict[str, EvalScore] = {} + + if not data.get('evaluations') or not isinstance(data['evaluations'], dict): + self._logger.warning('Invalid response: missing or invalid evaluations object') + return results + + evaluations = data['evaluations'] + + for metric_key in self._ai_config.evaluation_metric_keys: + evaluation = evaluations.get(metric_key) + + if not evaluation or not isinstance(evaluation, dict): + self._logger.warning(f'Missing evaluation for metric key: {metric_key}') + continue + + score = evaluation.get('score') + reasoning = evaluation.get('reasoning') + + if not isinstance(score, (int, float)) or score < 0 or score > 1: + self._logger.warning( + f'Invalid score evaluated for {metric_key}: {score}. ' + 'Score must be a number between 0 and 1 inclusive' + ) + continue + + if not isinstance(reasoning, str): + self._logger.warning( + f'Invalid reasoning evaluated for {metric_key}: {reasoning}. ' + 'Reasoning must be a string' + ) + continue + + results[metric_key] = EvalScore(score=float(score), reasoning=reasoning) + + return results diff --git a/packages/sdk/server-ai/src/ldai/judge/evaluation_schema_builder.py b/packages/sdk/server-ai/src/ldai/judge/evaluation_schema_builder.py new file mode 100644 index 0000000..8fbc712 --- /dev/null +++ b/packages/sdk/server-ai/src/ldai/judge/evaluation_schema_builder.py @@ -0,0 +1,74 @@ +"""Internal class for building dynamic evaluation response schemas.""" + +from typing import Any, Dict, List + + +class EvaluationSchemaBuilder: + """ + Internal class for building dynamic evaluation response schemas. + Not exported - only used internally by Judge. + """ + + @staticmethod + def build(evaluation_metric_keys: list[str]) -> Dict[str, Any]: + """ + Build an evaluation response schema from evaluation metric keys. 
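        Example (illustrative)::

            schema = EvaluationSchemaBuilder.build(['$ld:ai:judge:relevance'])
            # schema['required'] == ['evaluations'], and
            # schema['properties']['evaluations']['properties'] contains a
            # score/reasoning object schema for '$ld:ai:judge:relevance'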
+ + :param evaluation_metric_keys: List of evaluation metric keys + :return: Schema dictionary for structured output + """ + return { + 'title': 'EvaluationResponse', + 'description': f"Response containing evaluation results for {', '.join(evaluation_metric_keys)} metrics", + 'type': 'object', + 'properties': { + 'evaluations': { + 'type': 'object', + 'description': f"Object containing evaluation results for {', '.join(evaluation_metric_keys)} metrics", + 'properties': EvaluationSchemaBuilder._build_key_properties(evaluation_metric_keys), + 'required': evaluation_metric_keys, + 'additionalProperties': False, + }, + }, + 'required': ['evaluations'], + 'additionalProperties': False, + } + + @staticmethod + def _build_key_properties(evaluation_metric_keys: list[str]) -> Dict[str, Any]: + """ + Build properties for each evaluation metric key. + + :param evaluation_metric_keys: List of evaluation metric keys + :return: Dictionary of properties for each key + """ + result: Dict[str, Any] = {} + for key in evaluation_metric_keys: + result[key] = EvaluationSchemaBuilder._build_key_schema(key) + return result + + @staticmethod + def _build_key_schema(key: str) -> Dict[str, Any]: + """ + Build schema for a single evaluation metric key. + + :param key: Evaluation metric key + :return: Schema dictionary for the key + """ + return { + 'type': 'object', + 'properties': { + 'score': { + 'type': 'number', + 'minimum': 0, + 'maximum': 1, + 'description': f'Score between 0.0 and 1.0 for {key}', + }, + 'reasoning': { + 'type': 'string', + 'description': f'Reasoning behind the score for {key}', + }, + }, + 'required': ['score', 'reasoning'], + 'additionalProperties': False, + } diff --git a/packages/sdk/server-ai/src/ldai/judge/types.py b/packages/sdk/server-ai/src/ldai/judge/types.py new file mode 100644 index 0000000..7c90091 --- /dev/null +++ b/packages/sdk/server-ai/src/ldai/judge/types.py @@ -0,0 +1,44 @@ +"""Types for judge evaluation responses.""" + +from dataclasses import dataclass +from typing import Any, Dict, Optional + + +@dataclass +class EvalScore: + """ + Score and reasoning for a single evaluation metric. + """ + score: float # Score between 0.0 and 1.0 + reasoning: str # Reasoning behind the provided score + + def to_dict(self) -> Dict[str, Any]: + """ + Render the evaluation score as a dictionary object. + """ + return { + 'score': self.score, + 'reasoning': self.reasoning, + } + + +@dataclass +class JudgeResponse: + """ + Response from a judge evaluation containing scores and reasoning for multiple metrics. + """ + evals: Dict[str, EvalScore] # Dictionary where keys are metric names and values contain score and reasoning + success: bool # Whether the evaluation completed successfully + error: Optional[str] = None # Error message if evaluation failed + + def to_dict(self) -> Dict[str, Any]: + """ + Render the judge response as a dictionary object. 
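        Example (illustrative)::

            JudgeResponse(
                evals={'$ld:ai:judge:relevance': EvalScore(score=0.9, reasoning='On topic')},
                success=True,
            ).to_dict()
            # {'evals': {'$ld:ai:judge:relevance': {'score': 0.9, 'reasoning': 'On topic'}},
            #  'success': True}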
+ """ + result: Dict[str, Any] = { + 'evals': {key: eval_score.to_dict() for key, eval_score in self.evals.items()}, + 'success': self.success, + } + if self.error is not None: + result['error'] = self.error + return result diff --git a/packages/sdk/server-ai/src/ldai/metrics/__init__.py b/packages/sdk/server-ai/src/ldai/metrics/__init__.py new file mode 100644 index 0000000..b7efe93 --- /dev/null +++ b/packages/sdk/server-ai/src/ldai/metrics/__init__.py @@ -0,0 +1,12 @@ +"""Metrics module for LaunchDarkly AI SDK.""" + +from ldai.metrics.feedback_kind import FeedbackKind +from ldai.metrics.metrics import LDAIMetrics, LDAIMetricSummary +from ldai.metrics.token_usage import TokenUsage + +__all__ = [ + 'FeedbackKind', + 'LDAIMetrics', + 'LDAIMetricSummary', + 'TokenUsage', +] diff --git a/packages/sdk/server-ai/src/ldai/metrics/feedback_kind.py b/packages/sdk/server-ai/src/ldai/metrics/feedback_kind.py new file mode 100644 index 0000000..a4b7859 --- /dev/null +++ b/packages/sdk/server-ai/src/ldai/metrics/feedback_kind.py @@ -0,0 +1,12 @@ +"""Feedback kind enumeration for AI operations.""" + +from enum import Enum + + +class FeedbackKind(Enum): + """ + Types of feedback that can be provided for AI operations. + """ + + Positive = "positive" + Negative = "negative" diff --git a/packages/sdk/server-ai/src/ldai/metrics/metrics.py b/packages/sdk/server-ai/src/ldai/metrics/metrics.py new file mode 100644 index 0000000..39e1497 --- /dev/null +++ b/packages/sdk/server-ai/src/ldai/metrics/metrics.py @@ -0,0 +1,70 @@ +"""Metrics tracking for AI operations.""" + +from typing import Any, Dict, Optional + +from ldai.metrics.feedback_kind import FeedbackKind +from ldai.metrics.token_usage import TokenUsage + + +class LDAIMetricSummary: + """ + Summary of metrics which have been tracked. + """ + + def __init__(self): + self._duration = None + self._success = None + self._feedback = None + self._usage = None + self._time_to_first_token = None + + @property + def duration(self) -> Optional[int]: + return self._duration + + @property + def success(self) -> Optional[bool]: + return self._success + + @property + def feedback(self) -> Optional[Dict[str, FeedbackKind]]: + return self._feedback + + @property + def usage(self) -> Optional[TokenUsage]: + return self._usage + + @property + def time_to_first_token(self) -> Optional[int]: + return self._time_to_first_token + + +class LDAIMetrics: + """ + Metrics information for AI operations that includes success status and token usage. + """ + + def __init__(self, success: bool, usage: Optional[TokenUsage] = None): + """ + Initialize metrics. + + :param success: Whether the operation was successful. + :param usage: Optional token usage information. + """ + self.success = success + self.usage = usage + + def to_dict(self) -> Dict[str, Any]: + """ + Render the metrics as a dictionary object. + """ + result: Dict[str, Any] = { + 'success': self.success, + } + if self.usage is not None: + result['usage'] = { + 'total': self.usage.total, + 'input': self.usage.input, + 'output': self.usage.output, + } + return result diff --git a/packages/sdk/server-ai/src/ldai/metrics/token_usage.py b/packages/sdk/server-ai/src/ldai/metrics/token_usage.py new file mode 100644 index 0000000..419e1ab --- /dev/null +++ b/packages/sdk/server-ai/src/ldai/metrics/token_usage.py @@ -0,0 +1,18 @@ +"""Token usage tracking for AI operations.""" + +from dataclasses import dataclass + + +@dataclass +class TokenUsage: + """ + Tracks token usage for AI operations. 
+ + :param total: Total number of tokens used. + :param input: Number of tokens in the prompt. + :param output: Number of tokens in the completion. + """ + + total: int + input: int + output: int diff --git a/packages/sdk/server-ai/src/ldai/models.py b/packages/sdk/server-ai/src/ldai/models.py new file mode 100644 index 0000000..c2abe56 --- /dev/null +++ b/packages/sdk/server-ai/src/ldai/models.py @@ -0,0 +1,361 @@ +import warnings +from dataclasses import dataclass, field +from typing import Any, Dict, List, Literal, Optional, Union + +from ldai.tracker import LDAIConfigTracker + + +@dataclass +class LDMessage: + role: Literal['system', 'user', 'assistant'] + content: str + + def to_dict(self) -> dict: + """ + Render the given message as a dictionary object. + """ + return { + 'role': self.role, + 'content': self.content, + } + + +class ModelConfig: + """ + Configuration related to the model. + """ + + def __init__(self, name: str, parameters: Optional[Dict[str, Any]] = None, custom: Optional[Dict[str, Any]] = None): + """ + :param name: The name of the model. + :param parameters: Additional model-specific parameters. + :param custom: Additional customer provided data. + """ + self._name = name + self._parameters = parameters + self._custom = custom + + @property + def name(self) -> str: + """ + The name of the model. + """ + return self._name + + def get_parameter(self, key: str) -> Any: + """ + Retrieve model-specific parameters. + + Accessing a named, typed attribute (e.g. name) will result in the call + being delegated to the appropriate property. + """ + if key == 'name': + return self.name + + if self._parameters is None: + return None + + return self._parameters.get(key) + + def get_custom(self, key: str) -> Any: + """ + Retrieve customer provided data. + """ + if self._custom is None: + return None + + return self._custom.get(key) + + def to_dict(self) -> dict: + """ + Render the given model config as a dictionary object. + """ + return { + 'name': self._name, + 'parameters': self._parameters, + 'custom': self._custom, + } + + +class ProviderConfig: + """ + Configuration related to the provider. + """ + + def __init__(self, name: str): + self._name = name + + @property + def name(self) -> str: + """ + The name of the provider. + """ + return self._name + + def to_dict(self) -> dict: + """ + Render the given provider config as a dictionary object. + """ + return { + 'name': self._name, + } + + +# ============================================================================ +# Judge Types +# ============================================================================ + +@dataclass(frozen=True) +class JudgeConfiguration: + """ + Configuration for judge attachment to AI Configs. + """ + + @dataclass(frozen=True) + class Judge: + """ + Configuration for a single judge attachment. + """ + key: str + sampling_rate: float + + def to_dict(self) -> dict: + """ + Render the judge as a dictionary object. + """ + return { + 'key': self.key, + 'samplingRate': self.sampling_rate, + } + + judges: List['JudgeConfiguration.Judge'] + + def to_dict(self) -> dict: + """ + Render the judge configuration as a dictionary object. 
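        Example (illustrative)::

            JudgeConfiguration(
                judges=[JudgeConfiguration.Judge(key='relevance-judge', sampling_rate=0.1)]
            ).to_dict()
            # {'judges': [{'key': 'relevance-judge', 'samplingRate': 0.1}]}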
+ """ + return { + 'judges': [judge.to_dict() for judge in self.judges], + } + + +# ============================================================================ +# Base AI Config Types +# ============================================================================ + +@dataclass(frozen=True) +class AIConfigDefault: + """ + Base AI Config interface for default implementations with optional enabled property. + """ + enabled: Optional[bool] = None + model: Optional[ModelConfig] = None + provider: Optional[ProviderConfig] = None + + def _base_to_dict(self) -> Dict[str, Any]: + """ + Render the base config fields as a dictionary object. + """ + return { + '_ldMeta': { + 'enabled': self.enabled or False, + }, + 'model': self.model.to_dict() if self.model else None, + 'provider': self.provider.to_dict() if self.provider else None, + } + + +@dataclass(frozen=True) +class AIConfig: + """ + Base AI Config interface without mode-specific fields. + """ + enabled: bool + model: Optional[ModelConfig] = None + provider: Optional[ProviderConfig] = None + tracker: Optional[LDAIConfigTracker] = None + + def _base_to_dict(self) -> Dict[str, Any]: + """ + Render the base config fields as a dictionary object. + """ + return { + '_ldMeta': { + 'enabled': self.enabled, + }, + 'model': self.model.to_dict() if self.model else None, + 'provider': self.provider.to_dict() if self.provider else None, + } + + +# ============================================================================ +# Completion Config Types +# ============================================================================ + +@dataclass(frozen=True) +class AICompletionConfigDefault(AIConfigDefault): + """ + Default Completion AI Config (default mode). + """ + messages: Optional[List[LDMessage]] = None + judge_configuration: Optional[JudgeConfiguration] = None + + def to_dict(self) -> dict: + """ + Render the given default values as an AICompletionConfigDefault-compatible dictionary object. + """ + result = self._base_to_dict() + result['messages'] = [message.to_dict() for message in self.messages] if self.messages else None + if self.judge_configuration is not None: + result['judgeConfiguration'] = self.judge_configuration.to_dict() + return result + + +@dataclass(frozen=True) +class AICompletionConfig(AIConfig): + """ + Completion AI Config (default mode). + """ + messages: Optional[List[LDMessage]] = None + judge_configuration: Optional[JudgeConfiguration] = None + + def to_dict(self) -> dict: + """ + Render the given completion config as a dictionary object. + """ + result = self._base_to_dict() + result['messages'] = [message.to_dict() for message in self.messages] if self.messages else None + if self.judge_configuration is not None: + result['judgeConfiguration'] = self.judge_configuration.to_dict() + return result + + +# ============================================================================ +# Agent Config Types +# ============================================================================ + +@dataclass(frozen=True) +class AIAgentConfigDefault(AIConfigDefault): + """ + Default Agent-specific AI Config with instructions. + """ + instructions: Optional[str] = None + judge_configuration: Optional[JudgeConfiguration] = None + + def to_dict(self) -> Dict[str, Any]: + """ + Render the given agent config default as a dictionary object. 
+ """ + result = self._base_to_dict() + if self.instructions is not None: + result['instructions'] = self.instructions + if self.judge_configuration is not None: + result['judgeConfiguration'] = self.judge_configuration.to_dict() + return result + + +@dataclass(frozen=True) +class AIAgentConfig(AIConfig): + """ + Agent-specific AI Config with instructions. + """ + instructions: Optional[str] = None + judge_configuration: Optional[JudgeConfiguration] = None + + def to_dict(self) -> Dict[str, Any]: + """ + Render the given agent config as a dictionary object. + """ + result = self._base_to_dict() + if self.instructions is not None: + result['instructions'] = self.instructions + if self.judge_configuration is not None: + result['judgeConfiguration'] = self.judge_configuration.to_dict() + return result + + +# ============================================================================ +# Judge Config Types +# ============================================================================ + +@dataclass(frozen=True) +class AIJudgeConfigDefault(AIConfigDefault): + """ + Default Judge-specific AI Config with required evaluation metric key. + """ + messages: Optional[List[LDMessage]] = None + evaluation_metric_keys: Optional[List[str]] = None + + def to_dict(self) -> dict: + """ + Render the given judge config default as a dictionary object. + """ + result = self._base_to_dict() + result['messages'] = [message.to_dict() for message in self.messages] if self.messages else None + if self.evaluation_metric_keys is not None: + result['evaluationMetricKeys'] = self.evaluation_metric_keys + return result + + +@dataclass(frozen=True) +class AIJudgeConfig(AIConfig): + """ + Judge-specific AI Config with required evaluation metric key. + """ + evaluation_metric_keys: List[str] = field(default_factory=list) + messages: Optional[List[LDMessage]] = None + + def to_dict(self) -> dict: + """ + Render the given judge config as a dictionary object. + """ + result = self._base_to_dict() + result['evaluationMetricKeys'] = self.evaluation_metric_keys + result['messages'] = [message.to_dict() for message in self.messages] if self.messages else None + return result + + +# ============================================================================ +# Agent Request Config +# ============================================================================ + +@dataclass +class AIAgentConfigRequest: + """ + Configuration for a single agent request. + + Combines agent key with its specific default configuration and variables. + """ + key: str + default_value: AIAgentConfigDefault + variables: Optional[Dict[str, Any]] = None + + +# Type alias for multiple agents +AIAgents = Dict[str, AIAgentConfig] + +# Type alias for all AI Config variants +AIConfigKind = Union[AIAgentConfig, AICompletionConfig, AIJudgeConfig] + + +# ============================================================================ +# Deprecated Type Aliases for Backward Compatibility +# ============================================================================ + +# Note: These are type aliases that point to the new types. +# Since Python uses duck typing, these will work at runtime even if type checkers complain. 
+# The old AIConfig had optional enabled, so it maps to AICompletionConfigDefault +# The old AIConfig return type had required enabled, so it maps to AICompletionConfig + +# Note: AIConfig is now the base class for all config types (defined above at line 169) +# For default configs (with optional enabled), use AICompletionConfigDefault instead +# For required configs (with required enabled), use AICompletionConfig instead + +# Deprecated: Use AIAgentConfigDefault instead +LDAIAgentDefaults = AIAgentConfigDefault + +# Deprecated: Use AIAgentConfigRequest instead +LDAIAgentConfig = AIAgentConfigRequest + +# Deprecated: Use AIAgentConfig instead (note: this was the old return type) +LDAIAgent = AIAgentConfig diff --git a/packages/sdk/server-ai/src/ldai/providers/ai_provider.py b/packages/sdk/server-ai/src/ldai/providers/ai_provider.py new file mode 100644 index 0000000..3deb40b --- /dev/null +++ b/packages/sdk/server-ai/src/ldai/providers/ai_provider.py @@ -0,0 +1,89 @@ +"""Abstract base class for AI providers.""" + +import logging +from abc import ABC, abstractmethod +from typing import Any, Dict, List, Optional + +from ldai.metrics import LDAIMetrics +from ldai.models import AIConfigKind, LDMessage +from ldai.providers.types import ChatResponse, StructuredResponse + + +class AIProvider(ABC): + """ + Abstract base class for AI providers that implement chat model functionality. + + This class provides the contract that all provider implementations must follow + to integrate with LaunchDarkly's tracking and configuration capabilities. + + Following the AICHAT spec recommendation to use base classes with non-abstract methods + for better extensibility and backwards compatibility. + """ + + def __init__(self): + """ + Initialize the AI provider. + + Creates a logger for this provider instance. + """ + self._logger = logging.getLogger(f'{__name__}.{self.__class__.__name__}') + + async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: + """ + Invoke the chat model with an array of messages. + + This method should convert messages to provider format, invoke the model, + and return a ChatResponse with the result and metrics. + + Default implementation takes no action and returns a placeholder response. + Provider implementations should override this method. + + :param messages: Array of LDMessage objects representing the conversation + :return: ChatResponse containing the model's response + """ + self._logger.warning('invokeModel not implemented by this provider') + + return ChatResponse( + message=LDMessage(role='assistant', content=''), + metrics=LDAIMetrics(success=False, usage=None), + ) + + async def invoke_structured_model( + self, + messages: List[LDMessage], + response_structure: Dict[str, Any], + ) -> StructuredResponse: + """ + Invoke the chat model with structured output support. + + This method should convert messages to provider format, invoke the model with + structured output configuration, and return a structured response. + + Default implementation takes no action and returns a placeholder response. + Provider implementations should override this method. 
+ + :param messages: Array of LDMessage objects representing the conversation + :param response_structure: Dictionary of output configurations keyed by output name + :return: StructuredResponse containing the structured data + """ + self._logger.warning('invokeStructuredModel not implemented by this provider') + + return StructuredResponse( + data={}, + raw_response='', + metrics=LDAIMetrics(success=False, usage=None), + ) + + @staticmethod + @abstractmethod + async def create(ai_config: AIConfigKind) -> 'AIProvider': + """ + Static method that constructs an instance of the provider. + + Each provider implementation must provide their own static create method + that accepts an AIConfigKind and returns a configured instance. + + :param ai_config: The LaunchDarkly AI configuration + :return: Configured provider instance + """ + raise NotImplementedError('Provider implementations must override the static create method') diff --git a/packages/sdk/server-ai/src/ldai/providers/ai_provider_factory.py b/packages/sdk/server-ai/src/ldai/providers/ai_provider_factory.py new file mode 100644 index 0000000..7c1dec2 --- /dev/null +++ b/packages/sdk/server-ai/src/ldai/providers/ai_provider_factory.py @@ -0,0 +1,160 @@ +"""Factory for creating AIProvider instances based on the provider configuration.""" + +import importlib +import logging +from typing import List, Literal, Optional, Type + +from ldai.models import AIConfigKind +from ldai.providers.ai_provider import AIProvider + +# List of supported AI providers +SUPPORTED_AI_PROVIDERS = [ + # Multi-provider packages should be last in the list + 'langchain', +] + +# Type representing the supported AI providers +SupportedAIProvider = Literal['langchain'] + + +class AIProviderFactory: + """ + Factory for creating AIProvider instances based on the provider configuration. + """ + + _logger = logging.getLogger(__name__) + + @staticmethod + async def create( + ai_config: AIConfigKind, + default_ai_provider: Optional[SupportedAIProvider] = None, + ) -> Optional[AIProvider]: + """ + Create an AIProvider instance based on the AI configuration. + + This method attempts to load provider-specific implementations dynamically. + Returns None if the provider is not supported. + + :param ai_config: The AI configuration + :param default_ai_provider: Optional default AI provider to use + :return: AIProvider instance or None if not supported + """ + provider_name = ai_config.provider.name.lower() if ai_config.provider else None + # Determine which providers to try based on default_ai_provider + providers_to_try = AIProviderFactory._get_providers_to_try(default_ai_provider, provider_name) + + # Try each provider in order + for provider_type in providers_to_try: + provider = await AIProviderFactory._try_create_provider(provider_type, ai_config) + if provider: + return provider + + # If no provider was successfully created, log a warning + AIProviderFactory._logger.warning( + f"Provider is not supported or failed to initialize: {provider_name or 'unknown'}" + ) + return None + + @staticmethod + def _get_providers_to_try( + default_ai_provider: Optional[SupportedAIProvider], + provider_name: Optional[str], + ) -> List[SupportedAIProvider]: + """ + Determine which providers to try based on default_ai_provider and provider_name. 
+ + :param default_ai_provider: Optional default provider to use + :param provider_name: Optional provider name from config + :return: List of providers to try in order + """ + # If default_ai_provider is set, only try that specific provider + if default_ai_provider: + return [default_ai_provider] + + # If no default_ai_provider is set, try all providers in order + provider_set = set() + + # First try the specific provider if it's supported + if provider_name and provider_name in SUPPORTED_AI_PROVIDERS: + provider_set.add(provider_name) # type: ignore + + # Then try multi-provider packages, but avoid duplicates + multi_provider_packages: List[SupportedAIProvider] = ['langchain'] + for provider in multi_provider_packages: + provider_set.add(provider) + + return list(provider_set) # type: ignore[arg-type] + + @staticmethod + async def _try_create_provider( + provider_type: SupportedAIProvider, + ai_config: AIConfigKind, + ) -> Optional[AIProvider]: + """ + Try to create a provider of the specified type. + + :param provider_type: Type of provider to create + :param ai_config: AI configuration + :return: AIProvider instance or None if creation failed + """ + # Handle built-in providers (part of this package) + if provider_type == 'langchain': + try: + from ldai.providers.langchain import LangChainProvider + return await LangChainProvider.create(ai_config) + except ImportError as error: + AIProviderFactory._logger.warning( + f"Error creating LangChainProvider: {error}. " + f"Make sure langchain and langchain-core packages are installed." + ) + return None + + # TODO: REL-10773 OpenAI provider + # TODO: REL-10776 Vercel provider + # For future external providers, use dynamic import + provider_mappings = { + # 'openai': ('launchdarkly_server_sdk_ai_openai', 'OpenAIProvider'), + # 'vercel': ('launchdarkly_server_sdk_ai_vercel', 'VercelProvider'), + } + + if provider_type not in provider_mappings: + return None + + package_name, provider_class_name = provider_mappings[provider_type] + return await AIProviderFactory._create_provider( + package_name, provider_class_name, ai_config + ) + + @staticmethod + async def _create_provider( + package_name: str, + provider_class_name: str, + ai_config: AIConfigKind, + ) -> Optional[AIProvider]: + """ + Create a provider instance dynamically. 
+ + :param package_name: Name of the package containing the provider + :param provider_class_name: Name of the provider class + :param ai_config: AI configuration + :return: AIProvider instance or None if creation failed + """ + try: + # Try to dynamically import the provider + # This will work if the package is installed + module = importlib.import_module(package_name) + provider_class: Type[AIProvider] = getattr(module, provider_class_name) + + provider = await provider_class.create(ai_config) + AIProviderFactory._logger.debug( + f"Successfully created AIProvider for: {ai_config.provider.name if ai_config.provider else 'unknown'} " + f"with package {package_name}" + ) + return provider + except (ImportError, AttributeError, Exception) as error: + # If the provider is not available or creation fails, return None + AIProviderFactory._logger.warning( + f"Error creating AIProvider for: {ai_config.provider.name if ai_config.provider else 'unknown'} " + f"with package {package_name}: {error}" + ) + return None diff --git a/packages/sdk/server-ai/src/ldai/providers/types.py b/packages/sdk/server-ai/src/ldai/providers/types.py new file mode 100644 index 0000000..982c42b --- /dev/null +++ b/packages/sdk/server-ai/src/ldai/providers/types.py @@ -0,0 +1,27 @@ +"""Types for AI provider responses.""" + +from dataclasses import dataclass +from typing import Any, List, Optional + +from ldai.metrics import LDAIMetrics +from ldai.models import LDMessage + + +@dataclass +class ChatResponse: + """ + Chat response structure. + """ + message: LDMessage + metrics: LDAIMetrics + evaluations: Optional[List[Any]] = None # List of JudgeResponse, will be populated later + + +@dataclass +class StructuredResponse: + """ + Structured response from AI models. + """ + data: dict[str, Any] + raw_response: str + metrics: LDAIMetrics diff --git a/packages/sdk/server-ai/src/ldai/tracker.py b/packages/sdk/server-ai/src/ldai/tracker.py new file mode 100644 index 0000000..d62a78d --- /dev/null +++ b/packages/sdk/server-ai/src/ldai/tracker.py @@ -0,0 +1,347 @@ +import time +from typing import Any, Callable, Dict, Optional, TypeVar + +from ldclient import Context, LDClient + +from ldai.metrics import FeedbackKind, LDAIMetricSummary, TokenUsage + + +class LDAIConfigTracker: + """ + Tracks configuration and usage metrics for LaunchDarkly AI operations. + """ + + def __init__( + self, + ld_client: LDClient, + variation_key: str, + config_key: str, + version: int, + model_name: str, + provider_name: str, + context: Context, + ): + """ + Initialize an AI Config tracker. + + :param ld_client: LaunchDarkly client instance. + :param variation_key: Variation key for tracking. + :param config_key: Configuration key for tracking. + :param version: Version of the variation. + :param model_name: Name of the model used. + :param provider_name: Name of the provider used. + :param context: Context for evaluation. + """ + self._ld_client = ld_client + self._variation_key = variation_key + self._config_key = config_key + self._version = version + self._model_name = model_name + self._provider_name = provider_name + self._context = context + self._summary = LDAIMetricSummary() + + def __get_track_data(self): + """ + Get tracking data for events. + + :return: Dictionary containing variation and config keys. 
+ """ + return { + "variationKey": self._variation_key, + "configKey": self._config_key, + "version": self._version, + "modelName": self._model_name, + "providerName": self._provider_name, + } + + def track_duration(self, duration: int) -> None: + """ + Manually track the duration of an AI operation. + + :param duration: Duration in milliseconds. + """ + self._summary._duration = duration + self._ld_client.track( + "$ld:ai:duration:total", self._context, self.__get_track_data(), duration + ) + + def track_time_to_first_token(self, time_to_first_token: int) -> None: + """ + Manually track the time to first token of an AI operation. + + :param time_to_first_token: Time to first token in milliseconds. + """ + self._summary._time_to_first_token = time_to_first_token + self._ld_client.track( + "$ld:ai:tokens:ttf", + self._context, + self.__get_track_data(), + time_to_first_token, + ) + + def track_duration_of(self, func): + """ + Automatically track the duration of an AI operation. + + An exception occurring during the execution of the function will still + track the duration. The exception will be re-thrown. + + :param func: Function to track (synchronous only). + :return: Result of the tracked function. + """ + start_time = time.time() + try: + result = func() + finally: + end_time = time.time() + duration = int((end_time - start_time) * 1000) # duration in milliseconds + self.track_duration(duration) + + return result + + async def track_metrics_of(self, metrics_extractor, func): + """ + Track metrics for a generic AI operation. + + This function will track the duration of the operation, extract metrics using the provided + metrics extractor function, and track success or error status accordingly. + + If the provided function throws, then this method will also throw. + In the case the provided function throws, this function will record the duration and an error. + A failed operation will not have any token usage data. + + :param metrics_extractor: Function that extracts LDAIMetrics from the operation result + :param func: Async function which executes the operation + :return: The result of the operation + """ + start_time = time.time() + result = None + try: + result = await func() + except Exception as err: + end_time = time.time() + duration = int((end_time - start_time) * 1000) + self.track_duration(duration) + self.track_error() + raise err + + # Track duration after successful call + end_time = time.time() + duration = int((end_time - start_time) * 1000) + self.track_duration(duration) + + # Extract metrics after successful AI call + from ldai.metrics import LDAIMetrics + metrics = metrics_extractor(result) + + # Track success/error based on metrics + if metrics.success: + self.track_success() + else: + self.track_error() + + # Track token usage if available + if metrics.usage: + self.track_tokens(metrics.usage) + + return result + + def track_eval_scores(self, scores: Dict[str, Any]) -> None: + """ + Track evaluation scores for multiple metrics. + + :param scores: Dictionary mapping metric keys to their evaluation scores (EvalScore objects) + """ + from ldai.judge.types import EvalScore + + # Track each evaluation score individually + for metric_key, eval_score in scores.items(): + if isinstance(eval_score, EvalScore): + self._ld_client.track( + metric_key, + self._context, + self.__get_track_data(), + eval_score.score + ) + + def track_judge_response(self, judge_response: Any) -> None: + """ + Track a judge response, including evaluation scores and success status. 
+ + :param judge_response: JudgeResponse object containing evals and success status + """ + from ldai.judge.types import JudgeResponse + + if isinstance(judge_response, JudgeResponse): + # Track evaluation scores + if judge_response.evals: + self.track_eval_scores(judge_response.evals) + + # Track success/error based on judge response + if judge_response.success: + self.track_success() + else: + self.track_error() + + def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None: + """ + Track user feedback for an AI operation. + + :param feedback: Dictionary containing feedback kind. + """ + self._summary._feedback = feedback + if feedback["kind"] == FeedbackKind.Positive: + self._ld_client.track( + "$ld:ai:feedback:user:positive", + self._context, + self.__get_track_data(), + 1, + ) + elif feedback["kind"] == FeedbackKind.Negative: + self._ld_client.track( + "$ld:ai:feedback:user:negative", + self._context, + self.__get_track_data(), + 1, + ) + + def track_success(self) -> None: + """ + Track a successful AI generation. + """ + self._summary._success = True + self._ld_client.track( + "$ld:ai:generation:success", self._context, self.__get_track_data(), 1 + ) + + def track_error(self) -> None: + """ + Track an unsuccessful AI generation attempt. + """ + self._summary._success = False + self._ld_client.track( + "$ld:ai:generation:error", self._context, self.__get_track_data(), 1 + ) + + async def track_openai_metrics(self, func): + """ + Track OpenAI-specific operations. + + This function will track the duration of the operation, the token + usage, and the success or error status. + + If the provided function throws, then this method will also throw. + + In the case the provided function throws, this function will record the + duration and an error. + + A failed operation will not have any token usage data. + + :param func: Async function to track. + :return: Result of the tracked function. + """ + start_time = time.time() + try: + result = await func() + end_time = time.time() + duration = int((end_time - start_time) * 1000) + self.track_duration(duration) + self.track_success() + if hasattr(result, "usage") and hasattr(result.usage, "to_dict"): + self.track_tokens(_openai_to_token_usage(result.usage.to_dict())) + except Exception: + end_time = time.time() + duration = int((end_time - start_time) * 1000) + self.track_duration(duration) + self.track_error() + raise + + return result + + def track_bedrock_converse_metrics(self, res: dict) -> dict: + """ + Track AWS Bedrock conversation operations. + + + This function will track the duration of the operation, the token + usage, and the success or error status. + + :param res: Response dictionary from Bedrock. + :return: The original response dictionary. + """ + status_code = res.get("ResponseMetadata", {}).get("HTTPStatusCode", 0) + if status_code == 200: + self.track_success() + elif status_code >= 400: + self.track_error() + if res.get("metrics", {}).get("latencyMs"): + self.track_duration(res["metrics"]["latencyMs"]) + if res.get("usage"): + self.track_tokens(_bedrock_to_token_usage(res["usage"])) + return res + + def track_tokens(self, tokens: TokenUsage) -> None: + """ + Track token usage metrics. + + :param tokens: Token usage data from either custom, OpenAI, or Bedrock sources. 
+ """ + self._summary._usage = tokens + if tokens.total > 0: + self._ld_client.track( + "$ld:ai:tokens:total", + self._context, + self.__get_track_data(), + tokens.total, + ) + if tokens.input > 0: + self._ld_client.track( + "$ld:ai:tokens:input", + self._context, + self.__get_track_data(), + tokens.input, + ) + if tokens.output > 0: + self._ld_client.track( + "$ld:ai:tokens:output", + self._context, + self.__get_track_data(), + tokens.output, + ) + + def get_summary(self) -> LDAIMetricSummary: + """ + Get the current summary of AI metrics. + + :return: Summary of AI metrics. + """ + return self._summary + + +def _bedrock_to_token_usage(data: dict) -> TokenUsage: + """ + Convert a Bedrock usage dictionary to a TokenUsage object. + + :param data: Dictionary containing Bedrock usage data. + :return: TokenUsage object containing usage data. + """ + return TokenUsage( + total=data.get("totalTokens", 0), + input=data.get("inputTokens", 0), + output=data.get("outputTokens", 0), + ) + + +def _openai_to_token_usage(data: dict) -> TokenUsage: + """ + Convert an OpenAI usage dictionary to a TokenUsage object. + + :param data: Dictionary containing OpenAI usage data. + :return: TokenUsage object containing usage data. + """ + return TokenUsage( + total=data.get("total_tokens", 0), + input=data.get("prompt_tokens", 0), + output=data.get("completion_tokens", 0), + ) diff --git a/packages/sdk/server-ai/tests/__init__.py b/packages/sdk/server-ai/tests/__init__.py new file mode 100644 index 0000000..1f7baa7 --- /dev/null +++ b/packages/sdk/server-ai/tests/__init__.py @@ -0,0 +1,2 @@ +"""Tests for LaunchDarkly Server SDK for AI - Core package.""" + diff --git a/packages/sdk/server-ai/tests/test_agents.py b/packages/sdk/server-ai/tests/test_agents.py new file mode 100644 index 0000000..755f2e5 --- /dev/null +++ b/packages/sdk/server-ai/tests/test_agents.py @@ -0,0 +1,342 @@ +import pytest +from ldclient import Config, Context, LDClient +from ldclient.integrations.test_data import TestData + +from ldai import (LDAIAgentConfig, LDAIAgentDefaults, LDAIClient, ModelConfig, + ProviderConfig) + + +@pytest.fixture +def td() -> TestData: + td = TestData.data_source() + + # Single agent with instructions + td.update( + td.flag('customer-support-agent') + .variations( + { + 'model': {'name': 'gpt-4', 'parameters': {'temperature': 0.3, 'maxTokens': 2048}}, + 'provider': {'name': 'openai'}, + 'instructions': 'You are a helpful customer support agent for {{company_name}}. Always be polite and professional.', + '_ldMeta': {'enabled': True, 'variationKey': 'agent-v1', 'version': 1, 'mode': 'agent'}, + } + ) + .variation_for_all(0) + ) + + # Agent with context interpolation + td.update( + td.flag('personalized-agent') + .variations( + { + 'model': {'name': 'claude-3', 'parameters': {'temperature': 0.5}}, + 'instructions': 'Hello {{ldctx.name}}! I am your personal assistant. Your user key is {{ldctx.key}}.', + '_ldMeta': {'enabled': True, 'variationKey': 'personal-v1', 'version': 2, 'mode': 'agent'}, + } + ) + .variation_for_all(0) + ) + + # Agent with multi-context interpolation + td.update( + td.flag('multi-context-agent') + .variations( + { + 'model': {'name': 'gpt-3.5-turbo'}, + 'instructions': 'Welcome {{ldctx.user.name}} from {{ldctx.org.name}}! 
Your organization tier is {{ldctx.org.tier}}.', + '_ldMeta': {'enabled': True, 'variationKey': 'multi-v1', 'version': 1, 'mode': 'agent'}, + } + ) + .variation_for_all(0) + ) + + # Disabled agent + td.update( + td.flag('disabled-agent') + .variations( + { + 'model': {'name': 'gpt-4'}, + 'instructions': 'This agent is disabled.', + '_ldMeta': {'enabled': False, 'variationKey': 'disabled-v1', 'version': 1, 'mode': 'agent'}, + } + ) + .variation_for_all(0) + ) + + # Agent with minimal metadata + td.update( + td.flag('minimal-agent') + .variations( + { + 'instructions': 'Minimal agent configuration.', + '_ldMeta': {'enabled': True}, + } + ) + .variation_for_all(0) + ) + + # Sales assistant agent + td.update( + td.flag('sales-assistant') + .variations( + { + 'model': {'name': 'gpt-4', 'parameters': {'temperature': 0.7}}, + 'provider': {'name': 'openai'}, + 'instructions': 'You are a sales assistant for {{company_name}}. Help customers find the right products.', + '_ldMeta': {'enabled': True, 'variationKey': 'sales-v1', 'version': 1, 'mode': 'agent'}, + } + ) + .variation_for_all(0) + ) + + # Research agent for testing single agent method + td.update( + td.flag('research-agent') + .variations( + { + 'model': {'name': 'gpt-4', 'parameters': {'temperature': 0.2, 'maxTokens': 3000}}, + 'provider': {'name': 'openai'}, + 'instructions': 'You are a research assistant specializing in {{topic}}. Your expertise level should match {{ldctx.expertise}}.', + '_ldMeta': {'enabled': True, 'variationKey': 'research-v1', 'version': 1, 'mode': 'agent'}, + } + ) + .variation_for_all(0) + ) + + return td + + +@pytest.fixture +def client(td: TestData) -> LDClient: + config = Config('sdk-key', update_processor_class=td, send_events=False) + return LDClient(config=config) + + +@pytest.fixture +def ldai_client(client: LDClient) -> LDAIClient: + return LDAIClient(client) + + +def test_single_agent_method(ldai_client: LDAIClient): + """Test the single agent() method functionality.""" + context = Context.builder('user-key').set('expertise', 'advanced').build() + config = LDAIAgentConfig( + key='research-agent', + default_value=LDAIAgentDefaults( + enabled=False, + model=ModelConfig('fallback-model'), + instructions="Default instructions" + ), + variables={'topic': 'quantum computing'} + ) + + agent = ldai_client.agent(config, context) + + assert agent.enabled is True + assert agent.model is not None + assert agent.model.name == 'gpt-4' + assert agent.model.get_parameter('temperature') == 0.2 + assert agent.model.get_parameter('maxTokens') == 3000 + assert agent.provider is not None + assert agent.provider.name == 'openai' + assert agent.instructions == 'You are a research assistant specializing in quantum computing. Your expertise level should match advanced.' + assert agent.tracker is not None + + +def test_single_agent_with_defaults(ldai_client: LDAIClient): + """Test single agent method with non-existent flag using defaults.""" + context = Context.create('user-key') + config = LDAIAgentConfig( + key='non-existent-agent', + default_value=LDAIAgentDefaults( + enabled=True, + model=ModelConfig('default-model', parameters={'temp': 0.8}), + provider=ProviderConfig('default-provider'), + instructions="You are a default assistant for {{task}}." 
+ ), + variables={'task': 'general assistance'} + ) + + agent = ldai_client.agent(config, context) + + assert agent.enabled is True + assert agent.model is not None and agent.model.name == 'default-model' + assert agent.model is not None and agent.model.get_parameter('temp') == 0.8 + assert agent.provider is not None and agent.provider.name == 'default-provider' + assert agent.instructions == "You are a default assistant for general assistance." + assert agent.tracker is not None + + +def test_agents_method_with_configs(ldai_client: LDAIClient): + """Test the new agents() method with LDAIAgentConfig objects.""" + context = Context.create('user-key') + + agent_configs = [ + LDAIAgentConfig( + key='customer-support-agent', + default_value=LDAIAgentDefaults( + enabled=False, + model=ModelConfig('fallback-model'), + instructions="Default support" + ), + variables={'company_name': 'Acme Corp'} + ), + LDAIAgentConfig( + key='sales-assistant', + default_value=LDAIAgentDefaults( + enabled=False, + model=ModelConfig('fallback-model'), + instructions="Default sales" + ), + variables={'company_name': 'Acme Corp'} + ) + ] + + agents = ldai_client.agents(agent_configs, context) + + assert len(agents) == 2 + assert 'customer-support-agent' in agents + assert 'sales-assistant' in agents + + support_agent = agents['customer-support-agent'] + assert support_agent.enabled is True + assert support_agent.instructions is not None and 'Acme Corp' in support_agent.instructions + + sales_agent = agents['sales-assistant'] + assert sales_agent.enabled is True + assert sales_agent.instructions is not None and 'Acme Corp' in sales_agent.instructions + assert sales_agent.model is not None and sales_agent.model.get_parameter('temperature') == 0.7 + + +def test_agents_method_different_variables_per_agent(ldai_client: LDAIClient): + """Test agents method with different variables for each agent.""" + context = Context.builder('user-key').name('Alice').build() + + agent_configs = [ + LDAIAgentConfig( + key='personalized-agent', + default_value=LDAIAgentDefaults( + enabled=True, + instructions="Default personal" + ), + variables={} # Will use context only + ), + LDAIAgentConfig( + key='customer-support-agent', + default_value=LDAIAgentDefaults( + enabled=True, + instructions="Default support" + ), + variables={'company_name': 'TechStart Inc'} + ) + ] + + agents = ldai_client.agents(agent_configs, context) + + personal_agent = agents['personalized-agent'] + assert personal_agent.instructions == 'Hello Alice! I am your personal assistant. Your user key is user-key.' + + support_agent = agents['customer-support-agent'] + assert support_agent.instructions == 'You are a helpful customer support agent for TechStart Inc. Always be polite and professional.' + + +def test_agents_with_multi_context_interpolation(ldai_client: LDAIClient): + """Test agents method with multi-context interpolation.""" + user_context = Context.builder('user-key').name('Alice').build() + org_context = Context.builder('org-key').kind('org').name('LaunchDarkly').set('tier', 'Enterprise').build() + context = Context.multi_builder().add(user_context).add(org_context).build() + + agent_configs = [ + LDAIAgentConfig( + key='multi-context-agent', + default_value=LDAIAgentDefaults( + enabled=True, + instructions="Default multi-context" + ), + variables={} + ) + ] + + agents = ldai_client.agents(agent_configs, context) + + agent = agents['multi-context-agent'] + assert agent.instructions == 'Welcome Alice from LaunchDarkly! Your organization tier is Enterprise.' 
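+
The agent tests above show how `agent()` and `agents()` resolve instructions with Mustache-style variable substitution and `ldctx` context interpolation. For orientation, usage outside the test harness might look roughly like the sketch below; the SDK key, flag key, and variable values are placeholders for illustration and are not defined anywhere in this patch.

```python
# Minimal usage sketch (placeholder SDK key and flag key; not part of this patch).
from ldclient import Config, Context, LDClient

from ldai import LDAIAgentConfig, LDAIAgentDefaults, LDAIClient, ModelConfig

ld_client = LDClient(config=Config('sdk-key'))
ai_client = LDAIClient(ld_client)

context = Context.builder('user-key').name('Alice').build()
config = LDAIAgentConfig(
    key='customer-support-agent',
    default_value=LDAIAgentDefaults(
        enabled=False,
        model=ModelConfig('gpt-4'),
        instructions='You are a helpful support agent for {{company_name}}.',
    ),
    variables={'company_name': 'Acme Corp'},
)

agent = ai_client.agent(config, context)
if agent.enabled and agent.instructions:
    # {{company_name}} and any {{ldctx.*}} references are interpolated by this point.
    print(agent.instructions)
```
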
+ + +def test_disabled_agent_single_method(ldai_client: LDAIClient): + """Test that disabled agents are properly handled in single agent method.""" + context = Context.create('user-key') + config = LDAIAgentConfig( + key='disabled-agent', + default_value=LDAIAgentDefaults(enabled=False), + variables={} + ) + + agent = ldai_client.agent(config, context) + + assert agent.enabled is False + assert agent.tracker is not None + + +def test_disabled_agent_multiple_method(ldai_client: LDAIClient): + """Test that disabled agents are properly handled in multiple agents method.""" + context = Context.create('user-key') + + agent_configs = [ + LDAIAgentConfig( + key='disabled-agent', + default_value=LDAIAgentDefaults(enabled=False), + variables={} + ) + ] + + agents = ldai_client.agents(agent_configs, context) + + assert len(agents) == 1 + assert agents['disabled-agent'].enabled is False + + +def test_agent_with_missing_metadata(ldai_client: LDAIClient): + """Test agent handling when metadata is minimal or missing.""" + context = Context.create('user-key') + config = LDAIAgentConfig( + key='minimal-agent', + default_value=LDAIAgentDefaults( + enabled=False, + model=ModelConfig('default-model'), + instructions="Default instructions" + ) + ) + + agent = ldai_client.agent(config, context) + + assert agent.enabled is True # From flag + assert agent.instructions == 'Minimal agent configuration.' + assert agent.model == config.default_value.model # Falls back to default + assert agent.tracker is not None + + +def test_agent_config_dataclass(): + """Test the LDAIAgentConfig dataclass functionality.""" + config = LDAIAgentConfig( + key='test-agent', + default_value=LDAIAgentDefaults( + enabled=True, + instructions="Test instructions" + ), + variables={'key': 'value'} + ) + + assert config.key == 'test-agent' + assert config.default_value.enabled is True + assert config.default_value.instructions == "Test instructions" + assert config.variables == {'key': 'value'} + + # Test with no variables + config_no_vars = LDAIAgentConfig( + key='test-agent-2', + default_value=LDAIAgentDefaults(enabled=False) + ) + + assert config_no_vars.key == 'test-agent-2' + assert config_no_vars.variables is None diff --git a/packages/sdk/server-ai/tests/test_model_config.py b/packages/sdk/server-ai/tests/test_model_config.py new file mode 100644 index 0000000..d556c10 --- /dev/null +++ b/packages/sdk/server-ai/tests/test_model_config.py @@ -0,0 +1,330 @@ +import pytest +from ldclient import Config, Context, LDClient +from ldclient.integrations.test_data import TestData + +from ldai import AICompletionConfigDefault, LDAIClient, LDMessage, ModelConfig + + +@pytest.fixture +def td() -> TestData: + td = TestData.data_source() + td.update( + td.flag('model-config') + .variations( + { + 'model': {'name': 'fakeModel', 'parameters': {'temperature': 0.5, 'maxTokens': 4096}, 'custom': {'extra-attribute': 'value'}}, + 'provider': {'name': 'fakeProvider'}, + 'messages': [{'role': 'system', 'content': 'Hello, {{name}}!'}], + '_ldMeta': {'enabled': True, 'variationKey': 'abcd', 'version': 1}, + }, + "green", + ) + .variation_for_all(0) + ) + + td.update( + td.flag('multiple-messages') + .variations( + { + 'model': {'name': 'fakeModel', 'parameters': {'temperature': 0.7, 'maxTokens': 8192}}, + 'messages': [ + {'role': 'system', 'content': 'Hello, {{name}}!'}, + {'role': 'user', 'content': 'The day is, {{day}}!'}, + ], + '_ldMeta': {'enabled': True, 'variationKey': 'abcd', 'version': 1}, + }, + "green", + ) + .variation_for_all(0) + ) + + 
td.update( + td.flag('ctx-interpolation') + .variations( + { + 'model': {'name': 'fakeModel', 'parameters': {'extra-attribute': 'I can be anything I set my mind/type to'}}, + 'messages': [{'role': 'system', 'content': 'Hello, {{ldctx.name}}! Is your last name {{ldctx.last}}?'}], + '_ldMeta': {'enabled': True, 'variationKey': 'abcd', 'version': 1}, + } + ) + .variation_for_all(0) + ) + + td.update( + td.flag('multi-ctx-interpolation') + .variations( + { + 'model': {'name': 'fakeModel', 'parameters': {'extra-attribute': 'I can be anything I set my mind/type to'}}, + 'messages': [{'role': 'system', 'content': 'Hello, {{ldctx.user.name}}! Do you work for {{ldctx.org.shortname}}?'}], + '_ldMeta': {'enabled': True, 'variationKey': 'abcd', 'version': 1}, + } + ) + .variation_for_all(0) + ) + + td.update( + td.flag('off-config') + .variations( + { + 'model': {'name': 'fakeModel', 'parameters': {'temperature': 0.1}}, + 'messages': [{'role': 'system', 'content': 'Hello, {{name}}!'}], + '_ldMeta': {'enabled': False, 'variationKey': 'abcd', 'version': 1}, + } + ) + .variation_for_all(0) + ) + + td.update( + td.flag('initial-config-disabled') + .variations( + { + '_ldMeta': {'enabled': False}, + }, + { + '_ldMeta': {'enabled': True}, + } + ) + .variation_for_all(0) + ) + + td.update( + td.flag('initial-config-enabled') + .variations( + { + '_ldMeta': {'enabled': False}, + }, + { + '_ldMeta': {'enabled': True}, + } + ) + .variation_for_all(1) + ) + + return td + + +@pytest.fixture +def client(td: TestData) -> LDClient: + config = Config('sdk-key', update_processor_class=td, send_events=False) + return LDClient(config=config) + + +@pytest.fixture +def ldai_client(client: LDClient) -> LDAIClient: + return LDAIClient(client) + + +def test_model_config_delegates_to_properties(): + model = ModelConfig('fakeModel', parameters={'extra-attribute': 'value'}) + assert model.name == 'fakeModel' + assert model.get_parameter('extra-attribute') == 'value' + assert model.get_parameter('non-existent') is None + + assert model.name == model.get_parameter('name') + + +def test_model_config_handles_custom(): + model = ModelConfig('fakeModel', custom={'extra-attribute': 'value'}) + assert model.name == 'fakeModel' + assert model.get_parameter('extra-attribute') is None + assert model.get_custom('non-existent') is None + assert model.get_custom('name') is None + + +def test_uses_default_on_invalid_flag(ldai_client: LDAIClient): + context = Context.create('user-key') + default_value = AICompletionConfigDefault( + enabled=True, + model=ModelConfig('fakeModel', parameters={'temperature': 0.5, 'maxTokens': 4096}), + messages=[LDMessage(role='system', content='Hello, {{name}}!')], + ) + variables = {'name': 'World'} + + config = ldai_client.config('missing-flag', context, default_value, variables) + + assert config.messages is not None + assert len(config.messages) > 0 + assert config.messages[0].content == 'Hello, World!' 
+ assert config.enabled is True + + assert config.model is not None + assert config.model.name == 'fakeModel' + assert config.model.get_parameter('temperature') == 0.5 + assert config.model.get_parameter('maxTokens') == 4096 + + +def test_model_config_interpolation(ldai_client: LDAIClient): + context = Context.create('user-key') + default_value = AICompletionConfigDefault( + enabled=True, + model=ModelConfig('fakeModel'), + messages=[LDMessage(role='system', content='Hello, {{name}}!')], + ) + variables = {'name': 'World'} + + config = ldai_client.config('model-config', context, default_value, variables) + + assert config.messages is not None + assert len(config.messages) > 0 + assert config.messages[0].content == 'Hello, World!' + assert config.enabled is True + + assert config.model is not None + assert config.model.name == 'fakeModel' + assert config.model.get_parameter('temperature') == 0.5 + assert config.model.get_parameter('maxTokens') == 4096 + + +def test_model_config_no_variables(ldai_client: LDAIClient): + context = Context.create('user-key') + default_value = AICompletionConfigDefault(enabled=True, model=ModelConfig('fake-model'), messages=[]) + + config = ldai_client.config('model-config', context, default_value, {}) + + assert config.messages is not None + assert len(config.messages) > 0 + assert config.messages[0].content == 'Hello, !' + assert config.enabled is True + + assert config.model is not None + assert config.model.name == 'fakeModel' + assert config.model.get_parameter('temperature') == 0.5 + assert config.model.get_parameter('maxTokens') == 4096 + + +def test_provider_config_handling(ldai_client: LDAIClient): + context = Context.builder('user-key').name("Sandy").build() + default_value = AICompletionConfigDefault(enabled=True, model=ModelConfig('fake-model'), messages=[]) + variables = {'name': 'World'} + + config = ldai_client.config('model-config', context, default_value, variables) + + assert config.provider is not None + assert config.provider.name == 'fakeProvider' + + +def test_context_interpolation(ldai_client: LDAIClient): + context = Context.builder('user-key').name("Sandy").set('last', 'Beaches').build() + default_value = AICompletionConfigDefault(enabled=True, model=ModelConfig('fake-model'), messages=[]) + variables = {'name': 'World'} + + config = ldai_client.config( + 'ctx-interpolation', context, default_value, variables + ) + + assert config.messages is not None + assert len(config.messages) > 0 + assert config.messages[0].content == 'Hello, Sandy! Is your last name Beaches?' 
+ assert config.enabled is True + + assert config.model is not None + assert config.model.name == 'fakeModel' + assert config.model.get_parameter('temperature') is None + assert config.model.get_parameter('maxTokens') is None + assert config.model.get_parameter('extra-attribute') == 'I can be anything I set my mind/type to' + + +def test_multi_context_interpolation(ldai_client: LDAIClient): + user_context = Context.builder('user-key').name("Sandy").build() + org_context = Context.builder('org-key').kind('org').name("LaunchDarkly").set('shortname', 'LD').build() + context = Context.multi_builder().add(user_context).add(org_context).build() + default_value = AICompletionConfigDefault(enabled=True, model=ModelConfig('fake-model'), messages=[]) + variables = {'name': 'World'} + + config = ldai_client.config( + 'multi-ctx-interpolation', context, default_value, variables + ) + + assert config.messages is not None + assert len(config.messages) > 0 + assert config.messages[0].content == 'Hello, Sandy! Do you work for LD?' + assert config.enabled is True + + assert config.model is not None + assert config.model.name == 'fakeModel' + assert config.model.get_parameter('temperature') is None + assert config.model.get_parameter('maxTokens') is None + assert config.model.get_parameter('extra-attribute') == 'I can be anything I set my mind/type to' + + +def test_model_config_multiple(ldai_client: LDAIClient): + context = Context.create('user-key') + default_value = AICompletionConfigDefault(enabled=True, model=ModelConfig('fake-model'), messages=[]) + variables = {'name': 'World', 'day': 'Monday'} + + config = ldai_client.config( + 'multiple-messages', context, default_value, variables + ) + + assert config.messages is not None + assert len(config.messages) > 0 + assert config.messages[0].content == 'Hello, World!' + assert config.messages[1].content == 'The day is, Monday!' 
+ assert config.enabled is True + + assert config.model is not None + assert config.model.name == 'fakeModel' + assert config.model.get_parameter('temperature') == 0.7 + assert config.model.get_parameter('maxTokens') == 8192 + + +def test_model_config_disabled(ldai_client: LDAIClient): + context = Context.create('user-key') + default_value = AICompletionConfigDefault(enabled=False, model=ModelConfig('fake-model'), messages=[]) + + config = ldai_client.config('off-config', context, default_value, {}) + + assert config.model is not None + assert config.enabled is False + assert config.model.name == 'fakeModel' + assert config.model.get_parameter('temperature') == 0.1 + assert config.model.get_parameter('maxTokens') is None + + +def test_model_initial_config_disabled(ldai_client: LDAIClient): + context = Context.create('user-key') + default_value = AICompletionConfigDefault(enabled=False, model=ModelConfig('fake-model'), messages=[]) + + config = ldai_client.config('initial-config-disabled', context, default_value, {}) + + assert config.enabled is False + assert config.model is None + assert config.messages is None + assert config.provider is None + + +def test_model_initial_config_enabled(ldai_client: LDAIClient): + context = Context.create('user-key') + default_value = AICompletionConfigDefault(enabled=False, model=ModelConfig('fake-model'), messages=[]) + + config = ldai_client.config('initial-config-enabled', context, default_value, {}) + + assert config.enabled is True + assert config.model is None + assert config.messages is None + assert config.provider is None + + +def test_config_method_tracking(ldai_client: LDAIClient): + from unittest.mock import Mock + + mock_client = Mock() + mock_client.variation.return_value = { + '_ldMeta': {'enabled': True, 'variationKey': 'test-variation', 'version': 1}, + 'model': {'name': 'test-model'}, + 'provider': {'name': 'test-provider'}, + 'messages': [] + } + + client = LDAIClient(mock_client) + context = Context.create('user-key') + default_value = AICompletionConfigDefault(enabled=False, model=ModelConfig('fake-model'), messages=[]) + + config = client.config('test-config-key', context, default_value) + + mock_client.track.assert_called_once_with( + '$ld:ai:config:function:single', + context, + 'test-config-key', + 1 + ) diff --git a/packages/sdk/server-ai/tests/test_tracker.py b/packages/sdk/server-ai/tests/test_tracker.py new file mode 100644 index 0000000..f48ac5a --- /dev/null +++ b/packages/sdk/server-ai/tests/test_tracker.py @@ -0,0 +1,445 @@ +from time import sleep +from unittest.mock import MagicMock, call + +import pytest +from ldclient import Config, Context, LDClient +from ldclient.integrations.test_data import TestData + +from ldai.metrics import FeedbackKind, TokenUsage +from ldai.tracker import LDAIConfigTracker + + +@pytest.fixture +def td() -> TestData: + td = TestData.data_source() + td.update( + td.flag("model-config") + .variations( + { + "model": { + "name": "fakeModel", + "parameters": {"temperature": 0.5, "maxTokens": 4096}, + "custom": {"extra-attribute": "value"}, + }, + "provider": {"name": "fakeProvider"}, + "messages": [{"role": "system", "content": "Hello, {{name}}!"}], + "_ldMeta": {"enabled": True, "variationKey": "abcd", "version": 1}, + }, + "green", + ) + .variation_for_all(0) + ) + + return td + + +@pytest.fixture +def client(td: TestData) -> LDClient: + config = Config("sdk-key", update_processor_class=td, send_events=False) + client = LDClient(config=config) + client.track = MagicMock() # type: ignore + return 
client + + +def test_summary_starts_empty(client: LDClient): + context = Context.create("user-key") + tracker = LDAIConfigTracker(client, "variation-key", "config-key", 1, "fakeModel", "fakeProvider", context) + + assert tracker.get_summary().duration is None + assert tracker.get_summary().feedback is None + assert tracker.get_summary().success is None + assert tracker.get_summary().usage is None + + +def test_tracks_duration(client: LDClient): + context = Context.create("user-key") + tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) + tracker.track_duration(100) + + client.track.assert_called_with( # type: ignore + "$ld:ai:duration:total", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 100, + ) + + assert tracker.get_summary().duration == 100 + + +def test_tracks_duration_of(client: LDClient): + context = Context.create("user-key") + tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) + tracker.track_duration_of(lambda: sleep(0.01)) + + calls = client.track.mock_calls # type: ignore + + assert len(calls) == 1 + assert calls[0].args[0] == "$ld:ai:duration:total" + assert calls[0].args[1] == context + assert calls[0].args[2] == { + "variationKey": "variation-key", + "configKey": "config-key", + "version": 3, + "modelName": "fakeModel", + "providerName": "fakeProvider", + } + assert calls[0].args[3] == pytest.approx(10, rel=10) + + +def test_tracks_time_to_first_token(client: LDClient): + context = Context.create("user-key") + tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) + tracker.track_time_to_first_token(100) + + client.track.assert_called_with( # type: ignore + "$ld:ai:tokens:ttf", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 100, + ) + + assert tracker.get_summary().time_to_first_token == 100 + + +def test_tracks_duration_of_with_exception(client: LDClient): + context = Context.create("user-key") + tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) + + def sleep_and_throw(): + sleep(0.01) + raise ValueError("Something went wrong") + + try: + tracker.track_duration_of(sleep_and_throw) + assert False, "Should have thrown an exception" + except ValueError: + pass + + calls = client.track.mock_calls # type: ignore + + assert len(calls) == 1 + assert calls[0].args[0] == "$ld:ai:duration:total" + assert calls[0].args[1] == context + assert calls[0].args[2] == { + "variationKey": "variation-key", + "configKey": "config-key", + "version": 3, + "modelName": "fakeModel", + "providerName": "fakeProvider", + } + assert calls[0].args[3] == pytest.approx(10, rel=10) + + +def test_tracks_token_usage(client: LDClient): + context = Context.create("user-key") + tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) + + tokens = TokenUsage(300, 200, 100) + tracker.track_tokens(tokens) + + calls = [ + call( + "$ld:ai:tokens:total", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 300, + ), + call( + "$ld:ai:tokens:input", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": 
"fakeModel", "providerName": "fakeProvider"}, + 200, + ), + call( + "$ld:ai:tokens:output", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 100, + ), + ] + + client.track.assert_has_calls(calls) # type: ignore + + assert tracker.get_summary().usage == tokens + + +def test_tracks_bedrock_metrics(client: LDClient): + context = Context.create("user-key") + tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) + + bedrock_result = { + "ResponseMetadata": {"HTTPStatusCode": 200}, + "usage": { + "inputTokens": 220, + "outputTokens": 110, + "totalTokens": 330, + }, + "metrics": { + "latencyMs": 50, + }, + } + tracker.track_bedrock_converse_metrics(bedrock_result) + + calls = [ + call( + "$ld:ai:generation:success", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 1, + ), + call( + "$ld:ai:duration:total", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 50, + ), + call( + "$ld:ai:tokens:total", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 330, + ), + call( + "$ld:ai:tokens:input", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 220, + ), + call( + "$ld:ai:tokens:output", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 110, + ), + ] + + client.track.assert_has_calls(calls) # type: ignore + + assert tracker.get_summary().success is True + assert tracker.get_summary().duration == 50 + assert tracker.get_summary().usage == TokenUsage(330, 220, 110) + + +def test_tracks_bedrock_metrics_with_error(client: LDClient): + context = Context.create("user-key") + tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) + + bedrock_result = { + "ResponseMetadata": {"HTTPStatusCode": 500}, + "usage": { + "totalTokens": 330, + "inputTokens": 220, + "outputTokens": 110, + }, + "metrics": { + "latencyMs": 50, + }, + } + tracker.track_bedrock_converse_metrics(bedrock_result) + + calls = [ + call( + "$ld:ai:generation:error", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 1, + ), + call( + "$ld:ai:duration:total", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 50, + ), + call( + "$ld:ai:tokens:total", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 330, + ), + call( + "$ld:ai:tokens:input", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 220, + ), + call( + "$ld:ai:tokens:output", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 110, + ), + ] + + client.track.assert_has_calls(calls) # type: ignore + + assert 
tracker.get_summary().success is False + assert tracker.get_summary().duration == 50 + assert tracker.get_summary().usage == TokenUsage(330, 220, 110) + + +@pytest.mark.asyncio +async def test_tracks_openai_metrics(client: LDClient): + context = Context.create("user-key") + tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) + + class Result: + def __init__(self): + self.usage = Usage() + + class Usage: + def to_dict(self): + return { + "total_tokens": 330, + "prompt_tokens": 220, + "completion_tokens": 110, + } + + async def get_result(): + return Result() + + await tracker.track_openai_metrics(get_result) + + calls = [ + call( + "$ld:ai:generation:success", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 1, + ), + call( + "$ld:ai:tokens:total", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 330, + ), + call( + "$ld:ai:tokens:input", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 220, + ), + call( + "$ld:ai:tokens:output", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 110, + ), + ] + + client.track.assert_has_calls(calls, any_order=False) # type: ignore + + assert tracker.get_summary().usage == TokenUsage(330, 220, 110) + + +@pytest.mark.asyncio +async def test_tracks_openai_metrics_with_exception(client: LDClient): + context = Context.create("user-key") + tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) + + async def raise_exception(): + raise ValueError("Something went wrong") + + try: + await tracker.track_openai_metrics(raise_exception) + assert False, "Should have thrown an exception" + except ValueError: + pass + + calls = [ + call( + "$ld:ai:generation:error", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 1, + ), + ] + + client.track.assert_has_calls(calls, any_order=False) # type: ignore + + assert tracker.get_summary().usage is None + + +@pytest.mark.parametrize( + "kind,label", + [ + pytest.param(FeedbackKind.Positive, "positive", id="positive"), + pytest.param(FeedbackKind.Negative, "negative", id="negative"), + ], +) +def test_tracks_feedback(client: LDClient, kind: FeedbackKind, label: str): + context = Context.create("user-key") + tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) + + tracker.track_feedback({"kind": kind}) + + client.track.assert_called_with( # type: ignore + f"$ld:ai:feedback:user:{label}", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 1, + ) + assert tracker.get_summary().feedback == {"kind": kind} + + +def test_tracks_success(client: LDClient): + context = Context.create("user-key") + tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) + tracker.track_success() + + calls = [ + call( + "$ld:ai:generation:success", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", 
"providerName": "fakeProvider"}, + 1, + ), + ] + + client.track.assert_has_calls(calls) # type: ignore + + assert tracker.get_summary().success is True + + +def test_tracks_error(client: LDClient): + context = Context.create("user-key") + tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) + tracker.track_error() + + calls = [ + call( + "$ld:ai:generation:error", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 1, + ), + ] + + client.track.assert_has_calls(calls) # type: ignore + + assert tracker.get_summary().success is False + + +def test_error_overwrites_success(client: LDClient): + context = Context.create("user-key") + tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) + tracker.track_success() + tracker.track_error() + + calls = [ + call( + "$ld:ai:generation:success", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 1, + ), + call( + "$ld:ai:generation:error", + context, + {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, + 1, + ), + ] + + client.track.assert_has_calls(calls) # type: ignore + + assert tracker.get_summary().success is False diff --git a/release-please-config.json b/release-please-config.json index 78df6d7..ec0b76f 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -1,6 +1,6 @@ { "packages": { - ".": { + "packages/sdk/server-ai": { "release-type": "python", "versioning": "default", "bump-minor-pre-major": true, From 50a42c8c70b20e7f162d91c3a1becff748ca8f5c Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Sat, 22 Nov 2025 00:18:41 +0100 Subject: [PATCH 02/12] docs --- Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Makefile b/Makefile index c2e9986..57067b3 100644 --- a/Makefile +++ b/Makefile @@ -40,5 +40,4 @@ lint: install .PHONY: docs docs: #! Generate sphinx-based documentation @cd packages/sdk/server-ai && poetry install --with docs - @cd docs - @cd packages/sdk/server-ai && poetry run $(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + @cd packages/sdk/server-ai && poetry run $(SPHINXBUILD) -M html "../../../$(SOURCEDIR)" "../../../$(BUILDDIR)" $(SPHINXOPTS) $(O) From 0da0dba7cfefc2b331ca9ecd4601474850a05ad7 Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Tue, 25 Nov 2025 18:43:39 +0100 Subject: [PATCH 03/12] remove the old files --- ... 
copied from another project for reference | 1 + ldai/__init__.py | 1 - ldai/client.py | 455 ------------------ ldai/testing/__init__.py | 0 ldai/testing/test_agents.py | 342 ------------- ldai/testing/test_model_config.py | 330 ------------- ldai/testing/test_tracker.py | 439 ----------------- ldai/tracker.py | 313 ------------ pyproject.toml | 71 --- release-please-config.json | 2 +- setup.cfg | 2 - 11 files changed, 2 insertions(+), 1954 deletions(-) create mode 160000 js-core - copied from another project for reference delete mode 100644 ldai/__init__.py delete mode 100644 ldai/client.py delete mode 100644 ldai/testing/__init__.py delete mode 100644 ldai/testing/test_agents.py delete mode 100644 ldai/testing/test_model_config.py delete mode 100644 ldai/testing/test_tracker.py delete mode 100644 ldai/tracker.py delete mode 100644 pyproject.toml delete mode 100644 setup.cfg diff --git a/js-core - copied from another project for reference b/js-core - copied from another project for reference new file mode 160000 index 0000000..213fc79 --- /dev/null +++ b/js-core - copied from another project for reference @@ -0,0 +1 @@ +Subproject commit 213fc793c752af6517ba7c117219205fb62b9c65 diff --git a/ldai/__init__.py b/ldai/__init__.py deleted file mode 100644 index cb7e545..0000000 --- a/ldai/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.10.1" # x-release-please-version diff --git a/ldai/client.py b/ldai/client.py deleted file mode 100644 index a8bd888..0000000 --- a/ldai/client.py +++ /dev/null @@ -1,455 +0,0 @@ -from dataclasses import dataclass -from typing import Any, Dict, List, Literal, Optional, Tuple - -import chevron -from ldclient import Context -from ldclient.client import LDClient - -from ldai.tracker import LDAIConfigTracker - - -@dataclass -class LDMessage: - role: Literal['system', 'user', 'assistant'] - content: str - - def to_dict(self) -> dict: - """ - Render the given message as a dictionary object. - """ - return { - 'role': self.role, - 'content': self.content, - } - - -class ModelConfig: - """ - Configuration related to the model. - """ - - def __init__(self, name: str, parameters: Optional[Dict[str, Any]] = None, custom: Optional[Dict[str, Any]] = None): - """ - :param name: The name of the model. - :param parameters: Additional model-specific parameters. - :param custom: Additional customer provided data. - """ - self._name = name - self._parameters = parameters - self._custom = custom - - @property - def name(self) -> str: - """ - The name of the model. - """ - return self._name - - def get_parameter(self, key: str) -> Any: - """ - Retrieve model-specific parameters. - - Accessing a named, typed attribute (e.g. name) will result in the call - being delegated to the appropriate property. - """ - if key == 'name': - return self.name - - if self._parameters is None: - return None - - return self._parameters.get(key) - - def get_custom(self, key: str) -> Any: - """ - Retrieve customer provided data. - """ - if self._custom is None: - return None - - return self._custom.get(key) - - def to_dict(self) -> dict: - """ - Render the given model config as a dictionary object. - """ - return { - 'name': self._name, - 'parameters': self._parameters, - 'custom': self._custom, - } - - -class ProviderConfig: - """ - Configuration related to the provider. - """ - - def __init__(self, name: str): - self._name = name - - @property - def name(self) -> str: - """ - The name of the provider. 
- """ - return self._name - - def to_dict(self) -> dict: - """ - Render the given provider config as a dictionary object. - """ - return { - 'name': self._name, - } - - -@dataclass(frozen=True) -class AIConfig: - enabled: Optional[bool] = None - model: Optional[ModelConfig] = None - messages: Optional[List[LDMessage]] = None - provider: Optional[ProviderConfig] = None - - def to_dict(self) -> dict: - """ - Render the given default values as an AIConfig-compatible dictionary object. - """ - return { - '_ldMeta': { - 'enabled': self.enabled or False, - }, - 'model': self.model.to_dict() if self.model else None, - 'messages': [message.to_dict() for message in self.messages] if self.messages else None, - 'provider': self.provider.to_dict() if self.provider else None, - } - - -@dataclass(frozen=True) -class LDAIAgent: - """ - Represents an AI agent configuration with instructions and model settings. - - An agent is similar to an AIConfig but focuses on instructions rather than messages, - making it suitable for AI assistant/agent use cases. - """ - enabled: Optional[bool] = None - model: Optional[ModelConfig] = None - provider: Optional[ProviderConfig] = None - instructions: Optional[str] = None - tracker: Optional[LDAIConfigTracker] = None - - def to_dict(self) -> Dict[str, Any]: - """ - Render the given agent as a dictionary object. - """ - result: Dict[str, Any] = { - '_ldMeta': { - 'enabled': self.enabled or False, - }, - 'model': self.model.to_dict() if self.model else None, - 'provider': self.provider.to_dict() if self.provider else None, - } - if self.instructions is not None: - result['instructions'] = self.instructions - return result - - -@dataclass(frozen=True) -class LDAIAgentDefaults: - """ - Default values for AI agent configurations. - - Similar to LDAIAgent but without tracker and with optional enabled field, - used as fallback values when agent configurations are not available. - """ - enabled: Optional[bool] = None - model: Optional[ModelConfig] = None - provider: Optional[ProviderConfig] = None - instructions: Optional[str] = None - - def to_dict(self) -> Dict[str, Any]: - """ - Render the given agent defaults as a dictionary object. - """ - result: Dict[str, Any] = { - '_ldMeta': { - 'enabled': self.enabled or False, - }, - 'model': self.model.to_dict() if self.model else None, - 'provider': self.provider.to_dict() if self.provider else None, - } - if self.instructions is not None: - result['instructions'] = self.instructions - return result - - -@dataclass -class LDAIAgentConfig: - """ - Configuration for individual agent in batch requests. - - Combines agent key with its specific default configuration and variables. - """ - key: str - default_value: LDAIAgentDefaults - variables: Optional[Dict[str, Any]] = None - - -# Type alias for multiple agents -LDAIAgents = Dict[str, LDAIAgent] - - -class LDAIClient: - """The LaunchDarkly AI SDK client object.""" - - def __init__(self, client: LDClient): - self._client = client - - def config( - self, - key: str, - context: Context, - default_value: AIConfig, - variables: Optional[Dict[str, Any]] = None, - ) -> Tuple[AIConfig, LDAIConfigTracker]: - """ - Get the value of a model configuration. - - :param key: The key of the model configuration. - :param context: The context to evaluate the model configuration in. - :param default_value: The default value of the model configuration. - :param variables: Additional variables for the model configuration. 
- :return: The value of the model configuration along with a tracker used for gathering metrics. - """ - self._client.track('$ld:ai:config:function:single', context, key, 1) - - model, provider, messages, instructions, tracker, enabled = self.__evaluate(key, context, default_value.to_dict(), variables) - - config = AIConfig( - enabled=bool(enabled), - model=model, - messages=messages, - provider=provider, - ) - - return config, tracker - - def agent( - self, - config: LDAIAgentConfig, - context: Context, - ) -> LDAIAgent: - """ - Retrieve a single AI Config agent. - - This method retrieves a single agent configuration with instructions - dynamically interpolated using the provided variables and context data. - - Example:: - - agent = client.agent(LDAIAgentConfig( - key='research_agent', - default_value=LDAIAgentDefaults( - enabled=True, - model=ModelConfig('gpt-4'), - instructions="You are a research assistant specializing in {{topic}}." - ), - variables={'topic': 'climate change'} - ), context) - - if agent.enabled: - research_result = agent.instructions # Interpolated instructions - agent.tracker.track_success() - - :param config: The agent configuration to use. - :param context: The context to evaluate the agent configuration in. - :return: Configured LDAIAgent instance. - """ - # Track single agent usage - self._client.track( - "$ld:ai:agent:function:single", - context, - config.key, - 1 - ) - - return self.__evaluate_agent(config.key, context, config.default_value, config.variables) - - def agents( - self, - agent_configs: List[LDAIAgentConfig], - context: Context, - ) -> LDAIAgents: - """ - Retrieve multiple AI agent configurations. - - This method allows you to retrieve multiple agent configurations in a single call, - with each agent having its own default configuration and variables for instruction - interpolation. - - Example:: - - agents = client.agents([ - LDAIAgentConfig( - key='research_agent', - default_value=LDAIAgentDefaults( - enabled=True, - instructions='You are a research assistant.' - ), - variables={'topic': 'climate change'} - ), - LDAIAgentConfig( - key='writing_agent', - default_value=LDAIAgentDefaults( - enabled=True, - instructions='You are a writing assistant.' - ), - variables={'style': 'academic'} - ) - ], context) - - research_result = agents["research_agent"].instructions - agents["research_agent"].tracker.track_success() - - :param agent_configs: List of agent configurations to retrieve. - :param context: The context to evaluate the agent configurations in. - :return: Dictionary mapping agent keys to their LDAIAgent configurations. - """ - # Track multiple agents usage - agent_count = len(agent_configs) - self._client.track( - "$ld:ai:agent:function:multiple", - context, - agent_count, - agent_count - ) - - result: LDAIAgents = {} - - for config in agent_configs: - agent = self.__evaluate_agent( - config.key, - context, - config.default_value, - config.variables - ) - result[config.key] = agent - - return result - - def __evaluate( - self, - key: str, - context: Context, - default_dict: Dict[str, Any], - variables: Optional[Dict[str, Any]] = None, - ) -> Tuple[Optional[ModelConfig], Optional[ProviderConfig], Optional[List[LDMessage]], Optional[str], LDAIConfigTracker, bool]: - """ - Internal method to evaluate a configuration and extract components. - - :param key: The configuration key. - :param context: The evaluation context. - :param default_dict: Default configuration as dictionary. - :param variables: Variables for interpolation. 
- :return: Tuple of (model, provider, messages, instructions, tracker, enabled). - """ - variation = self._client.variation(key, context, default_dict) - - all_variables = {} - if variables: - all_variables.update(variables) - all_variables['ldctx'] = context.to_dict() - - # Extract messages - messages = None - if 'messages' in variation and isinstance(variation['messages'], list) and all( - isinstance(entry, dict) for entry in variation['messages'] - ): - messages = [ - LDMessage( - role=entry['role'], - content=self.__interpolate_template( - entry['content'], all_variables - ), - ) - for entry in variation['messages'] - ] - - # Extract instructions - instructions = None - if 'instructions' in variation and isinstance(variation['instructions'], str): - instructions = self.__interpolate_template(variation['instructions'], all_variables) - - # Extract provider config - provider_config = None - if 'provider' in variation and isinstance(variation['provider'], dict): - provider = variation['provider'] - provider_config = ProviderConfig(provider.get('name', '')) - - # Extract model config - model = None - if 'model' in variation and isinstance(variation['model'], dict): - parameters = variation['model'].get('parameters', None) - custom = variation['model'].get('custom', None) - model = ModelConfig( - name=variation['model']['name'], - parameters=parameters, - custom=custom - ) - - # Create tracker - tracker = LDAIConfigTracker( - self._client, - variation.get('_ldMeta', {}).get('variationKey', ''), - key, - int(variation.get('_ldMeta', {}).get('version', 1)), - model.name if model else '', - provider_config.name if provider_config else '', - context, - ) - - enabled = variation.get('_ldMeta', {}).get('enabled', False) - - return model, provider_config, messages, instructions, tracker, enabled - - def __evaluate_agent( - self, - key: str, - context: Context, - default_value: LDAIAgentDefaults, - variables: Optional[Dict[str, Any]] = None, - ) -> LDAIAgent: - """ - Internal method to evaluate an agent configuration. - - :param key: The agent configuration key. - :param context: The evaluation context. - :param default_value: Default agent values. - :param variables: Variables for interpolation. - :return: Configured LDAIAgent instance. - """ - model, provider, messages, instructions, tracker, enabled = self.__evaluate( - key, context, default_value.to_dict(), variables - ) - - # For agents, prioritize instructions over messages - final_instructions = instructions if instructions is not None else default_value.instructions - - return LDAIAgent( - enabled=bool(enabled) if enabled is not None else default_value.enabled, - model=model or default_value.model, - provider=provider or default_value.provider, - instructions=final_instructions, - tracker=tracker, - ) - - def __interpolate_template(self, template: str, variables: Dict[str, Any]) -> str: - """ - Interpolate the template with the given variables using Mustache format. - - :param template: The template string. - :param variables: The variables to interpolate into the template. - :return: The interpolated string. 
- """ - return chevron.render(template, variables) diff --git a/ldai/testing/__init__.py b/ldai/testing/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/ldai/testing/test_agents.py b/ldai/testing/test_agents.py deleted file mode 100644 index b2e80c0..0000000 --- a/ldai/testing/test_agents.py +++ /dev/null @@ -1,342 +0,0 @@ -import pytest -from ldclient import Config, Context, LDClient -from ldclient.integrations.test_data import TestData - -from ldai.client import (LDAIAgentConfig, LDAIAgentDefaults, LDAIClient, - ModelConfig, ProviderConfig) - - -@pytest.fixture -def td() -> TestData: - td = TestData.data_source() - - # Single agent with instructions - td.update( - td.flag('customer-support-agent') - .variations( - { - 'model': {'name': 'gpt-4', 'parameters': {'temperature': 0.3, 'maxTokens': 2048}}, - 'provider': {'name': 'openai'}, - 'instructions': 'You are a helpful customer support agent for {{company_name}}. Always be polite and professional.', - '_ldMeta': {'enabled': True, 'variationKey': 'agent-v1', 'version': 1, 'mode': 'agent'}, - } - ) - .variation_for_all(0) - ) - - # Agent with context interpolation - td.update( - td.flag('personalized-agent') - .variations( - { - 'model': {'name': 'claude-3', 'parameters': {'temperature': 0.5}}, - 'instructions': 'Hello {{ldctx.name}}! I am your personal assistant. Your user key is {{ldctx.key}}.', - '_ldMeta': {'enabled': True, 'variationKey': 'personal-v1', 'version': 2, 'mode': 'agent'}, - } - ) - .variation_for_all(0) - ) - - # Agent with multi-context interpolation - td.update( - td.flag('multi-context-agent') - .variations( - { - 'model': {'name': 'gpt-3.5-turbo'}, - 'instructions': 'Welcome {{ldctx.user.name}} from {{ldctx.org.name}}! Your organization tier is {{ldctx.org.tier}}.', - '_ldMeta': {'enabled': True, 'variationKey': 'multi-v1', 'version': 1, 'mode': 'agent'}, - } - ) - .variation_for_all(0) - ) - - # Disabled agent - td.update( - td.flag('disabled-agent') - .variations( - { - 'model': {'name': 'gpt-4'}, - 'instructions': 'This agent is disabled.', - '_ldMeta': {'enabled': False, 'variationKey': 'disabled-v1', 'version': 1, 'mode': 'agent'}, - } - ) - .variation_for_all(0) - ) - - # Agent with minimal metadata - td.update( - td.flag('minimal-agent') - .variations( - { - 'instructions': 'Minimal agent configuration.', - '_ldMeta': {'enabled': True}, - } - ) - .variation_for_all(0) - ) - - # Sales assistant agent - td.update( - td.flag('sales-assistant') - .variations( - { - 'model': {'name': 'gpt-4', 'parameters': {'temperature': 0.7}}, - 'provider': {'name': 'openai'}, - 'instructions': 'You are a sales assistant for {{company_name}}. Help customers find the right products.', - '_ldMeta': {'enabled': True, 'variationKey': 'sales-v1', 'version': 1, 'mode': 'agent'}, - } - ) - .variation_for_all(0) - ) - - # Research agent for testing single agent method - td.update( - td.flag('research-agent') - .variations( - { - 'model': {'name': 'gpt-4', 'parameters': {'temperature': 0.2, 'maxTokens': 3000}}, - 'provider': {'name': 'openai'}, - 'instructions': 'You are a research assistant specializing in {{topic}}. 
Your expertise level should match {{ldctx.expertise}}.', - '_ldMeta': {'enabled': True, 'variationKey': 'research-v1', 'version': 1, 'mode': 'agent'}, - } - ) - .variation_for_all(0) - ) - - return td - - -@pytest.fixture -def client(td: TestData) -> LDClient: - config = Config('sdk-key', update_processor_class=td, send_events=False) - return LDClient(config=config) - - -@pytest.fixture -def ldai_client(client: LDClient) -> LDAIClient: - return LDAIClient(client) - - -def test_single_agent_method(ldai_client: LDAIClient): - """Test the single agent() method functionality.""" - context = Context.builder('user-key').set('expertise', 'advanced').build() - config = LDAIAgentConfig( - key='research-agent', - default_value=LDAIAgentDefaults( - enabled=False, - model=ModelConfig('fallback-model'), - instructions="Default instructions" - ), - variables={'topic': 'quantum computing'} - ) - - agent = ldai_client.agent(config, context) - - assert agent.enabled is True - assert agent.model is not None - assert agent.model.name == 'gpt-4' - assert agent.model.get_parameter('temperature') == 0.2 - assert agent.model.get_parameter('maxTokens') == 3000 - assert agent.provider is not None - assert agent.provider.name == 'openai' - assert agent.instructions == 'You are a research assistant specializing in quantum computing. Your expertise level should match advanced.' - assert agent.tracker is not None - - -def test_single_agent_with_defaults(ldai_client: LDAIClient): - """Test single agent method with non-existent flag using defaults.""" - context = Context.create('user-key') - config = LDAIAgentConfig( - key='non-existent-agent', - default_value=LDAIAgentDefaults( - enabled=True, - model=ModelConfig('default-model', parameters={'temp': 0.8}), - provider=ProviderConfig('default-provider'), - instructions="You are a default assistant for {{task}}." - ), - variables={'task': 'general assistance'} - ) - - agent = ldai_client.agent(config, context) - - assert agent.enabled is True - assert agent.model is not None and agent.model.name == 'default-model' - assert agent.model is not None and agent.model.get_parameter('temp') == 0.8 - assert agent.provider is not None and agent.provider.name == 'default-provider' - assert agent.instructions == "You are a default assistant for general assistance." 
- assert agent.tracker is not None - - -def test_agents_method_with_configs(ldai_client: LDAIClient): - """Test the new agents() method with LDAIAgentConfig objects.""" - context = Context.create('user-key') - - agent_configs = [ - LDAIAgentConfig( - key='customer-support-agent', - default_value=LDAIAgentDefaults( - enabled=False, - model=ModelConfig('fallback-model'), - instructions="Default support" - ), - variables={'company_name': 'Acme Corp'} - ), - LDAIAgentConfig( - key='sales-assistant', - default_value=LDAIAgentDefaults( - enabled=False, - model=ModelConfig('fallback-model'), - instructions="Default sales" - ), - variables={'company_name': 'Acme Corp'} - ) - ] - - agents = ldai_client.agents(agent_configs, context) - - assert len(agents) == 2 - assert 'customer-support-agent' in agents - assert 'sales-assistant' in agents - - support_agent = agents['customer-support-agent'] - assert support_agent.enabled is True - assert support_agent.instructions is not None and 'Acme Corp' in support_agent.instructions - - sales_agent = agents['sales-assistant'] - assert sales_agent.enabled is True - assert sales_agent.instructions is not None and 'Acme Corp' in sales_agent.instructions - assert sales_agent.model is not None and sales_agent.model.get_parameter('temperature') == 0.7 - - -def test_agents_method_different_variables_per_agent(ldai_client: LDAIClient): - """Test agents method with different variables for each agent.""" - context = Context.builder('user-key').name('Alice').build() - - agent_configs = [ - LDAIAgentConfig( - key='personalized-agent', - default_value=LDAIAgentDefaults( - enabled=True, - instructions="Default personal" - ), - variables={} # Will use context only - ), - LDAIAgentConfig( - key='customer-support-agent', - default_value=LDAIAgentDefaults( - enabled=True, - instructions="Default support" - ), - variables={'company_name': 'TechStart Inc'} - ) - ] - - agents = ldai_client.agents(agent_configs, context) - - personal_agent = agents['personalized-agent'] - assert personal_agent.instructions == 'Hello Alice! I am your personal assistant. Your user key is user-key.' - - support_agent = agents['customer-support-agent'] - assert support_agent.instructions == 'You are a helpful customer support agent for TechStart Inc. Always be polite and professional.' - - -def test_agents_with_multi_context_interpolation(ldai_client: LDAIClient): - """Test agents method with multi-context interpolation.""" - user_context = Context.builder('user-key').name('Alice').build() - org_context = Context.builder('org-key').kind('org').name('LaunchDarkly').set('tier', 'Enterprise').build() - context = Context.multi_builder().add(user_context).add(org_context).build() - - agent_configs = [ - LDAIAgentConfig( - key='multi-context-agent', - default_value=LDAIAgentDefaults( - enabled=True, - instructions="Default multi-context" - ), - variables={} - ) - ] - - agents = ldai_client.agents(agent_configs, context) - - agent = agents['multi-context-agent'] - assert agent.instructions == 'Welcome Alice from LaunchDarkly! Your organization tier is Enterprise.' 
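# Aside: a minimal sketch of what the interpolation checked above relies on
# (illustrative, not part of the test suite). __evaluate() injects
# context.to_dict() under the 'ldctx' variable before rendering with chevron
# (Mustache), so a multi-context exposes each kind by name. The literal dict
# below only approximates the to_dict() output for the contexts built above.
import chevron

ldctx = {
    'kind': 'multi',
    'user': {'key': 'user-key', 'name': 'Alice'},
    'org': {'key': 'org-key', 'name': 'LaunchDarkly', 'tier': 'Enterprise'},
}
template = ('Welcome {{ldctx.user.name}} from {{ldctx.org.name}}! '
            'Your organization tier is {{ldctx.org.tier}}.')
assert chevron.render(template, {'ldctx': ldctx}) == \
    'Welcome Alice from LaunchDarkly! Your organization tier is Enterprise.'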
- - -def test_disabled_agent_single_method(ldai_client: LDAIClient): - """Test that disabled agents are properly handled in single agent method.""" - context = Context.create('user-key') - config = LDAIAgentConfig( - key='disabled-agent', - default_value=LDAIAgentDefaults(enabled=False), - variables={} - ) - - agent = ldai_client.agent(config, context) - - assert agent.enabled is False - assert agent.tracker is not None - - -def test_disabled_agent_multiple_method(ldai_client: LDAIClient): - """Test that disabled agents are properly handled in multiple agents method.""" - context = Context.create('user-key') - - agent_configs = [ - LDAIAgentConfig( - key='disabled-agent', - default_value=LDAIAgentDefaults(enabled=False), - variables={} - ) - ] - - agents = ldai_client.agents(agent_configs, context) - - assert len(agents) == 1 - assert agents['disabled-agent'].enabled is False - - -def test_agent_with_missing_metadata(ldai_client: LDAIClient): - """Test agent handling when metadata is minimal or missing.""" - context = Context.create('user-key') - config = LDAIAgentConfig( - key='minimal-agent', - default_value=LDAIAgentDefaults( - enabled=False, - model=ModelConfig('default-model'), - instructions="Default instructions" - ) - ) - - agent = ldai_client.agent(config, context) - - assert agent.enabled is True # From flag - assert agent.instructions == 'Minimal agent configuration.' - assert agent.model == config.default_value.model # Falls back to default - assert agent.tracker is not None - - -def test_agent_config_dataclass(): - """Test the LDAIAgentConfig dataclass functionality.""" - config = LDAIAgentConfig( - key='test-agent', - default_value=LDAIAgentDefaults( - enabled=True, - instructions="Test instructions" - ), - variables={'key': 'value'} - ) - - assert config.key == 'test-agent' - assert config.default_value.enabled is True - assert config.default_value.instructions == "Test instructions" - assert config.variables == {'key': 'value'} - - # Test with no variables - config_no_vars = LDAIAgentConfig( - key='test-agent-2', - default_value=LDAIAgentDefaults(enabled=False) - ) - - assert config_no_vars.key == 'test-agent-2' - assert config_no_vars.variables is None diff --git a/ldai/testing/test_model_config.py b/ldai/testing/test_model_config.py deleted file mode 100644 index 1ffc033..0000000 --- a/ldai/testing/test_model_config.py +++ /dev/null @@ -1,330 +0,0 @@ -import pytest -from ldclient import Config, Context, LDClient -from ldclient.integrations.test_data import TestData - -from ldai.client import AIConfig, LDAIClient, LDMessage, ModelConfig - - -@pytest.fixture -def td() -> TestData: - td = TestData.data_source() - td.update( - td.flag('model-config') - .variations( - { - 'model': {'name': 'fakeModel', 'parameters': {'temperature': 0.5, 'maxTokens': 4096}, 'custom': {'extra-attribute': 'value'}}, - 'provider': {'name': 'fakeProvider'}, - 'messages': [{'role': 'system', 'content': 'Hello, {{name}}!'}], - '_ldMeta': {'enabled': True, 'variationKey': 'abcd', 'version': 1}, - }, - "green", - ) - .variation_for_all(0) - ) - - td.update( - td.flag('multiple-messages') - .variations( - { - 'model': {'name': 'fakeModel', 'parameters': {'temperature': 0.7, 'maxTokens': 8192}}, - 'messages': [ - {'role': 'system', 'content': 'Hello, {{name}}!'}, - {'role': 'user', 'content': 'The day is, {{day}}!'}, - ], - '_ldMeta': {'enabled': True, 'variationKey': 'abcd', 'version': 1}, - }, - "green", - ) - .variation_for_all(0) - ) - - td.update( - td.flag('ctx-interpolation') - .variations( - { - 
'model': {'name': 'fakeModel', 'parameters': {'extra-attribute': 'I can be anything I set my mind/type to'}}, - 'messages': [{'role': 'system', 'content': 'Hello, {{ldctx.name}}! Is your last name {{ldctx.last}}?'}], - '_ldMeta': {'enabled': True, 'variationKey': 'abcd', 'version': 1}, - } - ) - .variation_for_all(0) - ) - - td.update( - td.flag('multi-ctx-interpolation') - .variations( - { - 'model': {'name': 'fakeModel', 'parameters': {'extra-attribute': 'I can be anything I set my mind/type to'}}, - 'messages': [{'role': 'system', 'content': 'Hello, {{ldctx.user.name}}! Do you work for {{ldctx.org.shortname}}?'}], - '_ldMeta': {'enabled': True, 'variationKey': 'abcd', 'version': 1}, - } - ) - .variation_for_all(0) - ) - - td.update( - td.flag('off-config') - .variations( - { - 'model': {'name': 'fakeModel', 'parameters': {'temperature': 0.1}}, - 'messages': [{'role': 'system', 'content': 'Hello, {{name}}!'}], - '_ldMeta': {'enabled': False, 'variationKey': 'abcd', 'version': 1}, - } - ) - .variation_for_all(0) - ) - - td.update( - td.flag('initial-config-disabled') - .variations( - { - '_ldMeta': {'enabled': False}, - }, - { - '_ldMeta': {'enabled': True}, - } - ) - .variation_for_all(0) - ) - - td.update( - td.flag('initial-config-enabled') - .variations( - { - '_ldMeta': {'enabled': False}, - }, - { - '_ldMeta': {'enabled': True}, - } - ) - .variation_for_all(1) - ) - - return td - - -@pytest.fixture -def client(td: TestData) -> LDClient: - config = Config('sdk-key', update_processor_class=td, send_events=False) - return LDClient(config=config) - - -@pytest.fixture -def ldai_client(client: LDClient) -> LDAIClient: - return LDAIClient(client) - - -def test_model_config_delegates_to_properties(): - model = ModelConfig('fakeModel', parameters={'extra-attribute': 'value'}) - assert model.name == 'fakeModel' - assert model.get_parameter('extra-attribute') == 'value' - assert model.get_parameter('non-existent') is None - - assert model.name == model.get_parameter('name') - - -def test_model_config_handles_custom(): - model = ModelConfig('fakeModel', custom={'extra-attribute': 'value'}) - assert model.name == 'fakeModel' - assert model.get_parameter('extra-attribute') is None - assert model.get_custom('non-existent') is None - assert model.get_custom('name') is None - - -def test_uses_default_on_invalid_flag(ldai_client: LDAIClient): - context = Context.create('user-key') - default_value = AIConfig( - enabled=True, - model=ModelConfig('fakeModel', parameters={'temperature': 0.5, 'maxTokens': 4096}), - messages=[LDMessage(role='system', content='Hello, {{name}}!')], - ) - variables = {'name': 'World'} - - config, _ = ldai_client.config('missing-flag', context, default_value, variables) - - assert config.messages is not None - assert len(config.messages) > 0 - assert config.messages[0].content == 'Hello, World!' 
- assert config.enabled is True - - assert config.model is not None - assert config.model.name == 'fakeModel' - assert config.model.get_parameter('temperature') == 0.5 - assert config.model.get_parameter('maxTokens') == 4096 - - -def test_model_config_interpolation(ldai_client: LDAIClient): - context = Context.create('user-key') - default_value = AIConfig( - enabled=True, - model=ModelConfig('fakeModel'), - messages=[LDMessage(role='system', content='Hello, {{name}}!')], - ) - variables = {'name': 'World'} - - config, _ = ldai_client.config('model-config', context, default_value, variables) - - assert config.messages is not None - assert len(config.messages) > 0 - assert config.messages[0].content == 'Hello, World!' - assert config.enabled is True - - assert config.model is not None - assert config.model.name == 'fakeModel' - assert config.model.get_parameter('temperature') == 0.5 - assert config.model.get_parameter('maxTokens') == 4096 - - -def test_model_config_no_variables(ldai_client: LDAIClient): - context = Context.create('user-key') - default_value = AIConfig(enabled=True, model=ModelConfig('fake-model'), messages=[]) - - config, _ = ldai_client.config('model-config', context, default_value, {}) - - assert config.messages is not None - assert len(config.messages) > 0 - assert config.messages[0].content == 'Hello, !' - assert config.enabled is True - - assert config.model is not None - assert config.model.name == 'fakeModel' - assert config.model.get_parameter('temperature') == 0.5 - assert config.model.get_parameter('maxTokens') == 4096 - - -def test_provider_config_handling(ldai_client: LDAIClient): - context = Context.builder('user-key').name("Sandy").build() - default_value = AIConfig(enabled=True, model=ModelConfig('fake-model'), messages=[]) - variables = {'name': 'World'} - - config, _ = ldai_client.config('model-config', context, default_value, variables) - - assert config.provider is not None - assert config.provider.name == 'fakeProvider' - - -def test_context_interpolation(ldai_client: LDAIClient): - context = Context.builder('user-key').name("Sandy").set('last', 'Beaches').build() - default_value = AIConfig(enabled=True, model=ModelConfig('fake-model'), messages=[]) - variables = {'name': 'World'} - - config, _ = ldai_client.config( - 'ctx-interpolation', context, default_value, variables - ) - - assert config.messages is not None - assert len(config.messages) > 0 - assert config.messages[0].content == 'Hello, Sandy! Is your last name Beaches?' - assert config.enabled is True - - assert config.model is not None - assert config.model.name == 'fakeModel' - assert config.model.get_parameter('temperature') is None - assert config.model.get_parameter('maxTokens') is None - assert config.model.get_parameter('extra-attribute') == 'I can be anything I set my mind/type to' - - -def test_multi_context_interpolation(ldai_client: LDAIClient): - user_context = Context.builder('user-key').name("Sandy").build() - org_context = Context.builder('org-key').kind('org').name("LaunchDarkly").set('shortname', 'LD').build() - context = Context.multi_builder().add(user_context).add(org_context).build() - default_value = AIConfig(enabled=True, model=ModelConfig('fake-model'), messages=[]) - variables = {'name': 'World'} - - config, _ = ldai_client.config( - 'multi-ctx-interpolation', context, default_value, variables - ) - - assert config.messages is not None - assert len(config.messages) > 0 - assert config.messages[0].content == 'Hello, Sandy! Do you work for LD?' 
- assert config.enabled is True - - assert config.model is not None - assert config.model.name == 'fakeModel' - assert config.model.get_parameter('temperature') is None - assert config.model.get_parameter('maxTokens') is None - assert config.model.get_parameter('extra-attribute') == 'I can be anything I set my mind/type to' - - -def test_model_config_multiple(ldai_client: LDAIClient): - context = Context.create('user-key') - default_value = AIConfig(enabled=True, model=ModelConfig('fake-model'), messages=[]) - variables = {'name': 'World', 'day': 'Monday'} - - config, _ = ldai_client.config( - 'multiple-messages', context, default_value, variables - ) - - assert config.messages is not None - assert len(config.messages) > 0 - assert config.messages[0].content == 'Hello, World!' - assert config.messages[1].content == 'The day is, Monday!' - assert config.enabled is True - - assert config.model is not None - assert config.model.name == 'fakeModel' - assert config.model.get_parameter('temperature') == 0.7 - assert config.model.get_parameter('maxTokens') == 8192 - - -def test_model_config_disabled(ldai_client: LDAIClient): - context = Context.create('user-key') - default_value = AIConfig(enabled=False, model=ModelConfig('fake-model'), messages=[]) - - config, _ = ldai_client.config('off-config', context, default_value, {}) - - assert config.model is not None - assert config.enabled is False - assert config.model.name == 'fakeModel' - assert config.model.get_parameter('temperature') == 0.1 - assert config.model.get_parameter('maxTokens') is None - - -def test_model_initial_config_disabled(ldai_client: LDAIClient): - context = Context.create('user-key') - default_value = AIConfig(enabled=False, model=ModelConfig('fake-model'), messages=[]) - - config, _ = ldai_client.config('initial-config-disabled', context, default_value, {}) - - assert config.enabled is False - assert config.model is None - assert config.messages is None - assert config.provider is None - - -def test_model_initial_config_enabled(ldai_client: LDAIClient): - context = Context.create('user-key') - default_value = AIConfig(enabled=False, model=ModelConfig('fake-model'), messages=[]) - - config, _ = ldai_client.config('initial-config-enabled', context, default_value, {}) - - assert config.enabled is True - assert config.model is None - assert config.messages is None - assert config.provider is None - - -def test_config_method_tracking(ldai_client: LDAIClient): - from unittest.mock import Mock - - mock_client = Mock() - mock_client.variation.return_value = { - '_ldMeta': {'enabled': True, 'variationKey': 'test-variation', 'version': 1}, - 'model': {'name': 'test-model'}, - 'provider': {'name': 'test-provider'}, - 'messages': [] - } - - client = LDAIClient(mock_client) - context = Context.create('user-key') - default_value = AIConfig(enabled=False, model=ModelConfig('fake-model'), messages=[]) - - config, tracker = client.config('test-config-key', context, default_value) - - mock_client.track.assert_called_once_with( - '$ld:ai:config:function:single', - context, - 'test-config-key', - 1 - ) diff --git a/ldai/testing/test_tracker.py b/ldai/testing/test_tracker.py deleted file mode 100644 index 19c8161..0000000 --- a/ldai/testing/test_tracker.py +++ /dev/null @@ -1,439 +0,0 @@ -from time import sleep -from unittest.mock import MagicMock, call - -import pytest -from ldclient import Config, Context, LDClient -from ldclient.integrations.test_data import TestData - -from ldai.tracker import FeedbackKind, LDAIConfigTracker, TokenUsage - - 
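# Aside: an end-to-end usage sketch of the tracker exercised by the tests below
# (illustrative, not part of the test suite). 'ld_client' stands in for an
# initialized LDClient; the fake result mimics the .usage.to_dict() shape of an
# OpenAI completion. One call records duration, success, and token usage, all
# retrievable afterwards through get_summary().
def _sketch_openai_tracking(ld_client):
    context = Context.create('user-key')
    tracker = LDAIConfigTracker(
        ld_client, 'variation-key', 'config-key', 1, 'gpt-4', 'openai', context
    )

    class FakeUsage:
        def to_dict(self):
            return {'total_tokens': 42, 'prompt_tokens': 30, 'completion_tokens': 12}

    class FakeResult:
        usage = FakeUsage()

    tracker.track_openai_metrics(lambda: FakeResult())
    summary = tracker.get_summary()
    assert summary.success is True
    assert summary.usage == TokenUsage(42, 30, 12)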
-@pytest.fixture -def td() -> TestData: - td = TestData.data_source() - td.update( - td.flag("model-config") - .variations( - { - "model": { - "name": "fakeModel", - "parameters": {"temperature": 0.5, "maxTokens": 4096}, - "custom": {"extra-attribute": "value"}, - }, - "provider": {"name": "fakeProvider"}, - "messages": [{"role": "system", "content": "Hello, {{name}}!"}], - "_ldMeta": {"enabled": True, "variationKey": "abcd", "version": 1}, - }, - "green", - ) - .variation_for_all(0) - ) - - return td - - -@pytest.fixture -def client(td: TestData) -> LDClient: - config = Config("sdk-key", update_processor_class=td, send_events=False) - client = LDClient(config=config) - client.track = MagicMock() # type: ignore - return client - - -def test_summary_starts_empty(client: LDClient): - context = Context.create("user-key") - tracker = LDAIConfigTracker(client, "variation-key", "config-key", 1, "fakeModel", "fakeProvider", context) - - assert tracker.get_summary().duration is None - assert tracker.get_summary().feedback is None - assert tracker.get_summary().success is None - assert tracker.get_summary().usage is None - - -def test_tracks_duration(client: LDClient): - context = Context.create("user-key") - tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) - tracker.track_duration(100) - - client.track.assert_called_with( # type: ignore - "$ld:ai:duration:total", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 100, - ) - - assert tracker.get_summary().duration == 100 - - -def test_tracks_duration_of(client: LDClient): - context = Context.create("user-key") - tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) - tracker.track_duration_of(lambda: sleep(0.01)) - - calls = client.track.mock_calls # type: ignore - - assert len(calls) == 1 - assert calls[0].args[0] == "$ld:ai:duration:total" - assert calls[0].args[1] == context - assert calls[0].args[2] == { - "variationKey": "variation-key", - "configKey": "config-key", - "version": 3, - "modelName": "fakeModel", - "providerName": "fakeProvider", - } - assert calls[0].args[3] == pytest.approx(10, rel=10) - - -def test_tracks_time_to_first_token(client: LDClient): - context = Context.create("user-key") - tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) - tracker.track_time_to_first_token(100) - - client.track.assert_called_with( # type: ignore - "$ld:ai:tokens:ttf", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 100, - ) - - assert tracker.get_summary().time_to_first_token == 100 - - -def test_tracks_duration_of_with_exception(client: LDClient): - context = Context.create("user-key") - tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) - - def sleep_and_throw(): - sleep(0.01) - raise ValueError("Something went wrong") - - try: - tracker.track_duration_of(sleep_and_throw) - assert False, "Should have thrown an exception" - except ValueError: - pass - - calls = client.track.mock_calls # type: ignore - - assert len(calls) == 1 - assert calls[0].args[0] == "$ld:ai:duration:total" - assert calls[0].args[1] == context - assert calls[0].args[2] == { - "variationKey": "variation-key", - "configKey": "config-key", - "version": 3, - 
"modelName": "fakeModel", - "providerName": "fakeProvider", - } - assert calls[0].args[3] == pytest.approx(10, rel=10) - - -def test_tracks_token_usage(client: LDClient): - context = Context.create("user-key") - tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) - - tokens = TokenUsage(300, 200, 100) - tracker.track_tokens(tokens) - - calls = [ - call( - "$ld:ai:tokens:total", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 300, - ), - call( - "$ld:ai:tokens:input", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 200, - ), - call( - "$ld:ai:tokens:output", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 100, - ), - ] - - client.track.assert_has_calls(calls) # type: ignore - - assert tracker.get_summary().usage == tokens - - -def test_tracks_bedrock_metrics(client: LDClient): - context = Context.create("user-key") - tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) - - bedrock_result = { - "ResponseMetadata": {"HTTPStatusCode": 200}, - "usage": { - "inputTokens": 220, - "outputTokens": 110, - "totalTokens": 330, - }, - "metrics": { - "latencyMs": 50, - }, - } - tracker.track_bedrock_converse_metrics(bedrock_result) - - calls = [ - call( - "$ld:ai:generation:success", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 1, - ), - call( - "$ld:ai:duration:total", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 50, - ), - call( - "$ld:ai:tokens:total", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 330, - ), - call( - "$ld:ai:tokens:input", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 220, - ), - call( - "$ld:ai:tokens:output", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 110, - ), - ] - - client.track.assert_has_calls(calls) # type: ignore - - assert tracker.get_summary().success is True - assert tracker.get_summary().duration == 50 - assert tracker.get_summary().usage == TokenUsage(330, 220, 110) - - -def test_tracks_bedrock_metrics_with_error(client: LDClient): - context = Context.create("user-key") - tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) - - bedrock_result = { - "ResponseMetadata": {"HTTPStatusCode": 500}, - "usage": { - "totalTokens": 330, - "inputTokens": 220, - "outputTokens": 110, - }, - "metrics": { - "latencyMs": 50, - }, - } - tracker.track_bedrock_converse_metrics(bedrock_result) - - calls = [ - call( - "$ld:ai:generation:error", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 1, - ), - call( - "$ld:ai:duration:total", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, 
"modelName": "fakeModel", "providerName": "fakeProvider"}, - 50, - ), - call( - "$ld:ai:tokens:total", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 330, - ), - call( - "$ld:ai:tokens:input", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 220, - ), - call( - "$ld:ai:tokens:output", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 110, - ), - ] - - client.track.assert_has_calls(calls) # type: ignore - - assert tracker.get_summary().success is False - assert tracker.get_summary().duration == 50 - assert tracker.get_summary().usage == TokenUsage(330, 220, 110) - - -def test_tracks_openai_metrics(client: LDClient): - context = Context.create("user-key") - tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) - - class Result: - def __init__(self): - self.usage = Usage() - - class Usage: - def to_dict(self): - return { - "total_tokens": 330, - "prompt_tokens": 220, - "completion_tokens": 110, - } - - tracker.track_openai_metrics(lambda: Result()) - - calls = [ - call( - "$ld:ai:generation:success", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 1, - ), - call( - "$ld:ai:tokens:total", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 330, - ), - call( - "$ld:ai:tokens:input", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 220, - ), - call( - "$ld:ai:tokens:output", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 110, - ), - ] - - client.track.assert_has_calls(calls, any_order=False) # type: ignore - - assert tracker.get_summary().usage == TokenUsage(330, 220, 110) - - -def test_tracks_openai_metrics_with_exception(client: LDClient): - context = Context.create("user-key") - tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) - - def raise_exception(): - raise ValueError("Something went wrong") - - try: - tracker.track_openai_metrics(raise_exception) - assert False, "Should have thrown an exception" - except ValueError: - pass - - calls = [ - call( - "$ld:ai:generation:error", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 1, - ), - ] - - client.track.assert_has_calls(calls, any_order=False) # type: ignore - - assert tracker.get_summary().usage is None - - -@pytest.mark.parametrize( - "kind,label", - [ - pytest.param(FeedbackKind.Positive, "positive", id="positive"), - pytest.param(FeedbackKind.Negative, "negative", id="negative"), - ], -) -def test_tracks_feedback(client: LDClient, kind: FeedbackKind, label: str): - context = Context.create("user-key") - tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) - - tracker.track_feedback({"kind": kind}) - - client.track.assert_called_with( # type: ignore - f"$ld:ai:feedback:user:{label}", - 
context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 1, - ) - assert tracker.get_summary().feedback == {"kind": kind} - - -def test_tracks_success(client: LDClient): - context = Context.create("user-key") - tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) - tracker.track_success() - - calls = [ - call( - "$ld:ai:generation:success", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 1, - ), - ] - - client.track.assert_has_calls(calls) # type: ignore - - assert tracker.get_summary().success is True - - -def test_tracks_error(client: LDClient): - context = Context.create("user-key") - tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) - tracker.track_error() - - calls = [ - call( - "$ld:ai:generation:error", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 1, - ), - ] - - client.track.assert_has_calls(calls) # type: ignore - - assert tracker.get_summary().success is False - - -def test_error_overwrites_success(client: LDClient): - context = Context.create("user-key") - tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context) - tracker.track_success() - tracker.track_error() - - calls = [ - call( - "$ld:ai:generation:success", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 1, - ), - call( - "$ld:ai:generation:error", - context, - {"variationKey": "variation-key", "configKey": "config-key", "version": 3, "modelName": "fakeModel", "providerName": "fakeProvider"}, - 1, - ), - ] - - client.track.assert_has_calls(calls) # type: ignore - - assert tracker.get_summary().success is False diff --git a/ldai/tracker.py b/ldai/tracker.py deleted file mode 100644 index a049952..0000000 --- a/ldai/tracker.py +++ /dev/null @@ -1,313 +0,0 @@ -import time -from dataclasses import dataclass -from enum import Enum -from typing import Dict, Optional - -from ldclient import Context, LDClient - - -class FeedbackKind(Enum): - """ - Types of feedback that can be provided for AI operations. - """ - - Positive = "positive" - Negative = "negative" - - -@dataclass -class TokenUsage: - """ - Tracks token usage for AI operations. - - :param total: Total number of tokens used. - :param input: Number of tokens in the prompt. - :param output: Number of tokens in the completion. - """ - - total: int - input: int - output: int - - -class LDAIMetricSummary: - """ - Summary of metrics which have been tracked. 
- """ - - def __init__(self): - self._duration = None - self._success = None - self._feedback = None - self._usage = None - self._time_to_first_token = None - - @property - def duration(self) -> Optional[int]: - return self._duration - - @property - def success(self) -> Optional[bool]: - return self._success - - @property - def feedback(self) -> Optional[Dict[str, FeedbackKind]]: - return self._feedback - - @property - def usage(self) -> Optional[TokenUsage]: - return self._usage - - @property - def time_to_first_token(self) -> Optional[int]: - return self._time_to_first_token - - -class LDAIConfigTracker: - """ - Tracks configuration and usage metrics for LaunchDarkly AI operations. - """ - - def __init__( - self, - ld_client: LDClient, - variation_key: str, - config_key: str, - version: int, - model_name: str, - provider_name: str, - context: Context, - ): - """ - Initialize an AI Config tracker. - - :param ld_client: LaunchDarkly client instance. - :param variation_key: Variation key for tracking. - :param config_key: Configuration key for tracking. - :param version: Version of the variation. - :param model_name: Name of the model used. - :param provider_name: Name of the provider used. - :param context: Context for evaluation. - """ - self._ld_client = ld_client - self._variation_key = variation_key - self._config_key = config_key - self._version = version - self._model_name = model_name - self._provider_name = provider_name - self._context = context - self._summary = LDAIMetricSummary() - - def __get_track_data(self): - """ - Get tracking data for events. - - :return: Dictionary containing variation and config keys. - """ - return { - "variationKey": self._variation_key, - "configKey": self._config_key, - "version": self._version, - "modelName": self._model_name, - "providerName": self._provider_name, - } - - def track_duration(self, duration: int) -> None: - """ - Manually track the duration of an AI operation. - - :param duration: Duration in milliseconds. - """ - self._summary._duration = duration - self._ld_client.track( - "$ld:ai:duration:total", self._context, self.__get_track_data(), duration - ) - - def track_time_to_first_token(self, time_to_first_token: int) -> None: - """ - Manually track the time to first token of an AI operation. - - :param time_to_first_token: Time to first token in milliseconds. - """ - self._summary._time_to_first_token = time_to_first_token - self._ld_client.track( - "$ld:ai:tokens:ttf", - self._context, - self.__get_track_data(), - time_to_first_token, - ) - - def track_duration_of(self, func): - """ - Automatically track the duration of an AI operation. - - An exception occurring during the execution of the function will still - track the duration. The exception will be re-thrown. - - :param func: Function to track. - :return: Result of the tracked function. - """ - start_time = time.time() - try: - result = func() - finally: - end_time = time.time() - duration = int((end_time - start_time) * 1000) # duration in milliseconds - self.track_duration(duration) - - return result - - def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None: - """ - Track user feedback for an AI operation. - - :param feedback: Dictionary containing feedback kind. 
- """ - self._summary._feedback = feedback - if feedback["kind"] == FeedbackKind.Positive: - self._ld_client.track( - "$ld:ai:feedback:user:positive", - self._context, - self.__get_track_data(), - 1, - ) - elif feedback["kind"] == FeedbackKind.Negative: - self._ld_client.track( - "$ld:ai:feedback:user:negative", - self._context, - self.__get_track_data(), - 1, - ) - - def track_success(self) -> None: - """ - Track a successful AI generation. - """ - self._summary._success = True - self._ld_client.track( - "$ld:ai:generation:success", self._context, self.__get_track_data(), 1 - ) - - def track_error(self) -> None: - """ - Track an unsuccessful AI generation attempt. - """ - self._summary._success = False - self._ld_client.track( - "$ld:ai:generation:error", self._context, self.__get_track_data(), 1 - ) - - def track_openai_metrics(self, func): - """ - Track OpenAI-specific operations. - - This function will track the duration of the operation, the token - usage, and the success or error status. - - If the provided function throws, then this method will also throw. - - In the case the provided function throws, this function will record the - duration and an error. - - A failed operation will not have any token usage data. - - :param func: Function to track. - :return: Result of the tracked function. - """ - try: - result = self.track_duration_of(func) - self.track_success() - if hasattr(result, "usage") and hasattr(result.usage, "to_dict"): - self.track_tokens(_openai_to_token_usage(result.usage.to_dict())) - except Exception: - self.track_error() - raise - - return result - - def track_bedrock_converse_metrics(self, res: dict) -> dict: - """ - Track AWS Bedrock conversation operations. - - - This function will track the duration of the operation, the token - usage, and the success or error status. - - :param res: Response dictionary from Bedrock. - :return: The original response dictionary. - """ - status_code = res.get("ResponseMetadata", {}).get("HTTPStatusCode", 0) - if status_code == 200: - self.track_success() - elif status_code >= 400: - self.track_error() - if res.get("metrics", {}).get("latencyMs"): - self.track_duration(res["metrics"]["latencyMs"]) - if res.get("usage"): - self.track_tokens(_bedrock_to_token_usage(res["usage"])) - return res - - def track_tokens(self, tokens: TokenUsage) -> None: - """ - Track token usage metrics. - - :param tokens: Token usage data from either custom, OpenAI, or Bedrock sources. - """ - self._summary._usage = tokens - if tokens.total > 0: - self._ld_client.track( - "$ld:ai:tokens:total", - self._context, - self.__get_track_data(), - tokens.total, - ) - if tokens.input > 0: - self._ld_client.track( - "$ld:ai:tokens:input", - self._context, - self.__get_track_data(), - tokens.input, - ) - if tokens.output > 0: - self._ld_client.track( - "$ld:ai:tokens:output", - self._context, - self.__get_track_data(), - tokens.output, - ) - - def get_summary(self) -> LDAIMetricSummary: - """ - Get the current summary of AI metrics. - - :return: Summary of AI metrics. - """ - return self._summary - - -def _bedrock_to_token_usage(data: dict) -> TokenUsage: - """ - Convert a Bedrock usage dictionary to a TokenUsage object. - - :param data: Dictionary containing Bedrock usage data. - :return: TokenUsage object containing usage data. 
- """ - return TokenUsage( - total=data.get("totalTokens", 0), - input=data.get("inputTokens", 0), - output=data.get("outputTokens", 0), - ) - - -def _openai_to_token_usage(data: dict) -> TokenUsage: - """ - Convert an OpenAI usage dictionary to a TokenUsage object. - - :param data: Dictionary containing OpenAI usage data. - :return: TokenUsage object containing usage data. - """ - return TokenUsage( - total=data.get("total_tokens", 0), - input=data.get("prompt_tokens", 0), - output=data.get("completion_tokens", 0), - ) diff --git a/pyproject.toml b/pyproject.toml deleted file mode 100644 index 200215c..0000000 --- a/pyproject.toml +++ /dev/null @@ -1,71 +0,0 @@ -[tool.poetry] -name = "launchdarkly-server-sdk-ai" -version = "0.10.1" -description = "LaunchDarkly SDK for AI" -authors = ["LaunchDarkly "] -license = "Apache-2.0" -readme = "README.md" -homepage = "https://docs.launchdarkly.com/sdk/ai/python" -repository = "https://github.com/launchdarkly/python-server-sdk-ai" -documentation = "https://launchdarkly-python-sdk-ai.readthedocs.io/en/latest/" -classifiers = [ - "Intended Audience :: Developers", - "License :: OSI Approved :: Apache Software License", - "Operating System :: OS Independent", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", - "Programming Language :: Python :: 3.13", - "Topic :: Software Development", - "Topic :: Software Development :: Libraries", -] -packages = [ { include = "ldai" } ] -exclude = [ - { path = "ldai/testing", format = "wheel" } -] - -[tool.poetry.dependencies] -python = ">=3.9,<4" -launchdarkly-server-sdk = ">=9.4.0" -chevron = "=0.14.0" - - -[tool.poetry.group.dev.dependencies] -pytest = ">=2.8" -pytest-cov = ">=2.4.0" -pytest-mypy = "==1.0.1" -mypy = "==1.18.2" -pycodestyle = "^2.12.1" -isort = ">=5.13.2,<7.0.0" - - -[tool.poetry.group.docs] -optional = true - -[tool.poetry.group.docs.dependencies] -sphinx = ">=6,<8" -sphinx-rtd-theme = ">=1.3,<4.0" -certifi = ">=2018.4.16" -expiringdict = ">=1.1.4" -pyrfc3339 = ">=1.0" -jsonpickle = ">1.4.1" -semver = ">=2.7.9" -urllib3 = ">=1.26.0" -jinja2 = "3.1.6" - -[tool.mypy] -python_version = "3.9" -ignore_missing_imports = true -install_types = true -non_interactive = true - - -[tool.pytest.ini_options] -addopts = ["-ra"] - - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/release-please-config.json b/release-please-config.json index ec0b76f..3857781 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -5,7 +5,7 @@ "versioning": "default", "bump-minor-pre-major": true, "include-v-in-tag": false, - "extra-files": ["ldai/__init__.py", "PROVENANCE.md"], + "extra-files": ["src/ldai/__init__.py", "PROVENANCE.md"], "include-component-in-tag": false } } diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index c178190..0000000 --- a/setup.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[pycodestyle] -ignore = E501 From b9bf6f69ac8279e0287b280229521a8b242cb22c Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Tue, 25 Nov 2025 18:54:20 +0100 Subject: [PATCH 04/12] update release for server-ai --- .github/actions/build/action.yml | 3 ++- .github/workflows/ci.yml | 1 + .github/workflows/manual-publish.yml | 1 + .github/workflows/release-please.yml | 1 + 4 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/actions/build/action.yml b/.github/actions/build/action.yml 
index 6761d0d..3235a5f 100644 --- a/.github/actions/build/action.yml +++ b/.github/actions/build/action.yml @@ -10,10 +10,11 @@ runs: steps: - name: Build distribution files shell: bash + working-directory: packages/sdk/server-ai run: poetry build - name: Hash build files for provenance id: package-hashes shell: bash - working-directory: ./dist + working-directory: packages/sdk/server-ai/dist run: | echo "package-hashes=$(sha256sum * | base64 -w0)" >> "$GITHUB_OUTPUT" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fd8c2dc..c627433 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -59,6 +59,7 @@ jobs: uses: abatilo/actions-poetry@7b6d33e44b4f08d7021a1dee3c044e9c253d6439 - name: Install requirements + working-directory: packages/sdk/server-ai run: poetry install - name: Run tests diff --git a/.github/workflows/manual-publish.yml b/.github/workflows/manual-publish.yml index 9b35bb2..63a17b1 100644 --- a/.github/workflows/manual-publish.yml +++ b/.github/workflows/manual-publish.yml @@ -40,6 +40,7 @@ jobs: uses: pypa/gh-action-pypi-publish@release/v1 with: password: ${{env.PYPI_AUTH_TOKEN}} + packages-dir: packages/sdk/server-ai/dist/ release-provenance: needs: [ 'build-publish' ] diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml index 7176356..8cb0c4d 100644 --- a/.github/workflows/release-please.yml +++ b/.github/workflows/release-please.yml @@ -52,6 +52,7 @@ jobs: uses: pypa/gh-action-pypi-publish@release/v1 with: password: ${{env.PYPI_AUTH_TOKEN}} + packages-dir: packages/sdk/server-ai/dist/ release-provenance: needs: [ 'release-package' ] From 0584f7e37517bfe3e27074d022a860d9cd1c92be Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Tue, 25 Nov 2025 18:57:35 +0100 Subject: [PATCH 05/12] fixes. 
--- packages/sdk/server-ai/setup.cfg | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 packages/sdk/server-ai/setup.cfg diff --git a/packages/sdk/server-ai/setup.cfg b/packages/sdk/server-ai/setup.cfg new file mode 100644 index 0000000..2740b5b --- /dev/null +++ b/packages/sdk/server-ai/setup.cfg @@ -0,0 +1,3 @@ +[pycodestyle] +ignore = E501 + From b198591bdd71f14e7700568e2706c694547fa9be Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Tue, 25 Nov 2025 21:11:31 +0100 Subject: [PATCH 06/12] remove uneeded --- js-core - copied from another project for reference | 1 - 1 file changed, 1 deletion(-) delete mode 160000 js-core - copied from another project for reference diff --git a/js-core - copied from another project for reference b/js-core - copied from another project for reference deleted file mode 160000 index 213fc79..0000000 --- a/js-core - copied from another project for reference +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 213fc793c752af6517ba7c117219205fb62b9c65 From 7b954b913195571f709d95ff2de7b660a0f90841 Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Mon, 1 Dec 2025 15:34:07 +0100 Subject: [PATCH 07/12] move make file to fit monorepo style --- Makefile => packages/sdk/server-ai/Makefile | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) rename Makefile => packages/sdk/server-ai/Makefile (55%) diff --git a/Makefile b/packages/sdk/server-ai/Makefile similarity index 55% rename from Makefile rename to packages/sdk/server-ai/Makefile index 57067b3..a640218 100644 --- a/Makefile +++ b/packages/sdk/server-ai/Makefile @@ -3,7 +3,7 @@ PYTEST_FLAGS=-W error::SyntaxWarning SPHINXOPTS = -W --keep-going SPHINXBUILD = sphinx-build SPHINXPROJ = launchdarkly-server-sdk -SOURCEDIR = docs +SOURCEDIR = ../../../docs BUILDDIR = $(SOURCEDIR)/build .PHONY: help @@ -15,7 +15,7 @@ help: #! Show this help message .PHONY: install install: - @cd packages/sdk/server-ai && poetry install + poetry install # # Quality control checks @@ -24,14 +24,14 @@ install: .PHONY: test test: #! Run unit tests test: install - @cd packages/sdk/server-ai && poetry run pytest $(PYTEST_FLAGS) + poetry run pytest $(PYTEST_FLAGS) .PHONY: lint lint: #! Run type analysis and linting checks lint: install - @cd packages/sdk/server-ai && poetry run mypy src/ldai - @cd packages/sdk/server-ai && poetry run isort --check --atomic src/ldai - @cd packages/sdk/server-ai && poetry run pycodestyle src/ldai + poetry run mypy src/ldai + poetry run isort --check --atomic src/ldai + poetry run pycodestyle src/ldai # # Documentation generation @@ -39,5 +39,6 @@ lint: install .PHONY: docs docs: #! 
Generate sphinx-based documentation - @cd packages/sdk/server-ai && poetry install --with docs - @cd packages/sdk/server-ai && poetry run $(SPHINXBUILD) -M html "../../../$(SOURCEDIR)" "../../../$(BUILDDIR)" $(SPHINXOPTS) $(O) + poetry install --with docs + poetry run $(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + From 4675731e8a6bb05c7855af7019aad0c6cf1b766d Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Mon, 1 Dec 2025 15:39:07 +0100 Subject: [PATCH 08/12] fix ci --- .github/actions/build-docs/action.yml | 1 + .github/workflows/ci.yml | 3 +++ 2 files changed, 4 insertions(+) diff --git a/.github/actions/build-docs/action.yml b/.github/actions/build-docs/action.yml index 84e6a1b..93bfb1a 100644 --- a/.github/actions/build-docs/action.yml +++ b/.github/actions/build-docs/action.yml @@ -6,4 +6,5 @@ runs: steps: - name: Build Documentation shell: bash + working-directory: packages/sdk/server-ai run: make docs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c627433..34b8744 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -32,9 +32,11 @@ jobs: - uses: ./.github/actions/build-docs - name: Run tests + working-directory: packages/sdk/server-ai run: make test - name: Verify typehints + working-directory: packages/sdk/server-ai run: make lint windows: @@ -63,4 +65,5 @@ jobs: run: poetry install - name: Run tests + working-directory: packages/sdk/server-ai run: make test From 849a6f551bf2ae59812ab776818cbee290e7317e Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Mon, 1 Dec 2025 16:39:59 +0100 Subject: [PATCH 09/12] remove extraline --- .../sdk/server-ai/src/ldai/providers/ai_provider_factory.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/packages/sdk/server-ai/src/ldai/providers/ai_provider_factory.py b/packages/sdk/server-ai/src/ldai/providers/ai_provider_factory.py index 7c1dec2..57f97a1 100644 --- a/packages/sdk/server-ai/src/ldai/providers/ai_provider_factory.py +++ b/packages/sdk/server-ai/src/ldai/providers/ai_provider_factory.py @@ -16,7 +16,6 @@ # Type representing the supported AI providers SupportedAIProvider = Literal['langchain'] - class AIProviderFactory: """ Factory for creating AIProvider instances based on the provider configuration. @@ -108,9 +107,6 @@ async def _try_create_provider( f"Make sure langchain and langchain-core packages are installed." 
) return None - - # TODO: REL-10773 OpenAI provider - # TODO: REL-10776 Vercel provider # For future external providers, use dynamic import provider_mappings = { # 'openai': ('launchdarkly_server_sdk_ai_openai', 'OpenAIProvider'), From 079c136867d62e69af2cf1985afe84e7e112528c Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Mon, 1 Dec 2025 19:40:46 +0100 Subject: [PATCH 10/12] move type to judge and chat instead --- packages/sdk/server-ai/src/ldai/__init__.py | 6 ++++-- packages/sdk/server-ai/src/ldai/chat/__init__.py | 3 ++- packages/sdk/server-ai/src/ldai/chat/tracked_chat.py | 2 +- .../server-ai/src/ldai/{providers => chat}/types.py | 12 +----------- packages/sdk/server-ai/src/ldai/judge/__init__.py | 4 ++-- packages/sdk/server-ai/src/ldai/judge/ai_judge.py | 4 ++-- packages/sdk/server-ai/src/ldai/judge/types.py | 12 ++++++++++++ .../sdk/server-ai/src/ldai/providers/ai_provider.py | 3 ++- 8 files changed, 26 insertions(+), 20 deletions(-) rename packages/sdk/server-ai/src/ldai/{providers => chat}/types.py (63%) diff --git a/packages/sdk/server-ai/src/ldai/__init__.py b/packages/sdk/server-ai/src/ldai/__init__.py index 33f0466..0061482 100644 --- a/packages/sdk/server-ai/src/ldai/__init__.py +++ b/packages/sdk/server-ai/src/ldai/__init__.py @@ -9,11 +9,11 @@ __path__ = extend_path(__path__, __name__) # Export chat -from ldai.chat import TrackedChat +from ldai.chat import ChatResponse, TrackedChat # Export main client from ldai.client import LDAIClient # Export judge -from ldai.judge import AIJudge, EvalScore, JudgeResponse +from ldai.judge import AIJudge, EvalScore, JudgeResponse, StructuredResponse # Export models for convenience from ldai.models import ( # Deprecated aliases for backward compatibility AIAgentConfig, AIAgentConfigDefault, AIAgentConfigRequest, AIAgents, @@ -33,9 +33,11 @@ 'AIJudgeConfigDefault', 'AIJudge', 'TrackedChat', + 'ChatResponse', 'EvalScore', 'JudgeConfiguration', 'JudgeResponse', + 'StructuredResponse', 'LDMessage', 'ModelConfig', 'ProviderConfig', diff --git a/packages/sdk/server-ai/src/ldai/chat/__init__.py b/packages/sdk/server-ai/src/ldai/chat/__init__.py index 265a1b3..6a71e92 100644 --- a/packages/sdk/server-ai/src/ldai/chat/__init__.py +++ b/packages/sdk/server-ai/src/ldai/chat/__init__.py @@ -1,5 +1,6 @@ """Chat module for LaunchDarkly AI SDK.""" from ldai.chat.tracked_chat import TrackedChat +from ldai.chat.types import ChatResponse -__all__ = ['TrackedChat'] +__all__ = ['TrackedChat', 'ChatResponse'] diff --git a/packages/sdk/server-ai/src/ldai/chat/tracked_chat.py b/packages/sdk/server-ai/src/ldai/chat/tracked_chat.py index 841f8ed..e301234 100644 --- a/packages/sdk/server-ai/src/ldai/chat/tracked_chat.py +++ b/packages/sdk/server-ai/src/ldai/chat/tracked_chat.py @@ -4,11 +4,11 @@ import logging from typing import Dict, List, Optional +from ldai.chat.types import ChatResponse from ldai.judge import AIJudge from ldai.judge.types import JudgeResponse from ldai.models import AICompletionConfig, LDMessage from ldai.providers.ai_provider import AIProvider -from ldai.providers.types import ChatResponse from ldai.tracker import LDAIConfigTracker diff --git a/packages/sdk/server-ai/src/ldai/providers/types.py b/packages/sdk/server-ai/src/ldai/chat/types.py similarity index 63% rename from packages/sdk/server-ai/src/ldai/providers/types.py rename to packages/sdk/server-ai/src/ldai/chat/types.py index 982c42b..86b1d57 100644 --- a/packages/sdk/server-ai/src/ldai/providers/types.py +++ b/packages/sdk/server-ai/src/ldai/chat/types.py @@ -1,4 +1,4 @@ -"""Types 
for AI provider responses.""" +"""Types for chat responses.""" from dataclasses import dataclass from typing import Any, List, Optional @@ -15,13 +15,3 @@ class ChatResponse: message: LDMessage metrics: LDAIMetrics evaluations: Optional[List[Any]] = None # List of JudgeResponse, will be populated later - - -@dataclass -class StructuredResponse: - """ - Structured response from AI models. - """ - data: dict[str, Any] - raw_response: str - metrics: LDAIMetrics diff --git a/packages/sdk/server-ai/src/ldai/judge/__init__.py b/packages/sdk/server-ai/src/ldai/judge/__init__.py index fc31e0d..f57744d 100644 --- a/packages/sdk/server-ai/src/ldai/judge/__init__.py +++ b/packages/sdk/server-ai/src/ldai/judge/__init__.py @@ -1,6 +1,6 @@ """Judge module for LaunchDarkly AI SDK.""" from ldai.judge.ai_judge import AIJudge -from ldai.judge.types import EvalScore, JudgeResponse +from ldai.judge.types import EvalScore, JudgeResponse, StructuredResponse -__all__ = ['AIJudge', 'EvalScore', 'JudgeResponse'] +__all__ = ['AIJudge', 'EvalScore', 'JudgeResponse', 'StructuredResponse'] diff --git a/packages/sdk/server-ai/src/ldai/judge/ai_judge.py b/packages/sdk/server-ai/src/ldai/judge/ai_judge.py index d5bb061..f90bf8a 100644 --- a/packages/sdk/server-ai/src/ldai/judge/ai_judge.py +++ b/packages/sdk/server-ai/src/ldai/judge/ai_judge.py @@ -6,11 +6,11 @@ import chevron +from ldai.chat.types import ChatResponse from ldai.judge.evaluation_schema_builder import EvaluationSchemaBuilder -from ldai.judge.types import EvalScore, JudgeResponse +from ldai.judge.types import EvalScore, JudgeResponse, StructuredResponse from ldai.models import AIJudgeConfig, LDMessage from ldai.providers.ai_provider import AIProvider -from ldai.providers.types import ChatResponse, StructuredResponse from ldai.tracker import LDAIConfigTracker diff --git a/packages/sdk/server-ai/src/ldai/judge/types.py b/packages/sdk/server-ai/src/ldai/judge/types.py index 7c90091..bf42302 100644 --- a/packages/sdk/server-ai/src/ldai/judge/types.py +++ b/packages/sdk/server-ai/src/ldai/judge/types.py @@ -3,6 +3,8 @@ from dataclasses import dataclass from typing import Any, Dict, Optional +from ldai.metrics import LDAIMetrics + @dataclass class EvalScore: @@ -42,3 +44,13 @@ def to_dict(self) -> Dict[str, Any]: if self.error is not None: result['error'] = self.error return result + + +@dataclass +class StructuredResponse: + """ + Structured response from AI models. 
+ """ + data: dict[str, Any] + raw_response: str + metrics: LDAIMetrics diff --git a/packages/sdk/server-ai/src/ldai/providers/ai_provider.py b/packages/sdk/server-ai/src/ldai/providers/ai_provider.py index 3deb40b..151058e 100644 --- a/packages/sdk/server-ai/src/ldai/providers/ai_provider.py +++ b/packages/sdk/server-ai/src/ldai/providers/ai_provider.py @@ -4,9 +4,10 @@ from abc import ABC, abstractmethod from typing import Any, Dict, List, Optional +from ldai.chat.types import ChatResponse +from ldai.judge.types import StructuredResponse from ldai.metrics import LDAIMetrics from ldai.models import AIConfigKind, LDMessage -from ldai.providers.types import ChatResponse, StructuredResponse class AIProvider(ABC): From 5a2d2dc6c375b7fd2f40473afbad600b86b8be5e Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Mon, 1 Dec 2025 19:42:24 +0100 Subject: [PATCH 11/12] fix linting --- packages/sdk/server-ai/src/ldai/providers/ai_provider_factory.py | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/sdk/server-ai/src/ldai/providers/ai_provider_factory.py b/packages/sdk/server-ai/src/ldai/providers/ai_provider_factory.py index 57f97a1..bc3e7e2 100644 --- a/packages/sdk/server-ai/src/ldai/providers/ai_provider_factory.py +++ b/packages/sdk/server-ai/src/ldai/providers/ai_provider_factory.py @@ -16,6 +16,7 @@ # Type representing the supported AI providers SupportedAIProvider = Literal['langchain'] + class AIProviderFactory: """ Factory for creating AIProvider instances based on the provider configuration. From 11750d25146a58e99aa9f3ea7cbf3c17813a09e0 Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Tue, 2 Dec 2025 18:27:26 +0100 Subject: [PATCH 12/12] fixes --- packages/sdk/server-ai/src/ldai/__init__.py | 3 +- .../server-ai/src/ldai/chat/tracked_chat.py | 3 +- packages/sdk/server-ai/src/ldai/chat/types.py | 2 +- packages/sdk/server-ai/src/ldai/client.py | 4 +- .../sdk/server-ai/src/ldai/config/__init__.py | 5 + .../sdk/server-ai/src/ldai/config/types.py | 102 +++++++++++++++++ .../sdk/server-ai/src/ldai/judge/ai_judge.py | 3 +- packages/sdk/server-ai/src/ldai/models.py | 105 +----------------- .../src/ldai/providers/ai_provider.py | 3 +- 9 files changed, 124 insertions(+), 106 deletions(-) create mode 100644 packages/sdk/server-ai/src/ldai/config/__init__.py create mode 100644 packages/sdk/server-ai/src/ldai/config/types.py diff --git a/packages/sdk/server-ai/src/ldai/__init__.py b/packages/sdk/server-ai/src/ldai/__init__.py index 0061482..099c8b7 100644 --- a/packages/sdk/server-ai/src/ldai/__init__.py +++ b/packages/sdk/server-ai/src/ldai/__init__.py @@ -12,6 +12,7 @@ from ldai.chat import ChatResponse, TrackedChat # Export main client from ldai.client import LDAIClient +from ldai.config import LDMessage, ModelConfig, ProviderConfig # Export judge from ldai.judge import AIJudge, EvalScore, JudgeResponse, StructuredResponse # Export models for convenience @@ -19,7 +20,7 @@ AIAgentConfig, AIAgentConfigDefault, AIAgentConfigRequest, AIAgents, AICompletionConfig, AICompletionConfigDefault, AIConfig, AIJudgeConfig, AIJudgeConfigDefault, JudgeConfiguration, LDAIAgent, LDAIAgentConfig, - LDAIAgentDefaults, LDMessage, ModelConfig, ProviderConfig) + LDAIAgentDefaults) __all__ = [ 'LDAIClient', diff --git a/packages/sdk/server-ai/src/ldai/chat/tracked_chat.py b/packages/sdk/server-ai/src/ldai/chat/tracked_chat.py index e301234..03033f4 100644 --- a/packages/sdk/server-ai/src/ldai/chat/tracked_chat.py +++ b/packages/sdk/server-ai/src/ldai/chat/tracked_chat.py @@ -5,9 +5,10 @@ from typing import Dict, 
List, Optional from ldai.chat.types import ChatResponse +from ldai.config.types import LDMessage from ldai.judge import AIJudge from ldai.judge.types import JudgeResponse -from ldai.models import AICompletionConfig, LDMessage +from ldai.models import AICompletionConfig from ldai.providers.ai_provider import AIProvider from ldai.tracker import LDAIConfigTracker diff --git a/packages/sdk/server-ai/src/ldai/chat/types.py b/packages/sdk/server-ai/src/ldai/chat/types.py index 86b1d57..b158a3f 100644 --- a/packages/sdk/server-ai/src/ldai/chat/types.py +++ b/packages/sdk/server-ai/src/ldai/chat/types.py @@ -3,8 +3,8 @@ from dataclasses import dataclass from typing import Any, List, Optional +from ldai.config.types import LDMessage from ldai.metrics import LDAIMetrics -from ldai.models import LDMessage @dataclass diff --git a/packages/sdk/server-ai/src/ldai/client.py b/packages/sdk/server-ai/src/ldai/client.py index 2b314cf..a2a9d98 100644 --- a/packages/sdk/server-ai/src/ldai/client.py +++ b/packages/sdk/server-ai/src/ldai/client.py @@ -7,12 +7,12 @@ from ldclient.client import LDClient from ldai.chat import TrackedChat +from ldai.config import LDMessage, ModelConfig, ProviderConfig from ldai.judge import AIJudge from ldai.models import (AIAgentConfig, AIAgentConfigDefault, AIAgentConfigRequest, AIAgents, AICompletionConfig, AICompletionConfigDefault, AIJudgeConfig, - AIJudgeConfigDefault, JudgeConfiguration, LDMessage, - ModelConfig, ProviderConfig) + AIJudgeConfigDefault, JudgeConfiguration) from ldai.providers.ai_provider_factory import (AIProviderFactory, SupportedAIProvider) from ldai.tracker import LDAIConfigTracker diff --git a/packages/sdk/server-ai/src/ldai/config/__init__.py b/packages/sdk/server-ai/src/ldai/config/__init__.py new file mode 100644 index 0000000..b9553f7 --- /dev/null +++ b/packages/sdk/server-ai/src/ldai/config/__init__.py @@ -0,0 +1,5 @@ +"""Config module for LaunchDarkly AI SDK.""" + +from ldai.config.types import LDMessage, ModelConfig, ProviderConfig + +__all__ = ['LDMessage', 'ModelConfig', 'ProviderConfig'] diff --git a/packages/sdk/server-ai/src/ldai/config/types.py b/packages/sdk/server-ai/src/ldai/config/types.py new file mode 100644 index 0000000..ac7516a --- /dev/null +++ b/packages/sdk/server-ai/src/ldai/config/types.py @@ -0,0 +1,102 @@ +"""Types for configuration.""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import Any, Dict, Literal, Optional + + +@dataclass +class LDMessage: + role: Literal['system', 'user', 'assistant'] + content: str + + def to_dict(self) -> dict: + """ + Render the given message as a dictionary object. + """ + return { + 'role': self.role, + 'content': self.content, + } + + +class ModelConfig: + """ + Configuration related to the model. + """ + + def __init__(self, name: str, parameters: Optional[Dict[str, Any]] = None, custom: Optional[Dict[str, Any]] = None): + """ + :param name: The name of the model. + :param parameters: Additional model-specific parameters. + :param custom: Additional customer provided data. + """ + self._name = name + self._parameters = parameters + self._custom = custom + + @property + def name(self) -> str: + """ + The name of the model. + """ + return self._name + + def get_parameter(self, key: str) -> Any: + """ + Retrieve model-specific parameters. + + Accessing a named, typed attribute (e.g. name) will result in the call + being delegated to the appropriate property. 
+ """ + if key == 'name': + return self.name + + if self._parameters is None: + return None + + return self._parameters.get(key) + + def get_custom(self, key: str) -> Any: + """ + Retrieve customer provided data. + """ + if self._custom is None: + return None + + return self._custom.get(key) + + def to_dict(self) -> dict: + """ + Render the given model config as a dictionary object. + """ + return { + 'name': self._name, + 'parameters': self._parameters, + 'custom': self._custom, + } + + +class ProviderConfig: + """ + Configuration related to the provider. + """ + + def __init__(self, name: str): + self._name = name + + @property + def name(self) -> str: + """ + The name of the provider. + """ + return self._name + + def to_dict(self) -> dict: + """ + Render the given provider config as a dictionary object. + """ + return { + 'name': self._name, + } diff --git a/packages/sdk/server-ai/src/ldai/judge/ai_judge.py b/packages/sdk/server-ai/src/ldai/judge/ai_judge.py index f90bf8a..9934657 100644 --- a/packages/sdk/server-ai/src/ldai/judge/ai_judge.py +++ b/packages/sdk/server-ai/src/ldai/judge/ai_judge.py @@ -7,9 +7,10 @@ import chevron from ldai.chat.types import ChatResponse +from ldai.config.types import LDMessage from ldai.judge.evaluation_schema_builder import EvaluationSchemaBuilder from ldai.judge.types import EvalScore, JudgeResponse, StructuredResponse -from ldai.models import AIJudgeConfig, LDMessage +from ldai.models import AIJudgeConfig from ldai.providers.ai_provider import AIProvider from ldai.tracker import LDAIConfigTracker diff --git a/packages/sdk/server-ai/src/ldai/models.py b/packages/sdk/server-ai/src/ldai/models.py index c2abe56..e4e0a9e 100644 --- a/packages/sdk/server-ai/src/ldai/models.py +++ b/packages/sdk/server-ai/src/ldai/models.py @@ -1,110 +1,17 @@ +from __future__ import annotations + import warnings from dataclasses import dataclass, field -from typing import Any, Dict, List, Literal, Optional, Union +from typing import Any, Dict, List, Optional, Union +from ldai.config.types import LDMessage, ModelConfig, ProviderConfig from ldai.tracker import LDAIConfigTracker - -@dataclass -class LDMessage: - role: Literal['system', 'user', 'assistant'] - content: str - - def to_dict(self) -> dict: - """ - Render the given message as a dictionary object. - """ - return { - 'role': self.role, - 'content': self.content, - } - - -class ModelConfig: - """ - Configuration related to the model. - """ - - def __init__(self, name: str, parameters: Optional[Dict[str, Any]] = None, custom: Optional[Dict[str, Any]] = None): - """ - :param name: The name of the model. - :param parameters: Additional model-specific parameters. - :param custom: Additional customer provided data. - """ - self._name = name - self._parameters = parameters - self._custom = custom - - @property - def name(self) -> str: - """ - The name of the model. - """ - return self._name - - def get_parameter(self, key: str) -> Any: - """ - Retrieve model-specific parameters. - - Accessing a named, typed attribute (e.g. name) will result in the call - being delegated to the appropriate property. - """ - if key == 'name': - return self.name - - if self._parameters is None: - return None - - return self._parameters.get(key) - - def get_custom(self, key: str) -> Any: - """ - Retrieve customer provided data. - """ - if self._custom is None: - return None - - return self._custom.get(key) - - def to_dict(self) -> dict: - """ - Render the given model config as a dictionary object. 
- """ - return { - 'name': self._name, - 'parameters': self._parameters, - 'custom': self._custom, - } - - -class ProviderConfig: - """ - Configuration related to the provider. - """ - - def __init__(self, name: str): - self._name = name - - @property - def name(self) -> str: - """ - The name of the provider. - """ - return self._name - - def to_dict(self) -> dict: - """ - Render the given provider config as a dictionary object. - """ - return { - 'name': self._name, - } - - # ============================================================================ # Judge Types # ============================================================================ + @dataclass(frozen=True) class JudgeConfiguration: """ @@ -128,7 +35,7 @@ def to_dict(self) -> dict: 'samplingRate': self.sampling_rate, } - judges: List['JudgeConfiguration.Judge'] + judges: List[JudgeConfiguration.Judge] def to_dict(self) -> dict: """ diff --git a/packages/sdk/server-ai/src/ldai/providers/ai_provider.py b/packages/sdk/server-ai/src/ldai/providers/ai_provider.py index 151058e..9c88c58 100644 --- a/packages/sdk/server-ai/src/ldai/providers/ai_provider.py +++ b/packages/sdk/server-ai/src/ldai/providers/ai_provider.py @@ -5,9 +5,10 @@ from typing import Any, Dict, List, Optional from ldai.chat.types import ChatResponse +from ldai.config.types import LDMessage from ldai.judge.types import StructuredResponse from ldai.metrics import LDAIMetrics -from ldai.models import AIConfigKind, LDMessage +from ldai.models import AIConfigKind class AIProvider(ABC):