diff --git a/python/packages/google/.gitignore b/python/packages/google/.gitignore new file mode 100644 index 0000000000..6beea689d0 --- /dev/null +++ b/python/packages/google/.gitignore @@ -0,0 +1 @@ +.temp_e2e/ diff --git a/python/packages/google/LICENSE b/python/packages/google/LICENSE new file mode 100644 index 0000000000..9e841e7a26 --- /dev/null +++ b/python/packages/google/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/python/packages/google/README.md b/python/packages/google/README.md new file mode 100644 index 0000000000..870fa2bdee --- /dev/null +++ b/python/packages/google/README.md @@ -0,0 +1,250 @@ +# Get Started with Microsoft Agent Framework Google + +Please install this package via pip: + +```bash +pip install agent-framework-google --pre +``` + +## Google AI (Gemini API) Integration + +This package provides integration with Google's Gemini API for Agent Framework: + +- **Google AI (Gemini API)**: Direct access to Google's Gemini models with API key authentication + +> **Note**: This package uses the new `google-genai` SDK as recommended by Google. See the [migration guide](https://ai.google.dev/gemini-api/docs/migrate) for more information. 
+ +### Current Features + +**Available Now:** +- `GoogleAISettings`: Configuration class for Google AI (Gemini API) authentication and settings +- `GoogleAIChatClient`: Chat client for Google AI with streaming, function calling, and multi-turn conversation support +- Function calling with `@AIFunction` decorator and plain Python functions +- Multi-modal support (images) +- Full `ChatOptions` support (temperature, top_p, max_tokens, stop sequences) +- Usage tracking and OpenTelemetry observability + +**Coming Soon:** +- Advanced features (context caching, safety settings, structured output) +- Thinking mode (Gemini 2.5) +- Enhanced error handling with retry policies + +### Configuration + +#### Google AI Settings + +```python +from agent_framework_google import GoogleAISettings + +# Configure via environment variables +# GOOGLE_AI_API_KEY=your_api_key +# GOOGLE_AI_CHAT_MODEL_ID=gemini-2.5-flash + +settings = GoogleAISettings() + +# Or pass parameters directly (pass SecretStr for type safety) +from pydantic import SecretStr + +settings = GoogleAISettings( + api_key=SecretStr("your_api_key"), + chat_model_id="gemini-2.5-flash" +) +``` + +### Usage Examples + +#### Basic Chat Completion + +```python +import asyncio +from agent_framework import ChatMessage, Role, ChatOptions +from agent_framework_google import GoogleAIChatClient + +async def main(): + # Configure via environment variables + # GOOGLE_AI_API_KEY=your_api_key + # GOOGLE_AI_CHAT_MODEL_ID=gemini-2.5-flash + + client = GoogleAIChatClient() + + # Create a simple chat message + messages = [ + ChatMessage(role=Role.USER, text="What is the capital of France?") + ] + + # Get response + response = await client.get_response( + messages=messages, + chat_options=ChatOptions() + ) + + print(response.messages[0].text) + # Output: Paris is the capital of France. 
+ +# Run the async function +asyncio.run(main()) +``` + +#### Streaming Chat + +```python +import asyncio +from agent_framework import ChatMessage, Role, ChatOptions +from agent_framework_google import GoogleAIChatClient + +async def main(): + client = GoogleAIChatClient() + + messages = [ + ChatMessage(role=Role.USER, text="Write a short poem about programming.") + ] + + # Stream the response + async for chunk in client.get_streaming_response( + messages=messages, + chat_options=ChatOptions() + ): + if chunk.text: + print(chunk.text, end="", flush=True) + +# Run the async function +asyncio.run(main()) +``` + +#### Chat with System Instructions + +```python +import asyncio +from agent_framework import ChatMessage, Role, ChatOptions +from agent_framework_google import GoogleAIChatClient + +async def main(): + client = GoogleAIChatClient() + + messages = [ + ChatMessage(role=Role.SYSTEM, text="You are a helpful coding assistant."), + ChatMessage(role=Role.USER, text="How do I reverse a string in Python?") + ] + + response = await client.get_response( + messages=messages, + chat_options=ChatOptions() + ) + + print(response.messages[0].text) + +# Run the async function +asyncio.run(main()) +``` + +#### Multi-Turn Conversation + +```python +import asyncio +from agent_framework import ChatMessage, Role, ChatOptions +from agent_framework_google import GoogleAIChatClient + +async def main(): + client = GoogleAIChatClient() + + messages = [ + ChatMessage(role=Role.USER, text="Hello! My name is Alice."), + ChatMessage(role=Role.ASSISTANT, text="Hello Alice! Nice to meet you."), + ChatMessage(role=Role.USER, text="What's my name?") + ] + + response = await client.get_response( + messages=messages, + chat_options=ChatOptions() + ) + + print(response.messages[0].text) + # Output: Your name is Alice! 
+ +# Run the async function +asyncio.run(main()) +``` + +#### Customizing Generation Parameters + +```python +import asyncio +from agent_framework import ChatMessage, Role, ChatOptions +from agent_framework_google import GoogleAIChatClient + +async def main(): + client = GoogleAIChatClient() + + messages = [ + ChatMessage(role=Role.USER, text="Generate a creative story.") + ] + + # Customize temperature and token limit + chat_options = ChatOptions( + temperature=0.9, # Higher for more creativity + max_tokens=500, + top_p=0.95 + ) + + response = await client.get_response( + messages=messages, + chat_options=chat_options + ) + + print(response.messages[0].text) + +# Run the async function +asyncio.run(main()) +``` + +## Configuration + +### Environment Variables + +**Google AI:** +- `GOOGLE_AI_API_KEY`: Your Google AI API key ([Get one here](https://aistudio.google.com/app/apikey)) +- `GOOGLE_AI_CHAT_MODEL_ID`: Model to use (e.g., `gemini-2.5-flash`, `gemini-2.5-pro`) + +### Supported Models + +- `gemini-2.5-flash`: Best price-performance, recommended for most use cases (stable) +- `gemini-2.5-pro`: Advanced thinking model for complex reasoning (stable) +- `gemini-2.0-flash`: Previous generation workhorse model (stable) +- `gemini-1.5-pro`: Legacy stable model +- `gemini-1.5-flash`: Legacy fast model + +## Features + +### Current Features +- ✅ Chat completion (streaming and non-streaming) +- ✅ System instructions +- ✅ Conversation history management +- ✅ Usage/token tracking +- ✅ Customizable generation parameters (temperature, max_tokens, top_p, stop) +- ✅ Function/tool calling (`@AIFunction` and plain Python functions) +- ✅ Multi-modal support (images) +- ✅ OpenTelemetry observability + +### Planned Features +- 🚧 Context caching +- 🚧 Safety settings configuration +- 🚧 Structured output (JSON mode) +- 🚧 Thinking mode (Gemini 2.5) + +## Development Status + +This package is being developed incrementally: + +- ✅ **Phase 1**: Package structure and settings classes +- ✅ 
**Phase 2**: Google AI chat client with streaming, function calling, and multi-modal support +- 🚧 **Phase 3**: Advanced features (context caching, safety settings, thinking mode) +- 🚧 **Phase 4**: Integration tests and comprehensive samples + +## Additional Information + +For more information: +- [Google AI Studio](https://aistudio.google.com/) - Get an API key and test models +- [Google AI Documentation](https://ai.google.dev/gemini-api/docs) +- [Google GenAI SDK Migration Guide](https://ai.google.dev/gemini-api/docs/migrate) +- [Agent Framework Documentation](https://aka.ms/agent-framework) +- [Agent Framework Repository](https://github.com/microsoft/agent-framework) diff --git a/python/packages/google/agent_framework_google/__init__.py b/python/packages/google/agent_framework_google/__init__.py new file mode 100644 index 0000000000..4bf1430f7b --- /dev/null +++ b/python/packages/google/agent_framework_google/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) Microsoft. All rights reserved. + +import importlib.metadata + +from ._chat_client import GoogleAIChatClient, GoogleAIChatOptions + +try: + __version__ = importlib.metadata.version(__name__) +except importlib.metadata.PackageNotFoundError: + __version__ = "0.0.0" # Fallback for development mode + +__all__ = [ + "GoogleAIChatClient", + "GoogleAIChatOptions", + "__version__", +] diff --git a/python/packages/google/agent_framework_google/_chat_client.py b/python/packages/google/agent_framework_google/_chat_client.py new file mode 100644 index 0000000000..9881af99af --- /dev/null +++ b/python/packages/google/agent_framework_google/_chat_client.py @@ -0,0 +1,868 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +from __future__ import annotations + +import base64 +import json +import logging +import sys +import uuid +from collections.abc import AsyncIterable, Awaitable, Mapping, Sequence +from typing import Any, ClassVar, Generic + +from agent_framework import ( + AGENT_FRAMEWORK_USER_AGENT, + BaseChatClient, + ChatAndFunctionMiddlewareTypes, + ChatMiddlewareLayer, + ChatOptions, + ChatResponse, + ChatResponseUpdate, + Content, + FinishReasonLiteral, + FunctionInvocationConfiguration, + FunctionInvocationLayer, + FunctionTool, + Message, + ResponseStream, + UsageDetails, +) +from agent_framework._settings import SecretString, load_settings +from agent_framework._types import _get_data_bytes_as_str # type: ignore +from agent_framework.observability import ChatTelemetryLayer +from google import genai +from google.genai import types + +if sys.version_info >= (3, 11): + from typing import TypedDict # type: ignore # pragma: no cover +else: + from typing_extensions import TypedDict # type: ignore # pragma: no cover +if sys.version_info >= (3, 13): + from typing import TypeVar # type: ignore # pragma: no cover +else: + from typing_extensions import TypeVar # type: ignore # pragma: no cover +if sys.version_info >= (3, 12): + from typing import override # type: ignore # pragma: no cover +else: + from typing_extensions import override # type: ignore # pragma: no cover + + +__all__ = [ + "GoogleAIChatClient", + "GoogleAIChatOptions", + "GoogleAISettings", +] + +logger = logging.getLogger("agent_framework.google") + + +# region Role and Finish Reason Maps + + +# Role mapping from Agent Framework to Google AI +# Note: SYSTEM messages are extracted to config.system_instruction and skipped in message conversion +ROLE_MAP: dict[str, str] = { + "user": "user", + "assistant": "model", + "system": "user", # Fallback only - system messages are normally extracted to system_instruction + "tool": "function", +} + +# Finish reason mapping from Google AI to Agent Framework +FINISH_REASON_MAP: 
dict[str, FinishReasonLiteral] = { + "STOP": "stop", + "MAX_TOKENS": "length", + "SAFETY": "content_filter", + "RECITATION": "content_filter", + "LANGUAGE": "stop", + "OTHER": "stop", + "BLOCKLIST": "content_filter", + "PROHIBITED_CONTENT": "content_filter", + "SPII": "content_filter", + "MALFORMED_FUNCTION_CALL": "stop", + "IMAGE_SAFETY": "content_filter", + "IMAGE_PROHIBITED_CONTENT": "content_filter", + "IMAGE_OTHER": "stop", + "NO_IMAGE": "stop", + "IMAGE_RECITATION": "content_filter", + "UNEXPECTED_TOOL_CALL": "stop", + "TOO_MANY_TOOL_CALLS": "stop", +} + + +# region Settings and Options + + +class GoogleAISettings(TypedDict, total=False): + """Google AI settings for Gemini API access. + + Settings are resolved in this order: explicit keyword arguments, values from an + explicitly provided .env file, then environment variables with the prefix + 'GOOGLE_AI_'. + + Keys: + api_key: The Google AI API key. + chat_model_id: The Google AI chat model ID (e.g., gemini-2.5-flash). + """ + + api_key: SecretString | None + chat_model_id: str | None + + +class GoogleAIChatOptions(ChatOptions, total=False): + """Google AI-specific chat options. + + Extends ChatOptions with options specific to Google's Gemini API. + Options that Google doesn't support are typed as None to indicate they're unavailable. + + Keys: + model_id: The model to use for the request. + temperature: Sampling temperature between 0 and 2. + top_p: Nucleus sampling parameter. + max_tokens: Maximum number of output tokens, + translates to ``max_output_tokens`` in Google AI API. + stop: Stop sequences, + translates to ``stop_sequences`` in Google AI API. + tools: List of tools (functions) available to the model. + tool_choice: How the model should use tools. + top_k: Number of top tokens to consider for sampling. + candidate_count: Number of response candidates to generate. + presence_penalty: Presence penalty for the model. + frequency_penalty: Frequency penalty for the model. 
+ instructions: System instructions for the model, + translates to ``system_instruction`` in Google AI API. + """ + + # Google-specific generation parameters + top_k: int + candidate_count: int + + # Unsupported base options (override with None to indicate not supported) + logit_bias: None # type: ignore[misc] + seed: None # type: ignore[misc] + store: None # type: ignore[misc] + conversation_id: None # type: ignore[misc] + + +GoogleOptionsT = TypeVar( + "GoogleOptionsT", + bound=TypedDict, # type: ignore[valid-type] + default="GoogleAIChatOptions", + covariant=True, +) + + +# region GoogleAIChatClient + + +class GoogleAIChatClient( + ChatMiddlewareLayer[GoogleOptionsT], + FunctionInvocationLayer[GoogleOptionsT], + ChatTelemetryLayer[GoogleOptionsT], + BaseChatClient[GoogleOptionsT], + Generic[GoogleOptionsT], +): + """Google AI chat client for Gemini models with middleware, telemetry, and function invocation support. + + This client implements the BaseChatClient interface to provide access to + Google's Gemini models through the Google AI API (Gemini API). + + Examples: + .. 
code-block:: python + + from agent_framework_google import GoogleAIChatClient + + # Using environment variables + # Set GOOGLE_AI_API_KEY=your_api_key + # Set GOOGLE_AI_CHAT_MODEL_ID=gemini-2.5-flash + + client = GoogleAIChatClient() + + # Or pass parameters directly + client = GoogleAIChatClient(api_key="your_api_key", model_id="gemini-2.5-flash") + + # Using custom ChatOptions with type safety: + from typing import TypedDict + from agent_framework_google import GoogleAIChatOptions + + + class MyOptions(GoogleAIChatOptions, total=False): + my_custom_option: str + + + client: GoogleAIChatClient[MyOptions] = GoogleAIChatClient(model_id="gemini-2.5-flash") + response = await client.get_response("Hello", options={"my_custom_option": "value"}) + """ + + OTEL_PROVIDER_NAME: ClassVar[str] = "gcp.gemini" # type: ignore[reportIncompatibleVariableOverride, misc] + + def __init__( + self, + *, + api_key: str | None = None, + model_id: str | None = None, + google_client: genai.Client | None = None, + middleware: Sequence[ChatAndFunctionMiddlewareTypes] | None = None, + function_invocation_configuration: FunctionInvocationConfiguration | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + **kwargs: Any, + ) -> None: + """Initialize a Google AI chat client. + + Keyword Args: + api_key: The Google AI API key to use for authentication. + model_id: The model ID to use for chat completions (e.g., "gemini-2.5-flash"). + google_client: An existing Google GenAI client to use. If not provided, one will be created. + middleware: Optional middleware to apply to the client. + function_invocation_configuration: Optional function invocation configuration override. + env_file_path: Path to environment file for loading settings. + env_file_encoding: Encoding of the environment file. + kwargs: Additional keyword arguments passed to the parent class. + + Examples: + .. 
code-block:: python + + from agent_framework_google import GoogleAIChatClient + + # Using environment variables + # Set GOOGLE_AI_API_KEY=your_api_key + # Set GOOGLE_AI_CHAT_MODEL_ID=gemini-2.5-flash + + client = GoogleAIChatClient() + + # Or passing parameters directly + client = GoogleAIChatClient( + model_id="gemini-2.5-flash", + api_key="your_api_key", + ) + + # Or loading from a .env file + client = GoogleAIChatClient(env_file_path="path/to/.env") + + # Or passing in an existing client + from google import genai + + google_client = genai.Client(api_key="your_api_key") + client = GoogleAIChatClient( + model_id="gemini-2.5-flash", + google_client=google_client, + ) + """ + google_settings = load_settings( + GoogleAISettings, + env_prefix="GOOGLE_AI_", + api_key=api_key, + chat_model_id=model_id, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + + if google_client is None: + if not google_settings["api_key"]: + raise ValueError( + "Google AI API key is required. Set via 'api_key' parameter " + "or 'GOOGLE_AI_API_KEY' environment variable." + ) + + google_client = genai.Client( + api_key=google_settings["api_key"].get_secret_value(), + http_options={"headers": {"User-Agent": AGENT_FRAMEWORK_USER_AGENT}}, + ) + + # Initialize parent + super().__init__( + middleware=middleware, + function_invocation_configuration=function_invocation_configuration, + **kwargs, + ) + + # Initialize instance variables + self.google_client = google_client + self.model_id = google_settings["chat_model_id"] + self._function_name_map: dict[str, str] = {} + + # region Static factory methods for hosted tools + + @staticmethod + def get_google_search_tool() -> dict[str, Any]: + """Create a Google Search tool configuration for Gemini. + + Returns: + A dict-based tool configuration ready to pass to ChatAgent tools. + + Examples: + .. 
code-block:: python + + from agent_framework_google import GoogleAIChatClient + + tool = GoogleAIChatClient.get_google_search_tool() + agent = GoogleAIChatClient().as_agent(tools=[tool]) + """ + return {"google_search": {}} + + @staticmethod + def get_code_execution_tool() -> dict[str, Any]: + """Create a Code Execution tool configuration for Gemini. + + Returns: + A dict-based tool configuration ready to pass to ChatAgent tools. + + Examples: + .. code-block:: python + + from agent_framework_google import GoogleAIChatClient + + tool = GoogleAIChatClient.get_code_execution_tool() + agent = GoogleAIChatClient().as_agent(tools=[tool]) + """ + return {"code_execution": {}} + + # endregion + + # region Get response methods + + @override + def _inner_get_response( + self, + *, + messages: Sequence[Message], + options: Mapping[str, Any], + stream: bool = False, + **kwargs: Any, + ) -> Awaitable[ChatResponse] | ResponseStream[ChatResponseUpdate, ChatResponse]: + # prepare + run_options = self._prepare_options(messages, options, **kwargs) + model = run_options["model"] + contents = run_options["contents"] + config = run_options["config"] + + if stream: + # Streaming mode + async def _stream() -> AsyncIterable[ChatResponseUpdate]: + async for chunk in await self.google_client.aio.models.generate_content_stream( + model=model, + contents=contents, + config=config, + ): + parsed_chunk = self._process_stream_chunk(chunk) + if parsed_chunk: + yield parsed_chunk + + return self._build_response_stream(_stream(), response_format=options.get("response_format")) + + # Non-streaming mode + async def _get_response() -> ChatResponse: + response = await self.google_client.aio.models.generate_content( + model=model, + contents=contents, + config=config, + ) + return self._process_response(response, options) + + return _get_response() + + # endregion + + # region Prep methods + + def _prepare_options( + self, + messages: Sequence[Message], + options: Mapping[str, Any], + **kwargs: Any, + 
) -> dict[str, Any]: + """Create run options for the Google AI client based on messages and options. + + Args: + messages: The list of chat messages. + options: The options dict. + kwargs: Additional keyword arguments. + + Returns: + A dictionary with model, contents, and config keys for the Google AI client. + """ + # Prepend instructions from options if they exist + instructions = options.get("instructions") + if instructions: + from agent_framework._types import prepend_instructions_to_messages + + messages = prepend_instructions_to_messages(list(messages), instructions, role="system") + + # Build configuration + config = self._create_config(options, messages, **kwargs) + + # Convert messages to Google AI format (skip system messages) + contents = self._prepare_messages_for_google(messages) + + # Determine model ID + model = options.get("model_id") or self.model_id + if not model: + raise ValueError("model_id must be a non-empty string") + + return { + "model": model, + "contents": contents, + "config": config, + } + + def _create_config( + self, + options: Mapping[str, Any], + messages: Sequence[Message] | None = None, + **kwargs: Any, + ) -> types.GenerateContentConfig: + """Create the Google AI generation config from chat options. + + Args: + options: The chat options dict to convert. + messages: The conversation messages (used to extract system instruction). + kwargs: Additional keyword arguments. + + Returns: + The Google AI generation config. 
+ """ + config_params: dict[str, Any] = {} + + # Extract system instruction from all system messages + if messages: + system_instructions = [msg.text for msg in messages if msg.role == "system" and msg.text] + if system_instructions: + config_params["system_instruction"] = "\n".join(system_instructions) + + # Map Agent Framework options to Google AI config + if options.get("temperature") is not None: + config_params["temperature"] = options["temperature"] + if options.get("top_p") is not None: + config_params["top_p"] = options["top_p"] + if options.get("max_tokens") is not None: + config_params["max_output_tokens"] = options["max_tokens"] + if options.get("stop") is not None: + stop_val = options["stop"] + config_params["stop_sequences"] = [stop_val] if isinstance(stop_val, str) else list(stop_val) + if options.get("top_k") is not None: + config_params["top_k"] = options["top_k"] + if options.get("candidate_count") is not None: + config_params["candidate_count"] = options["candidate_count"] + if options.get("presence_penalty") is not None: + config_params["presence_penalty"] = options["presence_penalty"] + if options.get("frequency_penalty") is not None: + config_params["frequency_penalty"] = options["frequency_penalty"] + + # Add tools if provided + if options.get("tools"): + tools_list = self._prepare_tools_for_google(options) + if tools_list: + config_params["tools"] = tools_list + + # Add tool choice if provided + if options.get("tool_choice") is not None: + tool_config = self._prepare_tool_config(options) + if tool_config: + config_params["tool_config"] = tool_config + + extra_params = {k: v for k, v in kwargs.items() if not k.startswith("_") and k not in {"thread", "middleware"}} + config_params.update(extra_params) + + return types.GenerateContentConfig(**config_params) + + def _prepare_tools_for_google(self, options: Mapping[str, Any]) -> list[Any] | None: + """Convert tools to Google AI format. + + Converts FunctionTool to Google AI format. 
Hosted tools (google_search, + code_execution) are passed through as Tool objects. + + Args: + options: The options dict containing tools. + + Returns: + List of Google AI Tool objects, or None if no tools. + """ + tools = options.get("tools") + if not tools: + return None + + function_declarations: list[types.FunctionDeclaration] = [] + hosted_tools: list[Any] = [] + + for tool_item in tools: + if isinstance(tool_item, FunctionTool): + # FunctionTool has name, description, and parameters() method + function_declarations.append( + types.FunctionDeclaration( + name=tool_item.name, + description=tool_item.description or "", + parameters=tool_item.parameters(), + ) + ) + elif isinstance(tool_item, dict): + # Hosted tools (google_search, code_execution) pass through as dicts + if "google_search" in tool_item: + hosted_tools.append(types.Tool(google_search=types.GoogleSearch())) + elif "code_execution" in tool_item: + hosted_tools.append(types.Tool(code_execution=types.ToolCodeExecution())) + else: + # Other dict-based tools pass through unchanged + hosted_tools.append(tool_item) + else: + logger.debug(f"Ignoring unsupported tool type: {type(tool_item)}") + + result: list[Any] = [] + if function_declarations: + result.append(types.Tool(function_declarations=function_declarations)) + result.extend(hosted_tools) + + return result or None + + def _prepare_tool_config(self, options: Mapping[str, Any]) -> types.ToolConfig | None: + """Prepare tool_config for the Google AI request based on tool_choice. + + Args: + options: The options dict containing tool_choice. + + Returns: + A ToolConfig object, or None. 
+ """ + from agent_framework._types import validate_tool_mode + + tool_choice = options.get("tool_choice") + tool_mode = validate_tool_mode(tool_choice) + if tool_mode is None: + return None + + match tool_mode.get("mode"): + case "auto": + return types.ToolConfig( + function_calling_config=types.FunctionCallingConfig(mode="AUTO") + ) + case "required": + if "required_function_name" in tool_mode: + return types.ToolConfig( + function_calling_config=types.FunctionCallingConfig( + mode="ANY", + allowed_function_names=[tool_mode["required_function_name"]], + ) + ) + return types.ToolConfig( + function_calling_config=types.FunctionCallingConfig(mode="ANY") + ) + case "none": + return types.ToolConfig( + function_calling_config=types.FunctionCallingConfig(mode="NONE") + ) + case _: + logger.debug(f"Ignoring unsupported tool choice mode: {tool_mode} for now") + return None + + # endregion + + # region Message Conversion + + def _prepare_messages_for_google(self, messages: Sequence[Message]) -> list[dict[str, Any]]: + """Convert Agent Framework messages to Google AI format. + + Args: + messages: The messages to convert. + + Returns: + The messages in Google AI format. + + Raises: + ValueError: If no messages remain after filtering system messages. + """ + google_messages: list[dict[str, Any]] = [] + + for message in messages: + # Skip system messages - they're passed as system_instruction in config + if message.role == "system": + continue + google_messages.append(self._prepare_message_for_google(message)) + + # Google AI requires at least one message + if not google_messages: + raise ValueError( + "No messages to send to Google AI after filtering. " + "Ensure at least one non-system message is provided." + ) + + return google_messages + + def _prepare_message_for_google(self, message: Message) -> dict[str, Any]: + """Convert a single Agent Framework message to Google AI format. + + Args: + message: The message to convert. + + Returns: + The message in Google AI format. 
+ """ + google_parts: list[dict[str, Any]] = [] + + for content in message.contents: + match content.type: + case "text": + if content.text: + google_parts.append({"text": content.text}) + case "function_call": + args = content.arguments + if isinstance(args, str): + from contextlib import suppress + + with suppress(json.JSONDecodeError): + args = json.loads(args) + if isinstance(args, Mapping): + args = dict(args) + google_parts.append({ + "function_call": {"name": content.name, "args": args or {}} + }) + case "function_result": + # function_result content uses call_id; Google API requires a name, + # so we look up the original function name from _function_name_map. + fn_name = self._function_name_map.get(content.call_id, content.call_id) + result = content.result + if result is None: + result = "" + google_parts.append({ + "function_response": { + "name": fn_name, + "response": {"result": str(result)}, + } + }) + case "data": + if content.media_type and content.media_type.startswith("image/"): + data_str = _get_data_bytes_as_str(content) + if data_str: + try: + data_bytes = base64.b64decode(data_str) + google_parts.append({ + "inline_data": { + "mime_type": content.media_type, + "data": data_bytes, + } + }) + except Exception as e: + logger.error(f"Failed to process image data: {e}") + else: + logger.debug( + f"Ignoring unsupported data media type: {content.media_type}" + ) + case "uri": + if content.media_type and content.media_type.startswith("image/"): + google_parts.append({ + "file_data": { + "mime_type": content.media_type, + "file_uri": content.uri, + } + }) + else: + logger.debug( + f"Ignoring unsupported URI content media type: {content.media_type}" + ) + case _: + logger.debug( + f"Ignoring unsupported content type: {content.type} for now" + ) + + return { + "role": ROLE_MAP.get(message.role, "user"), + "parts": google_parts, + } + + # endregion + + # region Response Processing Methods + + def _parse_parts_from_google(self, parts: list[Any]) -> 
list[Content]: + """Parse Google AI response parts into Agent Framework Content objects. + + Handles text and function_call parts. Populates _function_name_map + for function calls so that function_result conversion can look up + the original function name. + + Args: + parts: The list of parts from a Google AI response candidate. + + Returns: + A list of Content objects parsed from the parts. + """ + contents: list[Content] = [] + for part in parts: + # Check for text content + if hasattr(part, "text") and part.text: + contents.append( + Content.from_text( + text=part.text, + raw_representation=part, + ) + ) + # Check for function call content + elif hasattr(part, "function_call") and part.function_call: + fc = part.function_call + # Google doesn't provide a call ID, so we generate one + call_id = str(uuid.uuid4()) + self._function_name_map[call_id] = fc.name + # Handle args that might already be a dict or need parsing + args_value = fc.args if fc.args else {} + arguments = args_value if isinstance(args_value, dict) else str(args_value) if args_value else {} + contents.append( + Content.from_function_call( + call_id=call_id, + name=fc.name, + arguments=arguments, + raw_representation=part, + ) + ) + return contents + + def _process_response( + self, + response: types.GenerateContentResponse, + options: Mapping[str, Any], + ) -> ChatResponse: + """Process a Google AI response into Agent Framework format. + + Args: + response: The Google AI response. + options: The options dict used for the request. + + Returns: + The Agent Framework chat response. 
+ """ + contents: list[Content] = [] + finish_reason: FinishReasonLiteral | None = "stop" + + if hasattr(response, "candidates") and response.candidates: + candidate = response.candidates[0] + if ( + hasattr(candidate, "content") + and candidate.content is not None + and hasattr(candidate.content, "parts") + and candidate.content.parts is not None + ): + contents = self._parse_parts_from_google(candidate.content.parts) + + # Determine finish reason from candidate + if hasattr(candidate, "finish_reason") and candidate.finish_reason: + reason_str = ( + candidate.finish_reason.name + if hasattr(candidate.finish_reason, "name") + else str(candidate.finish_reason) + ) + finish_reason = FINISH_REASON_MAP.get(reason_str, "stop") + + # If the response contains function calls, set finish_reason to "tool_calls" + has_function_calls = any(c.type == "function_call" for c in contents) + if has_function_calls and finish_reason == "stop": + finish_reason = "tool_calls" + + # Parse usage + usage_details = self._parse_usage_from_google(response) + + # Create the response message + message = Message( + "assistant", + contents=contents, + raw_representation=response, + ) + + return ChatResponse( + messages=[message], + finish_reason=finish_reason, + usage_details=usage_details, + model_id=options.get("model_id") or self.model_id, + raw_representation=response, + response_format=options.get("response_format"), + ) + + def _process_stream_chunk( + self, + chunk: types.GenerateContentResponse, + ) -> ChatResponseUpdate | None: + """Process a streaming chunk from Google AI. + + Args: + chunk: The streaming chunk. + + Returns: + A chat response update, or None if the chunk should be skipped. 
+ """ + contents: list[Content] = [] + finish_reason: FinishReasonLiteral | None = None + + if hasattr(chunk, "candidates") and chunk.candidates: + candidate = chunk.candidates[0] + if ( + hasattr(candidate, "content") + and candidate.content is not None + and hasattr(candidate.content, "parts") + and candidate.content.parts is not None + ): + contents = self._parse_parts_from_google(candidate.content.parts) + + # Check finish reason + if hasattr(candidate, "finish_reason") and candidate.finish_reason: + reason_str = ( + candidate.finish_reason.name + if hasattr(candidate.finish_reason, "name") + else str(candidate.finish_reason) + ) + finish_reason = FINISH_REASON_MAP.get(reason_str, "stop") + + # Parse usage from chunk + usage_details = self._parse_usage_from_google(chunk) + if usage_details: + contents.append( + Content.from_usage( + usage_details=usage_details, + raw_representation=chunk, + ) + ) + + if not contents: + return None + + return ChatResponseUpdate( + role="assistant", + contents=contents, + finish_reason=finish_reason, + raw_representation=chunk, + ) + + def _parse_usage_from_google( + self, + response: types.GenerateContentResponse, + ) -> UsageDetails | None: + """Parse usage details from a Google AI response. + + Args: + response: The Google AI response or chunk. + + Returns: + UsageDetails dict, or None if no usage metadata is present. 
+ """ + if not hasattr(response, "usage_metadata") or not response.usage_metadata: + return None + + usage = response.usage_metadata + result = UsageDetails( + output_token_count=getattr(usage, "candidates_token_count", None), + ) + input_count = getattr(usage, "prompt_token_count", None) + if input_count is not None: + result["input_token_count"] = input_count + total_count = getattr(usage, "total_token_count", None) + if total_count is not None: + result["total_token_count"] = total_count + cached_count = getattr(usage, "cached_content_token_count", None) + if cached_count is not None: + result["google.cached_content_token_count"] = cached_count # type: ignore[typeddict-unknown-key] + thoughts_count = getattr(usage, "thoughts_token_count", None) + if thoughts_count is not None: + result["google.thoughts_token_count"] = thoughts_count # type: ignore[typeddict-unknown-key] + return result + + def service_url(self) -> str: + """Get the service URL for the chat client.""" + return "https://generativelanguage.googleapis.com" + + # endregion diff --git a/python/packages/google/agent_framework_google/py.typed b/python/packages/google/agent_framework_google/py.typed new file mode 100644 index 0000000000..e69de29bb2 diff --git a/python/packages/google/pyproject.toml b/python/packages/google/pyproject.toml new file mode 100644 index 0000000000..484293f0b8 --- /dev/null +++ b/python/packages/google/pyproject.toml @@ -0,0 +1,93 @@ +[project] +name = "agent-framework-google" +description = "Google AI (Gemini API) integration for Microsoft Agent Framework." 
+authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] +readme = "README.md" +requires-python = ">=3.10" +version = "1.0.0b260304" +license-files = ["LICENSE"] +urls.homepage = "https://aka.ms/agent-framework" +urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" +urls.release_notes = "https://github.com/microsoft/agent-framework/releases?q=tag%3Apython-1&expanded=true" +urls.issues = "https://github.com/microsoft/agent-framework/issues" +classifiers = [ + "License :: OSI Approved :: MIT License", + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", + "Typing :: Typed", +] +dependencies = [ + "agent-framework-core>=1.0.0rc3", + "google-genai>=0.2,<1", +] + +[tool.uv] +prerelease = "if-necessary-or-explicit" +environments = [ + "sys_platform == 'darwin'", + "sys_platform == 'linux'", + "sys_platform == 'win32'" +] + +[tool.uv-dynamic-versioning] +fallback-version = "0.0.0" + +[tool.pytest.ini_options] +testpaths = 'tests' +addopts = "-ra -q -r fEX" +asyncio_mode = "auto" +asyncio_default_fixture_loop_scope = "function" +filterwarnings = [ + "ignore:Support for class-based `config` is deprecated:DeprecationWarning:pydantic.*" +] +timeout = 120 +markers = [ + "integration: marks tests as integration tests that require external services", +] + +[tool.ruff] +extend = "../../pyproject.toml" + +[tool.coverage.run] +omit = [ + "**/__init__.py" +] + +[tool.pyright] +extends = "../../pyproject.toml" +exclude = ['tests'] + +[tool.mypy] +plugins = ['pydantic.mypy'] +strict = true +python_version = "3.10" +ignore_missing_imports = true +disallow_untyped_defs = true +no_implicit_optional = true +check_untyped_defs = true +warn_return_any = true +show_error_codes 
# Copyright (c) Microsoft. All rights reserved.
import os
from typing import Any
from unittest.mock import AsyncMock, MagicMock

from pytest import fixture


@fixture
def exclude_list(request: Any) -> list[str]:
    """Environment-variable names a test wants removed (via indirect param)."""
    return getattr(request, "param", [])


@fixture
def override_env_param_dict(request: Any) -> dict[str, str]:
    """Environment-variable overrides supplied by a test (via indirect param)."""
    return getattr(request, "param", {})


@fixture
def google_ai_unit_test_env(monkeypatch, exclude_list, override_env_param_dict):  # type: ignore
    """Set deterministic GOOGLE_AI_* environment variables for unit tests."""
    exclude = exclude_list if exclude_list is not None else []
    overrides = override_env_param_dict if override_env_param_dict is not None else {}

    # Ensure tests are deterministic regardless of the machine environment.
    for name in list(os.environ):
        if name.startswith("GOOGLE_AI_"):
            monkeypatch.delenv(name, raising=False)  # type: ignore

    env_vars = {
        "GOOGLE_AI_API_KEY": "test-api-key-12345",
        "GOOGLE_AI_CHAT_MODEL_ID": "gemini-2.5-flash",
    }
    env_vars.update(overrides)  # type: ignore

    for name, value in env_vars.items():
        if name in exclude:
            # An excluded variable must be absent, not merely overridden.
            monkeypatch.delenv(name, raising=False)  # type: ignore
            continue
        monkeypatch.setenv(name, value)  # type: ignore

    return env_vars


@fixture
def mock_google_client() -> MagicMock:
    """A mock google-genai client exposing the async generate_content APIs."""
    client = MagicMock()

    # Mirror client.aio.models.generate_content{,_stream} used by the chat client.
    client.aio = MagicMock()
    client.aio.models = MagicMock()
    client.aio.models.generate_content = AsyncMock()
    client.aio.models.generate_content_stream = AsyncMock()

    return client
# Copyright (c) Microsoft. All rights reserved.
import base64
import os
from typing import Annotated
from unittest.mock import AsyncMock, MagicMock, patch

import pytest
from agent_framework import (
    ChatOptions,
    ChatResponseUpdate,
    Content,
    Message,
    SupportsChatGetResponse,
    tool,
)
from agent_framework._settings import load_settings
from agent_framework._tools import normalize_function_invocation_configuration
from google.genai import types
from pydantic import Field

from agent_framework_google import GoogleAIChatClient
from agent_framework_google._chat_client import GoogleAISettings

# Test constants
VALID_PNG_BASE64 = b"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="

# Integration tests only run against a real key, never the unit-test placeholder.
skip_if_google_integration_tests_disabled = pytest.mark.skipif(
    os.getenv("GOOGLE_AI_API_KEY", "") in ("", "test-api-key-12345"),
    reason="No real GOOGLE_AI_API_KEY provided; skipping integration tests.",
)


def create_test_google_client(
    mock_google_client: MagicMock,
    model_id: str | None = None,
    google_settings: GoogleAISettings | None = None,
) -> GoogleAIChatClient:
    """Build a GoogleAIChatClient for tests, bypassing normal validation."""
    if google_settings is None:
        google_settings = load_settings(
            GoogleAISettings,
            env_prefix="GOOGLE_AI_",
            api_key="test-api-key-12345",
            chat_model_id="gemini-2.5-flash",
        )

    # Skip __init__ entirely so no real credentials or network client are needed.
    instance = object.__new__(GoogleAIChatClient)

    instance.google_client = mock_google_client
    instance.model_id = model_id or google_settings["chat_model_id"]
    instance._last_call_id_name = None
    instance._function_name_map = {}
    instance.additional_properties = {}
    instance.middleware = None
    instance.chat_middleware = []
    instance.function_middleware = []
    instance.function_invocation_configuration = normalize_function_invocation_configuration(None)

    return instance


# region Settings Tests


def test_google_ai_settings_init(google_ai_unit_test_env: dict[str, str]) -> None:
    """Settings load from the GOOGLE_AI_* environment variables."""
    settings = load_settings(GoogleAISettings, env_prefix="GOOGLE_AI_")

    assert settings["api_key"] is not None
    assert settings["api_key"].get_secret_value() == google_ai_unit_test_env["GOOGLE_AI_API_KEY"]
    assert settings["chat_model_id"] == google_ai_unit_test_env["GOOGLE_AI_CHAT_MODEL_ID"]


def test_google_ai_settings_init_with_explicit_values() -> None:
    """Explicitly passed values take effect without environment variables."""
    settings = load_settings(
        GoogleAISettings,
        env_prefix="GOOGLE_AI_",
        api_key="custom-api-key",
        chat_model_id="gemini-2.5-flash",
    )

    assert settings["api_key"] is not None
    assert settings["api_key"].get_secret_value() == "custom-api-key"
    assert settings["chat_model_id"] == "gemini-2.5-flash"


@pytest.mark.parametrize("exclude_list", [["GOOGLE_AI_API_KEY"]], indirect=True)
def test_google_ai_settings_missing_api_key(google_ai_unit_test_env: dict[str, str]) -> None:
    """A missing API key loads as None rather than raising."""
    settings = load_settings(GoogleAISettings, env_prefix="GOOGLE_AI_")
    assert settings["api_key"] is None
    assert settings["chat_model_id"] == google_ai_unit_test_env["GOOGLE_AI_CHAT_MODEL_ID"]


# endregion

# region Client Initialization Tests


def test_google_client_init_with_client(mock_google_client: MagicMock) -> None:
    """An injected google_client is used as-is."""
    client = create_test_google_client(mock_google_client, model_id="gemini-2.5-flash")

    assert client.google_client is mock_google_client
    assert client.model_id == "gemini-2.5-flash"
    assert isinstance(client, SupportsChatGetResponse)


def test_google_client_init_auto_create_client(google_ai_unit_test_env: dict[str, str]) -> None:
    """Without an injected client, the constructor creates one itself."""
    client = GoogleAIChatClient(
        api_key=google_ai_unit_test_env["GOOGLE_AI_API_KEY"],
        model_id=google_ai_unit_test_env["GOOGLE_AI_CHAT_MODEL_ID"],
    )

    assert client.google_client is not None
    assert client.model_id == google_ai_unit_test_env["GOOGLE_AI_CHAT_MODEL_ID"]


def test_google_client_init_missing_api_key() -> None:
    """Construction fails loudly when no API key can be resolved."""
    with patch("agent_framework_google._chat_client.load_settings") as mock_load:
        mock_load.return_value = {"api_key": None, "chat_model_id": "gemini-2.5-flash"}

        with pytest.raises(Exception, match="Google AI API key is required"):
            GoogleAIChatClient()


# endregion

# region Message Conversion Tests


def test_prepare_message_for_google_text(mock_google_client: MagicMock) -> None:
    """A plain user text message maps to a single-part user content."""
    client = create_test_google_client(mock_google_client)
    message = Message(role="user", text="Hello, world!")

    converted = client._prepare_message_for_google(message)

    assert converted["role"] == "user"
    assert len(converted["parts"]) == 1
    assert converted["parts"][0]["text"] == "Hello, world!"


def test_prepare_message_for_google_assistant(mock_google_client: MagicMock) -> None:
    """Assistant messages map to Google's "model" role."""
    client = create_test_google_client(mock_google_client)
    message = Message(role="assistant", text="Hello back!")

    converted = client._prepare_message_for_google(message)

    assert converted["role"] == "model"
    assert len(converted["parts"]) == 1
    assert converted["parts"][0]["text"] == "Hello back!"
def test_convert_message_function_call(mock_google_client: MagicMock) -> None:
    """A function-call content maps to a Google function_call part."""
    client = create_test_google_client(mock_google_client)
    message = Message(
        role="assistant",
        contents=[
            Content.from_function_call(
                call_id="call_123",
                name="get_weather",
                arguments={"location": "Seattle"},
            )
        ],
    )

    converted = client._prepare_message_for_google(message)

    assert converted["role"] == "model"
    parts = converted["parts"]
    assert len(parts) == 1
    assert parts[0]["function_call"]["name"] == "get_weather"
    assert parts[0]["function_call"]["args"]["location"] == "Seattle"


def test_convert_message_function_call_string_arguments(mock_google_client: MagicMock) -> None:
    """JSON-string arguments are parsed before being sent to Google."""
    client = create_test_google_client(mock_google_client)
    message = Message(
        role="assistant",
        contents=[
            Content.from_function_call(
                call_id="call_456",
                name="search",
                arguments='{"query": "hello"}',
            )
        ],
    )

    converted = client._prepare_message_for_google(message)

    assert converted["role"] == "model"
    parts = converted["parts"]
    assert len(parts) == 1
    assert parts[0]["function_call"]["name"] == "search"
    assert parts[0]["function_call"]["args"]["query"] == "hello"


def test_convert_message_function_result(mock_google_client: MagicMock) -> None:
    """A function result maps to a Google function_response part."""
    client = create_test_google_client(mock_google_client)
    message = Message(
        role="tool",
        contents=[
            Content.from_function_result(
                call_id="call_123",
                result="72 degrees and sunny",
            )
        ],
    )

    converted = client._prepare_message_for_google(message)

    assert converted["role"] == "function"
    parts = converted["parts"]
    assert len(parts) == 1
    # FunctionResultContent uses call_id as the name since Google requires it
    assert parts[0]["function_response"]["name"] == "call_123"
    assert parts[0]["function_response"]["response"]["result"] == "72 degrees and sunny"


def test_convert_message_with_image_data(mock_google_client: MagicMock) -> None:
    """Inline image bytes map to an inline_data part."""
    client = create_test_google_client(mock_google_client)

    # A minimal valid 1x1 PNG
    base64_image = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg=="
    image_bytes = base64.b64decode(base64_image)

    message = Message(
        role="user",
        contents=[Content.from_data(media_type="image/png", data=image_bytes)],
    )

    converted = client._prepare_message_for_google(message)

    assert converted["role"] == "user"
    parts = converted["parts"]
    assert len(parts) == 1
    assert "inline_data" in parts[0]
    assert parts[0]["inline_data"]["mime_type"] == "image/png"
    assert isinstance(parts[0]["inline_data"]["data"], bytes)


def test_convert_message_with_unsupported_media_type(mock_google_client: MagicMock) -> None:
    """Data content with an unsupported media type is skipped."""
    client = create_test_google_client(mock_google_client)
    message = Message(
        role="user",
        contents=[Content.from_data(media_type="application/pdf", data=b"PDF data")],
    )

    converted = client._prepare_message_for_google(message)

    # Unsupported media type should be skipped
    assert converted["role"] == "user"
    assert len(converted["parts"]) == 0


def test_convert_message_with_image_uri(mock_google_client: MagicMock) -> None:
    """An image URI maps to a file_data part."""
    client = create_test_google_client(mock_google_client)
    message = Message(
        role="user",
        contents=[Content.from_uri(uri="https://example.com/image.png", media_type="image/png")],
    )

    converted = client._prepare_message_for_google(message)

    assert converted["role"] == "user"
    parts = converted["parts"]
    assert len(parts) == 1
    assert "file_data" in parts[0]
    assert parts[0]["file_data"]["mime_type"] == "image/png"
    assert parts[0]["file_data"]["file_uri"] == "https://example.com/image.png"


def test_convert_message_with_unsupported_uri_type(mock_google_client: MagicMock) -> None:
    """URI content with an unsupported media type is skipped."""
    client = create_test_google_client(mock_google_client)
    message = Message(
        role="user",
        contents=[Content.from_uri(uri="https://example.com/video.mp4", media_type="video/mp4")],
    )

    converted = client._prepare_message_for_google(message)

    assert converted["role"] == "user"
    assert len(converted["parts"]) == 0


def test_prepare_message_for_google_multiple_text(mock_google_client: MagicMock) -> None:
    """Multiple text contents become multiple parts, in order."""
    client = create_test_google_client(mock_google_client)
    message = Message(
        role="user",
        contents=[
            Content.from_text("First part"),
            Content.from_text("Second part"),
        ],
    )

    converted = client._prepare_message_for_google(message)

    assert converted["role"] == "user"
    parts = converted["parts"]
    assert len(parts) == 2
    assert parts[0]["text"] == "First part"
    assert parts[1]["text"] == "Second part"


def test_prepare_messages_for_google_with_system(mock_google_client: MagicMock) -> None:
    """System messages are excluded from the converted message list."""
    client = create_test_google_client(mock_google_client)
    messages = [
        Message(role="system", text="You are a helpful assistant."),
        Message(role="user", text="Hello!"),
    ]

    converted = client._prepare_messages_for_google(messages)

    # System message should be skipped
    assert len(converted) == 1
    assert converted[0]["role"] == "user"
    assert converted[0]["parts"][0]["text"] == "Hello!"
def test_prepare_messages_for_google_without_system(mock_google_client: MagicMock) -> None:
    """A user/assistant conversation converts one-to-one, with role mapping."""
    client = create_test_google_client(mock_google_client)
    messages = [
        Message(role="user", text="Hello!"),
        Message(role="assistant", text="Hi there!"),
    ]

    converted = client._prepare_messages_for_google(messages)

    assert len(converted) == 2
    assert converted[0]["role"] == "user"
    assert converted[1]["role"] == "model"


def test_convert_messages_all_system_messages_error(mock_google_client: MagicMock) -> None:
    """Filtering out system messages must not leave an empty request."""
    client = create_test_google_client(mock_google_client)
    messages = [
        Message(role="system", text="System instruction 1"),
        Message(role="system", text="System instruction 2"),
    ]

    with pytest.raises(ValueError, match="No messages to send to Google AI after filtering"):
        client._prepare_messages_for_google(messages)


# endregion

# region Tool Conversion Tests


def test_convert_tools_to_google_format_with_function_tool(mock_google_client: MagicMock) -> None:
    """A FunctionTool becomes a Google function declaration."""
    client = create_test_google_client(mock_google_client)

    @tool(approval_mode="never_require")
    def get_weather(location: Annotated[str, Field(description="Location to get weather for")]) -> str:
        """Get weather for a location."""
        return f"Weather for {location}"

    converted = client._prepare_tools_for_google({"tools": [get_weather]})

    assert converted is not None
    assert len(converted) == 1
    declaration = converted[0].function_declarations[0]
    assert declaration.name == "get_weather"
    assert "Get weather for a location" in declaration.description


def test_convert_tools_to_google_format_empty_list(mock_google_client: MagicMock) -> None:
    """An empty tools list yields no Google tools."""
    client = create_test_google_client(mock_google_client)
    assert client._prepare_tools_for_google({"tools": []}) is None


def test_convert_tools_to_google_format_none(mock_google_client: MagicMock) -> None:
    """Absent tools yield no Google tools."""
    client = create_test_google_client(mock_google_client)
    assert client._prepare_tools_for_google({}) is None


def test_convert_tools_to_google_format_unsupported_type(mock_google_client: MagicMock) -> None:
    """Entries that are not recognized tools produce no Google tools."""
    client = create_test_google_client(mock_google_client)
    assert client._prepare_tools_for_google({"tools": ["not_a_function", 123]}) is None


def test_convert_tools_google_search(mock_google_client: MagicMock) -> None:
    """The google_search hosted-tool dict converts to a types.Tool."""
    client = create_test_google_client(mock_google_client)
    google_search_tool = GoogleAIChatClient.get_google_search_tool()

    converted = client._prepare_tools_for_google({"tools": [google_search_tool]})

    assert converted is not None
    assert len(converted) == 1
    assert isinstance(converted[0], types.Tool)


def test_convert_tools_code_execution(mock_google_client: MagicMock) -> None:
    """The code_execution hosted-tool dict converts to a types.Tool."""
    client = create_test_google_client(mock_google_client)
    code_exec_tool = GoogleAIChatClient.get_code_execution_tool()

    converted = client._prepare_tools_for_google({"tools": [code_exec_tool]})

    assert converted is not None
    assert len(converted) == 1
    assert isinstance(converted[0], types.Tool)


def test_convert_tools_dict_tool(mock_google_client: MagicMock) -> None:
    """Unrecognized dict-based tools are passed through unchanged."""
    client = create_test_google_client(mock_google_client)

    converted = client._prepare_tools_for_google({"tools": [{"type": "custom", "name": "custom_tool"}]})

    assert converted is not None
    assert len(converted) == 1
    assert converted[0] == {"type": "custom", "name": "custom_tool"}


# endregion

# region Tool Choice Tests


def test_prepare_tool_config_auto(mock_google_client: MagicMock) -> None:
    """tool_choice "auto" maps to Google's AUTO calling mode."""
    client = create_test_google_client(mock_google_client)
    config = client._prepare_tool_config({"tool_choice": "auto"})
    assert config is not None
    assert config.function_calling_config.mode == "AUTO"


def test_prepare_tool_config_required(mock_google_client: MagicMock) -> None:
    """tool_choice "required" maps to Google's ANY calling mode."""
    client = create_test_google_client(mock_google_client)
    config = client._prepare_tool_config({"tool_choice": "required"})
    assert config is not None
    assert config.function_calling_config.mode == "ANY"


def test_prepare_tool_config_required_specific(mock_google_client: MagicMock) -> None:
    """Requiring a specific function restricts allowed_function_names."""
    client = create_test_google_client(mock_google_client)
    config = client._prepare_tool_config({"tool_choice": {"mode": "required", "required_function_name": "get_weather"}})
    assert config is not None
    assert config.function_calling_config.mode == "ANY"
    assert config.function_calling_config.allowed_function_names == ["get_weather"]


def test_prepare_tool_config_none(mock_google_client: MagicMock) -> None:
    """tool_choice "none" maps to Google's NONE calling mode."""
    client = create_test_google_client(mock_google_client)
    config = client._prepare_tool_config({"tool_choice": "none"})
    assert config is not None
    assert config.function_calling_config.mode == "NONE"


# endregion

# region Config Creation Tests


def test_create_config_with_system_message(mock_google_client: MagicMock) -> None:
    """System messages are lifted into system_instruction."""
    client = create_test_google_client(mock_google_client)
    messages = [
        Message(role="system", text="You are a helpful assistant."),
        Message(role="user", text="Hello!"),
    ]
    chat_options: ChatOptions = {}

    config = client._create_config(chat_options, messages)

    assert isinstance(config, types.GenerateContentConfig)
    assert config.system_instruction == "You are a helpful assistant."


def test_create_config_with_multiple_system_messages(mock_google_client: MagicMock) -> None:
    """Multiple system messages are merged into one instruction."""
    client = create_test_google_client(mock_google_client)
    messages = [
        Message(role="system", text="You are a helpful assistant."),
        Message(role="system", text="Be concise."),
        Message(role="user", text="Hello!"),
    ]
    chat_options: ChatOptions = {}

    config = client._create_config(chat_options, messages)

    assert isinstance(config, types.GenerateContentConfig)
    assert "You are a helpful assistant." in config.system_instruction
    assert "Be concise." in config.system_instruction


def test_create_config_without_system_message(mock_google_client: MagicMock) -> None:
    """No system messages means no system_instruction."""
    client = create_test_google_client(mock_google_client)
    messages = [
        Message(role="user", text="Hello!"),
    ]
    chat_options: ChatOptions = {}

    config = client._create_config(chat_options, messages)

    assert isinstance(config, types.GenerateContentConfig)
    assert config.system_instruction is None


def test_create_config_with_temperature(mock_google_client: MagicMock) -> None:
    """Temperature is forwarded to the Google config."""
    client = create_test_google_client(mock_google_client)
    messages = [Message(role="user", text="Hello!")]
    chat_options: ChatOptions = {"temperature": 0.7}

    config = client._create_config(chat_options, messages)

    assert isinstance(config, types.GenerateContentConfig)
    assert config.temperature == 0.7


def test_create_config_with_chat_options(mock_google_client: MagicMock) -> None:
    """Several sampling options are forwarded together."""
    client = create_test_google_client(mock_google_client)
    messages = [Message(role="user", text="Hello!")]
    chat_options: ChatOptions = {
        "temperature": 0.7,
        "max_tokens": 100,
        "top_p": 0.9,
    }

    config = client._create_config(chat_options, messages)

    assert isinstance(config, types.GenerateContentConfig)
    assert config.temperature == 0.7
    assert config.max_output_tokens == 100
    assert config.top_p == 0.9


def test_create_config_with_stop_sequences(mock_google_client: MagicMock) -> None:
    """Stop strings map to Google stop_sequences."""
    client = create_test_google_client(mock_google_client)
    chat_options: ChatOptions = {"stop": ["END", "STOP"]}

    config = client._create_config(chat_options)

    assert config.stop_sequences == ["END", "STOP"]
def test_create_config_with_tools(mock_google_client: MagicMock) -> None:
    """Converted tools are attached to the generated config."""
    client = create_test_google_client(mock_google_client)

    @tool(approval_mode="never_require")
    def get_weather(location: str) -> str:
        """Get weather for a location."""
        return f"Weather for {location}"

    chat_options: ChatOptions = {"tools": [get_weather]}

    config = client._create_config(chat_options)

    assert config.tools is not None
    assert len(config.tools) == 1


# endregion

# region Response Processing Tests

# Sentinel: leave a mock attribute untouched (auto-created child mock).
_UNSET = object()


def _mk_text_part(text: str | None) -> MagicMock:
    """Build a mock response part carrying only text."""
    part = MagicMock()
    part.text = text
    return part


def _mk_response(parts: list, finish_name: object = _UNSET, usage: object = _UNSET) -> MagicMock:
    """Build a mock GenerateContentResponse with a single candidate.

    finish_name and usage are only assigned when provided, mirroring tests
    that deliberately leave those attributes as auto-created mocks.
    """
    content = MagicMock()
    content.parts = parts
    candidate = MagicMock()
    candidate.content = content
    if finish_name is not _UNSET:
        candidate.finish_reason.name = finish_name
    response = MagicMock()
    response.candidates = [candidate]
    if usage is not _UNSET:
        response.usage_metadata = usage
    return response


def test_process_response_with_text(mock_google_client: MagicMock) -> None:
    """A text part round-trips into text content, with usage captured."""
    client = create_test_google_client(mock_google_client)

    usage = MagicMock()
    usage.prompt_token_count = 10
    usage.candidates_token_count = 5
    response = _mk_response([_mk_text_part("Hello, world!")], finish_name="STOP", usage=usage)

    result = client._process_response(response, {})

    assert len(result.messages) == 1
    assert result.messages[0].role == "assistant"
    text_contents = [c for c in result.messages[0].contents if c.type == "text"]
    assert len(text_contents) == 1
    assert text_contents[0].text == "Hello, world!"
    assert result.finish_reason == "stop"
    assert result.usage_details is not None
    assert result.usage_details["input_token_count"] == 10
    assert result.usage_details["output_token_count"] == 5


def test_process_response_with_function_call(mock_google_client: MagicMock) -> None:
    """A function_call part becomes function-call content with parsed args."""
    client = create_test_google_client(mock_google_client)

    function_call = MagicMock()
    function_call.name = "get_weather"
    function_call.args = {"location": "Seattle"}
    part = MagicMock()
    part.text = None
    part.function_call = function_call
    response = _mk_response([part], finish_name="STOP", usage=None)

    result = client._process_response(response, {})

    assert len(result.messages) == 1
    assert len(result.messages[0].contents) == 1
    call_content = result.messages[0].contents[0]
    assert call_content.type == "function_call"
    assert call_content.name == "get_weather"
    assert call_content.arguments == {"location": "Seattle"}


def test_process_response_with_multiple_parts(mock_google_client: MagicMock) -> None:
    """Multiple text parts are preserved in order."""
    client = create_test_google_client(mock_google_client)
    response = _mk_response(
        [_mk_text_part("First part. "), _mk_text_part("Second part.")],
        finish_name="STOP",
        usage=None,
    )

    result = client._process_response(response, {})

    assert len(result.messages) == 1
    text_contents = [c for c in result.messages[0].contents if c.type == "text"]
    assert len(text_contents) == 2
    assert text_contents[0].text == "First part. "
    assert text_contents[1].text == "Second part."


def test_process_response_finish_reason_stop(mock_google_client: MagicMock) -> None:
    """STOP maps to the framework's "stop" finish reason."""
    client = create_test_google_client(mock_google_client)
    response = _mk_response([_mk_text_part("Complete response")], finish_name="STOP", usage=None)

    result = client._process_response(response, {})

    assert result.finish_reason == "stop"


def test_process_response_finish_reason_max_tokens(mock_google_client: MagicMock) -> None:
    """MAX_TOKENS maps to "length"."""
    client = create_test_google_client(mock_google_client)
    response = _mk_response([_mk_text_part("Partial response")], finish_name="MAX_TOKENS", usage=None)

    result = client._process_response(response, {})

    assert result.finish_reason == "length"


def test_process_response_finish_reason_safety(mock_google_client: MagicMock) -> None:
    """SAFETY maps to "content_filter"."""
    client = create_test_google_client(mock_google_client)
    response = _mk_response([_mk_text_part("Blocked content")], finish_name="SAFETY", usage=None)

    result = client._process_response(response, {})

    assert result.finish_reason == "content_filter"


def test_process_response_finish_reason_recitation(mock_google_client: MagicMock) -> None:
    """RECITATION maps to "content_filter"."""
    client = create_test_google_client(mock_google_client)
    response = _mk_response([_mk_text_part("Recitation content")], finish_name="RECITATION", usage=None)

    result = client._process_response(response, {})

    assert result.finish_reason == "content_filter"


def test_process_response_usage_none(mock_google_client: MagicMock) -> None:
    """Without usage_metadata, no usage content is produced."""
    client = create_test_google_client(mock_google_client)
    response = _mk_response([_mk_text_part("Response")], finish_name="STOP", usage=None)

    result = client._process_response(response, {})

    # When usage_metadata is None, only the text content should be present
    text_contents = [c for c in result.messages[0].contents if c.type == "text"]
    assert len(text_contents) == 1


# endregion

# region Stream Processing Tests


def test_process_stream_chunk_with_text(mock_google_client: MagicMock) -> None:
    """A chunk carrying a text part yields an assistant update."""
    client = create_test_google_client(mock_google_client)
    chunk = _mk_response([_mk_text_part("Streamed text")])

    update = client._process_stream_chunk(chunk)

    assert update is not None
    assert isinstance(update, ChatResponseUpdate)
    assert update.role == "assistant"
    text_contents = [c for c in update.contents if c.type == "text"]
    assert len(text_contents) == 1
    assert text_contents[0].text == "Streamed text"


def test_process_stream_chunk_without_text(mock_google_client: MagicMock) -> None:
    """A chunk with no candidates and no usage is skipped."""
    client = create_test_google_client(mock_google_client)
    chunk = MagicMock()
    chunk.candidates = []
    chunk.usage_metadata = None

    update = client._process_stream_chunk(chunk)

    assert update is None
client._process_stream_chunk(mock_chunk) + + # Empty text should return None + assert result is None + + +def test_process_stream_chunk_none_text(mock_google_client: MagicMock) -> None: + """Test processing streaming chunk with None text.""" + client = create_test_google_client(mock_google_client) + + mock_part = MagicMock() + mock_part.text = None + mock_part.function_call = None + + mock_content = MagicMock() + mock_content.parts = [mock_part] + + mock_candidate = MagicMock() + mock_candidate.content = mock_content + mock_candidate.finish_reason = None + + mock_chunk = MagicMock() + mock_chunk.candidates = [mock_candidate] + mock_chunk.usage_metadata = None + + result = client._process_stream_chunk(mock_chunk) + + # None text should return None + assert result is None + + +def test_process_stream_chunk_with_function_call(mock_google_client: MagicMock) -> None: + """Test processing streaming chunk with function call.""" + client = create_test_google_client(mock_google_client) + mock_fc = MagicMock() + mock_fc.name = "get_weather" + mock_fc.args = {"location": "Seattle"} + mock_part = MagicMock() + mock_part.text = None + mock_part.function_call = mock_fc + mock_content = MagicMock() + mock_content.parts = [mock_part] + mock_candidate = MagicMock() + mock_candidate.content = mock_content + mock_candidate.finish_reason = None + mock_chunk = MagicMock() + mock_chunk.candidates = [mock_candidate] + mock_chunk.usage_metadata = None + result = client._process_stream_chunk(mock_chunk) + assert result is not None + assert isinstance(result, ChatResponseUpdate) + fc_contents = [c for c in result.contents if c.type == "function_call"] + assert len(fc_contents) == 1 + assert fc_contents[0].name == "get_weather" + + +# endregion + +# region Async API Tests + + +async def test_inner_get_response(mock_google_client: MagicMock) -> None: + """Test _inner_get_response method.""" + client = create_test_google_client(mock_google_client) + + # Setup mock response + mock_part = 
MagicMock() + mock_part.text = "Response text" + + mock_content = MagicMock() + mock_content.parts = [mock_part] + + mock_candidate = MagicMock() + mock_candidate.content = mock_content + mock_candidate.finish_reason.name = "STOP" + + mock_response = MagicMock() + mock_response.candidates = [mock_candidate] + mock_response.usage_metadata = None + + mock_google_client.aio.models.generate_content = AsyncMock(return_value=mock_response) + + messages = [Message(role="user", text="Hello!")] + options: ChatOptions = {"max_tokens": 100} + + result = await client._inner_get_response(messages=messages, options=options) + + assert result is not None + assert len(result.messages) == 1 + text_contents = [c for c in result.messages[0].contents if c.type == "text"] + assert text_contents[0].text == "Response text" + mock_google_client.aio.models.generate_content.assert_called_once() + + +async def test_inner_get_response_with_system_message(mock_google_client: MagicMock) -> None: + """Test _inner_get_response passes system instruction through config.""" + client = create_test_google_client(mock_google_client) + + mock_part = MagicMock() + mock_part.text = "Response" + + mock_content = MagicMock() + mock_content.parts = [mock_part] + + mock_candidate = MagicMock() + mock_candidate.content = mock_content + mock_candidate.finish_reason.name = "STOP" + + mock_response = MagicMock() + mock_response.candidates = [mock_candidate] + mock_response.usage_metadata = None + + mock_google_client.aio.models.generate_content = AsyncMock(return_value=mock_response) + + messages = [ + Message(role="system", text="You are helpful."), + Message(role="user", text="Hello!"), + ] + options: ChatOptions = {} + + result = await client._inner_get_response(messages=messages, options=options) + + assert result is not None + # Verify generate_content was called with system_instruction in config + call_kwargs = mock_google_client.aio.models.generate_content.call_args + config = 
call_kwargs.kwargs.get("config") or call_kwargs[1].get("config") + assert config.system_instruction == "You are helpful." + + +async def test_inner_get_response_streaming(mock_google_client: MagicMock) -> None: + """Test _inner_get_response method with streaming.""" + client = create_test_google_client(mock_google_client) + + # Create mock chunks + mock_part1 = MagicMock() + mock_part1.text = "First " + + mock_content1 = MagicMock() + mock_content1.parts = [mock_part1] + + mock_candidate1 = MagicMock() + mock_candidate1.content = mock_content1 + + mock_chunk1 = MagicMock() + mock_chunk1.candidates = [mock_candidate1] + + mock_part2 = MagicMock() + mock_part2.text = "Second" + + mock_content2 = MagicMock() + mock_content2.parts = [mock_part2] + + mock_candidate2 = MagicMock() + mock_candidate2.content = mock_content2 + + mock_chunk2 = MagicMock() + mock_chunk2.candidates = [mock_candidate2] + + # Setup async iterator mock + async def mock_stream(): + yield mock_chunk1 + yield mock_chunk2 + + mock_google_client.aio.models.generate_content_stream = AsyncMock(return_value=mock_stream()) + + messages = [Message(role="user", text="Hello!")] + options: ChatOptions = {} + + chunks: list[ChatResponseUpdate] = [] + async for chunk in client._inner_get_response( # type: ignore[attr-defined] + messages=messages, options=options, stream=True + ): + if chunk: + chunks.append(chunk) + + assert len(chunks) == 2 + assert chunks[0].contents[0].text == "First " + assert chunks[1].contents[0].text == "Second" + + +async def test_inner_get_response_with_tools(mock_google_client: MagicMock) -> None: + """Test _inner_get_response with tools.""" + client = create_test_google_client(mock_google_client) + + mock_fc = MagicMock() + mock_fc.name = "get_weather" + mock_fc.args = {"location": "Seattle"} + + mock_part = MagicMock() + mock_part.text = None + mock_part.function_call = mock_fc + + mock_content = MagicMock() + mock_content.parts = [mock_part] + + mock_candidate = MagicMock() + 
mock_candidate.content = mock_content + mock_candidate.finish_reason.name = "STOP" + + mock_response = MagicMock() + mock_response.candidates = [mock_candidate] + mock_response.usage_metadata = None + + mock_google_client.aio.models.generate_content = AsyncMock(return_value=mock_response) + + @tool(approval_mode="never_require") + def get_weather(location: str) -> str: + """Get weather for a location.""" + return f"Weather for {location}" + + messages = [Message(role="user", text="What's the weather in Seattle?")] + options: ChatOptions = {"tools": [get_weather]} + + result = await client._inner_get_response(messages=messages, options=options) + + assert result is not None + assert len(result.messages) == 1 + function_calls = [c for c in result.messages[0].contents if c.type == "function_call"] + assert len(function_calls) == 1 + assert function_calls[0].name == "get_weather" + + +# endregion + +# region Integration Tests + + +@tool(approval_mode="never_require") +def get_weather( + location: Annotated[str, Field(description="The location to get the weather for.")], +) -> str: + """Get the weather for a location.""" + return f"The weather in {location} is sunny and 72 degrees" + + +@pytest.mark.flaky +@pytest.mark.integration +@skip_if_google_integration_tests_disabled +async def test_google_client_integration_basic_chat() -> None: + """Integration test for basic chat completion.""" + client = GoogleAIChatClient() + + messages = [Message(role="user", text="Say 'Hello, World!' 
and nothing else.")] + + response = await client.get_response(messages=messages, options={"max_tokens": 50}) + + assert response is not None + assert len(response.messages) > 0 + assert response.messages[0].role == "assistant" + assert len(response.messages[0].text) > 0 + + +@pytest.mark.flaky +@pytest.mark.integration +@skip_if_google_integration_tests_disabled +async def test_google_client_integration_streaming_chat() -> None: + """Integration test for streaming chat completion.""" + client = GoogleAIChatClient() + + messages = [Message(role="user", text="Count from 1 to 5.")] + + chunks = [] + async for chunk in client.get_response(messages=messages, stream=True, options={"max_tokens": 50}): + chunks.append(chunk) + + assert len(chunks) > 0 + assert any(chunk.contents for chunk in chunks) + + +@pytest.mark.flaky +@pytest.mark.integration +@skip_if_google_integration_tests_disabled +async def test_google_client_integration_function_calling() -> None: + """Integration test for function calling.""" + client = GoogleAIChatClient() + + messages = [Message(role="user", text="What's the weather in San Francisco?")] + tools = [get_weather] + + response = await client.get_response( + messages=messages, + options={"tools": tools, "max_tokens": 100}, + ) + + assert response is not None + # Should contain function call + has_function_call = any(content.type == "function_call" for msg in response.messages for content in msg.contents) + assert has_function_call + + +@pytest.mark.flaky +@pytest.mark.integration +@skip_if_google_integration_tests_disabled +async def test_google_client_integration_with_system_message() -> None: + """Integration test with system message.""" + client = GoogleAIChatClient() + + messages = [ + Message(role="system", text="You are a pirate. 
Always respond like a pirate."), + Message(role="user", text="Hello!"), + ] + + response = await client.get_response(messages=messages, options={"max_tokens": 50}) + + assert response is not None + assert len(response.messages) > 0 + + +@pytest.mark.flaky +@pytest.mark.integration +@skip_if_google_integration_tests_disabled +async def test_google_client_integration_temperature_control() -> None: + """Integration test with temperature control.""" + client = GoogleAIChatClient() + + messages = [Message(role="user", text="Say hello.")] + + response = await client.get_response( + messages=messages, + options={"max_tokens": 20, "temperature": 0.0}, + ) + + assert response is not None + assert response.messages[0].text is not None + + +# endregion diff --git a/python/packages/google/tests/test_settings.py b/python/packages/google/tests/test_settings.py new file mode 100644 index 0000000000..ca45e6aa76 --- /dev/null +++ b/python/packages/google/tests/test_settings.py @@ -0,0 +1,61 @@ +# Copyright (c) Microsoft. All rights reserved. 
import pytest
from agent_framework._settings import load_settings

from agent_framework_google._chat_client import GoogleAISettings

# region GoogleAISettings Tests


def test_google_ai_settings_from_env(google_ai_unit_test_env: dict[str, str]) -> None:
    """Settings are populated purely from GOOGLE_AI_* environment variables."""
    settings = load_settings(GoogleAISettings, env_prefix="GOOGLE_AI_")

    assert settings["chat_model_id"] == google_ai_unit_test_env["GOOGLE_AI_CHAT_MODEL_ID"]
    api_key = settings["api_key"]
    assert api_key is not None
    assert api_key.get_secret_value() == google_ai_unit_test_env["GOOGLE_AI_API_KEY"]


def test_google_ai_settings_from_params() -> None:
    """Explicit keyword arguments populate the settings directly."""
    settings = load_settings(
        GoogleAISettings,
        env_prefix="GOOGLE_AI_",
        api_key="test-key",
        chat_model_id="gemini-1.5-flash",
    )

    assert settings["chat_model_id"] == "gemini-1.5-flash"
    api_key = settings["api_key"]
    assert api_key is not None
    assert api_key.get_secret_value() == "test-key"


@pytest.mark.parametrize("exclude_list", [["GOOGLE_AI_API_KEY"]], indirect=True)
def test_google_ai_settings_missing_api_key(google_ai_unit_test_env: dict[str, str]) -> None:
    """A missing API key env var leaves api_key as None, model id intact."""
    settings = load_settings(GoogleAISettings, env_prefix="GOOGLE_AI_")

    assert settings["api_key"] is None
    assert settings["chat_model_id"] == google_ai_unit_test_env["GOOGLE_AI_CHAT_MODEL_ID"]


@pytest.mark.parametrize("exclude_list", [["GOOGLE_AI_CHAT_MODEL_ID"]], indirect=True)
def test_google_ai_settings_missing_model_id(google_ai_unit_test_env: dict[str, str]) -> None:
    """A missing model-id env var leaves chat_model_id as None, key intact."""
    settings = load_settings(GoogleAISettings, env_prefix="GOOGLE_AI_")

    assert settings["chat_model_id"] is None
    api_key = settings["api_key"]
    assert api_key is not None
    assert api_key.get_secret_value() == google_ai_unit_test_env["GOOGLE_AI_API_KEY"]


def test_google_ai_settings_override_env() -> None:
    """Explicit parameters take precedence over environment variables."""
    settings = load_settings(
        GoogleAISettings,
        env_prefix="GOOGLE_AI_",
        api_key="override-key",
        chat_model_id="gemini-2.0-flash",
    )

    assert settings["chat_model_id"] == "gemini-2.0-flash"
    api_key = settings["api_key"]
    assert api_key is not None
    assert api_key.get_secret_value() == "override-key"


# endregion
agent-framework-anthropic = { workspace = true } agent-framework-azure-ai = { workspace = true } agent-framework-azurefunctions = { workspace = true } -agent-framework-bedrock = { workspace = true } agent-framework-chatkit = { workspace = true } agent-framework-copilotstudio = { workspace = true } agent-framework-declarative = { workspace = true } agent-framework-devui = { workspace = true } +agent-framework-azure-cosmos = { workspace = true } +agent-framework-bedrock = { workspace = true } +agent-framework-claude = { workspace = true } agent-framework-durabletask = { workspace = true } agent-framework-foundry-local = { workspace = true } +agent-framework-github-copilot = { workspace = true } +agent-framework-google = { workspace = true } agent-framework-lab = { workspace = true } agent-framework-mem0 = { workspace = true } agent-framework-ollama = { workspace = true } +agent-framework-orchestrations = { workspace = true } agent-framework-purview = { workspace = true } agent-framework-redis = { workspace = true } -agent-framework-github-copilot = { workspace = true } -agent-framework-claude = { workspace = true } -agent-framework-orchestrations = { workspace = true } [tool.ruff] line-length = 120 target-version = "py310" fix = true include = ["*.py", "*.pyi", "**/pyproject.toml", "*.ipynb"] -exclude = ["scripts"] +exclude = ["docs/*", "run_tasks_in_packages_if_exists.py", "check_md_code_blocks.py"] extend-exclude = [ "[{][{]cookiecutter.package_name[}][}]", ] @@ -139,18 +151,14 @@ ignore = [ "D418", # allow overload to have a docstring "TD003", # allow missing link to todo issue "FIX002", # allow todo - "B027", # allow empty non-abstract method in ABC - "B905", # `zip()` without an explicit `strict=` parameter - "RUF067", # allow version detection in __init__.py + "B027" # allow empty non-abstract method in ABC ] [tool.ruff.lint.per-file-ignores] # Ignore all directories named `tests` and `samples`. 
"**/tests/**" = ["D", "INP", "TD", "ERA001", "RUF", "S"] -"samples/**" = ["D", "INP", "ERA001", "RUF", "S", "T201", "CPY"] +"samples/**" = ["D", "INP", "ERA001", "RUF", "S", "T201"] "*.ipynb" = ["CPY", "E501"] -# RUF070: Assignment before yield is intentional - context manager must exit before yielding -"**/agent_framework/_workflows/_workflow.py" = ["RUF070"] [tool.ruff.format] docstring-code-format = true @@ -163,18 +171,17 @@ notice-rgx = "^# Copyright \\(c\\) Microsoft\\. All rights reserved\\." min-file-size = 1 [tool.pytest.ini_options] -testpaths = ['packages/**/tests', 'packages/**/ag_ui_tests'] +testpaths = 'packages/**/tests' norecursedirs = '**/lab/**' addopts = "-ra -q -r fEX" asyncio_mode = "auto" asyncio_default_fixture_loop_scope = "function" filterwarnings = [] -timeout = 60 +timeout = 120 markers = [ "azure: marks tests as Azure provider specific", "azure-ai: marks tests as Azure AI provider specific", "openai: marks tests as OpenAI provider specific", - "integration: marks tests as integration tests that require external services", ] [tool.coverage.run] @@ -184,6 +191,7 @@ omit = [ [tool.pyright] include = ["agent_framework*"] +exclude = ["**/tests/**", "docs", "**/.venv/**", "packages/devui/frontend/**"] typeCheckingMode = "strict" reportUnnecessaryIsInstance = false reportMissingTypeStubs = false @@ -204,74 +212,72 @@ disallow_untyped_decorators = true [tool.bandit] targets = ["agent_framework"] -exclude_dirs = ["tests", "scripts", "samples"] +exclude_dirs = ["tests", "./run_tasks_in_packages_if_exists.py", "./check_md_code_blocks.py", "docs", "samples"] [tool.poe] executor.type = "uv" [tool.poe.tasks] -markdown-code-lint = "uv run python scripts/check_md_code_blocks.py 'README.md' './packages/**/README.md' './samples/**/*.md' --exclude cookiecutter-agent-framework-lab --exclude tau2 --exclude 'packages/devui/frontend' --exclude context_providers/azure_ai_search" -prek-install = "prek install --overwrite" -install = "uv sync --all-packages 
--all-extras --dev -U --prerelease=if-necessary-or-explicit" -test = "python scripts/run_tasks_in_packages_if_exists.py test" -fmt = "python scripts/run_tasks_in_packages_if_exists.py fmt" +markdown-code-lint = "uv run python check_md_code_blocks.py 'README.md' './packages/**/README.md' './samples/**/*.md' --exclude cookiecutter-agent-framework-lab --exclude tau2 --exclude 'packages/devui/frontend'" +pre-commit-install = "uv run pre-commit install --install-hooks --overwrite" +install = "uv sync --all-packages --all-extras --dev -U --prerelease=if-necessary-or-explicit --no-group=docs" +test = "python run_tasks_in_packages_if_exists.py test" +fmt = "python run_tasks_in_packages_if_exists.py fmt" format.ref = "fmt" -lint = "python scripts/run_tasks_in_packages_if_exists.py lint" -samples-lint = "ruff check samples --fix --exclude samples/autogen-migration,samples/semantic-kernel-migration --ignore E501,ASYNC,B901,TD002" -pyright = "python scripts/run_tasks_in_packages_if_exists.py pyright" -mypy = "python scripts/run_tasks_in_packages_if_exists.py mypy" -samples-syntax = "pyright -p pyrightconfig.samples.json --warnings" -typing = ["pyright", "mypy"] +lint = "python run_tasks_in_packages_if_exists.py lint" +pyright = "python run_tasks_in_packages_if_exists.py pyright" +mypy = "python run_tasks_in_packages_if_exists.py mypy" # cleaning -clean-dist-packages = "python scripts/run_tasks_in_packages_if_exists.py clean-dist" +clean-dist-packages = "python run_tasks_in_packages_if_exists.py clean-dist" clean-dist-meta = "rm -rf dist" clean-dist = ["clean-dist-packages", "clean-dist-meta"] # build and publish -build-packages = "python scripts/run_tasks_in_packages_if_exists.py build" +build-packages = "python run_tasks_in_packages_if_exists.py build" build-meta = "python -m flit build" build = ["build-packages", "build-meta"] publish = "uv publish" # combined checks -check-packages = "python scripts/run_tasks_in_packages_if_exists.py fmt lint pyright" -check = 
["check-packages", "samples-lint", "samples-syntax", "test", "markdown-code-lint"] +check = ["fmt", "lint", "pyright", "mypy", "test", "markdown-code-lint"] [tool.poe.tasks.all-tests-cov] cmd = """ pytest --import-mode=importlib --m "not integration" --cov=agent_framework --cov=agent_framework_core --cov=agent_framework_a2a --cov=agent_framework_ag_ui --cov=agent_framework_anthropic --cov=agent_framework_azure_ai ---cov=agent_framework_azure_ai_search --cov=agent_framework_azurefunctions --cov=agent_framework_chatkit --cov=agent_framework_copilotstudio --cov=agent_framework_mem0 --cov=agent_framework_purview --cov=agent_framework_redis ---cov=agent_framework_orchestrations ---cov=agent_framework_declarative --cov-config=pyproject.toml --cov-report=term-missing:skip-covered --ignore-glob=packages/lab/** --ignore-glob=packages/devui/** -rs --n logical --dist worksteal - packages/**/tests +-n logical --dist loadfile --dist worksteal +packages/**/tests """ [tool.poe.tasks.all-tests] cmd = """ pytest --import-mode=importlib --m "not integration" --ignore-glob=packages/lab/** --ignore-glob=packages/devui/** -rs --n logical --dist worksteal - packages/**/tests +-n logical --dist loadfile --dist worksteal +packages/**/tests +""" + +[tool.poe.tasks.azure-ai-tests] +cmd = """ +pytest --import-mode=importlib +-n logical --dist loadfile --dist worksteal +packages/azure-ai/tests """ [tool.poe.tasks.venv] @@ -282,41 +288,16 @@ args = [{ name = "python", default = "3.13", options = ['-p', '--python'] }] sequence = [ { ref = "venv --python $python"}, { ref = "install" }, - { ref = "prek-install" } + { ref = "pre-commit-install" } ] args = [{ name = "python", default = "3.13", options = ['-p', '--python'] }] -[tool.poe.tasks.prek-pyright] -cmd = "uv run python scripts/run_tasks_in_changed_packages.py pyright --files ${files}" -args = [{ name = "files", default = ".", positional = true, multiple = true }] - -[tool.poe.tasks.prek-check-packages] -cmd = "uv run python 
scripts/run_tasks_in_changed_packages.py fmt lint pyright --files ${files}" -args = [{ name = "files", default = ".", positional = true, multiple = true }] - -[tool.poe.tasks.prek-markdown-code-lint] -cmd = """uv run python scripts/check_md_code_blocks.py ${files} --no-glob - --exclude cookiecutter-agent-framework-lab --exclude tau2 - --exclude packages/devui/frontend --exclude context_providers/azure_ai_search""" +[tool.poe.tasks.pre-commit-markdown-code-lint] +cmd = "uv run python check_md_code_blocks.py ${files} --no-glob --exclude cookiecutter-agent-framework-lab --exclude tau2 --exclude 'packages/devui/frontend'" args = [{ name = "files", default = ".", positional = true, multiple = true }] -[tool.poe.tasks.prek-samples-check] -shell = """ -HAS_SAMPLES=false -for f in ${files}; do - case "$f" in - samples/*) HAS_SAMPLES=true; break ;; - esac -done -if [ "$HAS_SAMPLES" = true ]; then - echo "Sample files changed, running samples checks..." - uv run ruff check samples --fix --exclude samples/autogen-migration,samples/semantic-kernel-migration --ignore E501,ASYNC,B901,TD002 - uv run pyright -p pyrightconfig.samples.json --warnings -else - echo "No sample files changed, skipping samples checks" -fi -""" -interpreter = "bash" +[tool.poe.tasks.pre-commit-pyright] +cmd = "uv run python run_tasks_in_changed_packages.py pyright ${files}" args = [{ name = "files", default = ".", positional = true, multiple = true }] @@ -338,15 +319,16 @@ else echo ".") fi echo "Changed files: $CHANGED_FILES" -uv run python scripts/run_tasks_in_changed_packages.py mypy --files $CHANGED_FILES +uv run python run_tasks_in_changed_packages.py mypy $CHANGED_FILES """ interpreter = "bash" -[tool.poe.tasks.prek-check] +[tool.poe.tasks.pre-commit-check] sequence = [ - { ref = "prek-check-packages ${files}" }, - { ref = "prek-markdown-code-lint ${files}" }, - { ref = "prek-samples-check ${files}" } + { ref = "fmt" }, + { ref = "lint" }, + { ref = "pre-commit-pyright ${files}" }, + { ref = 
"pre-commit-markdown-code-lint ${files}" } ] args = [{ name = "files", default = ".", positional = true, multiple = true }] diff --git a/python/uv.lock b/python/uv.lock index 28877c91d2..dbd5ccc24d 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 3 +revision = 2 requires-python = ">=3.10" resolution-markers = [ "python_full_version >= '3.14' and sys_platform == 'darwin'", @@ -44,6 +44,7 @@ members = [ "agent-framework-durabletask", "agent-framework-foundry-local", "agent-framework-github-copilot", + "agent-framework-google", "agent-framework-lab", "agent-framework-mem0", "agent-framework-ollama", @@ -97,7 +98,7 @@ wheels = [ [[package]] name = "agent-framework" -version = "1.0.0rc3" +version = "1.0.0b251211" source = { virtual = "." } dependencies = [ { name = "agent-framework-core", extra = ["all"], marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -105,44 +106,62 @@ dependencies = [ [package.dev-dependencies] dev = [ + { name = "autogen-agentchat", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "autogen-ext", extra = ["openai"], marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "flit", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "mypy", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "poethepoet", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, - { name = "prek", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "pre-commit", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pyright", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pytest", marker = 
"sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pytest-asyncio", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pytest-cov", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "pytest-env", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pytest-retry", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pytest-timeout", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pytest-xdist", extra = ["psutil"], marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "rich", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "ruff", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "tomli", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "tomli-w", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "uv", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] +docs = [ + { name = "debugpy", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "pip", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "py2docfx", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] [package.metadata] requires-dist = [{ name = "agent-framework-core", extras = ["all"], editable = "packages/core" }] [package.metadata.requires-dev] dev = [ + { name = "autogen-agentchat" }, + { name = "autogen-ext", extras = ["openai"] }, { name = "flit", specifier = 
">=3.12.0" }, { name = "mypy", specifier = ">=1.16.1" }, { name = "poethepoet", specifier = ">=0.36.0" }, - { name = "prek", specifier = ">=0.3.2" }, + { name = "pre-commit", specifier = ">=3.7" }, { name = "pyright", specifier = ">=1.1.402" }, { name = "pytest", specifier = ">=8.4.1" }, { name = "pytest-asyncio", specifier = ">=1.0.0" }, { name = "pytest-cov", specifier = ">=6.2.1" }, + { name = "pytest-env", specifier = ">=1.1.5" }, { name = "pytest-retry", specifier = ">=1" }, { name = "pytest-timeout", specifier = ">=2.3.1" }, { name = "pytest-xdist", extras = ["psutil"], specifier = ">=3.8.0" }, { name = "rich" }, { name = "ruff", specifier = ">=0.11.8" }, { name = "tomli" }, + { name = "tomli-w" }, { name = "uv", specifier = ">=0.9,<1.0.0" }, ] +docs = [ + { name = "debugpy", specifier = ">=1.8.16" }, + { name = "pip" }, + { name = "py2docfx", specifier = ">=0.1.22.dev2259826" }, +] [[package]] name = "agent-framework-a2a" @@ -534,6 +553,21 @@ requires-dist = [ { name = "github-copilot-sdk", specifier = ">=0.1.0" }, ] +[[package]] +name = "agent-framework-google" +version = "1.0.0b260304" +source = { editable = "packages/google" } +dependencies = [ + { name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "google-genai", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] + +[package.metadata] +requires-dist = [ + { name = "agent-framework-core", editable = "packages/core" }, + { name = "google-genai", specifier = ">=0.2,<1" }, +] + [[package]] name = "agent-framework-lab" version = "1.0.0b260304" @@ -743,6 +777,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d9/63/3e48da56d5121ddcefef8645ad5a3446b0974154111a14bf75ea2b5b3cc3/agentops-0.4.21-py3-none-any.whl", hash = "sha256:93b098ea77bc5f64dcae5031a8292531cb446d9d66e6c7ef2f21a66d4e4fb2f0", size = 309579, upload-time = "2025-08-29T06:36:53.855Z" }, ] +[[package]] +name 
= "aiofiles" +version = "25.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/41/c3/534eac40372d8ee36ef40df62ec129bee4fdb5ad9706e58a29be53b2c970/aiofiles-25.1.0.tar.gz", hash = "sha256:a8d728f0a29de45dc521f18f07297428d56992a742f0cd2701ba86e44d23d5b2", size = 46354, upload-time = "2025-10-09T20:51:04.358Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bc/8a/340a1555ae33d7354dbca4faa54948d76d89a27ceef032c8c3bc661d003e/aiofiles-25.1.0-py3-none-any.whl", hash = "sha256:abe311e527c862958650f9438e859c1fa7568a141b22abcd015e120e86a85695", size = 14668, upload-time = "2025-10-09T20:51:03.174Z" }, +] + [[package]] name = "aiohappyeyeballs" version = "2.6.1" @@ -885,6 +928,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/fb/76/641ae371508676492379f16e2fa48f4e2c11741bd63c48be4b12a6b09cba/aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e", size = 7490, upload-time = "2025-07-03T22:54:42.156Z" }, ] +[[package]] +name = "alabaster" +version = "0.7.16" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c9/3e/13dd8e5ed9094e734ac430b5d0eb4f2bb001708a8b7856cbf8e084e001ba/alabaster-0.7.16.tar.gz", hash = "sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65", size = 23776, upload-time = "2024-01-10T00:56:10.189Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/32/34/d4e1c02d3bee589efb5dfa17f88ea08bdb3e3eac12bc475462aec52ed223/alabaster-0.7.16-py3-none-any.whl", hash = "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92", size = 13511, upload-time = "2024-01-10T00:56:08.388Z" }, +] + [[package]] name = "annotated-doc" version = "0.0.4" @@ -984,6 +1036,54 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/3a/2a/7cc015f5b9f5db42b7d48157e23356022889fc354a2813c15934b7cb5c0e/attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373", size = 67615, upload-time = "2025-10-06T13:54:43.17Z" }, ] +[[package]] +name = "autogen-agentchat" +version = "0.7.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "autogen-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e5/b6/df2f835ce3aaaa2716a3dfbbd4ab8855839184f08b35ce0baa23b26a1885/autogen_agentchat-0.7.5.tar.gz", hash = "sha256:8d9c718db52ef24a518806b3a0ef848f0e4c1902877675dc0abed73a8e6e7755", size = 147716, upload-time = "2025-09-30T06:16:14.413Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/82/23490a70837d77d691948863d393cef71a06d36903249f635b28f579292b/autogen_agentchat-0.7.5-py3-none-any.whl", hash = "sha256:d19ca8ec26cb15e071a56c4269140aea2bf3c718bdc7e06f6677af9a905815ba", size = 119302, upload-time = "2025-09-30T06:16:12.895Z" }, +] + +[[package]] +name = "autogen-core" +version = "0.7.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jsonref", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "opentelemetry-api", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "pillow", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "protobuf", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "pydantic", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { 
url = "https://files.pythonhosted.org/packages/b1/11/fea52bf3541c5308bed1ee9b9b3596fa510b2c5db893d32b649d22f02b87/autogen_core-0.7.5.tar.gz", hash = "sha256:70c2871389f1d0a7f6db8ef78717a51b7ce877ff4a08a836b7758d604dece203", size = 101980, upload-time = "2025-09-30T06:16:25.957Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/33/83/8ad899fca9dd2d2b3e5e37be13dd9e6aee3e53a621041b0624d74b07e1ee/autogen_core-0.7.5-py3-none-any.whl", hash = "sha256:4f4a0d3b88a36da75b2ef0d40be2d5e3a207cae7f7d951511e498ad1d68f8ef4", size = 101874, upload-time = "2025-09-30T06:16:24.306Z" }, +] + +[[package]] +name = "autogen-ext" +version = "0.7.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "autogen-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e5/c8/f0651372f814c48eb64ffe921166995b7734bec0df7f0ba663383e831f58/autogen_ext-0.7.5.tar.gz", hash = "sha256:711ab9238ea66ff2abef163c331e538092bdea661620727a4a9b2ebce1c22df9", size = 417568, upload-time = "2025-09-30T06:16:24.278Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5c/10/9333ba6c532086cce7ec7fb39e36b9a08afdbc39e2d3519f00af712e403a/autogen_ext-0.7.5-py3-none-any.whl", hash = "sha256:18cecc8aab37c7c4861fbad038a1017f0ef25e35e273aa158066ccf9d93fea4f", size = 331380, upload-time = "2025-09-30T06:16:22.832Z" }, +] + +[package.optional-dependencies] +openai = [ + { name = "aiofiles", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "openai", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "tiktoken", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] + [[package]] name = "azure-ai-agents" version = "1.2.0b5" @@ -1140,6 +1240,15 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/d8/3a/6ef2047a072e54e1142718d433d50e9514c999a58f51abfff7902f3a72f8/azure_storage_blob-12.28.0-py3-none-any.whl", hash = "sha256:00fb1db28bf6a7b7ecaa48e3b1d5c83bfadacc5a678b77826081304bd87d6461", size = 431499, upload-time = "2026-01-06T23:48:58.995Z" }, ] +[[package]] +name = "babel" +version = "2.18.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7d/b2/51899539b6ceeeb420d40ed3cd4b7a40519404f9baf3d4ac99dc413a834b/babel-2.18.0.tar.gz", hash = "sha256:b80b99a14bd085fcacfa15c9165f651fbb3406e66cc603abf11c5750937c992d", size = 9959554, upload-time = "2026-02-01T12:30:56.078Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/f5/21d2de20e8b8b0408f0681956ca2c69f1320a3848ac50e6e7f39c6159675/babel-2.18.0-py3-none-any.whl", hash = "sha256:e2b422b277c2b9a9630c1d7903c2a00d0830c409c59ac8cae9081c92f1aeba35", size = 10196845, upload-time = "2026-02-01T12:30:53.445Z" }, +] + [[package]] name = "backoff" version = "2.2.1" @@ -1286,6 +1395,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195, upload-time = "2025-09-08T23:23:43.004Z" }, ] +[[package]] +name = "cfgv" +version = "3.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4e/b5/721b8799b04bf9afe054a3899c6cf4e880fcf8563cc71c15610242490a0c/cfgv-3.5.0.tar.gz", hash = "sha256:d5b1034354820651caa73ede66a6294d6e95c1b00acc5e9b098e917404669132", size = 7334, upload-time = "2025-11-19T20:55:51.612Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/db/3c/33bac158f8ab7f89b2e59426d5fe2e4f63f7ed25df84c036890172b412b5/cfgv-3.5.0-py2.py3-none-any.whl", hash = 
"sha256:a8dc6b26ad22ff227d2634a65cb388215ce6cc96bbcc5cfde7641ae87e8dacc0", size = 7445, upload-time = "2025-11-19T20:55:50.744Z" }, +] + [[package]] name = "charset-normalizer" version = "3.4.4" @@ -1793,6 +1911,35 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e7/05/c19819d5e3d95294a6f5947fb9b9629efb316b96de511b418c53d245aae6/cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30", size = 8321, upload-time = "2023-10-07T05:32:16.783Z" }, ] +[[package]] +name = "debugpy" +version = "1.8.20" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e0/b7/cd8080344452e4874aae67c40d8940e2b4d47b01601a8fd9f44786c757c7/debugpy-1.8.20.tar.gz", hash = "sha256:55bc8701714969f1ab89a6d5f2f3d40c36f91b2cbe2f65d98bf8196f6a6a2c33", size = 1645207, upload-time = "2026-01-29T23:03:28.199Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/71/be/8bd693a0b9d53d48c8978fa5d889e06f3b5b03e45fd1ea1e78267b4887cb/debugpy-1.8.20-cp310-cp310-macosx_15_0_x86_64.whl", hash = "sha256:157e96ffb7f80b3ad36d808646198c90acb46fdcfd8bb1999838f0b6f2b59c64", size = 2099192, upload-time = "2026-01-29T23:03:29.707Z" }, + { url = "https://files.pythonhosted.org/packages/77/1b/85326d07432086a06361d493d2743edd0c4fc2ef62162be7f8618441ac37/debugpy-1.8.20-cp310-cp310-manylinux_2_34_x86_64.whl", hash = "sha256:c1178ae571aff42e61801a38b007af504ec8e05fde1c5c12e5a7efef21009642", size = 3088568, upload-time = "2026-01-29T23:03:31.467Z" }, + { url = "https://files.pythonhosted.org/packages/e8/60/3e08462ee3eccd10998853eb35947c416e446bfe2bc37dbb886b9044586c/debugpy-1.8.20-cp310-cp310-win32.whl", hash = "sha256:c29dd9d656c0fbd77906a6e6a82ae4881514aa3294b94c903ff99303e789b4a2", size = 5284399, upload-time = "2026-01-29T23:03:33.678Z" }, + { url = 
"https://files.pythonhosted.org/packages/72/43/09d49106e770fe558ced5e80df2e3c2ebee10e576eda155dcc5670473663/debugpy-1.8.20-cp310-cp310-win_amd64.whl", hash = "sha256:3ca85463f63b5dd0aa7aaa933d97cbc47c174896dcae8431695872969f981893", size = 5316388, upload-time = "2026-01-29T23:03:35.095Z" }, + { url = "https://files.pythonhosted.org/packages/51/56/c3baf5cbe4dd77427fd9aef99fcdade259ad128feeb8a786c246adb838e5/debugpy-1.8.20-cp311-cp311-macosx_15_0_universal2.whl", hash = "sha256:eada6042ad88fa1571b74bd5402ee8b86eded7a8f7b827849761700aff171f1b", size = 2208318, upload-time = "2026-01-29T23:03:36.481Z" }, + { url = "https://files.pythonhosted.org/packages/9a/7d/4fa79a57a8e69fe0d9763e98d1110320f9ecd7f1f362572e3aafd7417c9d/debugpy-1.8.20-cp311-cp311-manylinux_2_34_x86_64.whl", hash = "sha256:7de0b7dfeedc504421032afba845ae2a7bcc32ddfb07dae2c3ca5442f821c344", size = 3171493, upload-time = "2026-01-29T23:03:37.775Z" }, + { url = "https://files.pythonhosted.org/packages/7d/f2/1e8f8affe51e12a26f3a8a8a4277d6e60aa89d0a66512f63b1e799d424a4/debugpy-1.8.20-cp311-cp311-win32.whl", hash = "sha256:773e839380cf459caf73cc533ea45ec2737a5cc184cf1b3b796cd4fd98504fec", size = 5209240, upload-time = "2026-01-29T23:03:39.109Z" }, + { url = "https://files.pythonhosted.org/packages/d5/92/1cb532e88560cbee973396254b21bece8c5d7c2ece958a67afa08c9f10dc/debugpy-1.8.20-cp311-cp311-win_amd64.whl", hash = "sha256:1f7650546e0eded1902d0f6af28f787fa1f1dbdbc97ddabaf1cd963a405930cb", size = 5233481, upload-time = "2026-01-29T23:03:40.659Z" }, + { url = "https://files.pythonhosted.org/packages/14/57/7f34f4736bfb6e00f2e4c96351b07805d83c9a7b33d28580ae01374430f7/debugpy-1.8.20-cp312-cp312-macosx_15_0_universal2.whl", hash = "sha256:4ae3135e2089905a916909ef31922b2d733d756f66d87345b3e5e52b7a55f13d", size = 2550686, upload-time = "2026-01-29T23:03:42.023Z" }, + { url = 
"https://files.pythonhosted.org/packages/ab/78/b193a3975ca34458f6f0e24aaf5c3e3da72f5401f6054c0dfd004b41726f/debugpy-1.8.20-cp312-cp312-manylinux_2_34_x86_64.whl", hash = "sha256:88f47850a4284b88bd2bfee1f26132147d5d504e4e86c22485dfa44b97e19b4b", size = 4310588, upload-time = "2026-01-29T23:03:43.314Z" }, + { url = "https://files.pythonhosted.org/packages/c1/55/f14deb95eaf4f30f07ef4b90a8590fc05d9e04df85ee379712f6fb6736d7/debugpy-1.8.20-cp312-cp312-win32.whl", hash = "sha256:4057ac68f892064e5f98209ab582abfee3b543fb55d2e87610ddc133a954d390", size = 5331372, upload-time = "2026-01-29T23:03:45.526Z" }, + { url = "https://files.pythonhosted.org/packages/a1/39/2bef246368bd42f9bd7cba99844542b74b84dacbdbea0833e610f384fee8/debugpy-1.8.20-cp312-cp312-win_amd64.whl", hash = "sha256:a1a8f851e7cf171330679ef6997e9c579ef6dd33c9098458bd9986a0f4ca52e3", size = 5372835, upload-time = "2026-01-29T23:03:47.245Z" }, + { url = "https://files.pythonhosted.org/packages/15/e2/fc500524cc6f104a9d049abc85a0a8b3f0d14c0a39b9c140511c61e5b40b/debugpy-1.8.20-cp313-cp313-macosx_15_0_universal2.whl", hash = "sha256:5dff4bb27027821fdfcc9e8f87309a28988231165147c31730128b1c983e282a", size = 2539560, upload-time = "2026-01-29T23:03:48.738Z" }, + { url = "https://files.pythonhosted.org/packages/90/83/fb33dcea789ed6018f8da20c5a9bc9d82adc65c0c990faed43f7c955da46/debugpy-1.8.20-cp313-cp313-manylinux_2_34_x86_64.whl", hash = "sha256:84562982dd7cf5ebebfdea667ca20a064e096099997b175fe204e86817f64eaf", size = 4293272, upload-time = "2026-01-29T23:03:50.169Z" }, + { url = "https://files.pythonhosted.org/packages/a6/25/b1e4a01bfb824d79a6af24b99ef291e24189080c93576dfd9b1a2815cd0f/debugpy-1.8.20-cp313-cp313-win32.whl", hash = "sha256:da11dea6447b2cadbf8ce2bec59ecea87cc18d2c574980f643f2d2dfe4862393", size = 5331208, upload-time = "2026-01-29T23:03:51.547Z" }, + { url = "https://files.pythonhosted.org/packages/13/f7/a0b368ce54ffff9e9028c098bd2d28cfc5b54f9f6c186929083d4c60ba58/debugpy-1.8.20-cp313-cp313-win_amd64.whl", 
hash = "sha256:eb506e45943cab2efb7c6eafdd65b842f3ae779f020c82221f55aca9de135ed7", size = 5372930, upload-time = "2026-01-29T23:03:53.585Z" }, + { url = "https://files.pythonhosted.org/packages/33/2e/f6cb9a8a13f5058f0a20fe09711a7b726232cd5a78c6a7c05b2ec726cff9/debugpy-1.8.20-cp314-cp314-macosx_15_0_universal2.whl", hash = "sha256:9c74df62fc064cd5e5eaca1353a3ef5a5d50da5eb8058fcef63106f7bebe6173", size = 2538066, upload-time = "2026-01-29T23:03:54.999Z" }, + { url = "https://files.pythonhosted.org/packages/c5/56/6ddca50b53624e1ca3ce1d1e49ff22db46c47ea5fb4c0cc5c9b90a616364/debugpy-1.8.20-cp314-cp314-manylinux_2_34_x86_64.whl", hash = "sha256:077a7447589ee9bc1ff0cdf443566d0ecf540ac8aa7333b775ebcb8ce9f4ecad", size = 4269425, upload-time = "2026-01-29T23:03:56.518Z" }, + { url = "https://files.pythonhosted.org/packages/c5/d9/d64199c14a0d4c476df46c82470a3ce45c8d183a6796cfb5e66533b3663c/debugpy-1.8.20-cp314-cp314-win32.whl", hash = "sha256:352036a99dd35053b37b7803f748efc456076f929c6a895556932eaf2d23b07f", size = 5331407, upload-time = "2026-01-29T23:03:58.481Z" }, + { url = "https://files.pythonhosted.org/packages/e0/d9/1f07395b54413432624d61524dfd98c1a7c7827d2abfdb8829ac92638205/debugpy-1.8.20-cp314-cp314-win_amd64.whl", hash = "sha256:a98eec61135465b062846112e5ecf2eebb855305acc1dfbae43b72903b8ab5be", size = 5372521, upload-time = "2026-01-29T23:03:59.864Z" }, + { url = "https://files.pythonhosted.org/packages/e0/c3/7f67dea8ccf8fdcb9c99033bbe3e90b9e7395415843accb81428c441be2d/debugpy-1.8.20-py2.py3-none-any.whl", hash = "sha256:5be9bed9ae3be00665a06acaa48f8329d2b9632f15fd09f6a9a8c8d9907e54d7", size = 5337658, upload-time = "2026-01-29T23:04:17.404Z" }, +] + [[package]] name = "deepdiff" version = "8.6.1" @@ -1805,6 +1952,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f7/e6/efe534ef0952b531b630780e19cabd416e2032697019d5295defc6ef9bd9/deepdiff-8.6.1-py3-none-any.whl", hash = "sha256:ee8708a7f7d37fb273a541fa24ad010ed484192cd0c4ffc0fa0ed5e2d4b9e78b", size = 
91378, upload-time = "2025-09-03T19:40:39.679Z" }, ] +[[package]] +name = "distlib" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/96/8e/709914eb2b5749865801041647dc7f4e6d00b549cfe88b65ca192995f07c/distlib-0.4.0.tar.gz", hash = "sha256:feec40075be03a04501a973d81f633735b4b69f98b05450592310c0f401a4e0d", size = 614605, upload-time = "2025-07-17T16:52:00.465Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/33/6b/e0547afaf41bf2c42e52430072fa5658766e3d65bd4b03a563d1b6336f57/distlib-0.4.0-py2.py3-none-any.whl", hash = "sha256:9659f7d87e46584a30b5780e43ac7a2143098441670ff0a49d5f9034c54a6c16", size = 469047, upload-time = "2025-07-17T16:51:58.613Z" }, +] + [[package]] name = "distro" version = "1.9.0" @@ -1834,11 +1990,11 @@ wheels = [ [[package]] name = "docutils" -version = "0.22.4" +version = "0.19" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ae/b6/03bb70946330e88ffec97aefd3ea75ba575cb2e762061e0e62a213befee8/docutils-0.22.4.tar.gz", hash = "sha256:4db53b1fde9abecbb74d91230d32ab626d94f6badfc575d6db9194a49df29968", size = 2291750, upload-time = "2025-12-18T19:00:26.443Z" } +sdist = { url = "https://files.pythonhosted.org/packages/6b/5c/330ea8d383eb2ce973df34d1239b3b21e91cd8c865d21ff82902d952f91f/docutils-0.19.tar.gz", hash = "sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6", size = 2056383, upload-time = "2022-07-05T20:17:31.045Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/02/10/5da547df7a391dcde17f59520a231527b8571e6f46fc8efb02ccb370ab12/docutils-0.22.4-py3-none-any.whl", hash = "sha256:d0013f540772d1420576855455d050a2180186c91c15779301ac2ccb3eeb68de", size = 633196, upload-time = "2025-12-18T19:00:18.077Z" }, + { url = "https://files.pythonhosted.org/packages/93/69/e391bd51bc08ed9141ecd899a0ddb61ab6465309f1eb470905c0c8868081/docutils-0.19-py3-none-any.whl", hash = 
"sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc", size = 570472, upload-time = "2022-07-05T20:17:26.388Z" }, ] [[package]] @@ -2360,6 +2516,21 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/83/1d/d6466de3a5249d35e832a52834115ca9d1d0de6abc22065f049707516d47/google_auth-2.48.0-py3-none-any.whl", hash = "sha256:2e2a537873d449434252a9632c28bfc268b0adb1e53f9fb62afc5333a975903f", size = 236499, upload-time = "2026-01-26T19:22:45.099Z" }, ] +[[package]] +name = "google-genai" +version = "0.8.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-auth", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "pydantic", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "requests", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "websockets", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d2/ef/d5a57aa9360f31b3a3b726fa4d0cc8b2ea14e3a6a0c482cca74a28ab5392/google_genai-0.8.0.tar.gz", hash = "sha256:b5730bcb144177cfcf6cfe44ab59611f8dec3f7c44599cfb321d5d71856a910e", size = 118835, upload-time = "2025-01-30T23:25:28.291Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/74/6d/c5b1757ffe28fdfb38df3fda79c614ec840ebac0b72a3fcbe2ed969e254d/google_genai-0.8.0-py3-none-any.whl", hash = "sha256:dbaea9054f0e3547d9e5810390304574808d9cb5d77198b8a247f497271c8254", size = 125385, upload-time = "2025-01-30T23:25:26.272Z" }, +] + [[package]] name = "googleapis-common-protos" version = "1.72.0" @@ -2732,6 +2903,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/48/30/47d0bf6072f7252e6521f3447ccfa40b421b6824517f82854703d0f5a98b/hyperframe-6.1.0-py3-none-any.whl", hash = 
"sha256:b03380493a519fce58ea5af42e4a42317bf9bd425596f7a0835ffce80f1a42e5", size = 13007, upload-time = "2025-01-22T21:41:47.295Z" }, ] +[[package]] +name = "identify" +version = "2.6.17" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/57/84/376a3b96e5a8d33a7aa2c5b3b31a4b3c364117184bf0b17418055f6ace66/identify-2.6.17.tar.gz", hash = "sha256:f816b0b596b204c9fdf076ded172322f2723cf958d02f9c3587504834c8ff04d", size = 99579, upload-time = "2026-03-01T20:04:12.702Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/40/66/71c1227dff78aaeb942fed29dd5651f2aec166cc7c9aeea3e8b26a539b7d/identify-2.6.17-py2.py3-none-any.whl", hash = "sha256:be5f8412d5ed4b20f2bd41a65f920990bdccaa6a4a18a08f1eefdcd0bdd885f0", size = 99382, upload-time = "2026-03-01T20:04:11.439Z" }, +] + [[package]] name = "idna" version = "3.11" @@ -2741,6 +2921,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, ] +[[package]] +name = "imagesize" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6c/e6/7bf14eeb8f8b7251141944835abd42eb20a658d89084b7e1f3e5fe394090/imagesize-2.0.0.tar.gz", hash = "sha256:8e8358c4a05c304f1fccf7ff96f036e7243a189e9e42e90851993c558cfe9ee3", size = 1773045, upload-time = "2026-03-03T14:18:29.941Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/53/fb7122b71361a0d121b669dcf3d31244ef75badbbb724af388948de543e2/imagesize-2.0.0-py2.py3-none-any.whl", hash = "sha256:5667c5bbb57ab3f1fa4bc366f4fbc971db3d5ed011fd2715fd8001f782718d96", size = 9441, upload-time = "2026-03-03T14:18:27.892Z" }, +] + [[package]] name = "importlib-metadata" version = "8.7.1" @@ -2916,6 +3105,15 @@ wheels 
= [ { url = "https://files.pythonhosted.org/packages/03/99/33c7d78a3fb70d545fd5411ac67a651c81602cc09c9cf0df383733f068c5/jsonpath_ng-1.8.0-py3-none-any.whl", hash = "sha256:b8dde192f8af58d646fc031fac9c99fe4d00326afc4148f1f043c601a8cfe138", size = 67844, upload-time = "2026-02-28T00:53:19.637Z" }, ] +[[package]] +name = "jsonref" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/aa/0d/c1f3277e90ccdb50d33ed5ba1ec5b3f0a242ed8c1b1a85d3afeb68464dca/jsonref-1.1.0.tar.gz", hash = "sha256:32fe8e1d85af0fdefbebce950af85590b22b60f9e95443176adbde4e1ecea552", size = 8814, upload-time = "2023-01-16T16:10:04.455Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/ec/e1db9922bceb168197a558a2b8c03a7963f1afe93517ddd3cf99f202f996/jsonref-1.1.0-py3-none-any.whl", hash = "sha256:590dc7773df6c21cbf948b5dac07a72a251db28b0238ceecce0a2abfa8ec30a9", size = 9425, upload-time = "2023-01-16T16:10:02.255Z" }, +] + [[package]] name = "jsonschema" version = "4.26.0" @@ -4563,6 +4761,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/de/f0/c81e05b613866b76d2d1066490adf1a3dbc4ee9d9c839961c3fc8a6997af/pip-26.0.1-py3-none-any.whl", hash = "sha256:bdb1b08f4274833d62c1aa29e20907365a2ceb950410df15fc9521bad440122b", size = 1787723, upload-time = "2026-02-05T02:20:16.416Z" }, ] +[[package]] +name = "platformdirs" +version = "4.9.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1b/04/fea538adf7dbbd6d186f551d595961e564a3b6715bdf276b477460858672/platformdirs-4.9.2.tar.gz", hash = "sha256:9a33809944b9db043ad67ca0db94b14bf452cc6aeaac46a88ea55b26e2e9d291", size = 28394, upload-time = "2026-02-16T03:56:10.574Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/48/31/05e764397056194206169869b50cf2fee4dbbbc71b344705b9c0d878d4d8/platformdirs-4.9.2-py3-none-any.whl", hash = 
"sha256:9170634f126f8efdae22fb58ae8a0eaa86f38365bc57897a6c4f781d1f5875bd", size = 21168, upload-time = "2026-02-16T03:56:08.891Z" }, +] + [[package]] name = "plotly" version = "6.6.0" @@ -4669,6 +4876,22 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/6f/96/0f8a1f86485b3ec0315e3e8403326884a0334b3dcd699df2482669cca4be/powerfx-0.0.34-py3-none-any.whl", hash = "sha256:f2dc1c42ba8bfa4c72a7fcff2a00755b95394547388ca0b3e36579c49ee7ed75", size = 3483089, upload-time = "2025-12-22T15:50:57.536Z" }, ] +[[package]] +name = "pre-commit" +version = "4.5.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cfgv", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "identify", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "nodeenv", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "pyyaml", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "virtualenv", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/40/f1/6d86a29246dfd2e9b6237f0b5823717f60cad94d47ddc26afa916d21f525/pre_commit-4.5.1.tar.gz", hash = "sha256:eb545fcff725875197837263e977ea257a402056661f09dae08e4b149b030a61", size = 198232, upload-time = "2025-12-16T21:14:33.552Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5d/19/fd3ef348460c80af7bb4669ea7926651d1f95c23ff2df18b9d24bab4f3fa/pre_commit-4.5.1-py2.py3-none-any.whl", hash = "sha256:3b3afd891e97337708c1674210f8eba659b52a38ea5f822ff142d10786221f77", size = 226437, upload-time = "2025-12-16T21:14:32.409Z" }, +] + [[package]] name = "prek" version = "0.3.4" @@ -4848,6 +5071,20 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/50/1b/6921afe68c74868b4c9fa424dad3be35b095e16687989ebbb50ce4fceb7c/psutil-7.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:4cf3d4eb1aa9b348dec30105c55cd9b7d4629285735a102beb4441e38db90553", size = 244885, upload-time = "2025-02-13T21:54:37.486Z" }, ] +[[package]] +name = "py2docfx" +version = "0.1.24" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyyaml", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "sphinx", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "wheel", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9a/79/0d5c7b56065fc5183799619bebf7034fc1e1bf721af702a4677cd3d7b2ca/py2docfx-0.1.24.tar.gz", hash = "sha256:42148a9e07ea32f8672ed2cc8f9c5c8a10b62067bd1996ff6fc17b16cea1190c", size = 8848164, upload-time = "2026-03-04T01:22:48.058Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a5/95/88aeb7619407407aa080e4f945f43d885fd7c033e5b976f0151b9653e3f4/py2docfx-0.1.24-py3-none-any.whl", hash = "sha256:ec7c55c3344b9e31a089fe72fd6d234c96d012e3f6dff7bce5c7afe45a0a4109", size = 11088628, upload-time = "2026-02-19T19:46:42.893Z" }, +] + [[package]] name = "pyarrow" version = "23.0.1" @@ -5239,6 +5476,20 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ee/49/1377b49de7d0c1ce41292161ea0f721913fa8722c19fb9c1e3aa0367eecb/pytest_cov-7.0.0-py3-none-any.whl", hash = "sha256:3b8e9558b16cc1479da72058bdecf8073661c7f57f7d3c5f22a1c23507f2d861", size = 22424, upload-time = "2025-09-09T10:57:00.695Z" }, ] +[[package]] +name = "pytest-env" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "python-dotenv", 
marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "tomli", marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e6/56/a931c6f6194917ff44be41b8586e2ffd13a18fa70fb28d9800a4695befa5/pytest_env-1.5.0.tar.gz", hash = "sha256:db8994b9ce170f135a37acc09ac753a6fc697d15e691b576ed8d8ca261c40246", size = 15271, upload-time = "2026-02-17T18:31:39.095Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/af/99b52a8524983bfece35e51e65a0b517b22920c023e57855c95e744e19e4/pytest_env-1.5.0-py3-none-any.whl", hash = "sha256:89a15686ac837c9cd009a8a2d52bd55865e2f23c82094247915dae4540c87161", size = 10122, upload-time = "2026-02-17T18:31:37.496Z" }, +] + [[package]] name = "pytest-retry" version = "1.7.0" @@ -5293,6 +5544,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, ] +[[package]] +name = "python-discovery" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "filelock", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "platformdirs", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/82/bb/93a3e83bdf9322c7e21cafd092e56a4a17c4d8ef4277b6eb01af1a540a6f/python_discovery-1.1.0.tar.gz", hash = "sha256:447941ba1aed8cc2ab7ee3cb91be5fc137c5bdbb05b7e6ea62fbdcb66e50b268", size = 55674, upload-time = "2026-02-26T09:42:49.668Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/06/54/82a6e2ef37f0f23dccac604b9585bdcbd0698604feb64807dcb72853693e/python_discovery-1.1.0-py3-none-any.whl", hash = "sha256:a162893b8809727f54594a99ad2179d2ede4bf953e12d4c7abc3cc9cdbd1437b", size = 30687, upload-time = "2026-02-26T09:42:48.548Z" }, +] + [[package]] name = "python-dotenv" version = "1.2.2" @@ -6222,6 +6486,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, ] +[[package]] +name = "snowballstemmer" +version = "3.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/75/a7/9810d872919697c9d01295633f5d574fb416d47e535f258272ca1f01f447/snowballstemmer-3.0.1.tar.gz", hash = "sha256:6d5eeeec8e9f84d4d56b847692bacf79bc2c8e90c7f80ca4444ff8b6f2e52895", size = 105575, upload-time = "2025-05-09T16:34:51.843Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/78/3565d011c61f5a43488987ee32b6f3f656e7f107ac2782dd57bdd7d91d9a/snowballstemmer-3.0.1-py3-none-any.whl", hash = "sha256:6cd7b3897da8d6c9ffb968a6781fa6532dce9c3618a4b127d920dab764a19064", size = 103274, upload-time = "2025-05-09T16:34:50.371Z" }, +] + [[package]] name = "soundfile" version = "0.12.1" @@ -6240,6 +6513,87 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/50/ff/26a4ee48d0b66625a4e4028a055b9f25bc9d7c7b2d17d21a45137621a50d/soundfile-0.12.1-py2.py3-none-win_amd64.whl", hash = "sha256:0d86924c00b62552b650ddd28af426e3ff2d4dc2e9047dae5b3d8452e0a49a77", size = 1009109, upload-time = "2023-02-15T15:37:29.41Z" }, ] +[[package]] +name = "sphinx" +version = "6.1.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "alabaster", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or 
sys_platform == 'win32'" }, + { name = "babel", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "docutils", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "imagesize", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "jinja2", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "packaging", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "pygments", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "requests", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "snowballstemmer", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "sphinxcontrib-applehelp", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "sphinxcontrib-devhelp", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "sphinxcontrib-htmlhelp", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "sphinxcontrib-jsmath", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "sphinxcontrib-qthelp", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "sphinxcontrib-serializinghtml", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/db/0b/a0f60c4abd8a69bd5b0d20edde8a8d8d9d4ca825bbd920d328d248fd0290/Sphinx-6.1.3.tar.gz", hash = 
"sha256:0dac3b698538ffef41716cf97ba26c1c7788dba73ce6f150c1ff5b4720786dd2", size = 6663266, upload-time = "2023-01-10T15:58:38.349Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/2c/22a20486cad91a66f4f70bd88c20c8bb306ae719cbba93d7debae7efa80d/sphinx-6.1.3-py3-none-any.whl", hash = "sha256:807d1cb3d6be87eb78a381c3e70ebd8d346b9a25f3753e9947e866b2786865fc", size = 3027954, upload-time = "2023-01-10T15:58:34.907Z" }, +] + +[[package]] +name = "sphinxcontrib-applehelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/6e/b837e84a1a704953c62ef8776d45c3e8d759876b4a84fe14eba2859106fe/sphinxcontrib_applehelp-2.0.0.tar.gz", hash = "sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1", size = 20053, upload-time = "2024-07-29T01:09:00.465Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5d/85/9ebeae2f76e9e77b952f4b274c27238156eae7979c5421fba91a28f4970d/sphinxcontrib_applehelp-2.0.0-py3-none-any.whl", hash = "sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5", size = 119300, upload-time = "2024-07-29T01:08:58.99Z" }, +] + +[[package]] +name = "sphinxcontrib-devhelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f6/d2/5beee64d3e4e747f316bae86b55943f51e82bb86ecd325883ef65741e7da/sphinxcontrib_devhelp-2.0.0.tar.gz", hash = "sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad", size = 12967, upload-time = "2024-07-29T01:09:23.417Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/35/7a/987e583882f985fe4d7323774889ec58049171828b58c2217e7f79cdf44e/sphinxcontrib_devhelp-2.0.0-py3-none-any.whl", hash = "sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2", size = 82530, upload-time = "2024-07-29T01:09:21.945Z" }, +] + +[[package]] +name = "sphinxcontrib-htmlhelp" +version = "2.1.0" +source = 
{ registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/93/983afd9aa001e5201eab16b5a444ed5b9b0a7a010541e0ddfbbfd0b2470c/sphinxcontrib_htmlhelp-2.1.0.tar.gz", hash = "sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9", size = 22617, upload-time = "2024-07-29T01:09:37.889Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/7b/18a8c0bcec9182c05a0b3ec2a776bba4ead82750a55ff798e8d406dae604/sphinxcontrib_htmlhelp-2.1.0-py3-none-any.whl", hash = "sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8", size = 98705, upload-time = "2024-07-29T01:09:36.407Z" }, +] + +[[package]] +name = "sphinxcontrib-jsmath" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/e8/9ed3830aeed71f17c026a07a5097edcf44b692850ef215b161b8ad875729/sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8", size = 5787, upload-time = "2019-01-21T16:10:16.347Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c2/42/4c8646762ee83602e3fb3fbe774c2fac12f317deb0b5dbeeedd2d3ba4b77/sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178", size = 5071, upload-time = "2019-01-21T16:10:14.333Z" }, +] + +[[package]] +name = "sphinxcontrib-qthelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/68/bc/9104308fc285eb3e0b31b67688235db556cd5b0ef31d96f30e45f2e51cae/sphinxcontrib_qthelp-2.0.0.tar.gz", hash = "sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab", size = 17165, upload-time = "2024-07-29T01:09:56.435Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/83/859ecdd180cacc13b1f7e857abf8582a64552ea7a061057a6c716e790fce/sphinxcontrib_qthelp-2.0.0-py3-none-any.whl", hash = 
"sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb", size = 88743, upload-time = "2024-07-29T01:09:54.885Z" }, +] + +[[package]] +name = "sphinxcontrib-serializinghtml" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3b/44/6716b257b0aa6bfd51a1b31665d1c205fb12cb5ad56de752dfa15657de2f/sphinxcontrib_serializinghtml-2.0.0.tar.gz", hash = "sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d", size = 16080, upload-time = "2024-07-29T01:10:09.332Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/a7/d2782e4e3f77c8450f727ba74a8f12756d5ba823d81b941f1b04da9d033a/sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331", size = 92072, upload-time = "2024-07-29T01:10:08.203Z" }, +] + [[package]] name = "sqlalchemy" version = "2.0.48" @@ -6750,6 +7104,22 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/63/9a/0962b05b308494e3202d3f794a6e85abe471fe3cafdbcf95c2e8c713aabd/uvloop-0.21.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a5c39f217ab3c663dc699c04cbd50c13813e31d917642d459fdcec07555cc553", size = 4660018, upload-time = "2024-10-14T23:38:10.888Z" }, ] +[[package]] +name = "virtualenv" +version = "21.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "distlib", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "filelock", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "platformdirs", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "python-discovery", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "typing-extensions", marker = "(python_full_version < '3.11' and sys_platform == 'darwin') 
or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2f/c9/18d4b36606d6091844daa3bd93cf7dc78e6f5da21d9f21d06c221104b684/virtualenv-21.1.0.tar.gz", hash = "sha256:1990a0188c8f16b6b9cf65c9183049007375b26aad415514d377ccacf1e4fb44", size = 5840471, upload-time = "2026-02-27T08:49:29.702Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/55/896b06bf93a49bec0f4ae2a6f1ed12bd05c8860744ac3a70eda041064e4d/virtualenv-21.1.0-py3-none-any.whl", hash = "sha256:164f5e14c5587d170cf98e60378eb91ea35bf037be313811905d3a24ea33cc07", size = 5825072, upload-time = "2026-02-27T08:49:27.516Z" }, +] + [[package]] name = "watchdog" version = "6.0.0" @@ -6853,6 +7223,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/4d/ec/d58832f89ede95652fd01f4f24236af7d32b70cab2196dfcc2d2fd13c5c2/werkzeug-3.1.6-py3-none-any.whl", hash = "sha256:7ddf3357bb9564e407607f988f683d72038551200c704012bb9a4c523d42f131", size = 225166, upload-time = "2026-02-19T15:17:17.475Z" }, ] +[[package]] +name = "wheel" +version = "0.46.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/89/24/a2eb353a6edac9a0303977c4cb048134959dd2a51b48a269dfc9dde00c8a/wheel-0.46.3.tar.gz", hash = "sha256:e3e79874b07d776c40bd6033f8ddf76a7dad46a7b8aa1b2787a83083519a1803", size = 60605, upload-time = "2026-01-22T12:39:49.136Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/87/22/b76d483683216dde3d67cba61fb2444be8d5be289bf628c13fc0fd90e5f9/wheel-0.46.3-py3-none-any.whl", hash = "sha256:4b399d56c9d9338230118d705d9737a2a468ccca63d5e813e2a4fc7815d8bc4d", size = 30557, upload-time = "2026-01-22T12:39:48.099Z" }, +] + [[package]] name = "win32-setctime" version = "1.2.0"