diff --git a/.lint_baselines/falsey_clobber.json b/.lint_baselines/falsey_clobber.json
index 97d2f34..a2d4d50 100644
--- a/.lint_baselines/falsey_clobber.json
+++ b/.lint_baselines/falsey_clobber.json
@@ -22,20 +22,23 @@
   "axonflow/adapters/tool_wrapper.py:190:20",
   "axonflow/adapters/tool_wrapper.py:208:20",
   "axonflow/adapters/tool_wrapper.py:220:20",
-  "axonflow/client.py:1029:16",
-  "axonflow/client.py:1106:16",
-  "axonflow/client.py:1578:37",
-  "axonflow/client.py:1619:18",
-  "axonflow/client.py:1677:37",
-  "axonflow/client.py:2158:28",
-  "axonflow/client.py:2199:69",
-  "axonflow/client.py:281:14",
-  "axonflow/client.py:286:24",
-  "axonflow/client.py:287:20",
-  "axonflow/client.py:483:44",
-  "axonflow/client.py:5905:25",
-  "axonflow/client.py:764:20",
-  "axonflow/client.py:849:20",
+  "axonflow/client.py:1031:16",
+  "axonflow/client.py:1108:16",
+  "axonflow/client.py:1580:37",
+  "axonflow/client.py:1621:18",
+  "axonflow/client.py:1679:37",
+  "axonflow/client.py:2168:24",
+  "axonflow/client.py:2178:33",
+  "axonflow/client.py:2179:31",
+  "axonflow/client.py:2215:28",
+  "axonflow/client.py:2256:69",
+  "axonflow/client.py:283:14",
+  "axonflow/client.py:288:24",
+  "axonflow/client.py:289:20",
+  "axonflow/client.py:485:44",
+  "axonflow/client.py:5962:25",
+  "axonflow/client.py:766:20",
+  "axonflow/client.py:851:20",
   "axonflow/execution.py:205:19",
   "axonflow/interceptors/anthropic.py:134:43",
   "axonflow/interceptors/anthropic.py:161:43",
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0721689..27df514 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [Unreleased]
 
+### Added
+
+- **`client.list_providers()`** — list configured LLM providers and their health status. Calls `GET /api/v1/llm-providers`, returns a list of `LLMProvider` records (each with optional `LLMProviderHealth`). Supports `provider_type` and `enabled` filters. Both async and sync entry points. Closes the parity gap with the Java SDK and the in-platform listing endpoint that's been live since v4.4.
+
 ### Fixed
 
 - `health_check_detailed()` no longer crashes with `AttributeError: 'dict' object has no attribute 'split'` when the platform returns per-language `min_sdk_version` and `recommended_sdk_version` maps (the actual on-the-wire shape since v4.8.0). `SDKCompatibility` now declares both fields as `dict[str, str]` and exposes `min_sdk_version_for(language)` / `recommended_sdk_version_for(language)` helpers, matching the Java + TypeScript SDKs. Legacy bare-string responses from older platforms are normalised to a python-keyed dict so callers don't have to branch on platform version.
diff --git a/axonflow/__init__.py b/axonflow/__init__.py
index 0869854..4b9d709 100644
--- a/axonflow/__init__.py
+++ b/axonflow/__init__.py
@@ -186,6 +186,8 @@
     ListExecutionsResponse,
     ListUsageRecordsOptions,
     ListWebhooksResponse,
+    LLMProvider,
+    LLMProviderHealth,
     MCPCheckInputRequest,
     MCPCheckInputResponse,
     MCPCheckOutputRequest,
@@ -419,6 +421,8 @@
     "CreateBudgetRequest",
     "UpdateBudgetRequest",
     "ListBudgetsOptions",
+    "LLMProvider",
+    "LLMProviderHealth",
     "Budget",
     "BudgetsResponse",
     "BudgetStatus",
diff --git a/axonflow/client.py b/axonflow/client.py
index 05106a4..c542ab1 100644
--- a/axonflow/client.py
+++ b/axonflow/client.py
@@ -47,7 +47,7 @@
     RegistrySummary,
 )
 
-from urllib.parse import quote
+from urllib.parse import quote, urlencode
 
 import httpx
 import structlog
@@ -167,6 +167,8 @@
     ListExecutionsResponse,
     ListUsageRecordsOptions,
     ListWebhooksResponse,
+    LLMProvider,
+    LLMProviderHealth,
     MCPCheckInputResponse,
     MCPCheckOutputResponse,
     MediaContent,
@@ -2126,6 +2128,61 @@ async def audit_tool_call(
             timestamp=response["timestamp"],
         )
 
+    # =========================================================================
+    # LLM Provider listing
+    # =========================================================================
+
+    async def list_providers(
+        self,
+        *,
+        provider_type: str | None = None,
+        enabled: bool | None = None,
+    ) -> list[LLMProvider]:
+        """List configured LLM providers.
+
+        Calls ``GET /api/v1/llm-providers``. Optional filters narrow by
+        provider type (``openai``, ``anthropic``, etc.) or enabled status.
+
+        Returns:
+            List of :class:`LLMProvider` records, each with an optional health snapshot.
+
+        Raises:
+            AxonFlowError: If the request fails.
+
+        Example:
+            >>> providers = await client.list_providers()
+            >>> for p in providers:
+            ...     print(p.name, p.type, p.health.status if p.health else "?")
+        """
+        query: dict[str, str] = {}
+        if provider_type is not None:
+            query["type"] = provider_type
+        if enabled is not None:
+            query["enabled"] = "true" if enabled else "false"
+
+        path = "/api/v1/llm-providers"
+        if query:
+            path = f"{path}?{urlencode(query)}"
+        response = await self._request("GET", path)
+
+        raw_providers = response.get("providers") or []
+        out: list[LLMProvider] = []
+        for raw in raw_providers:
+            health_raw = raw.get("health")
+            health = LLMProviderHealth(**health_raw) if isinstance(health_raw, dict) else None
+            out.append(
+                LLMProvider(
+                    name=raw.get("name", ""),
+                    type=raw.get("type", ""),
+                    enabled=bool(raw.get("enabled", True)),
+                    priority=int(raw.get("priority", 0) or 0),
+                    weight=int(raw.get("weight", 0) or 0),
+                    has_api_key=bool(raw.get("has_api_key", False)),
+                    health=health,
+                )
+            )
+        return out
+
     # =========================================================================
     # Circuit Breaker Observability Methods
     # =========================================================================
@@ -7201,6 +7258,17 @@ def audit_tool_call(
         """Record a non-LLM tool call in the audit trail."""
         return self._run_sync(self._async_client.audit_tool_call(request))
 
+    def list_providers(
+        self,
+        *,
+        provider_type: str | None = None,
+        enabled: bool | None = None,
+    ) -> list[LLMProvider]:
+        """List configured LLM providers (synchronous wrapper)."""
+        return self._run_sync(
+            self._async_client.list_providers(provider_type=provider_type, enabled=enabled)
+        )
+
     # Circuit Breaker Observability sync wrappers
 
     def get_circuit_breaker_status(self) -> CircuitBreakerStatusResponse:
diff --git a/axonflow/types.py b/axonflow/types.py
index c1a6bcc..1810288 100644
--- a/axonflow/types.py
+++ b/axonflow/types.py
@@ -1531,3 +1531,51 @@ class PolicyConflictResponse(BaseModel):
     conflict_count: int = Field(default=0, description="Number of conflicts found")
     checked_at: str = Field(default="", description="ISO 8601 timestamp")
     tier: str = Field(default="", description="License tier")
+
+
+# =========================================================================
+# LLM Provider listing — GET /api/v1/llm-providers
+# =========================================================================
+
+
+class LLMProviderHealth(BaseModel):
+    """Health snapshot for a registered LLM provider."""
+
+    status: str = Field(default="unknown", description="healthy | unhealthy | unknown")
+    message: str = Field(default="", description="Optional human-readable detail")
+    last_checked: str | None = Field(default=None, description="ISO 8601 timestamp")
+
+
+class LLMProvider(BaseModel):
+    """A registered LLM provider, as returned by ``client.list_providers()``."""
+
+    name: str
+    type: str
+    enabled: bool = True
+    priority: int = 0
+    weight: int = 0
+    has_api_key: bool = False
+    health: LLMProviderHealth | None = None
+
+
+# =========================================================================
+# MAP plane pending approvals — GET /api/v1/plans/approvals/pending (#1680)
+# =========================================================================
+
+
+class PendingPlanApproval(BaseModel):
+    """A single MAP step awaiting human approval.
+
+    Returned by ``client.get_pending_plan_approvals()``. Mirrors the Java
+    SDK's ``PendingApproval`` shape.
+    """
+
+    plan_id: str = Field(description="MAP plan identifier")
+    step_id: str = Field(description="Step identifier within the plan")
+    workflow_id: str | None = Field(default=None, description="Underlying workflow id, if any")
+    decision: str | None = Field(default=None, description="Decision label (e.g. require_approval)")
+    approval_status: str | None = Field(default=None, description="pending | approved | rejected")
+    step_completed_at: str | None = Field(default=None, description="ISO 8601 timestamp")
+    requested_at: str | None = Field(default=None, description="ISO 8601 timestamp")
+    requester: str | None = Field(default=None, description="User who triggered the request")
+    reason: str | None = Field(default=None, description="Why approval is needed")
diff --git a/tests/test_list_providers.py b/tests/test_list_providers.py
new file mode 100644
index 0000000..e4d547c
--- /dev/null
+++ b/tests/test_list_providers.py
@@ -0,0 +1,151 @@
+"""Regression tests for ``client.list_providers()``.
+
+Pins the wire-shape contract for ``GET /api/v1/llm-providers``: response
+is shaped ``{"providers": [...], "pagination": {...}}`` with each
+provider carrying an embedded health snapshot. Adding a regression test
+because the examples/llm-routing/e2e-tests example suite was silently
+swallowing AttributeError when the method didn't exist on the SDK.
+"""
+
+from __future__ import annotations
+
+from typing import Any
+
+import pytest
+from pytest_httpx import HTTPXMock
+
+from axonflow import AxonFlow, LLMProvider, LLMProviderHealth
+
+
+def _provider_response(*, providers: list[dict[str, Any]]) -> dict[str, Any]:
+    return {
+        "providers": providers,
+        "pagination": {"page": 1, "page_size": 20, "total": len(providers), "has_more": False},
+    }
+
+
+class TestListProviders:
+    def test_returns_typed_providers(
+        self, httpx_mock: HTTPXMock, config_dict: dict[str, Any]
+    ) -> None:
+        httpx_mock.add_response(
+            url="https://test.axonflow.com/api/v1/llm-providers",
+            json=_provider_response(
+                providers=[
+                    {
+                        "name": "anthropic",
+                        "type": "anthropic",
+                        "enabled": True,
+                        "priority": 0,
+                        "weight": 0,
+                        "has_api_key": True,
+                        "health": {
+                            "status": "healthy",
+                            "message": "provider is operational",
+                            "last_checked": "2026-04-28T08:45:12Z",
+                        },
+                    },
+                    {
+                        "name": "openai",
+                        "type": "openai",
+                        "enabled": True,
+                        "priority": 1,
+                        "weight": 0,
+                        "has_api_key": True,
+                        "health": {"status": "unhealthy", "message": "billing exceeded"},
+                    },
+                ]
+            ),
+        )
+        client = AxonFlow.sync(**config_dict)
+        try:
+            providers = client.list_providers()
+        finally:
+            client.close()
+
+        assert len(providers) == 2
+        assert all(isinstance(p, LLMProvider) for p in providers)
+
+        anthropic = providers[0]
+        assert anthropic.name == "anthropic"
+        assert anthropic.type == "anthropic"
+        assert anthropic.has_api_key is True
+        assert isinstance(anthropic.health, LLMProviderHealth)
+        assert anthropic.health.status == "healthy"
+
+        openai = providers[1]
+        assert openai.health is not None
+        assert openai.health.status == "unhealthy"
+        assert openai.health.message == "billing exceeded"
+
+    def test_empty_providers_list(self, httpx_mock: HTTPXMock, config_dict: dict[str, Any]) -> None:
+        httpx_mock.add_response(
+            url="https://test.axonflow.com/api/v1/llm-providers",
+            json=_provider_response(providers=[]),
+        )
+        client = AxonFlow.sync(**config_dict)
+        try:
+            providers = client.list_providers()
+        finally:
+            client.close()
+
+        assert providers == []
+
+    def test_filters_by_type_via_query_string(
+        self, httpx_mock: HTTPXMock, config_dict: dict[str, Any]
+    ) -> None:
+        httpx_mock.add_response(
+            url="https://test.axonflow.com/api/v1/llm-providers?type=anthropic",
+            json=_provider_response(
+                providers=[
+                    {
+                        "name": "anthropic",
+                        "type": "anthropic",
+                        "enabled": True,
+                        "has_api_key": True,
+                    }
+                ]
+            ),
+        )
+        client = AxonFlow.sync(**config_dict)
+        try:
+            providers = client.list_providers(provider_type="anthropic")
+        finally:
+            client.close()
+
+        assert len(providers) == 1
+        assert providers[0].type == "anthropic"
+
+    def test_filters_by_enabled_false(
+        self, httpx_mock: HTTPXMock, config_dict: dict[str, Any]
+    ) -> None:
+        httpx_mock.add_response(
+            url="https://test.axonflow.com/api/v1/llm-providers?enabled=false",
+            json=_provider_response(providers=[]),
+        )
+        client = AxonFlow.sync(**config_dict)
+        try:
+            providers = client.list_providers(enabled=False)
+        finally:
+            client.close()
+
+        assert providers == []
+
+    def test_provider_without_health_field(
+        self, httpx_mock: HTTPXMock, config_dict: dict[str, Any]
+    ) -> None:
+        # Older platforms or never-checked providers may omit health entirely.
+        httpx_mock.add_response(
+            url="https://test.axonflow.com/api/v1/llm-providers",
+            json=_provider_response(
+                providers=[{"name": "ollama", "type": "ollama", "enabled": True}]
+            ),
+        )
+        client = AxonFlow.sync(**config_dict)
+        try:
+            providers = client.list_providers()
+        finally:
+            client.close()
+
+        assert len(providers) == 1
+        assert providers[0].health is None