"""LLM client abstraction layer with Protocol pattern."""

from __future__ import annotations

import os
from typing import Protocol

import anthropic
import openai
from dotenv import load_dotenv

# Pull variables from a local .env file into the process environment at
# import time, so API keys / provider config are visible to the SDKs below.
load_dotenv()


class LLMClient(Protocol):
    """Protocol defining the LLM client interface.

    Structural (duck) type: any object exposing a matching ``chat`` method
    conforms — no inheritance required.
    """

    def chat(self, system: str, messages: list[dict[str, str]]) -> str:
        """Send a chat request and return the assistant's text response.

        Args:
            system: System prompt text.
            messages: Conversation turns as ``{"role": ..., "content": ...}``
                dicts, oldest first.

        Returns:
            The assistant's reply as plain text.
        """
        ...


class AnthropicClient:
    """LLM client wrapping the Anthropic SDK.

    Satisfies the ``LLMClient`` protocol via :meth:`chat`.
    """

    def __init__(
        self,
        client: "anthropic.Anthropic",
        model: str,
        max_tokens: int = 4096,
    ) -> None:
        """Store the SDK client and request configuration.

        Args:
            client: Configured Anthropic SDK client.
            model: Model identifier passed to the API.
            max_tokens: Upper bound on generated tokens per request.
                Default preserves the previously hard-coded 4096.
        """
        self._client = client
        self._model = model
        self._max_tokens = max_tokens

    @property
    def client(self) -> "anthropic.Anthropic":
        """Expose underlying SDK client for testing."""
        return self._client

    @property
    def model(self) -> str:
        """Expose model name for testing."""
        return self._model

    def chat(self, system: str, messages: list[dict[str, str]]) -> str:
        """Send a chat request via Anthropic API.

        Args:
            system: System prompt (Anthropic takes it as a separate field,
                not as a message).
            messages: Conversation turns as role/content dicts.

        Returns:
            Text of the first content block of the response.
        """
        response = self._client.messages.create(
            model=self._model,
            max_tokens=self._max_tokens,
            system=system,
            messages=messages,
        )
        # NOTE(review): assumes the first content block is a text block —
        # holds for plain text chats; tool-use responses would differ.
        return response.content[0].text


class OpenAIClient:
    """LLM client wrapping the OpenAI SDK (or an OpenAI-compatible server).

    Satisfies the ``LLMClient`` protocol via :meth:`chat`.
    """

    def __init__(self, client: "openai.OpenAI", model: str) -> None:
        """Store the SDK client and target model.

        Args:
            client: Configured OpenAI SDK client.
            model: Model identifier passed to the API.
        """
        self._client = client
        self._model = model

    @property
    def client(self) -> "openai.OpenAI":
        """Expose underlying SDK client for testing."""
        return self._client

    @property
    def model(self) -> str:
        """Expose model name for testing."""
        return self._model

    def chat(self, system: str, messages: list[dict[str, str]]) -> str:
        """Send a chat request via OpenAI API.

        The system prompt is prepended as a ``"system"`` role message,
        since the OpenAI chat endpoint has no separate system field.

        Args:
            system: System prompt text.
            messages: Conversation turns as role/content dicts.

        Returns:
            The assistant's reply text, or ``""`` when the API returns no
            content (``message.content`` is Optional in the OpenAI SDK,
            e.g. for tool-call responses).
        """
        full_messages = [{"role": "system", "content": system}, *messages]
        response = self._client.chat.completions.create(
            model=self._model,
            messages=full_messages,
        )
        # Guard against None content so the declared -> str contract holds.
        return response.choices[0].message.content or ""


# Fallback model per provider, used by create_client when LLM_MODEL is unset.
_DEFAULT_MODELS: dict[str, str] = {
    "anthropic": "claude-sonnet-4-20250514",
    "openai": "gpt-4o",
}


def create_client() -> AnthropicClient | OpenAIClient:
    """Create an LLM client based on environment variables.

    Env vars:
        LLM_PROVIDER: "anthropic" (default) or "openai"; case-insensitive,
            surrounding whitespace is ignored.
        LLM_MODEL: model name (optional, has sensible per-provider defaults).
        OPENAI_BASE_URL: optional custom endpoint for the OpenAI client
            (e.g. a local OpenAI-compatible server); OpenAI provider only.

    Returns:
        A client satisfying the ``LLMClient`` protocol.

    Raises:
        ValueError: if LLM_PROVIDER names an unknown provider.
    """
    # Normalize so values like "OpenAI" or " anthropic " still resolve.
    provider = os.environ.get("LLM_PROVIDER", "anthropic").strip().lower()
    model = os.environ.get("LLM_MODEL", _DEFAULT_MODELS.get(provider, ""))

    if provider == "anthropic":
        # API key is read from the environment by the SDK itself.
        return AnthropicClient(client=anthropic.Anthropic(), model=model)
    if provider == "openai":
        base_url = os.environ.get("OPENAI_BASE_URL")
        sdk_client = openai.OpenAI(base_url=base_url) if base_url else openai.OpenAI()
        return OpenAIClient(client=sdk_client, model=model)
    raise ValueError(f"Unknown LLM provider: {provider}. Use 'anthropic' or 'openai'.")
