"""Provider-agnostic LLM adapter with fake-response support."""

from __future__ import annotations

import json
import os
from collections import deque
from collections.abc import Callable
from dataclasses import dataclass, field
from typing import Any

from anthropic import Anthropic
from openai import OpenAI


# Per-request network timeout (seconds) passed to both provider SDK clients.
DEFAULT_LLM_TIMEOUT_SECONDS = 30.0
# Appended to the user prompt on the single retry made after a JSON parse failure.
JSON_RETRY_SUFFIX = "\n\nReturn only valid JSON. Do not include markdown fences, explanations, or any text before or after the JSON object."


class LLMProviderError(RuntimeError):
    """Raised when the provider or its credentials are unavailable.

    In this module it signals a missing API-key environment variable
    (OPENAI_API_KEY or ANTHROPIC_API_KEY).
    """


class LLMParseError(ValueError):
    """Raised when model output cannot be parsed into the expected shape.

    Also raised in fake mode when the queued response has the wrong type
    for the requested call, or when the fake-response queue is exhausted.
    """


@dataclass
class LLMClient:
    """Minimal LLM adapter used by the framework runtime.

    Dispatches to OpenAI or Anthropic for real calls, or replays queued
    canned responses when ``fake_mode`` is set (deterministic tests).
    """

    # FIFO queue of canned responses, consumed one per call in fake mode.
    fake_responses: deque[Any] = field(default_factory=deque)
    # When True, no provider is ever contacted; replies come from the queue.
    fake_mode: bool = False

    @classmethod
    def from_fake_responses(cls, responses: list[Any]) -> "LLMClient":
        """Build a fake-mode client that replays ``responses`` in order."""
        return cls(fake_responses=deque(responses), fake_mode=True)

    def generate_structured_response(
        self,
        provider: str,
        model: str,
        system_prompt: str,
        user_prompt: str,
    ) -> dict[str, Any]:
        """Ask the model for a JSON object and return it parsed.

        A parse failure triggers exactly one retry with a strengthened
        "return only JSON" instruction appended to the user prompt.

        Raises:
            LLMParseError: the (fake or real) response is not a JSON object.
            LLMProviderError: the provider's API key is missing.
            ValueError: ``provider`` is not a supported provider name.
        """
        if self.fake_mode:
            response = self._pop_fake_response()
            if isinstance(response, dict):
                return response
            if isinstance(response, str):
                return _parse_json_object(response)
            raise LLMParseError("expected structured fake response")

        provider = provider.lower()
        if provider == "openai":
            generator = self._generate_openai_structured_response
        elif provider == "anthropic":
            generator = self._generate_anthropic_structured_response
        else:
            raise ValueError(f"unsupported provider: {provider}")
        return self._generate_structured_with_retry(
            generator=generator,
            model=model,
            system_prompt=system_prompt,
            user_prompt=user_prompt,
        )

    def _generate_structured_with_retry(
        self,
        generator: Callable[..., dict[str, Any]],
        model: str,
        system_prompt: str,
        user_prompt: str,
    ) -> dict[str, Any]:
        """Call ``generator`` once; on LLMParseError retry once with a
        stricter JSON-only instruction appended to the user prompt."""
        try:
            return generator(model=model, system_prompt=system_prompt, user_prompt=user_prompt)
        except LLMParseError:
            # Second and last attempt: a parse failure here propagates.
            strengthened_user_prompt = user_prompt + JSON_RETRY_SUFFIX
            return generator(
                model=model,
                system_prompt=system_prompt,
                user_prompt=strengthened_user_prompt,
            )

    def generate_text_response(
        self,
        provider: str,
        model: str,
        system_prompt: str,
        user_prompt: str,
    ) -> str:
        """Ask the model for free-form text and return it verbatim.

        Raises:
            LLMParseError: the queued fake response is not a string.
            LLMProviderError: the provider's API key is missing.
            ValueError: ``provider`` is not a supported provider name.
        """
        if self.fake_mode:
            response = self._pop_fake_response()
            if not isinstance(response, str):
                raise LLMParseError("expected text fake response")
            return response

        provider = provider.lower()
        if provider == "openai":
            return self._generate_openai_text_response(
                model=model, system_prompt=system_prompt, user_prompt=user_prompt
            )
        if provider == "anthropic":
            return self._generate_anthropic_text_response(
                model=model, system_prompt=system_prompt, user_prompt=user_prompt
            )
        raise ValueError(f"unsupported provider: {provider}")

    def _build_openai_client(self) -> OpenAI:
        """Create an OpenAI client from environment credentials.

        Raises:
            LLMProviderError: OPENAI_API_KEY is not set.
        """
        api_key = os.environ.get("OPENAI_API_KEY")
        if not api_key:
            raise LLMProviderError("OPENAI_API_KEY is required for provider 'openai'")
        base_url = os.environ.get("OPENAI_BASE_URL")
        if base_url:
            return OpenAI(api_key=api_key, base_url=base_url, timeout=DEFAULT_LLM_TIMEOUT_SECONDS)
        return OpenAI(api_key=api_key, timeout=DEFAULT_LLM_TIMEOUT_SECONDS)

    def _generate_openai_text_response(
        self,
        model: str,
        system_prompt: str,
        user_prompt: str,
    ) -> str:
        """Issue one OpenAI Responses API call and return its output text."""
        client = self._build_openai_client()
        completion = client.responses.create(
            model=model,
            input=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ],
        )
        # Defensive: fall back to "" if the SDK response lacks output_text.
        return getattr(completion, "output_text", "")

    def _generate_openai_structured_response(
        self,
        model: str,
        system_prompt: str,
        user_prompt: str,
    ) -> dict[str, Any]:
        """OpenAI call whose output text is parsed as a JSON object."""
        text = self._generate_openai_text_response(
            model=model, system_prompt=system_prompt, user_prompt=user_prompt
        )
        return _parse_json_object(text)

    def _build_anthropic_client(self) -> Anthropic:
        """Create an Anthropic client from environment credentials.

        Raises:
            LLMProviderError: ANTHROPIC_API_KEY is not set.
        """
        api_key = os.environ.get("ANTHROPIC_API_KEY")
        if not api_key:
            raise LLMProviderError("ANTHROPIC_API_KEY is required for provider 'anthropic'")
        return Anthropic(api_key=api_key, timeout=DEFAULT_LLM_TIMEOUT_SECONDS)

    def _generate_anthropic_text_response(
        self,
        model: str,
        system_prompt: str,
        user_prompt: str,
    ) -> str:
        """Issue one Anthropic Messages API call and return its text blocks."""
        client = self._build_anthropic_client()
        message = client.messages.create(
            model=model,
            max_tokens=4096,
            system=system_prompt,
            messages=[{"role": "user", "content": user_prompt}],
        )
        return _extract_anthropic_text_blocks(message.content)

    def _generate_anthropic_structured_response(
        self,
        model: str,
        system_prompt: str,
        user_prompt: str,
    ) -> dict[str, Any]:
        """Anthropic call whose output text is parsed as a JSON object."""
        text = self._generate_anthropic_text_response(
            model=model, system_prompt=system_prompt, user_prompt=user_prompt
        )
        return _parse_json_object(text)

    def _pop_fake_response(self) -> Any:
        """Dequeue and return the next fake response.

        Raises:
            LLMParseError: the fake-response queue is exhausted.
        """
        if not self.fake_responses:
            raise LLMParseError("no fake responses available")
        return self.fake_responses.popleft()


def _parse_json_object(text: str) -> dict[str, Any]:
    stripped = text.strip()
    if stripped.startswith("```"):
        stripped = _strip_fenced_block(stripped)

    try:
        payload = json.loads(stripped)
    except json.JSONDecodeError:
        candidate = _extract_first_json_object(stripped)
        if candidate is None:
            raise LLMParseError("model did not return valid JSON") from None
        try:
            payload = json.loads(candidate)
        except json.JSONDecodeError as exc:
            raise LLMParseError("model did not return valid JSON") from exc

    if not isinstance(payload, dict):
        raise LLMParseError("expected JSON object response")
    return payload


def _strip_fenced_block(text: str) -> str:
    lines = text.splitlines()
    if len(lines) >= 2 and lines[0].startswith("```") and lines[-1].startswith("```"):
        return "\n".join(lines[1:-1]).strip()
    return text


def _extract_first_json_object(text: str) -> str | None:
    start = text.find("{")
    if start == -1:
        return None

    depth = 0
    in_string = False
    escaped = False
    for index in range(start, len(text)):
        char = text[index]
        if in_string:
            if escaped:
                escaped = False
            elif char == "\\":
                escaped = True
            elif char == '"':
                in_string = False
            continue

        if char == '"':
            in_string = True
        elif char == "{":
            depth += 1
        elif char == "}":
            depth -= 1
            if depth == 0:
                return text[start : index + 1]
    return None


def _extract_anthropic_text_blocks(blocks: Any) -> str:
    parts: list[str] = []
    for block in blocks:
        if getattr(block, "type", "") != "text":
            continue
        text = getattr(block, "text", "")
        if isinstance(text, str):
            parts.append(text)
    return "".join(parts)
