from core.llm import LLMClient, LLMParseError, LLMProviderError
import pytest


def test_parse_structured_response_from_fenced_json() -> None:
    """A JSON payload wrapped in a ```json code fence is parsed into a dict."""
    fenced_payload = (
        "```json\n"
        '{"next_action": "FINISH", "analysis": "done", "final_report": "# Report"}\n'
        "```"
    )
    client = LLMClient.from_fake_responses([fenced_payload])

    parsed = client.generate_structured_response(
        provider="openai",
        model="fake",
        system_prompt="prompt",
        user_prompt="prompt",
    )

    assert parsed["next_action"] == "FINISH"


def test_parse_structured_response_with_leading_and_trailing_text() -> None:
    """JSON embedded between chatty prose lines is still extracted and parsed."""
    json_line = '{"next_action": "FINISH", "analysis": "done", "final_report": "# Report"}'
    noisy_response = f"Here is the JSON you requested:\n{json_line}\nThanks."
    client = LLMClient.from_fake_responses([noisy_response])

    parsed = client.generate_structured_response(
        provider="anthropic",
        model="fake",
        system_prompt="prompt",
        user_prompt="prompt",
    )

    assert parsed["final_report"] == "# Report"


def test_fake_llm_client_returns_structured_payload() -> None:
    """A dict queued as a fake response comes back from the structured API."""
    canned_payload = {
        "next_action": "FINISH",
        "analysis": "done",
        "final_report": "# Report",
    }
    client = LLMClient.from_fake_responses([canned_payload])

    parsed = client.generate_structured_response(
        provider="openai",
        model="fake",
        system_prompt="prompt",
        user_prompt="prompt",
    )

    assert parsed["next_action"] == "FINISH"


def test_fake_llm_client_returns_text_payload() -> None:
    """A plain string queued as a fake response is returned unchanged as text."""
    canned_text = "plain text response"
    client = LLMClient.from_fake_responses([canned_text])

    answer = client.generate_text_response(
        provider="anthropic",
        model="fake",
        system_prompt="prompt",
        user_prompt="prompt",
    )

    assert answer == canned_text


def test_openai_provider_requires_api_key(monkeypatch: pytest.MonkeyPatch) -> None:
    """Text generation against OpenAI fails fast when OPENAI_API_KEY is unset."""
    monkeypatch.delenv("OPENAI_API_KEY", raising=False)

    with pytest.raises(LLMProviderError):
        LLMClient().generate_text_response(
            provider="openai",
            model="gpt-4.1",
            system_prompt="prompt",
            user_prompt="prompt",
        )


def test_anthropic_provider_requires_api_key(monkeypatch: pytest.MonkeyPatch) -> None:
    """Structured generation against Anthropic fails fast without ANTHROPIC_API_KEY."""
    monkeypatch.delenv("ANTHROPIC_API_KEY", raising=False)

    with pytest.raises(LLMProviderError):
        LLMClient().generate_structured_response(
            provider="anthropic",
            model="claude-sonnet-4-5",
            system_prompt="prompt",
            user_prompt="prompt",
        )


def test_openai_client_uses_explicit_timeout(monkeypatch: pytest.MonkeyPatch) -> None:
    """The underlying OpenAI SDK client is constructed with a non-None timeout."""
    monkeypatch.setenv("OPENAI_API_KEY", "test-key")

    sdk_client = LLMClient()._build_openai_client()

    assert sdk_client.timeout is not None


def test_openai_provider_missing_api_key_raises_provider_error(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """The structured-response path also raises LLMProviderError without a key."""
    monkeypatch.delenv("OPENAI_API_KEY", raising=False)

    with pytest.raises(LLMProviderError):
        LLMClient().generate_structured_response(
            provider="openai",
            model="gpt-4.1",
            system_prompt="prompt",
            user_prompt="prompt",
        )


def test_anthropic_client_uses_explicit_timeout(monkeypatch: pytest.MonkeyPatch) -> None:
    """The underlying Anthropic SDK client is constructed with a non-None timeout."""
    monkeypatch.setenv("ANTHROPIC_API_KEY", "test-key")

    sdk_client = LLMClient()._build_anthropic_client()

    assert sdk_client.timeout is not None


def test_real_structured_response_retries_once_with_stronger_json_prompt() -> None:
    """A parse failure triggers exactly one retry with a stricter JSON prompt.

    The first provider call raises LLMParseError; the client must retry once,
    strengthening the user prompt with an explicit "Return only valid JSON"
    instruction, and return the payload produced by the second attempt.
    """

    class RetryingClient(LLMClient):
        """Test double whose first OpenAI call fails to parse, second succeeds."""

        def __init__(self) -> None:
            super().__init__()
            # Records every user prompt the client sends, in call order.
            self.user_prompts: list[str] = []
            self.calls = 0

        def _generate_openai_structured_response(
            self,
            model: str,
            system_prompt: str,
            user_prompt: str,
        ) -> dict[str, object]:
            self.calls += 1
            self.user_prompts.append(user_prompt)
            if self.calls == 1:
                # Simulate the model emitting unparseable output on the first try.
                raise LLMParseError("model did not return valid JSON")
            return {"next_action": "FINISH", "analysis": "done", "final_report": "# Report"}

    client = RetryingClient()

    result = client.generate_structured_response(
        provider="openai",
        model="gpt-4.1",
        system_prompt="prompt",
        user_prompt="original prompt",
    )

    assert result["next_action"] == "FINISH"
    assert client.calls == 2
    # The retry prompt must be *stronger* than the first: the JSON-only
    # instruction appears only on the second attempt, not the first.
    assert "Return only valid JSON" not in client.user_prompts[0]
    assert "Return only valid JSON" in client.user_prompts[1]
