"""Tests for the PM Agent router."""

from __future__ import annotations

import json

import pytest

from src.models import AgentConfig, MeetingState, PMDecision
from src.pm import build_pm_system_prompt, run_pm


class MockLLMClient:
    """Stub LLM client that always replies with one fixed canned string.

    Records the number of ``chat`` invocations and the most recent system
    prompt so tests can assert on how the PM router used the client.
    """

    def __init__(self, response: str) -> None:
        # Canned reply handed back from every chat() call.
        self.response = response
        # How many times chat() has been invoked so far.
        self.call_count = 0
        # System prompt seen on the most recent chat() call.
        self.last_system: str = ""

    def chat(self, system: str, messages: list[dict[str, str]]) -> str:
        """Record the call's system prompt, bump the counter, return the canned reply."""
        self.last_system = system
        self.call_count += 1
        return self.response


# Shared fixture: minimal agent registry (name -> AgentConfig) reused by every
# test below; two agents are enough to exercise routing and prompt building.
SAMPLE_AGENTS: dict[str, AgentConfig] = {
    "architect": AgentConfig(name="architect", role="Software Architect", system_prompt="You are an architect."),
    "devops": AgentConfig(name="devops", role="DevOps Engineer", system_prompt="You are DevOps."),
}


class TestBuildPMSystemPrompt:
    """Behavioral checks for build_pm_system_prompt."""

    def test_includes_agent_names_and_roles(self) -> None:
        """Every agent name and role string appears verbatim in the prompt."""
        prompt = build_pm_system_prompt(SAMPLE_AGENTS)
        for expected in ("architect", "Software Architect", "devops", "DevOps Engineer"):
            assert expected in prompt

    def test_includes_json_schema_instruction(self) -> None:
        """The prompt mentions JSON output and both routing action keywords."""
        prompt = build_pm_system_prompt(SAMPLE_AGENTS)
        for keyword in ("JSON", "CALL_AGENT", "FINISH"):
            assert keyword in prompt

    def test_pm_prompt_encourages_depth(self) -> None:
        """PM system prompt includes keywords encouraging deep discussion."""
        lowered = build_pm_system_prompt(SAMPLE_AGENTS).lower()
        assert "follow-up" in lowered or "follow up" in lowered
        assert "deeper" in lowered or "deep" in lowered


class TestRunPM:
    """Tests for run_pm function.

    Fix: the original defined an identical ``CaptureMockClient`` class twice
    (inside two different test methods) and used ad-hoc ``nonlocal`` counters
    in two more one-off mocks. Both patterns are consolidated into two shared
    nested helpers, consistent with the module-level ``MockLLMClient`` which
    tracks call state on the instance. All test names, inputs, and assertions
    are unchanged.
    """

    class _CaptureClient:
        """Mock LLM client returning a fixed response while recording the
        messages passed to ``chat`` so tests can inspect the PM's input."""

        def __init__(self, response: str) -> None:
            self.response = response
            self.last_messages: list[dict[str, str]] = []

        def chat(self, system: str, messages: list[dict[str, str]]) -> str:
            self.last_messages = messages
            return self.response

    class _SequencedClient:
        """Mock LLM client replying from a fixed sequence of responses
        (repeating the final one once exhausted) and counting ``chat`` calls."""

        def __init__(self, responses: list[str]) -> None:
            self.responses = responses
            self.call_count = 0

        def chat(self, system: str, messages: list[dict[str, str]]) -> str:
            # Clamp the index so any extra calls keep getting the last response.
            index = min(self.call_count, len(self.responses) - 1)
            self.call_count += 1
            return self.responses[index]

    def test_run_pm_returns_call_agent_decision(self) -> None:
        """A valid CALL_AGENT response parses into a PMDecision."""
        decision_json = json.dumps({
            "analysis": "Need architect input.",
            "next_action": "CALL_AGENT",
            "target_agent": "architect",
            "prompt_for_agent": "What about scalability?",
        })
        mock_client = MockLLMClient(response=decision_json)
        state = MeetingState(topic="Database migration")
        decision = run_pm(state, SAMPLE_AGENTS, mock_client)
        assert isinstance(decision, PMDecision)
        assert decision.next_action == "CALL_AGENT"
        assert decision.target_agent == "architect"

    def test_pm_user_content_includes_context(self) -> None:
        """When MeetingState has context, PM receives it in user content."""
        decision_json = json.dumps({
            "analysis": "OK.",
            "next_action": "FINISH",
            "final_report": "# Done",
        })
        mock_client = self._CaptureClient(decision_json)
        state = MeetingState(topic="Test", context="Important background info")
        run_pm(state, SAMPLE_AGENTS, mock_client)
        user_msg = mock_client.last_messages[0]["content"]
        assert "Important background info" in user_msg
        assert "Background Context" in user_msg

    def test_run_pm_returns_finish_decision(self) -> None:
        """A valid FINISH response carries the final report through."""
        decision_json = json.dumps({
            "analysis": "All covered.",
            "next_action": "FINISH",
            "final_report": "# Report\n\nDone.",
        })
        mock_client = MockLLMClient(response=decision_json)
        state = MeetingState(topic="Test")
        decision = run_pm(state, SAMPLE_AGENTS, mock_client)
        assert decision.next_action == "FINISH"
        assert "Report" in decision.final_report

    def test_run_pm_retries_on_invalid_json(self) -> None:
        """run_pm retries on unparseable output and succeeds on the 3rd call."""
        valid = json.dumps({
            "analysis": "OK",
            "next_action": "FINISH",
            "final_report": "# Done",
        })
        # First two calls are invalid JSON; the third is well-formed.
        mock_client = self._SequencedClient(
            ["This is not JSON", "This is not JSON", valid]
        )
        state = MeetingState(topic="Test")
        decision = run_pm(state, SAMPLE_AGENTS, mock_client)
        assert decision.next_action == "FINISH"
        assert mock_client.call_count == 3

    def test_run_pm_raises_after_max_retries(self) -> None:
        """Persistently invalid output exhausts retries and raises RuntimeError."""
        mock_client = MockLLMClient(response="not json at all")
        state = MeetingState(topic="Test")
        with pytest.raises(RuntimeError, match="Failed to get valid PM decision"):
            run_pm(state, SAMPLE_AGENTS, mock_client)

    def test_run_pm_validates_target_agent_exists(self) -> None:
        """An unknown target_agent triggers a retry; the valid retry wins."""
        bad = json.dumps({
            "analysis": "Need input.",
            "next_action": "CALL_AGENT",
            "target_agent": "nonexistent_agent",
            "prompt_for_agent": "Hello?",
        })
        good = json.dumps({
            "analysis": "OK.",
            "next_action": "CALL_AGENT",
            "target_agent": "architect",
            "prompt_for_agent": "What do you think?",
        })
        mock_client = self._SequencedClient([bad, good])
        state = MeetingState(topic="Test")
        decision = run_pm(state, SAMPLE_AGENTS, mock_client)
        assert decision.target_agent == "architect"
        assert mock_client.call_count == 2

    def test_pm_system_prompt_handles_human_feedback(self) -> None:
        """PM system prompt includes instructions for handling HUMAN entries."""
        prompt = build_pm_system_prompt(SAMPLE_AGENTS)
        assert "HUMAN" in prompt

    def test_run_pm_with_context_manager_uses_compressed_context(self) -> None:
        """When context_manager is provided, PM receives compressed whiteboard."""
        from src.context_manager import ContextManager

        decision_json = json.dumps({
            "analysis": "OK.",
            "next_action": "FINISH",
            "final_report": "# Done",
        })
        mock_client = self._CaptureClient(decision_json)
        state = MeetingState(topic="Test", context="Some context", current_round=2)
        cm = ContextManager(budget_tokens=6000)
        decision = run_pm(state, SAMPLE_AGENTS, mock_client, context_manager=cm)
        assert isinstance(decision, PMDecision)
        # PM received content built by context_manager
        user_msg = mock_client.last_messages[0]["content"]
        assert "Test" in user_msg

    def test_run_pm_without_context_manager_backward_compatible(self) -> None:
        """When context_manager is None, run_pm behaves exactly as before."""
        decision_json = json.dumps({
            "analysis": "OK.",
            "next_action": "FINISH",
            "final_report": "# Done",
        })
        mock_client = MockLLMClient(response=decision_json)
        state = MeetingState(topic="Test", context="Background")
        decision = run_pm(state, SAMPLE_AGENTS, mock_client)
        assert decision.next_action == "FINISH"