"""Tests for the context management system."""

from __future__ import annotations

from datetime import datetime

import pytest

from src.models import MeetingState, ScratchpadEntry


# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------

def _make_entry(agent: str, content: str) -> ScratchpadEntry:
    """Build a ScratchpadEntry pinned to one deterministic timestamp.

    Keeping the timestamp fixed makes test output stable and comparable.
    """
    fixed_ts = datetime(2026, 3, 30, 10, 0)
    return ScratchpadEntry(agent_name=agent, content=content, timestamp=fixed_ts)


class MockLLMClient:
    """Stand-in LLM client: records every call and echoes a canned reply."""

    def __init__(self, response: str = "- Key point A\n- Key point B") -> None:
        # Canned text returned by every chat() call.
        self.response = response
        # Call bookkeeping, inspected by the tests.
        self.call_count = 0
        self.last_system: str = ""
        self.last_messages: list[dict[str, str]] = []

    def chat(self, system: str, messages: list[dict[str, str]]) -> str:
        """Record the call's arguments and return the canned response."""
        self.call_count = self.call_count + 1
        self.last_system, self.last_messages = system, messages
        return self.response


# ===========================================================================
# estimate_tokens
# ===========================================================================


class TestEstimateTokens:
    """Behavioral checks for the estimate_tokens utility."""

    def test_estimate_tokens_english(self) -> None:
        """Roughly 4 characters per token for English prose."""
        from src.context_manager import estimate_tokens

        sample = "Hello world this is a test sentence with some words."
        # ~52 chars of English → ~13 tokens; accept a generous window
        assert 8 <= estimate_tokens(sample) <= 20

    def test_estimate_tokens_chinese(self) -> None:
        """Roughly 1.5 characters per token for Chinese text."""
        from src.context_manager import estimate_tokens

        sample = "这是一个中文测试句子用于验证中文分词估算功能"
        # 21 Chinese chars → ~14 tokens; accept a generous window
        assert 10 <= estimate_tokens(sample) <= 25

    def test_estimate_tokens_mixed(self) -> None:
        """Blended Chinese/English input still yields a positive count."""
        from src.context_manager import estimate_tokens

        sample = "Hello世界，this is a test测试"
        assert estimate_tokens(sample) > 0

    def test_estimate_tokens_empty(self) -> None:
        """An empty string costs zero tokens."""
        from src.context_manager import estimate_tokens

        assert estimate_tokens("") == 0


# ===========================================================================
# ContextManager.compact_scratchpad — Level 0 (no compression)
# ===========================================================================


class TestCompactScratchpadLevel0:
    """A scratchpad within budget passes through untouched (Level 0)."""

    def test_short_scratchpad_unchanged(self) -> None:
        """Under-budget scratchpad keeps its full text and never hits the LLM."""
        from src.context_manager import ContextManager

        pad = [
            _make_entry("architect", "Use microservices."),
            _make_entry("devops", "Sounds good."),
        ]
        manager = ContextManager(budget_tokens=6000)
        llm = MockLLMClient()

        output = manager.compact_scratchpad(pad, llm)

        # Everything survives verbatim
        for fragment in ("architect", "Use microservices.", "devops", "Sounds good."):
            assert fragment in output
        # The LLM was never consulted
        assert llm.call_count == 0


# ===========================================================================
# ContextManager.compact_scratchpad — Level 1 (truncation)
# ===========================================================================


class TestCompactScratchpadLevel1:
    """Over-budget scratchpads get their early entries truncated (Level 1)."""

    def test_truncates_early_entries(self) -> None:
        """Older entries shrink while the newest 2 stay intact."""
        from src.context_manager import ContextManager

        big = "A" * 2000  # ~500 tokens of English
        pad = [
            _make_entry("agent_a", big),
            _make_entry("agent_b", big),
            _make_entry("agent_c", "Recent response C."),
            _make_entry("agent_d", "Recent response D."),
        ]
        # Full text blows the budget, but the truncated form should fit
        manager = ContextManager(budget_tokens=500)
        llm = MockLLMClient()

        output = manager.compact_scratchpad(pad, llm)

        # The two newest entries survive verbatim
        assert "Recent response C." in output
        assert "Recent response D." in output
        # Older entries must have been cut down from their full 2000 chars
        assert big not in output

    def test_recent_entries_preserved_in_full(self) -> None:
        """Regardless of size, the final 2 entries stay verbatim."""
        from src.context_manager import ContextManager

        pad = [
            _make_entry("early1", "X" * 3000),
            _make_entry("early2", "Y" * 3000),
            _make_entry("recent1", "Important recent insight about architecture."),
            _make_entry("recent2", "Final clarification on deployment."),
        ]
        manager = ContextManager(budget_tokens=500)
        llm = MockLLMClient()

        output = manager.compact_scratchpad(pad, llm)

        assert "Important recent insight about architecture." in output
        assert "Final clarification on deployment." in output


# ===========================================================================
# ContextManager.compact_scratchpad — Level 2 (LLM summarization)
# ===========================================================================


class TestCompactScratchpadLevel2:
    """Extreme scratchpad sizes fall back to LLM summarization (Level 2)."""

    def test_very_long_scratchpad_calls_llm(self) -> None:
        """If truncation can't reach budget, the LLM summarizes early entries."""
        from src.context_manager import ContextManager

        # Ten huge entries guarantee truncation alone cannot satisfy the budget
        pad = [_make_entry(f"agent_{i}", "Z" * 4000) for i in range(10)]
        # ...plus two small recent entries that must survive
        pad.append(_make_entry("recent1", "Recent point 1."))
        pad.append(_make_entry("recent2", "Recent point 2."))

        manager = ContextManager(budget_tokens=100)
        llm = MockLLMClient(response="- Summary of early discussion")

        output = manager.compact_scratchpad(pad, llm)

        # Exactly one summarization call was made
        assert llm.call_count == 1
        # ...and its output landed in the compacted text
        assert "Summary of early discussion" in output
        # The most recent entries stay verbatim
        assert "Recent point 1." in output
        assert "Recent point 2." in output


# ===========================================================================
# ContextManager.build_pm_context
# ===========================================================================


class TestBuildPMContext:
    """Coverage for PM-facing context assembly."""

    def test_build_pm_context_includes_topic_and_scratchpad(self) -> None:
        """Topic and (compressed) scratchpad both land in PM context."""
        from src.context_manager import ContextManager

        state = MeetingState(topic="API Design", context="REST vs GraphQL")
        state.scratchpad = [_make_entry("architect", "Prefer GraphQL.")]
        manager = ContextManager(budget_tokens=6000)
        llm = MockLLMClient()

        output = manager.build_pm_context(state, llm)

        assert "API Design" in output
        assert "Prefer GraphQL." in output

    def test_build_pm_context_uses_context_summary_when_available(self) -> None:
        """Past round 1, the PM sees the summary rather than the full context."""
        from src.context_manager import ContextManager

        huge = "A" * 20000  # very long original context
        state = MeetingState(
            topic="Big Topic",
            context=huge,
            context_summary="Short summary of background.",
            current_round=2,  # beyond the first round → summary path
        )
        manager = ContextManager(budget_tokens=6000)
        llm = MockLLMClient()

        output = manager.build_pm_context(state, llm)

        assert "Short summary of background." in output
        # The raw 20000-char context must be absent
        assert huge not in output

    def test_build_pm_context_first_round_includes_full_context(self) -> None:
        """On round 0 the PM receives the full context despite a summary."""
        from src.context_manager import ContextManager

        background = "Detailed background about the project requirements."
        state = MeetingState(
            topic="Topic",
            context=background,
            context_summary="Short summary.",
            current_round=0,
        )
        manager = ContextManager(budget_tokens=6000)
        llm = MockLLMClient()

        assert background in manager.build_pm_context(state, llm)


# ===========================================================================
# ContextManager.build_agent_context
# ===========================================================================


class TestBuildAgentContext:
    """Coverage for agent-facing context assembly."""

    def test_build_agent_context_includes_prompt_and_summary(self) -> None:
        """The PM's prompt is embedded in the agent's context."""
        from src.context_manager import ContextManager

        state = MeetingState(topic="Testing")
        state.scratchpad = [_make_entry("pm", "Let's discuss testing.")]
        manager = ContextManager(budget_tokens=6000)
        llm = MockLLMClient()

        output = manager.build_agent_context(
            state=state,
            target_agent="architect",
            pm_prompt="What testing strategy do you recommend?",
            client=llm,
        )

        assert "What testing strategy do you recommend?" in output

    def test_build_agent_context_uses_context_summary(self) -> None:
        """Agents only ever see the summary, never the raw context."""
        from src.context_manager import ContextManager

        huge = "B" * 20000
        state = MeetingState(
            topic="Topic",
            context=huge,
            context_summary="Brief background.",
        )
        manager = ContextManager(budget_tokens=6000)
        llm = MockLLMClient()

        output = manager.build_agent_context(
            state=state,
            target_agent="devops",
            pm_prompt="Deploy question?",
            client=llm,
        )

        assert "Brief background." in output
        assert huge not in output


# ===========================================================================
# Context summary generation
# ===========================================================================


class TestContextSummaryGeneration:
    """Coverage for summarize_context (long background → summary)."""

    def test_short_context_not_summarized(self) -> None:
        """Below-threshold context is returned unchanged with no LLM call."""
        from src.context_manager import ContextManager

        manager = ContextManager(budget_tokens=6000)
        llm = MockLLMClient()

        output = manager.summarize_context("Short background.", llm)

        assert output == "Short background."
        assert llm.call_count == 0

    def test_long_context_summarized_via_llm(self) -> None:
        """Above-threshold context goes through the LLM exactly once."""
        from src.context_manager import ContextManager

        oversized = "C" * 15000  # Way over threshold
        manager = ContextManager(budget_tokens=6000)
        llm = MockLLMClient(response="Summarized context.")

        output = manager.summarize_context(oversized, llm)

        assert output == "Summarized context."
        assert llm.call_count == 1


# ===========================================================================
# Edge cases
# ===========================================================================


class TestEdgeCases:
    """Boundary-condition coverage for ContextManager."""

    def test_compact_empty_entries(self) -> None:
        """No entries → the sentinel string, with no LLM traffic."""
        from src.context_manager import ContextManager

        manager = ContextManager(budget_tokens=6000)
        llm = MockLLMClient()

        assert manager.compact_scratchpad([], llm) == "No discussion yet."
        assert llm.call_count == 0

    def test_single_entry_exceeding_budget(self) -> None:
        """A lone oversized entry is cut down or summarized away."""
        from src.context_manager import ContextManager

        oversized = "E" * 40000
        manager = ContextManager(budget_tokens=100)
        llm = MockLLMClient(response="- Summarized single entry")

        output = manager.compact_scratchpad([_make_entry("agent", oversized)], llm)

        # The raw 40000-char payload must be gone
        assert oversized not in output
        # Some trace remains: either a truncated entry or a summary
        assert "agent" in output or "Summarized" in output

    def test_two_entries_exceeding_budget(self) -> None:
        """Two oversized entries still compress to something sensible."""
        from src.context_manager import ContextManager

        pad = [
            _make_entry("a", "F" * 10000),
            _make_entry("b", "G" * 10000),
        ]
        manager = ContextManager(budget_tokens=100)
        llm = MockLLMClient(response="- Summary of discussion")

        output = manager.compact_scratchpad(pad, llm)

        # Either the newest entry survived verbatim or the LLM summary is present
        assert "G" * 10000 in output or "Summary of discussion" in output

    def test_summarize_empty_context(self) -> None:
        """Empty context short-circuits to the empty string."""
        from src.context_manager import ContextManager

        manager = ContextManager(budget_tokens=6000)
        llm = MockLLMClient()

        assert manager.summarize_context("", llm) == ""
        assert llm.call_count == 0

    def test_build_pm_context_no_context(self) -> None:
        """PM context builds fine when no background context exists."""
        from src.context_manager import ContextManager

        state = MeetingState(topic="No Context Meeting")
        manager = ContextManager(budget_tokens=6000)
        llm = MockLLMClient()

        output = manager.build_pm_context(state, llm)

        assert "No Context Meeting" in output
        assert "Background Context" not in output

    def test_build_agent_context_no_context_no_summary(self) -> None:
        """Agent context builds fine without any context or summary."""
        from src.context_manager import ContextManager

        state = MeetingState(topic="Bare Meeting")
        manager = ContextManager(budget_tokens=6000)
        llm = MockLLMClient()

        output = manager.build_agent_context(
            state=state,
            target_agent="test",
            pm_prompt="Question?",
            client=llm,
        )

        assert "Background" not in output
        assert "Question?" in output
