"""Meeting event loop - orchestrates the multi-agent discussion."""

from __future__ import annotations

import re
from datetime import datetime
from pathlib import Path
from typing import TYPE_CHECKING, Callable

from src.agent import format_scratchpad_summary, run_agent
from src.models import AgentConfig, MeetingResult, MeetingState, ScratchpadEntry
from src.pm import run_pm

if TYPE_CHECKING:
    from src.context_manager import ContextManager
    from src.llm_client import LLMClient

_COLORS = {
    "pm": "\033[1;34m",      # bold blue
    "agent": "\033[1;32m",   # bold green
    "system": "\033[1;33m",  # bold yellow
    "human": "\033[1;35m",   # bold magenta
    "reset": "\033[0m",
}


def _print_colored(label: str, message: str, color_key: str) -> None:
    """Print one labelled line with the label wrapped in ANSI color codes.

    Args:
        label: Speaker/source tag rendered inside square brackets.
        message: Text printed after the label, uncolored.
        color_key: Key into _COLORS; unknown keys degrade to no color.
    """
    prefix = _COLORS.get(color_key, "")
    suffix = _COLORS["reset"]
    print(f"{prefix}[{label}]{suffix} {message}")


def _slugify(text: str) -> str:
    """Convert text to a filename-safe slug.

    Lowercases, strips everything except word characters, whitespace and
    hyphens, collapses whitespace/underscore runs into single hyphens,
    and truncates to 50 characters.

    Args:
        text: Arbitrary text, e.g. a meeting topic.

    Returns:
        A non-empty slug. Falls back to "untitled" when nothing
        slug-worthy remains (punctuation/emoji-only or empty input),
        so generated filenames never look like "20240101-120000-.md".
    """
    slug = re.sub(r"[^\w\s-]", "", text.lower())
    slug = re.sub(r"[\s_]+", "-", slug)
    slug = slug[:50].strip("-")
    return slug or "untitled"


def save_report(report: str, topic: str, output_dir: Path = Path("reports")) -> Path:
    """Write the meeting report to a timestamped Markdown file.

    Args:
        report: Markdown content of the report.
        topic: Meeting topic; slugified into the filename.
        output_dir: Target directory, created if it does not exist.

    Returns:
        The Path of the file that was written.
    """
    output_dir.mkdir(parents=True, exist_ok=True)
    stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
    target = output_dir / f"{stamp}-{_slugify(topic)}.md"
    target.write_text(report, encoding="utf-8")
    return target


def save_transcript(result: MeetingResult, output_dir: Path = Path("reports")) -> Path:
    """Write the full meeting transcript to a timestamped Markdown file.

    Args:
        result: Completed MeetingResult whose transcript entries are
            rendered one section per speaker turn.
        output_dir: Target directory, created if it does not exist.

    Returns:
        The Path of the file that was written.
    """
    output_dir.mkdir(parents=True, exist_ok=True)
    stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
    target = output_dir / f"{stamp}-{_slugify(result.topic)}.transcript.md"

    sections: list[str] = [f"# 会议讨论记录：{result.topic}\n"]
    if result.context:
        sections.append(f"## 背景\n\n{result.context}\n")

    # One "### 💬 <speaker>" heading per transcript entry, then its content.
    for entry in result.transcript:
        sections.append(f"### 💬 {entry.agent_name}")
        sections.append(f"{entry.content}\n")

    target.write_text("\n".join(sections), encoding="utf-8")
    return target


def _run_meeting_loop(
    state: MeetingState,
    agent_configs: dict[str, AgentConfig],
    client: LLMClient,
    context_manager: ContextManager | None = None,
) -> str | None:
    """Run PM/agent rounds until the PM finishes or rounds are exhausted.

    Each round the PM inspects the scratchpad and either concludes the
    meeting (yielding a report) or dispatches a single agent whose reply
    is appended to the scratchpad. On the final allowed round a SYSTEM
    entry is injected instructing the PM to conclude.

    Args:
        state: The current MeetingState; mutated in place (round counter
            and scratchpad).
        agent_configs: Map of agent name to AgentConfig.
        client: LLM client shared by PM and agents.
        context_manager: Optional ContextManager used to compress the
            scratchpad before handing it to an agent.

    Returns:
        The PM's final report string, or None when max_rounds elapsed
        without a FINISH decision.
    """
    while state.current_round < state.max_rounds:
        state.current_round += 1
        _print_colored("SYSTEM", f"--- Round {state.current_round}/{state.max_rounds} ---", "system")

        # Last allowed round: tell the PM it must wrap up now.
        if state.current_round == state.max_rounds:
            state.scratchpad.append(
                ScratchpadEntry(
                    agent_name="SYSTEM",
                    content="MAX ROUNDS REACHED. You MUST set next_action to FINISH and provide a final_report.",
                    timestamp=datetime.now(),
                )
            )

        decision = run_pm(state, agent_configs, client, context_manager=context_manager)
        _print_colored("PM", f"Analysis: {decision.analysis}", "pm")

        if decision.next_action == "FINISH":
            _print_colored("PM", "Meeting concluded.", "pm")
            print()
            return decision.final_report or "# Meeting Report\n\nNo report generated."

        speaker = decision.target_agent
        speaker_config = agent_configs[speaker]
        _print_colored("PM", f"Calling {speaker}: {decision.prompt_for_agent}", "pm")

        # Hand the agent a compressed view of the scratchpad when a
        # context manager is available, otherwise the plain summary.
        if context_manager is None:
            summary = format_scratchpad_summary(state.scratchpad)
        else:
            summary = context_manager.build_agent_context(
                state=state,
                target_agent=speaker,
                pm_prompt=decision.prompt_for_agent,
                client=client,
            )

        reply = run_agent(
            config=speaker_config,
            prompt=decision.prompt_for_agent,
            scratchpad_summary=summary,
            client=client,
        )

        _print_colored(speaker.upper(), reply, "agent")
        print()

        state.scratchpad.append(
            ScratchpadEntry(agent_name=speaker, content=reply, timestamp=datetime.now())
        )

    return None


def run_meeting(
    topic: str,
    agent_configs: dict[str, AgentConfig],
    client: LLMClient,
    max_rounds: int = 10,
    output_dir: Path = Path("reports"),
    context: str = "",
    context_manager: ContextManager | None = None,
) -> MeetingResult:
    """Run a complete non-interactive meeting and collect the result.

    Args:
        topic: The discussion topic for the meeting.
        agent_configs: Map of agent name to AgentConfig for all participants.
        client: LLM client used by both PM and agents.
        max_rounds: Maximum PM decision rounds before a finish is forced.
        output_dir: Directory to save the final report (unused here; caller may use).
        context: Background context for the discussion.
        context_manager: Optional ContextManager for whiteboard compression.

    Returns:
        A MeetingResult containing the final report, transcript, and metadata.
    """
    started_at = datetime.now()
    state = MeetingState(topic=topic, max_rounds=max_rounds, context=context)

    # Pre-summarize the background context when a context manager is supplied.
    if context and context_manager is not None:
        state.context_summary = context_manager.summarize_context(context, client)

    _print_colored("SYSTEM", f"Meeting started: {topic}", "system")
    _print_colored("SYSTEM", f"Participants: {', '.join(agent_configs.keys())}", "system")
    print()

    final_report = _run_meeting_loop(state, agent_configs, client, context_manager=context_manager)
    if final_report is None:
        # Loop exhausted max_rounds without the PM issuing FINISH.
        final_report = "# Meeting Report\n\nMeeting ended after maximum rounds without PM conclusion."

    return MeetingResult(
        topic=topic,
        context=context,
        participants=list(agent_configs.keys()),
        transcript=list(state.scratchpad),
        final_report=final_report,
        started_at=started_at,
        finished_at=datetime.now(),
    )


def run_meeting_interactive(
    topic: str,
    context: str,
    agent_configs: dict[str, AgentConfig],
    client: LLMClient,
    max_rounds: int = 10,
    output_dir: Path = Path("reports"),
    input_fn: Callable[[str], str] = input,
    context_manager: ContextManager | None = None,
) -> MeetingResult:
    """Run a meeting with human-in-the-loop feedback between loops.

    After each meeting loop the current report is printed and the user
    is prompted for follow-up input. Non-empty input becomes a HUMAN
    scratchpad entry and the loop restarts with a reset round counter;
    empty input ends the session.

    Args:
        topic: The discussion topic for the meeting.
        context: Background context for the discussion.
        agent_configs: Map of agent name to AgentConfig for all participants.
        client: LLM client used by both PM and agents.
        max_rounds: Maximum PM decision rounds per loop iteration.
        output_dir: Directory to save the final report.
        input_fn: Callable used to collect user input (injectable for tests).
        context_manager: Optional ContextManager for whiteboard compression.

    Returns:
        A MeetingResult containing the final report, transcript, and metadata.
    """
    started_at = datetime.now()
    state = MeetingState(topic=topic, max_rounds=max_rounds, context=context)

    # Pre-summarize the background context when a context manager is supplied.
    if context and context_manager is not None:
        state.context_summary = context_manager.summarize_context(context, client)

    _print_colored("SYSTEM", f"Interactive meeting started: {topic}", "system")
    _print_colored("SYSTEM", f"Participants: {', '.join(agent_configs.keys())}", "system")
    print()

    report = "# Meeting Report\n\nNo report generated."

    while True:
        round_report = _run_meeting_loop(state, agent_configs, client, context_manager=context_manager)
        if round_report is not None:
            report = round_report

        divider = "=" * 60
        print(f"\n{divider}")
        print(report)
        print(f"{divider}\n")

        feedback = input_fn("\n💬 有补充内容吗？(直接回车结束): ").strip()
        if not feedback:
            break

        # Record the human feedback so the next loop can react to it.
        state.scratchpad.append(
            ScratchpadEntry(agent_name="HUMAN", content=feedback, timestamp=datetime.now())
        )
        _print_colored("HUMAN", feedback, "human")

        # Grant a fresh batch of rounds for the follow-up discussion.
        state.current_round = 0

    return MeetingResult(
        topic=topic,
        context=context,
        participants=list(agent_configs.keys()),
        transcript=list(state.scratchpad),
        final_report=report,
        started_at=started_at,
        finished_at=datetime.now(),
    )
