"""Meeting runtime and PM-led orchestration loop."""

from __future__ import annotations

from dataclasses import dataclass
from pathlib import Path

from pydantic import ValidationError

from core.context_loader import build_context_bundle
from core.llm import LLMClient, LLMParseError
from core.models import (
    AgentConfig,
    AgentTurnResult,
    ContextBundle,
    DocumentCitation,
    MeetingInput,
    MeetingState,
    PMDecision,
)


@dataclass
class MeetingRunResult:
    """Result returned by the meeting runtime.

    Attributes:
        final_report: Markdown report for the meeting (PM-authored, or a
            fallback synthesized from accumulated state).
        state: The MeetingState at the end of the run.
    """

    final_report: str
    state: MeetingState


def run_meeting(
    meeting_input: MeetingInput,
    agents: dict[str, AgentConfig],
    provider: str,
    model: str,
    max_loops: int,
    llm_client: LLMClient | None,
    meeting_file_path: Path | None = None,
) -> MeetingRunResult:
    """Run a single PM-led meeting until finish or forced stop.

    Each iteration asks the PM agent for a decision: FINISH (with a final
    report) or a call routed to another agent.  The loop ends when the PM
    finishes, when ``max_loops`` agent turns have been consumed (forced
    finish), or when PM/agent calls repeatedly fail (fallback report).

    Args:
        meeting_input: Topic, brief, and decision packet driving the meeting.
        agents: Agent configs keyed by name; must contain a "pm" entry.
        provider: LLM provider identifier forwarded to the client.
        model: Model identifier forwarded to the client.
        max_loops: Agent-turn budget before a forced finish.
        llm_client: Client to reuse; a fresh ``LLMClient`` is built when None.
        meeting_file_path: Path used to resolve context documents; a virtual
            placeholder path is substituted when None.

    Returns:
        MeetingRunResult holding the final Markdown report and final state.
    """

    client = llm_client or LLMClient()
    context_bundle = build_context_bundle(
        meeting_input,
        meeting_file_path=meeting_file_path or _virtual_meeting_file(),
    )
    state = MeetingState(
        topic=meeting_input.topic,
        participants=list(agents),
        loop_count=0,
        max_loops=max_loops,
        status="running",
        summary=meeting_input.topic,
        meeting_input=meeting_input,
        context_bundle=context_bundle,
    )
    # Error string surfaced to the PM in its next prompt, if any.
    last_error: str | None = None

    while True:
        # Turn budget exhausted: ask the PM for a forced final report.
        if state.loop_count >= state.max_loops:
            return _force_finish(state, agents, provider, model, client, last_error)

        pm_decision = _get_pm_decision(
            client=client,
            provider=provider,
            model=model,
            state=state,
            agents=agents,
            mode="normal",
            last_error=last_error,
        )
        # Both PM attempts failed to parse: emit the synthesized fallback report.
        if pm_decision is None:
            return _fallback_finish(state, forced=True)

        if pm_decision.next_action == "FINISH":
            # Accept the PM's report only if it carries the mandatory
            # sections; otherwise substitute the deterministic fallback.
            final_report = _normalize_or_fallback_report(state, pm_decision.final_report or "# Report")
            state.status = "finished"
            return MeetingRunResult(final_report=final_report, state=state)

        handled = _handle_call_agent(
            pm_decision=pm_decision,
            client=client,
            provider=provider,
            model=model,
            state=state,
            agents=agents,
        )
        if handled == "routing_error":
            # The PM named an unknown agent: give it one corrective retry.
            second_decision = _get_pm_decision(
                client=client,
                provider=provider,
                model=model,
                state=state,
                agents=agents,
                mode="normal",
                last_error="unknown target agent",
            )
            if second_decision is None or second_decision.target_agent not in agents:
                return _fallback_finish(state, forced=True)
            handled = _handle_call_agent(
                pm_decision=second_decision,
                client=client,
                provider=provider,
                model=model,
                state=state,
                agents=agents,
            )

        if handled == "agent_error":
            # Tell the PM its last delegation failed, then keep looping.
            last_error = "agent invocation failed"
            continue
        last_error = None


def _get_pm_decision(
    client: LLMClient,
    provider: str,
    model: str,
    state: MeetingState,
    agents: dict[str, AgentConfig],
    mode: str,
    last_error: str | None,
) -> PMDecision | None:
    """Query the PM agent for its next decision, allowing one retry.

    Returns None when neither attempt yields a parsable PMDecision.
    """
    attempts_left = 2
    while attempts_left > 0:
        attempts_left -= 1
        try:
            raw_payload = client.generate_structured_response(
                provider=provider,
                model=model,
                system_prompt=agents["pm"].system_prompt,
                user_prompt=_build_pm_prompt(state, agents, mode, last_error),
            )
            return PMDecision.model_validate(raw_payload)
        except (ValidationError, LLMParseError):
            continue
    return None


def _handle_call_agent(
    pm_decision: PMDecision,
    client: LLMClient,
    provider: str,
    model: str,
    state: MeetingState,
    agents: dict[str, AgentConfig],
) -> str:
    """Invoke the agent the PM selected and merge its turn into state.

    Returns "routing_error" for a missing/unknown target (no loop consumed),
    "ok" on success, or "agent_error" after two failed attempts (a loop is
    consumed either way once routing succeeded).
    """
    target = pm_decision.target_agent
    if not target or target not in agents:
        state.event_log.append({"type": "pm_routing_error", "target_agent": target or ""})
        return "routing_error"

    for _attempt in (1, 2):
        try:
            raw_payload = client.generate_structured_response(
                provider=provider,
                model=model,
                system_prompt=agents[target].system_prompt,
                user_prompt=_build_agent_prompt(state, pm_decision.prompt_for_agent or ""),
            )
            turn = AgentTurnResult.model_validate(raw_payload)
            _validate_citations(turn.citations, state.context_bundle)
            _merge_agent_turn(state, turn)
            state.loop_count += 1
            return "ok"
        except (ValidationError, LLMParseError):
            continue

    # Both attempts failed; still charge the loop budget and log the failure.
    state.loop_count += 1
    state.event_log.append({"type": "agent_error", "target_agent": target})
    return "agent_error"


def _merge_agent_turn(state: MeetingState, turn_result: AgentTurnResult) -> None:
    state.latest_agent_outputs[turn_result.agent_name] = turn_result
    state.event_log.append({"type": "agent_turn", "agent_name": turn_result.agent_name})

    for item in turn_result.key_points:
        if item not in state.key_points and len(state.key_points) < 20:
            state.key_points.append(item)

    for item in turn_result.recommendations:
        if item not in state.decisions and len(state.decisions) < 20:
            state.decisions.append(item)

    for item in turn_result.risks:
        if item not in state.open_questions and len(state.open_questions) < 20:
            state.open_questions.append(item)

    latest_summary = turn_result.response.splitlines()[0][:200]
    state.summary = (
        f"{state.topic}\n"
        f"Latest: {turn_result.agent_name} - {latest_summary}\n"
        f"Counts: key_points={len(state.key_points)}, open_questions={len(state.open_questions)}, decisions={len(state.decisions)}"
    )[:1200]


def _force_finish(
    state: MeetingState,
    agents: dict[str, AgentConfig],
    provider: str,
    model: str,
    client: LLMClient,
    last_error: str | None,
) -> MeetingRunResult:
    """Ask the PM for a final report once the loop budget is exhausted.

    Falls back to a synthesized report when the PM does not deliver a usable
    FINISH decision with a non-empty report.
    """
    state.status = "forced_stop"
    decision = _get_pm_decision(
        client=client,
        provider=provider,
        model=model,
        state=state,
        agents=agents,
        mode="forced_finish",
        last_error=last_error,
    )
    usable = (
        decision is not None
        and decision.next_action == "FINISH"
        and bool(decision.final_report)
    )
    if not usable:
        return _fallback_finish(state, forced=True)
    state.status = "finished"
    return MeetingRunResult(final_report=_ensure_process_note(decision.final_report), state=state)


def _fallback_finish(state: MeetingState, forced: bool) -> MeetingRunResult:
    """Build a deterministic Markdown report straight from accumulated state.

    The process note differs depending on whether the meeting was force
    stopped (``forced=True``) or merely ended without a usable PM report.
    """
    if forced:
        note = "Meeting stopped after reaching max loop limit before full convergence."
    else:
        note = "Meeting ended with fallback report."
    sections = [
        "# Meeting Report",
        f"## Topic\n{state.topic}",
        f"## Current Summary\n{state.summary}",
        "## Key Findings\n" + _render_list(state.key_points),
        "## Open Questions\n" + _render_list(state.open_questions),
        "## Candidate Decisions\n" + _render_list(state.decisions),
        "## Evidence Reviewed\n" + _render_evidence(state.context_bundle),
        f"## Process Note\n{note}",
    ]
    state.status = "finished"
    return MeetingRunResult(final_report="\n\n".join(sections), state=state)


def _render_list(items: list[str]) -> str:
    if not items:
        return "- None"
    return "\n".join(f"- {item}" for item in items)


def _render_evidence(context_bundle: ContextBundle) -> str:
    if not context_bundle.documents:
        return "- None"
    return "\n".join(f"- {document.source_path}" for document in context_bundle.documents)


def _validate_citations(citations: list[DocumentCitation], context_bundle: ContextBundle) -> None:
    documents_by_id = {document.document_id: document for document in context_bundle.documents}
    for citation in citations:
        document = documents_by_id.get(citation.document_id)
        if document is None:
            raise LLMParseError(f"unknown citation document_id: {citation.document_id}")
        if citation.source_path != document.source_path:
            raise LLMParseError(
                f"citation source_path does not match document_id: {citation.document_id}"
            )
        if citation.quote and citation.quote not in document.excerpt:
            raise LLMParseError(
                f"citation quote does not match excerpt for document_id: {citation.document_id}"
            )


def _ensure_process_note(report: str) -> str:
    if "## Process Note" in report:
        return report
    return report.rstrip() + "\n\n## Process Note\nMeeting stopped after reaching max loop limit before full convergence."


def _normalize_or_fallback_report(state: MeetingState, report: str) -> str:
    required_sections = ["## Topic", "## Evidence Reviewed"]
    if all(section in report for section in required_sections):
        return report
    return _fallback_finish(state, forced=False).final_report


def _build_pm_prompt(
    state: MeetingState,
    agents: dict[str, AgentConfig],
    mode: str,
    last_error: str | None,
) -> str:
    decision_packet = state.meeting_input.decision_packet
    remaining_loops = max(state.max_loops - state.loop_count, 0)
    return (
        f"topic={state.topic}\n"
        f"decision_to_make={decision_packet.decision_to_make}\n"
        f"options={' | '.join(decision_packet.options)}\n"
        f"evaluation_dimensions={' | '.join(decision_packet.evaluation_dimensions)}\n"
        f"required_questions={' | '.join(decision_packet.required_questions)}\n"
        f"risk_tolerance={decision_packet.risk_tolerance}\n"
        f"time_horizon={decision_packet.time_horizon}\n"
        f"summary={state.summary}\n"
        f"participants={', '.join(f'{name}:{agent.role}' for name, agent in agents.items())}\n"
        f"key_points={' | '.join(state.key_points)}\n"
        f"open_questions={' | '.join(state.open_questions)}\n"
        f"decisions={' | '.join(state.decisions)}\n"
        f"mode={mode}\n"
        f"remaining_loops={remaining_loops}\n"
        f"last_error={last_error or ''}"
    )


def _build_agent_prompt(state: MeetingState, prompt_for_agent: str) -> str:
    brief = state.meeting_input.brief
    documents = " | ".join(
        f"{document.source_path}: {document.excerpt}" for document in state.context_bundle.documents
    )
    return (
        f"prompt={prompt_for_agent}\n"
        f"meeting_summary={state.summary}\n"
        f"decision_to_make={state.meeting_input.decision_packet.decision_to_make}\n"
        f"project_background={brief.project_background}\n"
        f"current_state={brief.current_state}\n"
        f"constraints={' | '.join(brief.constraints)}\n"
        f"success_criteria={' | '.join(brief.success_criteria)}\n"
        f"non_goals={' | '.join(brief.non_goals)}\n"
        f"context_summary={state.context_bundle.summary}\n"
        f"documents={documents}"
    )


def _virtual_meeting_file() -> Path:
    return Path("meeting.yaml")
