"""TDD tests for the deep_dive module (all written BEFORE implementation)."""
from __future__ import annotations

from datetime import datetime
from pathlib import Path
from unittest.mock import MagicMock, patch

import pytest
from pytest_httpx import HTTPXMock

from ai_usecases_explorer.deep_dive.models import (
    ArticleContent,
    BiasIndicator,
    CitedArgument,
    CredibilityBreakdown,
    DeepDiveReport,
    FactCheck,
    LogicalQuality,
    MultiPerspectiveEvaluation,
    ResearchResults,
    SearchQueries,
    StructuralAnalysis,
    WebSource,
)

# ---------------------------------------------------------------------------
# Fixtures / helpers
# ---------------------------------------------------------------------------


def _article() -> ArticleContent:
    """Build the canned article fixture shared across the test suite."""
    body = (
        "AI agents are transforming software development. They can write code, "
        "browse the web, and execute tasks autonomously. Critics argue this creates risks."
    )
    return ArticleContent(
        title="The Rise of AI Agents",
        url="https://example.com/ai-agents",
        raw_text=body,
    )


def _search_queries() -> SearchQueries:
    """Build a SearchQueries fixture with exactly one query per category."""
    queries = {
        "supporting": ["AI agents benefits productivity"],
        "opposing": ["AI agents risks criticism"],
        "expert": ["AI agents research paper 2025"],
        "fact_checks": ["AI agents market adoption statistics"],
    }
    return SearchQueries(**queries)


def _structural_analysis() -> StructuralAnalysis:
    """Build a StructuralAnalysis fixture wired to the canned search queries."""
    claims = [
        "Agents can autonomously execute multi-step tasks",
        "The technology is production-ready in 2025",
    ]
    facts = [
        "GitHub Copilot has 1 million users",
        "Claude 3.5 Sonnet released in 2024",
    ]
    return StructuralAnalysis(
        main_thesis="AI agents will replace traditional software engineering workflows.",
        sub_claims=claims,
        logical_structure="因果论证",
        key_entities=["Anthropic", "OpenAI", "GitHub Copilot"],
        verifiable_facts=facts,
        search_queries=_search_queries(),
    )

def _web_source(url: str = "https://example.com/source") -> WebSource:
    """Build a WebSource fixture; *url* lets callers vary the source identity."""
    snippet = "This source supports the claim about AI agents."
    return WebSource(title="Example Source", url=url, snippet=snippet)


def _fact_check() -> FactCheck:
    """Build a FactCheck fixture in the 'verified' state with a source URL."""
    return FactCheck(
        claim="GitHub Copilot has 1 million users",
        evidence="GitHub official blog confirms 1M+ users as of 2024",
        source_url="https://github.blog/copilot-1m-users",
        status="verified",
    )


def _research_results() -> ResearchResults:
    """Build a ResearchResults fixture holding one item in every bucket."""
    pro = _web_source("https://example.com/pro")
    con = _web_source("https://example.com/con")
    expert = _web_source("https://example.com/expert")
    return ResearchResults(
        supporting=[pro],
        opposing=[con],
        expert=[expert],
        fact_checks=[_fact_check()],
    )


def _evaluation() -> MultiPerspectiveEvaluation:
    """Build a MultiPerspectiveEvaluation fixture with a mid-range score."""
    pro = ["Increases developer productivity by 40%", "Reduces repetitive tasks"]
    con = ["May introduce security vulnerabilities", "Reduces junior developer learning"]
    neutral = ["Technology is still evolving", "Adoption varies by team"]
    return MultiPerspectiveEvaluation(
        pro_arguments=pro,
        con_arguments=con,
        neutral_context=neutral,
        bias_assessment="Author works for an AI company, potential conflict of interest",
        credibility_score=0.72,
        overall_verdict="Partially credible with some unsupported claims",
        further_exploration=["Search for peer-reviewed studies on AI coding tools"],
    )


def _report(tmp_path: Path) -> DeepDiveReport:
    """Assemble a complete DeepDiveReport from the other fixture helpers."""
    generated = datetime(2026, 2, 28, 14, 30, 0)
    return DeepDiveReport(
        article=_article(),
        structure=_structural_analysis(),
        research=_research_results(),
        evaluation=_evaluation(),
        generated_at=generated,
        report_path=tmp_path / "test-report.md",
    )


# ---------------------------------------------------------------------------
# Pydantic model validation tests
# ---------------------------------------------------------------------------


class TestArticleContent:
    """Validation behavior of the ArticleContent model."""

    def test_valid_model_with_url(self) -> None:
        """All three fields round-trip unchanged."""
        article = ArticleContent(
            title="Test",
            url="https://example.com",
            raw_text="Some text content",
        )
        assert (article.title, article.url, article.raw_text) == (
            "Test",
            "https://example.com",
            "Some text content",
        )

    def test_valid_model_without_url(self) -> None:
        """url=None is accepted (direct text input has no source URL)."""
        article = ArticleContent(title="Test", url=None, raw_text="Direct text input")
        assert article.url is None

    def test_requires_title(self) -> None:
        """Omitting title must raise a validation error."""
        with pytest.raises(Exception):
            ArticleContent(url="https://example.com", raw_text="text")  # type: ignore[call-arg]

    def test_requires_raw_text(self) -> None:
        """Omitting raw_text must raise a validation error."""
        with pytest.raises(Exception):
            ArticleContent(title="Test", url=None)  # type: ignore[call-arg]


class TestSearchQueries:
    """Validation behavior of the SearchQueries model."""

    def test_valid_model(self) -> None:
        """Each of the four query buckets in the fixture holds one entry."""
        queries = _search_queries()
        for bucket in (queries.supporting, queries.opposing, queries.expert, queries.fact_checks):
            assert len(bucket) == 1

    def test_empty_lists_allowed(self) -> None:
        """All-empty buckets are a valid model state."""
        queries = SearchQueries(supporting=[], opposing=[], expert=[], fact_checks=[])
        assert queries.supporting == []


class TestStructuralAnalysis:
    """Validation behavior of the StructuralAnalysis model."""

    def test_valid_model(self) -> None:
        """The canned fixture round-trips every field."""
        analysis = _structural_analysis()
        assert analysis.main_thesis.startswith("AI agents")
        assert analysis.logical_structure == "因果论证"
        assert len(analysis.sub_claims) == 2
        assert len(analysis.key_entities) == 3
        assert len(analysis.verifiable_facts) == 2
        assert isinstance(analysis.search_queries, SearchQueries)

    def test_requires_all_fields(self) -> None:
        """Supplying only main_thesis must raise a validation error."""
        with pytest.raises(Exception):
            StructuralAnalysis(main_thesis="only this")  # type: ignore[call-arg]


class TestWebSource:
    """Validation behavior of the WebSource model."""

    def test_valid_model(self) -> None:
        source = _web_source()
        assert source.title == "Example Source"
        assert source.url == "https://example.com/source"
        assert "AI agents" in source.snippet


class TestFactCheck:
    """Validation behavior of the FactCheck model."""

    def test_valid_model(self) -> None:
        check = _fact_check()
        assert check.status == "verified"
        assert check.source_url is not None

    def test_status_can_be_any_string(self) -> None:
        """status is free-form text, and source_url may be absent."""
        check = FactCheck(
            claim="Some claim",
            status="disputed",
            evidence="Evidence here",
            source_url=None,
        )
        assert check.status == "disputed"
        assert check.source_url is None


class TestResearchResults:
    """Validation behavior of the ResearchResults model."""

    def test_valid_model(self) -> None:
        """Every bucket in the fixture contains exactly one item."""
        results = _research_results()
        buckets = (results.supporting, results.opposing, results.expert, results.fact_checks)
        assert all(len(bucket) == 1 for bucket in buckets)


class TestMultiPerspectiveEvaluation:
    """Validation behavior of the MultiPerspectiveEvaluation model."""

    def test_valid_model(self) -> None:
        evaluation = _evaluation()
        assert evaluation.credibility_score == 0.72
        assert len(evaluation.pro_arguments) == 2
        assert len(evaluation.con_arguments) == 2

    def test_credibility_score_is_float(self) -> None:
        """The score field is a real float, not a stringified number."""
        assert isinstance(_evaluation().credibility_score, float)


class TestDeepDiveReport:
    """Validation behavior of the top-level DeepDiveReport model."""

    def test_valid_model(self, tmp_path: Path) -> None:
        report = _report(tmp_path)
        assert report.article.title == "The Rise of AI Agents"
        assert isinstance(report.generated_at, datetime)
        assert report.report_path is not None

    def test_report_path_optional(self) -> None:
        """report_path may be omitted, in which case it reads as None."""
        report = DeepDiveReport(
            article=_article(),
            structure=_structural_analysis(),
            research=_research_results(),
            evaluation=_evaluation(),
            generated_at=datetime(2026, 2, 28),
        )
        assert report.report_path is None


# ---------------------------------------------------------------------------
# ContentFetcher tests
# ---------------------------------------------------------------------------


class TestContentFetcher:
    """Tests for ContentFetcher: plain-text input and HTML fetching (httpx mocked)."""

    def test_from_text_returns_article(self) -> None:
        """from_text wraps raw text into an ArticleContent with no URL."""
        from ai_usecases_explorer.deep_dive.fetcher import ContentFetcher

        fetcher = ContentFetcher()
        article = fetcher.from_text("Some article text about AI", title="My Article")
        assert article.title == "My Article"
        assert article.raw_text == "Some article text about AI"
        assert article.url is None

    def test_from_text_default_title(self) -> None:
        """When no title is given, from_text must supply a non-empty default."""
        from ai_usecases_explorer.deep_dive.fetcher import ContentFetcher

        fetcher = ContentFetcher()
        article = fetcher.from_text("Some content")
        assert article.title != ""

    def test_fetch_url_returns_article(self, httpx_mock: HTTPXMock) -> None:
        """fetch_url records the requested URL and extracts body text from the HTML."""
        from ai_usecases_explorer.deep_dive.fetcher import ContentFetcher

        html = "<html><head><title>Test Page</title></head><body><p>Main content here.</p></body></html>"
        httpx_mock.add_response(text=html, headers={"content-type": "text/html"})

        fetcher = ContentFetcher()
        article = fetcher.fetch_url("https://example.com/article")
        assert article.url == "https://example.com/article"
        assert "Main content here" in article.raw_text

    def test_fetch_url_truncates_at_8000_chars(self, httpx_mock: HTTPXMock) -> None:
        """Extracted text is capped at 8000 characters even for very long pages."""
        from ai_usecases_explorer.deep_dive.fetcher import ContentFetcher

        long_text = "word " * 10000  # ~50000 chars
        html = f"<html><body><p>{long_text}</p></body></html>"
        httpx_mock.add_response(text=html, headers={"content-type": "text/html"})

        fetcher = ContentFetcher()
        article = fetcher.fetch_url("https://example.com/long")
        assert len(article.raw_text) <= 8000

    def test_fetch_url_sets_title_from_html(self, httpx_mock: HTTPXMock) -> None:
        """The HTML <title> element becomes the article title."""
        from ai_usecases_explorer.deep_dive.fetcher import ContentFetcher

        html = "<html><head><title>Article Title</title></head><body><p>Content.</p></body></html>"
        httpx_mock.add_response(text=html, headers={"content-type": "text/html"})

        fetcher = ContentFetcher()
        article = fetcher.fetch_url("https://example.com/article")
        assert article.title == "Article Title"

    def test_fetch_url_raises_on_http_error(self, httpx_mock: HTTPXMock) -> None:
        """An HTTP error status (404) must surface as an exception, not be swallowed."""
        from ai_usecases_explorer.deep_dive.fetcher import ContentFetcher

        httpx_mock.add_response(status_code=404)

        fetcher = ContentFetcher()
        with pytest.raises(Exception):
            fetcher.fetch_url("https://example.com/notfound")


# ---------------------------------------------------------------------------
# StructuralAnalyzer tests
# ---------------------------------------------------------------------------


class TestStructuralAnalyzer:
    """Tests for StructuralAnalyzer with the LLM call patched out entirely."""

    def _fake_llm_response(self) -> dict:
        """Canned LLM payload shaped like the StructuralAnalysis schema."""
        return {
            "main_thesis": "AI will transform software development",
            "sub_claims": ["Claim 1", "Claim 2"],
            "logical_structure": "因果论证",
            "key_entities": ["Anthropic", "GitHub"],
            "verifiable_facts": ["GitHub has 100M users"],
            "search_queries": {
                "supporting": ["AI benefits productivity"],
                "opposing": ["AI risks criticism"],
                "expert": ["AI research paper"],
                "fact_checks": ["GitHub users count"],
            },
        }

    def test_run_returns_structural_analysis(self) -> None:
        """run() converts the raw LLM dict into a StructuralAnalysis model."""
        from ai_usecases_explorer.deep_dive.analyzer import StructuralAnalyzer

        with patch(
            "ai_usecases_explorer.deep_dive.analyzer.StructuralAnalyzer._call_llm",
            return_value=self._fake_llm_response(),
        ):
            analyzer = StructuralAnalyzer(client=MagicMock(), model="claude-sonnet-4-6")
            result = analyzer.run(_article())

        assert isinstance(result, StructuralAnalysis)
        assert result.main_thesis == "AI will transform software development"

    def test_run_parses_sub_claims(self) -> None:
        """The sub_claims list survives the dict-to-model conversion."""
        from ai_usecases_explorer.deep_dive.analyzer import StructuralAnalyzer

        with patch(
            "ai_usecases_explorer.deep_dive.analyzer.StructuralAnalyzer._call_llm",
            return_value=self._fake_llm_response(),
        ):
            analyzer = StructuralAnalyzer(client=MagicMock(), model="claude-sonnet-4-6")
            result = analyzer.run(_article())

        assert len(result.sub_claims) == 2
        assert "Claim 1" in result.sub_claims

    def test_run_parses_search_queries(self) -> None:
        """Nested search_queries dict is parsed into a SearchQueries model."""
        from ai_usecases_explorer.deep_dive.analyzer import StructuralAnalyzer

        with patch(
            "ai_usecases_explorer.deep_dive.analyzer.StructuralAnalyzer._call_llm",
            return_value=self._fake_llm_response(),
        ):
            analyzer = StructuralAnalyzer(client=MagicMock(), model="claude-sonnet-4-6")
            result = analyzer.run(_article())

        assert isinstance(result.search_queries, SearchQueries)
        assert len(result.search_queries.supporting) == 1

    def test_run_calls_llm_with_article_content(self) -> None:
        """run() must pass the ArticleContent as the first positional LLM argument."""
        from ai_usecases_explorer.deep_dive.analyzer import StructuralAnalyzer

        with patch(
            "ai_usecases_explorer.deep_dive.analyzer.StructuralAnalyzer._call_llm",
            return_value=self._fake_llm_response(),
        ) as mock_llm:
            analyzer = StructuralAnalyzer(client=MagicMock(), model="claude-sonnet-4-6")
            analyzer.run(_article())

        mock_llm.assert_called_once()
        # call_args[0] is the positional-args tuple; this pins a positional calling style.
        call_arg = mock_llm.call_args[0][0]
        assert isinstance(call_arg, ArticleContent)


# ---------------------------------------------------------------------------
# WebResearcher tests
# ---------------------------------------------------------------------------


def _make_exa_result(
    url: str = "https://example.com/result",
    title: str = "Research Finding",
    text: str = "This is the content of the search result.",
) -> MagicMock:
    result = MagicMock()
    result.url = url
    result.title = title
    result.text = text
    return result


def _make_exa_response(results: list[MagicMock]) -> MagicMock:
    response = MagicMock()
    response.results = results
    return response


class TestWebResearcher:
    """Tests for WebResearcher against a fully mocked Exa client."""

    def test_run_returns_research_results(self) -> None:
        """run() wraps Exa hits in a ResearchResults model."""
        from ai_usecases_explorer.deep_dive.researcher import WebResearcher

        mock_exa = MagicMock()
        mock_exa.search_and_contents.return_value = _make_exa_response(
            [_make_exa_result()]
        )
        researcher = WebResearcher(exa_client=mock_exa)
        result = researcher.run(_search_queries())

        assert isinstance(result, ResearchResults)

    def test_run_returns_supporting_sources(self) -> None:
        """All four result buckets are plain lists."""
        from ai_usecases_explorer.deep_dive.researcher import WebResearcher

        mock_exa = MagicMock()
        mock_exa.search_and_contents.return_value = _make_exa_response(
            [_make_exa_result(url="https://pro.example.com")]
        )
        researcher = WebResearcher(exa_client=mock_exa)
        result = researcher.run(_search_queries())

        assert isinstance(result.supporting, list)
        assert isinstance(result.opposing, list)
        assert isinstance(result.expert, list)
        assert isinstance(result.fact_checks, list)

    def test_run_calls_exa_for_each_query_type(self) -> None:
        """At least one Exa search per query category (4 categories in the fixture)."""
        from ai_usecases_explorer.deep_dive.researcher import WebResearcher

        mock_exa = MagicMock()
        mock_exa.search_and_contents.return_value = _make_exa_response([])
        researcher = WebResearcher(exa_client=mock_exa)
        researcher.run(_search_queries())

        # Should call Exa multiple times (one per query category + individual queries)
        assert mock_exa.search_and_contents.call_count >= 4

    def test_run_deduplicates_results_by_url(self) -> None:
        """The same URL returned by multiple searches must appear only once overall."""
        from ai_usecases_explorer.deep_dive.researcher import WebResearcher

        same_result = _make_exa_result(url="https://example.com/duplicate")
        mock_exa = MagicMock()
        mock_exa.search_and_contents.return_value = _make_exa_response(
            [same_result, same_result]
        )
        researcher = WebResearcher(exa_client=mock_exa)
        result = researcher.run(_search_queries())

        # Dedup is asserted across supporting/opposing/expert combined, not per bucket.
        all_urls = [s.url for s in result.supporting + result.opposing + result.expert]
        assert len(all_urls) == len(set(all_urls))

    def test_run_gracefully_handles_exa_error(self) -> None:
        """An Exa API failure degrades to empty results rather than raising."""
        from ai_usecases_explorer.deep_dive.researcher import WebResearcher

        mock_exa = MagicMock()
        mock_exa.search_and_contents.side_effect = RuntimeError("API error")
        researcher = WebResearcher(exa_client=mock_exa)
        result = researcher.run(_search_queries())

        # Should return empty results, not raise
        assert isinstance(result, ResearchResults)
        assert result.supporting == []

    def test_web_source_has_required_fields(self) -> None:
        """Converted WebSource objects carry non-empty title/url/snippet."""
        from ai_usecases_explorer.deep_dive.researcher import WebResearcher

        mock_exa = MagicMock()
        mock_exa.search_and_contents.return_value = _make_exa_response(
            [_make_exa_result(url="https://example.com/r1", title="Title 1", text="Snippet 1")]
        )
        researcher = WebResearcher(exa_client=mock_exa)
        result = researcher.run(_search_queries())

        # Guarded: the researcher may legitimately classify the hit into another bucket.
        if result.supporting:
            source = result.supporting[0]
            assert source.title != ""
            assert source.url != ""
            assert source.snippet != ""


# ---------------------------------------------------------------------------
# MultiPerspectiveEvaluator tests
# ---------------------------------------------------------------------------


class TestMultiPerspectiveEvaluator:
    """Tests for MultiPerspectiveEvaluator with the LLM call patched out."""

    def _fake_llm_response(self) -> dict:
        """Canned LLM payload shaped like the MultiPerspectiveEvaluation schema."""
        return {
            "pro_arguments": ["Argument for 1", "Argument for 2"],
            "con_arguments": ["Argument against 1"],
            "neutral_context": ["Background context 1"],
            "bias_assessment": "Slight pro-AI bias detected",
            "credibility_score": 0.65,
            "overall_verdict": "Moderately credible with caveats",
            "further_exploration": ["Look for peer-reviewed studies"],
        }

    def test_run_returns_evaluation(self) -> None:
        """run() converts the raw LLM dict into a MultiPerspectiveEvaluation."""
        from ai_usecases_explorer.deep_dive.evaluator import MultiPerspectiveEvaluator

        with patch(
            "ai_usecases_explorer.deep_dive.evaluator.MultiPerspectiveEvaluator._call_llm",
            return_value=self._fake_llm_response(),
        ):
            evaluator = MultiPerspectiveEvaluator(client=MagicMock(), model="claude-sonnet-4-6")
            result = evaluator.run(_structural_analysis(), _research_results())

        assert isinstance(result, MultiPerspectiveEvaluation)
        assert result.credibility_score == 0.65

    def test_run_parses_pro_arguments(self) -> None:
        """pro_arguments list survives the dict-to-model conversion."""
        from ai_usecases_explorer.deep_dive.evaluator import MultiPerspectiveEvaluator

        with patch(
            "ai_usecases_explorer.deep_dive.evaluator.MultiPerspectiveEvaluator._call_llm",
            return_value=self._fake_llm_response(),
        ):
            evaluator = MultiPerspectiveEvaluator(client=MagicMock(), model="claude-sonnet-4-6")
            result = evaluator.run(_structural_analysis(), _research_results())

        assert len(result.pro_arguments) == 2
        assert "Argument for 1" in result.pro_arguments

    def test_run_parses_con_arguments(self) -> None:
        """con_arguments list survives the dict-to-model conversion."""
        from ai_usecases_explorer.deep_dive.evaluator import MultiPerspectiveEvaluator

        with patch(
            "ai_usecases_explorer.deep_dive.evaluator.MultiPerspectiveEvaluator._call_llm",
            return_value=self._fake_llm_response(),
        ):
            evaluator = MultiPerspectiveEvaluator(client=MagicMock(), model="claude-sonnet-4-6")
            result = evaluator.run(_structural_analysis(), _research_results())

        assert len(result.con_arguments) == 1

    def test_run_calls_llm_with_both_inputs(self) -> None:
        """run() must pass analysis then research as positional LLM arguments."""
        from ai_usecases_explorer.deep_dive.evaluator import MultiPerspectiveEvaluator

        with patch(
            "ai_usecases_explorer.deep_dive.evaluator.MultiPerspectiveEvaluator._call_llm",
            return_value=self._fake_llm_response(),
        ) as mock_llm:
            evaluator = MultiPerspectiveEvaluator(client=MagicMock(), model="claude-sonnet-4-6")
            evaluator.run(_structural_analysis(), _research_results())

        mock_llm.assert_called_once()
        # call_args[0] is the positional-args tuple; this pins argument order.
        args = mock_llm.call_args[0]
        assert isinstance(args[0], StructuralAnalysis)
        assert isinstance(args[1], ResearchResults)

    def test_overall_verdict_is_string(self) -> None:
        """overall_verdict comes through as a non-empty string."""
        from ai_usecases_explorer.deep_dive.evaluator import MultiPerspectiveEvaluator

        with patch(
            "ai_usecases_explorer.deep_dive.evaluator.MultiPerspectiveEvaluator._call_llm",
            return_value=self._fake_llm_response(),
        ):
            evaluator = MultiPerspectiveEvaluator(client=MagicMock(), model="claude-sonnet-4-6")
            result = evaluator.run(_structural_analysis(), _research_results())

        assert isinstance(result.overall_verdict, str)
        assert len(result.overall_verdict) > 0


# ---------------------------------------------------------------------------
# DeepDiveReporter tests
# ---------------------------------------------------------------------------


class TestDeepDiveReporter:
    """Tests for DeepDiveReporter: file writing and Markdown rendering."""

    def test_write_creates_file(self, tmp_path: Path) -> None:
        """write() must create the report file on disk."""
        from ai_usecases_explorer.deep_dive.reporter import DeepDiveReporter

        reporter = DeepDiveReporter(report_dir=tmp_path)
        report = _report(tmp_path)
        output_path = reporter.write(report)

        assert output_path.exists()

    def test_write_returns_path(self, tmp_path: Path) -> None:
        """write() returns a Path ending in .md."""
        from ai_usecases_explorer.deep_dive.reporter import DeepDiveReporter

        reporter = DeepDiveReporter(report_dir=tmp_path)
        report = _report(tmp_path)
        output_path = reporter.write(report)

        assert isinstance(output_path, Path)
        assert output_path.suffix == ".md"

    def test_write_creates_parent_dirs(self, tmp_path: Path) -> None:
        """write() must create missing intermediate directories itself."""
        from ai_usecases_explorer.deep_dive.reporter import DeepDiveReporter

        deep_dir = tmp_path / "a" / "b" / "c"
        reporter = DeepDiveReporter(report_dir=deep_dir)
        report = _report(tmp_path)
        output_path = reporter.write(report)

        assert output_path.exists()

    def test_render_markdown_contains_title(self, tmp_path: Path) -> None:
        """Rendered Markdown includes the article title."""
        from ai_usecases_explorer.deep_dive.reporter import DeepDiveReporter

        reporter = DeepDiveReporter(report_dir=tmp_path)
        report = _report(tmp_path)
        md = reporter._render_markdown(report)

        assert "The Rise of AI Agents" in md

    def test_render_markdown_contains_main_thesis(self, tmp_path: Path) -> None:
        """Rendered Markdown includes the main thesis text."""
        from ai_usecases_explorer.deep_dive.reporter import DeepDiveReporter

        reporter = DeepDiveReporter(report_dir=tmp_path)
        report = _report(tmp_path)
        md = reporter._render_markdown(report)

        assert "AI agents will replace traditional software engineering" in md

    def test_render_markdown_contains_credibility_score(self, tmp_path: Path) -> None:
        """Rendered Markdown shows the numeric credibility score."""
        from ai_usecases_explorer.deep_dive.reporter import DeepDiveReporter

        reporter = DeepDiveReporter(report_dir=tmp_path)
        report = _report(tmp_path)
        md = reporter._render_markdown(report)

        assert "0.72" in md

    def test_render_markdown_contains_pro_arguments(self, tmp_path: Path) -> None:
        """Rendered Markdown lists the pro arguments."""
        from ai_usecases_explorer.deep_dive.reporter import DeepDiveReporter

        reporter = DeepDiveReporter(report_dir=tmp_path)
        report = _report(tmp_path)
        md = reporter._render_markdown(report)

        assert "Increases developer productivity" in md

    def test_render_markdown_contains_fact_check_table(self, tmp_path: Path) -> None:
        """Rendered Markdown includes the fact-checked claim text."""
        from ai_usecases_explorer.deep_dive.reporter import DeepDiveReporter

        reporter = DeepDiveReporter(report_dir=tmp_path)
        report = _report(tmp_path)
        md = reporter._render_markdown(report)

        assert "GitHub Copilot has 1 million users" in md

    def test_render_markdown_contains_supporting_sources(self, tmp_path: Path) -> None:
        """Rendered Markdown links the supporting source URLs."""
        from ai_usecases_explorer.deep_dive.reporter import DeepDiveReporter

        reporter = DeepDiveReporter(report_dir=tmp_path)
        report = _report(tmp_path)
        md = reporter._render_markdown(report)

        assert "https://example.com/pro" in md

    def test_filename_contains_date_and_slug(self, tmp_path: Path) -> None:
        """The output filename embeds the report's generated_at date."""
        from ai_usecases_explorer.deep_dive.reporter import DeepDiveReporter

        reporter = DeepDiveReporter(report_dir=tmp_path)
        report = _report(tmp_path)
        output_path = reporter.write(report)

        filename = output_path.name
        assert "2026-02-28" in filename

    def test_file_written_with_utf8(self, tmp_path: Path) -> None:
        """The file is UTF-8: CJK text in the template must survive a round trip."""
        from ai_usecases_explorer.deep_dive.reporter import DeepDiveReporter

        reporter = DeepDiveReporter(report_dir=tmp_path)
        report = _report(tmp_path)
        output_path = reporter.write(report)

        # NOTE(review): assumes the report template contains the literal header
        # "深度探索" — confirm against the reporter implementation.
        content = output_path.read_text(encoding="utf-8")
        assert "深度探索" in content


# ---------------------------------------------------------------------------
# DeepDiveOrchestrator integration tests (all steps mocked)
# ---------------------------------------------------------------------------


class TestDeepDiveOrchestrator:
    """Integration tests for the orchestrator with every pipeline stage mocked.

    Fix: the url/text tests previously declared an unused ``tmp_path`` fixture
    parameter; it is removed (the tests never touch the filesystem).
    """

    def _setup_mocks(self) -> tuple[MagicMock, MagicMock, MagicMock, MagicMock, MagicMock]:
        """Return (fetcher, analyzer, researcher, evaluator, reporter) mocks
        preloaded with the canned fixtures defined at module level."""
        mock_fetcher = MagicMock()
        mock_fetcher.fetch_url.return_value = _article()
        mock_fetcher.from_text.return_value = _article()

        mock_analyzer = MagicMock()
        mock_analyzer.run.return_value = _structural_analysis()

        mock_researcher = MagicMock()
        mock_researcher.run.return_value = _research_results()

        mock_evaluator = MagicMock()
        mock_evaluator.run.return_value = _evaluation()

        mock_reporter = MagicMock()
        mock_reporter.write.return_value = Path("/tmp/2026-02-28-1430-test.md")

        return mock_fetcher, mock_analyzer, mock_researcher, mock_evaluator, mock_reporter

    def test_run_with_url_returns_report(self) -> None:
        """run(url=...) fetches the URL and yields a DeepDiveReport."""
        from ai_usecases_explorer.deep_dive.cli import DeepDiveOrchestrator

        fetcher, analyzer, researcher, evaluator, reporter = self._setup_mocks()
        orchestrator = DeepDiveOrchestrator(
            fetcher=fetcher,
            analyzer=analyzer,
            researcher=researcher,
            evaluator=evaluator,
            reporter=reporter,
        )
        result = orchestrator.run(url="https://example.com/article")

        assert isinstance(result, DeepDiveReport)
        fetcher.fetch_url.assert_called_once_with("https://example.com/article")

    def test_run_with_text_returns_report(self) -> None:
        """run(text=...) uses from_text instead of fetching."""
        from ai_usecases_explorer.deep_dive.cli import DeepDiveOrchestrator

        fetcher, analyzer, researcher, evaluator, reporter = self._setup_mocks()
        orchestrator = DeepDiveOrchestrator(
            fetcher=fetcher,
            analyzer=analyzer,
            researcher=researcher,
            evaluator=evaluator,
            reporter=reporter,
        )
        result = orchestrator.run(text="Some article text")

        assert isinstance(result, DeepDiveReport)
        fetcher.from_text.assert_called_once()

    def test_run_calls_pipeline_in_order(self) -> None:
        """The five stages must execute strictly in pipeline order."""
        from ai_usecases_explorer.deep_dive.cli import DeepDiveOrchestrator

        call_order: list[str] = []
        fetcher, analyzer, researcher, evaluator, reporter = self._setup_mocks()

        # Each side_effect records its stage name, then returns the canned fixture.
        fetcher.fetch_url.side_effect = lambda *a, **kw: (call_order.append("fetch"), _article())[1]
        analyzer.run.side_effect = lambda *a, **kw: (call_order.append("analyze"), _structural_analysis())[1]
        researcher.run.side_effect = lambda *a, **kw: (call_order.append("research"), _research_results())[1]
        evaluator.run.side_effect = lambda *a, **kw: (call_order.append("evaluate"), _evaluation())[1]
        reporter.write.side_effect = lambda *a, **kw: (call_order.append("report"), Path("/tmp/r.md"))[1]

        orchestrator = DeepDiveOrchestrator(
            fetcher=fetcher,
            analyzer=analyzer,
            researcher=researcher,
            evaluator=evaluator,
            reporter=reporter,
        )
        orchestrator.run(url="https://example.com")

        assert call_order == ["fetch", "analyze", "research", "evaluate", "report"]

    def test_run_dry_run_skips_reporter(self) -> None:
        """dry_run=True must skip writing and leave report_path unset."""
        from ai_usecases_explorer.deep_dive.cli import DeepDiveOrchestrator

        fetcher, analyzer, researcher, evaluator, reporter = self._setup_mocks()
        orchestrator = DeepDiveOrchestrator(
            fetcher=fetcher,
            analyzer=analyzer,
            researcher=researcher,
            evaluator=evaluator,
            reporter=reporter,
        )
        result = orchestrator.run(url="https://example.com", dry_run=True)

        reporter.write.assert_not_called()
        assert isinstance(result, DeepDiveReport)
        assert result.report_path is None

    def test_run_raises_when_no_input(self) -> None:
        """Calling run() with neither url nor text must raise ValueError."""
        from ai_usecases_explorer.deep_dive.cli import DeepDiveOrchestrator

        fetcher, analyzer, researcher, evaluator, reporter = self._setup_mocks()
        orchestrator = DeepDiveOrchestrator(
            fetcher=fetcher,
            analyzer=analyzer,
            researcher=researcher,
            evaluator=evaluator,
            reporter=reporter,
        )
        with pytest.raises(ValueError):
            orchestrator.run()


# ---------------------------------------------------------------------------
# New model unit tests
# ---------------------------------------------------------------------------


class TestLogicalQuality:
    """Construction behavior of the LogicalQuality model."""

    def test_construction(self) -> None:
        quality = LogicalQuality(
            validity_assessment="论证总体严密",
            identified_fallacies=["诉诸权威：仅引用单一来源"],
            strength="中",
        )
        assert quality.validity_assessment == "论证总体严密"
        assert len(quality.identified_fallacies) == 1
        assert quality.strength == "中"

    def test_empty_fallacies_allowed(self) -> None:
        """A fallacy-free article is representable with an empty list."""
        quality = LogicalQuality(
            validity_assessment="逻辑清晰",
            identified_fallacies=[],
            strength="强",
        )
        assert quality.identified_fallacies == []


class TestCitedArgument:
    """Construction behavior of the CitedArgument model."""

    def test_construction_with_urls(self) -> None:
        argument = CitedArgument(
            text="AI 显著提升开发效率",
            source_urls=["https://example.com/study"],
        )
        assert argument.text == "AI 显著提升开发效率"
        assert len(argument.source_urls) == 1

    def test_source_urls_defaults_to_empty(self) -> None:
        """source_urls defaults to an empty list when omitted."""
        assert CitedArgument(text="某个论据").source_urls == []


class TestBiasIndicator:
    """Construction behavior of the BiasIndicator model."""

    def test_construction(self) -> None:
        indicator = BiasIndicator(
            bias_type="确认偏误",
            description="作者只引用支持其观点的研究",
            severity="高",
        )
        assert indicator.bias_type == "确认偏误"
        assert indicator.severity == "高"


class TestCredibilityBreakdown:
    """Construction behavior of the CredibilityBreakdown model."""

    def test_construction(self) -> None:
        """All four sub-scores and the explanation round-trip unchanged."""
        scores = {
            "evidence_quality": 0.7,
            "source_diversity": 0.6,
            "logical_rigor": 0.8,
            "author_transparency": 0.5,
        }
        breakdown = CredibilityBreakdown(
            explanation="证据质量较高，但来源多样性不足",
            **scores,
        )
        for field_name, expected in scores.items():
            assert getattr(breakdown, field_name) == expected
        assert "证据" in breakdown.explanation


# ---------------------------------------------------------------------------
# New StructuralAnalyzer tests (logical_quality)
# ---------------------------------------------------------------------------


class TestStructuralAnalyzerLogicalQuality:
    """Tests that StructuralAnalyzer parses the optional ``logical_quality`` field.

    The "without" fixture is derived from the "with" fixture so the two payloads
    cannot silently drift apart, and the patch/run boilerplate is factored into
    a single helper shared by both tests.
    """

    def _fake_llm_response_with_lq(self) -> dict:
        """LLM payload that includes the new ``logical_quality`` sub-object."""
        return {
            "main_thesis": "AI will transform software development",
            "sub_claims": ["Claim 1"],
            "logical_structure": "因果论证",
            "key_entities": ["Anthropic"],
            "verifiable_facts": ["GitHub has 100M users"],
            "search_queries": {
                "supporting": ["AI benefits"],
                "opposing": ["AI risks"],
                "expert": ["AI research"],
                "fact_checks": ["GitHub users"],
            },
            "logical_quality": {
                "validity_assessment": "论证基本有效，但部分因果关系缺乏支撑",
                "identified_fallacies": ["滑坡谬误：夸大AI影响", "诉诸权威：仅引用单一来源"],
                "strength": "中",
            },
        }

    def _fake_llm_response_without_lq(self) -> dict:
        """Same payload minus ``logical_quality`` (older LLM output shape)."""
        response = self._fake_llm_response_with_lq()
        del response["logical_quality"]
        return response

    def _run_analyzer(self, response: dict):
        """Run StructuralAnalyzer against a mocked ``_call_llm`` returning *response*."""
        from ai_usecases_explorer.deep_dive.analyzer import StructuralAnalyzer

        with patch(
            "ai_usecases_explorer.deep_dive.analyzer.StructuralAnalyzer._call_llm",
            return_value=response,
        ):
            analyzer = StructuralAnalyzer(client=MagicMock(), model="claude-sonnet-4-6")
            return analyzer.run(_article())

    def test_run_parses_logical_quality(self) -> None:
        result = self._run_analyzer(self._fake_llm_response_with_lq())

        assert result.logical_quality is not None
        assert result.logical_quality.strength == "中"
        assert len(result.logical_quality.identified_fallacies) == 2

    def test_run_handles_missing_logical_quality(self) -> None:
        result = self._run_analyzer(self._fake_llm_response_without_lq())

        assert result.logical_quality is None


# ---------------------------------------------------------------------------
# New MultiPerspectiveEvaluator tests
# ---------------------------------------------------------------------------


class TestMultiPerspectiveEvaluatorEnriched:
    """Tests for the enriched fields parsed by MultiPerspectiveEvaluator.

    The seven tests previously repeated the same patch/construct/run boilerplate;
    it is factored into ``_run_evaluator`` so each test states only its fixture
    and its assertions.
    """

    def _fake_llm_response_enriched(self) -> dict:
        """LLM payload populating every new optional field."""
        return {
            "cited_pro_arguments": [
                {"text": "AI提升生产力", "source_urls": ["https://example.com/pro"]},
                {"text": "减少重复工作", "source_urls": []},
            ],
            "cited_con_arguments": [
                {"text": "可能引入安全漏洞", "source_urls": ["https://example.com/con"]},
            ],
            "neutral_context": ["技术仍在发展中"],
            "bias_assessment": "作者存在轻微的AI偏向",
            "bias_indicators": [
                {"bias_type": "确认偏误", "description": "只引用支持观点的研究", "severity": "中"},
                {"bias_type": "来源偏向", "description": "主要引用AI公司的报告", "severity": "低"},
            ],
            "credibility_breakdown": {
                "evidence_quality": 0.7,
                "source_diversity": 0.6,
                "logical_rigor": 0.8,
                "author_transparency": 0.5,
                "explanation": "证据质量较好但来源不够多元",
            },
            "credibility_score": 0.68,
            "core_disputes": ["AI是否真正提升生产力", "安全风险的实际影响范围"],
            "overall_verdict": "文章基本可信，但需要更多独立研究支撑",
            "synthesis": "AI编程工具处于快速发展阶段，其长期影响尚不明确，需持续观察。",
            "further_exploration": ["查找同行评审研究"],
            "assessed_fact_checks": [
                {
                    "claim": "GitHub Copilot有100万用户",
                    "status": "verified",
                    "evidence": "GitHub官方博客确认",
                    "source_url": "https://github.blog/copilot",
                    "source_authority": "官方机构",
                }
            ],
        }

    def _fake_llm_response_minimal(self) -> dict:
        """Minimal response — only original fields, no new ones."""
        return {
            "pro_arguments": ["Arg 1"],
            "con_arguments": ["Con 1"],
            "neutral_context": ["Context 1"],
            "bias_assessment": "Some bias",
            "credibility_score": 0.5,
            "overall_verdict": "Neutral verdict",
            "further_exploration": ["Direction 1"],
        }

    def _run_evaluator(self, response: dict):
        """Run MultiPerspectiveEvaluator with a mocked ``_call_llm`` returning *response*."""
        from ai_usecases_explorer.deep_dive.evaluator import MultiPerspectiveEvaluator

        with patch(
            "ai_usecases_explorer.deep_dive.evaluator.MultiPerspectiveEvaluator._call_llm",
            return_value=response,
        ):
            evaluator = MultiPerspectiveEvaluator(client=MagicMock(), model="claude-sonnet-4-6")
            return evaluator.run(_structural_analysis(), _research_results())

    def test_run_parses_cited_pro_arguments(self) -> None:
        result = self._run_evaluator(self._fake_llm_response_enriched())

        assert len(result.cited_pro_arguments) == 2
        assert result.cited_pro_arguments[0].source_urls == ["https://example.com/pro"]
        assert result.cited_pro_arguments[1].source_urls == []

    def test_run_parses_bias_indicators(self) -> None:
        result = self._run_evaluator(self._fake_llm_response_enriched())

        assert len(result.bias_indicators) == 2
        assert result.bias_indicators[0].bias_type == "确认偏误"
        assert result.bias_indicators[0].severity == "中"

    def test_run_parses_credibility_breakdown(self) -> None:
        result = self._run_evaluator(self._fake_llm_response_enriched())

        assert result.credibility_breakdown is not None
        assert result.credibility_breakdown.evidence_quality == 0.7
        assert result.credibility_breakdown.logical_rigor == 0.8

    def test_run_parses_core_disputes(self) -> None:
        result = self._run_evaluator(self._fake_llm_response_enriched())

        assert len(result.core_disputes) == 2
        assert "AI是否真正提升生产力" in result.core_disputes

    def test_run_parses_synthesis(self) -> None:
        result = self._run_evaluator(self._fake_llm_response_enriched())

        assert len(result.synthesis) > 0
        assert "AI" in result.synthesis

    def test_run_parses_assessed_fact_checks_with_status(self) -> None:
        result = self._run_evaluator(self._fake_llm_response_enriched())

        assert len(result.assessed_fact_checks) == 1
        afc = result.assessed_fact_checks[0]
        assert afc.status == "verified"
        assert afc.source_authority == "官方机构"
        assert afc.status != "unverifiable"

    def test_run_backward_compat_missing_new_fields(self) -> None:
        result = self._run_evaluator(self._fake_llm_response_minimal())

        # New fields default gracefully
        assert result.cited_pro_arguments == []
        assert result.cited_con_arguments == []
        assert result.bias_indicators == []
        assert result.credibility_breakdown is None
        assert result.core_disputes == []
        assert result.synthesis == ""
        assert result.assessed_fact_checks == []
        # Old fields still work
        assert result.pro_arguments == ["Arg 1"]
        assert result.credibility_score == 0.5


# ---------------------------------------------------------------------------
# New DeepDiveReporter tests
# ---------------------------------------------------------------------------


def _evaluation_enriched() -> MultiPerspectiveEvaluation:
    """Evaluation with all new optional fields populated."""
    # Build the larger nested pieces first, then assemble the evaluation.
    breakdown = CredibilityBreakdown(
        evidence_quality=0.7,
        source_diversity=0.6,
        logical_rigor=0.8,
        author_transparency=0.5,
        explanation="证据质量较好，来源多样性不足",
    )
    copilot_fact = FactCheck(
        claim="GitHub Copilot有100万用户",
        status="verified",
        evidence="GitHub官方博客2024年确认",
        source_url="https://github.blog/copilot-1m",
        source_authority="官方机构",
    )
    return MultiPerspectiveEvaluation(
        pro_arguments=["Increases developer productivity by 40%"],
        con_arguments=["May introduce security vulnerabilities"],
        neutral_context=["Technology is still evolving"],
        bias_assessment="Author works for an AI company",
        credibility_score=0.68,
        overall_verdict="Partially credible with some unsupported claims",
        further_exploration=["Search for peer-reviewed studies"],
        cited_pro_arguments=[
            CitedArgument(
                text="AI显著提升生产力",
                source_urls=["https://example.com/study1", "https://example.com/study2"],
            )
        ],
        cited_con_arguments=[
            CitedArgument(text="存在安全风险", source_urls=["https://example.com/risk"])
        ],
        bias_indicators=[
            BiasIndicator(bias_type="确认偏误", description="只引用支持性研究", severity="中"),
            BiasIndicator(bias_type="来源偏向", description="主要引用AI公司报告", severity="低"),
        ],
        credibility_breakdown=breakdown,
        core_disputes=["AI是否真正提升生产力", "长期就业影响"],
        synthesis=(
            "AI编程工具正在改变软件开发范式，但其长期影响需要持续评估。"
            "目前证据显示短期生产力提升明显，但质量和安全风险不可忽视。"
        ),
        assessed_fact_checks=[copilot_fact],
    )


def _report_enriched(tmp_path: Path) -> DeepDiveReport:
    """Build a DeepDiveReport whose structure carries a LogicalQuality and whose
    evaluation has every enriched optional field populated.

    Note: the previous function-local ``from ...models import LogicalQuality`` was
    redundant — the name is already imported at module level — and is removed.
    """
    sa = _structural_analysis()
    # Rebuild the structural analysis with the optional logical_quality populated.
    sa_with_lq = StructuralAnalysis(
        main_thesis=sa.main_thesis,
        sub_claims=sa.sub_claims,
        logical_structure=sa.logical_structure,
        key_entities=sa.key_entities,
        verifiable_facts=sa.verifiable_facts,
        search_queries=sa.search_queries,
        logical_quality=LogicalQuality(
            validity_assessment="论证基本有效，部分因果关系缺乏依据",
            identified_fallacies=["滑坡谬误：夸大AI的替代效应"],
            strength="中",
        ),
    )
    return DeepDiveReport(
        article=_article(),
        structure=sa_with_lq,
        research=_research_results(),
        evaluation=_evaluation_enriched(),
        generated_at=datetime(2026, 2, 28, 14, 30, 0),
        report_path=tmp_path / "test-enriched.md",
    )


class TestDeepDiveReporterEnriched:
    """Tests for the enriched sections rendered by DeepDiveReporter.

    Each test previously repeated the reporter/report/render setup; it is
    factored into ``_render`` so each test states only its assertions.
    """

    @staticmethod
    def _render(tmp_path: Path, report: DeepDiveReport | None = None) -> str:
        """Render *report* (default: the enriched fixture) to markdown."""
        from ai_usecases_explorer.deep_dive.reporter import DeepDiveReporter

        reporter = DeepDiveReporter(report_dir=tmp_path)
        if report is None:
            report = _report_enriched(tmp_path)
        return reporter._render_markdown(report)

    def test_render_markdown_uses_cited_arguments_with_links(self, tmp_path: Path) -> None:
        md = self._render(tmp_path)

        assert "AI显著提升生产力" in md
        assert "https://example.com/study1" in md

    def test_render_markdown_shows_bias_indicators_table(self, tmp_path: Path) -> None:
        md = self._render(tmp_path)

        assert "确认偏误" in md
        assert "来源偏向" in md

    def test_render_markdown_shows_credibility_breakdown_table(self, tmp_path: Path) -> None:
        md = self._render(tmp_path)

        assert "证据质量" in md
        assert "来源多样性" in md
        assert "0.70" in md or "0.7" in md

    def test_render_markdown_shows_core_disputes_section(self, tmp_path: Path) -> None:
        md = self._render(tmp_path)

        assert "核心争议" in md
        assert "AI是否真正提升生产力" in md

    def test_render_markdown_shows_synthesis_section(self, tmp_path: Path) -> None:
        md = self._render(tmp_path)

        assert "深度洞察" in md
        assert "AI编程工具正在改变软件开发范式" in md

    def test_render_markdown_uses_assessed_fact_checks_status(self, tmp_path: Path) -> None:
        md = self._render(tmp_path)

        # assessed_fact_checks should be used (not the researcher's fact_checks)
        assert "GitHub Copilot有100万用户" in md
        assert "✅" in md

    def test_render_markdown_shows_logical_quality_block(self, tmp_path: Path) -> None:
        md = self._render(tmp_path)

        assert "论证质量" in md
        assert "滑坡谬误" in md

    def test_render_markdown_fallback_when_new_fields_empty(self, tmp_path: Path) -> None:
        # Use original report with no new fields
        md = self._render(tmp_path, _report(tmp_path))

        # Old content still present
        assert "Increases developer productivity" in md
        assert "GitHub Copilot has 1 million users" in md
        assert "0.72" in md
