"""
Integration tests for CLI bot scenarios.

Loads YAML test scenarios and executes them against the CLI bot.
"""

import pytest
import yaml
import os
import shutil
from pathlib import Path
from typing import List, Dict, Any
from unittest.mock import patch, MagicMock

# Import CLI bot
import sys
sys.path.insert(0, str(Path(__file__).parent.parent.parent))

from tools.cli_bot import CLIBot
from slack_bot.tools.registry import TOOL_FUNCTIONS


class ToolCallTracker:
    """Tracks tool calls made during bot execution.

    Wraps entries in the global TOOL_FUNCTIONS registry so each call is
    recorded (tool name + keyword args) before delegating to the real tool.
    """

    def __init__(self):
        # Ordered record of calls: {"name": tool_name, "args": kwargs}
        self.tool_calls: List[Dict[str, Any]] = []
        # tool name -> pristine function, used by stop_tracking to restore
        self.original_functions: Dict[str, Any] = {}

    def track_tool(self, tool_name: str):
        """Return a wrapper that records each call to the named tool.

        Saves the current registry entry so stop_tracking can restore it.
        """
        original_func = TOOL_FUNCTIONS[tool_name]
        self.original_functions[tool_name] = original_func

        def wrapper(**kwargs):
            self.tool_calls.append({
                "name": tool_name,
                "args": kwargs
            })
            return original_func(**kwargs)

        return wrapper

    def start_tracking(self, tool_names: List[str]):
        """Start tracking the specified tools; idempotent per tool.

        Tools already being tracked are skipped. Previously a second call
        wrapped the existing wrapper and saved it as the "original", so
        stop_tracking could never restore the pristine function and every
        call was recorded twice. start_tracking is invoked once per
        scenario on the same tracker, so this happened in practice.
        """
        for tool_name in tool_names:
            if tool_name in TOOL_FUNCTIONS and tool_name not in self.original_functions:
                TOOL_FUNCTIONS[tool_name] = self.track_tool(tool_name)

    def stop_tracking(self):
        """Restore original tool functions and forget the wrappers."""
        for tool_name, original_func in self.original_functions.items():
            TOOL_FUNCTIONS[tool_name] = original_func
        self.original_functions.clear()

    def reset(self):
        """Clear recorded calls (tracking itself stays active)."""
        self.tool_calls.clear()

    def has_tool(self, tool_name: str) -> bool:
        """Return True if the named tool was called since the last reset."""
        return any(call["name"] == tool_name for call in self.tool_calls)

    def get_tool_calls(self, tool_name: str) -> List[Dict[str, Any]]:
        """Return all recorded calls for the named tool, in order."""
        return [call for call in self.tool_calls if call["name"] == tool_name]


class ResponseCapture:
    """Stand-in Slack client that records bot responses in memory."""

    def __init__(self):
        # Captured message texts, oldest first.
        self.messages: List[str] = []

    def chat_postMessage(self, channel: str, text: str, **kwargs):
        """Record a newly posted message and return a fake Slack ack."""
        self.messages.append(text)
        return {"ts": f"test_{len(self.messages)}", "ok": True}

    def chat_update(self, channel: str, ts: str, text: str, **kwargs):
        """Record a message edit.

        Overwrites the most recent captured message; if nothing has been
        captured yet, starts the list with this text.
        """
        if not self.messages:
            self.messages.append(text)
        else:
            self.messages[-1] = text
        return {"ts": ts, "ok": True}

    def get_latest_response(self) -> str:
        """Return the most recent response, or '' when nothing was posted."""
        if not self.messages:
            return ""
        return self.messages[-1]

    def get_all_responses(self) -> List[str]:
        """Return a copy of every captured response, oldest first."""
        return list(self.messages)

    def reset(self):
        """Forget all captured messages."""
        del self.messages[:]


class TestCLIScenarios:
    """Test scenarios loaded from YAML files.

    Each YAML file under tests/integration/scenarios holds a "scenarios"
    list. A scenario has an "id", a "description", and "steps"; each step
    sends one user message and may assert which tools were (or were not)
    called and which keywords appear in the bot's response.
    """

    @pytest.fixture(autouse=True)
    def setup_test_env(self, tmp_path):
        """Set up an isolated data directory for each test.

        Yields the per-test directory. Teardown restores the previous
        DATA_DIR value — previously the variable was left pointing at a
        deleted temp directory, leaking state into later tests and the
        caller's shell environment.
        """
        # Use unique test directory per test
        test_dir = tmp_path / "butler_test"
        test_dir.mkdir()

        # Remember the prior value so teardown can restore it
        previous_data_dir = os.environ.get("DATA_DIR")
        os.environ["DATA_DIR"] = str(test_dir)

        yield test_dir

        # Cleanup: restore the environment first, then remove the directory
        if previous_data_dir is None:
            os.environ.pop("DATA_DIR", None)
        else:
            os.environ["DATA_DIR"] = previous_data_dir
        if test_dir.exists():
            shutil.rmtree(test_dir)

    @pytest.fixture
    def cli_bot(self, setup_test_env):
        """Create a CLI bot wired to a response capture and tool tracker."""
        bot = CLIBot(
            use_real_llm=True,  # Use real LLM for integration tests
            test_data_dir=str(setup_test_env)
        )

        # Capture outgoing messages instead of posting to Slack
        response_capture = ResponseCapture()
        bot.dispatcher.client = response_capture
        bot.response_capture = response_capture

        # Record which tools the bot invokes
        tool_tracker = ToolCallTracker()
        bot.tool_tracker = tool_tracker

        yield bot

        # Restore the pristine tool functions even when the test failed
        tool_tracker.stop_tracking()

    def load_scenario_file(self, scenario_file: str) -> Dict[str, Any]:
        """Load and parse a YAML scenario file from the scenarios directory."""
        scenario_path = Path(__file__).parent / "scenarios" / scenario_file
        # Explicit encoding: scenario files may contain non-ASCII text
        with open(scenario_path, encoding="utf-8") as f:
            return yaml.safe_load(f)

    def run_scenario(self, bot: CLIBot, scenario: Dict[str, Any]):
        """Run a single scenario, asserting each step's expectations.

        Raises AssertionError when an expected tool was not called, a
        forbidden tool was called, or an expected keyword is missing from
        the bot's response.
        """
        scenario_id = scenario["id"]
        description = scenario["description"]

        print(f"\n{'='*60}")
        print(f"Running Scenario: {scenario_id}")
        print(f"Description: {description}")
        print(f"{'='*60}")

        # Unwrap any tools still tracked from a previous scenario so the
        # re-tracking below does not wrap a wrapper (which would make the
        # original functions unrecoverable and double-count calls).
        bot.tool_tracker.stop_tracking()

        # Track all potential tools
        all_tools = list(TOOL_FUNCTIONS.keys())
        bot.tool_tracker.start_tracking(all_tools)

        for step_idx, step in enumerate(scenario["steps"]):
            user_message = step["user"]
            step_desc = step.get("description", "")

            print(f"\nStep {step_idx + 1}: {step_desc}")
            print(f"User: {user_message}")

            # Reset trackers for this step
            bot.tool_tracker.reset()
            bot.response_capture.reset()

            # Send message to bot
            bot.send_message(user_message)

            # Get bot response
            bot_response = bot.response_capture.get_latest_response()
            print(f"Bot: {bot_response[:200]}...")  # Print first 200 chars

            # Verify expected tools were called
            if "expected_tools" in step:
                for expected_tool in step["expected_tools"]:
                    assert bot.tool_tracker.has_tool(expected_tool), \
                        f"Expected tool '{expected_tool}' was not called in step {step_idx + 1}"
                    print(f"✓ Tool '{expected_tool}' was called")

            # Verify tools that should NOT be called
            if "not_expected_tools" in step:
                for not_expected_tool in step["not_expected_tools"]:
                    assert not bot.tool_tracker.has_tool(not_expected_tool), \
                        f"Tool '{not_expected_tool}' should not be called in step {step_idx + 1}"
                    print(f"✓ Tool '{not_expected_tool}' was NOT called (as expected)")

            # Verify response contains expected keywords
            if "expected_response_contains" in step:
                for keyword in step["expected_response_contains"]:
                    assert keyword in bot_response, \
                        f"Expected keyword '{keyword}' not found in response: {bot_response}"
                    print(f"✓ Response contains '{keyword}'")

        print(f"\n{'='*60}")
        print(f"✅ Scenario {scenario_id} PASSED")
        print(f"{'='*60}\n")

    # Individual test methods for each scenario file

    @pytest.mark.integration
    def test_confirmation_bug_scenarios(self, cli_bot):
        """Test diet logging confirmation bug scenarios."""
        data = self.load_scenario_file("confirmation_bug.yaml")

        for scenario in data["scenarios"]:
            self.run_scenario(cli_bot, scenario)

    @pytest.mark.integration
    def test_diet_logging_scenarios(self, cli_bot):
        """Test diet logging scenarios."""
        data = self.load_scenario_file("diet_logging.yaml")

        for scenario in data["scenarios"]:
            self.run_scenario(cli_bot, scenario)

    @pytest.mark.integration
    def test_health_query_scenarios(self, cli_bot):
        """Test health data query scenarios."""
        data = self.load_scenario_file("health_queries.yaml")

        for scenario in data["scenarios"]:
            self.run_scenario(cli_bot, scenario)

    # Utility test to run a single scenario by ID

    @pytest.mark.skip(reason="Manual test - specify scenario ID")
    def test_single_scenario(self, cli_bot):
        """Run a single scenario by ID (for debugging)."""
        # Usage: pytest tests/integration/test_cli_scenarios.py::TestCLIScenarios::test_single_scenario -v
        # Then modify the scenario_file and scenario_id below

        scenario_file = "confirmation_bug.yaml"
        scenario_id = "confirmation_001"

        data = self.load_scenario_file(scenario_file)
        scenario = next(s for s in data["scenarios"] if s["id"] == scenario_id)

        self.run_scenario(cli_bot, scenario)


# Standalone function to run scenarios from command line
def run_scenario_from_cli(scenario_file: str, scenario_id: str):
    """Run a specific scenario from the command line.

    Creates a temporary data directory, wires a bot with response capture
    and tool tracking, runs the scenario, and always restores both the
    tool registry and the DATA_DIR environment variable afterwards —
    previously a failing scenario assertion skipped the cleanup entirely
    and left DATA_DIR pointing at a deleted temp directory.

    Raises:
        ValueError: if scenario_id does not exist in scenario_file.
        AssertionError: if the scenario's expectations are not met.
    """
    import tempfile

    # Create temporary test directory (removed automatically on exit)
    with tempfile.TemporaryDirectory() as tmp_dir:
        previous_data_dir = os.environ.get("DATA_DIR")
        os.environ["DATA_DIR"] = tmp_dir

        # Create bot
        bot = CLIBot(use_real_llm=True, test_data_dir=tmp_dir)

        # Set up trackers
        response_capture = ResponseCapture()
        bot.dispatcher.client = response_capture
        bot.response_capture = response_capture

        tool_tracker = ToolCallTracker()
        bot.tool_tracker = tool_tracker

        try:
            # Load and run scenario
            test_instance = TestCLIScenarios()
            data = test_instance.load_scenario_file(scenario_file)
            try:
                scenario = next(s for s in data["scenarios"] if s["id"] == scenario_id)
            except StopIteration:
                # Clearer than letting StopIteration escape to the caller
                raise ValueError(
                    f"Scenario '{scenario_id}' not found in {scenario_file}"
                ) from None

            test_instance.run_scenario(bot, scenario)
        finally:
            # Cleanup runs even when the scenario fails
            tool_tracker.stop_tracking()
            if previous_data_dir is None:
                os.environ.pop("DATA_DIR", None)
            else:
                os.environ["DATA_DIR"] = previous_data_dir


if __name__ == "__main__":
    # Allow running scenarios directly from command line
    import argparse

    arg_parser = argparse.ArgumentParser(description="Run CLI bot scenarios")
    arg_parser.add_argument("scenario_file", help="YAML scenario file name")
    arg_parser.add_argument("scenario_id", help="Scenario ID to run")
    cli_args = arg_parser.parse_args()

    run_scenario_from_cli(cli_args.scenario_file, cli_args.scenario_id)
