import os
import time
from typing import Optional, List, Dict, Any

from slack_sdk import WebClient

from health.utils.logging_config import setup_logger
from slack_bot.llm.gemini import GeminiLLM
from slack_bot.context.storage import ContextStorage
from slack_bot.tools.registry import TOOLS_SCHEMA, TOOL_FUNCTIONS

# Module-level logger, configured via the project's shared logging setup.
logger = setup_logger(__name__)


class MessageDispatcherClean:
    """
    CLEAN VERSION: Routes incoming Slack messages to Gemini - NO SAFETY OVERRIDE.

    This is an experiment to test if the LLM can handle all cases without keyword matching.

    Flow: store the user message -> first Gemini round (with tool schemas) ->
    execute any requested tools -> second Gemini round to summarize tool output.
    Every dispatch returns a result dict suitable for logging/comparison.
    """

    # Max characters of a single tool result forwarded to the LLM before truncation,
    # keeping the follow-up analysis prompt bounded.
    _MAX_TOOL_RESULT_CHARS = 8000

    def __init__(self, bot_token: Optional[str] = None, system_instruction: Optional[str] = None, tools: Optional[List[Dict]] = None):
        """
        Args:
            bot_token: Slack bot token; falls back to the SLACK_BOT_TOKEN env var.
            system_instruction: Optional system prompt forwarded to GeminiLLM.
            tools: Tool schemas offered to the LLM; defaults to TOOLS_SCHEMA.
        """
        self.llm = GeminiLLM(system_instruction=system_instruction)
        self.tools = tools if tools is not None else TOOLS_SCHEMA
        self.bot_token = bot_token or os.environ.get("SLACK_BOT_TOKEN")
        self.client = WebClient(token=self.bot_token)
        logger.info("Clean Dispatcher initialized (NO SAFETY OVERRIDE)")

    def dispatch(self, message_text: str, channel_id: str, user_id: str, response_ts: Optional[str] = None, request_id: str = "N/A", files: Optional[List[Dict]] = None) -> Dict[str, Any]:
        """
        Process message and return result for logging/comparison.

        Args:
            message_text: Raw user message text from Slack.
            channel_id: Slack channel ID; also namespaces the context storage.
            user_id: Slack user ID (unused here; kept for interface parity).
            response_ts: Optional thread timestamp (unused here).
            request_id: Correlation ID stamped onto every log line.
            files: Optional Slack file attachments (unused here).

        Returns:
            Dict with keys: response_text, tool_calls, error, timing,
            raw_llm_response, raw_tool_calls. On failure, "error" holds the
            exception text and "response_text" stays empty.
        """
        t0 = time.time()
        prefix = f"[CLEAN-{request_id}]"
        result: Dict[str, Any] = {
            "response_text": "",
            "tool_calls": [],
            "error": None,
            "timing": {},
            "raw_llm_response": "",
            "raw_tool_calls": None
        }

        logger.info(f"{prefix} Processing message: {message_text[:100]}")

        try:
            # 1. Initialize Context
            storage = ContextStorage(f"{channel_id}_clean_test")  # Separate storage for test

            # Handle clear command (fast path: never reaches the LLM).
            if message_text.strip().lower() in ["clear", "reset", "清除"]:
                storage.clear()
                result["response_text"] = "🧹 Context cleared."
                # BUGFIX: this early-return path previously omitted total timing.
                result["timing"]["total"] = time.time() - t0
                return result

            storage.add_message("user", message_text)

            # 2. Get conversation context
            context = storage.get_context()

            # 3. Call Gemini - LET IT HANDLE EVERYTHING
            logger.info(f"{prefix} Calling Gemini (with tools)...")
            t_llm_start = time.time()
            response_text, tool_calls = self.llm.generate_response(
                message=message_text,
                context=context[:-1],  # exclude the user message we just appended
                tools=self.tools,
                images=[]
            )
            result["timing"]["llm_call"] = time.time() - t_llm_start
            result["raw_llm_response"] = response_text or ""
            result["raw_tool_calls"] = tool_calls

            # Defensive check: guard against a None text response from the LLM.
            if response_text is None:
                response_text = ""
                logger.warning(f"{prefix} LLM returned None")

            if not response_text.strip() and not tool_calls:
                logger.warning(f"{prefix} Empty response and no tools!")

            # 4 + 5. Execute requested tools, then ask for a final analysis.
            if tool_calls:
                result["tool_calls"] = tool_calls
                tool_results = self._execute_tool_calls(tool_calls, channel_id, prefix)
                response_text = self._request_final_analysis(tool_results, context, result, prefix)

            result["response_text"] = response_text
            result["timing"]["total"] = time.time() - t0

            # Save to context
            storage.add_message("assistant", response_text, model="gemini")

        except Exception as e:
            logger.error(f"{prefix} Dispatch failed: {e}", exc_info=True)
            result["error"] = str(e)
            result["timing"]["total"] = time.time() - t0

        return result

    def _execute_tool_calls(self, tool_calls: List[Dict], channel_id: str, prefix: str) -> List[Dict[str, Any]]:
        """
        Run each LLM-requested tool and collect its (truncated) output.

        Returns:
            List of {"tool": name, "args": args, "result": str} dicts; tool
            failures are captured as "❌ ..." result entries, never raised.
        """
        logger.info(f"{prefix} Executing {len(tool_calls)} tools")
        tool_results: List[Dict[str, Any]] = []

        for tool_call in tool_calls:
            tool_name = tool_call["name"]
            # BUGFIX: copy the args so injecting channel_id below does not
            # mutate the caller's tool_call dicts, which are stored verbatim
            # in result["tool_calls"] / result["raw_tool_calls"].
            tool_args = dict(tool_call["args"])
            logger.info(f"{prefix} Tool: {tool_name} Args: {tool_args}")

            if tool_name not in TOOL_FUNCTIONS:
                # Preserve original behavior: unknown tool names are skipped silently.
                continue

            t_tool_start = time.time()
            try:
                # execute_shell needs to know where to post; inject the channel.
                if tool_name == "execute_shell":
                    tool_args["channel_id"] = channel_id

                exec_result = TOOL_FUNCTIONS[tool_name](**tool_args)

                # Truncate oversized output so the follow-up prompt stays bounded.
                str_result = str(exec_result)
                if len(str_result) > self._MAX_TOOL_RESULT_CHARS:
                    str_result = str_result[:self._MAX_TOOL_RESULT_CHARS] + "... (truncated)"

                tool_results.append({
                    "tool": tool_name,
                    "args": tool_args,
                    "result": str_result
                })
                logger.info(f"{prefix} ✓ {tool_name} finished in {time.time()-t_tool_start:.2f}s")
            except Exception as e:
                error_msg = f"Error executing {tool_name}: {str(e)}"
                logger.error(f"{prefix} {error_msg}")
                tool_results.append({
                    "tool": tool_name,
                    "args": tool_args,
                    "result": f"❌ {error_msg}"
                })

        return tool_results

    def _request_final_analysis(self, tool_results: List[Dict[str, Any]], context: List[Dict], result: Dict[str, Any], prefix: str) -> str:
        """
        Second Gemini round: summarize tool output for the user.

        Records timing under result["timing"]["llm_analysis"] and falls back
        to a fixed acknowledgement when the LLM returns nothing.
        """
        logger.info(f"{prefix} Requesting final analysis...")

        tool_results_text = "\n".join(
            f"Tool '{tr['tool']}' (Args: {tr['args']}) returned:\n{tr['result']}"
            for tr in tool_results
        )
        analysis_prompt = (
            f"Here are the execution results from the tools:\n{tool_results_text}\n\n"
            f"Answer the user's question based on these results. Be concise and natural."
        )

        t_llm_2_start = time.time()
        response_text, _ = self.llm.generate_response(
            message=analysis_prompt,
            context=context,
            tools=None  # no tools in the analysis round: we want prose, not more calls
        )
        result["timing"]["llm_analysis"] = time.time() - t_llm_2_start

        if not response_text or not response_text.strip():
            response_text = "✅ Tools executed (see results above)"
        return response_text
