import os
from typing import List, Dict, Tuple, Optional
from slack_bot.llm.gemini import GeminiLLM
from slack_bot.obsidian.indexer import ObsidianIndexer
from slack_bot.tools.web import WebSearchTool
from health.utils.logging_config import setup_logger
from health.utils.time_utils import get_current_time_str

# Shared module logger, configured by the project's common logging setup.
logger = setup_logger(__name__)


# System instruction shared by every generator in this module.
# The {current_time} placeholder is filled via .format() in BaseGenerator.__init__.
OBSIDIAN_SYSTEM_PROMPT = """You are Butler (Obsidian Edition), an intelligent knowledge assistant connected to the user's second brain.
Current Time: {current_time}

Your Role:
You are NOT a health assistant. You are a Knowledge Partner designed to help the user think, write, and communicate. You draw directly from the user's local Obsidian notes, values, and methodology.

Core Capabilities:
1. Writing: You mimic the user's unique writing style (as defined in `writing_style.md`).
2. Communication: You draft high-EQ, logically rigorous replies for professional contexts (as defined in `REPLY-SAMPLE.md`).
3. Decision: You act as a "Devil's Advocate" and strategic advisor using the user's decision frameworks (GPA, IPO).

Guidelines:
- **Style Alignment**: Strictly adhere to the tone and sentence structures found in the provided samples.
- **Data Source**: Rely primarily on the provided context (RAG) and loaded markdown files.
- **Identity**: You are pragmatic, rational, and value "technological optimism" and "intellectual honesty".
"""

class BaseGenerator:
    """Shared base for the Obsidian-backed assistants below.

    Owns the LLM client (system prompt baked in at construction time), the
    vault indexer, and read access to guide files in the workspace root.
    """

    def __init__(self, indexer: ObsidianIndexer):
        # Timestamp is formatted into the system prompt once, at construction.
        formatted_prompt = OBSIDIAN_SYSTEM_PROMPT.format(current_time=get_current_time_str())
        self.llm = GeminiLLM(system_instruction=formatted_prompt)
        self.indexer = indexer
        # Three dirname() hops: <root>/<pkg>/<subpkg>/this_module.py -> <root>.
        self.workspace_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))

    def _read_file(self, filename: str) -> str:
        """Read a UTF-8 text file from the workspace root.

        Returns "" on any failure (missing file, permission, decode error) so
        callers can still assemble a degraded prompt instead of crashing.
        """
        path = os.path.join(self.workspace_root, filename)
        try:
            with open(path, "r", encoding="utf-8") as f:
                return f.read()
        except Exception as e:
            # Fix: the log message previously said "(unknown)" — a leftover
            # placeholder — making failures impossible to diagnose. Report the
            # actual path that failed.
            logger.error(f"Failed to read {path}: {e}")
            return ""

    def chat(self, user_input: str, history: List[Dict]) -> Tuple[str, List[Dict]]:
        """
        Processes chat input.
        If history is empty, treats input as a Topic/Query and builds a Rich Prompt.
        If history exists, treats input as follow-up instruction.
        Returns (response_text, updated_history)
        """
        raise NotImplementedError

class WritingAssistant(BaseGenerator):
    """Ghostwrites long-form articles in the user's own voice.

    Turn one expects a topic (optionally "Topic | Context") and assembles a
    rich prompt from style samples, RAG hits, and static guide files; later
    turns are treated as plain refinement instructions.
    """

    def chat(self, user_input: str, history: List[Dict]) -> Tuple[str, List[Dict]]:
        if history:
            # Refinement turn: forward the instruction verbatim.
            logger.info(f"Follow-up instruction: {user_input}")
            prompt = user_input
        else:
            # Opening turn: split an optional "Topic | Context" pair on the
            # first pipe; missing context collapses to "".
            raw_topic, _, raw_context = user_input.partition("|")
            topic = raw_topic.strip()
            extra_context = raw_context.strip()

            # Gather everything the ghostwriter needs: tone samples, related
            # vault notes, and the two static guide files.
            style_text = "\n\n".join(self.indexer.get_writing_samples(count=3))
            rag_text = "\n\n".join(self.indexer.search(topic, limit=5))
            writing_style_guide = self._read_file("writing_style.md")
            methodology = self._read_file("methodology.md")

            prompt = f"""You are a ghostwriter for the user. Your goal is to write an article on the TOPIC provided.
            
            === YOUR IDENTITY & METHODOLOGY ===
            {methodology}
            
            === YOUR WRITING STYLE GUIDE ===
            {writing_style_guide}
            
            === YOUR WRITING SAMPLES (MIMIC THIS TONE) ===
            {style_text}
            
            === RELEVANT NOTES FROM OBSIDIAN VAULT ===
            {rag_text}
            
            === TASK ===
            Topic: {topic}
            Extra Context: {extra_context}
            
            Write the article in Chinese (unless the topic implies English). 
            Adhere strictly to the "Identity" and "Style Guide".
            """

            logger.info(f"Generating article for topic: {topic}")

        response, _ = self.llm.generate_response(prompt, history)

        # Record what the model actually saw (the full prompt on turn one)
        # plus its reply, without mutating the caller's list.
        updated_history = [
            *history,
            {"role": "user", "content": prompt},
            {"role": "assistant", "content": response},
        ]
        return response, updated_history

class ReplyGenerator(BaseGenerator):
    """Drafts professional, high-EQ replies for challenging work situations.

    Turn one builds a few-shot prompt from static and indexed reply samples;
    later turns are refinement instructions.
    """

    def chat(self, user_input: str, history: List[Dict]) -> Tuple[str, List[Dict]]:
        if history:
            # Refinement turn: pass the instruction straight through.
            logger.info(f"Follow-up reply instruction: {user_input}")
            prompt = user_input
        else:
            # Opening turn: combine the static sample file, dynamically
            # indexed samples, and the core methodology into one prompt.
            static_samples = self._read_file("REPLY-SAMPLE.md")
            dynamic_samples_text = "\n\n".join(self.indexer.get_reply_samples(count=3))
            methodology = self._read_file("methodology.md")

            prompt = f"""You are an experienced Product/R&D Manager. 
            Your task is to draft a reply to a challenging situation.
            
            === CORE PHILOSOPHY ===
            {methodology}
            
            === REPLY GUIDELINES ===
            1. Logic: Be clear and structured (e.g., numbered lists).
            2. Responsibility: Don't take unnecessary blame, but don't shirk core duties. Explain objective causes.
            3. Emotion: Be empathetic but professional.
            4. Conciseness: No fluff.
            
            === REFERENCE SAMPLES (FEW-SHOT) ===
            {static_samples}
            
            {dynamic_samples_text}
            
            === THE SITUATION ===
            {user_input}
            
            Draft a reply. If the context implies a specific role (PM, Dev Lead), assume that role.
            """
            logger.info(f"Generating reply for query: {user_input}")

        response, _ = self.llm.generate_response(prompt, history)

        # Fresh list so the caller's history is never mutated in place.
        updated_history = [
            *history,
            {"role": "user", "content": prompt},
            {"role": "assistant", "content": response},
        ]
        return response, updated_history

class DecisionSupport(BaseGenerator):
    """Walks the user through a complex decision with the GPA/IPO frameworks.

    Turn one loads the decision framework, core philosophy, and related
    vault notes; later turns are follow-up instructions.
    """

    def chat(self, user_input: str, history: List[Dict]) -> Tuple[str, List[Dict]]:
        if history:
            # Follow-up turn: the input is a plain instruction.
            logger.info(f"Follow-up decision instruction: {user_input}")
            prompt = user_input
        else:
            # Opening turn: assemble framework + philosophy + note context.
            decision_guide = self._read_file("decision.md")
            methodology = self._read_file("methodology.md")
            rag_text = "\n\n".join(self.indexer.search(user_input, limit=3))

            prompt = f"""You are a Decision Support Assistant.
            Your goal is to help the user think through a complex decision.
            
            === DECISION FRAMEWORK ===
            {decision_guide}
            
            === CORE PHILOSOPHY ===
            {methodology}
            
            === RELEVANT CONTEXT FROM NOTES ===
            {rag_text}
            
            === THE DECISION / ISSUE ===
            {user_input}
            
            === TASK ===
            1. Challenge the premise: Is this the right problem to solve?
            2. Apply the Framework: Use GPA (Goal, Priority, Alternatives) and IPO (Information, People, Objective reasoning) models.
            3. Pre-mortem: What is the worst that could happen?
            4. Provide a recommendation or a set of questions to clarify.
            """

            logger.info(f"Analyzing decision: {user_input}")

        response, _ = self.llm.generate_response(prompt, history)

        # Copy-and-extend rather than appending to the caller's list.
        updated_history = [
            *history,
            {"role": "user", "content": prompt},
            {"role": "assistant", "content": response},
        ]
        return response, updated_history

class SearchAnalyzer(BaseGenerator):
    """Answers questions from local Obsidian notes, optionally escalating to
    tool calls (web search, recent-file listing) in a single ReAct round.

    Fixes vs. the previous version:
    - The "for i in range(2)" ReAct loop was dead code: every branch returned
      on the first iteration and the fallback after the loop was unreachable.
      Replaced with equivalent straight-line code.
    - Typo "ambiguos" in the strategy prompt corrected to "ambiguous".
    - Unknown tool names were silently dropped; they are now logged.
    - The final assistant response is appended to the returned history
      (matching the sibling generators) so follow-up turns can see it.
    """

    # Function-calling schemas advertised to the LLM on every request.
    # Class-level constant: built once, never mutated.
    TOOLS = [
        {
            "type": "function",
            "function": {
                "name": "search_web",
                "description": "Search the public web using DuckDuckGo. Use this when the internal notes are insufficient or when you need up-to-date external information.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "query": {
                            "type": "string",
                            "description": "The search query"
                        }
                    },
                    "required": ["query"]
                }
            }
        },
        {
            "type": "function",
            "function": {
                "name": "list_recent_files",
                "description": "List files in the Obsidian vault that have been modified recently. Use this when the user asks for 'recent updates', 'what's new', or changes in the last X days.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "days": {
                            "type": "integer",
                            "description": "Number of days to check (default 5)",
                            "default": 5
                        }
                    },
                    "required": []
                }
            }
        }
    ]

    def _build_first_turn_prompt(self, user_input: str) -> str:
        """RAG-retrieve local notes for the question and wrap them in the
        analyst prompt (answer-with-citations instructions)."""
        rag_notes = self.indexer.search(user_input, limit=5)
        rag_text = "\n\n".join(rag_notes)

        logger.info(f"SearchAnalyzer retrieved {len(rag_notes)} local notes.")
        for note in rag_notes:
            # First line of each note serves as its source label for logging.
            first_line = note.split('\n')[0]
            logger.debug(f" - Retrieved: {first_line}")

        return f"""You are an Analyst using Butler (Obsidian Edition).
            
            === TASK ===
            Answer the user's question. 
            CRITICAL: You MUST explicitly cite your sources in the output.
            
            === STRATEGY ===
            1. first: Check "RELEVANT LOCAL NOTES" provided below.
            2. second: If the notes answer the question, answer directly.
            3. third: If the notes are missing info, ambiguous, or outdated, USE THE `search_web` TOOL to find external info.
            
            === OUTPUT FORMAT ===
            - Begin with a summary.
            - When stating facts, append the source in brackets, e.g., "The server failed [Source: XLSmart 2025-12-25.md]" or "Market share is 20% [Source: Web Search]".
            - If data comes from BOTH, mention both.
            
            === RELEVANT LOCAL NOTES ===
            {rag_text}
            
            === USER QUESTION ===
            {user_input}
            """

    def _execute_tool_calls(self, tool_calls: List[Dict], exec_history: List[Dict]) -> None:
        """Run each requested tool and append its output to exec_history.

        Tool outputs are injected with role 'user' because the LLM wrapper
        does not fully support a dedicated 'function'/'tool' role.
        """
        for tc in tool_calls:
            if tc["name"] == "search_web":
                query = tc["args"].get("query")
                search_result = WebSearchTool.search_web(query)
                exec_history.append({
                    "role": "user",
                    "content": f"--- TOOL OUTPUT (search_web) ---\n{search_result}"
                })
            elif tc["name"] == "list_recent_files":
                days = tc["args"].get("days", 5)
                recent_list = self.indexer.get_recent_files(days=days)
                exec_history.append({
                    "role": "user",
                    "content": f"--- TOOL OUTPUT (list_recent_files) ---\n{recent_list}"
                })
            else:
                # Surface unexpected tool names instead of dropping them.
                logger.warning(f"Ignoring unknown tool call: {tc['name']}")

    def chat(self, user_input: str, history: List[Dict]) -> Tuple[str, List[Dict]]:
        """Answer user_input with citations, using local notes and at most one
        round of tool calls. Returns (response_text, updated_history)."""
        if not history:
            actual_user_message = self._build_first_turn_prompt(user_input)
        else:
            actual_user_message = user_input

        # Work on a copy so the caller's history is not mutated mid-turn.
        exec_history = history.copy()
        exec_history.append({"role": "user", "content": actual_user_message})

        logger.info(f"Analyzer thinking on: {user_input}")

        response_text, tool_calls = self.llm.generate_response(
            message="",  # Message is already in history
            context=exec_history,
            tools=self.TOOLS
        )

        if not tool_calls:
            # Direct answer — no tools requested.
            exec_history.append({"role": "assistant", "content": response_text})
            return response_text, exec_history

        # The model wants tool results before answering.
        logger.info(f"LLM requested tool calls: {tool_calls}")
        exec_history.append({
            "role": "assistant",
            "content": f"[Tool Call Request: {tool_calls}]"
        })
        self._execute_tool_calls(tool_calls, exec_history)

        try:
            # Second pass: compose the final answer from the tool outputs.
            response_text, _ = self.llm.generate_response(
                message="",
                context=exec_history,
                tools=self.TOOLS
            )
        except Exception as e:
            # Keep the pre-tool draft rather than failing the whole turn.
            logger.error(f"Error in second ReAct loop: {e}")

        exec_history.append({"role": "assistant", "content": response_text})
        return response_text, exec_history


class DeAIReviser(BaseGenerator):
    """Remove AI tone from articles - text-in, text-out mode.

    Turn one takes the pasted AI-generated article and rewrites it per the
    POWELL_REVISE guide; later turns are refinement instructions.
    """

    def chat(self, user_input: str, history: List[Dict]) -> Tuple[str, List[Dict]]:
        if history:
            # Refinement turn: forward the instruction as-is.
            logger.info(f"DeAI Reviser: Follow-up instruction - {user_input}")
            prompt = user_input
        else:
            # Opening turn: the input IS the article to revise.
            powell_revise_guide = self._read_file("POWELL_REVISE.md")
            # Authentic writing samples anchor the target style.
            style_text = "\n\n".join(self.indexer.get_writing_samples(count=3))

            prompt = f"""You are an expert editor specializing in removing AI-generated tone from articles.

Your task is to revise the article provided by the user to make it sound more human and authentic.

=== REVISION GUIDE ===
{powell_revise_guide}

=== USER'S AUTHENTIC WRITING SAMPLES (for style reference) ===
{style_text}

=== ARTICLE TO REVISE ===
{user_input}

=== TASK ===
1. Analyze the article and identify AI characteristics (过渡词堆积, 排比对仗, 空洞修饰词等)
2. Rewrite the article following the POWELL_REVISE.md principles
3. Preserve:
   - Original meaning and key information
   - Technical terms and proper nouns
   - Code blocks, lists, and formatting
   - Original language (Chinese/English)
4. Output the revised article directly (no meta-commentary like "Here's the revised version...")

CRITICAL: Output ONLY the revised article text. Do NOT add explanations before or after.
"""

            logger.info(f"DeAI Reviser: Processing article ({len(user_input)} chars)")

        response, _ = self.llm.generate_response(prompt, history)

        # Copy-and-extend; never mutate the caller's history.
        updated_history = [
            *history,
            {"role": "user", "content": prompt},
            {"role": "assistant", "content": response},
        ]
        return response, updated_history


class ZhihuGenerator(BaseGenerator):
    """Generate Zhihu-style answers based on style guide and user's knowledge base.

    Turn one expects "Question | Core Ideas"; later turns are refinement
    instructions. Long first-turn answers get a promo signature appended.
    """

    def chat(self, user_input: str, history: List[Dict]) -> Tuple[str, List[Dict]]:
        opening_turn = not history

        if opening_turn:
            # Split the expected "Question | Core Ideas" pair on the first pipe.
            raw_question, _, raw_ideas = user_input.partition("|")
            question = raw_question.strip()
            core_ideas = raw_ideas.strip()

            # Style guide governs HOW to write; vault notes supply the actual
            # material; methodology keeps the identity consistent.
            zhihu_style_guide = self._read_file("zhihu-style.md")
            rag_text = "\n\n".join(self.indexer.search(question, limit=5))
            methodology = self._read_file("methodology.md")

            prompt = f"""You are answering a Zhihu question. Your goal is to write an authentic, experience-driven answer.

=== YOUR IDENTITY & METHODOLOGY ===
{methodology}

=== ZHIHU STYLE GUIDE (FOLLOW THESE WRITING PRINCIPLES) ===
{zhihu_style_guide}

CRITICAL: The style guide above defines HOW to write (tone, structure, language patterns).
Do NOT treat zhihu-sample content as factual material. It's ONLY for style reference.

=== RELEVANT KNOWLEDGE FROM YOUR OBSIDIAN VAULT (USE AS ACTUAL MATERIAL) ===
{rag_text}

=== THE QUESTION ===
{question}

=== YOUR CORE IDEAS ===
{core_ideas}

=== TASK ===
1. Answer the question based on YOUR knowledge (from Obsidian vault) and core ideas
2. Follow the Zhihu style guide strictly for tone and structure
3. Share real experiences, examples, and insights from the vault material
4. Write in Chinese (unless question implies English)
5. Use first person ("我", "我们") frequently
6. Be professional yet approachable, technical yet relatable
7. Adhere to the "七个避免事项" (avoid AI tone, avoid empty buzzwords, etc.)

OUTPUT FORMAT:
- If the answer is over 300 characters, append this at the end:

---

**更多关于这些问题的思考可以在公众号搜索账户: AI Manifest**

Begin writing the answer:
"""

            logger.info(f"Generating Zhihu answer for: {question[:50]}...")
        else:
            # Refinement turn: forward the instruction verbatim.
            logger.info(f"Follow-up Zhihu instruction: {user_input}")
            prompt = user_input

        response, _ = self.llm.generate_response(prompt, history)

        # First-turn answers longer than 300 chars get the promo signature,
        # unless the model already included it on its own.
        if opening_turn and len(response) > 300:
            signature = "\n\n---\n\n**更多关于这些问题的思考可以在公众号搜索账户: AI Manifest**"
            if "AI Manifest" not in response:
                response += signature

        # Record the turn in a fresh list (signature included in the reply).
        updated_history = [
            *history,
            {"role": "user", "content": prompt},
            {"role": "assistant", "content": response},
        ]
        return response, updated_history
