import unittest
from unittest.mock import MagicMock, patch
from slack_bot.dispatcher import MessageDispatcher
from slack_bot.context.storage import ContextStorage

class TestScenario(unittest.TestCase):
    """End-to-end dispatcher scenarios with mocked Slack and LLM backends."""

    def setUp(self):
        # Dedicated channel key so stored context never collides with other tests.
        self.test_channel = "SCENARIO_CHANNEL"
        ContextStorage(self.test_channel).clear()

    def tearDown(self):
        # Leave no conversation history behind for the next test run.
        ContextStorage(self.test_channel).clear()

    @staticmethod
    def _wire_mocks(llm_cls, client_cls):
        """Attach MagicMock instances to the patched constructors.

        Returns (llm, client) so tests can program return values and
        inspect the calls the dispatcher makes.
        """
        client = MagicMock()
        client_cls.return_value = client
        llm = MagicMock()
        llm_cls.return_value = llm
        return llm, client

    @patch("slack_bot.dispatcher.WebClient")
    @patch("slack_bot.dispatcher.GeminiLLM")
    def test_multi_turn_conversation(self, llm_cls, client_cls):
        """
        Scenario: two-turn chat — the user greets, the bot answers,
        the user follows up, and the stored context grows each turn.
        """
        llm, client = self._wire_mocks(llm_cls, client_cls)

        # Turn 1: user says "Hi", the LLM is programmed to answer "Hi back".
        llm.generate_response.return_value = ("Hi back", None)

        dispatcher = MessageDispatcher()
        dispatcher.dispatch("Hi", self.test_channel, "User1")

        # The reply must be posted to Slack verbatim.
        client.chat_postMessage.assert_called_with(
            channel=self.test_channel,
            text="Hi back",
        )

        # Both the user message and the reply land in persistent context.
        store = ContextStorage(self.test_channel)
        history = store.get_context()
        self.assertEqual(len(history), 2)
        self.assertEqual(history[0]["content"], "Hi")
        self.assertEqual(history[1]["content"], "Hi back")

        # Turn 2: a follow-up on the same dispatcher instance.
        llm.generate_response.return_value = ("Good", None)
        dispatcher.dispatch("How are you?", self.test_channel, "User1")

        # Context accumulates rather than being replaced.
        history = store.get_context()
        self.assertEqual(len(history), 4)
        self.assertEqual(history[2]["content"], "How are you?")
        self.assertEqual(history[3]["content"], "Good")

    @patch("slack_bot.dispatcher.WebClient")
    @patch("slack_bot.dispatcher.GeminiLLM")
    def test_loading_message_flow(self, llm_cls, client_cls):
        """
        Scenario: a response_ts is supplied, so the dispatcher must edit
        the existing "Processing..." message instead of posting a new one.
        """
        llm, client = self._wire_mocks(llm_cls, client_cls)
        llm.generate_response.return_value = ("Final Answer", None)

        dispatcher = MessageDispatcher()
        # Hand over the timestamp of the placeholder message to be updated.
        dispatcher.dispatch("Hi", self.test_channel, "User1", response_ts="1234.5678", request_id="TEST-ID")

        # chat_update must target the placeholder; no fresh post allowed.
        client.chat_update.assert_called_with(
            channel=self.test_channel,
            ts="1234.5678",
            text="Final Answer",
        )
        client.chat_postMessage.assert_not_called()

    @patch("slack_bot.dispatcher.WebClient")
    @patch("slack_bot.dispatcher.GeminiLLM")
    def test_tool_execution(self, llm_cls, client_cls):
        """
        Scenario: the LLM first requests a tool call, then produces an
        analysis of the tool's result on the second round.
        """
        llm, client = self._wire_mocks(llm_cls, client_cls)

        # Two LLM rounds: tool request first, textual analysis second.
        llm.generate_response.side_effect = [
            ("", [{"name": "mock_tool", "args": {}}]),  # First call returns tool
            ("Analysis of tool result", None),          # Second call returns analysis
        ]

        # Stub the tool registry so no real tool code runs.
        with patch.dict("slack_bot.dispatcher.TOOL_FUNCTIONS", {"mock_tool": lambda: "Mock Result"}):
            dispatcher = MessageDispatcher()
            dispatcher.dispatch("Run tool", self.test_channel, "User1")

            # The dispatcher must have gone back to the LLM after the tool ran.
            self.assertEqual(llm.generate_response.call_count, 2)

            # No response_ts was passed, so the result goes out via postMessage.
            args, kwargs = client.chat_postMessage.call_args

            # The posted text carries both the raw tool output and the analysis.
            final_text = kwargs["text"]
            self.assertIn("Mock Result", final_text)       # Tool output
            self.assertIn("Analysis of tool", final_text)  # LLM analysis

if __name__ == "__main__":
    # Allow running this scenario suite directly: python <this file>.
    unittest.main()
