#!/usr/bin/env python3
"""
Deep dive: What exactly does local proxy return?
"""

import os
import sys
sys.path.append(os.getcwd())

from openai import OpenAI
import json

from slack_bot.llm.gemini import get_system_instruction
from slack_bot.tools.registry import TOOLS_SCHEMA

# --- Test fixtures: the real production system prompt and a sample user turn ---
system_prompt = get_system_instruction()
test_message = "今天睡得怎么样？"

print("=" * 80)
print("Local Proxy Deep Analysis")
print("=" * 80)

# Proxy connection settings. The original hardcoded values are kept as
# defaults for backward compatibility, but each may now be overridden via an
# environment variable so credentials need not live in source.
# NOTE(review): the embedded "sk-..." key is a secret checked into code —
# rotate it and prefer LOCAL_PROXY_API_KEY going forward.
local_config = {
    "base_url": os.environ.get("LOCAL_PROXY_BASE_URL", "http://127.0.0.1:8045"),
    "api_key": os.environ.get("LOCAL_PROXY_API_KEY", "sk-457cbcd2e0a4467e90db1af0ae65748e"),
    "model": os.environ.get("LOCAL_PROXY_MODEL", "gemini-3-pro-high"),
}

# OpenAI-compatible client pointed at the proxy's /v1 endpoint.
client = OpenAI(
    api_key=local_config['api_key'],
    base_url=f"{local_config['base_url']}/v1"
)

print("\nTest 1: With full system prompt + 21 tools")
print("-" * 80)

# Fire the production-shaped request: full system prompt plus the complete
# tool schema, exactly as the bot would send it.
response = client.chat.completions.create(
    model=local_config['model'],
    messages=[
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": test_message},
    ],
    tools=TOOLS_SCHEMA,
)

# Top-level response fields.
print("Raw response object inspection:")
for label, value in (
    ("response.id", response.id),
    ("response.model", response.model),
    ("response.choices", len(response.choices)),
):
    print(f"  {label}: {value}")

# First (and normally only) choice.
choice = response.choices[0]
print("\nChoice[0] details:")
print(f"  finish_reason: {choice.finish_reason}")
print(f"  index: {choice.index}")

# The assistant message itself — this is where content / tool calls live.
msg = choice.message
print("\nMessage details:")
print(f"  role: {msg.role}")
print(f"  content: {repr(msg.content)}")
print(f"  tool_calls: {msg.tool_calls}")
print(f"  function_call: {getattr(msg, 'function_call', 'N/A')}")

# Token accounting, when the proxy bothers to report it.
if hasattr(response, 'usage'):
    usage = response.usage
    print("\nUsage stats:")
    print(f"  prompt_tokens: {usage.prompt_tokens if usage else 'N/A'}")
    print(f"  completion_tokens: {usage.completion_tokens if usage else 'N/A'}")

# Dump (a truncated view of) the raw payload for manual inspection.
print("\nFull response JSON:")
dumped = json.dumps(response.model_dump(), indent=2, default=str)
print(dumped[:1000])

print("\n" + "=" * 80)
print("\nTest 2: Same request WITHOUT tools")
print("-" * 80)

# Identical request to Test 1, except the tools parameter is omitted
# entirely — isolates whether the tool schema is what breaks the proxy.
response2 = client.chat.completions.create(
    model=local_config['model'],
    messages=[
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": test_message},
    ],
)

choice2 = response2.choices[0]
msg2 = choice2.message
print(f"Finish reason: {choice2.finish_reason}")
print(f"Content: {repr(msg2.content)[:100] if msg2.content else '(empty)'}")

print("\n" + "=" * 80)
print("\nTest 3: Simple prompt WITH tools")
print("-" * 80)

# Minimal system prompt + a single tool — isolates whether the large
# production prompt (rather than tools per se) is the trigger.
simple_prompt = "You are a helpful assistant."

response3 = client.chat.completions.create(
    model=local_config['model'],
    messages=[
        {"role": "system", "content": simple_prompt},
        {"role": "user", "content": test_message},
    ],
    tools=TOOLS_SCHEMA[:1],  # restrict to the first tool only
)

choice3 = response3.choices[0]
msg3 = choice3.message
print(f"Finish reason: {choice3.finish_reason}")
print(f"Content: {repr(msg3.content)[:100] if msg3.content else '(empty)'}")
print(f"Tool calls: {msg3.tool_calls}")

print("\n" + "=" * 80)
print("DIAGNOSIS:")
print("=" * 80)

# The diagnosis text claims "no content and no tool calls", so the condition
# must check both fields; the original only tested msg.content, which would
# misdiagnose a response that carried tool calls with empty content.
if (
    response.choices[0].finish_reason == "stop"
    and not msg.content
    and not msg.tool_calls
):
    print("\n🔍 The model completed normally (finish_reason=stop)")
    print("   but returned no content and no tool calls.")
    print("\n   This means:")
    print("   1. The request was accepted by the proxy")
    print("   2. The proxy forwarded it to Gemini (or simulated it)")
    print("   3. But the response was empty/filtered")
    print("\n   Possible causes:")
    print("   a) Proxy's content filter blocked the response")
    print("   b) Proxy's Gemini integration is broken for this model")
    print("   c) The model name 'gemini-3-pro-high' doesn't exist in proxy's mapping")
    print("   d) Proxy returned a default empty response on error")
