#!/usr/bin/env python3
"""
Find what specifically in system prompt causes empty response.
"""

import os
import sys
sys.path.append(os.getcwd())

from health.utils.env_loader import load_env_with_extras
from openai import OpenAI

# Load environment variables (plus any extras) before reading os.environ.
load_env_with_extras()

# Gemini exposes an OpenAI-compatible endpoint under the /v1 path.
gemini_base = os.environ.get("GEMINI_BASE_URL")
client = OpenAI(
    api_key=os.environ.get("GEMINI_API_KEY"),
    base_url=f"{gemini_base}/v1",
)
model = os.environ.get("GEMINI_MODEL", "gemini-3-flash")

banner = "=" * 60
print(banner)
print("Progressive System Prompt Test")
print(banner)

# (label, system prompt) pairs, ordered from simplest to most complex,
# to bisect which part of the system prompt triggers an empty response.
test_prompts = [
    ("Empty", ""),
    ("Simple", "You are a helpful assistant."),
    (
        "With tool mention",
        "You are a helpful assistant. Use tools when needed.",
    ),
    (
        "TOOL-FIRST",
        "You are a helpful assistant.\n\n"
        "1. **TOOL-FIRST POLICY**: When user asks about data, call tools first.",
    ),
]

for name, prompt in test_prompts:
    print(f"\nTest: {name}")
    print(f"Prompt length: {len(prompt)}")
    print("-" * 60)

    # BUG FIX: the original code sent an empty dict ({}) as the first message
    # when the prompt was empty. An empty object is not a valid chat message,
    # so the "Empty" case tested API rejection rather than model behavior.
    # Instead, include a system message only when the prompt is non-empty.
    messages = []
    if prompt:
        messages.append({"role": "system", "content": prompt})
    messages.append({"role": "user", "content": "今天睡得怎么样？"})

    try:
        response = client.chat.completions.create(
            model=model,
            messages=messages,
        )

        content = response.choices[0].message.content or ""
        finish = response.choices[0].finish_reason

        print(f"{'✅ WORKS' if content else '❌ EMPTY'}")
        print(f"  Length: {len(content)}, Finish: {finish}")
        if content:
            print(f"  Preview: {content[:100]}")

    except Exception as e:
        # Best-effort diagnostic: show a truncated error and continue the sweep.
        print(f"❌ Error: {str(e)[:100]}")

print("\n" + "=" * 60)
print("This shows which part of system prompt triggers the issue")
