import json
from datetime import date, datetime, timedelta
from typing import Dict, Any, Optional, List
from pathlib import Path

from health import config
from health.services.query import HealthDataQuery
from health.services.manual_log_storage import ManualLogStorage, DailyManualLog
from health.utils.logging_config import setup_logger

# Module-level logger, configured via the project's shared logging setup.
logger = setup_logger(__name__)

class HealthReader:
    """Read-only tool surface over stored health data for an LLM agent.

    Every method is a static method that returns a human-readable string
    (never structured data) so the result can be handed directly back to a
    tool-calling LLM. All methods accept **kwargs and tolerate missing or
    aliased arguments, because callers may hallucinate parameter names.
    Errors are caught and returned as strings rather than raised.
    """

    @staticmethod
    def get_daily_detailed_stats(target_date: Optional[str] = None, **kwargs: Any) -> str:
        """
        Get all available health metrics and activities for a specific day.
        Args:
            target_date: ISO format date string (YYYY-MM-DD)
            **kwargs: Catch-all for hallucinated arguments.
        Returns:
            Markdown-ish report string, or an "Error: ..." string on failure.
        """
        try:
            # Handle aliases/hallucinations from LLM
            if not target_date:
                target_date = kwargs.get('start_date') or kwargs.get('date') or kwargs.get('day')
            
            if not target_date:
                return "Error: target_date (or start_date) is required."

            d = date.fromisoformat(target_date)
            query = HealthDataQuery()
            data = query.get_daily_summary(d)

            # Auto-sync if data is missing for recent days (today/yesterday)
            # This mimics behavior of ./h.sh which usually syncs before showing
            # NOTE(review): a future-dated target also yields days <= 1 and
            # would trigger a sync attempt — confirm that is intended.
            if not data.get("metrics") and (date.today() - d).days <= 1:
                logger.info(f"Data missing for {target_date}, attempting auto-sync...")
                try:
                    # Imported lazily so plain reads don't pay the sync-module cost.
                    from health.services.data_sync import HealthDataSync
                    syncer = HealthDataSync()
                    syncer.authenticate()
                    syncer.sync_all_metrics(d, d, force=True)
                    # Re-query after sync
                    data = query.get_daily_summary(d)
                    logger.info("Auto-sync completed, re-queried data.")
                except Exception as sync_err:
                    logger.error(f"Auto-sync failed: {sync_err}")
                    # Continue with empty data, user will see "No data"

            
            # Format summarized output to save context window
            lines = [f"📊 **Health Report for {target_date}**"]
            
            # Metrics
            if data.get("metrics"):
                lines.append("\n**Metrics:**")
                for key, val in data["metrics"].items():
                    # Flatten simple dicts for readability
                    if isinstance(val, dict):
                        # Extract key high-level stats if possible, otherwise stringify
                        summary_str = ", ".join([f"{k}={v}" for k, v in val.items() if isinstance(v, (int, float, str))])
                        if len(summary_str) > 200:
                            summary_str = summary_str[:200] + "..."
                        lines.append(f"- {key}: {summary_str}")
                    else:
                        lines.append(f"- {key}: {val}")
                        
            # Activities
            if data.get("activities"):
                lines.append("\n**Activities:**")
                for act in data["activities"]:
                    lines.append(f"- {act.get('activity_type', 'Activity')}: {act.get('duration_seconds', 0)//60} mins, {act.get('calories', 0)} kcal, Avg HR {act.get('average_heart_rate', 'N/A')}")
            
            # Manual Logs (diet / alcohol / fasting / feelings) are stored
            # separately from synced device metrics, so load them explicitly.
            manual = ManualLogStorage().load_log(d)
            if manual.diet_entries:
                lines.append(f"\n**Diet:** {len(manual.diet_entries)} entries")
            if manual.alcohol_entries:
                lines.append(f"**Alcohol:** {len(manual.alcohol_entries)} entries")
            if manual.fasting_mode:
                lines.append(f"**Fasting:** {manual.fasting_mode}")
            if manual.feeling_entries:
                lines.append(f"**Feelings:** {len(manual.feeling_entries)} entries")
                
            # Only the header line present means nothing was found at all.
            return "\n".join(lines) if len(lines) > 1 else f"No data found for {target_date}."
            
        except Exception as e:
            logger.error(f"Error getting detailed stats: {e}")
            return f"Error retrieving data: {str(e)}"

    @staticmethod
    def get_metric_history(metric_type: Optional[str] = None, start_date: Optional[str] = None, end_date: Optional[str] = None, **kwargs: Any) -> str:
        """
        Get historical data for a specific metric (e.g., steps, sleep_score, heart_rate, hrv, weight).

        For time ranges > 7 days, automatically uses aggregated analysis for better insights.

        Args:
            metric_type: Metric name as stored by the query layer.
            start_date: YYYY-MM-DD range start (or use target_date alias).
            end_date: YYYY-MM-DD range end.
            **kwargs: Catch-all for hallucinated arguments.
        """
        try:
            # Handle alias: target_date -> start/end
            if 'target_date' in kwargs and not start_date:
                start_date = kwargs['target_date']
                end_date = kwargs['target_date']

            if not metric_type:
                return "Error: Metric type is required (e.g., 'steps', 'sleep', 'heart_rate')."

            if not start_date or not end_date:
                return "Error: Start date and end_date are required."

            start = date.fromisoformat(start_date)
            end = date.fromisoformat(end_date)

            # Calculate time span (inclusive of both endpoints)
            time_span_days = (end - start).days + 1

            # For longer time ranges (> 7 days), use aggregated analysis instead
            if time_span_days > 7:
                logger.info(f"Time range {time_span_days} days - using aggregated analysis")

                # Map common metric name variations to our standard names
                metric_mapping = {
                    'heart_rate': 'rhr',
                    'resting_heart_rate': 'rhr',
                    'sleep_score': 'sleep',
                    'sleep_quality': 'sleep',
                }

                metric_name = metric_mapping.get(metric_type, metric_type)

                # Call aggregated analysis with single metric
                return HealthReader.get_aggregated_analysis(
                    start_date=start_date,
                    end_date=end_date,
                    metrics=[metric_name]
                )

            # For short time ranges (<= 7 days), return raw data as before
            query = HealthDataQuery()
            data_list = query.get_metric_range(metric_type, start, end)

            if not data_list:
                return f"No {metric_type} data found from {start_date} to {end_date}."

            # Summarize list for LLM
            # NOTE(review): str(data_list) dumps the raw records unabridged;
            # at 7 days this is assumed small enough for the context window.
            return f"Found {len(data_list)} records for {metric_type}:\n" + str(data_list)

        except Exception as e:
            logger.error(f"Error getting metric history: {e}")
            return f"Error retrieving {metric_type} history: {str(e)}"

    @staticmethod
    def get_activity_history(start_date: Optional[str] = None, end_date: Optional[str] = None, **kwargs: Any) -> str:
        """
        Get list of activities/workouts in a date range.

        Args:
            start_date: YYYY-MM-DD range start (or use target_date alias).
            end_date: YYYY-MM-DD range end.
            **kwargs: Catch-all for hallucinated arguments.
        Returns:
            One bullet line per activity (duration, calories, distance).
        """
        try:
            # Handle alias
            if 'target_date' in kwargs and not start_date:
                start_date = kwargs['target_date']
                end_date = kwargs['target_date']
                
            if not start_date or not end_date:
                return "Error: Start date and end_date are required."

            start = date.fromisoformat(start_date)
            end = date.fromisoformat(end_date)
            
            query = HealthDataQuery()
            activities = query.get_activities_range(start, end)
            
            if not activities:
                return f"No activities found from {start_date} to {end_date}."

            lines = [f"**Activities from {start_date} to {end_date}:**"]
            for act in activities:
                # start_time is assumed ISO-formatted; first 10 chars = date part.
                date_str = act.get('start_time', 'Unknown Date')[:10]
                type_str = act.get('activity_type', 'Unknown Activity')
                dist_m = act.get('distance_meters', 0)
                dur_s = act.get('duration_seconds', 0)
                cal = act.get('calories', 0)
                lines.append(f"- {date_str}: {type_str} | {dur_s//60} mins | {cal} kcal | {dist_m/1000:.2f} km")
            
            return "\n".join(lines)

        except Exception as e:
            logger.error(f"Error getting activity history: {e}")
            return f"Error retrieving activity history: {str(e)}"

    @staticmethod
    def get_manual_history(start_date: Optional[str] = None, end_date: Optional[str] = None, category: str = "all", target_date: Optional[str] = None, **kwargs: Any) -> str:
        """
        Get manual logs (diet, alcohol, supplements, feelings, fasting) over a date range.
        Args:
            start_date: YYYY-MM-DD
            end_date: YYYY-MM-DD
            category: 'diet', 'alcohol', 'supplements', 'feelings', 'fasting', 'all'
            target_date: Optional alias for single-day query (sets start=end=target)
            **kwargs: Catch-all for hallucinated arguments (e.g. dosage, name) to prevent crashes.
        """
        try:
            # Surface unexpected args in logs so hallucination patterns are visible.
            if kwargs:
                logger.warning(f"get_manual_history received unexpected args: {kwargs}")

            # Handle target_date alias for single day
            if target_date:
                start_date = target_date
                end_date = target_date
            
            if not start_date or not end_date:
                return "Error: Please provide start_date and end_date (or target_date)."

            start = date.fromisoformat(start_date)
            end = date.fromisoformat(end_date)
            
            storage = ManualLogStorage()
            logs = storage.get_logs_in_range(start, end)
            
            if not logs:
                return f"No manual logs found from {start_date} to {end_date}."

            lines = []
            for log in logs:
                date_header = f"**{log.log_date}:**"
                day_lines = []
                
                # Each section is included only when requested AND non-empty.
                if category in ["diet", "all"] and log.diet_entries:
                    day_lines.append(f"  Diet: {len(log.diet_entries)} items")
                    for e in log.diet_entries:
                        day_lines.append(f"    - [{e.time}] {e.description} ({e.meal_type})")
                        
                if category in ["alcohol", "all"] and log.alcohol_entries:
                    day_lines.append(f"  Alcohol: {len(log.alcohol_entries)} entries")
                    for e in log.alcohol_entries:
                        day_lines.append(f"    - [{e.time}] {e.amount} {e.alcohol_type}")

                if category in ["supplements", "all"] and log.supplement_entries:
                    day_lines.append(f"  Supplements: {len(log.supplement_entries)} entries")
                    for e in log.supplement_entries:
                        day_lines.append(f"    - [{e.time}] {e.supplement_name} {e.dosage}")
                
                if category in ["feelings", "all"] and log.feeling_entries:
                    day_lines.append(f"  Feelings: {len(log.feeling_entries)} entries")
                    for e in log.feeling_entries:
                        day_lines.append(f"    - [{e.time}] {e.feeling_type} ({e.severity}/10): {e.description}")

                if category in ["fasting", "all"] and log.fasting_mode:
                    day_lines.append(f"  Fasting: {log.fasting_mode}")

                # Skip the date header entirely for days with nothing to show.
                if day_lines:
                    lines.append(date_header)
                    lines.extend(day_lines)

            return "\n".join(lines) if lines else f"No manual data for '{category}' found in range."

        except Exception as e:
            logger.error(f"Error getting manual history: {e}")
            return f"Error retrieving manual history: {str(e)}"

    @staticmethod
    def get_aggregated_analysis(
        start_date: Optional[str] = None,
        end_date: Optional[str] = None,
        metrics: Optional[List[str]] = None,
        **kwargs: Any
    ) -> str:
        """
        Get comprehensive health trend analysis over a large time window.

        This method aggregates data and calculates statistics to minimize AI tool calls.
        Instead of returning raw data, it provides a pre-analyzed report with trends,
        averages, and comparisons.

        Args:
            start_date: Start date in YYYY-MM-DD format
            end_date: End date in YYYY-MM-DD format
            metrics: List of metrics to analyze. If None, analyzes common metrics:
                    ['resting_heart_rate', 'hrv', 'sleep', 'stress', 'steps']
            **kwargs: Catch-all for hallucinated arguments

        Returns:
            Formatted analysis report with trends, statistics, and comparisons
        """
        try:
            # Default to last 2 years if not specified
            if not end_date:
                end_date = date.today().isoformat()
            if not start_date:
                start = date.fromisoformat(end_date) - timedelta(days=730)
                start_date = start.isoformat()

            start = date.fromisoformat(start_date)
            end = date.fromisoformat(end_date)

            # Default metrics if not specified (use correct metric names from config)
            if not metrics:
                metrics = ['rhr', 'hrv', 'sleep', 'stress', 'steps']

            query = HealthDataQuery()
            lines = [f"📊 **Health Trend Analysis ({start_date} to {end_date})**"]
            lines.append(f"Time span: {(end - start).days} days\n")

            # Analyze each metric independently: one failing metric must not
            # abort the whole report (see per-metric except below).
            for metric in metrics:
                try:
                    data_list = query.get_metric_range(metric, start, end)

                    if not data_list:
                        lines.append(f"\n❌ **{metric.upper()}**: No data available")
                        continue

                    lines.append(f"\n{'='*60}")
                    lines.append(f"📈 **{metric.upper().replace('_', ' ')}**")
                    lines.append(f"{'='*60}")

                    # Metric-specific analysis, dispatched to module-level helpers
                    if metric in ['resting_heart_rate', 'rhr']:
                        lines.extend(_analyze_resting_heart_rate(data_list, start, end))
                    elif metric == 'hrv':
                        lines.extend(_analyze_hrv(data_list, start, end))
                    elif metric == 'sleep':
                        lines.extend(_analyze_sleep(data_list, start, end))
                    elif metric == 'stress':
                        lines.extend(_analyze_stress(data_list, start, end))
                    elif metric == 'steps':
                        lines.extend(_analyze_steps(data_list, start, end))
                    else:
                        # Generic analysis for unknown metrics
                        lines.extend(_analyze_generic(metric, data_list, start, end))

                except Exception as metric_err:
                    logger.error(f"Error analyzing {metric}: {metric_err}")
                    lines.append(f"\n❌ **{metric}**: Error - {str(metric_err)}")

            return "\n".join(lines)

        except Exception as e:
            logger.error(f"Error in aggregated analysis: {e}")
            return f"Error generating analysis: {str(e)}"


def _analyze_resting_heart_rate(data_list: List[Dict], start: date, end: date) -> List[str]:
    """Analyze resting heart rate trends."""
    lines = []

    # Extract values (try both 'rhr' and 'resting_heart_rate' field names)
    values = []
    dates = []
    for item in data_list:
        rhr_val = item.get('rhr') or item.get('resting_heart_rate')
        if rhr_val:
            values.append(rhr_val)
            dates.append(item.get('date', ''))

    if not values:
        return ["No valid resting heart rate data"]

    # Overall statistics
    avg_all = sum(values) / len(values)
    min_val = min(values)
    max_val = max(values)

    lines.append(f"📊 Overall: {avg_all:.1f} bpm (range: {min_val}-{max_val})")
    lines.append(f"   Data points: {len(values)} days")

    # Split by year for trend analysis
    yearly_avgs = {}
    for item in data_list:
        rhr_val = item.get('rhr') or item.get('resting_heart_rate')
        if 'date' in item and rhr_val:
            year = item['date'][:4]
            if year not in yearly_avgs:
                yearly_avgs[year] = []
            yearly_avgs[year].append(rhr_val)

    if len(yearly_avgs) > 1:
        lines.append("\n📅 Yearly Trends:")
        prev_avg = None
        for year in sorted(yearly_avgs.keys()):
            year_avg = sum(yearly_avgs[year]) / len(yearly_avgs[year])
            change_str = ""
            if prev_avg:
                change = year_avg - prev_avg
                pct = (change / prev_avg) * 100
                direction = "↓" if change < 0 else "↑"
                change_str = f" ({direction} {abs(change):.1f} bpm, {abs(pct):.1f}%)"
            lines.append(f"   {year}: {year_avg:.1f} bpm{change_str}")
            prev_avg = year_avg

    # Recent 30 days vs baseline
    recent_30 = [v for i, v in enumerate(values) if len(values) - i <= 30]
    if len(recent_30) >= 5:
        recent_avg = sum(recent_30) / len(recent_30)
        baseline = sum(values[:len(values)//2]) / (len(values)//2) if len(values) > 60 else avg_all
        change = recent_avg - baseline
        status = "✅ Improved" if change < -2 else "⚠️ Elevated" if change > 2 else "🔄 Stable"
        lines.append(f"\n🎯 Recent 30 days: {recent_avg:.1f} bpm ({status})")

    return lines


def _analyze_hrv(data_list: List[Dict], start: date, end: date) -> List[str]:
    """Analyze HRV (Heart Rate Variability) trends."""
    lines = []

    # Extract HRV values (can be stored in multiple field formats)
    values = []
    weekly_avgs = []
    statuses = []

    for item in data_list:
        hrv_val = None

        # Try different field names (prioritize hrv_value which is the main field)
        if 'hrv_value' in item:
            hrv_val = item['hrv_value']
        elif 'average_hrv_rmssd' in item:
            hrv_val = item['average_hrv_rmssd']
        elif 'hrv_rmssd' in item:
            hrv_val = item['hrv_rmssd']
        elif 'lastNightAvg' in item:
            hrv_val = item['lastNightAvg']

        # Also try to extract from raw_data nested structure
        if not hrv_val and 'raw_data' in item and isinstance(item['raw_data'], dict):
            hrv_summary = item['raw_data'].get('hrvSummary', {})
            hrv_val = hrv_summary.get('lastNightAvg')

            # Collect weekly average if available
            weekly = hrv_summary.get('weeklyAvg')
            if weekly and weekly > 0:
                weekly_avgs.append(weekly)

        if hrv_val and hrv_val > 0:
            values.append(hrv_val)

            # Track balance status if available
            status = item.get('status') or item.get('hrv_status')
            if status:
                statuses.append(status)

    if not values:
        return ["No valid HRV data"]

    avg_all = sum(values) / len(values)
    lines.append(f"📊 Overall: {avg_all:.1f} ms (last night average)")
    lines.append(f"   Data points: {len(values)} days")

    # Show weekly average if available
    if weekly_avgs:
        avg_weekly = sum(weekly_avgs) / len(weekly_avgs)
        lines.append(f"   Weekly Average: {avg_weekly:.1f} ms")

    # Yearly trend
    yearly_avgs = {}
    for item in data_list:
        # Extract HRV value using same logic as above
        hrv_val = item.get('hrv_value')
        if not hrv_val and 'raw_data' in item:
            hrv_summary = item['raw_data'].get('hrvSummary', {})
            hrv_val = hrv_summary.get('lastNightAvg')

        if 'date' in item and hrv_val and hrv_val > 0:
            year = item['date'][:4]
            if year not in yearly_avgs:
                yearly_avgs[year] = []
            yearly_avgs[year].append(hrv_val)

    if len(yearly_avgs) > 1:
        lines.append("\n📅 Yearly Trends:")
        prev_avg = None
        for year in sorted(yearly_avgs.keys()):
            year_avg = sum(yearly_avgs[year]) / len(yearly_avgs[year])
            change_str = ""
            if prev_avg:
                change = year_avg - prev_avg
                pct = (change / prev_avg) * 100
                direction = "↑" if change > 0 else "↓"
                status = "✅" if change > 0 else "⚠️"
                change_str = f" ({status} {direction} {abs(change):.1f} ms, {abs(pct):.1f}%)"
            lines.append(f"   {year}: {year_avg:.1f} ms{change_str}")
            prev_avg = year_avg

    # Balance status summary
    if statuses:
        balanced_count = statuses.count('BALANCED')
        unbalanced_count = statuses.count('UNBALANCED')
        total = len(statuses)
        lines.append(f"\n🎯 Recovery Status: {balanced_count}/{total} days BALANCED ({balanced_count/total*100:.0f}%)")

    # Recent trend
    recent_30 = values[-30:] if len(values) >= 30 else values
    if len(recent_30) >= 5:
        recent_avg = sum(recent_30) / len(recent_30)
        baseline = sum(values[:len(values)//2]) / (len(values)//2) if len(values) > 60 else avg_all
        change = recent_avg - baseline
        status = "✅ Improved" if change > 3 else "⚠️ Declined" if change < -3 else "🔄 Stable"
        lines.append(f"   Recent 30 days: {recent_avg:.1f} ms ({status})")

    return lines


def _analyze_sleep(data_list: List[Dict], start: date, end: date) -> List[str]:
    """Analyze sleep quality trends."""
    lines = []

    total_sleep_hrs = []
    deep_sleep_hrs = []
    rem_sleep_hrs = []
    sleep_scores = []

    for item in data_list:
        # Total sleep
        total = item.get('total_sleep_seconds', 0) / 3600
        if total > 0:
            total_sleep_hrs.append(total)

        # Deep sleep
        deep = item.get('deep_sleep_seconds', 0) / 3600
        if deep > 0:
            deep_sleep_hrs.append(deep)

        # REM sleep
        rem = item.get('rem_sleep_seconds', 0) / 3600
        if rem > 0:
            rem_sleep_hrs.append(rem)

        # Sleep score
        score = item.get('overall_sleep_score') or item.get('sleep_score', 0)
        if score > 0:
            sleep_scores.append(score)

    if not total_sleep_hrs:
        return ["No valid sleep data"]

    avg_total = sum(total_sleep_hrs) / len(total_sleep_hrs)
    avg_deep = sum(deep_sleep_hrs) / len(deep_sleep_hrs) if deep_sleep_hrs else 0
    avg_rem = sum(rem_sleep_hrs) / len(rem_sleep_hrs) if rem_sleep_hrs else 0
    avg_score = sum(sleep_scores) / len(sleep_scores) if sleep_scores else 0

    lines.append(f"📊 Overall Averages:")
    lines.append(f"   Total Sleep: {avg_total:.1f} hrs/night")
    if avg_deep > 0:
        deep_pct = (avg_deep / avg_total) * 100
        lines.append(f"   Deep Sleep: {avg_deep:.1f} hrs ({deep_pct:.0f}%)")
    if avg_rem > 0:
        rem_pct = (avg_rem / avg_total) * 100
        lines.append(f"   REM Sleep: {avg_rem:.1f} hrs ({rem_pct:.0f}%)")
    if avg_score > 0:
        lines.append(f"   Sleep Score: {avg_score:.0f}/100")

    # Recent 30 days comparison
    recent_30_total = total_sleep_hrs[-30:] if len(total_sleep_hrs) >= 30 else total_sleep_hrs
    if len(recent_30_total) >= 5:
        recent_avg = sum(recent_30_total) / len(recent_30_total)
        change = recent_avg - avg_total
        status = "✅" if abs(change) < 0.3 else "⚠️"
        lines.append(f"\n🎯 Recent 30 days: {recent_avg:.1f} hrs ({status})")

    return lines


def _analyze_stress(data_list: List[Dict], start: date, end: date) -> List[str]:
    """Analyze stress level trends."""
    lines = []

    avg_stress_values = []
    high_stress_days = 0

    for item in data_list:
        avg_stress = item.get('average_stress_level')
        if avg_stress is not None:
            avg_stress_values.append(avg_stress)
            if avg_stress > 50:
                high_stress_days += 1

    if not avg_stress_values:
        return ["No valid stress data"]

    avg_all = sum(avg_stress_values) / len(avg_stress_values)
    lines.append(f"📊 Average Stress: {avg_all:.1f}/100")
    lines.append(f"   High stress days (>50): {high_stress_days}/{len(avg_stress_values)} ({high_stress_days/len(avg_stress_values)*100:.0f}%)")

    # Recent trend
    recent_30 = avg_stress_values[-30:] if len(avg_stress_values) >= 30 else avg_stress_values
    if len(recent_30) >= 5:
        recent_avg = sum(recent_30) / len(recent_30)
        change = recent_avg - avg_all
        status = "✅ Lower" if change < -5 else "⚠️ Higher" if change > 5 else "🔄 Stable"
        lines.append(f"\n🎯 Recent 30 days: {recent_avg:.1f}/100 ({status})")

    return lines


def _analyze_steps(data_list: List[Dict], start: date, end: date) -> List[str]:
    """Analyze daily steps trends."""
    lines = []

    step_values = []
    for item in data_list:
        steps = item.get('total_steps')
        if steps:
            step_values.append(steps)

    if not step_values:
        return ["No valid steps data"]

    avg_all = sum(step_values) / len(step_values)
    lines.append(f"📊 Average Daily Steps: {avg_all:,.0f}")

    # Active days (>5000 steps)
    active_days = sum(1 for s in step_values if s > 5000)
    lines.append(f"   Active days (>5k steps): {active_days}/{len(step_values)} ({active_days/len(step_values)*100:.0f}%)")

    # Recent trend
    recent_30 = step_values[-30:] if len(step_values) >= 30 else step_values
    if len(recent_30) >= 5:
        recent_avg = sum(recent_30) / len(recent_30)
        change = recent_avg - avg_all
        pct = (change / avg_all) * 100
        status = "✅" if change > 500 else "⚠️" if change < -500 else "🔄"
        lines.append(f"\n🎯 Recent 30 days: {recent_avg:,.0f} steps ({status} {pct:+.0f}%)")

    return lines


def _analyze_generic(metric: str, data_list: List[Dict], start: date, end: date) -> List[str]:
    """Generic analysis for unknown metric types."""
    lines = [f"Data points: {len(data_list)}"]
    lines.append("(Use specific metrics like 'resting_heart_rate', 'hrv', 'sleep' for detailed analysis)")
    return lines
