"""
Data synchronization orchestration service.

Coordinates Garmin API data fetching, storage, and incremental sync logic.
"""

from datetime import date, timedelta
from typing import List, Dict, Any, Optional, Tuple
from pathlib import Path

from health import config
from health.services.garmin_client import GarminHealthClient
from health.services.storage import HealthStorage
from health.db.repository import HealthRepository
from health.utils.exceptions import SyncError, GarminAPIError
from health.utils.date_utils import (
    parse_date,
    date_range,
    split_date_range,
    get_yesterday,
    format_date,
)
from health.utils.logging_config import setup_logger

logger = setup_logger(__name__)


class HealthDataSync:
    """Service for orchestrating health data synchronization.

    Coordinates three collaborators:
      * ``client``  -- fetches raw data from the Garmin Connect API
      * ``storage`` -- persists fetched metrics and activities
      * ``repo``    -- records sync history and per-type last-sync state
    """

    def __init__(
        self,
        client: Optional[GarminHealthClient] = None,
        storage: Optional[HealthStorage] = None,
        repo: Optional[HealthRepository] = None,
    ) -> None:
        """Initialize sync service.

        Args:
            client: Optional Garmin client (creates new if not provided)
            storage: Optional storage service (creates new if not provided)
            repo: Optional repository (creates new if not provided)
        """
        self.client = client or GarminHealthClient()
        self.storage = storage or HealthStorage()
        self.repo = repo or HealthRepository()

    def authenticate(self) -> None:
        """Authenticate with Garmin Connect.

        Raises:
            GarminAuthError: If authentication fails (propagated from client).
        """
        logger.info("Authenticating with Garmin...")
        self.client.authenticate()
        logger.info("Authentication successful")

    @staticmethod
    def _merge_stats(total: Dict[str, int], partial: Dict[str, int]) -> None:
        """Accumulate one ``{"synced","skipped","errors"}`` dict into another in place."""
        for key in ("synced", "skipped", "errors"):
            total[key] += partial.get(key, 0)

    def _sync_range(
        self, metric_type: str, start_date: date, end_date: date, force: bool
    ) -> Dict[str, int]:
        """Dispatch a range sync to the activities or daily-metrics path."""
        if metric_type == "activities":
            return self.sync_activities_range(start_date, end_date, force=force)
        return self.sync_daily_metrics_range(metric_type, start_date, end_date, force=force)

    def sync_daily_metric(
        self, metric_type: str, target_date: date, force: bool = False
    ) -> bool:
        """Sync a single daily metric for a specific date.

        The client fetch method is looked up by naming convention
        (``fetch_<metric_type>``); metric types without a matching client
        method are skipped with a warning.

        Args:
            metric_type: Type of metric to sync
            target_date: Date to sync
            force: If True, re-sync even if data already exists

        Returns:
            True if data was synced; False if skipped, no data was
            available, or a non-API error occurred (non-API errors are
            logged and swallowed, not raised).

        Raises:
            GarminAPIError: If the Garmin API call fails.
        """
        # Check if already synced (unless forcing)
        if not force and self.storage.metric_exists(metric_type, target_date):
            logger.debug(f"Skipping {metric_type} for {target_date} (already synced)")
            return False

        # Resolve the fetch method by convention: fetch_<metric_type>
        fetch_method = getattr(self.client, f"fetch_{metric_type}", None)
        if not fetch_method:
            logger.warning(f"No fetch method for metric type: {metric_type}")
            return False

        try:
            # Fetch data from Garmin
            data = fetch_method(target_date)

            if not data:
                logger.debug(f"No {metric_type} data available for {target_date}")
                return False

            # Save to storage
            self.storage.save_daily_metric(data, metric_type)
            logger.info(f"✓ Synced {metric_type} for {target_date}")
            return True

        except GarminAPIError as e:
            # Re-raise API errors so range syncs can count them as errors.
            logger.error(f"API error syncing {metric_type} for {target_date}: {e}")
            raise
        except Exception as e:
            # Swallow unexpected errors so one bad day does not abort a long
            # sync; callers see this as "no data" (False).
            logger.error(f"Error syncing {metric_type} for {target_date}: {e}")
            return False

    def sync_daily_metrics_range(
        self,
        metric_type: str,
        start_date: date,
        end_date: date,
        force: bool = False,
    ) -> Dict[str, int]:
        """Sync a daily metric for a date range.

        Continues through the whole range even when individual dates fail.

        Args:
            metric_type: Type of metric to sync
            start_date: Start date (inclusive)
            end_date: End date (inclusive)
            force: If True, re-sync existing data

        Returns:
            Statistics dictionary with ``synced``/``skipped``/``errors``
            counts.  Note: only ``GarminAPIError`` counts as an error;
            non-API failures swallowed by ``sync_daily_metric`` are
            counted under ``skipped``.
        """
        logger.info(
            f"Syncing {metric_type} from {start_date} to {end_date} "
            f"({(end_date - start_date).days + 1} days)"
        )

        stats = {"synced": 0, "skipped": 0, "errors": 0}

        for current_date in date_range(start_date, end_date):
            try:
                if self.sync_daily_metric(metric_type, current_date, force=force):
                    stats["synced"] += 1
                else:
                    stats["skipped"] += 1

            except GarminAPIError as e:
                logger.error(f"API error on {current_date}: {e}")
                stats["errors"] += 1
                # Continue with other dates even if one fails
                continue

        logger.info(
            f"{metric_type} sync complete: "
            f"{stats['synced']} synced, {stats['skipped']} skipped, "
            f"{stats['errors']} errors"
        )

        return stats

    def sync_activities_range(
        self, start_date: date, end_date: date, force: bool = False
    ) -> Dict[str, int]:
        """Sync activities for a date range.

        Fetches all activities in the range in one API call, then saves
        them individually so a single bad activity does not abort the rest.

        Args:
            start_date: Start date
            end_date: End date
            force: If True, re-sync existing activities

        Returns:
            Statistics dictionary with ``synced``/``skipped``/``errors``
            counts.  A failure of the range fetch itself is recorded as
            a single error.
        """
        logger.info(
            f"Syncing activities from {start_date} to {end_date}"
        )

        stats = {"synced": 0, "skipped": 0, "errors": 0}

        try:
            # Fetch all activities in the range
            activities = self.client.fetch_activities(start_date, end_date)

            logger.info(f"Found {len(activities)} activities")

            for activity in activities:
                try:
                    # Check if already synced
                    if not force and self.storage.activity_exists(activity.activity_id):
                        logger.debug(
                            f"Skipping activity {activity.activity_id} (already synced)"
                        )
                        stats["skipped"] += 1
                        continue

                    # Save activity
                    self.storage.save_activity(activity)
                    stats["synced"] += 1
                    logger.info(
                        f"✓ Synced activity {activity.activity_id} "
                        f"({activity.activity_type}) on {activity.date}"
                    )

                except Exception as e:
                    # Per-activity failures are counted but do not stop the loop.
                    logger.error(f"Error saving activity {activity.activity_id}: {e}")
                    stats["errors"] += 1
                    continue

        except GarminAPIError as e:
            logger.error(f"API error fetching activities: {e}")
            stats["errors"] += 1

        logger.info(
            f"Activity sync complete: "
            f"{stats['synced']} synced, {stats['skipped']} skipped, "
            f"{stats['errors']} errors"
        )

        return stats

    def sync_all_metrics(
        self,
        start_date: date,
        end_date: date,
        metric_types: Optional[List[str]] = None,
        force: bool = False,
    ) -> Dict[str, Dict[str, int]]:
        """Sync all (or specified) metrics for a date range.

        Activities are always synced (via ``sync_activities_range``)
        regardless of the ``metric_types`` parameter, and a summary sync
        record is written to the repository at the end.

        Args:
            start_date: Start date
            end_date: End date
            metric_types: Optional list of metric types to sync (defaults to all)
            force: If True, re-sync existing data

        Returns:
            Dictionary mapping metric type to sync statistics (the
            ``"activities"`` key is always present).
        """
        # Default to all configured metric types except activities
        if not metric_types:
            metric_types = [
                mt
                for mt in config.DATA_TYPE_CONFIG.keys()
                if mt != "activities"  # Handle activities separately
            ]
        else:
            # Even if metric_types is provided, filter out activities to handle separately
            metric_types = [mt for mt in metric_types if mt != "activities"]

        logger.info(
            f"Syncing {len(metric_types)} metric types from {start_date} to {end_date}"
        )

        results = {}

        for metric_type in metric_types:
            try:
                stats = self.sync_daily_metrics_range(
                    metric_type, start_date, end_date, force=force
                )
                results[metric_type] = stats

                # Update last sync state
                if stats["synced"] > 0:
                    self.repo.update_last_sync_state(
                        metric_type, end_date, total_records=stats["synced"]
                    )

            except Exception as e:
                logger.error(f"Failed to sync {metric_type}: {e}")
                results[metric_type] = {"synced": 0, "skipped": 0, "errors": 1}
                continue

        # Sync activities (always, regardless of metric_types parameter)
        try:
            activity_stats = self.sync_activities_range(start_date, end_date, force=force)
            results["activities"] = activity_stats

            if activity_stats["synced"] > 0:
                self.repo.update_last_sync_state(
                    "activities", end_date, total_records=activity_stats["synced"]
                )

        except Exception as e:
            logger.error(f"Failed to sync activities: {e}")
            results["activities"] = {"synced": 0, "skipped": 0, "errors": 1}

        # Create sync record summarizing the whole run
        total_synced = sum(r.get("synced", 0) for r in results.values())
        total_errors = sum(r.get("errors", 0) for r in results.values())

        status = "success" if total_errors == 0 else ("partial" if total_synced > 0 else "failed")

        self.repo.create_sync_record(
            data_type="all_metrics",
            start_date=start_date,
            end_date=end_date,
            status=status,
            records_synced=total_synced,
            error_message=f"{total_errors} errors" if total_errors > 0 else None,
        )

        return results

    def sync_incremental(
        self,
        metric_types: Optional[List[str]] = None,
        until_date: Optional[date] = None,
    ) -> Dict[str, Dict[str, int]]:
        """Perform incremental sync for all metrics since last sync.

        For each metric type, syncs the historical gap (day after last
        sync through yesterday, without force) and — when ``until_date``
        includes today — force-syncs today, since today's data is still
        changing.

        Args:
            metric_types: Optional list of metric types to sync
            until_date: Optional end date (defaults to yesterday)

        Returns:
            Dictionary mapping metric type to sync statistics
        """
        if not until_date:
            until_date = get_yesterday()

        if not metric_types:
            metric_types = list(config.DATA_TYPE_CONFIG.keys())

        logger.info(f"Starting incremental sync (until {until_date})")

        results = {}

        for metric_type in metric_types:
            try:
                # Get last sync date
                last_sync = self.repo.get_last_sync_date(metric_type)

                # Determine start_date for historical sync
                if last_sync:
                    start_date = last_sync + timedelta(days=1)
                else:
                    # First sync - use default start date
                    start_date = parse_date(config.HISTORICAL_START_DATE)
                    logger.info(
                        f"{metric_type} not synced before, starting from {start_date}"
                    )

                stats = {"synced": 0, "skipped": 0, "errors": 0}
                today = date.today()
                yesterday = today - timedelta(days=1)

                # 1. Sync historical data (start_date to min(until_date, yesterday)).
                # Only if we have a valid range before today.
                historical_end = min(until_date, yesterday)

                if start_date <= historical_end:
                    self._merge_stats(
                        stats,
                        self._sync_range(metric_type, start_date, historical_end, force=False),
                    )

                # 2. Sync today (if requested) - always force, since today's
                # data is incomplete and must be refreshed.
                if until_date >= today:
                    logger.info(f"Force syncing {metric_type} for today ({today})")
                    self._merge_stats(
                        stats,
                        self._sync_range(metric_type, today, today, force=True),
                    )

                # Log if there was nothing to do at all
                if stats["synced"] == 0 and stats["errors"] == 0 and stats["skipped"] == 0:
                     if start_date > historical_end and until_date < today:
                         logger.info(f"{metric_type} already up to date")

                results[metric_type] = stats

                # Update last sync state if we synced anything. Clamp to
                # today: recording a future until_date would make the next
                # incremental run start past days that were never synced,
                # leaving a permanent gap.
                if stats["synced"] > 0:
                    self.repo.update_last_sync_state(
                        metric_type, min(until_date, today), total_records=stats["synced"]
                    )

            except Exception as e:
                logger.error(f"Incremental sync failed for {metric_type}: {e}")
                results[metric_type] = {"synced": 0, "skipped": 0, "errors": 1}
                continue

        return results

    def backfill_historical(
        self,
        start_date: date,
        end_date: Optional[date] = None,
        metric_types: Optional[List[str]] = None,
        batch_size: int = config.DEFAULT_BATCH_SIZE_DAYS,
    ) -> Dict[str, Dict[str, int]]:
        """Backfill historical data in batches.

        Splits the range into batches of ``batch_size`` days and runs
        ``sync_all_metrics`` on each, aggregating the statistics.

        Args:
            start_date: Start date for backfill
            end_date: End date (defaults to yesterday)
            metric_types: Optional list of metric types
            batch_size: Days per batch (default: config.DEFAULT_BATCH_SIZE_DAYS,
                captured at import time)

        Returns:
            Aggregated sync statistics per metric type
        """
        if not end_date:
            end_date = get_yesterday()

        if not metric_types:
            metric_types = list(config.DATA_TYPE_CONFIG.keys())

        logger.info(
            f"Starting historical backfill from {start_date} to {end_date} "
            f"({(end_date - start_date).days + 1} days, batch size: {batch_size})"
        )

        # Split into batches
        batches = split_date_range(start_date, end_date, batch_size)
        logger.info(f"Processing {len(batches)} batches")

        # Aggregate results
        aggregated_results: Dict[str, Dict[str, int]] = {
            mt: {"synced": 0, "skipped": 0, "errors": 0} for mt in metric_types
        }

        for i, (batch_start, batch_end) in enumerate(batches, 1):
            logger.info(
                f"\n📦 Batch {i}/{len(batches)}: {batch_start} to {batch_end}"
            )

            batch_results = self.sync_all_metrics(
                batch_start, batch_end, metric_types=metric_types
            )

            # Aggregate statistics. sync_all_metrics may return types not in
            # metric_types (e.g. "activities"), so seed missing entries.
            for metric_type, stats in batch_results.items():
                if metric_type not in aggregated_results:
                    aggregated_results[metric_type] = {"synced": 0, "skipped": 0, "errors": 0}

                self._merge_stats(aggregated_results[metric_type], stats)

        logger.info("\n✅ Historical backfill complete!")
        logger.info("\n📊 Final Statistics:")
        for metric_type, stats in aggregated_results.items():
            logger.info(
                f"  {metric_type}: {stats['synced']} synced, "
                f"{stats['skipped']} skipped, {stats['errors']} errors"
            )

        return aggregated_results

    def get_sync_status(self) -> Dict[str, Any]:
        """Get current sync status for all data types.

        Returns:
            Dictionary keyed by data type; each value has
            ``last_sync_date``, ``total_records``, ``updated_at`` and a
            ``status`` of either ``"synced"`` or ``"never_synced"``.
        """
        status = {}
        all_states = self.repo.get_all_last_sync_states()

        for data_type in config.DATA_TYPE_CONFIG.keys():
            if data_type in all_states:
                state = all_states[data_type]
                status[data_type] = {
                    "last_sync_date": state["last_sync_date"],
                    "total_records": state["total_records"],
                    "updated_at": state["updated_at"],
                    "status": "synced",
                }
            else:
                status[data_type] = {
                    "last_sync_date": None,
                    "total_records": 0,
                    "updated_at": None,
                    "status": "never_synced",
                }

        return status