"""
Hold Task Poller
================

Celery task that polls for HOLD tasks and starts them when dependencies are met.
Uses non-blocking self-rescheduling to avoid blocking the worker.
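
Example (illustrative; the company/batch values are placeholders and a running
Celery worker is assumed):

    from src.smart_inventory.tasks.hold_task_poller import poll_hold_tasks

    # Kick off polling once a batch has been enqueued; the poller re-enqueues
    # itself every ``poll_interval_seconds`` until no HOLD tasks remain or
    # ``max_duration_seconds`` elapses.
    poll_hold_tasks.apply_async(kwargs={"company_id": 42, "batch_id": "batch-001"})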
"""

import logging
import sys
import time
from typing import Optional

from src.utils.celery_worker import celery_app

logger = logging.getLogger(__name__)


@celery_app.task(bind=True, name='src.smart_inventory.tasks.hold_task_poller.poll_hold_tasks')
def poll_hold_tasks(
    self,
    company_id: Optional[int] = None,
    batch_id: Optional[str] = None,
    max_duration_seconds: int = 300,  # 5 minutes max
    poll_interval_seconds: int = 2,  # Check every 2 seconds
    main_poll_interval_seconds: int = 20,  # Deprecated: no longer used, kept for backward compatibility
    _start_time: Optional[float] = None,  # Internal: start time carried across reschedules
    _total_started: int = 0,  # Internal: total tasks started, carried across reschedules
    _iterations: int = 0  # Internal: iteration count, carried across reschedules
):
    """
    Poll for HOLD tasks and start them when dependencies are met.
    
    This task handles BOTH:
    - Snapshot tasks (daily_sales, inventory_snapshot, etc.)
    - CSV processing tasks (product, purchase_order, sales_order, etc.)
    
    This task uses non-blocking self-rescheduling:
    - Checks if HOLD tasks can be started
    - Reschedules itself with countdown to avoid blocking the worker
    - Stops when no HOLD tasks remain or max duration is reached
    
    Args:
        company_id: Optional filter by company
        batch_id: Optional filter by batch
        max_duration_seconds: Maximum total polling time (default 300 seconds)
        poll_interval_seconds: Delay between polling iterations (default 2 seconds)
        main_poll_interval_seconds: Deprecated; accepted for backward
            compatibility and ignored
        _start_time / _total_started / _iterations: Internal state carried
            across reschedules; callers should not set these

    Returns:
        dict with ``success``, ``total_started``, ``duration_seconds``,
        ``iterations`` and a ``reason`` of ``"timeout"``, ``"no_hold_tasks"``
        or ``"rescheduled"``; on import failure, ``success`` is False with an
        ``error`` message.
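
    Example (illustrative; the company id and overrides below are placeholders):

        # Poll with a tighter budget than the defaults
        poll_hold_tasks.apply_async(kwargs={
            "company_id": 42,
            "max_duration_seconds": 60,
            "poll_interval_seconds": 5,
        })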
    """
    try:
        from src.utils.db import get_db_session
        from src.smart_inventory.utils.task_orchestrator import process_hold_tasks, has_hold_tasks
        from src.smart_inventory.utils.csv_task_orchestrator import process_hold_csv_tasks, has_hold_csv_tasks
    except ImportError as e:
        logger.error(f"Failed to import required modules: {e}")
        return {"success": False, "error": str(e)}
    
    # Initialize start time on first call
    if _start_time is None:
        _start_time = time.time()
    
    elapsed = time.time() - _start_time
    
    # Check if we've exceeded max duration
    if elapsed >= max_duration_seconds:
        logger.debug(f"Hold poller timeout ({max_duration_seconds}s)")
        return {
            "success": True,
            "total_started": _total_started,
            "duration_seconds": elapsed,
            "iterations": _iterations,
            "reason": "timeout"
        }
    
    # Get database session
    db = get_db_session()
    
    try:
        # Check if there are any HOLD tasks (snapshot or CSV)
        has_snapshot_hold = has_hold_tasks(db, company_id)
        has_csv_hold = has_hold_csv_tasks(db, company_id)
        
        if not has_snapshot_hold and not has_csv_hold:
            logger.debug("No HOLD tasks remaining (snapshot or CSV)")
            return {
                "success": True,
                "total_started": _total_started,
                "duration_seconds": elapsed,
                "iterations": _iterations,
                "reason": "no_hold_tasks"
            }
        
        # Process snapshot HOLD tasks
        started = 0
        if has_snapshot_hold:
            started += process_hold_tasks(db, company_id, batch_id)
        
        # Process CSV HOLD tasks
        if has_csv_hold:
            started += process_hold_csv_tasks(db, company_id)
        
        _total_started += started
        _iterations += 1
        
        if started > 0:
            logger.info(f"Started {started} HOLD tasks (snapshot + CSV)")
        
        # Log status periodically
        if _iterations % 10 == 0:
            logger.debug(f"Hold poller: {elapsed:.0f}s, {_total_started} started")
    
    except Exception as e:
        # Log and continue; the reschedule below keeps the poller alive
        logger.error(f"Error in hold task poller: {e}", exc_info=True)
    
    finally:
        db.close()
    
    # Reschedule self with countdown so the worker is never blocked.
    # Windows (solo pool) uses apply_async; other platforms use send_task by
    # task name for prefork pool compatibility.
    task_kwargs = {
        "company_id": company_id,
        "batch_id": batch_id,
        "max_duration_seconds": max_duration_seconds,
        "poll_interval_seconds": poll_interval_seconds,
        "_start_time": _start_time,
        "_total_started": _total_started,
        "_iterations": _iterations
    }
    
    if sys.platform == 'win32':
        poll_hold_tasks.apply_async(kwargs=task_kwargs, countdown=poll_interval_seconds)
    else:
        # Linux: use send_task with task name for prefork pool compatibility
        celery_app.send_task(
            'src.smart_inventory.tasks.hold_task_poller.poll_hold_tasks',
            kwargs=task_kwargs,
            countdown=poll_interval_seconds
        )
    
    return {
        "success": True,
        "total_started": _total_started,
        "duration_seconds": elapsed,
        "iterations": _iterations,
        "reason": "rescheduled"
    }


@celery_app.task(bind=True, name='src.smart_inventory.tasks.hold_task_poller.check_and_process_hold_tasks')
def check_and_process_hold_tasks(self, company_id: Optional[int] = None):
    """
    One-time check to process any HOLD tasks (both snapshot and CSV).
    Use this for manual triggering or scheduled checks.
    
    Args:
        company_id: Optional filter by company

    Returns:
        dict with ``success`` and ``started`` (number of HOLD tasks started);
        on failure, ``success`` is False with an ``error`` message.
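
    Example (illustrative sketch; no such beat entry exists in the codebase,
    the 5-minute interval is a placeholder, and this would replace any existing
    schedule, so merge entries as appropriate):

        celery_app.conf.beat_schedule = {
            "check-hold-tasks": {
                "task": "src.smart_inventory.tasks.hold_task_poller.check_and_process_hold_tasks",
                "schedule": 300.0,  # every 5 minutes
            },
        }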
    """
    try:
        from src.utils.db import get_db_session
        from src.smart_inventory.utils.task_orchestrator import process_hold_tasks, has_hold_tasks
        from src.smart_inventory.utils.csv_task_orchestrator import process_hold_csv_tasks, has_hold_csv_tasks
    except ImportError as e:
        logger.error(f"Failed to import required modules: {e}")
        return {"success": False, "error": str(e)}
    
    db = get_db_session()
    
    try:
        has_snapshot_hold = has_hold_tasks(db, company_id)
        has_csv_hold = has_hold_csv_tasks(db, company_id)
        
        if not has_snapshot_hold and not has_csv_hold:
            return {"success": True, "message": "No HOLD tasks to process", "started": 0}
        
        started = 0
        
        # Process snapshot HOLD tasks
        if has_snapshot_hold:
            started += process_hold_tasks(db, company_id)
        
        # Process CSV HOLD tasks
        if has_csv_hold:
            started += process_hold_csv_tasks(db, company_id)
        
        return {
            "success": True,
            "started": started,
            "message": f"Processed HOLD tasks (snapshot + CSV), started {started}"
        }
    
    except Exception as e:
        logger.error(f"Error checking HOLD tasks: {e}", exc_info=True)
        return {"success": False, "error": str(e)}
    
    finally:
        db.close()
