import time as _imp_time  # Added for import timing debug

_imp_start = _imp_time.time()

import requests
from loguru import logger

logger.debug(f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] requests loaded")
from flask import Flask, Response, request

logger.debug(
    f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] flask (Flask, Response, request) loaded"
)

logger.debug(f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] loguru.logger loaded")

from lib.memory_profiler import profiler

logger.debug(
    f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] lib.memory_profiler.profiler loaded"
)
from lib.profiler_endpoints import setup_profiler_routes

logger.debug(
    f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] lib.profiler_endpoints.setup_profiler_routes loaded"
)


from sentry_sdk.integrations.logging import LoggingIntegration
from sentry_sdk.integrations.loguru import LoggingLevels

# Configure for production safety
profiler.configure_for_production()

# Configure which modules to profile
# Application packages: profiled in full.
profiler.add_app_modules(["lib", "views", "services", "tasks", "models"])
# Third-party libraries whose usage should also be tracked by the profiler.
profiler.add_library_modules(
    [
        "gunicorn",
        "gevent",
        "celery",
        "redis",
        "psutil",
        "requests",
        "urllib3",
        "pymongo",
        "sqlalchemy",
        "flask",
    ]
)

from lib import recaptcha

logger.debug(f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] lib.recaptcha loaded")
from lib.log_manager import set_log_level_in_redis, start_background_logging

logger.debug(
    f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] lib.log_manager (set_log_level_in_redis, start_background_logging) loaded"
)

start_background_logging()
# Save the original requests.Session.request method
# (kept in a module-level name so the wrapper below can delegate to it).
_original_request = requests.Session.request


def _logged_request(self, method, url, *args, **kwargs):
    """Wrapper for requests.Session.request that logs every outgoing call.

    Behaves exactly like the original method; only adds a debug log line
    with the HTTP method and URL before delegating.
    """
    # Log the URL of the outgoing request
    logger.debug(f"Monkey request CookieCutter: {method} {url}")
    # Call the original request method
    return _original_request(self, method, url, *args, **kwargs)


logger.debug("Monkey Patching requests.Session.request method")
# Monkey patch the requests.Session.request method
# (process-wide: affects every requests.Session created after this point).
requests.Session.request = _logged_request

import atexit

logger.debug(f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] atexit loaded")
import time

logger.debug(f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] time loaded")

import sentry_sdk

logger.debug(f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] sentry_sdk loaded")
from flask import redirect

logger.debug(f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] flask.redirect loaded")
from flask_cors import CORS

logger.debug(f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] flask_cors.CORS loaded")
from greenlet import GreenletExit

logger.debug(
    f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] greenlet.GreenletExit loaded"
)
from prometheus_client import generate_latest

logger.debug(
    f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] prometheus_client.generate_latest loaded"
)
from werkzeug.middleware.dispatcher import DispatcherMiddleware

logger.debug(
    f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] werkzeug.middleware.dispatcher.DispatcherMiddleware loaded"
)

from etc.config import (
    BASIC_AUTH_PASSWORD,
    BASIC_AUTH_USERNAME,
    HOST,
    MODE,
    RUNAS,
    SENTRY_DSN,
)

logger.debug(
    f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] etc.config constants loaded"
)
from lib import mailer

logger.debug(f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] lib.mailer loaded")
from lib.dbdrivers import exporter as redis_exporter  # Import the Redis exporter

logger.debug(
    f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] lib.dbdrivers.exporter (redis_exporter) loaded"
)
from lib.redqueue import bg_listeners

logger.debug(
    f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] lib.redqueue.bg_listeners loaded"
)
from tasks import JOBS, run_startup_jobs, scheduler

logger.debug(
    f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] tasks (JOBS, run_startup_jobs, scheduler) loaded"
)
from views.auth import app as auth_bp

logger.debug(f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] views.auth loaded")
from views.closedapis import app as closed_apis_bp

logger.debug(f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] views.closedapis loaded")
from views.conversation import app as conversation_bp

logger.debug(
    f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] views.conversation loaded"
)
from views.dashboard import app as dashboard_bp

logger.debug(f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] views.dashboard loaded")
from views.evercookie import app as evercookie_bp

logger.debug(f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] views.evercookie loaded")
from views.jserver import app as jserver_bp

logger.debug(f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] views.jserver loaded")
from views.script_requests import app as script_requests_bp

logger.debug(
    f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] views.script_requests loaded"
)
from views.lake import app as lake_bp

logger.debug(f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] views.lake loaded")
from views.loyalty_rewards import app as loyalty_rewards_bp

logger.debug(
    f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] views.loyalty_rewards loaded"
)
from views.mockapi import app as mockapi_bp

logger.debug(f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] views.mockapi loaded")
from views.otp import otp as otp_bp

logger.debug(f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] views.otp loaded")
from views.prefillcapture import app as prefill_capture_bp

logger.debug(
    f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] views.prefillcapture loaded"
)
from views.productapis import app as productapis_bp

logger.debug(
    f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] views.productapis loaded"
)
from views.server_sent_events import app as sse_bp

logger.debug(
    f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] views.server_sent_events loaded"
)
from views.shopify import app as shopify_bp

logger.debug(f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] views.shopify loaded")
from views.shopify_quick_login import app as shopify_auth_bp

logger.debug(
    f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] views.shopify_quick_login loaded"
)
from views.magento_sso import app as magento_sso
from views.us_dashboard import app as us_dashboard

logger.debug(
    f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] views.us_dashboard loaded"
)
from views.wizke import app as wizke_bp

logger.debug(f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] views.wizke loaded")
from views.queue_dashboard import app as queue_dashboard_bp

logger.debug(
    f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] views.queue_dashboard loaded"
)

from views.chatbot import app as chatbot

# Main Flask application object; all blueprints and middleware below attach to it.
app = Flask(__name__)

# Initialize Beanie ODM on app startup (configurable strategy)
import os

logger.debug(f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] os loaded")
import threading

logger.debug(f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] threading loaded")


def initialize_beanie_in_background():
    """Initialize Beanie ODM in a background thread during app startup.

    Honors SKIP_BEANIE_INIT (default "true"): when set, initialization is
    skipped entirely. Otherwise a daemon thread creates its own asyncio
    event loop and runs ``initialize_beanie`` with a 60-second timeout so a
    slow MongoDB connection can never block app startup. All failures are
    logged and swallowed — the app must start even if Beanie cannot.
    """
    # Check if Beanie initialization should be skipped (default: true = disabled)
    if os.getenv("SKIP_BEANIE_INIT", "true").lower() == "true":
        logger.debug(
            "⏭️  Skipping Beanie ODM initialization (SKIP_BEANIE_INIT=true, default behavior)"
        )
        return

    import asyncio

    from lib.dbdrivers import initialize_beanie

    def run_init():
        # Runs in a dedicated thread, so it needs its own event loop.
        try:
            logger.debug("🚀 Starting Beanie ODM initialization...")

            # Create new event loop for this thread
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            try:
                init_task = loop.create_task(initialize_beanie())
                try:
                    # Increased timeout to 60 seconds for production MongoDB
                    # connections. wait_for cancels init_task on timeout.
                    loop.run_until_complete(asyncio.wait_for(init_task, timeout=60.0))
                    logger.debug("✅ Beanie ODM initialization completed in background")
                except asyncio.TimeoutError:
                    logger.warning(
                        "⏰ Beanie ODM initialization timed out after 60s - shadow validation will be disabled"
                    )
                except Exception as init_error:
                    logger.error(
                        f"❌ Beanie ODM initialization failed: {type(init_error).__name__}: {init_error}"
                    )
            finally:
                # Always release the loop, even on unexpected errors above
                # (previously the close could be skipped on failure).
                loop.close()
        except Exception as e:
            logger.error(
                f"❌ Failed to initialize Beanie ODM in background thread: {e}"
            )
            # Don't raise - let app start even if Beanie fails

    # Start initialization in background thread
    init_thread = threading.Thread(target=run_init, daemon=True, name="BeaniInitThread")
    init_thread.start()
    logger.debug("🧵 Started Beanie ODM initialization in background thread")


# Initialize Beanie when app starts (unless disabled)
# initialize_beanie_in_background() DISABLED for now to avoid startup issues

# Create separate WSGI apps for different metrics systems
from prometheus_client import CONTENT_TYPE_LATEST

logger.debug(
    f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] prometheus_client.CONTENT_TYPE_LATEST loaded"
)


def function_metrics_wsgi_app(environ, start_response):
    """WSGI app serving memory-profiler metrics from the default registry."""
    try:
        # generate_latest() with no argument renders the default registry,
        # which is where the memory profiler publishes its metrics.
        payload = generate_latest()
        status_line = "200 OK"
        content_type = CONTENT_TYPE_LATEST
    except Exception as e:
        # Degrade to a plain-text error body instead of crashing the server.
        payload = f"Error generating metrics: {str(e)}".encode("utf-8")
        status_line = "500 Internal Server Error"
        content_type = "text/plain"

    start_response(
        status_line,
        [
            ("Content-Type", content_type),
            ("Content-Length", str(len(payload))),
        ],
    )
    return [payload]


def redis_metrics_wsgi_app(environ, start_response):
    """WSGI app serving Redis-monitoring metrics from their own registry."""
    try:
        # The Redis exporter keeps its metrics in a dedicated registry so
        # they don't mix with the default application registry.
        if redis_exporter and hasattr(redis_exporter, "registry"):
            payload = generate_latest(redis_exporter.registry)
        else:
            payload = b"# Redis monitoring not available\n"
        status_line = "200 OK"
        content_type = CONTENT_TYPE_LATEST
    except Exception as e:
        # Degrade to a plain-text error body instead of crashing the server.
        payload = f"Error generating Redis metrics: {str(e)}".encode("utf-8")
        status_line = "500 Internal Server Error"
        content_type = "text/plain"

    start_response(
        status_line,
        [
            ("Content-Type", content_type),
            ("Content-Length", str(len(payload))),
        ],
    )
    return [payload]


# Set up the dispatcher middleware with separate endpoints
# NOTE(review): requests to these mount points are handled by the raw WSGI
# callables and never reach Flask routing — so the Basic-auth before_request
# hook does not apply to them even though it lists these paths. Confirm this
# is intended.
app.wsgi_app = DispatcherMiddleware(
    app.wsgi_app,
    {
        "/metrics/function": function_metrics_wsgi_app,  # Memory profiler metrics
        "/redis_metrics2": redis_metrics_wsgi_app,  # Redis monitoring metrics (separate registry)
    },
)

# Attach every feature blueprint to the main Flask app.
app.register_blueprint(wizke_bp)
app.register_blueprint(jserver_bp)
app.register_blueprint(script_requests_bp)
app.register_blueprint(shopify_auth_bp)
app.register_blueprint(evercookie_bp)
app.register_blueprint(auth_bp)
app.register_blueprint(dashboard_bp)
app.register_blueprint(mockapi_bp)
app.register_blueprint(closed_apis_bp)
app.register_blueprint(prefill_capture_bp)
app.register_blueprint(loyalty_rewards_bp)
app.register_blueprint(shopify_bp)
app.register_blueprint(magento_sso)
app.register_blueprint(conversation_bp)
app.register_blueprint(otp_bp)
app.register_blueprint(lake_bp)
app.register_blueprint(productapis_bp)
app.register_blueprint(sse_bp)
app.register_blueprint(us_dashboard, url_prefix="/v1/us")  # mounted under /v1/us
app.register_blueprint(queue_dashboard_bp)
app.register_blueprint(chatbot)

from lib.queue_monitor import register_queue_monitor_routes

logger.debug(
    f"[IMPORT][{_imp_time.time() - _imp_start:0.3f}s] lib.queue_monitor.register_queue_monitor_routes loaded"
)

# Adds the queue-monitor routes directly onto the app (not via a blueprint).
register_queue_monitor_routes(app)

# # Run task recovery on startup to handle any stalled tasks from previous runs
# if MODE == "PRODUCTION":
#     try:
#         from lib.task_recovery import run_task_recovery
#         logger.debug("Running startup task recovery...")
#         recovery_result = run_task_recovery()
#         logger.debug(f"Startup task recovery completed: {recovery_result}")
#     except Exception as e:
#         logger.error(f"Startup task recovery failed (non-critical): {e}")

# Set up profiler endpoints
setup_profiler_routes(app, prefix="/profiler")

# Outgoing mail (Postmark SMTP) configuration.
# NOTE(review): SMTP credentials are hard-coded in source — move them to
# config/environment and rotate the exposed token.
app.config["MAIL_SERVER"] = "smtp.postmarkapp.com"
app.config["MAIL_PORT"] = 587
app.config["MAIL_USERNAME"] = "a63adf3b-8948-4f1d-bd7d-6abe062adfc7"
# app.config["MAIL_PASSWORD"] = "fgux nzte kfcf ykwx" # thoughtbeam.letters
app.config["MAIL_PASSWORD"] = "a63adf3b-8948-4f1d-bd7d-6abe062adfc7"
app.config["MAIL_USE_TLS"] = True  # Fixed: Should be True for port 587
app.config["MAIL_USE_SSL"] = False  # Fixed: Should be False when using TLS

# APScheduler configuration (used when RUNAS == "SCHEDULER", see bottom of file).
app.config["SCHEDULER_API_ENABLED"] = True
app.config["JOBS"] = JOBS

# NOTE(review): CORS is wide open (all origins on all routes) — confirm intended.
cors = CORS(app, resources={r"/*": {"origins": "*"}})

logger.debug(f"Starting in {MODE} at {HOST}")

# Sentry is enabled only in production. Performance tracing is disabled
# (traces_sample_rate=0); only CRITICAL-level log records become Sentry
# events, and GreenletExit (normal gevent teardown) is ignored.
if MODE == "PRODUCTION":
    sentry_sdk.init(
        dsn=SENTRY_DSN,
        environment=MODE,
        traces_sample_rate=0,
        ignore_errors=[GreenletExit],
        include_local_variables=True,
        include_source_context=True,
        attach_stacktrace=True,
        integrations=[
            LoggingIntegration(level=None, event_level=LoggingLevels.CRITICAL)
        ],  # Only send exceptions to Sentry
    )


# Routes that require HTTP Basic auth. Hoisted to a module-level frozenset so
# the container is not rebuilt on every single request.
_PROTECTED_ROUTES = frozenset(
    {
        "/amazin-spiderman",
        "/domains",
        "/hits",
        "/events",
        "/stats",
        "/routes",
        "/roamin-spiderman",
        "/search",
        "/traffic-source",
        "/redis_metrics",
        "/redis_metrics2",
        "/metrics/function",  # Function metrics endpoint
        "/queues",  # Queue dashboard
        "/v1/admin/queues/data",  # Queue dashboard JSON
    }
)


@app.before_request
def require_auth():
    """Enforce HTTP Basic auth on the protected admin/metrics routes.

    Returns a 401 challenge Response when credentials are missing or wrong;
    returns None (request proceeds normally) for all other paths.
    """
    if request.path not in _PROTECTED_ROUTES:
        return None

    import hmac  # local import: keeps the module import preamble untouched

    auth = request.authorization
    # compare_digest gives a constant-time comparison, avoiding a timing
    # side-channel on the credential check.
    if (
        not auth
        or not hmac.compare_digest(auth.username or "", BASIC_AUTH_USERNAME)
        or not hmac.compare_digest(auth.password or "", BASIC_AUTH_PASSWORD)
    ):
        return Response(
            "Authentication required",
            401,
            {"WWW-Authenticate": 'Basic realm="Login Required"'},
        )


@app.route("/sentry-error")
def sentry():
    """Debug endpoint: raises ZeroDivisionError on purpose to verify Sentry capture."""
    0 / 0  # deliberate error — the redirect below is never reached
    return redirect("https://make.com")


@app.route("/v1/thirdparty/form", methods=["POST"])
def form_data():
    """Handle third-party form posts: validate recaptcha, then email the payload.

    Returns 400 when the recaptcha token fails validation. On success,
    forwards the submitted fields (minus the recaptcha token) to the internal
    notification list, and additionally sends a welcome mail when the
    submission came from the main website with an email address.
    """
    recaptcha_token = request.form.get("recaptcha_token")

    # validate() returning exactly False is the rejection signal.
    if recaptcha.validate(recaptcha_token) is False:
        return "invalid recaptcha", 400

    source = request.form.get("source")
    first_name = request.form.get("firstname", "")
    last_name = request.form.get("lastname", "")
    full_name = (first_name + " " + last_name).strip()

    # Copy of the submitted fields without the recaptcha token.
    xform = {k: v for k, v in request.form.items() if k != "recaptcha_token"}
    vars = {
        "first_name": first_name,
        "last_name": last_name,
        "full_name": full_name,
        "form": xform,
    }

    if source == "nitro-main-website":
        email = request.form.get("email")
        if email is not None:
            mailer.send_via_dakiya(
                "nitrox/welcomeuser.html",
                "Welcome to NitroCommerce!",
                [email],
                [],
                vars=vars,
            )

    # Always notify the internal distribution list of the form post.
    mailer.send_via_dakiya(
        "cookiecutter/formpost.html",
        "Form Post",
        [
            "pleomax00@gmail.com",
            "anshika@getnitro.co",
            "umair@getnitro.co",
            "pratik@getnitro.co",
            "parijat@getnitro.co",
            "harshit@getnitro.co",
            "sales@getnitro.co",
            "sarthak@getnitro.co",
            "arnav@getnitro.co",
            "pragya@getnitro.co",
            "ayush@getnitro.co",
            "hitika@getnitro.co",
            "adya@getnitro.co",
        ],
        [],
        vars=vars,
    )
    return {}


@app.route("/health_startup")
def health_startup():
    """Liveness probe: reports OK unconditionally once the module has imported."""
    return ("ok", 200)

@app.route("/health")
def version():
    """Readiness probe: 200 only when all dependent services are healthy."""
    # Imported lazily so the health-check module loads on first request.
    from lib.health_check import is_all_services_healthy

    if not is_all_services_healthy():
        return "Service Unavailable", 503
    return "ok", 200


@app.route("/health-x")
def version_x():
    """Debug endpoint that emits one log line per level to verify log routing."""
    logger.debug("Debug message")
    # Fixed: the "Info message" line was previously logged at DEBUG level.
    logger.info("Info message")
    logger.warning("Warning message")
    logger.error("Error message")
    return "ok", 200


@app.route("/beanie-status")
def beanie_status():
    """Report Beanie ODM initialization state plus the related env-var settings."""
    try:
        from lib.dbdrivers import is_beanie_initialized, is_beanie_initializing

        payload = {
            "beanie_initialized": is_beanie_initialized(),
            "beanie_initializing": is_beanie_initializing(),
            "skip_beanie_init": os.getenv("SKIP_BEANIE_INIT", "true"),
            "shadow_validation_enabled": os.getenv(
                "BEANIE_SHADOW_VALIDATION_ENABLED", "true"
            ),
            "shadow_sample_rate": os.getenv("BEANIE_SHADOW_SAMPLE_RATE", "0.1"),
        }
        return payload, 200
    except Exception as exc:
        # Any failure (import or lookup) is reported rather than raised.
        return {"error": str(exc)}, 500


@app.route("/set_level/<level>")
def set_level(level):
    """Change the runtime log level; the setting lives in Redis with a 30m TTL."""
    applied = set_log_level_in_redis(level)
    if not applied:
        return f"Invalid level: {level}", 400
    return f"Log level set to {level.upper()} with TTL 30m", 200


@app.route("/webhook/events", methods=["POST"])
def webhook_events():
    """Accept a JSON webhook event, log its payload, and acknowledge it."""
    payload = request.json
    logger.debug(f"BIG_O_DEBUG: Received webhook event: {payload}")
    return {"status": "success"}, 200


if RUNAS == "SCHEDULER":
    # Scheduler process: run startup jobs, start the scheduler, then block the
    # main thread forever (no HTTP is served in this mode).
    logger.debug("Starting scheduler..")
    run_startup_jobs()
    scheduler.start()

    def turn_off_scheduler():
        # Only shut down if the scheduler is actually running
        # (state == 1 — presumably APScheduler STATE_RUNNING; confirm).
        logger.debug(scheduler.state)
        if scheduler.state == 1:
            scheduler.shutdown(wait=False)

    atexit.register(turn_off_scheduler)

    try:
        # Keep the main thread alive; jobs execute on scheduler threads.
        while True:
            time.sleep(1)
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown(wait=False)


if RUNAS == "QUEUES":
    # Queue worker process: start the background queue listeners instead of
    # serving HTTP. bg_listeners() is expected to block from here on.
    logger.debug("Starting Queue Sub-Processor...")
    logger.debug("Allowing Beanie initialization to proceed...")
    time.sleep(3)  # Give time for Beanie initialization logs to appear
    bg_listeners()
