//! xbp 10.15.4
//!
//! XBP is a zero-config build pack that can also interact with proxies,
//! Kafka, sockets, and synthetic monitors.
//!
//! # Documentation
use colored::Colorize;
use tokio::process::Command;

use crate::cli::commands;
use crate::cli::error::CliResult;
use crate::commands::kafka_logs::LogConfig;
#[cfg(feature = "docker")]
use crate::commands::print_docker_ps;
use crate::commands::service::load_xbp_config;
use crate::commands::{
    pm2_env, pm2_flush, pm2_list, pm2_logs, pm2_monitor, pm2_resurrect, pm2_save, pm2_snapshot,
    pm2_start_wrapper, run_ports, start_log_shipping, tail_kafka_topic,
};
use crate::logging::{get_log_directory, log_error, log_info, log_warn};
use crate::strategies::get_all_services;
use crate::utils::command_exists;

/// Starts a process through the PM2 wrapper, then persists the process list
/// and prints the resulting PM2 table.
///
/// The first failing step is logged and its error propagated; later steps are
/// skipped.
pub async fn handle_start(args: Vec<String>, debug: bool) -> CliResult<()> {
    match pm2_start_wrapper(args, debug).await {
        Ok(_) => {}
        Err(e) => {
            let _ = log_error("start", "Failed to start process", Some(&e)).await;
            return Err(e.into());
        }
    }
    match pm2_save(debug).await {
        Ok(_) => {}
        Err(e) => {
            let _ = log_error("start", "pm2 save failed", Some(&e)).await;
            return Err(e.into());
        }
    }
    match pm2_list(debug).await {
        Ok(_) => Ok(()),
        Err(e) => {
            let _ = log_error("start", "pm2 list failed", Some(&e)).await;
            Err(e.into())
        }
    }
}

/// Prints the log directory and either opens it (Windows) or shows the shell
/// commands for tailing the log files (everywhere else).
pub async fn handle_logs_flag() -> CliResult<()> {
    let log_dir = get_log_directory().await?;
    let dir_text = log_dir.display().to_string();

    println!("{} {}", "Logs directory:".bright_blue(), dir_text.cyan());

    if !cfg!(target_os = "windows") {
        println!("\n{}", "To view logs:".bright_blue());
        println!("  cd {}", dir_text.cyan());
        println!("  tail -f xbp-*.log");
    } else {
        println!("\n{}", "Opening in Explorer...".dimmed());
        // Best effort: ignore spawn failures, the path was already printed.
        let _ = Command::new("explorer").arg(log_dir).spawn();
    }

    Ok(())
}

/// Translates the structured `ports` command into flat CLI-style arguments
/// and delegates to `run_ports`. Failures are logged but never propagated.
pub async fn handle_ports(
    cmd: commands::PortsCmd,
    global_port: Option<u16>,
    debug: bool,
) -> CliResult<()> {
    let mut forwarded: Vec<String> = Vec::new();

    // A globally supplied port takes precedence over the subcommand's own.
    if let Some(p) = global_port.or(cmd.port) {
        forwarded.extend(["-p".to_string(), p.to_string()]);
    }
    // NOTE(review): both the long and short spelling of the same flag are
    // forwarded for kill/nginx — confirm run_ports tolerates the duplicate.
    if cmd.kill {
        forwarded.extend(["--kill".to_string(), "-k".to_string()]);
    }
    if cmd.nginx {
        forwarded.extend(["-n".to_string(), "--nginx".to_string()]);
    }
    if cmd.full {
        forwarded.push("--full".to_string());
    }
    if cmd.no_local {
        forwarded.push("--no-local".to_string());
    }

    if let Err(e) = run_ports(&forwarded, debug).await {
        let _ = log_error("ports", "Error running ports", Some(&e)).await;
    }
    Ok(())
}

/// Streams logs for an optional project target.
///
/// Resolution order: remote over SSH when any SSH flag is supplied, then a
/// Docker container matching the target (feature-gated), then PM2 as the
/// fallback. Streaming failures are logged rather than propagated.
pub async fn handle_logs(cmd: commands::LogsCmd, debug: bool) -> CliResult<()> {
    let project = cmd.project;

    // Supplying any SSH option switches the whole command into remote mode.
    if cmd.ssh_host.is_some() || cmd.ssh_username.is_some() || cmd.ssh_password.is_some() {
        let outcome = crate::commands::ssh_logs::run_remote_logs(
            project.clone(),
            cmd.ssh_host,
            cmd.ssh_username,
            cmd.ssh_password,
            debug,
        )
        .await;
        if let Err(e) = outcome {
            let _ = log_error("logs", "Remote logs failed", Some(&e)).await;
        }
        return Ok(());
    }

    #[cfg(feature = "docker")]
    {
        if let Some(ref target) = project {
            match crate::commands::try_stream_docker_logs(target, debug).await {
                // Logs were streamed from a container; nothing left to do.
                Ok(Some(())) => return Ok(()),
                // No matching container — fall through to PM2 below.
                Ok(None) => {}
                Err(e) => {
                    let _ = log_error("docker", "docker logs failed", Some(&e)).await;
                }
            }
        }
    }

    // Default path: PM2-managed logs.
    if let Err(e) = pm2_logs(project, debug).await {
        let _ = log_error("pm2", "pm2 logs failed", Some(&e)).await;
    }
    Ok(())
}

/// Prints an overview of everything xbp manages: the PM2 process table, a
/// Docker container snapshot (when compiled with the `docker` feature), and
/// any configured systemd services. All sections are best-effort.
pub async fn handle_list(debug: bool) -> CliResult<()> {
    // PM2 process table.
    match pm2_list(debug).await {
        Ok(_) => {}
        Err(e) => {
            let _ = log_error("pm2", "pm2 list failed", Some(&e)).await;
        }
    }

    // Docker snapshot, only when the feature is compiled in.
    #[cfg(feature = "docker")]
    {
        match print_docker_ps(debug).await {
            Ok(_) => {}
            Err(e) => {
                let _ = log_warn("docker", "docker ps snapshot failed", Some(&e)).await;
            }
        }
    }

    // systemd status; failures are only surfaced in debug mode.
    match show_systemd_status(debug).await {
        Err(e) if debug => {
            let _ = log_warn(
                "systemd",
                "Could not check systemd status",
                Some(&e.to_string()),
            )
            .await;
        }
        _ => {}
    }

    Ok(())
}

/// Saves a PM2 snapshot and reports where it was written.
///
/// On failure the error is logged and propagated to the caller.
pub async fn handle_snapshot(debug: bool) -> CliResult<()> {
    let path = match pm2_snapshot(debug).await {
        Ok(p) => p,
        Err(e) => {
            let _ = log_error("snapshot", "PM2 snapshot failed", Some(&e)).await;
            return Err(e.into());
        }
    };

    let shown = path.display().to_string();
    let _ = log_info("snapshot", "Saved PM2 snapshot", Some(&shown)).await;
    println!("Saved PM2 snapshot to {}", shown);
    Ok(())
}

/// Restores previously saved PM2 processes, logging and propagating failures.
pub async fn handle_resurrect(debug: bool) -> CliResult<()> {
    match pm2_resurrect(debug).await {
        Ok(_) => Ok(()),
        Err(e) => {
            let _ = log_error("pm2", "pm2 resurrect failed", Some(&e)).await;
            Err(e.into())
        }
    }
}

/// Stops the given PM2 target, defaulting to "all" when none is supplied.
pub async fn handle_stop(target: Option<String>, debug: bool) -> CliResult<()> {
    let name = target.unwrap_or_else(|| String::from("all"));
    match crate::commands::pm2_stop(&name, debug).await {
        Ok(_) => Ok(()),
        Err(e) => {
            let _ = log_error("pm2", "pm2 stop failed", Some(&e)).await;
            Err(e.into())
        }
    }
}

/// Flushes PM2 logs, optionally scoped to a single target process.
pub async fn handle_flush(target: Option<String>, debug: bool) -> CliResult<()> {
    match pm2_flush(target.as_deref(), debug).await {
        Ok(_) => Ok(()),
        Err(e) => {
            let _ = log_error("pm2", "pm2 flush failed", Some(&e)).await;
            Err(e.into())
        }
    }
}

/// Shows the PM2 environment for `target`, logging and propagating failures.
pub async fn handle_env(target: String, debug: bool) -> CliResult<()> {
    match pm2_env(&target, debug).await {
        Ok(_) => Ok(()),
        Err(e) => {
            let _ = log_error("pm2", "pm2 env failed", Some(&e)).await;
            Err(e.into())
        }
    }
}

/// Dispatches the `monitor` command.
///
/// With no subcommand, opens the interactive PM2 monitor. `check` and `start`
/// both require being inside an xbp project; otherwise the user is sent to
/// project selection.
pub async fn handle_monitor(cmd: commands::MonitorCmd, debug: bool) -> CliResult<()> {
    // No subcommand: fall back to the interactive PM2 monitor.
    let sub = match cmd.command {
        Some(s) => s,
        None => {
            if let Err(e) = pm2_monitor(debug).await {
                let _ = log_error("monitor", "PM2 monitor failed", Some(&e)).await;
            }
            return Ok(());
        }
    };

    // Both subcommands require running inside an xbp project.
    if !crate::commands::service::is_xbp_project().await {
        return crate::cli::handlers::project::handle_project_selection().await;
    }

    match sub {
        commands::MonitorSubCommand::Check => {
            if let Err(e) = crate::commands::monitor::run_single_check().await {
                let _ = log_error("monitor", "Monitor check failed", Some(&e.to_string())).await;
            }
        }
        commands::MonitorSubCommand::Start => {
            if let Err(e) = crate::commands::monitor::start_monitor_daemon().await {
                let _ = log_error("monitor", "Monitor daemon failed", Some(&e.to_string())).await;
            }
        }
    }
    Ok(())
}

/// Tails logs in one of three modes: Kafka topic (`--kafka`), explicit log
/// shipping (`--ship`), or the default, which also ships logs but reports
/// failures with a different message.
pub async fn handle_tail(cmd: commands::TailCmd, _debug: bool) -> CliResult<()> {
    if cmd.kafka {
        // Kafka mode needs a log configuration from xbp.json.
        match LogConfig::from_xbp_config().await {
            Ok(Some(config)) => {
                if let Err(e) = tail_kafka_topic(&config).await {
                    let _ =
                        log_error("tail", "Failed to tail Kafka topic", Some(&e.to_string())).await;
                }
            }
            Ok(None) => {
                let _ = log_error("tail", "No log configuration found in xbp.json", None).await;
            }
            Err(e) => {
                let _ =
                    log_error("tail", "Failed to load configuration", Some(&e.to_string())).await;
            }
        }
        return Ok(());
    }

    // Both remaining modes ship logs; only the failure message differs.
    let failure = if cmd.ship {
        "Failed to ship logs"
    } else {
        "Failed to tail logs"
    };
    if let Err(e) = start_log_shipping().await {
        let _ = log_error("tail", failure, Some(&e.to_string())).await;
    }
    Ok(())
}

/// Prints the status of every systemd unit referenced by the xbp config.
///
/// Silently does nothing when not on Linux, when `systemctl` is missing,
/// when the current directory is not an xbp project, or when no systemd
/// service names are configured. As written, the function currently always
/// returns `Ok(())`.
pub async fn show_systemd_status(debug: bool) -> CliResult<()> {
    // systemd is Linux-only; also bail out if systemctl is not installed.
    if !cfg!(target_os = "linux") || !command_exists("systemctl") {
        return Ok(());
    }

    // Not being inside an xbp project is a normal condition, not an error.
    let config = match load_xbp_config().await {
        Ok(cfg) => cfg,
        Err(_) => return Ok(()),
    };

    // Gather unique unit names: the project-level name first, then any
    // per-service names that are not already listed.
    let mut systemd_services = Vec::new();
    if let Some(ref name) = config.systemd_service_name {
        systemd_services.push(name.clone());
    }
    for service in get_all_services(&config) {
        if let Some(ref name) = service.systemd_service_name {
            if !systemd_services.contains(name) {
                systemd_services.push(name.clone());
            }
        }
    }

    if systemd_services.is_empty() {
        return Ok(()); // No systemd services configured.
    }

    println!("\nSystemd Services:");
    // BUG FIX: previously `"".repeat(60)`, which repeats the empty string and
    // prints a blank line instead of the intended separator rule.
    println!("{}", "-".repeat(60));

    for service_name in systemd_services {
        check_systemd_service_status(&service_name, debug).await;
    }

    Ok(())
}

/// Prints a one-line, colorized status summary for a single systemd unit,
/// with extra detail lines when `debug` is set. Never fails; every problem is
/// reported inline on stdout.
async fn check_systemd_service_status(service_name: &str, debug: bool) {
    // Query without sudo; permission problems are detected from stderr below
    // rather than checked up front.
    let result = Command::new("systemctl")
        .arg("status")
        .arg(service_name)
        .arg("--no-pager")
        .output()
        .await;

    let out = match result {
        Ok(o) => o,
        Err(e) => {
            // Could not even spawn/collect systemctl.
            println!("  {} - {}", service_name, "Error".red());
            if debug {
                println!("    Failed to check status: {}", e);
            }
            return;
        }
    };

    let stdout = String::from_utf8_lossy(&out.stdout);
    let stderr = String::from_utf8_lossy(&out.stderr);

    // The unit does not exist at all.
    if stderr.contains("could not be found") || stderr.contains("not loaded") {
        println!("  {} - {}", service_name, "Not Found".dimmed());
        if debug {
            println!("    Service unit not found in systemd");
        }
        return;
    }

    // systemd was reachable but refused to let us query the unit.
    if stderr.contains("Permission denied") || stderr.contains("Failed to get properties") {
        println!("  {} - {}", service_name, "Permission Denied".yellow());
        if debug {
            println!("    Run with sudo to see full status");
        }
        return;
    }

    // Map the first matching "Active:" marker to a colored label; array order
    // preserves the original precedence.
    let markers = [
        ("Active: active (running)", "Running".green()),
        ("Active: inactive (dead)", "Stopped".dimmed()),
        ("Active: failed", "Failed".red()),
        ("Active: activating", "Starting".yellow()),
        ("Active: deactivating", "Stopping".yellow()),
    ];
    let status = markers
        .into_iter()
        .find(|(needle, _)| stdout.contains(needle))
        .map(|(_, label)| label)
        .unwrap_or_else(|| "Unknown".yellow());

    println!("  {} - {}", service_name, status);

    if debug {
        // Echo the first few lines of systemctl's own output.
        for line in stdout.lines().take(3) {
            println!("    {}", line.trim());
        }
    }
}