//! ccd-cli 1.0.0-alpha.9
//!
//! Bootstrap and validate Continuous Context Development repositories.
use std::collections::{BTreeMap, BTreeSet};
use std::fs;
use std::path::Path;
use std::process::ExitCode;

use anyhow::{bail, Context, Result};
use serde::Serialize;

use crate::extensions;
use crate::output::CommandReport;
use crate::paths::state::StateLayout;
use crate::profile;
use crate::repo::marker;

/// Serializable result of `ccd pod init`, rendered as JSON or text.
#[derive(Serialize)]
pub struct PodInitReport {
    /// Command label embedded in the JSON report.
    command: &'static str,
    /// True when the command completed successfully.
    ok: bool,
    /// Profile that joined or moved coordination scope.
    profile: String,
    /// Coordination-scope (pod) name that was joined/moved to.
    pod: String,
    /// Same value as `locality_id`; kept as a user-facing alias.
    project_id: String,
    /// Locality identifier taken from the repo marker.
    locality_id: String,
    /// One of "already in", "moved to", or "joined".
    action: &'static str,
    /// Pod overlay directory for this repo (display form of the path).
    pod_dir: String,
    /// Repo overlay TOML config path that records the scope.
    overlay_config_path: String,
}

impl CommandReport for PodInitReport {
    fn exit_code(&self) -> ExitCode {
        if self.ok {
            ExitCode::SUCCESS
        } else {
            ExitCode::FAILURE
        }
    }

    fn render_text(&self) {
        println!(
            "Profile {} {} coordination scope {} for project {}",
            self.profile, self.action, self.pod, self.project_id
        );
    }
}

/// Join (or move) this repo checkout into a coordination scope (pod).
///
/// Resolves the profile and state layout for `repo_root`, records the
/// canonical `[dispatch].coordination_scope` key in the repo overlay config,
/// and creates the pod overlay directory. Joining the scope the repo is
/// already in is a no-op that still reports success.
///
/// # Errors
/// - the checkout has no repo marker (`ccd attach` has not been run);
/// - the profile is already in a *different* scope and `force` is false;
/// - `pod_name` fails validation, or a filesystem/TOML operation fails.
pub fn init(
    repo_root: &Path,
    explicit_profile: Option<&str>,
    pod_name: &str,
    force: bool,
) -> Result<PodInitReport> {
    // Load the repo marker — error if not attached.
    let repo_marker = marker::load(repo_root)?.ok_or_else(|| {
        anyhow::anyhow!("this checkout is not attached to a CCD profile; run `ccd attach` first")
    })?;
    let locality_id = &repo_marker.locality_id;

    // Resolve the profile and StateLayout.
    let profile_name = profile::resolve(explicit_profile)?;
    let layout = StateLayout::resolve(repo_root, profile_name.clone())?;

    // Check current pod membership for this locality.
    let current_pod = layout.coordination_scope_name(locality_id)?;
    match current_pod {
        Some(ref current) if current == pod_name => {
            // Already in the requested pod — report success without touching disk.
            let pod_dir = layout.pod_repo_overlay_root(pod_name, locality_id)?;
            let overlay_config_path = layout.repo_overlay_config_path(locality_id)?;
            return Ok(PodInitReport {
                // Hyphenated for consistency with "pod-list"/"pod-status".
                command: "pod-init",
                ok: true,
                profile: profile_name.to_string(),
                pod: pod_name.to_string(),
                project_id: locality_id.to_string(),
                locality_id: locality_id.to_string(),
                action: "already in",
                pod_dir: pod_dir.display().to_string(),
                overlay_config_path: overlay_config_path.display().to_string(),
            });
        }
        Some(ref current) if !force => {
            bail!(
                "profile `{profile_name}` is already in coordination scope `{current}` for this project. Use `--force` to move it."
            );
        }
        _ => {
            // No pod, or a different pod with --force — proceed.
        }
    }

    // Validate pod name upfront (before writing config) so we fail cleanly.
    layout.pod_repo_overlay_root(pod_name, locality_id)?;

    // Write canonical [dispatch].coordination_scope to the repo overlay config.
    let overlay_config_path = layout.repo_overlay_config_path(locality_id)?;
    let overlay_root = layout.repo_overlay_root(locality_id)?;
    fs::create_dir_all(&overlay_root)
        .with_context(|| format!("failed to create directory {}", overlay_root.display()))?;

    // A missing config means a fresh overlay; any other read error is fatal.
    let existing_content = match fs::read_to_string(&overlay_config_path) {
        Ok(content) => content,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => String::new(),
        Err(e) => return Err(e).context("failed to read repo overlay config"),
    };

    // Edit in place with toml_edit so unrelated keys/comments are preserved.
    let mut doc = existing_content
        .parse::<toml_edit::DocumentMut>()
        .context("failed to parse repo overlay config as TOML")?;

    // Ensure [dispatch] table exists and set the canonical coordination-scope key.
    if !doc.contains_table("dispatch") {
        doc["dispatch"] = toml_edit::Item::Table(toml_edit::Table::new());
    }
    doc["dispatch"]["coordination_scope"] = toml_edit::value(pod_name);
    // Remove the `pod` key so only `coordination_scope` remains — presumably
    // a legacy alias; confirm against the config reader.
    if let Some(table) = doc["dispatch"].as_table_mut() {
        table.remove("pod");
    }

    fs::write(&overlay_config_path, doc.to_string())
        .with_context(|| format!("failed to write {}", overlay_config_path.display()))?;

    // Create the pod overlay directory for this repo.
    let pod_dir = layout.pod_repo_overlay_root(pod_name, locality_id)?;
    fs::create_dir_all(&pod_dir)
        .with_context(|| format!("failed to create pod directory {}", pod_dir.display()))?;

    let action = if current_pod.is_some() {
        "moved to"
    } else {
        "joined"
    };

    Ok(PodInitReport {
        // Hyphenated for consistency with "pod-list"/"pod-status".
        command: "pod-init",
        ok: true,
        profile: profile_name.to_string(),
        pod: pod_name.to_string(),
        project_id: locality_id.to_string(),
        locality_id: locality_id.to_string(),
        action,
        pod_dir: pod_dir.display().to_string(),
        overlay_config_path: overlay_config_path.display().to_string(),
    })
}

// ── pod list ────────────────────────────────────────────────────────────

/// One row of the `pod list` report: a scope plus membership counts.
#[derive(Serialize)]
pub struct PodListEntry {
    /// Coordination-scope (pod) name.
    name: String,
    /// Number of unique localities in this scope (same value as `localities`).
    projects: usize,
    /// Number of unique profiles in this scope.
    profiles: usize,
    /// Number of unique localities; duplicates `projects` as an alias field.
    localities: usize,
}

/// Serializable result of `ccd pod list`, rendered as JSON or a text table.
#[derive(Serialize)]
pub struct PodListReport {
    /// Command label embedded in the JSON report.
    command: &'static str,
    /// True when the command completed successfully.
    ok: bool,
    /// One entry per coordination scope, sorted by name (BTreeMap order).
    pods: Vec<PodListEntry>,
}

impl CommandReport for PodListReport {
    /// Map the report's success flag onto a process exit status.
    fn exit_code(&self) -> ExitCode {
        if !self.ok {
            return ExitCode::FAILURE;
        }
        ExitCode::SUCCESS
    }

    /// Print a fixed-width table of scopes, or a notice when there are none.
    fn render_text(&self) {
        match self.pods.as_slice() {
            [] => println!("No coordination scopes configured."),
            entries => {
                println!("{:<16} {:>8}  {:>8}", "Scope", "Profiles", "Projects");
                for entry in entries {
                    println!(
                        "{:<16} {:>8}  {:>8}",
                        entry.name, entry.profiles, entry.projects
                    );
                }
            }
        }
    }
}

/// Build the `pod list` report by scanning all pod memberships on disk.
///
/// Scopes are grouped by name with unique-profile and unique-locality
/// counts; BTree ordering keeps the output sorted and deterministic.
pub fn list() -> Result<PodListReport> {
    let ccd_root = crate::paths::state::default_ccd_root()?;
    let memberships = crate::paths::state::scan_all_pod_memberships(&ccd_root)?;

    // Group memberships by pod name, de-duplicating profiles and localities.
    let mut grouped: BTreeMap<String, (BTreeSet<String>, BTreeSet<String>)> = BTreeMap::new();
    for membership in &memberships {
        let (profiles, localities) = grouped.entry(membership.pod_name.clone()).or_default();
        profiles.insert(membership.profile.clone());
        localities.insert(membership.locality_id.clone());
    }

    let pods = grouped
        .into_iter()
        .map(|(name, (profiles, localities))| PodListEntry {
            name,
            projects: localities.len(),
            profiles: profiles.len(),
            localities: localities.len(),
        })
        .collect();

    Ok(PodListReport {
        command: "pod-list",
        ok: true,
        pods,
    })
}

// ── pod status ──────────────────────────────────────────────────────────

/// One member (profile × locality) of a coordination scope in `pod status`.
#[derive(Debug, Serialize)]
pub struct PodStatusMember {
    /// Profile name this membership belongs to.
    pub profile: String,
    /// Same value as `locality_id`; kept as a user-facing alias.
    pub project_id: String,
    /// Locality identifier for the repo.
    pub locality_id: String,
    /// True when the member's profile directory no longer exists on disk.
    pub stale: bool,
    /// Extra (label, value) columns contributed by extensions;
    /// omitted from the JSON output when empty.
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub extra_columns: Vec<(String, String)>,
}

/// Serializable result of `ccd pod status`, rendered as JSON or a text table.
#[derive(Serialize)]
pub struct PodStatusReport {
    /// Command label embedded in the JSON report.
    command: &'static str,
    /// True when the command completed successfully.
    ok: bool,
    /// Coordination-scope (pod) name the status was resolved for.
    pod: String,
    /// All members found for the scope across profiles and localities.
    members: Vec<PodStatusMember>,
}

impl CommandReport for PodStatusReport {
    /// Map the report's success flag onto a process exit status.
    fn exit_code(&self) -> ExitCode {
        if self.ok {
            ExitCode::SUCCESS
        } else {
            ExitCode::FAILURE
        }
    }

    /// Print the scope name followed by a simple profile/project table.
    fn render_text(&self) {
        if self.members.is_empty() {
            println!("No members found for coordination scope {}.", self.pod);
            return;
        }
        println!("Coordination scope: {}\n", self.pod);
        println!("  {:<12} Project ID", "Profile");
        for member in &self.members {
            // Truncate long project IDs for display. Take whole characters
            // rather than byte-slicing (`&s[..13]`), which panics when byte
            // index 13 falls inside a multi-byte UTF-8 character.
            let project_display = if member.project_id.len() > 15 {
                let head: String = member.project_id.chars().take(13).collect();
                format!("{head}..")
            } else {
                member.project_id.clone()
            };
            let suffix = if member.stale { "  (stale)" } else { "" };
            println!("  {:<12} {}{}", member.profile, project_display, suffix);
        }
    }
}

/// Build the `pod status` report for a scope resolved either from an
/// explicit `pod_name` or from the attached repo at `repo_root`.
///
/// # Errors
/// Fails when neither a pod name nor a path is provided, when the checkout
/// is unattached or not in any scope, or when membership scanning fails.
pub fn status(
    pod_name: Option<&str>,
    repo_root: Option<&Path>,
    explicit_profile: Option<&str>,
) -> Result<PodStatusReport> {
    // Resolve the scope name: an explicit name wins; otherwise derive it
    // from the repo marker + profile at the given path.
    let resolved_pod_name = if let Some(name) = pod_name {
        crate::paths::state::validate_pod_name(name)?;
        name.to_string()
    } else if let Some(root) = repo_root {
        let repo_marker = marker::load(root)?.ok_or_else(|| {
            anyhow::anyhow!(
                "this checkout is not attached to a CCD profile; run `ccd attach` first"
            )
        })?;
        let profile_name = profile::resolve(explicit_profile)?;
        let layout = StateLayout::resolve(root, profile_name)?;
        layout
            .coordination_scope_name(&repo_marker.locality_id)?
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "this checkout is not in any coordination scope; run `ccd pod init <name>` first"
                )
            })?
    } else {
        bail!("provide either a pod name or --path to resolve from context")
    };

    let ccd_root = crate::paths::state::default_ccd_root()?;
    let memberships = crate::paths::state::scan_all_pod_memberships(&ccd_root)?;

    let mut members = Vec::new();
    for membership in memberships
        .into_iter()
        .filter(|m| m.pod_name == resolved_pod_name)
    {
        // A member is stale when its profile directory has disappeared.
        let profile_dir = ccd_root.join("profiles").join(&membership.profile);
        let stale = !profile_dir.is_dir();

        // Ask each registered extension for extra display columns.
        let pod_shared_root = ccd_root
            .join("pods")
            .join(&resolved_pod_name)
            .join("repos")
            .join(&membership.locality_id);
        let extra_columns: Vec<(String, String)> = extensions::registered()
            .into_iter()
            .filter_map(|ext| {
                ext.enrich_pod_status(
                    &resolved_pod_name,
                    &membership.locality_id,
                    &membership.profile,
                    &pod_shared_root,
                )
            })
            .flatten()
            .collect();

        members.push(PodStatusMember {
            profile: membership.profile,
            project_id: membership.locality_id.clone(),
            locality_id: membership.locality_id,
            stale,
            extra_columns,
        });
    }

    Ok(PodStatusReport {
        command: "pod-status",
        ok: true,
        pod: resolved_pod_name,
        members,
    })
}