//! void-cli 0.0.3
//!
//! CLI for void — anonymous encrypted source control
//! Fork command — create an independent repository from a published snapshot.
//!
//! Fetches a single commit from IPFS, decrypts it with a content key,
//! extracts all files, and creates a new independent repository with a
//! fresh root key. The forker becomes the sole owner.
//!
//! Phases:
//! 1. `fetch_published_commit`  — download + decrypt commit + metadata + shards (via ops::import)
//! 2. `extract_files`           — enumerate shard entries, restore to working tree
//! 3. auto-commit               — create initial commit with source CID as parent

use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;

use serde::Serialize;
use void_core::cid;
use void_core::collab::manifest::RepoKey;
use void_core::crypto::{CommitReader, KeyVault, RepoSecret};
use void_core::support::ToVoidCid;
use void_core::metadata::{self, ShardMap};
use void_core::ops::import::{self, ImportOptions};
use void_core::pipeline::{commit_workspace, CommitOptions, SealOptions};
use void_core::store::{FsStore, IpfsStore};
use void_core::support::events::VoidObserver;
use void_core::workspace::checkout::{restore_files, FileToRestore};
use void_core::VoidContext;

use crate::context::{load_signing_key, signing_key_exists, void_err_to_cli};
use crate::ipfs_utils::{format_bytes, make_observer, parse_backend, parse_content_key};
use crate::observer::ProgressObserver;
use crate::output::{run_command, CliError, CliOptions};
use crate::repo_init::{self, NewRepoOpts};

// ============================================================================
// Output types
// ============================================================================

/// JSON-serializable result of a successful `fork` command.
///
/// Emitted (camelCase keys) when the CLI runs in JSON output mode.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ForkOutput {
    /// Path to the new repository.
    pub path: String,
    /// Source commit CID that was forked from.
    pub source_cid: String,
    /// CID of the initial commit in the forked repo.
    pub commit_cid: String,
    /// Number of files in the commit.
    pub files: usize,
    /// Total bytes in the commit.
    pub bytes: u64,
}

// ============================================================================
// Args
// ============================================================================

/// Parsed command-line arguments for the `fork` command.
pub struct ForkArgs {
    /// Source: commit CID.
    pub source: String,
    /// Target directory.
    pub path: Option<PathBuf>,
    /// Scoped content key (64 hex chars).
    pub content_key: Option<String>,
    /// Backend type: kubo or gateway.
    pub backend: String,
    /// Kubo API URL.
    pub kubo_url: String,
    /// Gateway URL (required if backend is gateway).
    pub gateway_url: Option<String>,
    /// Request timeout in milliseconds.
    pub timeout_ms: u64,
}

// ============================================================================
// Implementation
// ============================================================================

/// Fork a published commit into a new, independent local repository.
///
/// Orchestrates the three fork phases described in the module docs:
/// 1. fetch + decrypt the source commit into a temporary objects store,
/// 2. extract its files into the new working tree under a fresh root key,
/// 3. create the initial commit with the source CID as a foreign parent.
///
/// The forker becomes the sole owner of the resulting repository.
///
/// # Errors
///
/// Returns a `CliError` if the content key / backend cannot be parsed, the
/// fetch or decryption fails, the target directory already exists, or any
/// filesystem / commit operation fails.
pub fn run(cwd: &Path, args: ForkArgs, opts: &CliOptions) -> Result<(), CliError> {
    run_command("fork", opts, |ctx| {
        // Parse content key
        let content_key = parse_content_key(&args.content_key)?;

        // Parse backend + create client
        let backend = parse_backend(&args.backend, &args.kubo_url, &args.gateway_url)?;
        let remote = Arc::new(IpfsStore::new(backend.clone(), Duration::from_millis(args.timeout_ms)));

        // Phase 1: fetch + decrypt commit via shared import pipeline.
        // The repo name is only known after decryption, so fetch into a temp
        // dir first, then move the objects into the final location.
        let temp_dir = tempfile::tempdir()
            .map_err(|e| CliError::io_error(format!("failed to create temp dir: {e}")))?;
        let temp_void_dir = temp_dir.path().join(".void");
        std::fs::create_dir_all(temp_void_dir.join("objects"))
            .map_err(|e| CliError::io_error(format!("failed to create temp objects dir: {e}")))?;

        ctx.progress("Fetching commit from IPFS...");
        let shard_observer: Arc<ProgressObserver> =
            Arc::new(make_observer(ctx.use_json(), "Fetching shards..."));
        let shard_obs = shard_observer.clone();

        let temp_store = import::objects_store(temp_void_dir.as_path()).map_err(void_err_to_cli)?;
        let foreign = import::fetch_published_commit(ImportOptions {
            store: temp_store,
            remote,
            commit_cid: args.source.clone(),
            content_key,
            on_shard_progress: Some(Box::new(move |fetched, total| {
                shard_obs.set_message(&format!("Fetched {}/{} shards", fetched, total));
            })),
        })
        .map_err(void_err_to_cli)?;
        shard_observer.finish();

        // Resolve target directory (fails if it already exists)
        let target_dir = resolve_target_dir(cwd, &args.path, foreign.repo_name.as_deref())?;
        if !ctx.use_json() {
            ctx.info(format!("Forking into '{}'...", target_dir.display()));
        }

        // Create .void directory structure
        let void_dir = target_dir.join(".void");
        repo_init::create_void_dir_structure(&void_dir)?;

        // Move objects from temp to final location
        let temp_objects = temp_void_dir.join("objects");
        let final_objects = void_dir.join("objects");
        copy_dir_contents(&temp_objects, &final_objects)?;

        // Generate fresh root key BEFORE extraction so staged blobs use the
        // correct derived key (not the content key, which is a leaf key).
        let new_key: [u8; 32] = rand::random();
        let new_repo_key = RepoKey::from_bytes(new_key);
        let new_vault = Arc::new(
            KeyVault::new(new_key)
                .map_err(|e| CliError::internal(format!("failed to initialize encryption: {e}")))?,
        );

        // Phase 2: extract files into working tree
        ctx.progress("Extracting files...");
        let store = import::objects_store(&void_dir).map_err(void_err_to_cli)?;
        let checkout_observer: Arc<ProgressObserver> =
            Arc::new(make_observer(ctx.use_json(), "Restoring files..."));
        let files_extracted = extract_files(
            &store,
            &foreign.commit,
            &foreign.reader,
            &new_vault,
            &target_dir,
            &void_dir,
            &checkout_observer,
        )?;
        checkout_observer.finish();

        // Set up manifest + config via shared helper
        ctx.progress("Creating independent repository...");
        let repo_id = uuid::Uuid::new_v4().to_string();
        let repo_secret_hex = repo_init::generate_repo_secret();
        let result = repo_init::setup_owner_manifest(
            &void_dir,
            &new_repo_key,
            NewRepoOpts {
                repo_name: foreign.repo_name.clone(),
                repo_id: Some(repo_id),
                repo_secret: Some(repo_secret_hex.clone()),
            },
        )?;

        // Register in local registry (best-effort)
        if let (Some(ref id), Some(ref name)) =
            (&result.manifest.repo_id, &result.manifest.repo_name)
        {
            let _ = crate::registry::register_repo(id, name, &target_dir, "fork", None);
        }

        // Phase 3: Create initial commit with source CID as parent for provenance
        ctx.progress("Creating initial commit...");

        let repo_secret_bytes = hex::decode(&repo_secret_hex)
            .map_err(|e| CliError::internal(format!("invalid repo_secret: {e}")))?;
        // Checked conversion: a length mismatch surfaces as a CliError instead
        // of the panic `copy_from_slice` would raise on a short/long buffer.
        let repo_secret_arr: [u8; 32] = repo_secret_bytes.as_slice().try_into().map_err(|_| {
            CliError::internal(format!(
                "invalid repo_secret length: expected 32 bytes, got {}",
                repo_secret_bytes.len()
            ))
        })?;

        let source_cid_obj = cid::parse(&args.source).map_err(void_err_to_cli)?;
        let source_cid_bytes = cid::to_bytes(&source_cid_obj);

        // Load signing key for the initial commit (required in collaboration mode)
        let signing_key = if signing_key_exists() {
            Some(Arc::new(load_signing_key()?))
        } else {
            None
        };

        // Build a VoidContext for the fork's initial commit
        let mut fork_ctx = VoidContext::headless(&void_dir, new_vault.clone(), 0)
            .map_err(void_err_to_cli)?;
        fork_ctx.paths.root = camino::Utf8PathBuf::try_from(target_dir.clone())
            .map_err(|e| CliError::internal(format!("invalid target path: {e}")))?;
        fork_ctx.repo.secret = RepoSecret::new(repo_secret_arr);
        fork_ctx.crypto.signing_key = signing_key;

        let commit_result = commit_workspace(CommitOptions {
            seal: SealOptions {
                ctx: fork_ctx,
                shard_map: ShardMap::new(64),
                content_key: None,
                parent_content_key: None,
            },
            message: format!("Forked from {}", &args.source),
            parent_cid: Some(void_core::crypto::CommitCid::from_bytes(source_cid_bytes)),
            allow_data_loss: false,
            foreign_parent: true,
        })
        .map_err(void_err_to_cli)?;

        let total_files = commit_result.total_files.unwrap_or(files_extracted as u64);
        let total_bytes = commit_result.total_bytes.unwrap_or(0);

        let commit_cid_str = commit_result.commit_cid.to_cid_string();

        if !ctx.use_json() {
            // CID strings are ASCII (base-encoded), so byte slicing is safe here.
            let short_cid = &commit_cid_str[..12.min(commit_cid_str.len())];
            ctx.info(format!(
                "Forked from {}",
                &args.source[..args.source.len().min(12)]
            ));
            ctx.info(format!("Created commit {}...", short_cid));
            ctx.info(format!("  Files: {}", total_files));
            ctx.info(format!("  Size: {}", format_bytes(total_bytes)));
        }

        Ok(ForkOutput {
            path: target_dir.display().to_string(),
            source_cid: args.source.clone(),
            commit_cid: commit_cid_str,
            files: total_files as usize,
            bytes: total_bytes,
        })
    })
}

// ============================================================================
// Phase helpers
// ============================================================================

/// Extract files from a commit to a working tree directory.
/// Restore every file recorded in `commit`'s manifest into `target_dir`.
///
/// Walks the tree manifest, resolves each entry's shard reference, and hands
/// the complete list to `restore_files`, staging blobs under the fork's new
/// vault key. Returns the number of files actually restored.
fn extract_files(
    store: &FsStore,
    commit: &metadata::Commit,
    reader: &CommitReader,
    new_vault: &KeyVault,
    target_dir: &Path,
    void_dir: &Path,
    observer: &Arc<ProgressObserver>,
) -> Result<usize, CliError> {
    let manifest =
        void_core::metadata::manifest_tree::TreeManifest::from_commit(store, commit, reader)
            .map_err(void_err_to_cli)?
            .ok_or_else(|| CliError::internal("commit has no manifest_cid"))?;

    let shard_refs = manifest.shards();

    // Resolve each manifest entry to its shard reference, short-circuiting on
    // the first error (same iteration order as the manifest iterator).
    let restore_list = manifest
        .iter()
        .map(|item| {
            let entry = item.map_err(void_err_to_cli)?;
            let shard = shard_refs.get(entry.shard_index as usize).ok_or_else(|| {
                CliError::internal(format!("shard_index {} out of range", entry.shard_index))
            })?;
            Ok(FileToRestore {
                entry,
                shard_cid: shard.cid.clone(),
                wrapped_key: shard.wrapped_key.clone(),
            })
        })
        .collect::<Result<Vec<_>, CliError>>()?;

    let progress: Option<Arc<dyn VoidObserver>> = Some(observer.clone() as Arc<dyn VoidObserver>);
    let (outcome, _) = restore_files(
        store,
        reader,
        new_vault.staged_key().map_err(|e| void_err_to_cli(e.into()))?,
        &[], // no ancestor keys — fork is single-commit
        target_dir,
        &restore_list,
        &progress,
        Some(void_dir),
    )
    .map_err(void_err_to_cli)?;

    Ok(outcome.files_restored)
}

// ============================================================================
// Utility helpers
// ============================================================================

/// Resolve the target directory for the fork.
/// Resolve the target directory for the fork.
///
/// Uses `explicit` when given (absolute paths as-is, relative paths joined
/// onto `cwd`); otherwise derives the directory from the source repo name,
/// falling back to "forked-repo". Errors if the directory already exists.
fn resolve_target_dir(
    cwd: &Path,
    explicit: &Option<PathBuf>,
    repo_name: Option<&str>,
) -> Result<PathBuf, CliError> {
    let target_dir = if let Some(p) = explicit {
        if p.is_absolute() {
            p.clone()
        } else {
            cwd.join(p)
        }
    } else {
        cwd.join(repo_name.unwrap_or("forked-repo"))
    };

    if target_dir.exists() {
        Err(CliError::conflict(format!(
            "target directory already exists: {}",
            target_dir.display()
        )))
    } else {
        Ok(target_dir)
    }
}

/// Copy all files from one directory to another (non-recursive, flat objects dir).
fn copy_dir_contents(src: &Path, dst: &Path) -> Result<(), CliError> {
    let entries = std::fs::read_dir(src)
        .map_err(|e| CliError::io_error(format!("failed to read temp objects: {e}")))?;
    for entry in entries {
        let entry = entry.map_err(|e| CliError::io_error(e.to_string()))?;
        let dest_path = dst.join(entry.file_name());
        if entry.file_type().map_err(|e| CliError::io_error(e.to_string()))?.is_dir() {
            std::fs::create_dir_all(&dest_path)
                .map_err(|e| CliError::io_error(e.to_string()))?;
            copy_dir_contents(&entry.path(), &dest_path)?;
        } else {
            std::fs::copy(entry.path(), &dest_path)
                .map_err(|e| CliError::io_error(format!("failed to copy object: {e}")))?;
        }
    }
    Ok(())
}