//! void-cli 0.0.4
//!
//! CLI for void — anonymous encrypted source control.
//!
//! Pull-request command — import a published commit as a branch for review and merge.
//!
//! Fetches a contributor's published commit from IPFS, decrypts it with their
//! content key, extracts files, and re-encrypts under the owner's root key as
//! a new commit on a `pr/<name>` branch. The owner can then review with
//! `void diff pr/<name>` and merge with `void merge pr/<name>`.
//!
//! Flow:
//! 1. Fetch + decrypt foreign commit (via ops::import)
//! 2. Extract files to temp directory
//! 3. Seal files under owner's key → new commit
//! 4. Write branch ref `pr/<name>`

use std::path::Path;
use std::sync::Arc;
use std::time::Duration;

use serde::Serialize;
use void_core::cid;
use void_core::crypto::{CommitReader, KeyVault};
use void_core::support::ToVoidCid;
use void_core::metadata::{self, ShardMap};
use void_core::ops::import::{self, ImportOptions};
use void_core::pipeline::{commit_workspace, CommitOptions, SealOptions};
use void_core::refs;
use void_core::store::{FsStore, IpfsStore};
use void_core::support::events::VoidObserver;
use void_core::workspace::checkout::{restore_files, FileToRestore};

use crate::context::{find_void_dir, load_signing_key, open_repo, signing_key_exists, void_err_to_cli};
use crate::ipfs_utils::{format_bytes, make_observer, parse_backend, parse_content_key_required};
use crate::observer::ProgressObserver;
use crate::output::{run_command, CliError, CliOptions};

// ============================================================================
// Output types
// ============================================================================

/// JSON-serializable result of a successful `pull-request` command.
///
/// Emitted via `run_command` when `--json` output is selected; field names
/// are camelCased on the wire (`sourceCid`, `commitCid`, ...).
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct PullRequestOutput {
    /// Branch name the PR was imported onto (always `pr/`-prefixed, e.g., "pr/feat")
    pub branch: String,
    /// Source commit CID from the contributor's publish
    pub source_cid: String,
    /// CID of the new commit on the pr branch (re-encrypted under owner's key)
    pub commit_cid: String,
    /// Number of files in the PR commit
    pub files: usize,
    /// Original commit message from the contributor (before the "PR: " wrapper)
    pub original_message: String,
}

// ============================================================================
// Args
// ============================================================================

/// Parsed command-line arguments for `void pull-request`.
///
/// Validation (key hex decoding, backend selection) happens in `run`, not
/// here — these fields hold the raw user-supplied strings.
pub struct PullRequestArgs {
    /// Source commit CID from contributor's publish output.
    pub source: String,
    /// Branch name (will be prefixed with `pr/` if not already).
    pub name: String,
    /// Contributor's content key (64 hex chars; decoded by
    /// `parse_content_key_required`).
    pub content_key: String,
    /// Backend type: kubo or gateway (parsed by `parse_backend`).
    pub backend: String,
    /// Kubo API URL (used when backend is kubo).
    pub kubo_url: String,
    /// Gateway URL (required if backend is gateway).
    pub gateway_url: Option<String>,
    /// Request timeout in milliseconds for IPFS fetches.
    pub timeout_ms: u64,
}

// ============================================================================
// Implementation
// ============================================================================

/// Import a published commit as a local `pr/<name>` branch for review.
///
/// Fetches and decrypts the contributor's commit from IPFS, extracts its
/// files into a temporary workspace, re-seals them under the owner's root
/// key as a new commit, and writes the `pr/<name>` branch ref. HEAD is
/// never moved.
///
/// # Errors
/// Returns `CliError::conflict` if the target branch already exists, and
/// maps void-core / I/O failures through `void_err_to_cli`.
pub fn run(cwd: &Path, args: PullRequestArgs, opts: &CliOptions) -> Result<(), CliError> {
    run_command("pull-request", opts, |ctx| {
        let void_dir = find_void_dir(cwd)?;

        // Build branch name: add pr/ prefix if not already present
        let branch_name = if args.name.starts_with("pr/") {
            args.name.clone()
        } else {
            format!("pr/{}", args.name)
        };
        // refs API works on UTF-8 paths; reject non-UTF-8 repo paths up front.
        let void_dir_utf8 = camino::Utf8Path::new(
            void_dir.to_str().ok_or_else(|| CliError::internal("void_dir is not valid UTF-8"))?,
        );
        // Fail early rather than silently clobber an existing review branch.
        if refs::read_branch(void_dir_utf8, &branch_name).map_err(void_err_to_cli)?.is_some() {
            return Err(CliError::conflict(format!(
                "branch '{}' already exists — use a different name or delete it first",
                branch_name
            )));
        }

        // Parse content key + backend
        let content_key = parse_content_key_required(&args.content_key)?;
        let backend = parse_backend(&args.backend, &args.kubo_url, &args.gateway_url)?;
        let remote = Arc::new(IpfsStore::new(backend, Duration::from_millis(args.timeout_ms)));

        // Phase 1: Fetch + decrypt foreign commit into this repo's object store
        ctx.progress("Fetching published commit from IPFS...");
        let shard_observer: Arc<ProgressObserver> =
            Arc::new(make_observer(ctx.use_json(), "Fetching shards..."));
        // Clone so the original handle survives the move into the progress closure
        // and can be finished below.
        let shard_obs = shard_observer.clone();

        let store = import::objects_store(&void_dir).map_err(void_err_to_cli)?;
        let foreign = import::fetch_published_commit(ImportOptions {
            store,
            remote,
            commit_cid: args.source.clone(),
            content_key,
            on_shard_progress: Some(Box::new(move |fetched, total| {
                shard_obs.set_message(&format!("Fetched {}/{} shards", fetched, total));
            })),
        })
        .map_err(void_err_to_cli)?;
        shard_observer.finish();

        let original_message = foreign.commit.message.clone();

        // Phase 2: Extract files to a temp directory
        // NOTE: `temp_dir` is an RAII guard — it must stay alive until
        // commit_workspace below has read the extracted files (the directory is
        // removed when the guard drops at the end of this closure).
        ctx.progress("Extracting files...");
        let temp_dir = tempfile::tempdir()
            .map_err(|e| CliError::io_error(format!("failed to create temp dir: {e}")))?;
        let temp_void_dir = temp_dir.path().join(".void");
        std::fs::create_dir_all(temp_void_dir.join("objects"))
            .map_err(|e| CliError::io_error(format!("failed to create temp void dir: {e}")))?;

        // We need the owner's vault for staged_key (for restore_files to write
        // staged blobs that commit_workspace can read)
        let repo = open_repo(cwd)?;
        let owner_ctx = repo.context().clone();
        // The first store handle was moved into ImportOptions above; open a fresh one.
        let store = import::objects_store(&void_dir).map_err(void_err_to_cli)?;

        let checkout_observer: Arc<ProgressObserver> =
            Arc::new(make_observer(ctx.use_json(), "Restoring files..."));
        let files_extracted = extract_foreign_files(
            &store,
            &foreign.commit,
            &foreign.reader,
            &owner_ctx.crypto.vault,
            temp_dir.path(),
            &temp_void_dir,
            &checkout_observer,
        )?;
        checkout_observer.finish();

        // Phase 3: Seal files under owner's key and create commit
        ctx.progress("Creating PR commit...");

        // Validate the source CID now; also needed as parent bytes for the new commit.
        let source_cid_obj = cid::parse(&args.source).map_err(void_err_to_cli)?;
        let source_cid_bytes = cid::to_bytes(&source_cid_obj);

        // Load signing key if available (signing is optional for this command)
        let signing_key = if signing_key_exists() {
            Some(Arc::new(load_signing_key()?))
        } else {
            None
        };

        // Build a VoidContext pointing at the temp directory as workspace
        // but using the real repo's void_dir for object storage
        let mut pr_ctx = owner_ctx.clone();
        pr_ctx.paths.root = camino::Utf8PathBuf::try_from(temp_dir.path().to_path_buf())
            .map_err(|e| CliError::internal(format!("invalid temp path: {e}")))?;
        pr_ctx.crypto.signing_key = signing_key;

        // Set up workspace dir to point to temp so seal_workspace reads from there
        pr_ctx.paths.workspace_dir = pr_ctx.paths.root.clone();

        // NOTE(review): byte-slicing assumes the CID string is ASCII (true for
        // common base-encoded CIDs, and cid::parse succeeded above) — confirm
        // cid::parse rejects non-ASCII forms.
        let commit_message = format!(
            "PR: {}\n\nFrom published commit {}",
            original_message,
            &args.source[..args.source.len().min(12)]
        );

        let commit_result = commit_workspace(CommitOptions {
            seal: SealOptions {
                ctx: pr_ctx,
                shard_map: ShardMap::new(64),
                content_key: None,
                parent_content_key: None,
            },
            message: commit_message,
            // Link the PR commit to the contributor's published commit.
            parent_cid: Some(void_core::crypto::CommitCid::from_bytes(source_cid_bytes)),
            allow_data_loss: false,
            // presumably tells the pipeline the parent is encrypted under a
            // foreign key and must not be decrypted locally — TODO confirm
            foreign_parent: true,
        })
        .map_err(void_err_to_cli)?;

        // Phase 4: Write branch ref (don't touch HEAD)
        refs::write_branch(void_dir_utf8, &branch_name, &commit_result.commit_cid)
            .map_err(void_err_to_cli)?;

        let commit_cid_str = commit_result.commit_cid.to_cid_string();
        // Prefer pipeline-reported counts; fall back to our extraction count.
        let total_files = commit_result.total_files.unwrap_or(files_extracted as u64);
        let total_bytes = commit_result.total_bytes.unwrap_or(0);

        // Human-readable summary only in non-JSON mode; JSON callers get the
        // structured PullRequestOutput below.
        if !ctx.use_json() {
            ctx.info(format!("Created branch '{}'", branch_name));
            ctx.info(format!(
                "  Source: {}",
                &args.source[..args.source.len().min(20)]
            ));
            ctx.info(format!("  Files: {}", total_files));
            ctx.info(format!("  Size: {}", format_bytes(total_bytes)));
            ctx.info(String::new());
            ctx.info(format!("To review: void diff {}", branch_name));
            ctx.info(format!("To merge:  void merge {}", branch_name));
        }

        Ok(PullRequestOutput {
            branch: branch_name,
            source_cid: args.source.clone(),
            commit_cid: commit_cid_str,
            files: total_files as usize,
            original_message,
        })
    })
}

// ============================================================================
// Helpers
// ============================================================================

/// Extract files from a foreign commit into a target directory.
fn extract_foreign_files(
    store: &FsStore,
    commit: &metadata::Commit,
    reader: &CommitReader,
    owner_vault: &KeyVault,
    target_dir: &Path,
    void_dir: &Path,
    observer: &Arc<ProgressObserver>,
) -> Result<usize, CliError> {
    let manifest = void_core::metadata::manifest_tree::TreeManifest::from_commit(store, commit, reader)
        .map_err(void_err_to_cli)?
        .ok_or_else(|| CliError::internal("commit has no manifest_cid"))?;

    let shards = manifest.shards();
    let mut files_to_restore = Vec::new();

    for entry_result in manifest.iter() {
        let entry = entry_result.map_err(void_err_to_cli)?;
        let shard_ref = shards.get(entry.shard_index as usize)
            .ok_or_else(|| CliError::internal(format!("shard_index {} out of range", entry.shard_index)))?;

        files_to_restore.push(FileToRestore {
            entry,
            shard_cid: shard_ref.cid.clone(),
            wrapped_key: shard_ref.wrapped_key.clone(),
        });
    }

    let obs: Option<Arc<dyn VoidObserver>> = Some(observer.clone() as Arc<dyn VoidObserver>);
    let (result, _) = restore_files(
        store,
        reader,
        owner_vault.staged_key().map_err(|e| void_err_to_cli(e.into()))?,
        &[], // no ancestor keys — foreign single-commit
        target_dir,
        &files_to_restore,
        &obs,
        Some(void_dir),
    )
    .map_err(void_err_to_cli)?;

    Ok(result.files_restored)
}