use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use serde::Serialize;
use void_core::cid;
use void_core::collab::manifest::RepoKey;
use void_core::crypto::{CommitReader, KeyVault, RepoSecret};
use void_core::support::ToVoidCid;
use void_core::metadata::{self, ShardMap};
use void_core::ops::import::{self, ImportOptions};
use void_core::pipeline::{commit_workspace, CommitOptions, SealOptions};
use void_core::store::{FsStore, IpfsStore};
use void_core::support::events::VoidObserver;
use void_core::workspace::checkout::{restore_files, FileToRestore};
use void_core::VoidContext;
use crate::context::{load_signing_key, signing_key_exists, void_err_to_cli};
use crate::ipfs_utils::{format_bytes, make_observer, parse_backend, parse_content_key};
use crate::observer::ProgressObserver;
use crate::output::{run_command, CliError, CliOptions};
use crate::repo_init::{self, NewRepoOpts};
/// JSON-serializable result of a successful `fork` command.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ForkOutput {
    // Display path of the newly created repository directory.
    pub path: String,
    // CID string of the source commit that was forked.
    pub source_cid: String,
    // CID of the initial commit created in the fork.
    pub commit_cid: String,
    // File count of the initial commit (falls back to the number of
    // files restored during extraction when the commit doesn't report one).
    pub files: usize,
    // Total bytes reported by the initial commit (0 when unknown).
    pub bytes: u64,
}
/// Arguments for the `fork` command.
pub struct ForkArgs {
    // CID string of the published commit to fork.
    pub source: String,
    // Optional target directory (absolute or relative to cwd);
    // defaults to the source repo's name when absent.
    pub path: Option<PathBuf>,
    // Optional content key for decrypting the source commit
    // (format is handled by `parse_content_key`).
    pub content_key: Option<String>,
    // IPFS backend selector, interpreted by `parse_backend`.
    pub backend: String,
    // Kubo RPC endpoint URL.
    pub kubo_url: String,
    // Optional IPFS gateway URL.
    pub gateway_url: Option<String>,
    // Network timeout for IPFS operations, in milliseconds.
    pub timeout_ms: u64,
}
/// Forks a published commit into a new, fully independent local repository.
///
/// High-level flow:
/// 1. fetch the source commit's objects from IPFS into a temp store,
/// 2. extract the files into the target directory,
/// 3. set up a fresh owner manifest with a brand-new repo key/secret,
/// 4. create an initial commit that records the foreign parent CID.
///
/// Progress and the final `ForkOutput` are reported through `run_command`.
pub fn run(cwd: &Path, args: ForkArgs, opts: &CliOptions) -> Result<(), CliError> {
    run_command("fork", opts, |ctx| {
        let content_key = parse_content_key(&args.content_key)?;
        let backend = parse_backend(&args.backend, &args.kubo_url, &args.gateway_url)?;
        let remote = Arc::new(IpfsStore::new(backend, Duration::from_millis(args.timeout_ms)));

        // Fetch into a throwaway object store first so a failed download
        // never leaves a half-initialized target directory behind.
        let temp_dir = tempfile::tempdir()
            .map_err(|e| CliError::io_error(format!("failed to create temp dir: {e}")))?;
        let temp_void_dir = temp_dir.path().join(".void");
        std::fs::create_dir_all(temp_void_dir.join("objects"))
            .map_err(|e| CliError::io_error(format!("failed to create temp objects dir: {e}")))?;

        ctx.progress("Fetching commit from IPFS...");
        let shard_observer: Arc<ProgressObserver> =
            Arc::new(make_observer(ctx.use_json(), "Fetching shards..."));
        let shard_obs = shard_observer.clone();
        let temp_store = import::objects_store(temp_void_dir.as_path()).map_err(void_err_to_cli)?;
        let foreign = import::fetch_published_commit(ImportOptions {
            store: temp_store,
            remote,
            commit_cid: args.source.clone(),
            content_key,
            on_shard_progress: Some(Box::new(move |fetched, total| {
                shard_obs.set_message(&format!("Fetched {}/{} shards", fetched, total));
            })),
        })
        .map_err(void_err_to_cli)?;
        shard_observer.finish();

        // Target dir must not already exist; the default name comes from
        // the source repo's manifest.
        let target_dir = resolve_target_dir(cwd, &args.path, foreign.repo_name.as_deref())?;
        if !ctx.use_json() {
            ctx.info(format!("Forking into '{}'...", target_dir.display()));
        }
        let void_dir = target_dir.join(".void");
        repo_init::create_void_dir_structure(&void_dir)?;

        // Move the fetched objects into the new repo's object store.
        let temp_objects = temp_void_dir.join("objects");
        let final_objects = void_dir.join("objects");
        copy_dir_contents(&temp_objects, &final_objects)?;

        // The fork gets its own encryption key; it shares no secrets with
        // the source repository.
        let new_key: [u8; 32] = rand::random();
        let new_repo_key = RepoKey::from_bytes(new_key);
        let new_vault = Arc::new(
            KeyVault::new(new_key)
                .map_err(|e| CliError::internal(format!("failed to initialize encryption: {e}")))?,
        );

        ctx.progress("Extracting files...");
        let store = import::objects_store(&void_dir).map_err(void_err_to_cli)?;
        let checkout_observer: Arc<ProgressObserver> =
            Arc::new(make_observer(ctx.use_json(), "Restoring files..."));
        let files_extracted = extract_files(
            &store,
            &foreign.commit,
            &foreign.reader,
            &new_vault,
            &target_dir,
            &void_dir,
            &checkout_observer,
        )?;
        checkout_observer.finish();

        ctx.progress("Creating independent repository...");
        let repo_id = uuid::Uuid::new_v4().to_string();
        let repo_secret_hex = repo_init::generate_repo_secret();
        let result = repo_init::setup_owner_manifest(
            &void_dir,
            &new_repo_key,
            NewRepoOpts {
                repo_name: foreign.repo_name.clone(),
                repo_id: Some(repo_id),
                repo_secret: Some(repo_secret_hex.clone()),
            },
        )?;
        // Registry update is best-effort: a failure here must not fail the fork.
        if let (Some(id), Some(name)) = (&result.manifest.repo_id, &result.manifest.repo_name) {
            let _ = crate::registry::register_repo(id, name, &target_dir, "fork", None);
        }

        ctx.progress("Creating initial commit...");
        let repo_secret_bytes = hex::decode(&repo_secret_hex)
            .map_err(|e| CliError::internal(format!("invalid repo_secret: {e}")))?;
        // Validate the length explicitly: `copy_from_slice` would panic on a
        // mismatch instead of returning an error.
        if repo_secret_bytes.len() != 32 {
            return Err(CliError::internal(format!(
                "invalid repo_secret: expected 32 bytes, got {}",
                repo_secret_bytes.len()
            )));
        }
        let mut repo_secret_arr = [0u8; 32];
        repo_secret_arr.copy_from_slice(&repo_secret_bytes);
        let source_cid_obj = cid::parse(&args.source).map_err(void_err_to_cli)?;
        let source_cid_bytes = cid::to_bytes(&source_cid_obj);
        // Sign the initial commit only when a signing key is already configured.
        let signing_key = if signing_key_exists() {
            Some(Arc::new(load_signing_key()?))
        } else {
            None
        };
        let mut fork_ctx = VoidContext::headless(&void_dir, new_vault, 0)
            .map_err(void_err_to_cli)?;
        fork_ctx.paths.root = camino::Utf8PathBuf::try_from(target_dir.clone())
            .map_err(|e| CliError::internal(format!("invalid target path: {e}")))?;
        fork_ctx.repo.secret = RepoSecret::new(repo_secret_arr);
        fork_ctx.crypto.signing_key = signing_key;
        let commit_result = commit_workspace(CommitOptions {
            seal: SealOptions {
                ctx: fork_ctx,
                shard_map: ShardMap::new(64),
                content_key: None,
                parent_content_key: None,
            },
            message: format!("Forked from {}", &args.source),
            // The parent lives in the source repo, hence `foreign_parent`.
            parent_cid: Some(void_core::crypto::CommitCid::from_bytes(source_cid_bytes)),
            allow_data_loss: false,
            foreign_parent: true,
        })
        .map_err(void_err_to_cli)?;

        let total_files = commit_result.total_files.unwrap_or(files_extracted as u64);
        let total_bytes = commit_result.total_bytes.unwrap_or(0);
        let commit_cid_str = commit_result.commit_cid.to_cid_string();
        if !ctx.use_json() {
            // CID strings are ASCII, so byte-index truncation cannot split a char.
            let short_cid = &commit_cid_str[..12.min(commit_cid_str.len())];
            ctx.info(format!(
                "Forked from {}",
                &args.source[..args.source.len().min(12)]
            ));
            ctx.info(format!("Created commit {}...", short_cid));
            ctx.info(format!(" Files: {}", total_files));
            ctx.info(format!(" Size: {}", format_bytes(total_bytes)));
        }
        Ok(ForkOutput {
            path: target_dir.display().to_string(),
            source_cid: args.source.clone(),
            commit_cid: commit_cid_str,
            files: total_files as usize,
            bytes: total_bytes,
        })
    })
}
/// Materializes every file recorded in `commit`'s manifest into `target_dir`.
///
/// Resolves each manifest entry to its shard (CID + wrapped key), then
/// delegates the actual restore to `restore_files` using the vault's staged
/// key. Returns the number of files restored.
fn extract_files(
    store: &FsStore,
    commit: &metadata::Commit,
    reader: &CommitReader,
    new_vault: &KeyVault,
    target_dir: &Path,
    void_dir: &Path,
    observer: &Arc<ProgressObserver>,
) -> Result<usize, CliError> {
    let manifest = void_core::metadata::manifest_tree::TreeManifest::from_commit(store, commit, reader)
        .map_err(void_err_to_cli)?
        .ok_or_else(|| CliError::internal("commit has no manifest_cid"))?;
    let shards = manifest.shards();

    // Pair each entry with its shard reference, failing fast on the first
    // iteration error or out-of-range shard index.
    let files_to_restore = manifest
        .iter()
        .map(|entry_result| {
            let entry = entry_result.map_err(void_err_to_cli)?;
            let shard_ref = shards.get(entry.shard_index as usize).ok_or_else(|| {
                CliError::internal(format!("shard_index {} out of range", entry.shard_index))
            })?;
            Ok(FileToRestore {
                entry,
                shard_cid: shard_ref.cid.clone(),
                wrapped_key: shard_ref.wrapped_key.clone(),
            })
        })
        .collect::<Result<Vec<_>, CliError>>()?;

    let obs: Option<Arc<dyn VoidObserver>> = Some(observer.clone() as Arc<dyn VoidObserver>);
    let staged_key = new_vault.staged_key().map_err(|e| void_err_to_cli(e.into()))?;
    let (result, _) = restore_files(
        store,
        reader,
        staged_key,
        &[],
        target_dir,
        &files_to_restore,
        &obs,
        Some(void_dir),
    )
    .map_err(void_err_to_cli)?;
    Ok(result.files_restored)
}
/// Determines the directory the fork will be created in.
///
/// An explicit path wins (absolute paths are used verbatim, relative paths
/// are resolved against `cwd`); otherwise the source repo's name is used,
/// falling back to "forked-repo". Errors with a conflict if the resulting
/// directory already exists.
fn resolve_target_dir(
    cwd: &Path,
    explicit: &Option<PathBuf>,
    repo_name: Option<&str>,
) -> Result<PathBuf, CliError> {
    let target_dir = match explicit {
        Some(p) if p.is_absolute() => p.clone(),
        Some(p) => cwd.join(p),
        None => cwd.join(repo_name.unwrap_or("forked-repo")),
    };
    if target_dir.exists() {
        return Err(CliError::conflict(format!(
            "target directory already exists: {}",
            target_dir.display()
        )));
    }
    Ok(target_dir)
}
fn copy_dir_contents(src: &Path, dst: &Path) -> Result<(), CliError> {
let entries = std::fs::read_dir(src)
.map_err(|e| CliError::io_error(format!("failed to read temp objects: {e}")))?;
for entry in entries {
let entry = entry.map_err(|e| CliError::io_error(e.to_string()))?;
let dest_path = dst.join(entry.file_name());
if entry.file_type().map_err(|e| CliError::io_error(e.to_string()))?.is_dir() {
std::fs::create_dir_all(&dest_path)
.map_err(|e| CliError::io_error(e.to_string()))?;
copy_dir_contents(&entry.path(), &dest_path)?;
} else {
std::fs::copy(entry.path(), &dest_path)
.map_err(|e| CliError::io_error(format!("failed to copy object: {e}")))?;
}
}
Ok(())
}