use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use serde::Serialize;
use void_core::cid;
use void_core::support::ToVoidCid;
use void_core::config::{Config, CoreConfig};
use void_core::pipeline::{clone_repo, CloneMode, CloneOptions, CloneResult};
use void_core::store::FsStore;
use void_core::workspace::checkout::{checkout_tree, CheckoutOptions};
use camino::Utf8PathBuf;
use void_core::crypto::{CommitReader, ContentKey, EncryptedCommit, EncryptedMetadata, EncryptedRepoManifest, EncryptedShard, KeyVault};
use void_core::metadata::CommitStats;
use void_core::store::{RemoteStore, ObjectStoreExt};
use crate::context::void_err_to_cli;
use crate::observer::ProgressObserver;
use crate::output::{run_command, CliError, CliOptions};
/// Arguments for `void clone`, as parsed by the CLI front end.
#[derive(Debug)]
pub struct CloneArgs {
    /// Clone source: a commit CID, a local registry name, or (with `remote`) a repo name.
    pub source: String,
    /// Hex-encoded 32-byte repo key (`--key`); required for bare-CID clones
    /// unless `content_key` is supplied instead.
    pub key: Option<String>,
    /// Hex-encoded scoped content key (`--content-key`) for read-limited access.
    pub content_key: Option<String>,
    /// Target directory; relative paths are joined onto the current directory.
    pub path: Option<PathBuf>,
    /// Backend selector passed to `crate::backend::resolve_backend`.
    pub backend: Option<String>,
    /// Kubo (IPFS daemon) API URL.
    pub kubo_url: String,
    /// Optional IPFS HTTP gateway URL.
    pub gateway_url: Option<String>,
    /// Network timeout in milliseconds.
    pub timeout_ms: u64,
    /// Clone mode string: "depth1", "full", "lazy" or "virtual" (see `parse_mode`).
    pub mode: String,
    /// Skip interactive prompts (`--yes`).
    pub yes: bool,
    /// Named remote to resolve `source` against, if any.
    pub remote: Option<String>,
}
/// JSON-serializable result of a successful clone (camelCase keys).
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct CloneOutput {
    /// Path of the cloned working directory (as displayed).
    pub path: String,
    /// Commit CID that was cloned.
    pub commit: String,
    /// Metadata bundle CID for that commit.
    pub metadata: String,
    /// Effective clone mode ("depth1", "full", "lazy", "virtual").
    pub mode: String,
    /// Number of shards actually fetched.
    pub shards_fetched: usize,
    /// Total number of shards referenced by the commit's shard map.
    pub shards_total: usize,
    /// Files restored to the working tree; `None` for lazy/virtual clones.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub files_extracted: Option<usize>,
    /// Human-readable source label when cloning by name; `None` for raw CIDs.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub source: Option<String>,
}
/// `void clone` entry point: materialize a repository into a target directory.
///
/// Source resolution order:
///   1. a literal CID (requires `--key` or `--content-key`),
///   2. `--remote <name>`: ask the remote daemon for the branch HEAD,
///   3. otherwise a local registry name, which also recovers the repo key.
///
/// Fix: the config-identity `match` previously read `match ®istry_info {`
/// (encoding-corrupted `&registry_info`), which does not compile.
pub fn run(cwd: &Path, args: CloneArgs, opts: &CliOptions) -> Result<(), CliError> {
    run_command("clone", opts, |ctx| {
        // A source that parses as a CID is cloned directly; anything else is a
        // name that must be resolved first.
        let is_cid_source = cid::parse(&args.source).is_ok();
        let (commit_cid, source_label, resolved_key, registry_info): (
            String,
            Option<String>,
            Option<[u8; 32]>,
            Option<(String, String)>,
        ) = if is_cid_source {
            (args.source.clone(), None, None, None)
        } else if let Some(ref remote_name) = args.remote {
            // Resolve the name via a remote daemon: connect, authenticate with
            // our signing identity, then ask for the branch HEAD CID.
            ctx.progress(format!("Resolving '{}' from remote '{}'...", args.source, remote_name));
            let resolved_remote = crate::remotes::resolve_remote(remote_name, None)
                .map_err(|e| CliError::internal(format!("remote '{}': {}", remote_name, e)))?;
            let branch = "trunk".to_string();
            // The CLI is synchronous; spin up a one-shot runtime for the RPC.
            let rt = tokio::runtime::Builder::new_current_thread()
                .enable_all()
                .build()
                .map_err(|e| CliError::internal(format!("runtime: {e}")))?;
            let cid_str = rt.block_on(async {
                let peer_id: libp2p::PeerId = resolved_remote.peer_id.parse()
                    .map_err(|e| format!("invalid peer_id: {e}"))?;
                let addr: libp2p::Multiaddr = resolved_remote.addr.parse()
                    .map_err(|e| format!("invalid addr: {e}"))?;
                let client = void_daemon::DaemonClient::connect(addr, peer_id).await
                    .map_err(|e| format!("connect to remote: {e}"))?;
                let signing_key = crate::context::load_signing_key()
                    .map_err(|e| format!("load identity: {e}"))?;
                let dalek_key = ed25519_dalek::SigningKey::from_bytes(signing_key.as_bytes());
                client.authenticate(&dalek_key).await
                    .map_err(|e| format!("auth: {e}"))?;
                client.get_head(&args.source, &branch).await
                    .map_err(|e| format!("get_head: {e}"))
            }).map_err(|e| CliError::internal(format!("remote resolve failed: {e}")))?;
            ctx.info(format!("Resolved '{}' → {}", args.source, &cid_str[..12.min(cid_str.len())]));
            (cid_str, Some(format!("{}@{}", args.source, remote_name)), None, None)
        } else {
            // Local registry lookup; this also recovers the repo key and the
            // repo's registry identity (id + name).
            let (cid_str, resolved_key, repo_id, repo_name) =
                resolve_clone_registry_name_with_record(&args.source)?;
            (
                cid_str,
                Some(args.source.clone()),
                Some(resolved_key),
                Some((repo_id, repo_name)),
            )
        };
        // --content-key grants scoped (read-limited) access without the repo key.
        let scoped_content_key: Option<ContentKey> = if let Some(ref ck_hex) = args.content_key {
            Some(ContentKey::from_hex(ck_hex)
                .map_err(|e| CliError::invalid_args(format!("invalid content-key: {}", e)))?)
        } else {
            None
        };
        // Key precedence: registry-resolved key, then --key, then content-key only.
        let key: Option<[u8; 32]> = if let Some(ik) = resolved_key {
            Some(ik)
        } else {
            match &args.key {
                Some(k) => Some(parse_hex_key(k)?),
                None => {
                    if scoped_content_key.is_some() {
                        None
                    } else {
                        return Err(CliError::invalid_args(
                            "--key is required when cloning from a CID (or use --content-key for scoped access)",
                        ));
                    }
                }
            }
        };
        let mode = parse_mode(&args.mode)?;
        let timeout = Duration::from_millis(args.timeout_ms);
        let void_home = dirs::home_dir().map(|h| h.join(".void"));
        let resolved = crate::backend::resolve_backend(
            args.backend.as_deref(),
            &args.kubo_url,
            &args.gateway_url,
            void_home.as_deref(),
            timeout,
        )?;
        let backend = resolved.ipfs_backend.clone();
        let daemon_remote = Some(resolved.remote);
        // Pick a target directory: explicit --path, registry repo name, the
        // second path segment of a name label, or — for raw CID clones —
        // probe the commit for a repo name (prompting unless --yes/JSON).
        let target_dir = match &args.path {
            Some(dir) => {
                if dir.is_absolute() {
                    dir.clone()
                } else {
                    cwd.join(dir)
                }
            }
            None => {
                if let Some((_, ref repo_name)) = registry_info {
                    cwd.join(repo_name)
                } else if let Some(ref label) = source_label {
                    let parts: Vec<&str> = label.split('/').collect();
                    if parts.len() >= 2 {
                        cwd.join(parts[1])
                    } else {
                        cwd.to_path_buf()
                    }
                } else {
                    // Raw CID clone: decrypt the commit header to learn the
                    // repo name / message / stats before touching the disk.
                    ctx.progress("Probing commit...");
                    let probe_vault = if let Some(ref ck) = scoped_content_key {
                        KeyVault::from_content_key(*ck)
                    } else if let Some(k) = key {
                        KeyVault::new(k)
                            .map_err(|e| CliError::internal(format!("failed to initialize encryption: {e}")))?
                    } else {
                        return Err(CliError::invalid_args("--key or --content-key is required when cloning from a CID"));
                    };
                    let (repo_name, message, stats) =
                        probe_commit(&commit_cid, &probe_vault, daemon_remote.as_ref().unwrap().as_ref())?;
                    if !ctx.use_json() {
                        let short_cid = if commit_cid.len() > 12 {
                            &commit_cid[..12]
                        } else {
                            &commit_cid
                        };
                        ctx.info(format!(" Commit {}...", short_cid));
                        ctx.info(format!(" Message {}", message));
                        if let Some(ref s) = stats {
                            ctx.info(format!(
                                " Files {} files ({})",
                                s.total_files,
                                format_bytes(s.total_bytes)
                            ));
                        }
                        if let Some(ref name) = repo_name {
                            ctx.info(format!(" Repo {}", name));
                        }
                        eprintln!();
                    }
                    let non_interactive = args.yes || ctx.use_json();
                    if non_interactive {
                        match repo_name {
                            Some(name) => cwd.join(&name),
                            None => {
                                return Err(CliError::invalid_args(
                                    "could not determine repo name; use --path to specify target directory",
                                ))
                            }
                        }
                    } else {
                        // Interactive prompt, pre-filled with the probed name.
                        let dir_name: String = match repo_name {
                            Some(ref default) => dialoguer::Input::new()
                                .with_prompt("Clone directory?")
                                .default(default.clone())
                                .interact_text()
                                .map_err(|e| {
                                    CliError::internal(format!("prompt failed: {e}"))
                                })?,
                            None => dialoguer::Input::new()
                                .with_prompt("Clone directory")
                                .interact_text()
                                .map_err(|e| {
                                    CliError::internal(format!("prompt failed: {e}"))
                                })?,
                        };
                        cwd.join(&dir_name)
                    }
                }
            }
        };
        let void_dir = target_dir.join(".void");
        if void_dir.exists() {
            return Err(CliError::conflict(format!(
                "repository already exists at {}",
                void_dir.display()
            )));
        }
        if !ctx.use_json() {
            ctx.info(format!("Cloning into '{}'...", target_dir.display()));
        }
        ctx.progress("Creating .void directory structure...");
        crate::repo_init::create_void_dir_structure(&void_dir)?;
        let observer: Arc<ProgressObserver> = if ctx.use_json() {
            Arc::new(ProgressObserver::new_hidden())
        } else {
            Arc::new(ProgressObserver::new("Fetching objects from IPFS..."))
        };
        ctx.progress("Cloning repository from IPFS...");
        // Content-key-only clones use the scoped path (forced depth1); any
        // clone holding a full repo key goes through the normal pipeline.
        let (clone_result, used_content_key_clone) = if let Some(ref ck) = scoped_content_key {
            if key.is_none() {
                let effective_mode = CloneMode::Depth1;
                let result = clone_repo_with_content_key(
                    &void_dir,
                    &commit_cid,
                    ck,
                    daemon_remote.clone().unwrap(),
                    effective_mode,
                    Some(observer.clone()),
                )?;
                (result, true)
            } else {
                // A repo key was also supplied; prefer the full pipeline.
                let clone_opts = CloneOptions {
                    ctx: void_core::VoidContext::headless(
                        void_dir.clone(),
                        Arc::new(KeyVault::new(key.unwrap()).map_err(|e| {
                            CliError::internal(format!("failed to initialize encryption: {e}"))
                        })?),
                        0,
                    )
                    .map_err(void_err_to_cli)?,
                    commit_cid: commit_cid.clone(),
                    backend,
                    timeout,
                    mode,
                    observer: Some(observer.clone()),
                    remote: daemon_remote.clone(),
                };
                (clone_repo(clone_opts).map_err(void_err_to_cli)?, false)
            }
        } else {
            // key is guaranteed Some here: the earlier key-resolution step
            // returned an error when both --key and --content-key were absent.
            let clone_opts = CloneOptions {
                ctx: void_core::VoidContext::headless(
                    void_dir.clone(),
                    Arc::new(KeyVault::new(key.unwrap()).map_err(|e| {
                        CliError::internal(format!("failed to initialize encryption: {e}"))
                    })?),
                    0,
                )
                .map_err(void_err_to_cli)?,
                commit_cid: commit_cid.clone(),
                backend,
                timeout,
                mode,
                observer: Some(observer.clone()),
                remote: None,
            };
            (clone_repo(clone_opts).map_err(void_err_to_cli)?, false)
        };
        observer.finish();
        if !ctx.use_json() {
            let short_cid = if commit_cid.len() > 12 {
                &commit_cid[..12]
            } else {
                &commit_cid
            };
            if let Some(ref source) = source_label {
                ctx.info(format!("Fetched commit {}... from {}", short_cid, source));
            } else {
                ctx.info(format!("Fetched commit {}...", short_cid));
            }
            ctx.info(format!(
                "Fetched {}/{} shards (mode: {})",
                clone_result.shards_fetched,
                clone_result.shards_total,
                mode_to_string(clone_result.mode)
            ));
        }
        ctx.progress("Saving configuration...");
        // Repo identity for the config: registry record if we have one,
        // otherwise the cloned manifest (when a manifest CID was present).
        let (cfg_repo_id, cfg_repo_name) = match &registry_info {
            Some((id, name)) => (Some(id.to_string()), Some(name.to_string())),
            None => {
                if clone_result.repo_manifest_cid.is_some() {
                    let manifest = void_core::collab::manifest::load_manifest(&void_dir)
                        .ok()
                        .flatten();
                    (
                        manifest.as_ref().and_then(|m| m.repo_id.clone()),
                        manifest.as_ref().and_then(|m| m.repo_name.clone()),
                    )
                } else {
                    (None, None)
                }
            }
        };
        save_config(
            &void_dir,
            &clone_result,
            cfg_repo_id.as_deref(),
            cfg_repo_name.as_deref(),
        )?;
        // With a full repo key, make sure our identity can unlock the repo
        // later without re-entering the key.
        if let Some(ref k) = key {
            ensure_cloner_manifest(&void_dir, k, ctx)?;
        }
        let mode_str = mode_to_string(clone_result.mode);
        // Eager modes check out the working tree now; lazy/virtual defer it.
        let files_extracted = if clone_result.mode != CloneMode::Lazy && clone_result.mode != CloneMode::Virtual {
            ctx.progress("Checking out working tree...");
            let checkout_observer: Arc<ProgressObserver> = if ctx.use_json() {
                Arc::new(ProgressObserver::new_hidden())
            } else {
                Arc::new(ProgressObserver::new("Restoring files..."))
            };
            let checkout_vault = if used_content_key_clone {
                KeyVault::from_content_key(*scoped_content_key.as_ref().unwrap())
            } else {
                KeyVault::new(key.unwrap())
                    .map_err(|e| CliError::internal(format!("failed to initialize encryption: {e}")))?
            };
            let count = checkout_working_tree(
                &void_dir,
                &checkout_vault,
                &commit_cid,
                &target_dir,
                checkout_observer.clone(),
            )?;
            checkout_observer.finish();
            if !ctx.use_json() {
                ctx.info("Checked out working tree");
            }
            Some(count)
        } else {
            if !ctx.use_json() {
                ctx.info(format!(
                    "Cloned in {} mode (no working tree checkout). Use 'void unseal' to extract files.",
                    mode_str
                ));
            }
            None
        };
        // Registry bookkeeping is best effort: warn, never fail the clone.
        let reg_identity: Option<(String, String)> = if let Some((ref id, ref name)) = registry_info
        {
            Some((id.clone(), name.clone()))
        } else {
            None
        };
        if let Some((ref repo_id, ref repo_name)) = reg_identity {
            if let Err(e) =
                crate::registry::register_repo(repo_id, repo_name, &target_dir, "clone", None)
            {
                ctx.warn(format!("Failed to register cloned repo in registry: {}", e));
            }
            if let Err(e) = crate::registry::update_head(repo_id, "trunk", &commit_cid) {
                ctx.warn(format!("Failed to update registry HEAD: {}", e));
            }
        }
        Ok(CloneOutput {
            path: target_dir.display().to_string(),
            commit: clone_result.commit_cid,
            metadata: clone_result.metadata_cid,
            mode: mode_str,
            shards_fetched: clone_result.shards_fetched,
            shards_total: clone_result.shards_total,
            files_extracted,
            source: source_label,
        })
    })
}
/// Test-only alias for [`parse_hex_key`], kept so the existing test suite's
/// name keeps working. Previously this duplicated the decoding logic verbatim;
/// delegating ensures the two can never drift apart.
#[cfg(test)]
fn parse_key(hex_str: &str) -> Result<[u8; 32], CliError> {
    parse_hex_key(hex_str)
}
/// Scoped clone path: fetch and locally cache a commit, its metadata bundle
/// and (unless lazy/virtual) its shards using only a [`ContentKey`], without
/// the full repo key. Also writes the `trunk` ref and, when the repo manifest
/// is fetchable and decryptable, dumps contributors to `contributors.json`.
fn clone_repo_with_content_key(
    void_dir: &Path,
    commit_cid_str: &str,
    content_key: &ContentKey,
    remote: Arc<dyn RemoteStore>,
    mode: CloneMode,
    observer: Option<Arc<ProgressObserver>>,
) -> Result<CloneResult, CliError> {
    use void_core::metadata::MetadataBundle;
    let objects_dir = Utf8PathBuf::try_from(void_dir.join("objects"))
        .map_err(|e| CliError::internal(format!("invalid objects path: {}", e)))?;
    let store = FsStore::new(objects_dir).map_err(void_err_to_cli)?;
    let commit_cid_obj = cid::parse(commit_cid_str).map_err(void_err_to_cli)?;
    // Fetch the encrypted commit and cache it in the local object store.
    let commit_encrypted = EncryptedCommit::from_bytes(remote.fetch_raw(&commit_cid_obj).map_err(void_err_to_cli)?);
    if !store.exists(&commit_cid_obj).map_err(void_err_to_cli)? {
        store.put_blob(&commit_encrypted).map_err(void_err_to_cli)?;
    }
    let vault = KeyVault::from_content_key(*content_key);
    let (commit_plaintext, reader) = CommitReader::open_with_vault(&vault, &commit_encrypted)
        .map_err(|e| CliError::encryption_error(format!("failed to decrypt commit: {e}")))?;
    let commit = commit_plaintext.parse().map_err(void_err_to_cli)?;
    let metadata_cid = commit.metadata_bundle.to_void_cid().map_err(void_err_to_cli)?;
    let metadata_cid_str = metadata_cid.to_string();
    // Same fetch-then-cache sequence for the metadata bundle.
    let metadata_encrypted = EncryptedMetadata::from_bytes(remote.fetch_raw(&metadata_cid).map_err(void_err_to_cli)?);
    if !store.exists(&metadata_cid).map_err(void_err_to_cli)? {
        store.put_blob(&metadata_encrypted).map_err(void_err_to_cli)?;
    }
    let metadata: MetadataBundle = reader.decrypt_metadata(&metadata_encrypted)
        .map_err(void_err_to_cli)?;
    let mut fetched = 0usize;
    let mut total = 0usize;
    if mode != CloneMode::Lazy && mode != CloneMode::Virtual {
        // Eager modes: pull every shard referenced by the shard map now.
        for range in &metadata.shard_map.ranges {
            let cid_bytes = match range.cid.as_ref() {
                Some(c) => c,
                None => continue, // range with no backing shard CID: nothing to fetch
            };
            total += 1;
            let shard_cid = cid::from_bytes(cid_bytes.as_bytes()).map_err(void_err_to_cli)?;
            let shard_encrypted = EncryptedShard::from_bytes(remote.fetch_raw(&shard_cid).map_err(void_err_to_cli)?);
            if !store.exists(&shard_cid).map_err(void_err_to_cli)? {
                store.put_blob(&shard_encrypted).map_err(void_err_to_cli)?;
            }
            fetched += 1;
            if let Some(ref obs) = observer {
                obs.set_message(&format!("Fetched {}/{} shards", fetched, total));
            }
        }
    } else {
        // Lazy/virtual: report the shard count without downloading anything.
        total = metadata.shard_map.ranges.iter().filter(|r| r.cid.is_some()).count();
    }
    let void_dir_utf8 = Utf8PathBuf::try_from(void_dir.to_path_buf())
        .map_err(|e| CliError::internal(format!("invalid void dir path: {}", e)))?;
    // Point the local `trunk` branch at the cloned commit.
    let commit_cid_bytes = cid::to_bytes(&commit_cid_obj);
    void_core::refs::write_branch(&void_dir_utf8, "trunk", &void_core::crypto::CommitCid::from_bytes(commit_cid_bytes))
        .map_err(void_err_to_cli)?;
    // Best effort: fetch/decrypt the repo manifest for contributor info.
    // Failures here are deliberately swallowed — the clone is still usable.
    let repo_manifest_cid_str = if let Some(ref rm_cid_bytes) = commit.repo_manifest_cid {
        let rm_cid = rm_cid_bytes.to_void_cid().map_err(void_err_to_cli)?;
        let rm_cid_str = rm_cid.to_string();
        match remote.fetch_raw(&rm_cid) {
            Ok(rm_bytes) => {
                let rm_blob = EncryptedRepoManifest::from_bytes(rm_bytes);
                if !store.exists(&rm_cid).map_err(void_err_to_cli)? {
                    store.put_blob(&rm_blob).map_err(void_err_to_cli)?;
                }
                match reader.decrypt_repo_manifest(&rm_blob) {
                    Ok(manifest) => {
                        if let Ok(json) = manifest.to_json() {
                            // Write failure is non-fatal by design.
                            let _ = std::fs::write(void_dir.join("contributors.json"), &json);
                        }
                        Some(rm_cid_str)
                    }
                    Err(_) => None,
                }
            }
            Err(_) => None,
        }
    } else {
        None
    };
    Ok(CloneResult {
        commit_cid: commit_cid_str.to_string(),
        metadata_cid: metadata_cid_str,
        repo_secret: metadata.repo_secret,
        shards_fetched: fetched,
        shards_total: total,
        mode,
        repo_manifest_cid: repo_manifest_cid_str,
    })
}
/// Parse a user-supplied clone-mode string (case-insensitive) into a
/// `CloneMode`, rejecting anything outside the four known modes.
fn parse_mode(mode_str: &str) -> Result<CloneMode, CliError> {
    // Accepted mode names, matched against the lowercased input.
    let table = [
        ("depth1", CloneMode::Depth1),
        ("full", CloneMode::Full),
        ("lazy", CloneMode::Lazy),
        ("virtual", CloneMode::Virtual),
    ];
    let wanted = mode_str.to_lowercase();
    match table.iter().find(|(name, _)| *name == wanted) {
        Some(&(_, mode)) => Ok(mode),
        None => Err(CliError::invalid_args(format!(
            "invalid clone mode '{}': expected depth1, full, lazy, or virtual",
            wanted
        ))),
    }
}
/// Render a `CloneMode` as the lowercase string form accepted by `parse_mode`.
fn mode_to_string(mode: CloneMode) -> String {
    let name = match mode {
        CloneMode::Depth1 => "depth1",
        CloneMode::Full => "full",
        CloneMode::Lazy => "lazy",
        CloneMode::Virtual => "virtual",
    };
    name.to_string()
}
/// Decrypt just enough of a commit to preview a clone: returns the repo name
/// (when a repo manifest is present and decryptable), the commit message,
/// and the optional commit statistics.
fn probe_commit(
    commit_cid_str: &str,
    vault: &KeyVault,
    remote: &dyn RemoteStore,
) -> Result<(Option<String>, String, Option<CommitStats>), CliError> {
    let commit_cid = cid::parse(commit_cid_str).map_err(void_err_to_cli)?;
    let commit_encrypted = EncryptedCommit::from_bytes(remote.fetch_raw(&commit_cid).map_err(void_err_to_cli)?);
    let (commit_bytes, reader) =
        CommitReader::open_with_vault(vault, &commit_encrypted).map_err(void_err_to_cli)?;
    let commit = commit_bytes.parse().map_err(void_err_to_cli)?;
    // The repo name lives in the optional encrypted repo manifest; treat any
    // fetch or decryption failure as "name unknown" instead of an error.
    let repo_name = if let Some(ref rm_cid_bytes) = commit.repo_manifest_cid {
        let rm_cid = rm_cid_bytes.to_void_cid().map_err(void_err_to_cli)?;
        match remote.fetch_raw(&rm_cid) {
            Ok(rm_bytes) => {
                let rm_blob = EncryptedRepoManifest::from_bytes(rm_bytes);
                match reader.decrypt_repo_manifest(&rm_blob) {
                    Ok(manifest) => manifest.repo_name.clone(),
                    Err(_) => None,
                }
            }
            Err(_) => None,
        }
    } else {
        None
    };
    Ok((repo_name, commit.message.clone(), commit.stats.clone()))
}
/// Format a byte count for humans: exact bytes below 1024, otherwise one
/// decimal place in the largest fitting unit up to GB (1 KB = 1024 B here;
/// values of a TB or more still render in GB, as before).
fn format_bytes(bytes: u64) -> String {
    if bytes < 1024 {
        return format!("{} B", bytes);
    }
    // Walk up the unit ladder, dividing by 1024 while the value still
    // overflows the current unit.
    let mut value = bytes as f64 / 1024.0;
    let mut unit = "KB";
    for next in ["MB", "GB"] {
        if value < 1024.0 {
            break;
        }
        value /= 1024.0;
        unit = next;
    }
    format!("{:.1} {}", value, unit)
}
/// Resolve a registry name into everything a clone needs:
/// `(head_cid, repo_key, repo_id, repo_name)`.
///
/// The HEAD CID comes from the registry record, preferring the `trunk`
/// branch and falling back to any recorded head. The 32-byte repo key is
/// recovered by trying each known local checkout's manifest with the
/// caller's cached identity; the first one that unlocks wins.
fn resolve_clone_registry_name_with_record(
    name: &str,
) -> Result<(String, [u8; 32], String, String), CliError> {
    let record = crate::registry::resolve_target(name).map_err(|e| CliError::not_found(e))?;
    let cid_str = record
        .head
        .get("trunk")
        .or_else(|| record.head.values().next())
        .ok_or_else(|| {
            CliError::not_found(format!(
                "repo '{}' has no HEAD CID in registry (no pushes recorded yet)",
                name
            ))
        })?
        .clone();
    let identity = crate::context::load_identity_cached()?;
    // Probe each recorded local checkout until one yields the repo key.
    let mut key = None;
    for local_path in &record.local_paths {
        let source_void_dir = local_path.join(".void");
        if source_void_dir.exists() {
            if let Ok(repo_key) = void_core::collab::manifest::load_repo_key(&source_void_dir, Some(&identity)) {
                key = Some(*repo_key.as_bytes());
                break;
            }
        }
    }
    let key = key.ok_or_else(|| {
        CliError::not_found(format!(
            "could not load encryption key for repo '{}'. \
             Ensure a local checkout exists and your identity has access.",
            name,
        ))
    })?;
    Ok((cid_str, key, record.id.clone(), record.name.clone()))
}
/// Decode a trimmed hex string into a 32-byte key, failing with an
/// invalid-args error on bad hex or a wrong byte count.
fn parse_hex_key(hex_str: &str) -> Result<[u8; 32], CliError> {
    let decoded = hex::decode(hex_str.trim())
        .map_err(|e| CliError::invalid_args(format!("invalid key hex: {}", e)))?;
    <[u8; 32]>::try_from(decoded)
        .map_err(|_| CliError::invalid_args("key must be 32 bytes (64 hex chars)"))
}
/// After a successful `--key` clone, wrap the repo key for the local identity
/// in the collaboration manifest and record that identity as a contributor,
/// so later identity-based key loading works without re-entering the raw key.
///
/// With no configured identity this warns on stderr and returns `Ok(())` —
/// the clone itself is still valid, it just stays key-only.
fn ensure_cloner_manifest(
    void_dir: &Path,
    key: &[u8; 32],
    _ctx: &crate::output::CommandContext,
) -> Result<(), CliError> {
    use std::time::{SystemTime, UNIX_EPOCH};
    use void_core::collab::manifest::{
        ecies_wrap_key, save_manifest, Contributor, ContributorId, Manifest, RepoKey,
    };
    let (username, signing_pubkey, recipient_pubkey, _nostr) =
        match crate::context::load_public_identity() {
            Ok(id) => id,
            Err(_) => {
                // Best effort: without an identity we cannot wrap the key.
                eprintln!(
                    "warning: No identity found — repo key not wrapped in manifest. \
                     Run 'void identity init' then re-clone to enable identity-based access."
                );
                return Ok(());
            }
        };
    let username = username.unwrap_or_else(|| "anonymous".to_string());
    let repo_key = RepoKey::from_bytes(*key);
    // Encrypt (wrap) the repo key to this identity's recipient public key.
    let wrapped = ecies_wrap_key(&repo_key, &recipient_pubkey)
        .map_err(|e| CliError::internal(format!("failed to wrap key: {}", e)))?;
    // Reuse the cloned manifest if one exists; otherwise start a fresh one.
    let mut manifest = void_core::collab::manifest::load_manifest(void_dir)
        .ok()
        .flatten()
        .unwrap_or_else(|| {
            let mut m = Manifest::new(signing_pubkey.clone(), None);
            m.repo_id = None;
            m.repo_name = None;
            m
        });
    manifest
        .read_keys
        .wrapped
        .insert(signing_pubkey.clone(), wrapped);
    // Register ourselves as a contributor exactly once.
    let already_contributor = manifest
        .contributors
        .iter()
        .any(|c| c.identity.signing == signing_pubkey);
    if !already_contributor {
        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .map(|d| d.as_secs())
            .unwrap_or(0);
        manifest.contributors.push(Contributor {
            identity: ContributorId::new(signing_pubkey.clone(), recipient_pubkey),
            name: Some(username),
            nostr_pubkey: None,
            added_at: timestamp,
            added_by: signing_pubkey,
            // NOTE(review): the self-added entry carries an empty signature —
            // confirm this is accepted downstream.
            signature: vec![],
        });
    }
    save_manifest(void_dir, &manifest)
        .map_err(|e| CliError::internal(format!("failed to write manifest: {}", e)))?;
    Ok(())
}
/// Write the initial `.void` config for a freshly cloned repository,
/// persisting the repo secret (hex-encoded) plus the optional registry
/// identity (`repo_id` / `repo_name`); everything else stays at defaults.
fn save_config(
    void_dir: &Path,
    clone_result: &CloneResult,
    repo_id: Option<&str>,
    repo_name: Option<&str>,
) -> Result<(), CliError> {
    let config = Config {
        version: Some(1),
        created: Some(chrono::Utc::now().to_rfc3339()),
        repo_secret: Some(hex::encode(clone_result.repo_secret.as_bytes())),
        repo_id: repo_id.map(str::to_string),
        repo_name: repo_name.map(str::to_string),
        ipfs: None,
        tor: None,
        user: Default::default(),
        core: CoreConfig::default(),
        remote: Default::default(),
    };
    void_core::config::save(void_dir, &config)
        .map_err(|e| CliError::internal(format!("failed to write config file: {}", e)))?;
    Ok(())
}
/// Restore the working tree for `commit_cid_str` from the local object store
/// into `workspace`, returning the number of files restored.
fn checkout_working_tree(
    void_dir: &Path,
    vault: &KeyVault,
    commit_cid_str: &str,
    workspace: &Path,
    observer: Arc<ProgressObserver>,
) -> Result<usize, CliError> {
    let cid = cid::parse(commit_cid_str)
        .map_err(|e| CliError::internal(format!("invalid commit CID: {}", e)))?;
    let objects = Utf8PathBuf::try_from(void_dir.join("objects"))
        .map_err(|e| CliError::internal(format!("invalid objects path: {}", e)))?;
    let store = FsStore::new(objects)
        .map_err(|e| CliError::internal(format!("failed to open store: {}", e)))?;
    // Forced checkout of all paths; NOTE(review): include_large=false
    // presumably defers large blobs — confirm against CheckoutOptions docs.
    let opts = CheckoutOptions {
        paths: None,
        force: true,
        observer: Some(observer),
        workspace_dir: None,
        include_large: false,
    };
    checkout_tree(&store, vault, &cid, workspace, &opts)
        .map_err(void_err_to_cli)
        .map(|result| result.files_restored)
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parse_key_valid() {
        let hex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef";
        let key = parse_key(hex).unwrap();
        assert_eq!(key.len(), 32);
        assert_eq!(key[0], 0x01);
        assert_eq!(key[1], 0x23);
    }

    #[test]
    fn test_parse_key_with_whitespace() {
        let hex = " 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef \n";
        let key = parse_key(hex).unwrap();
        assert_eq!(key.len(), 32);
    }

    #[test]
    fn test_parse_key_invalid_hex() {
        let hex = "not-valid-hex";
        let result = parse_key(hex);
        assert!(result.is_err());
        assert!(result.unwrap_err().message.contains("invalid key hex"));
    }

    #[test]
    fn test_parse_key_wrong_length() {
        let hex = "0123456789abcdef";
        let result = parse_key(hex);
        assert!(result.is_err());
        assert!(result.unwrap_err().message.contains("32 bytes"));
    }

    #[test]
    fn test_parse_mode_full() {
        assert_eq!(parse_mode("full").unwrap(), CloneMode::Full);
        assert_eq!(parse_mode("FULL").unwrap(), CloneMode::Full);
        assert_eq!(parse_mode("Full").unwrap(), CloneMode::Full);
    }

    #[test]
    fn test_parse_mode_depth1() {
        assert_eq!(parse_mode("depth1").unwrap(), CloneMode::Depth1);
        assert_eq!(parse_mode("DEPTH1").unwrap(), CloneMode::Depth1);
    }

    #[test]
    fn test_parse_mode_lazy() {
        assert_eq!(parse_mode("lazy").unwrap(), CloneMode::Lazy);
        assert_eq!(parse_mode("LAZY").unwrap(), CloneMode::Lazy);
    }

    // Added: the fourth mode was previously untested.
    #[test]
    fn test_parse_mode_virtual() {
        assert_eq!(parse_mode("virtual").unwrap(), CloneMode::Virtual);
        assert_eq!(parse_mode("VIRTUAL").unwrap(), CloneMode::Virtual);
    }

    #[test]
    fn test_parse_mode_invalid() {
        let result = parse_mode("invalid");
        assert!(result.is_err());
        assert!(result.unwrap_err().message.contains("invalid clone mode"));
    }

    #[test]
    fn test_mode_to_string() {
        assert_eq!(mode_to_string(CloneMode::Full), "full");
        assert_eq!(mode_to_string(CloneMode::Depth1), "depth1");
        assert_eq!(mode_to_string(CloneMode::Lazy), "lazy");
        // Added: Virtual was previously uncovered.
        assert_eq!(mode_to_string(CloneMode::Virtual), "virtual");
    }

    #[test]
    fn test_clone_output_serialization() {
        let output = CloneOutput {
            path: "/path/to/repo".to_string(),
            commit: "bafyabc123".to_string(),
            metadata: "bafydef456".to_string(),
            mode: "full".to_string(),
            shards_fetched: 10,
            shards_total: 15,
            files_extracted: Some(100),
            source: None,
        };
        let json = serde_json::to_string(&output).unwrap();
        assert!(json.contains("\"path\":\"/path/to/repo\""));
        assert!(json.contains("\"commit\":\"bafyabc123\""));
        assert!(json.contains("\"metadata\":\"bafydef456\""));
        assert!(json.contains("\"shardsFetched\":10"));
        assert!(json.contains("\"shardsTotal\":15"));
        assert!(json.contains("\"mode\":\"full\""));
        assert!(json.contains("\"filesExtracted\":100"));
        // `source` is None and must be skipped entirely.
        assert!(!json.contains("\"source\""));
    }

    #[test]
    fn test_clone_output_serialization_depth1() {
        let output = CloneOutput {
            path: "/path/to/repo".to_string(),
            commit: "bafyabc123".to_string(),
            metadata: "bafydef456".to_string(),
            mode: "depth1".to_string(),
            shards_fetched: 5,
            shards_total: 15,
            files_extracted: None,
            source: Some("alice/proj/main".to_string()),
        };
        let json = serde_json::to_string(&output).unwrap();
        assert!(json.contains("\"path\":\"/path/to/repo\""));
        assert!(json.contains("\"mode\":\"depth1\""));
        assert!(json.contains("\"source\":\"alice/proj/main\""));
        // `files_extracted` is None and must be skipped entirely.
        assert!(!json.contains("\"filesExtracted\""));
    }
}