use crate::config::{project_id_from_path, StoreLayout};
use crate::error::{ChkpttError, Result};
use crate::index::FileIndex;
use crate::ops::io_order::sort_scanned_for_locality;
use crate::ops::lock::ProjectLock;
use crate::ops::progress::{emit, ProgressCallback, ProgressEvent};
use crate::scanner::ScannedFile;
use crate::store::blob::{bytes_to_hex, hash_path_bytes};
use crate::store::catalog::{ManifestEntry, MetadataCatalog};
use crate::store::pack::{PackLocation, PackSet};
use crate::store::tree::{EntryType, TreeStore};
use std::collections::{BTreeMap, HashMap, HashSet};
use std::io::{BufWriter, Write};
use std::path::Path;
use std::sync::atomic::{AtomicU64, Ordering};
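/// Options controlling a restore: `dry_run` computes and reports the diff
/// without modifying the workspace, and `progress` receives progress events
/// as the restore proceeds.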
#[derive(Default)]
pub struct RestoreOptions {
pub dry_run: bool,
pub progress: ProgressCallback,
}
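/// Summary of a restore: the resolved snapshot id and counts of files that
/// were added, changed, removed, or left untouched.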
#[derive(Debug)]
pub struct RestoreResult {
pub snapshot_id: String,
pub files_added: u64,
pub files_changed: u64,
pub files_removed: u64,
pub files_unchanged: u64,
}
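/// Hash and file type of a path as it currently exists in the workspace.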
struct CurrentFileState {
hash: [u8; 16],
is_symlink: bool,
}
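/// Hash and file type of a path as recorded in the target snapshot.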
struct TargetFileState {
hash: [u8; 16],
is_symlink: bool,
}
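/// Paths partitioned by the action needed to move the workspace to the target
/// snapshot, plus the count of files that already match.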
struct RestoreDiff {
files_to_add: Vec<String>,
files_to_change: Vec<String>,
files_to_remove: Vec<String>,
files_unchanged: u64,
}
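/// Where a blob's content is read from during restore; currently always a
/// location inside an opened pack.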
#[derive(Debug, Clone, Copy)]
enum RestoreSource {
Packed(PackLocation),
}
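/// A single file to materialize: its workspace-relative path, whether it is a
/// symlink, and where its content comes from.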
#[derive(Debug, Clone)]
struct RestoreTask {
path: String,
is_symlink: bool,
source: RestoreSource,
}
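/// Joins a directory prefix and an entry name with `/`, treating an empty
/// prefix as the workspace root.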
fn join_relative_path(prefix: &str, name: &str) -> String {
if prefix.is_empty() {
return name.to_owned();
}
let mut path = String::with_capacity(prefix.len() + 1 + name.len());
path.push_str(prefix);
path.push('/');
path.push_str(name);
path
}
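/// Recursively walks a stored tree and records the hash and file type of every
/// file and symlink it contains, keyed by workspace-relative path.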
fn collect_tree_files(
tree_store: &TreeStore,
tree_hash_hex: &str,
prefix: &str,
result: &mut BTreeMap<String, TargetFileState>,
) -> Result<()> {
let entries = tree_store.read(tree_hash_hex)?;
for entry in &entries {
let path = join_relative_path(prefix, &entry.name);
match entry.entry_type {
EntryType::File => {
result.insert(
path,
TargetFileState {
hash: entry.hash,
is_symlink: false,
},
);
}
EntryType::Dir => {
let subtree_hash_hex = bytes_to_hex(&entry.hash);
collect_tree_files(tree_store, &subtree_hash_hex, &path, result)?;
}
EntryType::Symlink => {
result.insert(
path,
TargetFileState {
hash: entry.hash,
is_symlink: true,
},
);
}
}
}
Ok(())
}
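/// Builds the target state directly from a snapshot manifest, deriving the
/// symlink flag from the recorded mode bits.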
fn target_state_from_manifest(manifest: &[ManifestEntry]) -> BTreeMap<String, TargetFileState> {
manifest
.iter()
.map(|entry| {
(
entry.path.clone(),
TargetFileState {
hash: entry.blob_hash,
is_symlink: mode_is_symlink(entry.mode),
},
)
})
.collect()
}
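/// Scans the workspace and returns the hash and file type of every tracked
/// file, reusing index-cached hashes when size, mtime, inode, and mode still
/// match and rehashing the remaining files.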
fn scan_current_state(
workspace_root: &Path,
cached_entries: &HashMap<String, crate::index::FileEntry>,
include_deps: bool,
) -> Result<BTreeMap<String, CurrentFileState>> {
let scanned = crate::scanner::scan_workspace_with_options(workspace_root, None, include_deps)?;
let mut state = BTreeMap::new();
let mut stale_files = Vec::with_capacity(scanned.len());
for file in scanned {
if let Some(hash) = cached_hash_bytes(&file, cached_entries) {
state.insert(
file.relative_path.clone(),
CurrentFileState {
hash,
is_symlink: file.is_symlink,
},
);
} else {
stale_files.push(file);
}
}
for (file, hash) in hash_scanned_files(stale_files)? {
state.insert(
file.relative_path.clone(),
CurrentFileState {
hash,
is_symlink: file.is_symlink,
},
);
}
Ok(state)
}
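/// Creates every directory that will contain a restored file, deduplicating
/// parents, before the (possibly parallel) file writes begin.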
fn precreate_restore_directories(
workspace_root: &Path,
restore_tasks: &[RestoreTask],
) -> Result<()> {
let mut seen = HashSet::with_capacity(restore_tasks.len() / 4);
for task in restore_tasks {
if let Some(parent) = Path::new(&task.path).parent() {
if parent.as_os_str().is_empty() {
continue;
}
if seen.insert(parent.to_path_buf()) {
std::fs::create_dir_all(workspace_root.join(parent))?;
}
}
}
Ok(())
}
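/// Restores the given tasks, writing files in parallel across available cores
/// (one contiguous chunk of tasks per worker) and emitting a progress event
/// per completed file. Returns the paths that were restored.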
fn restore_files(
workspace_root: &Path,
restore_tasks: &[RestoreTask],
pack_set: &PackSet,
progress: &ProgressCallback,
progress_counter: &AtomicU64,
restore_total: u64,
) -> Result<Vec<String>> {
if restore_tasks.is_empty() {
return Ok(Vec::new());
}
precreate_restore_directories(workspace_root, restore_tasks)?;
let worker_count = std::thread::available_parallelism()
.map(|count| count.get())
.unwrap_or(1)
.min(restore_tasks.len());
if worker_count <= 1 {
let mut restored = Vec::with_capacity(restore_tasks.len());
for task in restore_tasks {
restore_file(workspace_root, task, pack_set)?;
let completed = progress_counter.fetch_add(1, Ordering::Relaxed) + 1;
emit(
progress,
ProgressEvent::RestoreFile {
completed,
total: restore_total,
},
);
restored.push(task.path.clone());
}
return Ok(restored);
}
let chunk_size = restore_tasks.len().div_ceil(worker_count);
std::thread::scope(|scope| {
let workers: Vec<_> = restore_tasks
.chunks(chunk_size)
.map(|chunk| {
scope.spawn(move || -> Result<Vec<String>> {
let mut restored = Vec::with_capacity(chunk.len());
for task in chunk {
restore_file(workspace_root, task, pack_set)?;
let completed = progress_counter.fetch_add(1, Ordering::Relaxed) + 1;
emit(
progress,
ProgressEvent::RestoreFile {
completed,
total: restore_total,
},
);
restored.push(task.path.clone());
}
Ok(restored)
})
})
.collect();
let mut restored_paths = Vec::with_capacity(restore_tasks.len());
for worker in workers {
let chunk = worker
.join()
.map_err(|_| ChkpttError::Other("restore worker thread panicked".into()))??;
restored_paths.extend(chunk);
}
Ok(restored_paths)
})
}
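/// Restores one file from its pack location: removes a pre-existing symlink
/// (or any existing file when the new entry is a symlink), then either
/// recreates the symlink or streams the blob into the file through a buffered
/// writer.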
fn restore_file(workspace_root: &Path, task: &RestoreTask, pack_set: &PackSet) -> Result<()> {
let file_path = workspace_root.join(&task.path);
if let Ok(metadata) = std::fs::symlink_metadata(&file_path) {
if metadata.file_type().is_symlink() || task.is_symlink {
std::fs::remove_file(&file_path)?;
}
}
match task.source {
RestoreSource::Packed(location) => {
if task.is_symlink {
let mut content = Vec::new();
pack_set.copy_to_writer(&location, &mut content)?;
restore_symlink(&file_path, &content)?;
} else {
let file = std::fs::File::create(&file_path)?;
let mut writer = BufWriter::with_capacity(256 * 1024, file);
pack_set.copy_to_writer(&location, &mut writer)?;
writer.flush()?;
}
}
}
Ok(())
}
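/// Recreates a symlink whose target is stored as raw bytes; only supported on
/// unix, other platforms return `RestoreFailed`.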
#[cfg(unix)]
fn restore_symlink(path: &Path, target_bytes: &[u8]) -> Result<()> {
use std::os::unix::ffi::OsStrExt;
let target = std::ffi::OsStr::from_bytes(target_bytes);
std::os::unix::fs::symlink(target, path)?;
Ok(())
}
#[cfg(not(unix))]
fn restore_symlink(_path: &Path, _target_bytes: &[u8]) -> Result<()> {
Err(ChkpttError::RestoreFailed(
"symlink restore is only supported on unix platforms".into(),
))
}
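/// Looks up where each needed blob lives: collects the unique target hashes
/// for the paths to add or change, resolves their pack locations from the
/// catalog, opens only the packs that are actually referenced, and maps every
/// hash to a `RestoreSource`.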
fn resolve_restore_sources(
files_to_add: &[String],
files_to_change: &[String],
target_state: &BTreeMap<String, TargetFileState>,
catalog: &MetadataCatalog,
packs_dir: &Path,
) -> Result<(PackSet, HashMap<[u8; 16], RestoreSource>)> {
let candidate_count = files_to_add.len() + files_to_change.len();
let mut seen_hashes = HashSet::with_capacity(candidate_count);
let mut packed_hashes = Vec::with_capacity(candidate_count);
for path in files_to_add.iter().chain(files_to_change.iter()) {
let target = target_state
.get(path)
.expect("target hash missing for restore source");
if !seen_hashes.insert(target.hash) {
continue;
}
packed_hashes.push(target.hash);
}
if packed_hashes.is_empty() {
return Ok((PackSet::empty(), HashMap::new()));
}
let blob_locations = catalog.blob_locations_for_hashes(&packed_hashes)?;
let mut selected_pack_hashes = HashSet::with_capacity(packed_hashes.len());
let mut hash_to_pack: Vec<([u8; 16], String)> = Vec::with_capacity(packed_hashes.len());
for hash in &packed_hashes {
let location = blob_locations
.get(hash)
.ok_or_else(|| ChkpttError::ObjectNotFound(bytes_to_hex(hash)))?;
let pack_hash = location.pack_hash.as_ref().ok_or_else(|| {
ChkpttError::StoreCorrupted(format!(
"blob {} is not stored in a pack",
bytes_to_hex(hash)
))
})?;
selected_pack_hashes.insert(pack_hash.clone());
hash_to_pack.push((*hash, pack_hash.clone()));
}
let mut pack_hashes_vec: Vec<_> = selected_pack_hashes.into_iter().collect();
pack_hashes_vec.sort_unstable();
let pack_set = PackSet::open_selected(packs_dir, &pack_hashes_vec)?;
let mut sources = HashMap::with_capacity(hash_to_pack.len());
for (hash, pack_hash) in hash_to_pack {
let location = pack_set
.locate_in_pack_bytes(&pack_hash, &hash)
.ok_or_else(|| ChkpttError::ObjectNotFound(bytes_to_hex(&hash)))?;
sources.insert(hash, RestoreSource::Packed(location));
}
Ok((pack_set, sources))
}
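/// Pairs each path to add or change with its restore source and sorts the
/// tasks by pack and offset so reads stay sequential within each pack.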
fn build_restore_tasks(
files_to_add: &[String],
files_to_change: &[String],
target_state: &BTreeMap<String, TargetFileState>,
restore_sources: &HashMap<[u8; 16], RestoreSource>,
) -> Result<Vec<RestoreTask>> {
let mut tasks = Vec::with_capacity(files_to_add.len() + files_to_change.len());
for path in files_to_add.iter().chain(files_to_change.iter()) {
let target = target_state
.get(path)
.expect("target hash missing for restore task");
let source = *restore_sources
.get(&target.hash)
.ok_or_else(|| ChkpttError::ObjectNotFound(bytes_to_hex(&target.hash)))?;
tasks.push(RestoreTask {
path: path.clone(),
is_symlink: target.is_symlink,
source,
});
}
tasks.sort_unstable_by(|left, right| match (&left.source, &right.source) {
(RestoreSource::Packed(left_location), RestoreSource::Packed(right_location)) => (
left_location.reader_index,
left_location.offset,
left.path.as_str(),
)
.cmp(&(
right_location.reader_index,
right_location.offset,
right.path.as_str(),
)),
});
Ok(tasks)
}
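/// Merges the two sorted states and classifies each path as add, change,
/// remove, or unchanged. A path counts as changed when either the hash or the
/// symlink flag differs.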
fn diff_restore_states(
target_state: &BTreeMap<String, TargetFileState>,
current_state: &BTreeMap<String, CurrentFileState>,
) -> RestoreDiff {
let mut files_to_add = Vec::with_capacity(target_state.len());
let mut files_to_change = Vec::with_capacity(target_state.len().min(current_state.len()));
let mut files_to_remove = Vec::with_capacity(current_state.len());
let mut files_unchanged = 0;
let mut target_iter = target_state.iter().peekable();
let mut current_iter = current_state.iter().peekable();
loop {
match (target_iter.peek(), current_iter.peek()) {
(Some((target_path, target_file)), Some((current_path, current_file))) => {
match target_path.cmp(current_path) {
std::cmp::Ordering::Less => {
files_to_add.push((*target_path).clone());
target_iter.next();
}
std::cmp::Ordering::Greater => {
files_to_remove.push((*current_path).clone());
current_iter.next();
}
std::cmp::Ordering::Equal => {
if target_file.hash != current_file.hash
|| target_file.is_symlink != current_file.is_symlink
{
files_to_change.push((*target_path).clone());
} else {
files_unchanged += 1;
}
target_iter.next();
current_iter.next();
}
}
}
(Some((target_path, _)), None) => {
files_to_add.push((*target_path).clone());
target_iter.next();
}
(None, Some((current_path, _))) => {
files_to_remove.push((*current_path).clone());
current_iter.next();
}
(None, None) => break,
}
}
RestoreDiff {
files_to_add,
files_to_change,
files_to_remove,
files_unchanged,
}
}
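/// Restores the workspace at `workspace_root` to the state recorded by
/// `snapshot_id` (any reference the catalog can resolve).
///
/// Acquires the project lock, diffs the snapshot against the current
/// workspace, then adds, rewrites, and removes files as needed and updates the
/// file index. With `dry_run` set, only the diff is computed and reported.
///
/// # Example
///
/// A minimal sketch of a call site; the snapshot id is illustrative, and the
/// paths assume this module is exposed as `chkptt::ops::restore` (adjust to
/// the real crate layout):
///
/// ```no_run
/// use chkptt::ops::restore::{restore, RestoreOptions};
/// use std::path::Path;
///
/// if let Ok(result) = restore(
/// Path::new("/path/to/workspace"),
/// "snap-1234",
/// RestoreOptions::default(),
/// ) {
/// println!("restored to snapshot {}", result.snapshot_id);
/// }
/// ```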
pub fn restore(
workspace_root: &Path,
snapshot_id: &str,
options: RestoreOptions,
) -> Result<RestoreResult> {
let project_id = project_id_from_path(workspace_root);
let layout = StoreLayout::new(&project_id);
layout.ensure_dirs()?;
let _lock = ProjectLock::acquire(&layout.locks_dir())?;
let catalog = MetadataCatalog::open(layout.catalog_path())?;
let resolved_snapshot = catalog.resolve_snapshot_ref(snapshot_id)?;
let resolved_id = resolved_snapshot.id.clone();
let manifest = catalog.snapshot_manifest(&resolved_id)?;
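// Build the target state: an empty snapshot needs no entries; otherwise use
// the manifest rows, falling back to walking the stored root tree when the
// manifest has none.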
let target_state = if resolved_snapshot.stats.total_files == 0 {
BTreeMap::new()
} else if manifest.is_empty() {
let tree_store = TreeStore::new(layout.trees_dir());
let root_tree_hash = resolved_snapshot.root_tree_hash.ok_or_else(|| {
ChkpttError::StoreCorrupted(format!(
"snapshot '{}' is missing both manifest entries and root_tree_hash",
resolved_id
))
})?;
let root_tree_hash_hex = bytes_to_hex(&root_tree_hash);
let mut state = BTreeMap::new();
collect_tree_files(&tree_store, &root_tree_hash_hex, "", &mut state)?;
state
} else {
target_state_from_manifest(&manifest)
};
let target_includes_deps = target_state
.keys()
.any(|path| path_contains_dependency_dir(path));
let mut index = FileIndex::open(layout.index_path())?;
let cached_entries = index.entries();
let current_state = scan_current_state(workspace_root, &cached_entries, target_includes_deps)?;
emit(
&options.progress,
ProgressEvent::ScanCurrentComplete {
file_count: current_state.len() as u64,
},
);
let diff = diff_restore_states(&target_state, &current_state);
let files_to_add = diff.files_to_add;
let files_to_change = diff.files_to_change;
let files_to_remove = diff.files_to_remove;
let files_unchanged = diff.files_unchanged;
let result = RestoreResult {
snapshot_id: resolved_id.clone(),
files_added: files_to_add.len() as u64,
files_changed: files_to_change.len() as u64,
files_removed: files_to_remove.len() as u64,
files_unchanged,
};
if options.dry_run {
return Ok(result);
}
let packs_dir = layout.packs_dir();
let (pack_set, restore_sources) = resolve_restore_sources(
&files_to_add,
&files_to_change,
&target_state,
&catalog,
&packs_dir,
)?;
let restore_total = (files_to_add.len() + files_to_change.len() + files_to_remove.len()) as u64;
emit(
&options.progress,
ProgressEvent::RestoreStart {
add: files_to_add.len() as u64,
change: files_to_change.len() as u64,
remove: files_to_remove.len() as u64,
},
);
let restore_tasks = build_restore_tasks(
&files_to_add,
&files_to_change,
&target_state,
&restore_sources,
)?;
let restore_progress = AtomicU64::new(0);
let restored_paths = restore_files(
workspace_root,
&restore_tasks,
&pack_set,
&options.progress,
&restore_progress,
restore_total,
)?;
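// Remove files that exist locally but are absent from the target snapshot,
// splitting the work across threads when there are enough of them.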
{
let remove_worker_count = std::thread::available_parallelism()
.map(|n| n.get())
.unwrap_or(1)
.min(files_to_remove.len().max(1));
if remove_worker_count <= 1 {
for path in &files_to_remove {
let file_path = workspace_root.join(path);
match std::fs::remove_file(&file_path) {
Ok(()) => {}
Err(error) if error.kind() == std::io::ErrorKind::NotFound => {}
Err(error) => return Err(error.into()),
}
let completed = restore_progress.fetch_add(1, Ordering::Relaxed) + 1;
emit(
&options.progress,
ProgressEvent::RestoreFile {
completed,
total: restore_total,
},
);
}
} else {
let chunk_size = files_to_remove.len().div_ceil(remove_worker_count);
let progress_ref = &restore_progress;
let progress_cb_ref = &options.progress;
std::thread::scope(|scope| -> Result<()> {
let workers: Vec<_> = files_to_remove
.chunks(chunk_size)
.map(|chunk| {
scope.spawn(move || -> Result<()> {
for path in chunk {
let file_path = workspace_root.join(path);
match std::fs::remove_file(&file_path) {
Ok(()) => {}
Err(error) if error.kind() == std::io::ErrorKind::NotFound => {}
Err(error) => return Err(error.into()),
}
let completed = progress_ref.fetch_add(1, Ordering::Relaxed) + 1;
emit(
progress_cb_ref,
ProgressEvent::RestoreFile {
completed,
total: restore_total,
},
);
}
Ok(())
})
})
.collect();
for worker in workers {
worker.join().map_err(|_| {
ChkpttError::Other("file removal worker thread panicked".into())
})??;
}
Ok(())
})?;
}
}
cleanup_removed_file_parents(workspace_root, &files_to_remove)?;
let file_entries = restored_index_entries(workspace_root, &restored_paths, &target_state)?;
index.apply_changes(&files_to_remove, &file_entries)?;
Ok(result)
}
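/// Returns true when any path component is a known dependency or tool-cache
/// directory (e.g. `node_modules`, `.venv`), used to decide whether the
/// current-state scan must include dependency directories.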
fn path_contains_dependency_dir(relative_path: &str) -> bool {
relative_path.split('/').any(|component| {
matches!(
component,
"node_modules"
| ".venv"
| "venv"
| "__pypackages__"
| ".tox"
| ".nox"
| ".gradle"
| ".m2"
)
})
}
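/// Checks the file-type bits of a unix mode for the symlink type (`0o120000`).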
fn mode_is_symlink(mode: u32) -> bool {
(mode & 0o170000) == 0o120000
}
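/// Builds fresh index entries for the restored paths by re-reading each file's
/// metadata and pairing it with the blob hash from the target state.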
fn restored_index_entries(
workspace_root: &Path,
restored_paths: &[String],
target_state: &BTreeMap<String, TargetFileState>,
) -> Result<Vec<crate::index::FileEntry>> {
let mut file_entries = Vec::with_capacity(restored_paths.len());
for path in restored_paths {
let absolute_path = workspace_root.join(path);
let metadata = std::fs::symlink_metadata(&absolute_path)?;
let target = target_state.get(path).ok_or_else(|| {
ChkpttError::RestoreFailed(format!("Missing target hash for {}", path))
})?;
let scanned = scanned_file_from_metadata(path.clone(), absolute_path, &metadata);
file_entries.push(crate::index::FileEntry {
path: scanned.relative_path,
blob_hash: target.hash,
size: scanned.size,
mtime_secs: scanned.mtime_secs,
mtime_nanos: scanned.mtime_nanos,
inode: scanned.inode,
mode: scanned.mode,
});
}
Ok(file_entries)
}
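/// Returns the cached blob hash for a scanned file when its index entry still
/// matches the file's mtime, size, inode, and mode; otherwise the file must be
/// rehashed.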
fn cached_hash_bytes(
file: &ScannedFile,
cached_entries: &HashMap<String, crate::index::FileEntry>,
) -> Option<[u8; 16]> {
let cached = cached_entries.get(&file.relative_path)?;
if cached.mtime_secs == file.mtime_secs
&& cached.mtime_nanos == file.mtime_nanos
&& cached.size == file.size
&& cached.inode == file.inode
&& cached.mode == file.mode
{
Some(cached.blob_hash)
} else {
None
}
}
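/// Hashes the given files, sorting them for I/O locality first and splitting
/// the work across available cores when there is more than one file.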
fn hash_scanned_files(scanned_files: Vec<ScannedFile>) -> Result<Vec<(ScannedFile, [u8; 16])>> {
if scanned_files.is_empty() {
return Ok(Vec::new());
}
let mut scanned_files = scanned_files;
sort_scanned_for_locality(&mut scanned_files);
let worker_count = std::thread::available_parallelism()
.map(|count| count.get())
.unwrap_or(1)
.min(scanned_files.len());
if worker_count <= 1 {
return scanned_files
.into_iter()
.map(|file| {
Ok((
file.clone(),
hash_path_bytes(&file.absolute_path, file.is_symlink)?,
))
})
.collect();
}
let chunk_size = scanned_files.len().div_ceil(worker_count);
std::thread::scope(|scope| {
let mut workers = Vec::with_capacity(scanned_files.len().div_ceil(chunk_size));
for chunk in scanned_files.chunks(chunk_size) {
workers.push(
scope.spawn(move || -> Result<Vec<(ScannedFile, [u8; 16])>> {
chunk
.iter()
.map(|file| {
Ok((
file.clone(),
hash_path_bytes(&file.absolute_path, file.is_symlink)?,
))
})
.collect()
}),
);
}
let mut hashed = Vec::with_capacity(scanned_files.len());
for worker in workers {
let chunk = worker
.join()
.map_err(|_| ChkpttError::Other("restore worker thread panicked".into()))??;
hashed.extend(chunk);
}
Ok(hashed)
})
}
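/// Rebuilds a `ScannedFile` from freshly read metadata; the unix variant keeps
/// device, inode, and mode from the metadata, while other platforms fall back
/// to synthetic modes with no device or inode.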
#[cfg(unix)]
fn scanned_file_from_metadata(
relative_path: String,
absolute_path: std::path::PathBuf,
metadata: &std::fs::Metadata,
) -> ScannedFile {
use std::os::unix::fs::MetadataExt;
ScannedFile {
relative_path,
absolute_path,
size: metadata.len(),
mtime_secs: metadata.mtime(),
mtime_nanos: metadata.mtime_nsec(),
device: Some(metadata.dev()),
inode: Some(metadata.ino()),
mode: metadata.mode(),
is_symlink: metadata.file_type().is_symlink(),
}
}
#[cfg(not(unix))]
fn scanned_file_from_metadata(
relative_path: String,
absolute_path: std::path::PathBuf,
metadata: &std::fs::Metadata,
) -> ScannedFile {
use std::time::UNIX_EPOCH;
let (mtime_secs, mtime_nanos) = metadata
.modified()
.ok()
.and_then(|time| time.duration_since(UNIX_EPOCH).ok())
.map(|duration| (duration.as_secs() as i64, duration.subsec_nanos() as i64))
.unwrap_or((0, 0));
let is_symlink = metadata.file_type().is_symlink();
ScannedFile {
relative_path,
absolute_path,
size: metadata.len(),
mtime_secs,
mtime_nanos,
device: None,
inode: None,
mode: if is_symlink { 0o120000 } else { 0o644 },
is_symlink,
}
}
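/// Attempts to remove the now-possibly-empty ancestor directories of removed
/// files, deepest first, ignoring directories that are missing or still
/// contain entries.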
fn cleanup_removed_file_parents(root: &Path, removed_paths: &[String]) -> Result<()> {
if removed_paths.is_empty() {
return Ok(());
}
let mut dir_depths: HashMap<String, usize> = HashMap::new();
for removed_path in removed_paths {
let mut path_str = removed_path.as_str();
while let Some(pos) = path_str.rfind('/') {
path_str = &path_str[..pos];
let depth = path_str.matches('/').count() + 1;
dir_depths.entry(path_str.to_string()).or_insert(depth);
}
}
let mut candidates: Vec<(String, usize)> = dir_depths.into_iter().collect();
candidates.sort_unstable_by(|a, b| b.1.cmp(&a.1).then_with(|| a.0.cmp(&b.0)));
for (relative_dir, _depth) in candidates {
let dir = root.join(&relative_dir);
match std::fs::remove_dir(&dir) {
Ok(()) => {}
Err(error) if error.kind() == std::io::ErrorKind::NotFound => {}
Err(error) if error.kind() == std::io::ErrorKind::DirectoryNotEmpty => {}
Err(error) => return Err(error.into()),
}
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use tempfile::TempDir;
#[test]
fn test_cleanup_removed_file_parents_removes_only_empty_ancestor_chain() {
let dir = TempDir::new().unwrap();
let root = dir.path();
let empty_leaf = root.join("a/b/c");
std::fs::create_dir_all(&empty_leaf).unwrap();
std::fs::write(empty_leaf.join("gone.txt"), b"gone").unwrap();
std::fs::remove_file(empty_leaf.join("gone.txt")).unwrap();
let non_empty_leaf = root.join("a/keep");
std::fs::create_dir_all(&non_empty_leaf).unwrap();
std::fs::write(non_empty_leaf.join("keep.txt"), b"keep").unwrap();
cleanup_removed_file_parents(root, &[String::from("a/b/c/gone.txt")]).unwrap();
assert!(!root.join("a/b/c").exists());
assert!(!root.join("a/b").exists());
assert!(root.join("a").exists());
assert!(root.join("a/keep/keep.txt").exists());
}
#[test]
fn test_cleanup_removed_file_parents_skips_non_empty_directories() {
let dir = TempDir::new().unwrap();
let root = dir.path();
let shared = root.join("shared");
std::fs::create_dir_all(&shared).unwrap();
std::fs::write(shared.join("still-here.txt"), b"keep").unwrap();
cleanup_removed_file_parents(root, &[String::from("shared/gone.txt")]).unwrap();
assert!(root.join("shared").exists());
assert!(root.join("shared/still-here.txt").exists());
}
#[test]
fn test_diff_restore_states_classifies_paths() {
let target_state = BTreeMap::from([
(
"a.txt".to_string(),
TargetFileState {
hash: hash_bytes("hash-a"),
is_symlink: false,
},
),
(
"b.txt".to_string(),
TargetFileState {
hash: hash_bytes("hash-b-target"),
is_symlink: true,
},
),
(
"c.txt".to_string(),
TargetFileState {
hash: hash_bytes("hash-c"),
is_symlink: false,
},
),
]);
let current_state = BTreeMap::from([
(
"b.txt".to_string(),
CurrentFileState {
hash: hash_bytes("hash-b-current"),
is_symlink: false,
},
),
(
"c.txt".to_string(),
CurrentFileState {
hash: hash_bytes("hash-c"),
is_symlink: false,
},
),
(
"d.txt".to_string(),
CurrentFileState {
hash: hash_bytes("hash-d"),
is_symlink: false,
},
),
]);
let diff = diff_restore_states(&target_state, &current_state);
assert_eq!(diff.files_to_add, vec!["a.txt".to_string()]);
assert_eq!(diff.files_to_change, vec!["b.txt".to_string()]);
assert_eq!(diff.files_to_remove, vec!["d.txt".to_string()]);
assert_eq!(diff.files_unchanged, 1);
}
#[test]
fn test_diff_restore_states_handles_empty_inputs() {
let target_state: BTreeMap<String, TargetFileState> = BTreeMap::new();
let current_state: BTreeMap<String, CurrentFileState> = BTreeMap::new();
let diff = diff_restore_states(&target_state, &current_state);
assert!(diff.files_to_add.is_empty());
assert!(diff.files_to_change.is_empty());
assert!(diff.files_to_remove.is_empty());
assert_eq!(diff.files_unchanged, 0);
}
#[test]
fn test_diff_restore_states_detects_type_changes() {
let target_state = BTreeMap::from([(
"link".to_string(),
TargetFileState {
hash: hash_bytes("same-hash"),
is_symlink: true,
},
)]);
let current_state = BTreeMap::from([(
"link".to_string(),
CurrentFileState {
hash: hash_bytes("same-hash"),
is_symlink: false,
},
)]);
let diff = diff_restore_states(&target_state, &current_state);
assert_eq!(diff.files_to_change, vec!["link".to_string()]);
}
fn hash_bytes(label: &str) -> [u8; 16] {
xxhash_rust::xxh3::xxh3_128(label.as_bytes()).to_le_bytes()
}
}