Enum git_worktree::fs::cache::State
pub enum State {
CreateDirectoryAndAttributesStack {
unlink_on_collision: bool,
test_mkdir_calls: usize,
attributes: Attributes,
},
AttributesAndIgnoreStack {
attributes: Attributes,
ignore: Ignore,
},
IgnoreStack(Ignore),
}
Variants

CreateDirectoryAndAttributesStack

Fields

unlink_on_collision: bool
If there is a symlink or a file in our path, try to unlink it before creating the directory.

attributes: Attributes
State to handle attribute information.

Useful for checkout, where directories need to be created while attribute information must remain accessible.

AttributesAndIgnoreStack

Fields

attributes: Attributes
State to handle attribute information.

ignore: Ignore
State to handle ignore (exclude) information.

Used when adding files, which requires access to both attribute and ignore information.

IgnoreStack(Ignore)

Used when providing worktree status information.
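The variants map one-to-one onto the constructors documented under Implementations below. A minimal sketch, matching on the variants above to see which stacks a given state carries; describe is a hypothetical helper, not part of the crate:

use git_worktree::fs::cache::State;

// Hypothetical helper: reports which stacks a state keeps, based on the
// variants documented above.
fn describe(state: &State) -> &'static str {
    match state {
        State::CreateDirectoryAndAttributesStack { .. } => "checkout: create directories, attributes stack",
        State::AttributesAndIgnoreStack { .. } => "add: attributes and ignore stacks",
        State::IgnoreStack(_) => "status: ignore stack only",
    }
}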
Implementations

impl State

pub fn for_checkout(unlink_on_collision: bool, attributes: Attributes) -> Self
Configure a state to be suitable for checking out files.
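A minimal sketch, assuming the attributes state was assembled elsewhere (the repository example below derives it via options.attribute_globals.clone().into()); the state::Attributes path is assumed from the crate layout and state_for_checkout is a hypothetical helper:

use git_worktree::fs::cache::{state, State};

// Hypothetical helper: `true` asks the cache to unlink files or symlinks
// that collide with directories it needs to create.
fn state_for_checkout(attributes: state::Attributes) -> State {
    State::for_checkout(true, attributes)
}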
Examples found in repository
src/index/mod.rs (line 62)
fn checkout_inner<Find, E>(
index: &mut git_index::State,
paths: &git_index::PathStorage,
dir: impl Into<std::path::PathBuf>,
find: Find,
files: &mut impl Progress,
bytes: &mut impl Progress,
should_interrupt: &AtomicBool,
options: checkout::Options,
) -> Result<checkout::Outcome, checkout::Error<E>>
where
Find: for<'a> FnMut(&oid, &'a mut Vec<u8>) -> Result<git_object::BlobRef<'a>, E> + Send + Clone,
E: std::error::Error + Send + Sync + 'static,
{
let num_files = AtomicUsize::default();
let dir = dir.into();
let case = if options.fs.ignore_case {
git_glob::pattern::Case::Fold
} else {
git_glob::pattern::Case::Sensitive
};
let (chunk_size, thread_limit, num_threads) = git_features::parallel::optimize_chunk_size_and_thread_limit(
100,
index.entries().len().into(),
options.thread_limit,
None,
);
let state = fs::cache::State::for_checkout(options.overwrite_existing, options.attribute_globals.clone().into());
let attribute_files = state.build_attribute_list(index, paths, case);
let mut ctx = chunk::Context {
buf: Vec::new(),
path_cache: fs::Cache::new(dir, state, case, Vec::with_capacity(512), attribute_files),
find,
options,
num_files: &num_files,
};
let chunk::Outcome {
mut collisions,
mut errors,
mut bytes_written,
delayed,
} = if num_threads == 1 {
let entries_with_paths = interrupt::Iter::new(index.entries_mut_with_paths_in(paths), should_interrupt);
chunk::process(entries_with_paths, files, bytes, &mut ctx)?
} else {
let entries_with_paths = interrupt::Iter::new(index.entries_mut_with_paths_in(paths), should_interrupt);
in_parallel(
git_features::iter::Chunks {
inner: entries_with_paths,
size: chunk_size,
},
thread_limit,
{
let ctx = ctx.clone();
move |_| (progress::Discard, progress::Discard, ctx.clone())
},
|chunk, (files, bytes, ctx)| chunk::process(chunk.into_iter(), files, bytes, ctx),
chunk::Reduce {
files,
bytes,
num_files: &num_files,
aggregate: Default::default(),
marker: Default::default(),
},
)?
};
for (entry, entry_path) in delayed {
bytes_written += chunk::checkout_entry_handle_result(
entry,
entry_path,
&mut errors,
&mut collisions,
files,
bytes,
&mut ctx,
)? as u64;
}
Ok(checkout::Outcome {
files_updated: num_files.load(Ordering::Relaxed),
collisions,
errors,
bytes_written,
})
}
pub fn for_add(attributes: Attributes, ignore: Ignore) -> Self
Configure a state for adding files.
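No repository example is shown for this constructor. A minimal sketch, assuming pre-built attribute and ignore states; the state::Attributes and state::Ignore paths are assumed from the crate layout and state_for_add is a hypothetical helper:

use git_worktree::fs::cache::{state, State};

// Hypothetical helper: combines attribute and ignore stacks for add-like
// operations.
fn state_for_add(attributes: state::Attributes, ignore: state::Ignore) -> State {
    State::for_add(attributes, ignore)
}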
pub fn for_status(ignore: Ignore) -> Self
Configure a state for status retrieval.
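A minimal sketch along the same lines, with state::Ignore assumed from the crate layout and state_for_status a hypothetical helper:

use git_worktree::fs::cache::{state, State};

// Hypothetical helper: only the ignore stack is needed for status queries.
fn state_for_status(ignore: state::Ignore) -> State {
    State::for_status(ignore)
}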
impl State

pub fn build_attribute_list<'paths>(
    &self,
    index: &git_index::State,
    paths: &'paths PathStorageRef,
    case: Case
) -> Vec<(&'paths BStr, ObjectId)>
Returns a vec of tuples of relative index paths along with the best usable OID for either ignore files, attribute files, or both.
- ignores entries which aren't blobs
- ignores ignore entries which are not skip-worktree
- within merges, picks 'our' stage for both ignore and attribute files
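A condensed sketch of how the result typically feeds a fs::Cache, mirroring the repository example referenced below; parameter types follow that example and build_cache_for is a hypothetical helper:

use git_worktree::fs;
use git_worktree::fs::cache::State;

// Hypothetical helper mirroring the repository example: build the attribute
// list from the index, then hand state and list to the cache.
fn build_cache_for(
    state: State,
    index: &git_index::State,
    paths: &git_index::PathStorage,
    case: git_glob::pattern::Case,
    worktree_dir: std::path::PathBuf,
) {
    let attribute_files = state.build_attribute_list(index, paths, case);
    let _cache = fs::Cache::new(worktree_dir, state, case, Vec::with_capacity(512), attribute_files);
    // ... use the cache while writing out or querying worktree paths
}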
Examples found in repository
src/index/mod.rs (line 63): the same checkout_inner function shown in full under for_checkout above.