git_index/access/mod.rs
use std::cmp::Ordering;

use bstr::{BStr, ByteSlice, ByteVec};

use crate::{entry, extension, Entry, PathStorage, State, Version};

// TODO: integrate this somehow, somewhere, depending on later usage.
#[allow(dead_code)]
mod sparse;

/// General information and entries
impl State {
    /// Return the version used to store this state's information on disk.
    pub fn version(&self) -> Version {
        self.version
    }

    /// Return the kind of hashes used in this instance.
    pub fn object_hash(&self) -> git_hash::Kind {
        self.object_hash
    }

    /// Return our entries
    pub fn entries(&self) -> &[Entry] {
        &self.entries
    }
    /// Return our path backing, the place which keeps all paths one after another, with entries storing only the range to access them.
    pub fn path_backing(&self) -> &PathStorage {
        &self.path_backing
    }

    /// Runs `filter_map` on all entries, returning an iterator over all paths along with the result of `filter_map`.
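    ///
    /// For example, one could collect the ids of all entries whose path ends in `.rs` roughly like
    /// this (a sketch, not compiled as a doctest; `state` is assumed to be an existing [`State`]):
    ///
    /// ```ignore
    /// use bstr::ByteSlice;
    /// let rust_file_ids: Vec<_> = state
    ///     .entries_with_paths_by_filter_map(|path, e| path.ends_with_str(".rs").then(|| e.id))
    ///     .map(|(_path, id)| id)
    ///     .collect();
    /// ```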
    pub fn entries_with_paths_by_filter_map<'a, T>(
        &'a self,
        mut filter_map: impl FnMut(&'a BStr, &Entry) -> Option<T> + 'a,
    ) -> impl Iterator<Item = (&'a BStr, T)> + 'a {
        self.entries.iter().filter_map(move |e| {
            let p = e.path(self);
            filter_map(p, e).map(|t| (p, t))
        })
    }
    /// Return mutable entries along with their path, as obtained from `backing`.
    pub fn entries_mut_with_paths_in<'state, 'backing>(
        &'state mut self,
        backing: &'backing PathStorage,
    ) -> impl Iterator<Item = (&'state mut Entry, &'backing BStr)> {
        self.entries.iter_mut().map(move |e| {
            let path = backing[e.path.clone()].as_bstr();
            (e, path)
        })
    }

    /// Find the entry index in [`entries()`][State::entries()] matching the given repository-relative
    /// `path` and `stage`, or `None`.
    ///
    /// Use the index for accessing multiple stages if they exist, but at least the single matching entry.
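    ///
    /// A lookup might look roughly like this (a sketch, not compiled as a doctest; `state` is
    /// assumed to be an existing [`State`] and stage `0` the stage of interest):
    ///
    /// ```ignore
    /// if let Some(idx) = state.entry_index_by_path_and_stage("src/lib.rs".into(), 0) {
    ///     let entry = state.entry(idx);
    ///     // neighbouring indices may hold other stages of the same path, if present
    /// }
    /// ```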
    pub fn entry_index_by_path_and_stage(&self, path: &BStr, stage: entry::Stage) -> Option<usize> {
        self.entries
            .binary_search_by(|e| e.path(self).cmp(path).then_with(|| e.stage().cmp(&stage)))
            .ok()
    }

    /// Find the entry index in [`entries()[..upper_bound]`][State::entries()] matching the given repository-relative
    /// `path` and `stage`, or `None`.
    ///
    /// Use the index for accessing multiple stages if they exist, but at least the single matching entry.
    ///
    /// # Panics
    ///
    /// If `upper_bound` is out of bounds of our entries array.
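    ///
    /// The intended pattern is roughly the following (a sketch, not compiled as a doctest;
    /// `state` and `path` are assumed to exist):
    ///
    /// ```ignore
    /// let upper_bound = state.entries().len();
    /// // …push new entries with `dangerously_push_entry()`…
    /// let idx = state.entry_index_by_path_and_stage_bounded(path, 0, upper_bound);
    /// ```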
    pub fn entry_index_by_path_and_stage_bounded(
        &self,
        path: &BStr,
        stage: entry::Stage,
        upper_bound: usize,
    ) -> Option<usize> {
        self.entries[..upper_bound]
            .binary_search_by(|e| e.path(self).cmp(path).then_with(|| e.stage().cmp(&stage)))
            .ok()
    }

    /// Like [`entry_index_by_path_and_stage()`][State::entry_index_by_path_and_stage()],
    /// but returns the entry instead of the index.
    pub fn entry_by_path_and_stage(&self, path: &BStr, stage: entry::Stage) -> Option<&Entry> {
        self.entry_index_by_path_and_stage(path, stage)
            .map(|idx| &self.entries[idx])
    }

    /// Return the entry at `idx` or _panic_ if the index is out of bounds.
    ///
    /// The `idx` is typically returned by [`entry_index_by_path_and_stage()`][State::entry_index_by_path_and_stage()].
    pub fn entry(&self, idx: usize) -> &Entry {
        &self.entries[idx]
    }

    /// Returns `true` if the index is sparse.
    ///
    /// An index is sparse if it contains at least one [Mode::DIR][entry::Mode::DIR] entry.
    pub fn is_sparse(&self) -> bool {
        self.is_sparse
    }
}

/// Mutation
impl State {
    /// After usage of the storage obtained by [`take_path_backing()`][Self::take_path_backing()], return it here.
    /// Note that it must not be empty.
    pub fn return_path_backing(&mut self, backing: PathStorage) {
        debug_assert!(
            self.path_backing.is_empty(),
            "BUG: return path backing only after taking it, once"
        );
        self.path_backing = backing;
    }

    /// Return mutable entries in a slice.
    pub fn entries_mut(&mut self) -> &mut [Entry] {
        &mut self.entries
    }
    /// Return mutable entries along with their paths in an iterator.
    pub fn entries_mut_with_paths(&mut self) -> impl Iterator<Item = (&mut Entry, &BStr)> {
        let paths = &self.path_backing;
        self.entries.iter_mut().map(move |e| {
            let path = paths[e.path.clone()].as_bstr();
            (e, path)
        })
    }

    /// Sometimes it's needed to take the path backing out of the state to allow certain mutations to happen
    /// while still supporting reading the entries' paths.
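    ///
    /// The intended round-trip looks roughly like this (a sketch, not compiled as a doctest;
    /// `state` is assumed to be an existing [`State`]):
    ///
    /// ```ignore
    /// let backing = state.take_path_backing();
    /// for (entry, path) in state.entries_mut_with_paths_in(&backing) {
    ///     // mutate `entry` while still being able to read its `path`
    /// }
    /// state.return_path_backing(backing);
    /// ```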
    pub fn take_path_backing(&mut self) -> PathStorage {
        assert_eq!(
            self.entries.is_empty(),
            self.path_backing.is_empty(),
            "BUG: cannot take out backing multiple times"
        );
        std::mem::take(&mut self.path_backing)
    }

    /// Like [`entry_index_by_path_and_stage()`][State::entry_index_by_path_and_stage()],
    /// but returns the mutable entry instead of the index.
    pub fn entry_mut_by_path_and_stage(&mut self, path: &BStr, stage: entry::Stage) -> Option<&mut Entry> {
        self.entry_index_by_path_and_stage(path, stage)
            .map(|idx| &mut self.entries[idx])
    }

    /// Push a new entry containing `stat`, `id`, `flags`, `mode` and `path` to the end of our storage, without performing
    /// any sanity checks. This means it's possible to push a new entry to the same path on the same stage, and even after sorting
    /// the entries, lookups may still return the wrong one of them unless the correct binary search criterion is chosen.
    ///
    /// Note that this *is likely* to break invariants that will prevent further lookups by path unless
    /// [`entry_index_by_path_and_stage_bounded()`][State::entry_index_by_path_and_stage_bounded()] is used with
    /// the `upper_bound` being the number of entries before the first call to this method.
    ///
    /// Alternatively, make sure to call [`sort_entries()`][State::sort_entries()] before entry lookup by path to restore
    /// the invariant.
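    ///
    /// A sketch of pushing followed by restoring the sort invariant (not compiled as a doctest;
    /// `stat`, `id`, `flags` and `mode` are assumed to be available):
    ///
    /// ```ignore
    /// state.dangerously_push_entry(stat, id, flags, mode, "new/file".into());
    /// state.sort_entries();
    /// // lookups by path are reliable again
    /// let entry = state.entry_by_path_and_stage("new/file".into(), 0);
    /// ```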
    pub fn dangerously_push_entry(
        &mut self,
        stat: entry::Stat,
        id: git_hash::ObjectId,
        flags: entry::Flags,
        mode: entry::Mode,
        path: &BStr,
    ) {
        let path = {
            let path_start = self.path_backing.len();
            self.path_backing.push_str(path);
            path_start..self.path_backing.len()
        };

        self.entries.push(Entry {
            stat,
            id,
            flags,
            mode,
            path,
        });
    }

    /// Unconditionally sort entries as needed to perform lookups quickly.
    pub fn sort_entries(&mut self) {
        let path_backing = &self.path_backing;
        self.entries.sort_by(|a, b| {
            Entry::cmp_filepaths(a.path_in(path_backing), b.path_in(path_backing))
                .then_with(|| a.stage().cmp(&b.stage()))
        });
    }

    /// Similar to [`sort_entries()`][State::sort_entries()], but applies `compare` after comparing
    /// by path and stage, as a third criterion.
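    ///
    /// For example, ties on path and stage could be broken by the recorded file size roughly like
    /// this (a sketch, not compiled as a doctest; `state` is assumed to be an existing [`State`]
    /// and `stat.size` the on-disk size recorded for each entry):
    ///
    /// ```ignore
    /// state.sort_entries_by(|a, b| a.stat.size.cmp(&b.stat.size));
    /// ```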
    pub fn sort_entries_by(&mut self, mut compare: impl FnMut(&Entry, &Entry) -> Ordering) {
        let path_backing = &self.path_backing;
        self.entries.sort_by(|a, b| {
            Entry::cmp_filepaths(a.path_in(path_backing), b.path_in(path_backing))
                .then_with(|| a.stage().cmp(&b.stage()))
                .then_with(|| compare(a, b))
        });
    }
}

/// Extensions
impl State {
    /// Access the `tree` extension.
    pub fn tree(&self) -> Option<&extension::Tree> {
        self.tree.as_ref()
    }
    /// Access the `link` extension.
    pub fn link(&self) -> Option<&extension::Link> {
        self.link.as_ref()
    }
    /// Obtain the resolve-undo extension.
    pub fn resolve_undo(&self) -> Option<&extension::resolve_undo::Paths> {
        self.resolve_undo.as_ref()
    }
    /// Obtain the untracked extension.
    pub fn untracked(&self) -> Option<&extension::UntrackedCache> {
        self.untracked.as_ref()
    }
    /// Obtain the fsmonitor extension.
    pub fn fs_monitor(&self) -> Option<&extension::FsMonitor> {
        self.fs_monitor.as_ref()
    }
}