#![expect(missing_docs)]

use std::fmt::Debug;
use std::fmt::Formatter;
use std::pin::Pin;
use std::sync::Arc;
use std::sync::Mutex;
use std::time::SystemTime;

use clru::CLruCache;
use futures::stream::BoxStream;
use pollster::FutureExt as _;
use tokio::io::AsyncRead;

use crate::backend;
use crate::backend::Backend;
use crate::backend::BackendResult;
use crate::backend::ChangeId;
use crate::backend::CommitId;
use crate::backend::CopyRecord;
use crate::backend::FileId;
use crate::backend::MergedTreeId;
use crate::backend::SigningFn;
use crate::backend::SymlinkId;
use crate::backend::TreeId;
use crate::commit::Commit;
use crate::index::Index;
use crate::merged_tree::MergedTree;
use crate::repo_path::RepoPath;
use crate::repo_path::RepoPathBuf;
use crate::signing::Signer;
use crate::tree::Tree;
use crate::tree_merge::MergeOptions;

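/// Number of commits kept in the in-memory commit cache.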
pub(crate) const COMMIT_CACHE_CAPACITY: usize = 100;
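/// Number of trees kept in the in-memory tree cache.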
const TREE_CACHE_CAPACITY: usize = 1000;

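/// Wraps the lower-level commit/tree/file storage [`Backend`] and adds
/// in-memory caching of commits and trees, commit signing, and merge options.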
pub struct Store {
    backend: Box<dyn Backend>,
    signer: Signer,
    commit_cache: Mutex<CLruCache<CommitId, Arc<backend::Commit>>>,
    tree_cache: Mutex<CLruCache<(RepoPathBuf, TreeId), Arc<backend::Tree>>>,
    merge_options: MergeOptions,
}

impl Debug for Store {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
        f.debug_struct("Store")
            .field("backend", &self.backend)
            .finish_non_exhaustive()
    }
}

impl Store {
    pub fn new(
        backend: Box<dyn Backend>,
        signer: Signer,
        merge_options: MergeOptions,
    ) -> Arc<Self> {
        Arc::new(Self {
            backend,
            signer,
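            // Both capacities are nonzero constants, so converting them to the
            // cache's nonzero capacity type cannot fail.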
            commit_cache: Mutex::new(CLruCache::new(COMMIT_CACHE_CAPACITY.try_into().unwrap())),
            tree_cache: Mutex::new(CLruCache::new(TREE_CACHE_CAPACITY.try_into().unwrap())),
            merge_options,
        })
    }

    pub fn backend(&self) -> &dyn Backend {
        self.backend.as_ref()
    }

    pub fn backend_impl<T: Backend>(&self) -> Option<&T> {
        self.backend.downcast_ref()
    }

    pub fn signer(&self) -> &Signer {
        &self.signer
    }

    pub fn merge_options(&self) -> &MergeOptions {
        &self.merge_options
    }

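    /// Forwards to the backend's copy-record lookup for the given `root` and
    /// `head` commits, optionally restricted to `paths`.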
    pub fn get_copy_records(
        &self,
        paths: Option<&[RepoPathBuf]>,
        root: &CommitId,
        head: &CommitId,
    ) -> BackendResult<BoxStream<'_, BackendResult<CopyRecord>>> {
        self.backend.get_copy_records(paths, root, head)
    }

    pub fn commit_id_length(&self) -> usize {
        self.backend.commit_id_length()
    }

    pub fn change_id_length(&self) -> usize {
        self.backend.change_id_length()
    }

    pub fn root_commit_id(&self) -> &CommitId {
        self.backend.root_commit_id()
    }

    pub fn root_change_id(&self) -> &ChangeId {
        self.backend.root_change_id()
    }

    pub fn empty_tree_id(&self) -> &TreeId {
        self.backend.empty_tree_id()
    }

    pub fn concurrency(&self) -> usize {
        self.backend.concurrency()
    }

    pub fn empty_merged_tree_id(&self) -> MergedTreeId {
        MergedTreeId::resolved(self.backend.empty_tree_id().clone())
    }

    pub fn root_commit(self: &Arc<Self>) -> Commit {
        self.get_commit(self.backend.root_commit_id()).unwrap()
    }

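    /// Looks up a commit by id, reading it from the backend on a cache miss.
    ///
    /// ```ignore
    /// // Illustrative sketch, not a doctest; assumes `store: Arc<Store>` and a
    /// // known `commit_id` are in scope.
    /// let commit = store.get_commit(&commit_id)?;
    /// println!("{:?}", commit.id());
    /// ```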
    pub fn get_commit(self: &Arc<Self>, id: &CommitId) -> BackendResult<Commit> {
        self.get_commit_async(id).block_on()
    }

    pub async fn get_commit_async(self: &Arc<Self>, id: &CommitId) -> BackendResult<Commit> {
        let data = self.get_backend_commit(id).await?;
        Ok(Commit::new(self.clone(), id.clone(), data))
    }

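    // Checks the commit cache first; on a miss, reads the commit from the
    // backend and inserts it. The cache lock is not held across the backend
    // read.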
    async fn get_backend_commit(&self, id: &CommitId) -> BackendResult<Arc<backend::Commit>> {
        {
            let mut locked_cache = self.commit_cache.lock().unwrap();
            if let Some(data) = locked_cache.get(id).cloned() {
                return Ok(data);
            }
        }
        let commit = self.backend.read_commit(id).await?;
        let data = Arc::new(commit);
        let mut locked_cache = self.commit_cache.lock().unwrap();
        locked_cache.put(id.clone(), data.clone());
        Ok(data)
    }

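    /// Writes a commit to the backend, caches it, and returns it.
    ///
    /// Panics if the commit has no parents.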
    pub async fn write_commit(
        self: &Arc<Self>,
        commit: backend::Commit,
        sign_with: Option<&mut SigningFn<'_>>,
    ) -> BackendResult<Commit> {
        assert!(!commit.parents.is_empty());

        let (commit_id, commit) = self.backend.write_commit(commit, sign_with).await?;
        let data = Arc::new(commit);
        {
            let mut locked_cache = self.commit_cache.lock().unwrap();
            locked_cache.put(commit_id.clone(), data.clone());
        }

        Ok(Commit::new(self.clone(), commit_id, data))
    }

    pub fn get_tree(self: &Arc<Self>, dir: RepoPathBuf, id: &TreeId) -> BackendResult<Tree> {
        self.get_tree_async(dir, id).block_on()
    }

    pub async fn get_tree_async(
        self: &Arc<Self>,
        dir: RepoPathBuf,
        id: &TreeId,
    ) -> BackendResult<Tree> {
        let data = self.get_backend_tree(&dir, id).await?;
        Ok(Tree::new(self.clone(), dir, id.clone(), data))
    }

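    // Same caching pattern as `get_backend_commit`, with entries keyed by
    // (directory, tree id).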
    async fn get_backend_tree(
        &self,
        dir: &RepoPath,
        id: &TreeId,
    ) -> BackendResult<Arc<backend::Tree>> {
        let key = (dir.to_owned(), id.clone());
        {
            let mut locked_cache = self.tree_cache.lock().unwrap();
            if let Some(data) = locked_cache.get(&key).cloned() {
                return Ok(data);
            }
        }
        let data = self.backend.read_tree(dir, id).await?;
        let data = Arc::new(data);
        let mut locked_cache = self.tree_cache.lock().unwrap();
        locked_cache.put(key, data.clone());
        Ok(data)
    }

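    /// Loads the root tree(s) for `id`: a legacy id resolves to a single tree,
    /// while a merge id loads one tree per side of the merge.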
    pub fn get_root_tree(self: &Arc<Self>, id: &MergedTreeId) -> BackendResult<MergedTree> {
        self.get_root_tree_async(id).block_on()
    }

    pub async fn get_root_tree_async(
        self: &Arc<Self>,
        id: &MergedTreeId,
    ) -> BackendResult<MergedTree> {
        match &id {
            MergedTreeId::Legacy(id) => {
                let tree = self.get_tree_async(RepoPathBuf::root(), id).await?;
                Ok(MergedTree::resolved(tree))
            }
            MergedTreeId::Merge(ids) => {
                let trees = ids
                    .try_map_async(|id| self.get_tree_async(RepoPathBuf::root(), id))
                    .await?;
                Ok(MergedTree::new(trees))
            }
        }
    }

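    /// Writes a tree to the backend at `path` and seeds the tree cache with
    /// the written data.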
    pub async fn write_tree(
        self: &Arc<Self>,
        path: &RepoPath,
        tree: backend::Tree,
    ) -> BackendResult<Tree> {
        let tree_id = self.backend.write_tree(path, &tree).await?;
        let data = Arc::new(tree);
        {
            let mut locked_cache = self.tree_cache.lock().unwrap();
            locked_cache.put((path.to_owned(), tree_id.clone()), data.clone());
        }

        Ok(Tree::new(self.clone(), path.to_owned(), tree_id, data))
    }

    pub async fn read_file(
        &self,
        path: &RepoPath,
        id: &FileId,
    ) -> BackendResult<Pin<Box<dyn AsyncRead + Send>>> {
        self.backend.read_file(path, id).await
    }

    pub async fn write_file(
        &self,
        path: &RepoPath,
        contents: &mut (dyn AsyncRead + Send + Unpin),
    ) -> BackendResult<FileId> {
        self.backend.write_file(path, contents).await
    }

    pub async fn read_symlink(&self, path: &RepoPath, id: &SymlinkId) -> BackendResult<String> {
        self.backend.read_symlink(path, id).await
    }

    pub async fn write_symlink(&self, path: &RepoPath, contents: &str) -> BackendResult<SymlinkId> {
        self.backend.write_symlink(path, contents).await
    }

    pub fn gc(&self, index: &dyn Index, keep_newer: SystemTime) -> BackendResult<()> {
        self.backend.gc(index, keep_newer)
    }

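    /// Drops all cached commits and trees; subsequent reads go to the backend.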
    pub fn clear_caches(&self) {
        self.commit_cache.lock().unwrap().clear();
        self.tree_cache.lock().unwrap().clear();
    }
}