// Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![allow(missing_docs)]

use std::any::Any;
use std::fmt::Debug;
use std::fmt::Formatter;
use std::io::Read;
use std::sync::Arc;
use std::sync::Mutex;
use std::time::SystemTime;

use clru::CLruCache;
use futures::stream::BoxStream;
use pollster::FutureExt as _;

use crate::backend;
use crate::backend::Backend;
use crate::backend::BackendResult;
use crate::backend::ChangeId;
use crate::backend::CommitId;
use crate::backend::ConflictId;
use crate::backend::CopyRecord;
use crate::backend::FileId;
use crate::backend::MergedTreeId;
use crate::backend::SigningFn;
use crate::backend::SymlinkId;
use crate::backend::TreeId;
use crate::commit::Commit;
use crate::index::Index;
use crate::merge::Merge;
use crate::merge::MergedTreeValue;
use crate::merged_tree::MergedTree;
use crate::repo_path::RepoPath;
use crate::repo_path::RepoPathBuf;
use crate::signing::Signer;
use crate::tree::Tree;
use crate::tree_builder::TreeBuilder;

// There are more tree objects than commits, and trees are often shared across
// commits.
pub(crate) const COMMIT_CACHE_CAPACITY: usize = 100;
const TREE_CACHE_CAPACITY: usize = 1000;

/// Wraps the low-level backend and makes it return more convenient types. Also
/// adds caching.
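///
/// A rough usage sketch (not taken from this file): `store` is assumed to be
/// an existing `Arc<Store>`, and `commit_id`/`tree_id` valid ids from the same
/// repository.
///
/// ```ignore
/// let root = store.root_commit();
/// let commit = store.get_commit(&commit_id)?;
/// let tree = store.get_tree(RepoPathBuf::root(), &tree_id)?;
/// ```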
pub struct Store {
    backend: Box<dyn Backend>,
    signer: Signer,
    commit_cache: Mutex<CLruCache<CommitId, Arc<backend::Commit>>>,
    tree_cache: Mutex<CLruCache<(RepoPathBuf, TreeId), Arc<backend::Tree>>>,
}

impl Debug for Store {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
        f.debug_struct("Store")
            .field("backend", &self.backend)
            .finish_non_exhaustive()
    }
}

impl Store {
    pub fn new(backend: Box<dyn Backend>, signer: Signer) -> Arc<Self> {
        Arc::new(Store {
            backend,
            signer,
            commit_cache: Mutex::new(CLruCache::new(COMMIT_CACHE_CAPACITY.try_into().unwrap())),
            tree_cache: Mutex::new(CLruCache::new(TREE_CACHE_CAPACITY.try_into().unwrap())),
        })
    }

    pub fn backend_impl(&self) -> &dyn Any {
        self.backend.as_any()
    }

    pub fn signer(&self) -> &Signer {
        &self.signer
    }

    pub fn get_copy_records(
        &self,
        paths: Option<&[RepoPathBuf]>,
        root: &CommitId,
        head: &CommitId,
    ) -> BackendResult<BoxStream<BackendResult<CopyRecord>>> {
        self.backend.get_copy_records(paths, root, head)
    }

    pub fn commit_id_length(&self) -> usize {
        self.backend.commit_id_length()
    }

    pub fn change_id_length(&self) -> usize {
        self.backend.change_id_length()
    }

    pub fn root_commit_id(&self) -> &CommitId {
        self.backend.root_commit_id()
    }

    pub fn root_change_id(&self) -> &ChangeId {
        self.backend.root_change_id()
    }

    pub fn empty_tree_id(&self) -> &TreeId {
        self.backend.empty_tree_id()
    }

    pub fn concurrency(&self) -> usize {
        self.backend.concurrency()
    }

    pub fn empty_merged_tree_id(&self) -> MergedTreeId {
        MergedTreeId::resolved(self.backend.empty_tree_id().clone())
    }

    pub fn root_commit(self: &Arc<Self>) -> Commit {
        self.get_commit(self.backend.root_commit_id()).unwrap()
    }

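    /// Synchronous wrapper around [`Store::get_commit_async`]; blocks the
    /// current thread until the backend read completes.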
    pub fn get_commit(self: &Arc<Self>, id: &CommitId) -> BackendResult<Commit> {
        self.get_commit_async(id).block_on()
    }

    pub async fn get_commit_async(self: &Arc<Self>, id: &CommitId) -> BackendResult<Commit> {
        let data = self.get_backend_commit(id).await?;
        Ok(Commit::new(self.clone(), id.clone(), data))
    }

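    // Looks up the commit in the in-memory LRU cache first; on a miss, reads it
    // from the backend and caches the result. The cache lock is not held across
    // the backend read, so concurrent misses for the same id may both hit the
    // backend.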
    async fn get_backend_commit(&self, id: &CommitId) -> BackendResult<Arc<backend::Commit>> {
        {
            let mut locked_cache = self.commit_cache.lock().unwrap();
            if let Some(data) = locked_cache.get(id).cloned() {
                return Ok(data);
            }
        }
        let commit = self.backend.read_commit(id).await?;
        let data = Arc::new(commit);
        let mut locked_cache = self.commit_cache.lock().unwrap();
        locked_cache.put(id.clone(), data.clone());
        Ok(data)
    }

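    /// Writes the commit to the backend, optionally signing it with
    /// `sign_with`, and adds the result to the commit cache. The commit must
    /// have at least one parent.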
    pub async fn write_commit(
        self: &Arc<Self>,
        commit: backend::Commit,
        sign_with: Option<&mut SigningFn<'_>>,
    ) -> BackendResult<Commit> {
        assert!(!commit.parents.is_empty());

        let (commit_id, commit) = self.backend.write_commit(commit, sign_with).await?;
        let data = Arc::new(commit);
        {
            let mut locked_cache = self.commit_cache.lock().unwrap();
            locked_cache.put(commit_id.clone(), data.clone());
        }

        Ok(Commit::new(self.clone(), commit_id, data))
    }

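    /// Reads the tree at `dir` with the given id, going through the tree
    /// cache. Blocks on [`Store::get_tree_async`].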
    pub fn get_tree(self: &Arc<Self>, dir: RepoPathBuf, id: &TreeId) -> BackendResult<Tree> {
        self.get_tree_async(dir, id).block_on()
    }

    pub async fn get_tree_async(
        self: &Arc<Self>,
        dir: RepoPathBuf,
        id: &TreeId,
    ) -> BackendResult<Tree> {
        let data = self.get_backend_tree(&dir, id).await?;
        Ok(Tree::new(self.clone(), dir, id.clone(), data))
    }

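    // Same caching pattern as `get_backend_commit`, but keyed by
    // (directory, tree id), mirroring the arguments passed to the backend's
    // `read_tree`.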
    async fn get_backend_tree(
        &self,
        dir: &RepoPath,
        id: &TreeId,
    ) -> BackendResult<Arc<backend::Tree>> {
        let key = (dir.to_owned(), id.clone());
        {
            let mut locked_cache = self.tree_cache.lock().unwrap();
            if let Some(data) = locked_cache.get(&key).cloned() {
                return Ok(data);
            }
        }
        let data = self.backend.read_tree(dir, id).await?;
        let data = Arc::new(data);
        let mut locked_cache = self.tree_cache.lock().unwrap();
        locked_cache.put(key, data.clone());
        Ok(data)
    }

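    /// Loads the root tree(s) for the given id. A legacy id names a single
    /// tree (converted via [`MergedTree::from_legacy_tree`]); a merge id names
    /// one tree per side of the merge, combined with [`MergedTree::new`].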
    pub fn get_root_tree(self: &Arc<Self>, id: &MergedTreeId) -> BackendResult<MergedTree> {
        match &id {
            MergedTreeId::Legacy(id) => {
                let tree = self.get_tree(RepoPathBuf::root(), id)?;
                MergedTree::from_legacy_tree(tree)
            }
            MergedTreeId::Merge(ids) => {
                let trees = ids.try_map(|id| self.get_tree(RepoPathBuf::root(), id))?;
                Ok(MergedTree::new(trees))
            }
        }
    }

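    /// Writes the tree to the backend and caches it under (path, tree id).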
    pub async fn write_tree(
        self: &Arc<Self>,
        path: &RepoPath,
        tree: backend::Tree,
    ) -> BackendResult<Tree> {
        let tree_id = self.backend.write_tree(path, &tree).await?;
        let data = Arc::new(tree);
        {
            let mut locked_cache = self.tree_cache.lock().unwrap();
            locked_cache.put((path.to_owned(), tree_id.clone()), data.clone());
        }

        Ok(Tree::new(self.clone(), path.to_owned(), tree_id, data))
    }

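    /// Reads file contents from the backend. Unlike commits and trees, file
    /// and symlink reads are not cached by the store.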
    pub fn read_file(&self, path: &RepoPath, id: &FileId) -> BackendResult<Box<dyn Read>> {
        self.read_file_async(path, id).block_on()
    }

    pub async fn read_file_async(
        &self,
        path: &RepoPath,
        id: &FileId,
    ) -> BackendResult<Box<dyn Read>> {
        self.backend.read_file(path, id).await
    }

    pub async fn write_file(
        &self,
        path: &RepoPath,
        contents: &mut (dyn Read + Send),
    ) -> BackendResult<FileId> {
        self.backend.write_file(path, contents).await
    }

    pub fn read_symlink(&self, path: &RepoPath, id: &SymlinkId) -> BackendResult<String> {
        self.read_symlink_async(path, id).block_on()
    }

    pub async fn read_symlink_async(
        &self,
        path: &RepoPath,
        id: &SymlinkId,
    ) -> BackendResult<String> {
        self.backend.read_symlink(path, id).await
    }

    pub async fn write_symlink(&self, path: &RepoPath, contents: &str) -> BackendResult<SymlinkId> {
        self.backend.write_symlink(path, contents).await
    }

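    /// Reads a conflict object and converts it from the backend representation
    /// to a [`MergedTreeValue`].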
    pub fn read_conflict(
        &self,
        path: &RepoPath,
        id: &ConflictId,
    ) -> BackendResult<MergedTreeValue> {
        let backend_conflict = self.backend.read_conflict(path, id)?;
        Ok(Merge::from_backend_conflict(backend_conflict))
    }

    pub fn write_conflict(
        &self,
        path: &RepoPath,
        contents: &MergedTreeValue,
    ) -> BackendResult<ConflictId> {
        self.backend
            .write_conflict(path, &contents.clone().into_backend_conflict())
    }

    pub fn tree_builder(self: &Arc<Self>, base_tree_id: TreeId) -> TreeBuilder {
        TreeBuilder::new(self.clone(), base_tree_id)
    }

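    /// Delegates garbage collection to the backend; what is kept is
    /// backend-defined, guided by reachability via `index` and the
    /// `keep_newer` cutoff.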
    pub fn gc(&self, index: &dyn Index, keep_newer: SystemTime) -> BackendResult<()> {
        self.backend.gc(index, keep_newer)
    }
}