jj_lib/store.rs

// Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![allow(missing_docs)]

use std::any::Any;
use std::fmt::Debug;
use std::fmt::Formatter;
use std::pin::Pin;
use std::sync::Arc;
use std::sync::Mutex;
use std::time::SystemTime;

use clru::CLruCache;
use futures::stream::BoxStream;
use pollster::FutureExt as _;
use tokio::io::AsyncRead;

use crate::backend;
use crate::backend::Backend;
use crate::backend::BackendResult;
use crate::backend::ChangeId;
use crate::backend::CommitId;
use crate::backend::ConflictId;
use crate::backend::CopyRecord;
use crate::backend::FileId;
use crate::backend::MergedTreeId;
use crate::backend::SigningFn;
use crate::backend::SymlinkId;
use crate::backend::TreeId;
use crate::commit::Commit;
use crate::index::Index;
use crate::merge::Merge;
use crate::merge::MergedTreeValue;
use crate::merged_tree::MergedTree;
use crate::repo_path::RepoPath;
use crate::repo_path::RepoPathBuf;
use crate::signing::Signer;
use crate::tree::Tree;
use crate::tree_builder::TreeBuilder;

// There are more tree objects than commits, and trees are often shared across
// commits, so the tree cache is larger than the commit cache.
pub(crate) const COMMIT_CACHE_CAPACITY: usize = 100;
const TREE_CACHE_CAPACITY: usize = 1000;

/// Wraps the low-level backend and makes it return more convenient types. Also
/// adds caching.
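///
/// The snippet below is an illustrative sketch rather than a doctest:
/// `my_backend` stands in for whatever concrete [`Backend`] implementation the
/// caller provides, and `signer` for a configured [`Signer`].
///
/// ```ignore
/// let store = Store::new(Box::new(my_backend), signer);
/// // Reading a commit consults the in-memory cache before hitting the backend.
/// let commit = store.get_commit(store.root_commit_id())?;
/// let tree = store.get_root_tree(commit.tree_id())?;
/// ```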
pub struct Store {
    backend: Box<dyn Backend>,
    signer: Signer,
    commit_cache: Mutex<CLruCache<CommitId, Arc<backend::Commit>>>,
    tree_cache: Mutex<CLruCache<(RepoPathBuf, TreeId), Arc<backend::Tree>>>,
}

impl Debug for Store {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
        f.debug_struct("Store")
            .field("backend", &self.backend)
            .finish_non_exhaustive()
    }
}

impl Store {
    pub fn new(backend: Box<dyn Backend>, signer: Signer) -> Arc<Self> {
        Arc::new(Store {
            backend,
            signer,
            commit_cache: Mutex::new(CLruCache::new(COMMIT_CACHE_CAPACITY.try_into().unwrap())),
            tree_cache: Mutex::new(CLruCache::new(TREE_CACHE_CAPACITY.try_into().unwrap())),
        })
    }

    pub fn backend(&self) -> &dyn Backend {
        self.backend.as_ref()
    }

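    /// Returns the backend as `&dyn Any`, so callers can downcast it to a
    /// concrete backend type.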
    pub fn backend_impl(&self) -> &dyn Any {
        self.backend.as_any()
    }

    pub fn signer(&self) -> &Signer {
        &self.signer
    }

    pub fn get_copy_records(
        &self,
        paths: Option<&[RepoPathBuf]>,
        root: &CommitId,
        head: &CommitId,
    ) -> BackendResult<BoxStream<BackendResult<CopyRecord>>> {
        self.backend.get_copy_records(paths, root, head)
    }

    pub fn commit_id_length(&self) -> usize {
        self.backend.commit_id_length()
    }

    pub fn change_id_length(&self) -> usize {
        self.backend.change_id_length()
    }

    pub fn root_commit_id(&self) -> &CommitId {
        self.backend.root_commit_id()
    }

    pub fn root_change_id(&self) -> &ChangeId {
        self.backend.root_change_id()
    }

    pub fn empty_tree_id(&self) -> &TreeId {
        self.backend.empty_tree_id()
    }

    pub fn concurrency(&self) -> usize {
        self.backend.concurrency()
    }

    pub fn empty_merged_tree_id(&self) -> MergedTreeId {
        MergedTreeId::resolved(self.backend.empty_tree_id().clone())
    }

    pub fn root_commit(self: &Arc<Self>) -> Commit {
        self.get_commit(self.backend.root_commit_id()).unwrap()
    }

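    /// Synchronous wrapper around [`Store::get_commit_async`]; like the other
    /// synchronous wrappers here, it blocks on the async version via
    /// `pollster`.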
    pub fn get_commit(self: &Arc<Self>, id: &CommitId) -> BackendResult<Commit> {
        self.get_commit_async(id).block_on()
    }

    pub async fn get_commit_async(self: &Arc<Self>, id: &CommitId) -> BackendResult<Commit> {
        let data = self.get_backend_commit(id).await?;
        Ok(Commit::new(self.clone(), id.clone(), data))
    }

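    // Checks the commit cache first; on a miss, reads the commit from the
    // backend (releasing the cache lock across the await) and then inserts the
    // result into the cache.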
    async fn get_backend_commit(&self, id: &CommitId) -> BackendResult<Arc<backend::Commit>> {
        {
            let mut locked_cache = self.commit_cache.lock().unwrap();
            if let Some(data) = locked_cache.get(id).cloned() {
                return Ok(data);
            }
        }
        let commit = self.backend.read_commit(id).await?;
        let data = Arc::new(commit);
        let mut locked_cache = self.commit_cache.lock().unwrap();
        locked_cache.put(id.clone(), data.clone());
        Ok(data)
    }

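    /// Writes `commit` to the backend, optionally signing it with `sign_with`,
    /// and caches the stored form. Panics if the commit has no parents;
    /// otherwise parentless commits get the root commit as their parent.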
    pub async fn write_commit(
        self: &Arc<Self>,
        commit: backend::Commit,
        sign_with: Option<&mut SigningFn<'_>>,
    ) -> BackendResult<Commit> {
        assert!(!commit.parents.is_empty());

        let (commit_id, commit) = self.backend.write_commit(commit, sign_with).await?;
        let data = Arc::new(commit);
        {
            let mut locked_cache = self.commit_cache.lock().unwrap();
            locked_cache.put(commit_id.clone(), data.clone());
        }

        Ok(Commit::new(self.clone(), commit_id, data))
    }

    pub fn get_tree(self: &Arc<Self>, dir: RepoPathBuf, id: &TreeId) -> BackendResult<Tree> {
        self.get_tree_async(dir, id).block_on()
    }

    pub async fn get_tree_async(
        self: &Arc<Self>,
        dir: RepoPathBuf,
        id: &TreeId,
    ) -> BackendResult<Tree> {
        let data = self.get_backend_tree(&dir, id).await?;
        Ok(Tree::new(self.clone(), dir, id.clone(), data))
    }

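    // Same caching pattern as `get_backend_commit`, but trees are keyed by
    // (directory, tree id), matching the (path, id) pair that the backend's
    // `read_tree` takes.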
    async fn get_backend_tree(
        &self,
        dir: &RepoPath,
        id: &TreeId,
    ) -> BackendResult<Arc<backend::Tree>> {
        let key = (dir.to_owned(), id.clone());
        {
            let mut locked_cache = self.tree_cache.lock().unwrap();
            if let Some(data) = locked_cache.get(&key).cloned() {
                return Ok(data);
            }
        }
        let data = self.backend.read_tree(dir, id).await?;
        let data = Arc::new(data);
        let mut locked_cache = self.tree_cache.lock().unwrap();
        locked_cache.put(key, data.clone());
        Ok(data)
    }

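    /// Reads the root tree(s) identified by `id`. A legacy id names a single
    /// tree (with any conflicts stored inline), which is converted to a
    /// [`MergedTree`]; a merge id names several trees, one per term of the
    /// merge, all of which are read.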
    pub fn get_root_tree(self: &Arc<Self>, id: &MergedTreeId) -> BackendResult<MergedTree> {
        match &id {
            MergedTreeId::Legacy(id) => {
                let tree = self.get_tree(RepoPathBuf::root(), id)?;
                MergedTree::from_legacy_tree(tree)
            }
            MergedTreeId::Merge(ids) => {
                let trees = ids.try_map(|id| self.get_tree(RepoPathBuf::root(), id))?;
                Ok(MergedTree::new(trees))
            }
        }
    }

    pub async fn write_tree(
        self: &Arc<Self>,
        path: &RepoPath,
        tree: backend::Tree,
    ) -> BackendResult<Tree> {
        let tree_id = self.backend.write_tree(path, &tree).await?;
        let data = Arc::new(tree);
        {
            let mut locked_cache = self.tree_cache.lock().unwrap();
            locked_cache.put((path.to_owned(), tree_id.clone()), data.clone());
        }

        Ok(Tree::new(self.clone(), path.to_owned(), tree_id, data))
    }

    pub async fn read_file(
        &self,
        path: &RepoPath,
        id: &FileId,
    ) -> BackendResult<Pin<Box<dyn AsyncRead>>> {
        self.backend.read_file(path, id).await
    }

    pub async fn write_file(
        &self,
        path: &RepoPath,
        contents: &mut (dyn AsyncRead + Send + Unpin),
    ) -> BackendResult<FileId> {
        self.backend.write_file(path, contents).await
    }

    pub async fn read_symlink(&self, path: &RepoPath, id: &SymlinkId) -> BackendResult<String> {
        self.backend.read_symlink(path, id).await
    }

    pub async fn write_symlink(&self, path: &RepoPath, contents: &str) -> BackendResult<SymlinkId> {
        self.backend.write_symlink(path, contents).await
    }

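    // Conflicts are converted between the backend's representation and the
    // `Merge`-based `MergedTreeValue` used by the rest of the library.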
    pub fn read_conflict(
        &self,
        path: &RepoPath,
        id: &ConflictId,
    ) -> BackendResult<MergedTreeValue> {
        let backend_conflict = self.backend.read_conflict(path, id)?;
        Ok(Merge::from_backend_conflict(backend_conflict))
    }

    pub fn write_conflict(
        &self,
        path: &RepoPath,
        contents: &MergedTreeValue,
    ) -> BackendResult<ConflictId> {
        self.backend
            .write_conflict(path, &contents.clone().into_backend_conflict())
    }

    pub fn tree_builder(self: &Arc<Self>, base_tree_id: TreeId) -> TreeBuilder {
        TreeBuilder::new(self.clone(), base_tree_id)
    }

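    /// Delegates garbage collection to the backend. See [`Backend::gc`] for
    /// the exact semantics of `index` and `keep_newer`.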
    pub fn gc(&self, index: &dyn Index, keep_newer: SystemTime) -> BackendResult<()> {
        self.backend.gc(index, keep_newer)
    }
}