jujutsu_lib/simple_op_store.rs

// Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::Debug;
use std::path::Path;

use tempfile::PersistError;

use crate::op_store::{OpStore, OpStoreError, OpStoreResult, Operation, OperationId, View, ViewId};
use crate::proto_op_store::ProtoOpStore;

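// Fold I/O and temp-file persistence errors into the catch-all
// `OpStoreError::Other`, keeping only the error message.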
impl From<std::io::Error> for OpStoreError {
    fn from(err: std::io::Error) -> Self {
        OpStoreError::Other(err.to_string())
    }
}

impl From<PersistError> for OpStoreError {
    fn from(err: PersistError) -> Self {
        OpStoreError::Other(err.to_string())
    }
}

// TODO: In version 0.7.0 or so, inline ProtoOpStore into this type and drop
// support for upgrading from the thrift format
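/// An [`OpStore`] that keeps operations and views as Protobuf files on local
/// disk, delegating all reads and writes to [`ProtoOpStore`].
///
/// A minimal round-trip sketch (it mirrors the tests at the bottom of this
/// file; the path and the `view` value are placeholders):
///
/// ```ignore
/// let store = SimpleOpStore::init(Path::new("/tmp/op-store"));
/// let view_id = store.write_view(&view)?;
/// assert_eq!(store.read_view(&view_id)?, view);
/// ```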
#[derive(Debug)]
pub struct SimpleOpStore {
    delegate: ProtoOpStore,
}

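/// Rewrites the operation log from the legacy Thrift format into the Protobuf
/// format.
///
/// Walks the operation graph from the current operation head(s) with an
/// iterative DFS, converting each operation's view and parents before the
/// operation itself, then swaps the rewritten store into place via renames,
/// keeping the old store as an `op_store_old` backup.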
#[cfg(feature = "legacy-thrift")]
fn upgrade_from_thrift(store_path: &Path) -> std::io::Result<()> {
    use std::collections::{HashMap, HashSet};
    use std::fs;

    use itertools::Itertools;

    use crate::legacy_thrift_op_store::ThriftOpStore;

    println!("Upgrading operation log to Protobuf format...");
    let repo_path = store_path.parent().unwrap();
    let old_store = ThriftOpStore::load(store_path.to_path_buf());
    let tmp_store_dir = tempfile::Builder::new()
        .prefix("jj-op-store-upgrade-")
        .tempdir_in(repo_path)
        .unwrap();
    let tmp_store_path = tmp_store_dir.path().to_path_buf();

    // Find the current operation head(s) of the operation log
    let op_heads_store_path = repo_path.join("op_heads");
    let mut old_op_heads = HashSet::new();
    for entry in fs::read_dir(op_heads_store_path)? {
        let basename = entry?.file_name();
        let op_id_str = basename.to_str().unwrap();
        if let Ok(op_id_bytes) = hex::decode(op_id_str) {
            old_op_heads.insert(OperationId::new(op_id_bytes));
        }
    }

    // Do a DFS to rewrite the operations
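    // Operation and view IDs are content hashes (see the hash tests below), so
    // rewriting an operation's view and parents changes the operation's own
    // ID. Parents therefore have to be converted before their children, which
    // is why an operation is only popped off the stack once all of its parents
    // have new IDs.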
    let new_store = ProtoOpStore::init(tmp_store_path.clone());
    let mut converted: HashMap<OperationId, OperationId> = HashMap::new();
    // The DFS stack
    let mut to_convert = old_op_heads
        .iter()
        .map(|op_id| (op_id.clone(), old_store.read_operation(op_id).unwrap()))
        .collect_vec();
    while !to_convert.is_empty() {
        let (_, op) = to_convert.last().unwrap();
        let mut new_parent_ids: Vec<OperationId> = vec![];
        let mut new_to_convert = vec![];
        // Check which parents are already converted and which ones we need to rewrite
        // first
        for parent_id in &op.parents {
            if let Some(new_parent_id) = converted.get(parent_id) {
                new_parent_ids.push(new_parent_id.clone());
            } else {
                let parent_op = old_store.read_operation(parent_id).unwrap();
                new_to_convert.push((parent_id.clone(), parent_op));
            }
        }
        if new_to_convert.is_empty() {
            // If all parents have already been converted, remove this operation from the
            // stack and convert it
            let (old_op_id, mut old_op) = to_convert.pop().unwrap();
            old_op.parents = new_parent_ids;
            let old_view = old_store.read_view(&old_op.view_id).unwrap();
            let new_view_id = new_store.write_view(&old_view).unwrap();
            old_op.view_id = new_view_id;
            let new_op_id = new_store.write_operation(&old_op).unwrap();
            converted.insert(old_op_id, new_op_id);
        } else {
            to_convert.extend(new_to_convert);
        }
    }

    let backup_store_path = repo_path.join("op_store_old");
    // Delete existing backup (probably from an earlier upgrade to Thrift)
    fs::remove_dir_all(&backup_store_path).ok();
    fs::rename(store_path, backup_store_path)?;
    fs::rename(&tmp_store_path, store_path)?;

    println!("Upgrade complete");
    Ok(())
}

impl SimpleOpStore {
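    /// Creates a new, empty store in `store_path`.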
    pub fn init(store_path: &Path) -> Self {
        let delegate = ProtoOpStore::init(store_path.to_path_buf());
        SimpleOpStore { delegate }
    }

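    /// Loads an existing store from `store_path`, first upgrading it from the
    /// legacy Thrift format if the `legacy-thrift` feature is enabled and a
    /// Thrift store is found there.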
    pub fn load(store_path: &Path) -> Self {
        #[cfg(feature = "legacy-thrift")]
        if store_path.join("thrift_store").exists() {
            upgrade_from_thrift(store_path)
                .expect("Failed to upgrade operation log to Protobuf format");
        }
        let delegate = ProtoOpStore::load(store_path.to_path_buf());
        SimpleOpStore { delegate }
    }
}

impl OpStore for SimpleOpStore {
    fn name(&self) -> &str {
        "simple_op_store"
    }

    fn read_view(&self, id: &ViewId) -> OpStoreResult<View> {
        self.delegate.read_view(id)
    }

    fn write_view(&self, view: &View) -> OpStoreResult<ViewId> {
        self.delegate.write_view(view)
    }

    fn read_operation(&self, id: &OperationId) -> OpStoreResult<Operation> {
        self.delegate.read_operation(id)
    }

    fn write_operation(&self, operation: &Operation) -> OpStoreResult<OperationId> {
        self.delegate.write_operation(operation)
    }
}

#[cfg(test)]
mod tests {
    use insta::assert_snapshot;
    use maplit::{btreemap, hashmap, hashset};

    use super::*;
    use crate::backend::{CommitId, MillisSinceEpoch, ObjectId, Timestamp};
    use crate::content_hash::blake2b_hash;
    use crate::op_store::{BranchTarget, OperationMetadata, RefTarget, WorkspaceId};

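    /// Returns a `View` that populates every field, including a conflicted
    /// git ref, so the hash and round-trip tests below cover the whole type.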
    fn create_view() -> View {
        let head_id1 = CommitId::from_hex("aaa111");
        let head_id2 = CommitId::from_hex("aaa222");
        let public_head_id1 = CommitId::from_hex("bbb444");
        let public_head_id2 = CommitId::from_hex("bbb555");
        let branch_main_local_target = RefTarget::Normal(CommitId::from_hex("ccc111"));
        let branch_main_origin_target = RefTarget::Normal(CommitId::from_hex("ccc222"));
        let branch_deleted_origin_target = RefTarget::Normal(CommitId::from_hex("ccc333"));
        let tag_v1_target = RefTarget::Normal(CommitId::from_hex("ddd111"));
        let git_refs_main_target = RefTarget::Normal(CommitId::from_hex("fff111"));
        let git_refs_feature_target = RefTarget::Conflict {
            removes: vec![CommitId::from_hex("fff111")],
            adds: vec![CommitId::from_hex("fff222"), CommitId::from_hex("fff333")],
        };
        let default_wc_commit_id = CommitId::from_hex("abc111");
        let test_wc_commit_id = CommitId::from_hex("abc222");
        View {
            head_ids: hashset! {head_id1, head_id2},
            public_head_ids: hashset! {public_head_id1, public_head_id2},
            branches: btreemap! {
                "main".to_string() => BranchTarget {
                    local_target: Some(branch_main_local_target),
                    remote_targets: btreemap! {
                        "origin".to_string() => branch_main_origin_target,
                    }
                },
                "deleted".to_string() => BranchTarget {
                    local_target: None,
                    remote_targets: btreemap! {
                        "origin".to_string() => branch_deleted_origin_target,
                    }
                },
            },
            tags: btreemap! {
                "v1.0".to_string() => tag_v1_target,
            },
            git_refs: btreemap! {
                "refs/heads/main".to_string() => git_refs_main_target,
                "refs/heads/feature".to_string() => git_refs_feature_target
            },
            git_head: Some(RefTarget::Normal(CommitId::from_hex("fff111"))),
            wc_commit_ids: hashmap! {
                WorkspaceId::default() => default_wc_commit_id,
                WorkspaceId::new("test".to_string()) => test_wc_commit_id,
            },
        }
    }

    fn create_operation() -> Operation {
        Operation {
            view_id: ViewId::from_hex("aaa111"),
            parents: vec![
                OperationId::from_hex("bbb111"),
                OperationId::from_hex("bbb222"),
            ],
            metadata: OperationMetadata {
                start_time: Timestamp {
                    timestamp: MillisSinceEpoch(123456789),
                    tz_offset: 3600,
                },
                end_time: Timestamp {
                    timestamp: MillisSinceEpoch(123456800),
                    tz_offset: 3600,
                },
                description: "check out foo".to_string(),
                hostname: "some.host.example.com".to_string(),
                username: "someone".to_string(),
                tags: hashmap! {
                    "key1".to_string() => "value1".to_string(),
                    "key2".to_string() => "value2".to_string(),
                },
            },
        }
    }

    #[test]
    fn test_hash_view() {
        // Test exact output so we detect regressions in compatibility
        assert_snapshot!(
            ViewId::new(blake2b_hash(&create_view()).to_vec()).hex(),
            @"7f47fa81494d7189cb1827b83b3f834662f0f61b4c4090298067e85cdc60f773bf639c4e6a3554a4e401650218ca240291ce591f45a1c501ade1d2b9f97e1a37"
        );
    }

    #[test]
    fn test_hash_operation() {
        // Test exact output so we detect regressions in compatibility
        assert_snapshot!(
            OperationId::new(blake2b_hash(&create_operation()).to_vec()).hex(),
            @"3ec986c29ff8eb808ea8f6325d6307cea75ef02987536c8e4645406aba51afc8e229957a6e855170d77a66098c58912309323f5e0b32760caa2b59dc84d45fcf"
        );
    }

    #[test]
    fn test_read_write_view() {
        let temp_dir = testutils::new_temp_dir();
        let store = SimpleOpStore::init(temp_dir.path());
        let view = create_view();
        let view_id = store.write_view(&view).unwrap();
        let read_view = store.read_view(&view_id).unwrap();
        assert_eq!(read_view, view);
    }

    #[test]
    fn test_read_write_operation() {
        let temp_dir = testutils::new_temp_dir();
        let store = SimpleOpStore::init(temp_dir.path());
        let operation = create_operation();
        let op_id = store.write_operation(&operation).unwrap();
        let read_operation = store.read_operation(&op_id).unwrap();
        assert_eq!(read_operation, operation);
    }
}