jj_lib/repo.rs

// Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![allow(missing_docs)]

use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::fmt::Formatter;
use std::fs;
use std::path::Path;
use std::slice;
use std::sync::Arc;

use itertools::Itertools as _;
use once_cell::sync::OnceCell;
use pollster::FutureExt as _;
use thiserror::Error;
use tracing::instrument;

use self::dirty_cell::DirtyCell;
use crate::backend::Backend;
use crate::backend::BackendError;
use crate::backend::BackendInitError;
use crate::backend::BackendLoadError;
use crate::backend::BackendResult;
use crate::backend::ChangeId;
use crate::backend::CommitId;
use crate::backend::MergedTreeId;
use crate::commit::Commit;
use crate::commit::CommitByCommitterTimestamp;
use crate::commit_builder::CommitBuilder;
use crate::commit_builder::DetachedCommitBuilder;
use crate::dag_walk;
use crate::default_index::DefaultIndexStore;
use crate::default_index::DefaultMutableIndex;
use crate::default_submodule_store::DefaultSubmoduleStore;
use crate::file_util::IoResultExt as _;
use crate::file_util::PathError;
use crate::index::ChangeIdIndex;
use crate::index::Index;
use crate::index::IndexReadError;
use crate::index::IndexStore;
use crate::index::MutableIndex;
use crate::index::ReadonlyIndex;
use crate::merge::MergeBuilder;
use crate::merge::trivial_merge;
use crate::object_id::HexPrefix;
use crate::object_id::PrefixResolution;
use crate::op_heads_store;
use crate::op_heads_store::OpHeadResolutionError;
use crate::op_heads_store::OpHeadsStore;
use crate::op_heads_store::OpHeadsStoreError;
use crate::op_store;
use crate::op_store::OpStore;
use crate::op_store::OpStoreError;
use crate::op_store::OpStoreResult;
use crate::op_store::OperationId;
use crate::op_store::RefTarget;
use crate::op_store::RemoteRef;
use crate::op_store::RemoteRefState;
use crate::op_store::RootOperationData;
use crate::operation::Operation;
use crate::ref_name::GitRefName;
use crate::ref_name::RefName;
use crate::ref_name::RemoteName;
use crate::ref_name::RemoteRefSymbol;
use crate::ref_name::WorkspaceName;
use crate::ref_name::WorkspaceNameBuf;
use crate::refs::diff_named_commit_ids;
use crate::refs::diff_named_ref_targets;
use crate::refs::diff_named_remote_refs;
use crate::refs::merge_ref_targets;
use crate::refs::merge_remote_refs;
use crate::revset;
use crate::revset::RevsetEvaluationError;
use crate::revset::RevsetExpression;
use crate::revset::RevsetIteratorExt as _;
use crate::rewrite::CommitRewriter;
use crate::rewrite::RebaseOptions;
use crate::rewrite::RebasedCommit;
use crate::rewrite::RewriteRefsOptions;
use crate::rewrite::merge_commit_trees;
use crate::rewrite::rebase_commit_with_options;
use crate::settings::UserSettings;
use crate::signing::SignInitError;
use crate::signing::Signer;
use crate::simple_backend::SimpleBackend;
use crate::simple_op_heads_store::SimpleOpHeadsStore;
use crate::simple_op_store::SimpleOpStore;
use crate::store::Store;
use crate::submodule_store::SubmoduleStore;
use crate::transaction::Transaction;
use crate::transaction::TransactionCommitError;
use crate::view::RenameWorkspaceError;
use crate::view::View;
pub trait Repo {
    /// Base repository that contains all committed data. Returns `self` if
    /// this is a `ReadonlyRepo`.
    fn base_repo(&self) -> &ReadonlyRepo;

    fn store(&self) -> &Arc<Store>;

    fn op_store(&self) -> &Arc<dyn OpStore>;

    fn index(&self) -> &dyn Index;

    fn view(&self) -> &View;

    fn submodule_store(&self) -> &Arc<dyn SubmoduleStore>;

    fn resolve_change_id(&self, change_id: &ChangeId) -> Option<Vec<CommitId>> {
        // Replace this if we add a more efficient lookup method.
        let prefix = HexPrefix::from_id(change_id);
        match self.resolve_change_id_prefix(&prefix) {
            PrefixResolution::NoMatch => None,
            PrefixResolution::SingleMatch(entries) => Some(entries),
            PrefixResolution::AmbiguousMatch => panic!("complete change_id should be unambiguous"),
        }
    }

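    /// Resolves a change ID `prefix` to the IDs of commits with matching
    /// change IDs. A lookup sketch (`repo` and `change_id` are illustrative):
    ///
    /// ```ignore
    /// let prefix = HexPrefix::from_id(&change_id);
    /// match repo.resolve_change_id_prefix(&prefix) {
    ///     PrefixResolution::NoMatch => println!("no matching change"),
    ///     PrefixResolution::SingleMatch(commit_ids) => println!("{commit_ids:?}"),
    ///     PrefixResolution::AmbiguousMatch => println!("prefix is ambiguous"),
    /// }
    /// ```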
    fn resolve_change_id_prefix(&self, prefix: &HexPrefix) -> PrefixResolution<Vec<CommitId>>;

    fn shortest_unique_change_id_prefix_len(&self, target_id_bytes: &ChangeId) -> usize;
}

pub struct ReadonlyRepo {
    loader: RepoLoader,
    operation: Operation,
    index: Box<dyn ReadonlyIndex>,
    change_id_index: OnceCell<Box<dyn ChangeIdIndex>>,
    // TODO: This should eventually become part of the index and not be stored fully in memory.
    view: View,
}

impl Debug for ReadonlyRepo {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
        f.debug_struct("ReadonlyRepo")
            .field("store", &self.loader.store)
            .finish_non_exhaustive()
    }
}

#[derive(Error, Debug)]
pub enum RepoInitError {
    #[error(transparent)]
    Backend(#[from] BackendInitError),
    #[error(transparent)]
    OpHeadsStore(#[from] OpHeadsStoreError),
    #[error(transparent)]
    Path(#[from] PathError),
}

impl ReadonlyRepo {
    pub fn default_op_store_initializer() -> &'static OpStoreInitializer<'static> {
        &|_settings, store_path, root_data| {
            Ok(Box::new(SimpleOpStore::init(store_path, root_data)?))
        }
    }

    pub fn default_op_heads_store_initializer() -> &'static OpHeadsStoreInitializer<'static> {
        &|_settings, store_path| Ok(Box::new(SimpleOpHeadsStore::init(store_path)?))
    }

    pub fn default_index_store_initializer() -> &'static IndexStoreInitializer<'static> {
        &|_settings, store_path| Ok(Box::new(DefaultIndexStore::init(store_path)?))
    }

    pub fn default_submodule_store_initializer() -> &'static SubmoduleStoreInitializer<'static> {
        &|_settings, store_path| Ok(Box::new(DefaultSubmoduleStore::init(store_path)))
    }

    #[expect(clippy::too_many_arguments)]
    pub fn init(
        settings: &UserSettings,
        repo_path: &Path,
        backend_initializer: &BackendInitializer,
        signer: Signer,
        op_store_initializer: &OpStoreInitializer,
        op_heads_store_initializer: &OpHeadsStoreInitializer,
        index_store_initializer: &IndexStoreInitializer,
        submodule_store_initializer: &SubmoduleStoreInitializer,
    ) -> Result<Arc<Self>, RepoInitError> {
        let repo_path = dunce::canonicalize(repo_path).context(repo_path)?;

        let store_path = repo_path.join("store");
        fs::create_dir(&store_path).context(&store_path)?;
        let backend = backend_initializer(settings, &store_path)?;
        let backend_path = store_path.join("type");
        fs::write(&backend_path, backend.name()).context(&backend_path)?;
        let store = Store::new(backend, signer);

        let op_store_path = repo_path.join("op_store");
        fs::create_dir(&op_store_path).context(&op_store_path)?;
        let root_op_data = RootOperationData {
            root_commit_id: store.root_commit_id().clone(),
        };
        let op_store = op_store_initializer(settings, &op_store_path, root_op_data)?;
        let op_store_type_path = op_store_path.join("type");
        fs::write(&op_store_type_path, op_store.name()).context(&op_store_type_path)?;
        let op_store: Arc<dyn OpStore> = Arc::from(op_store);

        let op_heads_path = repo_path.join("op_heads");
        fs::create_dir(&op_heads_path).context(&op_heads_path)?;
        let op_heads_store = op_heads_store_initializer(settings, &op_heads_path)?;
        let op_heads_type_path = op_heads_path.join("type");
        fs::write(&op_heads_type_path, op_heads_store.name()).context(&op_heads_type_path)?;
        op_heads_store.update_op_heads(&[], op_store.root_operation_id())?;
        let op_heads_store: Arc<dyn OpHeadsStore> = Arc::from(op_heads_store);

        let index_path = repo_path.join("index");
        fs::create_dir(&index_path).context(&index_path)?;
        let index_store = index_store_initializer(settings, &index_path)?;
        let index_type_path = index_path.join("type");
        fs::write(&index_type_path, index_store.name()).context(&index_type_path)?;
        let index_store: Arc<dyn IndexStore> = Arc::from(index_store);

        let submodule_store_path = repo_path.join("submodule_store");
        fs::create_dir(&submodule_store_path).context(&submodule_store_path)?;
        let submodule_store = submodule_store_initializer(settings, &submodule_store_path)?;
        let submodule_store_type_path = submodule_store_path.join("type");
        fs::write(&submodule_store_type_path, submodule_store.name())
            .context(&submodule_store_type_path)?;
        let submodule_store = Arc::from(submodule_store);

        let loader = RepoLoader {
            settings: settings.clone(),
            store,
            op_store,
            op_heads_store,
            index_store,
            submodule_store,
        };

        let root_operation = loader.root_operation();
        let root_view = root_operation.view().expect("failed to read root view");
        assert!(!root_view.heads().is_empty());
        let index = loader
            .index_store
            .get_index_at_op(&root_operation, &loader.store)
            // If the root op index couldn't be read, the index backend wouldn't
            // be initialized properly.
            .map_err(|err| BackendInitError(err.into()))?;
        Ok(Arc::new(Self {
            loader,
            operation: root_operation,
            index,
            change_id_index: OnceCell::new(),
            view: root_view,
        }))
    }

    pub fn loader(&self) -> &RepoLoader {
        &self.loader
    }

    pub fn op_id(&self) -> &OperationId {
        self.operation.id()
    }

    pub fn operation(&self) -> &Operation {
        &self.operation
    }

    pub fn view(&self) -> &View {
        &self.view
    }

    pub fn readonly_index(&self) -> &dyn ReadonlyIndex {
        self.index.as_ref()
    }

    fn change_id_index(&self) -> &dyn ChangeIdIndex {
        self.change_id_index
            .get_or_init(|| {
                self.readonly_index()
                    .change_id_index(&mut self.view().heads().iter())
            })
            .as_ref()
    }

    pub fn op_heads_store(&self) -> &Arc<dyn OpHeadsStore> {
        self.loader.op_heads_store()
    }

    pub fn index_store(&self) -> &Arc<dyn IndexStore> {
        self.loader.index_store()
    }

    pub fn settings(&self) -> &UserSettings {
        self.loader.settings()
    }

    pub fn start_transaction(self: &Arc<Self>) -> Transaction {
        let mut_repo = MutableRepo::new(self.clone(), self.readonly_index(), &self.view);
        Transaction::new(mut_repo, self.settings())
    }

    pub fn reload_at_head(&self) -> Result<Arc<Self>, RepoLoaderError> {
        self.loader().load_at_head()
    }

    #[instrument]
    pub fn reload_at(&self, operation: &Operation) -> Result<Arc<Self>, RepoLoaderError> {
        self.loader().load_at(operation)
    }
}
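
// A minimal initialization sketch. `settings` and `repo_path` are assumed to
// exist, and the backend closure stands in for a real backend initializer
// (`SimpleBackend::init` here is an assumption, not defined in this file):
//
//     let repo = ReadonlyRepo::init(
//         &settings,
//         &repo_path,
//         &|_settings, store_path| Ok(Box::new(SimpleBackend::init(store_path))),
//         Signer::from_settings(&settings)?,
//         ReadonlyRepo::default_op_store_initializer(),
//         ReadonlyRepo::default_op_heads_store_initializer(),
//         ReadonlyRepo::default_index_store_initializer(),
//         ReadonlyRepo::default_submodule_store_initializer(),
//     )?;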

impl Repo for ReadonlyRepo {
    fn base_repo(&self) -> &ReadonlyRepo {
        self
    }

    fn store(&self) -> &Arc<Store> {
        self.loader.store()
    }

    fn op_store(&self) -> &Arc<dyn OpStore> {
        self.loader.op_store()
    }

    fn index(&self) -> &dyn Index {
        self.readonly_index().as_index()
    }

    fn view(&self) -> &View {
        &self.view
    }

    fn submodule_store(&self) -> &Arc<dyn SubmoduleStore> {
        self.loader.submodule_store()
    }

    fn resolve_change_id_prefix(&self, prefix: &HexPrefix) -> PrefixResolution<Vec<CommitId>> {
        self.change_id_index().resolve_prefix(prefix)
    }

    fn shortest_unique_change_id_prefix_len(&self, target_id: &ChangeId) -> usize {
        self.change_id_index().shortest_unique_prefix_len(target_id)
    }
}

pub type BackendInitializer<'a> =
    dyn Fn(&UserSettings, &Path) -> Result<Box<dyn Backend>, BackendInitError> + 'a;
#[rustfmt::skip] // auto-formatted line would exceed the maximum width
pub type OpStoreInitializer<'a> =
    dyn Fn(&UserSettings, &Path, RootOperationData) -> Result<Box<dyn OpStore>, BackendInitError>
    + 'a;
pub type OpHeadsStoreInitializer<'a> =
    dyn Fn(&UserSettings, &Path) -> Result<Box<dyn OpHeadsStore>, BackendInitError> + 'a;
pub type IndexStoreInitializer<'a> =
    dyn Fn(&UserSettings, &Path) -> Result<Box<dyn IndexStore>, BackendInitError> + 'a;
pub type SubmoduleStoreInitializer<'a> =
    dyn Fn(&UserSettings, &Path) -> Result<Box<dyn SubmoduleStore>, BackendInitError> + 'a;

type BackendFactory =
    Box<dyn Fn(&UserSettings, &Path) -> Result<Box<dyn Backend>, BackendLoadError>>;
type OpStoreFactory = Box<
    dyn Fn(&UserSettings, &Path, RootOperationData) -> Result<Box<dyn OpStore>, BackendLoadError>,
>;
type OpHeadsStoreFactory =
    Box<dyn Fn(&UserSettings, &Path) -> Result<Box<dyn OpHeadsStore>, BackendLoadError>>;
type IndexStoreFactory =
    Box<dyn Fn(&UserSettings, &Path) -> Result<Box<dyn IndexStore>, BackendLoadError>>;
type SubmoduleStoreFactory =
    Box<dyn Fn(&UserSettings, &Path) -> Result<Box<dyn SubmoduleStore>, BackendLoadError>>;

pub fn merge_factories_map<F>(base: &mut HashMap<String, F>, ext: HashMap<String, F>) {
    for (name, factory) in ext {
        match base.entry(name) {
            Entry::Vacant(v) => {
                v.insert(factory);
            }
            Entry::Occupied(o) => {
                panic!("Conflicting factory definitions for '{}' factory", o.key())
            }
        }
    }
}
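
// Sketch: merging extension-provided factories into a base map; panics on a
// duplicate name (`ext_factories` is illustrative):
//
//     let mut backend_factories: HashMap<String, BackendFactory> = HashMap::new();
//     merge_factories_map(&mut backend_factories, ext_factories);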

pub struct StoreFactories {
    backend_factories: HashMap<String, BackendFactory>,
    op_store_factories: HashMap<String, OpStoreFactory>,
    op_heads_store_factories: HashMap<String, OpHeadsStoreFactory>,
    index_store_factories: HashMap<String, IndexStoreFactory>,
    submodule_store_factories: HashMap<String, SubmoduleStoreFactory>,
}

impl Default for StoreFactories {
    fn default() -> Self {
        let mut factories = Self::empty();

        // Backends
        factories.add_backend(
            SimpleBackend::name(),
            Box::new(|_settings, store_path| Ok(Box::new(SimpleBackend::load(store_path)))),
        );
        #[cfg(feature = "git")]
        factories.add_backend(
            crate::git_backend::GitBackend::name(),
            Box::new(|settings, store_path| {
                Ok(Box::new(crate::git_backend::GitBackend::load(
                    settings, store_path,
                )?))
            }),
        );
        #[cfg(feature = "testing")]
        factories.add_backend(
            crate::secret_backend::SecretBackend::name(),
            Box::new(|settings, store_path| {
                Ok(Box::new(crate::secret_backend::SecretBackend::load(
                    settings, store_path,
                )?))
            }),
        );

        // OpStores
        factories.add_op_store(
            SimpleOpStore::name(),
            Box::new(|_settings, store_path, root_data| {
                Ok(Box::new(SimpleOpStore::load(store_path, root_data)))
            }),
        );

        // OpHeadsStores
        factories.add_op_heads_store(
            SimpleOpHeadsStore::name(),
            Box::new(|_settings, store_path| Ok(Box::new(SimpleOpHeadsStore::load(store_path)))),
        );

        // Index
        factories.add_index_store(
            DefaultIndexStore::name(),
            Box::new(|_settings, store_path| Ok(Box::new(DefaultIndexStore::load(store_path)))),
        );

        // SubmoduleStores
        factories.add_submodule_store(
            DefaultSubmoduleStore::name(),
            Box::new(|_settings, store_path| Ok(Box::new(DefaultSubmoduleStore::load(store_path)))),
        );

        factories
    }
}
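
// Sketch: registering an extra backend on top of the defaults. The
// "my-backend" name and `MyBackend::load` are hypothetical stand-ins:
//
//     let mut factories = StoreFactories::default();
//     factories.add_backend(
//         "my-backend",
//         Box::new(|_settings, store_path| Ok(Box::new(MyBackend::load(store_path)))),
//     );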

#[derive(Debug, Error)]
pub enum StoreLoadError {
    #[error("Unsupported {store} backend type '{store_type}'")]
    UnsupportedType {
        store: &'static str,
        store_type: String,
    },
    #[error("Failed to read {store} backend type")]
    ReadError {
        store: &'static str,
        source: PathError,
    },
    #[error(transparent)]
    Backend(#[from] BackendLoadError),
    #[error(transparent)]
    Signing(#[from] SignInitError),
}

impl StoreFactories {
    pub fn empty() -> Self {
        Self {
            backend_factories: HashMap::new(),
            op_store_factories: HashMap::new(),
            op_heads_store_factories: HashMap::new(),
            index_store_factories: HashMap::new(),
            submodule_store_factories: HashMap::new(),
        }
    }

    pub fn merge(&mut self, ext: Self) {
        let Self {
            backend_factories,
            op_store_factories,
            op_heads_store_factories,
            index_store_factories,
            submodule_store_factories,
        } = ext;

        merge_factories_map(&mut self.backend_factories, backend_factories);
        merge_factories_map(&mut self.op_store_factories, op_store_factories);
        merge_factories_map(&mut self.op_heads_store_factories, op_heads_store_factories);
        merge_factories_map(&mut self.index_store_factories, index_store_factories);
        merge_factories_map(
            &mut self.submodule_store_factories,
            submodule_store_factories,
        );
    }

    pub fn add_backend(&mut self, name: &str, factory: BackendFactory) {
        self.backend_factories.insert(name.to_string(), factory);
    }

    pub fn load_backend(
        &self,
        settings: &UserSettings,
        store_path: &Path,
    ) -> Result<Box<dyn Backend>, StoreLoadError> {
        let backend_type = read_store_type("commit", store_path.join("type"))?;
        let backend_factory = self.backend_factories.get(&backend_type).ok_or_else(|| {
            StoreLoadError::UnsupportedType {
                store: "commit",
                store_type: backend_type.clone(),
            }
        })?;
        Ok(backend_factory(settings, store_path)?)
    }

    pub fn add_op_store(&mut self, name: &str, factory: OpStoreFactory) {
        self.op_store_factories.insert(name.to_string(), factory);
    }

    pub fn load_op_store(
        &self,
        settings: &UserSettings,
        store_path: &Path,
        root_data: RootOperationData,
    ) -> Result<Box<dyn OpStore>, StoreLoadError> {
        let op_store_type = read_store_type("operation", store_path.join("type"))?;
        let op_store_factory = self.op_store_factories.get(&op_store_type).ok_or_else(|| {
            StoreLoadError::UnsupportedType {
                store: "operation",
                store_type: op_store_type.clone(),
            }
        })?;
        Ok(op_store_factory(settings, store_path, root_data)?)
    }

    pub fn add_op_heads_store(&mut self, name: &str, factory: OpHeadsStoreFactory) {
        self.op_heads_store_factories
            .insert(name.to_string(), factory);
    }

    pub fn load_op_heads_store(
        &self,
        settings: &UserSettings,
        store_path: &Path,
    ) -> Result<Box<dyn OpHeadsStore>, StoreLoadError> {
        let op_heads_store_type = read_store_type("operation heads", store_path.join("type"))?;
        let op_heads_store_factory = self
            .op_heads_store_factories
            .get(&op_heads_store_type)
            .ok_or_else(|| StoreLoadError::UnsupportedType {
                store: "operation heads",
                store_type: op_heads_store_type.clone(),
            })?;
        Ok(op_heads_store_factory(settings, store_path)?)
    }

    pub fn add_index_store(&mut self, name: &str, factory: IndexStoreFactory) {
        self.index_store_factories.insert(name.to_string(), factory);
    }

    pub fn load_index_store(
        &self,
        settings: &UserSettings,
        store_path: &Path,
    ) -> Result<Box<dyn IndexStore>, StoreLoadError> {
        let index_store_type = read_store_type("index", store_path.join("type"))?;
        let index_store_factory = self
            .index_store_factories
            .get(&index_store_type)
            .ok_or_else(|| StoreLoadError::UnsupportedType {
                store: "index",
                store_type: index_store_type.clone(),
            })?;
        Ok(index_store_factory(settings, store_path)?)
    }

    pub fn add_submodule_store(&mut self, name: &str, factory: SubmoduleStoreFactory) {
        self.submodule_store_factories
            .insert(name.to_string(), factory);
    }

    pub fn load_submodule_store(
        &self,
        settings: &UserSettings,
        store_path: &Path,
    ) -> Result<Box<dyn SubmoduleStore>, StoreLoadError> {
        let submodule_store_type = read_store_type("submodule_store", store_path.join("type"))?;
        let submodule_store_factory = self
            .submodule_store_factories
            .get(&submodule_store_type)
            .ok_or_else(|| StoreLoadError::UnsupportedType {
                store: "submodule_store",
                store_type: submodule_store_type.clone(),
            })?;

        Ok(submodule_store_factory(settings, store_path)?)
    }
}

pub fn read_store_type(
    store: &'static str,
    path: impl AsRef<Path>,
) -> Result<String, StoreLoadError> {
    let path = path.as_ref();
    fs::read_to_string(path)
        .context(path)
        .map_err(|source| StoreLoadError::ReadError { store, source })
}
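
// Sketch: each store directory records its implementation in a `type` file
// written at init time, so a load looks like (`repo_path` is illustrative):
//
//     let index_type = read_store_type("index", repo_path.join("index").join("type"))?;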

#[derive(Debug, Error)]
pub enum RepoLoaderError {
    #[error(transparent)]
    Backend(#[from] BackendError),
    #[error(transparent)]
    IndexRead(#[from] IndexReadError),
    #[error(transparent)]
    OpHeadResolution(#[from] OpHeadResolutionError),
    #[error(transparent)]
    OpHeadsStoreError(#[from] OpHeadsStoreError),
    #[error(transparent)]
    OpStore(#[from] OpStoreError),
    #[error(transparent)]
    TransactionCommit(#[from] TransactionCommitError),
}

/// Helps create `ReadonlyRepo` instances of a repo at the head operation or at
/// a given operation.
#[derive(Clone)]
pub struct RepoLoader {
    settings: UserSettings,
    store: Arc<Store>,
    op_store: Arc<dyn OpStore>,
    op_heads_store: Arc<dyn OpHeadsStore>,
    index_store: Arc<dyn IndexStore>,
    submodule_store: Arc<dyn SubmoduleStore>,
}

impl RepoLoader {
    pub fn new(
        settings: UserSettings,
        store: Arc<Store>,
        op_store: Arc<dyn OpStore>,
        op_heads_store: Arc<dyn OpHeadsStore>,
        index_store: Arc<dyn IndexStore>,
        submodule_store: Arc<dyn SubmoduleStore>,
    ) -> Self {
        Self {
            settings,
            store,
            op_store,
            op_heads_store,
            index_store,
            submodule_store,
        }
    }

    /// Creates a `RepoLoader` for the repo at `repo_path` by reading the
    /// various `.jj/repo/<backend>/type` files and loading the right
    /// backends from `store_factories`.
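    ///
    /// A loading sketch, assuming `settings` exists and `repo_path` points at
    /// `.jj/repo`:
    ///
    /// ```ignore
    /// let loader = RepoLoader::init_from_file_system(
    ///     &settings,
    ///     &repo_path,
    ///     &StoreFactories::default(),
    /// )?;
    /// let repo = loader.load_at_head()?;
    /// ```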
    pub fn init_from_file_system(
        settings: &UserSettings,
        repo_path: &Path,
        store_factories: &StoreFactories,
    ) -> Result<Self, StoreLoadError> {
        let store = Store::new(
            store_factories.load_backend(settings, &repo_path.join("store"))?,
            Signer::from_settings(settings)?,
        );
        let root_op_data = RootOperationData {
            root_commit_id: store.root_commit_id().clone(),
        };
        let op_store = Arc::from(store_factories.load_op_store(
            settings,
            &repo_path.join("op_store"),
            root_op_data,
        )?);
        let op_heads_store =
            Arc::from(store_factories.load_op_heads_store(settings, &repo_path.join("op_heads"))?);
        let index_store =
            Arc::from(store_factories.load_index_store(settings, &repo_path.join("index"))?);
        let submodule_store = Arc::from(
            store_factories.load_submodule_store(settings, &repo_path.join("submodule_store"))?,
        );
        Ok(Self {
            settings: settings.clone(),
            store,
            op_store,
            op_heads_store,
            index_store,
            submodule_store,
        })
    }

    pub fn settings(&self) -> &UserSettings {
        &self.settings
    }

    pub fn store(&self) -> &Arc<Store> {
        &self.store
    }

    pub fn index_store(&self) -> &Arc<dyn IndexStore> {
        &self.index_store
    }

    pub fn op_store(&self) -> &Arc<dyn OpStore> {
        &self.op_store
    }

    pub fn op_heads_store(&self) -> &Arc<dyn OpHeadsStore> {
        &self.op_heads_store
    }

    pub fn submodule_store(&self) -> &Arc<dyn SubmoduleStore> {
        &self.submodule_store
    }

    pub fn load_at_head(&self) -> Result<Arc<ReadonlyRepo>, RepoLoaderError> {
        let op = op_heads_store::resolve_op_heads(
            self.op_heads_store.as_ref(),
            &self.op_store,
            |op_heads| self._resolve_op_heads(op_heads),
        )?;
        let view = op.view()?;
        self._finish_load(op, view)
    }

    #[instrument(skip(self))]
    pub fn load_at(&self, op: &Operation) -> Result<Arc<ReadonlyRepo>, RepoLoaderError> {
        let view = op.view()?;
        self._finish_load(op.clone(), view)
    }

    pub fn create_from(
        &self,
        operation: Operation,
        view: View,
        index: Box<dyn ReadonlyIndex>,
    ) -> Arc<ReadonlyRepo> {
        let repo = ReadonlyRepo {
            loader: self.clone(),
            operation,
            index,
            change_id_index: OnceCell::new(),
            view,
        };
        Arc::new(repo)
    }

    // If we add a higher-level abstraction of OpStore, root_operation() and
    // load_operation() will be moved there.

    /// Returns the root operation.
    pub fn root_operation(&self) -> Operation {
        self.load_operation(self.op_store.root_operation_id())
            .expect("failed to read root operation")
    }

    /// Loads the specified operation from the operation store.
    pub fn load_operation(&self, id: &OperationId) -> OpStoreResult<Operation> {
        let data = self.op_store.read_operation(id)?;
        Ok(Operation::new(self.op_store.clone(), id.clone(), data))
    }

    /// Merges the given `operations` into a single operation. Returns the root
    /// operation if `operations` is empty.
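    ///
    /// A sketch: reconciling divergent operation heads into a single operation
    /// (`op_heads` is illustrative):
    ///
    /// ```ignore
    /// let merged_op =
    ///     loader.merge_operations(op_heads, Some("reconcile divergent operations"))?;
    /// ```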
    pub fn merge_operations(
        &self,
        operations: Vec<Operation>,
        tx_description: Option<&str>,
    ) -> Result<Operation, RepoLoaderError> {
        let num_operations = operations.len();
        let mut operations = operations.into_iter();
        let Some(base_op) = operations.next() else {
            return Ok(self.root_operation());
        };
        let final_op = if num_operations > 1 {
            let base_repo = self.load_at(&base_op)?;
            let mut tx = base_repo.start_transaction();
            for other_op in operations {
                tx.merge_operation(other_op)?;
                tx.repo_mut().rebase_descendants()?;
            }
            let tx_description = tx_description.map_or_else(
                || format!("merge {num_operations} operations"),
                |tx_description| tx_description.to_string(),
            );
            let merged_repo = tx.write(tx_description)?.leave_unpublished();
            merged_repo.operation().clone()
        } else {
            base_op
        };

        Ok(final_op)
    }

    fn _resolve_op_heads(&self, op_heads: Vec<Operation>) -> Result<Operation, RepoLoaderError> {
        assert!(!op_heads.is_empty());
        self.merge_operations(op_heads, Some("reconcile divergent operations"))
    }

    fn _finish_load(
        &self,
        operation: Operation,
        view: View,
    ) -> Result<Arc<ReadonlyRepo>, RepoLoaderError> {
        let index = self.index_store.get_index_at_op(&operation, &self.store)?;
        let repo = ReadonlyRepo {
            loader: self.clone(),
            operation,
            index,
            change_id_index: OnceCell::new(),
            view,
        };
        Ok(Arc::new(repo))
    }
}

#[derive(Clone, Debug, PartialEq, Eq)]
enum Rewrite {
    /// The old commit was rewritten as this new commit. Children should be
    /// rebased onto the new commit.
    Rewritten(CommitId),
    /// The old commit was rewritten as multiple other commits. Children should
    /// not be rebased.
    Divergent(Vec<CommitId>),
    /// The old commit was abandoned. Children should be rebased onto the given
    /// commits (typically the parents of the old commit).
    Abandoned(Vec<CommitId>),
}

impl Rewrite {
    fn new_parent_ids(&self) -> &[CommitId] {
        match self {
            Self::Rewritten(new_parent_id) => std::slice::from_ref(new_parent_id),
            Self::Divergent(new_parent_ids) => new_parent_ids.as_slice(),
            Self::Abandoned(new_parent_ids) => new_parent_ids.as_slice(),
        }
    }
}

pub struct MutableRepo {
    base_repo: Arc<ReadonlyRepo>,
    index: Box<dyn MutableIndex>,
    view: DirtyCell<View>,
    /// Mapping from new commit to its predecessors.
    ///
    /// This is similar to (the reverse of) `parent_mapping`, but
    /// `commit_predecessors` will never be cleared on `rebase_descendants()`.
    commit_predecessors: BTreeMap<CommitId, Vec<CommitId>>,
    // The commit identified by the key has been replaced by all the ones in the value.
    // * Bookmarks pointing to the old commit should be updated to the new commit, resulting in a
    //   conflict if there are multiple new commits.
    // * Children of the old commit should be rebased onto the new commits. However, if the type is
    //   `Divergent`, they should be left in place.
    // * Working copies pointing to the old commit should be updated to the first of the new
    //   commits. However, if the type is `Abandoned`, a new working-copy commit should be created
    //   on top of all of the new commits instead.
    parent_mapping: HashMap<CommitId, Rewrite>,
}

impl MutableRepo {
    pub fn new(base_repo: Arc<ReadonlyRepo>, index: &dyn ReadonlyIndex, view: &View) -> Self {
        let mut_view = view.clone();
        let mut_index = index.start_modification();
        Self {
            base_repo,
            index: mut_index,
            view: DirtyCell::with_clean(mut_view),
            commit_predecessors: Default::default(),
            parent_mapping: Default::default(),
        }
    }

    pub fn base_repo(&self) -> &Arc<ReadonlyRepo> {
        &self.base_repo
    }

    fn view_mut(&mut self) -> &mut View {
        self.view.get_mut()
    }

    pub fn mutable_index(&self) -> &dyn MutableIndex {
        self.index.as_ref()
    }

    pub(crate) fn is_backed_by_default_index(&self) -> bool {
        self.index.as_any().is::<DefaultMutableIndex>()
    }

    pub fn has_changes(&self) -> bool {
        self.view.ensure_clean(|v| self.enforce_view_invariants(v));
        !(self.commit_predecessors.is_empty()
            && self.parent_mapping.is_empty()
            && self.view() == &self.base_repo.view)
    }

    pub(crate) fn consume(
        self,
    ) -> (
        Box<dyn MutableIndex>,
        View,
        BTreeMap<CommitId, Vec<CommitId>>,
    ) {
        self.view.ensure_clean(|v| self.enforce_view_invariants(v));
        (self.index, self.view.into_inner(), self.commit_predecessors)
    }

    /// Returns a [`CommitBuilder`] to write a new commit to the repo.
    pub fn new_commit(
        &mut self,
        parents: Vec<CommitId>,
        tree_id: MergedTreeId,
    ) -> CommitBuilder<'_> {
        let settings = self.base_repo.settings();
        DetachedCommitBuilder::for_new_commit(self, settings, parents, tree_id).attach(self)
    }

    /// Returns a [`CommitBuilder`] to rewrite an existing commit in the repo.
    pub fn rewrite_commit(&mut self, predecessor: &Commit) -> CommitBuilder<'_> {
        let settings = self.base_repo.settings();
        DetachedCommitBuilder::for_rewrite_from(self, settings, predecessor).attach(self)
        // CommitBuilder::write will record the rewrite in
        // `self.parent_mapping`.
    }

    pub(crate) fn set_predecessors(&mut self, id: CommitId, predecessors: Vec<CommitId>) {
        self.commit_predecessors.insert(id, predecessors);
    }

    /// Record a commit as having been rewritten to another commit in this
    /// transaction.
    ///
    /// This record is used by `rebase_descendants` to know which commits have
    /// children that need to be rebased, and where to rebase them to. See the
    /// comment on `parent_mapping` for details.
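    ///
    /// A usage sketch (`old_commit` and `new_commit` are illustrative):
    ///
    /// ```ignore
    /// mut_repo.set_rewritten_commit(old_commit.id().clone(), new_commit.id().clone());
    /// mut_repo.rebase_descendants()?;
    /// ```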
    pub fn set_rewritten_commit(&mut self, old_id: CommitId, new_id: CommitId) {
        assert_ne!(old_id, *self.store().root_commit_id());
        self.parent_mapping
            .insert(old_id, Rewrite::Rewritten(new_id));
    }

    /// Record a commit as being rewritten into multiple other commits in this
    /// transaction.
    ///
    /// A later call to `rebase_descendants()` will update bookmarks pointing to
    /// `old_id` to be conflicted and pointing to all of `new_ids`. Working
    /// copies pointing to `old_id` will be updated to point to the first commit
    /// in `new_ids`. Descendants of `old_id` will be left alone.
    pub fn set_divergent_rewrite(
        &mut self,
        old_id: CommitId,
        new_ids: impl IntoIterator<Item = CommitId>,
    ) {
        assert_ne!(old_id, *self.store().root_commit_id());
        self.parent_mapping.insert(
            old_id.clone(),
            Rewrite::Divergent(new_ids.into_iter().collect()),
        );
    }

    /// Record a commit as having been abandoned in this transaction.
    ///
    /// This record is used by `rebase_descendants` to know which commits have
    /// children that need to be rebased, and where to rebase the children to.
    ///
    /// The `rebase_descendants` logic will rebase the descendants of the old
    /// commit to become the descendants of parent(s) of the old commit. Any
    /// bookmarks at the old commit will be either moved to the parent(s) of the
    /// old commit or deleted depending on [`RewriteRefsOptions`].
    pub fn record_abandoned_commit(&mut self, old_commit: &Commit) {
        assert_ne!(old_commit.id(), self.store().root_commit_id());
        // Descendants should be rebased onto the commit's parents
        self.record_abandoned_commit_with_parents(
            old_commit.id().clone(),
            old_commit.parent_ids().iter().cloned(),
        );
    }

    /// Record a commit as having been abandoned in this transaction.
    ///
    /// A later `rebase_descendants()` will rebase children of `old_id` onto
    /// `new_parent_ids`. A working copy pointing to `old_id` will point to a
    /// new commit on top of `new_parent_ids`.
    pub fn record_abandoned_commit_with_parents(
        &mut self,
        old_id: CommitId,
        new_parent_ids: impl IntoIterator<Item = CommitId>,
    ) {
        assert_ne!(old_id, *self.store().root_commit_id());
        self.parent_mapping.insert(
            old_id,
            Rewrite::Abandoned(new_parent_ids.into_iter().collect()),
        );
    }

    pub fn has_rewrites(&self) -> bool {
        !self.parent_mapping.is_empty()
    }

    /// Calculates new parents for a commit that's currently based on the given
    /// parents. It does that by considering how previous commits have been
    /// rewritten and abandoned.
    ///
    /// If `parent_mapping` contains cycles, this function may either panic or
    /// drop parents that caused cycles.
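    ///
    /// A sketch, assuming `a_id` was recorded as rewritten to `b_id` earlier in
    /// this transaction (all IDs are illustrative):
    ///
    /// ```ignore
    /// let new_parents = mut_repo.new_parents(&[a_id, c_id]);
    /// assert_eq!(new_parents, vec![b_id, c_id]);
    /// ```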
    pub fn new_parents(&self, old_ids: &[CommitId]) -> Vec<CommitId> {
        self.rewritten_ids_with(old_ids, |rewrite| !matches!(rewrite, Rewrite::Divergent(_)))
    }

    fn rewritten_ids_with(
        &self,
        old_ids: &[CommitId],
        mut predicate: impl FnMut(&Rewrite) -> bool,
    ) -> Vec<CommitId> {
        assert!(!old_ids.is_empty());
        let mut new_ids = Vec::with_capacity(old_ids.len());
        let mut to_visit = old_ids.iter().rev().collect_vec();
        let mut visited = HashSet::new();
        while let Some(id) = to_visit.pop() {
            if !visited.insert(id) {
                continue;
            }
            match self.parent_mapping.get(id).filter(|&v| predicate(v)) {
                None => {
                    new_ids.push(id.clone());
                }
                Some(rewrite) => {
                    let replacements = rewrite.new_parent_ids();
                    assert!(
                        // Each commit must have a parent, so a parent can
                        // not just be mapped to nothing. This assertion
                        // could be removed if this function is used for
                        // mapping something other than a commit's parents.
                        !replacements.is_empty(),
                        "Found empty value for key {id:?} in the parent mapping",
                    );
                    to_visit.extend(replacements.iter().rev());
                }
            }
        }
        assert!(
            !new_ids.is_empty(),
            "new ids become empty because of cycle in the parent mapping"
        );
        debug_assert!(new_ids.iter().all_unique());
        new_ids
    }

    /// Fully resolves transitive replacements in `parent_mapping`.
    ///
    /// If `parent_mapping` contains cycles, this function will panic.
    fn resolve_rewrite_mapping_with(
        &self,
        mut predicate: impl FnMut(&Rewrite) -> bool,
    ) -> HashMap<CommitId, Vec<CommitId>> {
        let sorted_ids = dag_walk::topo_order_forward(
            self.parent_mapping.keys(),
            |&id| id,
            |&id| match self.parent_mapping.get(id).filter(|&v| predicate(v)) {
                None => &[],
                Some(rewrite) => rewrite.new_parent_ids(),
            },
        );
        let mut new_mapping: HashMap<CommitId, Vec<CommitId>> = HashMap::new();
        for old_id in sorted_ids {
            let Some(rewrite) = self.parent_mapping.get(old_id).filter(|&v| predicate(v)) else {
                continue;
            };
            let lookup = |id| new_mapping.get(id).map_or(slice::from_ref(id), |ids| ids);
            let new_ids = match rewrite.new_parent_ids() {
                [id] => lookup(id).to_vec(), // unique() not needed
                ids => ids.iter().flat_map(lookup).unique().cloned().collect(),
            };
            debug_assert_eq!(
                new_ids,
                self.rewritten_ids_with(slice::from_ref(old_id), &mut predicate)
            );
            new_mapping.insert(old_id.clone(), new_ids);
        }
        new_mapping
    }

    /// Updates bookmarks, working copies, and anonymous heads after rewriting
    /// and/or abandoning commits.
    pub fn update_rewritten_references(
        &mut self,
        options: &RewriteRefsOptions,
    ) -> BackendResult<()> {
        self.update_all_references(options)?;
        self.update_heads()
            .map_err(|err| err.into_backend_error())?;
        Ok(())
    }

    fn update_all_references(&mut self, options: &RewriteRefsOptions) -> BackendResult<()> {
        let rewrite_mapping = self.resolve_rewrite_mapping_with(|_| true);
        self.update_local_bookmarks(&rewrite_mapping, options);
        self.update_wc_commits(&rewrite_mapping)?;
        Ok(())
    }

    fn update_local_bookmarks(
        &mut self,
        rewrite_mapping: &HashMap<CommitId, Vec<CommitId>>,
        options: &RewriteRefsOptions,
    ) {
        let changed_branches = self
            .view()
            .local_bookmarks()
            .flat_map(|(name, target)| {
                target.added_ids().filter_map(|id| {
                    let change = rewrite_mapping.get_key_value(id)?;
                    Some((name.to_owned(), change))
                })
            })
            .collect_vec();
        for (bookmark_name, (old_commit_id, new_commit_ids)) in changed_branches {
            let should_delete = options.delete_abandoned_bookmarks
                && matches!(
                    self.parent_mapping.get(old_commit_id),
                    Some(Rewrite::Abandoned(_))
                );
            let old_target = RefTarget::normal(old_commit_id.clone());
            let new_target = if should_delete {
                RefTarget::absent()
            } else {
                let ids = itertools::intersperse(new_commit_ids, old_commit_id)
                    .map(|id| Some(id.clone()));
                RefTarget::from_merge(MergeBuilder::from_iter(ids).build())
            };

            self.merge_local_bookmark(&bookmark_name, &old_target, &new_target);
        }
    }

    fn update_wc_commits(
        &mut self,
        rewrite_mapping: &HashMap<CommitId, Vec<CommitId>>,
    ) -> BackendResult<()> {
        let changed_wc_commits = self
            .view()
            .wc_commit_ids()
            .iter()
            .filter_map(|(name, commit_id)| {
                let change = rewrite_mapping.get_key_value(commit_id)?;
                Some((name.to_owned(), change))
            })
            .collect_vec();
        let mut recreated_wc_commits: HashMap<&CommitId, Commit> = HashMap::new();
        for (name, (old_commit_id, new_commit_ids)) in changed_wc_commits {
            let abandoned_old_commit = matches!(
                self.parent_mapping.get(old_commit_id),
                Some(Rewrite::Abandoned(_))
            );
            let new_wc_commit = if !abandoned_old_commit {
                // We arbitrarily pick a new working-copy commit among the candidates.
                self.store().get_commit(&new_commit_ids[0])?
            } else if let Some(commit) = recreated_wc_commits.get(old_commit_id) {
                commit.clone()
            } else {
                let new_commits: Vec<_> = new_commit_ids
                    .iter()
                    .map(|id| self.store().get_commit(id))
                    .try_collect()?;
                let merged_parents_tree = merge_commit_trees(self, &new_commits).block_on()?;
                let commit = self
                    .new_commit(new_commit_ids.clone(), merged_parents_tree.id().clone())
                    .write()?;
                recreated_wc_commits.insert(old_commit_id, commit.clone());
                commit
            };
            self.edit(name, &new_wc_commit).map_err(|err| match err {
                EditCommitError::BackendError(backend_error) => backend_error,
                EditCommitError::WorkingCopyCommitNotFound(_)
                | EditCommitError::RewriteRootCommit(_) => panic!("unexpected error: {err:?}"),
            })?;
        }
        Ok(())
    }

    fn update_heads(&mut self) -> Result<(), RevsetEvaluationError> {
        let old_commits_expression =
            RevsetExpression::commits(self.parent_mapping.keys().cloned().collect());
        let heads_to_add_expression = old_commits_expression
            .parents()
            .minus(&old_commits_expression);
        let heads_to_add: Vec<_> = heads_to_add_expression
            .evaluate(self)?
            .iter()
            .try_collect()?;

        let mut view = self.view().store_view().clone();
        for commit_id in self.parent_mapping.keys() {
            view.head_ids.remove(commit_id);
        }
        view.head_ids.extend(heads_to_add);
        self.set_view(view);
        Ok(())
    }

    /// Find descendants of the given `roots`, unless they've already been
    /// rewritten (according to `parent_mapping`).
    pub fn find_descendants_for_rebase(&self, roots: Vec<CommitId>) -> BackendResult<Vec<Commit>> {
        let to_visit_revset = RevsetExpression::commits(roots)
            .descendants()
            .minus(&RevsetExpression::commits(
                self.parent_mapping.keys().cloned().collect(),
            ))
            .evaluate(self)
            .map_err(|err| err.into_backend_error())?;
        let to_visit = to_visit_revset
            .iter()
            .commits(self.store())
            .try_collect()
            .map_err(|err| err.into_backend_error())?;
        Ok(to_visit)
    }

    /// Orders a set of commits into the order they should be rebased in. The
    /// result is in reverse order so the next value can be removed from the
    /// end.
    fn order_commits_for_rebase(
        &self,
        to_visit: Vec<Commit>,
        new_parents_map: &HashMap<CommitId, Vec<CommitId>>,
    ) -> BackendResult<Vec<Commit>> {
        let to_visit_set: HashSet<CommitId> =
            to_visit.iter().map(|commit| commit.id().clone()).collect();
        let mut visited = HashSet::new();
        // Calculate an order where we rebase parents first, but if the parents were
        // rewritten, make sure we rebase the rewritten parent first.
        let store = self.store();
        dag_walk::topo_order_reverse_ok(
            to_visit.into_iter().map(Ok),
            |commit| commit.id().clone(),
            |commit| -> Vec<BackendResult<Commit>> {
                visited.insert(commit.id().clone());
                let mut dependents = vec![];
                let parent_ids = new_parents_map
                    .get(commit.id())
                    .map_or(commit.parent_ids(), |parent_ids| parent_ids);
                for parent_id in parent_ids {
                    let parent = store.get_commit(parent_id);
                    let Ok(parent) = parent else {
                        dependents.push(parent);
                        continue;
                    };
                    if let Some(rewrite) = self.parent_mapping.get(parent.id()) {
                        for target in rewrite.new_parent_ids() {
                            if to_visit_set.contains(target) && !visited.contains(target) {
                                dependents.push(store.get_commit(target));
                            }
                        }
                    }
                    if to_visit_set.contains(parent.id()) {
                        dependents.push(Ok(parent));
                    }
                }
                dependents
            },
            |_| panic!("graph has cycle"),
        )
    }

    /// Rewrite descendants of the given roots.
    ///
    /// The callback will be called for each commit with the new parents
    /// prepopulated. The callback may change the parents and write the new
    /// commit, or it may abandon the commit, or it may leave the old commit
    /// unchanged.
    ///
    /// The set of commits to visit is determined at the start. If the callback
    /// adds new descendants, then the callback will not be called for those.
    /// Similarly, if the callback rewrites unrelated commits, then the callback
    /// will not be called for descendants of those commits.
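    ///
    /// A sketch that reparents every visited descendant, mirroring
    /// [`Self::reparent_descendants()`] below:
    ///
    /// ```ignore
    /// mut_repo.transform_descendants(roots, async |rewriter| {
    ///     if rewriter.parents_changed() {
    ///         rewriter.reparent().write()?;
    ///     }
    ///     Ok(())
    /// })?;
    /// ```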
    pub fn transform_descendants(
        &mut self,
        roots: Vec<CommitId>,
        callback: impl AsyncFnMut(CommitRewriter) -> BackendResult<()>,
    ) -> BackendResult<()> {
        let options = RewriteRefsOptions::default();
        self.transform_descendants_with_options(roots, &HashMap::new(), &options, callback)
    }

    /// Rewrite descendants of the given roots with options.
    ///
    /// If an entry for a commit is present in `new_parents_map`, the commit
    /// will be rebased onto the new parents provided in the map instead of
    /// its original parents.
    ///
    /// See [`Self::transform_descendants()`] for details.
    pub fn transform_descendants_with_options(
        &mut self,
        roots: Vec<CommitId>,
        new_parents_map: &HashMap<CommitId, Vec<CommitId>>,
        options: &RewriteRefsOptions,
        callback: impl AsyncFnMut(CommitRewriter) -> BackendResult<()>,
    ) -> BackendResult<()> {
        let descendants = self.find_descendants_for_rebase(roots)?;
        self.transform_commits(descendants, new_parents_map, options, callback)
    }

    /// Rewrite the given commits in reverse topological order.
    ///
    /// `commits` should be a connected range.
    ///
    /// This function is similar to
    /// [`Self::transform_descendants_with_options()`], but only rewrites the
    /// `commits` provided, and does not rewrite their descendants.
    pub fn transform_commits(
        &mut self,
        commits: Vec<Commit>,
        new_parents_map: &HashMap<CommitId, Vec<CommitId>>,
        options: &RewriteRefsOptions,
        mut callback: impl AsyncFnMut(CommitRewriter) -> BackendResult<()>,
    ) -> BackendResult<()> {
        let mut to_visit = self.order_commits_for_rebase(commits, new_parents_map)?;
        while let Some(old_commit) = to_visit.pop() {
            let parent_ids = new_parents_map
                .get(old_commit.id())
                .map_or(old_commit.parent_ids(), |parent_ids| parent_ids);
            let new_parent_ids = self.new_parents(parent_ids);
            let rewriter = CommitRewriter::new(self, old_commit, new_parent_ids);
            callback(rewriter).block_on()?;
        }
        self.update_rewritten_references(options)?;
        // Since we didn't necessarily visit all descendants of rewritten commits (e.g.
        // if they were rewritten in the callback), there can still be commits left to
        // rebase, so we don't clear `parent_mapping` here.
        // TODO: Should we make this stricter? We could check that there were no
        // rewrites before this function was called, and we can check that only
        // commits in the `to_visit` set were added by the callback. Then we
        // could clear `parent_mapping` here and not have to scan it again at
        // the end of the transaction when we call `rebase_descendants()`.

        Ok(())
    }

    /// Rebase descendants of the rewritten commits with options and callback.
    ///
    /// The descendants of the commits registered in `self.parent_mapping` will
    /// be recursively rebased onto the new version of their parents.
    ///
    /// If `options.empty` is the default (`EmptyBehavior::Keep`), all rebased
    /// descendant commits will be preserved even if they were emptied following
    /// the rebase operation. Otherwise, this function may rebase some commits
    /// and abandon others, based on the given `EmptyBehavior`. The behavior is
    /// such that only commits with a single parent will ever be abandoned. The
    /// parent will inherit the descendants and the bookmarks of the abandoned
    /// commit.
    ///
    /// The `progress` callback will be invoked for each rebase operation with
    /// `(old_commit, rebased_commit)` as arguments.
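    ///
    /// A sketch that records each rebase (all names are illustrative):
    ///
    /// ```ignore
    /// let mut rebased = Vec::new();
    /// mut_repo.rebase_descendants_with_options(&RebaseOptions::default(), |old, new| {
    ///     rebased.push((old, new));
    /// })?;
    /// ```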
    pub fn rebase_descendants_with_options(
        &mut self,
        options: &RebaseOptions,
        mut progress: impl FnMut(Commit, RebasedCommit),
    ) -> BackendResult<()> {
        let roots = self.parent_mapping.keys().cloned().collect();
        self.transform_descendants_with_options(
            roots,
            &HashMap::new(),
            &options.rewrite_refs,
            async |rewriter| {
                if rewriter.parents_changed() {
                    let old_commit = rewriter.old_commit().clone();
                    let rebased_commit = rebase_commit_with_options(rewriter, options)?;
                    progress(old_commit, rebased_commit);
                }
                Ok(())
            },
        )?;
        self.parent_mapping.clear();
        Ok(())
    }

    /// Rebase descendants of the rewritten commits.
    ///
    /// The descendants of the commits registered in `self.parent_mapping` will
    /// be recursively rebased onto the new version of their parents.
    /// Returns the number of rebased descendants.
    ///
    /// All rebased descendant commits will be preserved even if they were
    /// emptied following the rebase operation. To customize the rebase
    /// behavior, use [`MutableRepo::rebase_descendants_with_options`].
    pub fn rebase_descendants(&mut self) -> BackendResult<usize> {
        let options = RebaseOptions::default();
        let mut num_rebased = 0;
        self.rebase_descendants_with_options(&options, |_old_commit, _rebased_commit| {
            num_rebased += 1;
        })?;
        Ok(num_rebased)
    }

    /// Reparent descendants of the rewritten commits.
    ///
    /// The descendants of the commits registered in `self.parent_mapping` will
    /// be recursively reparented onto the new version of their parents,
    /// leaving their content untouched.
    /// Returns the number of reparented descendants.
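    ///
    /// # Example
    ///
    /// A minimal usage sketch (assuming `repo: &mut MutableRepo`; reparenting
    /// is useful after rewrites that don't change tree content):
    ///
    /// ```ignore
    /// let num_reparented = repo.reparent_descendants()?;
    /// ```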
    pub fn reparent_descendants(&mut self) -> BackendResult<usize> {
        let roots = self.parent_mapping.keys().cloned().collect_vec();
        let mut num_reparented = 0;
        self.transform_descendants(roots, async |rewriter| {
            if rewriter.parents_changed() {
                let builder = rewriter.reparent();
                builder.write()?;
                num_reparented += 1;
            }
            Ok(())
        })?;
        self.parent_mapping.clear();
        Ok(num_reparented)
    }

    pub fn set_wc_commit(
        &mut self,
        name: WorkspaceNameBuf,
        commit_id: CommitId,
    ) -> Result<(), RewriteRootCommit> {
        if &commit_id == self.store().root_commit_id() {
            return Err(RewriteRootCommit);
        }
        self.view_mut().set_wc_commit(name, commit_id);
        Ok(())
    }

    pub fn remove_wc_commit(&mut self, name: &WorkspaceName) -> Result<(), EditCommitError> {
        self.maybe_abandon_wc_commit(name)?;
        self.view_mut().remove_wc_commit(name);
        Ok(())
    }

    /// Merges the working-copy commit. If there's a conflict and the workspace
    /// isn't removed on either side, we keep the self side.
    fn merge_wc_commit(
        &mut self,
        name: &WorkspaceName,
        base_id: Option<&CommitId>,
        other_id: Option<&CommitId>,
    ) {
        let view = self.view.get_mut();
        let self_id = view.get_wc_commit_id(name);
        // Not using merge_ref_targets(). Since the working-copy pointer moves
        // in arbitrary directions, it doesn't make sense to resolve conflicts
        // based on ancestry.
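        // A sketch of the resolution below, with inputs [self, base, other]
        // (adds: self and other; remove: base):
        //   (A, A, B) -> Some(B)  only `other` moved the pointer
        //   (A, B, B) -> Some(A)  only `self` moved the pointer
        //   (A, B, A) -> Some(A)  both moved it to the same commit
        //   (A, B, C) -> None     conflict, handled by the branches below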
        let new_id = if let Some(resolved) = trivial_merge(&[self_id, base_id, other_id]) {
            resolved.cloned()
        } else if self_id.is_none() || other_id.is_none() {
            // We want to remove the workspace even if the self side changed the
            // working-copy commit.
            None
        } else {
            self_id.cloned()
        };
        match new_id {
            Some(id) => view.set_wc_commit(name.to_owned(), id),
            None => view.remove_wc_commit(name),
        }
    }

    pub fn rename_workspace(
        &mut self,
        old_name: &WorkspaceName,
        new_name: WorkspaceNameBuf,
    ) -> Result<(), RenameWorkspaceError> {
        self.view_mut().rename_workspace(old_name, new_name)
    }

    pub fn check_out(
        &mut self,
        name: WorkspaceNameBuf,
        commit: &Commit,
    ) -> Result<Commit, CheckOutCommitError> {
        let wc_commit = self
            .new_commit(vec![commit.id().clone()], commit.tree_id().clone())
            .write()?;
        self.edit(name, &wc_commit)?;
        Ok(wc_commit)
    }

    pub fn edit(&mut self, name: WorkspaceNameBuf, commit: &Commit) -> Result<(), EditCommitError> {
        self.maybe_abandon_wc_commit(&name)?;
        self.add_head(commit)?;
        Ok(self.set_wc_commit(name, commit.id().clone())?)
    }

    fn maybe_abandon_wc_commit(
        &mut self,
        workspace_name: &WorkspaceName,
    ) -> Result<(), EditCommitError> {
        let is_commit_referenced = |view: &View, commit_id: &CommitId| -> bool {
            view.wc_commit_ids()
                .iter()
                .filter(|&(name, _)| name != workspace_name)
                .map(|(_, wc_id)| wc_id)
                .chain(
                    view.local_bookmarks()
                        .flat_map(|(_, target)| target.added_ids()),
                )
                .any(|id| id == commit_id)
        };

        let maybe_wc_commit_id = self
            .view
            .with_ref(|v| v.get_wc_commit_id(workspace_name).cloned());
        if let Some(wc_commit_id) = maybe_wc_commit_id {
            let wc_commit = self
                .store()
                .get_commit(&wc_commit_id)
                .map_err(EditCommitError::WorkingCopyCommitNotFound)?;
            if wc_commit.is_discardable(self)?
                && self
                    .view
                    .with_ref(|v| !is_commit_referenced(v, wc_commit.id()))
                && self.view().heads().contains(wc_commit.id())
            {
                // Abandon the working-copy commit we're leaving if it's
                // discardable, not pointed to by a local bookmark or another
                // working copy, and is a head commit.
                self.record_abandoned_commit(&wc_commit);
            }
        }

        Ok(())
    }

    fn enforce_view_invariants(&self, view: &mut View) {
        let view = view.store_view_mut();
        let root_commit_id = self.store().root_commit_id();
        if view.head_ids.is_empty() {
            view.head_ids.insert(root_commit_id.clone());
        } else if view.head_ids.len() > 1 {
            // An empty head_ids set is padded with the root_commit_id, but the
            // root id is unwanted during heads resolution.
            view.head_ids.remove(root_commit_id);
            // It is unclear whether `heads` can ever fail for the default
            // implementation, but it can definitely fail for non-default
            // implementations.
            // TODO: propagate errors.
            view.head_ids = self
                .index()
                .heads(&mut view.head_ids.iter())
                .unwrap()
                .into_iter()
                .collect();
        }
        assert!(!view.head_ids.is_empty());
    }

    /// Ensures that the given `head` and ancestor commits are reachable from
    /// the visible heads.
    pub fn add_head(&mut self, head: &Commit) -> BackendResult<()> {
        self.add_heads(slice::from_ref(head))
    }

    /// Ensures that the given `heads` and ancestor commits are reachable from
    /// the visible heads.
    ///
    /// The `heads` may contain redundant commits such as already visible ones
    /// and ancestors of the other heads. The `heads` and ancestor commits
    /// should exist in the store.
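    ///
    /// # Example
    ///
    /// A minimal usage sketch (assuming `repo: &mut MutableRepo` and freshly
    /// written commits `a` and `b` that exist in the store):
    ///
    /// ```ignore
    /// // Redundant entries are fine; ancestors of other heads are tolerated.
    /// repo.add_heads(&[a, b])?;
    /// ```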
    pub fn add_heads(&mut self, heads: &[Commit]) -> BackendResult<()> {
        let current_heads = self.view.get_mut().heads();
        // Use an incremental update for the common case of adding a single
        // commit on top of a current head. TODO: Also use an incremental
        // update when adding a single commit on top of a non-head.
        match heads {
            [] => {}
            [head]
                if head
                    .parent_ids()
                    .iter()
                    .all(|parent_id| current_heads.contains(parent_id)) =>
            {
                self.index
                    .add_commit(head)
                    // TODO: indexing error shouldn't be a "BackendError"
                    .map_err(|err| BackendError::Other(err.into()))?;
                self.view.get_mut().add_head(head.id());
                for parent_id in head.parent_ids() {
                    self.view.get_mut().remove_head(parent_id);
                }
            }
            _ => {
                let missing_commits = dag_walk::topo_order_reverse_ord_ok(
                    heads
                        .iter()
                        .cloned()
                        .map(CommitByCommitterTimestamp)
                        .map(Ok),
                    |CommitByCommitterTimestamp(commit)| commit.id().clone(),
                    |CommitByCommitterTimestamp(commit)| {
                        commit
                            .parent_ids()
                            .iter()
                            .filter(|id| !self.index().has_id(id))
                            .map(|id| self.store().get_commit(id))
                            .map_ok(CommitByCommitterTimestamp)
                            .collect_vec()
                    },
                    |_| panic!("graph has cycle"),
                )?;
                for CommitByCommitterTimestamp(missing_commit) in missing_commits.iter().rev() {
                    self.index
                        .add_commit(missing_commit)
                        // TODO: indexing error shouldn't be a "BackendError"
                        .map_err(|err| BackendError::Other(err.into()))?;
                }
                for head in heads {
                    self.view.get_mut().add_head(head.id());
                }
                self.view.mark_dirty();
            }
        }
        Ok(())
    }

    pub fn remove_head(&mut self, head: &CommitId) {
        self.view_mut().remove_head(head);
        self.view.mark_dirty();
    }

    pub fn get_local_bookmark(&self, name: &RefName) -> RefTarget {
        self.view.with_ref(|v| v.get_local_bookmark(name).clone())
    }

    /// Sets the local bookmark to point to the given target. Commits pointed
    /// to by the target are added as visible heads.
    pub fn set_local_bookmark_target(&mut self, name: &RefName, target: RefTarget) {
        let view = self.view_mut();
        for id in target.added_ids() {
            view.add_head(id);
        }
        view.set_local_bookmark_target(name, target);
        self.view.mark_dirty();
    }

    pub fn merge_local_bookmark(
        &mut self,
        name: &RefName,
        base_target: &RefTarget,
        other_target: &RefTarget,
    ) {
        let view = self.view.get_mut();
        let index = self.index.as_index();
        let self_target = view.get_local_bookmark(name);
        let new_target = merge_ref_targets(index, self_target, base_target, other_target);
        self.set_local_bookmark_target(name, new_target);
    }

    pub fn get_remote_bookmark(&self, symbol: RemoteRefSymbol<'_>) -> RemoteRef {
        self.view
            .with_ref(|v| v.get_remote_bookmark(symbol).clone())
    }

    pub fn set_remote_bookmark(&mut self, symbol: RemoteRefSymbol<'_>, remote_ref: RemoteRef) {
        self.view_mut().set_remote_bookmark(symbol, remote_ref);
    }

    fn merge_remote_bookmark(
        &mut self,
        symbol: RemoteRefSymbol<'_>,
        base_ref: &RemoteRef,
        other_ref: &RemoteRef,
    ) {
        let view = self.view.get_mut();
        let index = self.index.as_index();
        let self_ref = view.get_remote_bookmark(symbol);
        let new_ref = merge_remote_refs(index, self_ref, base_ref, other_ref);
        view.set_remote_bookmark(symbol, new_ref);
    }

    /// Merges the specified remote bookmark into the local bookmark, and
    /// starts tracking it.
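    ///
    /// # Example
    ///
    /// A minimal usage sketch (assuming `repo: &mut MutableRepo` and a
    /// `symbol` naming a bookmark on a remote, e.g. main@origin; construction
    /// of the symbol is elided):
    ///
    /// ```ignore
    /// repo.track_remote_bookmark(symbol);
    /// let remote_ref = repo.get_remote_bookmark(symbol);
    /// assert!(matches!(remote_ref.state, RemoteRefState::Tracked));
    /// ```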
    pub fn track_remote_bookmark(&mut self, symbol: RemoteRefSymbol<'_>) {
        let mut remote_ref = self.get_remote_bookmark(symbol);
        let base_target = remote_ref.tracked_target();
        self.merge_local_bookmark(symbol.name, base_target, &remote_ref.target);
        remote_ref.state = RemoteRefState::Tracked;
        self.set_remote_bookmark(symbol, remote_ref);
    }

    /// Stops tracking the specified remote bookmark.
    pub fn untrack_remote_bookmark(&mut self, symbol: RemoteRefSymbol<'_>) {
        let mut remote_ref = self.get_remote_bookmark(symbol);
        remote_ref.state = RemoteRefState::New;
        self.set_remote_bookmark(symbol, remote_ref);
    }

    pub fn remove_remote(&mut self, remote_name: &RemoteName) {
        self.view_mut().remove_remote(remote_name);
    }

    pub fn rename_remote(&mut self, old: &RemoteName, new: &RemoteName) {
        self.view_mut().rename_remote(old, new);
    }

    pub fn get_tag(&self, name: &RefName) -> RefTarget {
        self.view.with_ref(|v| v.get_tag(name).clone())
    }

    pub fn set_tag_target(&mut self, name: &RefName, target: RefTarget) {
        self.view_mut().set_tag_target(name, target);
    }

    pub fn merge_tag(&mut self, name: &RefName, base_target: &RefTarget, other_target: &RefTarget) {
        let view = self.view.get_mut();
        let index = self.index.as_index();
        let self_target = view.get_tag(name);
        let new_target = merge_ref_targets(index, self_target, base_target, other_target);
        view.set_tag_target(name, new_target);
    }

    pub fn get_git_ref(&self, name: &GitRefName) -> RefTarget {
        self.view.with_ref(|v| v.get_git_ref(name).clone())
    }

    pub fn set_git_ref_target(&mut self, name: &GitRefName, target: RefTarget) {
        self.view_mut().set_git_ref_target(name, target);
    }

    fn merge_git_ref(
        &mut self,
        name: &GitRefName,
        base_target: &RefTarget,
        other_target: &RefTarget,
    ) {
        let view = self.view.get_mut();
        let index = self.index.as_index();
        let self_target = view.get_git_ref(name);
        let new_target = merge_ref_targets(index, self_target, base_target, other_target);
        view.set_git_ref_target(name, new_target);
    }

    pub fn git_head(&self) -> RefTarget {
        self.view.with_ref(|v| v.git_head().clone())
    }

    pub fn set_git_head_target(&mut self, target: RefTarget) {
        self.view_mut().set_git_head_target(target);
    }

    pub fn set_view(&mut self, data: op_store::View) {
        self.view_mut().set_view(data);
        self.view.mark_dirty();
    }

    pub fn merge(
        &mut self,
        base_repo: &ReadonlyRepo,
        other_repo: &ReadonlyRepo,
    ) -> BackendResult<()> {
        // First, merge the index, so we can take advantage of a valid index when
        // merging the view. Merging in base_repo's index isn't typically
        // necessary, but it can be if base_repo is ahead of either self or other_repo
        // (e.g. because we're undoing an operation that hasn't been published).
        self.index.merge_in(base_repo.readonly_index());
        self.index.merge_in(other_repo.readonly_index());

        self.view.ensure_clean(|v| self.enforce_view_invariants(v));
        self.merge_view(&base_repo.view, &other_repo.view)?;
        self.view.mark_dirty();
        Ok(())
    }

    pub fn merge_index(&mut self, other_repo: &ReadonlyRepo) {
        self.index.merge_in(other_repo.readonly_index());
    }

    fn merge_view(&mut self, base: &View, other: &View) -> BackendResult<()> {
        let changed_wc_commits = diff_named_commit_ids(base.wc_commit_ids(), other.wc_commit_ids());
        for (name, (base_id, other_id)) in changed_wc_commits {
            self.merge_wc_commit(name, base_id, other_id);
        }

        let base_heads = base.heads().iter().cloned().collect_vec();
        let own_heads = self.view().heads().iter().cloned().collect_vec();
        let other_heads = other.heads().iter().cloned().collect_vec();

        // HACK: Don't walk long ranges of commits to find rewrites when using
        // custom index implementations. The only custom index implementation
        // we're currently aware of is Google's, and that repo has too high a
        // commit rate for it to be feasible to walk all added and removed
        // commits.
        // TODO: Fix this somehow. Maybe a method on `Index` to find rewritten
        // commits given `base_heads`, `own_heads` and `other_heads`?
        if self.is_backed_by_default_index() {
            self.record_rewrites(&base_heads, &own_heads)?;
            self.record_rewrites(&base_heads, &other_heads)?;
            // No need to remove heads removed by `other` because we already
            // marked them abandoned or rewritten.
        } else {
            for removed_head in base.heads().difference(other.heads()) {
                self.view_mut().remove_head(removed_head);
            }
        }
        for added_head in other.heads().difference(base.heads()) {
            self.view_mut().add_head(added_head);
        }

        let changed_local_bookmarks =
            diff_named_ref_targets(base.local_bookmarks(), other.local_bookmarks());
        for (name, (base_target, other_target)) in changed_local_bookmarks {
            self.merge_local_bookmark(name, base_target, other_target);
        }

        let changed_tags = diff_named_ref_targets(base.tags(), other.tags());
        for (name, (base_target, other_target)) in changed_tags {
            self.merge_tag(name, base_target, other_target);
        }

        let changed_git_refs = diff_named_ref_targets(base.git_refs(), other.git_refs());
        for (name, (base_target, other_target)) in changed_git_refs {
            self.merge_git_ref(name, base_target, other_target);
        }

        let changed_remote_bookmarks =
            diff_named_remote_refs(base.all_remote_bookmarks(), other.all_remote_bookmarks());
        for (symbol, (base_ref, other_ref)) in changed_remote_bookmarks {
            self.merge_remote_bookmark(symbol, base_ref, other_ref);
        }

        let new_git_head_target = merge_ref_targets(
            self.index(),
            self.view().git_head(),
            base.git_head(),
            other.git_head(),
        );
        self.set_git_head_target(new_git_head_target);

        Ok(())
    }

    /// Finds and records commits that were rewritten or abandoned between
    /// `old_heads` and `new_heads`.
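    ///
    /// A change whose change ID still has commits on the `new_heads` side is
    /// recorded as rewritten (divergently so if several new commits carry the
    /// change ID); a change whose commits appear only on the `old_heads` side
    /// is recorded as abandoned.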
    fn record_rewrites(
        &mut self,
        old_heads: &[CommitId],
        new_heads: &[CommitId],
    ) -> BackendResult<()> {
        let mut removed_changes: HashMap<ChangeId, Vec<CommitId>> = HashMap::new();
        for item in revset::walk_revs(self, old_heads, new_heads)
            .map_err(|err| err.into_backend_error())?
            .commit_change_ids()
        {
            let (commit_id, change_id) = item.map_err(|err| err.into_backend_error())?;
            removed_changes
                .entry(change_id)
                .or_default()
                .push(commit_id);
        }
        if removed_changes.is_empty() {
            return Ok(());
        }

        let mut rewritten_changes = HashSet::new();
        let mut rewritten_commits: HashMap<CommitId, Vec<CommitId>> = HashMap::new();
        for item in revset::walk_revs(self, new_heads, old_heads)
            .map_err(|err| err.into_backend_error())?
            .commit_change_ids()
        {
            let (commit_id, change_id) = item.map_err(|err| err.into_backend_error())?;
            if let Some(old_commits) = removed_changes.get(&change_id) {
                for old_commit in old_commits {
                    rewritten_commits
                        .entry(old_commit.clone())
                        .or_default()
                        .push(commit_id.clone());
                }
            }
            rewritten_changes.insert(change_id);
        }
        for (old_commit, new_commits) in rewritten_commits {
            if new_commits.len() == 1 {
                self.set_rewritten_commit(
                    old_commit.clone(),
                    new_commits.into_iter().next().unwrap(),
                );
            } else {
                self.set_divergent_rewrite(old_commit.clone(), new_commits);
            }
        }

        for (change_id, removed_commit_ids) in &removed_changes {
            if !rewritten_changes.contains(change_id) {
                for id in removed_commit_ids {
                    let commit = self.store().get_commit(id)?;
                    self.record_abandoned_commit(&commit);
                }
            }
        }

        Ok(())
    }
}

impl Repo for MutableRepo {
    fn base_repo(&self) -> &ReadonlyRepo {
        &self.base_repo
    }

    fn store(&self) -> &Arc<Store> {
        self.base_repo.store()
    }

    fn op_store(&self) -> &Arc<dyn OpStore> {
        self.base_repo.op_store()
    }

    fn index(&self) -> &dyn Index {
        self.index.as_index()
    }

    fn view(&self) -> &View {
        self.view
            .get_or_ensure_clean(|v| self.enforce_view_invariants(v))
    }

    fn submodule_store(&self) -> &Arc<dyn SubmoduleStore> {
        self.base_repo.submodule_store()
    }

    fn resolve_change_id_prefix(&self, prefix: &HexPrefix) -> PrefixResolution<Vec<CommitId>> {
        let change_id_index = self.index.change_id_index(&mut self.view().heads().iter());
        change_id_index.resolve_prefix(prefix)
    }

    fn shortest_unique_change_id_prefix_len(&self, target_id: &ChangeId) -> usize {
        let change_id_index = self.index.change_id_index(&mut self.view().heads().iter());
        change_id_index.shortest_unique_prefix_len(target_id)
    }
}

/// Error from attempts to check out the root commit for editing
#[derive(Debug, Error)]
#[error("Cannot rewrite the root commit")]
pub struct RewriteRootCommit;

/// Error from attempts to edit a commit
#[derive(Debug, Error)]
pub enum EditCommitError {
    #[error("Current working-copy commit not found")]
    WorkingCopyCommitNotFound(#[source] BackendError),
    #[error(transparent)]
    RewriteRootCommit(#[from] RewriteRootCommit),
    #[error(transparent)]
    BackendError(#[from] BackendError),
}

/// Error from attempts to check out a commit
#[derive(Debug, Error)]
pub enum CheckOutCommitError {
    #[error("Failed to create new working-copy commit")]
    CreateCommit(#[from] BackendError),
    #[error("Failed to edit commit")]
    EditCommit(#[from] EditCommitError),
}

mod dirty_cell {
    use std::cell::OnceCell;
    use std::cell::RefCell;

    /// Cell that lazily updates the value after `mark_dirty()`.
    ///
    /// A clean value can be immutably borrowed within the `self` lifetime.
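    ///
    /// # Example
    ///
    /// A minimal sketch of the clean/dirty life cycle:
    ///
    /// ```ignore
    /// let mut cell = DirtyCell::with_clean(vec![2, 1]);
    /// cell.get_mut().push(3); // mutate in place ...
    /// cell.mark_dirty(); // ... then invalidate
    /// // The next read lazily re-establishes invariants:
    /// let value = cell.get_or_ensure_clean(|v| v.sort());
    /// assert_eq!(value, &vec![1, 2, 3]);
    /// ```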
    #[derive(Clone, Debug)]
    pub struct DirtyCell<T> {
        // Either the clean or the dirty value is set. The value is boxed to
        // reduce stack space and memcpy overhead.
        clean: OnceCell<Box<T>>,
        dirty: RefCell<Option<Box<T>>>,
    }

    impl<T> DirtyCell<T> {
        pub fn with_clean(value: T) -> Self {
            Self {
                clean: OnceCell::from(Box::new(value)),
                dirty: RefCell::new(None),
            }
        }

        pub fn get_or_ensure_clean(&self, f: impl FnOnce(&mut T)) -> &T {
            self.clean.get_or_init(|| {
                // Panics if ensure_clean() is invoked from within a with_ref()
                // callback, for example.
                let mut value = self.dirty.borrow_mut().take().unwrap();
                f(&mut value);
                value
            })
        }

        pub fn ensure_clean(&self, f: impl FnOnce(&mut T)) {
            self.get_or_ensure_clean(f);
        }

        pub fn into_inner(self) -> T {
            *self
                .clean
                .into_inner()
                .or_else(|| self.dirty.into_inner())
                .unwrap()
        }

        pub fn with_ref<R>(&self, f: impl FnOnce(&T) -> R) -> R {
            if let Some(value) = self.clean.get() {
                f(value)
            } else {
                f(self.dirty.borrow().as_ref().unwrap())
            }
        }

        pub fn get_mut(&mut self) -> &mut T {
            self.clean
                .get_mut()
                .or_else(|| self.dirty.get_mut().as_mut())
                .unwrap()
        }

        pub fn mark_dirty(&mut self) {
            if let Some(value) = self.clean.take() {
                *self.dirty.get_mut() = Some(value);
            }
        }
    }
}