// Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![expect(missing_docs)]

use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::fmt::Formatter;
use std::fs;
use std::path::Path;
use std::slice;
use std::sync::Arc;

use itertools::Itertools as _;
use once_cell::sync::OnceCell;
use pollster::FutureExt as _;
use thiserror::Error;
use tracing::instrument;

use self::dirty_cell::DirtyCell;
use crate::backend::Backend;
use crate::backend::BackendError;
use crate::backend::BackendInitError;
use crate::backend::BackendLoadError;
use crate::backend::BackendResult;
use crate::backend::ChangeId;
use crate::backend::CommitId;
use crate::backend::MergedTreeId;
use crate::commit::Commit;
use crate::commit::CommitByCommitterTimestamp;
use crate::commit_builder::CommitBuilder;
use crate::commit_builder::DetachedCommitBuilder;
use crate::dag_walk;
use crate::default_index::DefaultIndexStore;
use crate::default_index::DefaultMutableIndex;
use crate::default_submodule_store::DefaultSubmoduleStore;
use crate::file_util::IoResultExt as _;
use crate::file_util::PathError;
use crate::index::ChangeIdIndex;
use crate::index::Index;
use crate::index::IndexReadError;
use crate::index::IndexStore;
use crate::index::MutableIndex;
use crate::index::ReadonlyIndex;
use crate::merge::MergeBuilder;
use crate::merge::SameChange;
use crate::merge::trivial_merge;
use crate::object_id::HexPrefix;
use crate::object_id::PrefixResolution;
use crate::op_heads_store;
use crate::op_heads_store::OpHeadResolutionError;
use crate::op_heads_store::OpHeadsStore;
use crate::op_heads_store::OpHeadsStoreError;
use crate::op_store;
use crate::op_store::OpStore;
use crate::op_store::OpStoreError;
use crate::op_store::OpStoreResult;
use crate::op_store::OperationId;
use crate::op_store::RefTarget;
use crate::op_store::RemoteRef;
use crate::op_store::RemoteRefState;
use crate::op_store::RootOperationData;
use crate::operation::Operation;
use crate::ref_name::GitRefName;
use crate::ref_name::RefName;
use crate::ref_name::RemoteName;
use crate::ref_name::RemoteRefSymbol;
use crate::ref_name::WorkspaceName;
use crate::ref_name::WorkspaceNameBuf;
use crate::refs::diff_named_commit_ids;
use crate::refs::diff_named_ref_targets;
use crate::refs::diff_named_remote_refs;
use crate::refs::merge_ref_targets;
use crate::refs::merge_remote_refs;
use crate::revset;
use crate::revset::RevsetEvaluationError;
use crate::revset::RevsetExpression;
use crate::revset::RevsetIteratorExt as _;
use crate::rewrite::CommitRewriter;
use crate::rewrite::RebaseOptions;
use crate::rewrite::RebasedCommit;
use crate::rewrite::RewriteRefsOptions;
use crate::rewrite::merge_commit_trees;
use crate::rewrite::rebase_commit_with_options;
use crate::settings::UserSettings;
use crate::signing::SignInitError;
use crate::signing::Signer;
use crate::simple_backend::SimpleBackend;
use crate::simple_op_heads_store::SimpleOpHeadsStore;
use crate::simple_op_store::SimpleOpStore;
use crate::store::Store;
use crate::submodule_store::SubmoduleStore;
use crate::transaction::Transaction;
use crate::transaction::TransactionCommitError;
use crate::tree_merge::MergeOptions;
use crate::view::RenameWorkspaceError;
use crate::view::View;

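/// Read access to the data of a repo (store, operation store, index, and
/// view), implemented by both [`ReadonlyRepo`] and [`MutableRepo`].
///
/// A hedged sketch of looking up the commits for a change id through this
/// trait (the helper function is hypothetical):
///
/// ```ignore
/// fn commits_for_change(repo: &dyn Repo, change_id: &ChangeId) -> Vec<CommitId> {
///     // Returns None if the change id doesn't exist in the repo.
///     repo.resolve_change_id(change_id).unwrap_or_default()
/// }
/// ```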
pub trait Repo {
    /// Base repository that contains all committed data. Returns `self` if
    /// this is a `ReadonlyRepo`.
    fn base_repo(&self) -> &ReadonlyRepo;

    fn store(&self) -> &Arc<Store>;

    fn op_store(&self) -> &Arc<dyn OpStore>;

    fn index(&self) -> &dyn Index;

    fn view(&self) -> &View;

    fn submodule_store(&self) -> &Arc<dyn SubmoduleStore>;

    fn resolve_change_id(&self, change_id: &ChangeId) -> Option<Vec<CommitId>> {
        // Replace this if we add a more efficient lookup method.
        let prefix = HexPrefix::from_id(change_id);
        match self.resolve_change_id_prefix(&prefix) {
            PrefixResolution::NoMatch => None,
            PrefixResolution::SingleMatch(entries) => Some(entries),
            PrefixResolution::AmbiguousMatch => panic!("complete change_id should be unambiguous"),
        }
    }

    fn resolve_change_id_prefix(&self, prefix: &HexPrefix) -> PrefixResolution<Vec<CommitId>>;

    fn shortest_unique_change_id_prefix_len(&self, target_id_bytes: &ChangeId) -> usize;
}

pub struct ReadonlyRepo {
    loader: RepoLoader,
    operation: Operation,
    index: Box<dyn ReadonlyIndex>,
    change_id_index: OnceCell<Box<dyn ChangeIdIndex>>,
    // TODO: This should eventually become part of the index and not be stored fully in memory.
    view: View,
}

impl Debug for ReadonlyRepo {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
        f.debug_struct("ReadonlyRepo")
            .field("store", &self.loader.store)
            .finish_non_exhaustive()
    }
}

#[derive(Error, Debug)]
pub enum RepoInitError {
    #[error(transparent)]
    Backend(#[from] BackendInitError),
    #[error(transparent)]
    OpHeadsStore(#[from] OpHeadsStoreError),
    #[error(transparent)]
    Path(#[from] PathError),
}

impl ReadonlyRepo {
    pub fn default_op_store_initializer() -> &'static OpStoreInitializer<'static> {
        &|_settings, store_path, root_data| {
            Ok(Box::new(SimpleOpStore::init(store_path, root_data)?))
        }
    }

    pub fn default_op_heads_store_initializer() -> &'static OpHeadsStoreInitializer<'static> {
        &|_settings, store_path| Ok(Box::new(SimpleOpHeadsStore::init(store_path)?))
    }

    pub fn default_index_store_initializer() -> &'static IndexStoreInitializer<'static> {
        &|_settings, store_path| Ok(Box::new(DefaultIndexStore::init(store_path)?))
    }

    pub fn default_submodule_store_initializer() -> &'static SubmoduleStoreInitializer<'static> {
        &|_settings, store_path| Ok(Box::new(DefaultSubmoduleStore::init(store_path)))
    }

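    /// Creates and initializes a new on-disk repo at `repo_path`.
    ///
    /// A hedged sketch of calling this with the default store initializers
    /// (assumes `settings` and `repo_path` are in scope, and assumes a
    /// `SimpleBackend::init(store_path)` constructor as the commit backend):
    ///
    /// ```ignore
    /// let repo = ReadonlyRepo::init(
    ///     &settings,
    ///     repo_path,
    ///     &|_settings, store_path| Ok(Box::new(SimpleBackend::init(store_path))),
    ///     Signer::from_settings(&settings)?,
    ///     ReadonlyRepo::default_op_store_initializer(),
    ///     ReadonlyRepo::default_op_heads_store_initializer(),
    ///     ReadonlyRepo::default_index_store_initializer(),
    ///     ReadonlyRepo::default_submodule_store_initializer(),
    /// )?;
    /// ```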
    #[expect(clippy::too_many_arguments)]
    pub fn init(
        settings: &UserSettings,
        repo_path: &Path,
        backend_initializer: &BackendInitializer,
        signer: Signer,
        op_store_initializer: &OpStoreInitializer,
        op_heads_store_initializer: &OpHeadsStoreInitializer,
        index_store_initializer: &IndexStoreInitializer,
        submodule_store_initializer: &SubmoduleStoreInitializer,
    ) -> Result<Arc<Self>, RepoInitError> {
        let repo_path = dunce::canonicalize(repo_path).context(repo_path)?;

        let store_path = repo_path.join("store");
        fs::create_dir(&store_path).context(&store_path)?;
        let backend = backend_initializer(settings, &store_path)?;
        let backend_path = store_path.join("type");
        fs::write(&backend_path, backend.name()).context(&backend_path)?;
        let merge_options =
            MergeOptions::from_settings(settings).map_err(|err| BackendInitError(err.into()))?;
        let store = Store::new(backend, signer, merge_options);

        let op_store_path = repo_path.join("op_store");
        fs::create_dir(&op_store_path).context(&op_store_path)?;
        let root_op_data = RootOperationData {
            root_commit_id: store.root_commit_id().clone(),
        };
        let op_store = op_store_initializer(settings, &op_store_path, root_op_data)?;
        let op_store_type_path = op_store_path.join("type");
        fs::write(&op_store_type_path, op_store.name()).context(&op_store_type_path)?;
        let op_store: Arc<dyn OpStore> = Arc::from(op_store);

        let op_heads_path = repo_path.join("op_heads");
        fs::create_dir(&op_heads_path).context(&op_heads_path)?;
        let op_heads_store = op_heads_store_initializer(settings, &op_heads_path)?;
        let op_heads_type_path = op_heads_path.join("type");
        fs::write(&op_heads_type_path, op_heads_store.name()).context(&op_heads_type_path)?;
        op_heads_store.update_op_heads(&[], op_store.root_operation_id())?;
        let op_heads_store: Arc<dyn OpHeadsStore> = Arc::from(op_heads_store);

        let index_path = repo_path.join("index");
        fs::create_dir(&index_path).context(&index_path)?;
        let index_store = index_store_initializer(settings, &index_path)?;
        let index_type_path = index_path.join("type");
        fs::write(&index_type_path, index_store.name()).context(&index_type_path)?;
        let index_store: Arc<dyn IndexStore> = Arc::from(index_store);

        let submodule_store_path = repo_path.join("submodule_store");
        fs::create_dir(&submodule_store_path).context(&submodule_store_path)?;
        let submodule_store = submodule_store_initializer(settings, &submodule_store_path)?;
        let submodule_store_type_path = submodule_store_path.join("type");
        fs::write(&submodule_store_type_path, submodule_store.name())
            .context(&submodule_store_type_path)?;
        let submodule_store = Arc::from(submodule_store);

        let loader = RepoLoader {
            settings: settings.clone(),
            store,
            op_store,
            op_heads_store,
            index_store,
            submodule_store,
        };

        let root_operation = loader.root_operation();
        let root_view = root_operation.view().expect("failed to read root view");
        assert!(!root_view.heads().is_empty());
        let index = loader
            .index_store
            .get_index_at_op(&root_operation, &loader.store)
            // If the root op index couldn't be read, the index backend wouldn't
            // be initialized properly.
            .map_err(|err| BackendInitError(err.into()))?;
        Ok(Arc::new(Self {
            loader,
            operation: root_operation,
            index,
            change_id_index: OnceCell::new(),
            view: root_view,
        }))
    }

    pub fn loader(&self) -> &RepoLoader {
        &self.loader
    }

    pub fn op_id(&self) -> &OperationId {
        self.operation.id()
    }

    pub fn operation(&self) -> &Operation {
        &self.operation
    }

    pub fn view(&self) -> &View {
        &self.view
    }

    pub fn readonly_index(&self) -> &dyn ReadonlyIndex {
        self.index.as_ref()
    }

    fn change_id_index(&self) -> &dyn ChangeIdIndex {
        self.change_id_index
            .get_or_init(|| {
                self.readonly_index()
                    .change_id_index(&mut self.view().heads().iter())
            })
            .as_ref()
    }

    pub fn op_heads_store(&self) -> &Arc<dyn OpHeadsStore> {
        self.loader.op_heads_store()
    }

    pub fn index_store(&self) -> &Arc<dyn IndexStore> {
        self.loader.index_store()
    }

    pub fn settings(&self) -> &UserSettings {
        self.loader.settings()
    }

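    /// Starts a transaction based on this repo's view and index.
    ///
    /// A hedged sketch of a typical edit cycle, mirroring the pattern used in
    /// [`RepoLoader::merge_operations()`] below (the description string is
    /// hypothetical):
    ///
    /// ```ignore
    /// let mut tx = repo.start_transaction();
    /// tx.repo_mut().rebase_descendants()?;
    /// let new_repo = tx.write("my operation")?.leave_unpublished();
    /// ```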
    pub fn start_transaction(self: &Arc<Self>) -> Transaction {
        let mut_repo = MutableRepo::new(self.clone(), self.readonly_index(), &self.view);
        Transaction::new(mut_repo, self.settings())
    }

    pub fn reload_at_head(&self) -> Result<Arc<Self>, RepoLoaderError> {
        self.loader().load_at_head()
    }

    #[instrument]
    pub fn reload_at(&self, operation: &Operation) -> Result<Arc<Self>, RepoLoaderError> {
        self.loader().load_at(operation)
    }
}

impl Repo for ReadonlyRepo {
    fn base_repo(&self) -> &ReadonlyRepo {
        self
    }

    fn store(&self) -> &Arc<Store> {
        self.loader.store()
    }

    fn op_store(&self) -> &Arc<dyn OpStore> {
        self.loader.op_store()
    }

    fn index(&self) -> &dyn Index {
        self.readonly_index().as_index()
    }

    fn view(&self) -> &View {
        &self.view
    }

    fn submodule_store(&self) -> &Arc<dyn SubmoduleStore> {
        self.loader.submodule_store()
    }

    fn resolve_change_id_prefix(&self, prefix: &HexPrefix) -> PrefixResolution<Vec<CommitId>> {
        self.change_id_index().resolve_prefix(prefix)
    }

    fn shortest_unique_change_id_prefix_len(&self, target_id: &ChangeId) -> usize {
        self.change_id_index().shortest_unique_prefix_len(target_id)
    }
}

pub type BackendInitializer<'a> =
    dyn Fn(&UserSettings, &Path) -> Result<Box<dyn Backend>, BackendInitError> + 'a;
#[rustfmt::skip] // auto-formatted line would exceed the maximum width
pub type OpStoreInitializer<'a> =
    dyn Fn(&UserSettings, &Path, RootOperationData) -> Result<Box<dyn OpStore>, BackendInitError>
    + 'a;
pub type OpHeadsStoreInitializer<'a> =
    dyn Fn(&UserSettings, &Path) -> Result<Box<dyn OpHeadsStore>, BackendInitError> + 'a;
pub type IndexStoreInitializer<'a> =
    dyn Fn(&UserSettings, &Path) -> Result<Box<dyn IndexStore>, BackendInitError> + 'a;
pub type SubmoduleStoreInitializer<'a> =
    dyn Fn(&UserSettings, &Path) -> Result<Box<dyn SubmoduleStore>, BackendInitError> + 'a;

type BackendFactory =
    Box<dyn Fn(&UserSettings, &Path) -> Result<Box<dyn Backend>, BackendLoadError>>;
type OpStoreFactory = Box<
    dyn Fn(&UserSettings, &Path, RootOperationData) -> Result<Box<dyn OpStore>, BackendLoadError>,
>;
type OpHeadsStoreFactory =
    Box<dyn Fn(&UserSettings, &Path) -> Result<Box<dyn OpHeadsStore>, BackendLoadError>>;
type IndexStoreFactory =
    Box<dyn Fn(&UserSettings, &Path) -> Result<Box<dyn IndexStore>, BackendLoadError>>;
type SubmoduleStoreFactory =
    Box<dyn Fn(&UserSettings, &Path) -> Result<Box<dyn SubmoduleStore>, BackendLoadError>>;

pub fn merge_factories_map<F>(base: &mut HashMap<String, F>, ext: HashMap<String, F>) {
    for (name, factory) in ext {
        match base.entry(name) {
            Entry::Vacant(v) => {
                v.insert(factory);
            }
            Entry::Occupied(o) => {
                panic!("Conflicting factory definitions for '{}' factory", o.key())
            }
        }
    }
}

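/// Registry of factory functions used to load each pluggable store, keyed by
/// the type name recorded in the store's `type` file.
///
/// A hedged sketch of registering a custom commit backend on top of the
/// defaults (the `MyBackend` type and `"my-backend"` name are hypothetical):
///
/// ```ignore
/// let mut factories = StoreFactories::default();
/// factories.add_backend(
///     "my-backend",
///     Box::new(|settings, store_path| Ok(Box::new(MyBackend::load(settings, store_path)?))),
/// );
/// ```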
pub struct StoreFactories {
    backend_factories: HashMap<String, BackendFactory>,
    op_store_factories: HashMap<String, OpStoreFactory>,
    op_heads_store_factories: HashMap<String, OpHeadsStoreFactory>,
    index_store_factories: HashMap<String, IndexStoreFactory>,
    submodule_store_factories: HashMap<String, SubmoduleStoreFactory>,
}

impl Default for StoreFactories {
    fn default() -> Self {
        let mut factories = Self::empty();

        // Backends
        factories.add_backend(
            SimpleBackend::name(),
            Box::new(|_settings, store_path| Ok(Box::new(SimpleBackend::load(store_path)))),
        );
        #[cfg(feature = "git")]
        factories.add_backend(
            crate::git_backend::GitBackend::name(),
            Box::new(|settings, store_path| {
                Ok(Box::new(crate::git_backend::GitBackend::load(
                    settings, store_path,
                )?))
            }),
        );
        #[cfg(feature = "testing")]
        factories.add_backend(
            crate::secret_backend::SecretBackend::name(),
            Box::new(|settings, store_path| {
                Ok(Box::new(crate::secret_backend::SecretBackend::load(
                    settings, store_path,
                )?))
            }),
        );

        // OpStores
        factories.add_op_store(
            SimpleOpStore::name(),
            Box::new(|_settings, store_path, root_data| {
                Ok(Box::new(SimpleOpStore::load(store_path, root_data)))
            }),
        );

        // OpHeadsStores
        factories.add_op_heads_store(
            SimpleOpHeadsStore::name(),
            Box::new(|_settings, store_path| Ok(Box::new(SimpleOpHeadsStore::load(store_path)))),
        );

        // Index
        factories.add_index_store(
            DefaultIndexStore::name(),
            Box::new(|_settings, store_path| Ok(Box::new(DefaultIndexStore::load(store_path)))),
        );

        // SubmoduleStores
        factories.add_submodule_store(
            DefaultSubmoduleStore::name(),
            Box::new(|_settings, store_path| Ok(Box::new(DefaultSubmoduleStore::load(store_path)))),
        );

        factories
    }
}

#[derive(Debug, Error)]
pub enum StoreLoadError {
    #[error("Unsupported {store} backend type '{store_type}'")]
    UnsupportedType {
        store: &'static str,
        store_type: String,
    },
    #[error("Failed to read {store} backend type")]
    ReadError {
        store: &'static str,
        source: PathError,
    },
    #[error(transparent)]
    Backend(#[from] BackendLoadError),
    #[error(transparent)]
    Signing(#[from] SignInitError),
}

impl StoreFactories {
    pub fn empty() -> Self {
        Self {
            backend_factories: HashMap::new(),
            op_store_factories: HashMap::new(),
            op_heads_store_factories: HashMap::new(),
            index_store_factories: HashMap::new(),
            submodule_store_factories: HashMap::new(),
        }
    }

    pub fn merge(&mut self, ext: Self) {
        let Self {
            backend_factories,
            op_store_factories,
            op_heads_store_factories,
            index_store_factories,
            submodule_store_factories,
        } = ext;

        merge_factories_map(&mut self.backend_factories, backend_factories);
        merge_factories_map(&mut self.op_store_factories, op_store_factories);
        merge_factories_map(&mut self.op_heads_store_factories, op_heads_store_factories);
        merge_factories_map(&mut self.index_store_factories, index_store_factories);
        merge_factories_map(
            &mut self.submodule_store_factories,
            submodule_store_factories,
        );
    }

    pub fn add_backend(&mut self, name: &str, factory: BackendFactory) {
        self.backend_factories.insert(name.to_string(), factory);
    }

    pub fn load_backend(
        &self,
        settings: &UserSettings,
        store_path: &Path,
    ) -> Result<Box<dyn Backend>, StoreLoadError> {
        let backend_type = read_store_type("commit", store_path.join("type"))?;
        let backend_factory = self.backend_factories.get(&backend_type).ok_or_else(|| {
            StoreLoadError::UnsupportedType {
                store: "commit",
                store_type: backend_type.clone(),
            }
        })?;
        Ok(backend_factory(settings, store_path)?)
    }

    pub fn add_op_store(&mut self, name: &str, factory: OpStoreFactory) {
        self.op_store_factories.insert(name.to_string(), factory);
    }

    pub fn load_op_store(
        &self,
        settings: &UserSettings,
        store_path: &Path,
        root_data: RootOperationData,
    ) -> Result<Box<dyn OpStore>, StoreLoadError> {
        let op_store_type = read_store_type("operation", store_path.join("type"))?;
        let op_store_factory = self.op_store_factories.get(&op_store_type).ok_or_else(|| {
            StoreLoadError::UnsupportedType {
                store: "operation",
                store_type: op_store_type.clone(),
            }
        })?;
        Ok(op_store_factory(settings, store_path, root_data)?)
    }

    pub fn add_op_heads_store(&mut self, name: &str, factory: OpHeadsStoreFactory) {
        self.op_heads_store_factories
            .insert(name.to_string(), factory);
    }

    pub fn load_op_heads_store(
        &self,
        settings: &UserSettings,
        store_path: &Path,
    ) -> Result<Box<dyn OpHeadsStore>, StoreLoadError> {
        let op_heads_store_type = read_store_type("operation heads", store_path.join("type"))?;
        let op_heads_store_factory = self
            .op_heads_store_factories
            .get(&op_heads_store_type)
            .ok_or_else(|| StoreLoadError::UnsupportedType {
                store: "operation heads",
                store_type: op_heads_store_type.clone(),
            })?;
        Ok(op_heads_store_factory(settings, store_path)?)
    }

    pub fn add_index_store(&mut self, name: &str, factory: IndexStoreFactory) {
        self.index_store_factories.insert(name.to_string(), factory);
    }

    pub fn load_index_store(
        &self,
        settings: &UserSettings,
        store_path: &Path,
    ) -> Result<Box<dyn IndexStore>, StoreLoadError> {
        let index_store_type = read_store_type("index", store_path.join("type"))?;
        let index_store_factory = self
            .index_store_factories
            .get(&index_store_type)
            .ok_or_else(|| StoreLoadError::UnsupportedType {
                store: "index",
                store_type: index_store_type.clone(),
            })?;
        Ok(index_store_factory(settings, store_path)?)
    }

    pub fn add_submodule_store(&mut self, name: &str, factory: SubmoduleStoreFactory) {
        self.submodule_store_factories
            .insert(name.to_string(), factory);
    }

    pub fn load_submodule_store(
        &self,
        settings: &UserSettings,
        store_path: &Path,
    ) -> Result<Box<dyn SubmoduleStore>, StoreLoadError> {
        let submodule_store_type = read_store_type("submodule_store", store_path.join("type"))?;
        let submodule_store_factory = self
            .submodule_store_factories
            .get(&submodule_store_type)
            .ok_or_else(|| StoreLoadError::UnsupportedType {
                store: "submodule_store",
                store_type: submodule_store_type.clone(),
            })?;

        Ok(submodule_store_factory(settings, store_path)?)
    }
}

pub fn read_store_type(
    store: &'static str,
    path: impl AsRef<Path>,
) -> Result<String, StoreLoadError> {
    let path = path.as_ref();
    fs::read_to_string(path)
        .context(path)
        .map_err(|source| StoreLoadError::ReadError { store, source })
}

#[derive(Debug, Error)]
pub enum RepoLoaderError {
    #[error(transparent)]
    Backend(#[from] BackendError),
    #[error(transparent)]
    IndexRead(#[from] IndexReadError),
    #[error(transparent)]
    OpHeadResolution(#[from] OpHeadResolutionError),
    #[error(transparent)]
    OpHeadsStoreError(#[from] OpHeadsStoreError),
    #[error(transparent)]
    OpStore(#[from] OpStoreError),
    #[error(transparent)]
    TransactionCommit(#[from] TransactionCommitError),
}

/// Helps create `ReadonlyRepo` instances of a repo at the head operation or at
/// a given operation.
#[derive(Clone)]
pub struct RepoLoader {
    settings: UserSettings,
    store: Arc<Store>,
    op_store: Arc<dyn OpStore>,
    op_heads_store: Arc<dyn OpHeadsStore>,
    index_store: Arc<dyn IndexStore>,
    submodule_store: Arc<dyn SubmoduleStore>,
}

impl RepoLoader {
    pub fn new(
        settings: UserSettings,
        store: Arc<Store>,
        op_store: Arc<dyn OpStore>,
        op_heads_store: Arc<dyn OpHeadsStore>,
        index_store: Arc<dyn IndexStore>,
        submodule_store: Arc<dyn SubmoduleStore>,
    ) -> Self {
        Self {
            settings,
            store,
            op_store,
            op_heads_store,
            index_store,
            submodule_store,
        }
    }

    /// Creates a `RepoLoader` for the repo at `repo_path` by reading the
    /// various `.jj/repo/<backend>/type` files and loading the right
    /// backends from `store_factories`.
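    ///
    /// A hedged sketch (assumes `settings` and a `workspace_root` path under
    /// which the repo lives at `.jj/repo`):
    ///
    /// ```ignore
    /// let loader = RepoLoader::init_from_file_system(
    ///     &settings,
    ///     &workspace_root.join(".jj").join("repo"),
    ///     &StoreFactories::default(),
    /// )?;
    /// let repo = loader.load_at_head()?;
    /// ```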
    pub fn init_from_file_system(
        settings: &UserSettings,
        repo_path: &Path,
        store_factories: &StoreFactories,
    ) -> Result<Self, StoreLoadError> {
        let merge_options =
            MergeOptions::from_settings(settings).map_err(|err| BackendLoadError(err.into()))?;
        let store = Store::new(
            store_factories.load_backend(settings, &repo_path.join("store"))?,
            Signer::from_settings(settings)?,
            merge_options,
        );
        let root_op_data = RootOperationData {
            root_commit_id: store.root_commit_id().clone(),
        };
        let op_store = Arc::from(store_factories.load_op_store(
            settings,
            &repo_path.join("op_store"),
            root_op_data,
        )?);
        let op_heads_store =
            Arc::from(store_factories.load_op_heads_store(settings, &repo_path.join("op_heads"))?);
        let index_store =
            Arc::from(store_factories.load_index_store(settings, &repo_path.join("index"))?);
        let submodule_store = Arc::from(
            store_factories.load_submodule_store(settings, &repo_path.join("submodule_store"))?,
        );
        Ok(Self {
            settings: settings.clone(),
            store,
            op_store,
            op_heads_store,
            index_store,
            submodule_store,
        })
    }

    pub fn settings(&self) -> &UserSettings {
        &self.settings
    }

    pub fn store(&self) -> &Arc<Store> {
        &self.store
    }

    pub fn index_store(&self) -> &Arc<dyn IndexStore> {
        &self.index_store
    }

    pub fn op_store(&self) -> &Arc<dyn OpStore> {
        &self.op_store
    }

    pub fn op_heads_store(&self) -> &Arc<dyn OpHeadsStore> {
        &self.op_heads_store
    }

    pub fn submodule_store(&self) -> &Arc<dyn SubmoduleStore> {
        &self.submodule_store
    }

    pub fn load_at_head(&self) -> Result<Arc<ReadonlyRepo>, RepoLoaderError> {
        let op = op_heads_store::resolve_op_heads(
            self.op_heads_store.as_ref(),
            &self.op_store,
            |op_heads| self._resolve_op_heads(op_heads),
        )?;
        let view = op.view()?;
        self._finish_load(op, view)
    }

    #[instrument(skip(self))]
    pub fn load_at(&self, op: &Operation) -> Result<Arc<ReadonlyRepo>, RepoLoaderError> {
        let view = op.view()?;
        self._finish_load(op.clone(), view)
    }

    pub fn create_from(
        &self,
        operation: Operation,
        view: View,
        index: Box<dyn ReadonlyIndex>,
    ) -> Arc<ReadonlyRepo> {
        let repo = ReadonlyRepo {
            loader: self.clone(),
            operation,
            index,
            change_id_index: OnceCell::new(),
            view,
        };
        Arc::new(repo)
    }

    // If we add a higher-level abstraction of OpStore, root_operation() and
    // load_operation() will be moved there.

    /// Returns the root operation.
    pub fn root_operation(&self) -> Operation {
        self.load_operation(self.op_store.root_operation_id())
            .expect("failed to read root operation")
    }

    /// Loads the specified operation from the operation store.
    pub fn load_operation(&self, id: &OperationId) -> OpStoreResult<Operation> {
        let data = self.op_store.read_operation(id)?;
        Ok(Operation::new(self.op_store.clone(), id.clone(), data))
    }

    /// Merges the given `operations` into a single operation. Returns the root
    /// operation if `operations` is empty.
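    ///
    /// A hedged sketch of reconciling divergent operation heads (assumes
    /// `loader: &RepoLoader` and a `Vec<Operation>` named `op_heads`):
    ///
    /// ```ignore
    /// let merged = loader.merge_operations(op_heads, Some("reconcile divergent operations"))?;
    /// ```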
    pub fn merge_operations(
        &self,
        operations: Vec<Operation>,
        tx_description: Option<&str>,
    ) -> Result<Operation, RepoLoaderError> {
        let num_operations = operations.len();
        let mut operations = operations.into_iter();
        let Some(base_op) = operations.next() else {
            return Ok(self.root_operation());
        };
        let final_op = if num_operations > 1 {
            let base_repo = self.load_at(&base_op)?;
            let mut tx = base_repo.start_transaction();
            for other_op in operations {
                tx.merge_operation(other_op)?;
                tx.repo_mut().rebase_descendants()?;
            }
            let tx_description = tx_description.map_or_else(
                || format!("merge {num_operations} operations"),
                |tx_description| tx_description.to_string(),
            );
            let merged_repo = tx.write(tx_description)?.leave_unpublished();
            merged_repo.operation().clone()
        } else {
            base_op
        };

        Ok(final_op)
    }

    fn _resolve_op_heads(&self, op_heads: Vec<Operation>) -> Result<Operation, RepoLoaderError> {
        assert!(!op_heads.is_empty());
        self.merge_operations(op_heads, Some("reconcile divergent operations"))
    }

    fn _finish_load(
        &self,
        operation: Operation,
        view: View,
    ) -> Result<Arc<ReadonlyRepo>, RepoLoaderError> {
        let index = self.index_store.get_index_at_op(&operation, &self.store)?;
        let repo = ReadonlyRepo {
            loader: self.clone(),
            operation,
            index,
            change_id_index: OnceCell::new(),
            view,
        };
        Ok(Arc::new(repo))
    }
}

#[derive(Clone, Debug, PartialEq, Eq)]
enum Rewrite {
    /// The old commit was rewritten as this new commit. Children should be
    /// rebased onto the new commit.
    Rewritten(CommitId),
    /// The old commit was rewritten as multiple other commits. Children should
    /// not be rebased.
    Divergent(Vec<CommitId>),
    /// The old commit was abandoned. Children should be rebased onto the given
    /// commits (typically the parents of the old commit).
    Abandoned(Vec<CommitId>),
}

impl Rewrite {
    fn new_parent_ids(&self) -> &[CommitId] {
        match self {
            Self::Rewritten(new_parent_id) => std::slice::from_ref(new_parent_id),
            Self::Divergent(new_parent_ids) => new_parent_ids.as_slice(),
            Self::Abandoned(new_parent_ids) => new_parent_ids.as_slice(),
        }
    }
}

pub struct MutableRepo {
    base_repo: Arc<ReadonlyRepo>,
    index: Box<dyn MutableIndex>,
    view: DirtyCell<View>,
    /// Mapping from new commit to its predecessors.
    ///
    /// This is similar to (the reverse of) `parent_mapping`, but
    /// `commit_predecessors` will never be cleared on `rebase_descendants()`.
    commit_predecessors: BTreeMap<CommitId, Vec<CommitId>>,
    // The commit identified by the key has been replaced by all the ones in the value.
    // * Bookmarks pointing to the old commit should be updated to the new commit, resulting in a
    //   conflict if there are multiple new commits.
    // * Children of the old commit should be rebased onto the new commits. However, if the type is
    //   `Divergent`, they should be left in place.
    // * Working copies pointing to the old commit should be updated to the first of the new
    //   commits. However, if the type is `Abandoned`, a new working-copy commit should be created
    //   on top of all of the new commits instead.
    parent_mapping: HashMap<CommitId, Rewrite>,
}

impl MutableRepo {
    pub fn new(base_repo: Arc<ReadonlyRepo>, index: &dyn ReadonlyIndex, view: &View) -> Self {
        let mut_view = view.clone();
        let mut_index = index.start_modification();
        Self {
            base_repo,
            index: mut_index,
            view: DirtyCell::with_clean(mut_view),
            commit_predecessors: Default::default(),
            parent_mapping: Default::default(),
        }
    }

    pub fn base_repo(&self) -> &Arc<ReadonlyRepo> {
        &self.base_repo
    }

    fn view_mut(&mut self) -> &mut View {
        self.view.get_mut()
    }

    pub fn mutable_index(&self) -> &dyn MutableIndex {
        self.index.as_ref()
    }

    pub(crate) fn is_backed_by_default_index(&self) -> bool {
        self.index.downcast_ref::<DefaultMutableIndex>().is_some()
    }

    pub fn has_changes(&self) -> bool {
        self.view.ensure_clean(|v| self.enforce_view_invariants(v));
        !(self.commit_predecessors.is_empty()
            && self.parent_mapping.is_empty()
            && self.view() == &self.base_repo.view)
    }

    pub(crate) fn consume(
        self,
    ) -> (
        Box<dyn MutableIndex>,
        View,
        BTreeMap<CommitId, Vec<CommitId>>,
    ) {
        self.view.ensure_clean(|v| self.enforce_view_invariants(v));
        (self.index, self.view.into_inner(), self.commit_predecessors)
    }

    /// Returns a [`CommitBuilder`] to write a new commit to the repo.
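    ///
    /// A hedged sketch (assumes a parent `CommitId` and a `MergedTreeId` are
    /// already in hand; the description string is hypothetical):
    ///
    /// ```ignore
    /// let commit = mut_repo
    ///     .new_commit(vec![parent_id], tree_id)
    ///     .set_description("new commit")
    ///     .write()?;
    /// ```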
    pub fn new_commit(
        &mut self,
        parents: Vec<CommitId>,
        tree_id: MergedTreeId,
    ) -> CommitBuilder<'_> {
        let settings = self.base_repo.settings();
        DetachedCommitBuilder::for_new_commit(self, settings, parents, tree_id).attach(self)
    }

    /// Returns a [`CommitBuilder`] to rewrite an existing commit in the repo.
    pub fn rewrite_commit(&mut self, predecessor: &Commit) -> CommitBuilder<'_> {
        let settings = self.base_repo.settings();
        DetachedCommitBuilder::for_rewrite_from(self, settings, predecessor).attach(self)
        // CommitBuilder::write will record the rewrite in
        // `self.rewritten_commits`
    }

    pub(crate) fn set_predecessors(&mut self, id: CommitId, predecessors: Vec<CommitId>) {
        self.commit_predecessors.insert(id, predecessors);
    }

    /// Record a commit as having been rewritten to another commit in this
    /// transaction.
    ///
    /// This record is used by `rebase_descendants` to know which commits have
    /// children that need to be rebased, and where to rebase them to. See the
    /// docstring for `record_rewritten_commit` for details.
    pub fn set_rewritten_commit(&mut self, old_id: CommitId, new_id: CommitId) {
        assert_ne!(old_id, *self.store().root_commit_id());
        self.parent_mapping
            .insert(old_id, Rewrite::Rewritten(new_id));
    }

    /// Record a commit as being rewritten into multiple other commits in this
    /// transaction.
    ///
    /// A later call to `rebase_descendants()` will update bookmarks pointing
    /// to `old_id` to be conflicted and pointing to all of `new_ids`. Working
    /// copies pointing to `old_id` will be updated to point to the first
    /// commit in `new_ids`. Descendants of `old_id` will be left alone.
    pub fn set_divergent_rewrite(
        &mut self,
        old_id: CommitId,
        new_ids: impl IntoIterator<Item = CommitId>,
    ) {
        assert_ne!(old_id, *self.store().root_commit_id());
        self.parent_mapping.insert(
            old_id.clone(),
            Rewrite::Divergent(new_ids.into_iter().collect()),
        );
    }

    /// Record a commit as having been abandoned in this transaction.
    ///
    /// This record is used by `rebase_descendants` to know which commits have
    /// children that need to be rebased, and where to rebase the children to.
    ///
    /// The `rebase_descendants` logic will rebase the descendants of the old
    /// commit to become the descendants of parent(s) of the old commit. Any
    /// bookmarks at the old commit will be either moved to the parent(s) of the
    /// old commit or deleted depending on [`RewriteRefsOptions`].
    pub fn record_abandoned_commit(&mut self, old_commit: &Commit) {
        assert_ne!(old_commit.id(), self.store().root_commit_id());
        // Descendants should be rebased onto the commit's parents
        self.record_abandoned_commit_with_parents(
            old_commit.id().clone(),
            old_commit.parent_ids().iter().cloned(),
        );
    }

    /// Record a commit as having been abandoned in this transaction.
    ///
    /// A later `rebase_descendants()` will rebase children of `old_id` onto
    /// `new_parent_ids`. A working copy pointing to `old_id` will point to a
    /// new commit on top of `new_parent_ids`.
    pub fn record_abandoned_commit_with_parents(
        &mut self,
        old_id: CommitId,
        new_parent_ids: impl IntoIterator<Item = CommitId>,
    ) {
        assert_ne!(old_id, *self.store().root_commit_id());
        self.parent_mapping.insert(
            old_id,
            Rewrite::Abandoned(new_parent_ids.into_iter().collect()),
        );
    }

    pub fn has_rewrites(&self) -> bool {
        !self.parent_mapping.is_empty()
    }

    /// Calculates new parents for a commit that's currently based on the given
    /// parents. It does that by considering how previous commits have been
    /// rewritten and abandoned.
    ///
    /// If `parent_mapping` contains cycles, this function may either panic or
    /// drop parents that caused cycles.
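    ///
    /// A hedged sketch with hypothetical commit ids: suppose `a` was rewritten
    /// as `a2` and `b` was abandoned with parent `p`:
    ///
    /// ```ignore
    /// mut_repo.set_rewritten_commit(a.clone(), a2.clone());
    /// mut_repo.record_abandoned_commit_with_parents(b.clone(), [p.clone()]);
    /// // A commit whose parents were [a, b] should now be based on [a2, p]:
    /// assert_eq!(mut_repo.new_parents(&[a, b]), vec![a2, p]);
    /// ```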
    pub fn new_parents(&self, old_ids: &[CommitId]) -> Vec<CommitId> {
        self.rewritten_ids_with(old_ids, |rewrite| !matches!(rewrite, Rewrite::Divergent(_)))
    }

    fn rewritten_ids_with(
        &self,
        old_ids: &[CommitId],
        mut predicate: impl FnMut(&Rewrite) -> bool,
    ) -> Vec<CommitId> {
        assert!(!old_ids.is_empty());
        let mut new_ids = Vec::with_capacity(old_ids.len());
        let mut to_visit = old_ids.iter().rev().collect_vec();
        let mut visited = HashSet::new();
        while let Some(id) = to_visit.pop() {
            if !visited.insert(id) {
                continue;
            }
            match self.parent_mapping.get(id).filter(|&v| predicate(v)) {
                None => {
                    new_ids.push(id.clone());
                }
                Some(rewrite) => {
                    let replacements = rewrite.new_parent_ids();
                    assert!(
                        // Each commit must have a parent, so a parent can
                        // not just be mapped to nothing. This assertion
                        // could be removed if this function is used for
                        // mapping something other than a commit's parents.
                        !replacements.is_empty(),
                        "Found empty value for key {id:?} in the parent mapping",
                    );
                    to_visit.extend(replacements.iter().rev());
                }
            }
        }
        assert!(
            !new_ids.is_empty(),
            "new ids become empty because of cycle in the parent mapping"
        );
        debug_assert!(new_ids.iter().all_unique());
        new_ids
    }

    /// Fully resolves transitive replacements in `parent_mapping`.
    ///
    /// If `parent_mapping` contains cycles, this function will panic.
    fn resolve_rewrite_mapping_with(
        &self,
        mut predicate: impl FnMut(&Rewrite) -> bool,
    ) -> HashMap<CommitId, Vec<CommitId>> {
        let sorted_ids = dag_walk::topo_order_forward(
            self.parent_mapping.keys(),
            |&id| id,
            |&id| match self.parent_mapping.get(id).filter(|&v| predicate(v)) {
                None => &[],
                Some(rewrite) => rewrite.new_parent_ids(),
            },
        );
        let mut new_mapping: HashMap<CommitId, Vec<CommitId>> = HashMap::new();
        for old_id in sorted_ids {
            let Some(rewrite) = self.parent_mapping.get(old_id).filter(|&v| predicate(v)) else {
                continue;
            };
            let lookup = |id| new_mapping.get(id).map_or(slice::from_ref(id), |ids| ids);
            let new_ids = match rewrite.new_parent_ids() {
                [id] => lookup(id).to_vec(), // unique() not needed
                ids => ids.iter().flat_map(lookup).unique().cloned().collect(),
            };
            debug_assert_eq!(
                new_ids,
                self.rewritten_ids_with(slice::from_ref(old_id), &mut predicate)
            );
            new_mapping.insert(old_id.clone(), new_ids);
        }
        new_mapping
    }

    /// Updates bookmarks, working copies, and anonymous heads after rewriting
    /// and/or abandoning commits.
    pub fn update_rewritten_references(
        &mut self,
        options: &RewriteRefsOptions,
    ) -> BackendResult<()> {
        self.update_all_references(options)?;
        self.update_heads()
            .map_err(|err| err.into_backend_error())?;
        Ok(())
    }

    fn update_all_references(&mut self, options: &RewriteRefsOptions) -> BackendResult<()> {
        let rewrite_mapping = self.resolve_rewrite_mapping_with(|_| true);
        self.update_local_bookmarks(&rewrite_mapping, options);
        self.update_wc_commits(&rewrite_mapping)?;
        Ok(())
    }

    fn update_local_bookmarks(
        &mut self,
        rewrite_mapping: &HashMap<CommitId, Vec<CommitId>>,
        options: &RewriteRefsOptions,
    ) {
        let changed_branches = self
            .view()
            .local_bookmarks()
            .flat_map(|(name, target)| {
                target.added_ids().filter_map(|id| {
                    let change = rewrite_mapping.get_key_value(id)?;
                    Some((name.to_owned(), change))
                })
            })
            .collect_vec();
        for (bookmark_name, (old_commit_id, new_commit_ids)) in changed_branches {
            let should_delete = options.delete_abandoned_bookmarks
                && matches!(
                    self.parent_mapping.get(old_commit_id),
                    Some(Rewrite::Abandoned(_))
                );
            let old_target = RefTarget::normal(old_commit_id.clone());
            let new_target = if should_delete {
                RefTarget::absent()
            } else {
                let ids = itertools::intersperse(new_commit_ids, old_commit_id)
                    .map(|id| Some(id.clone()));
                RefTarget::from_merge(MergeBuilder::from_iter(ids).build())
            };

            self.merge_local_bookmark(&bookmark_name, &old_target, &new_target);
        }
    }

    fn update_wc_commits(
        &mut self,
        rewrite_mapping: &HashMap<CommitId, Vec<CommitId>>,
    ) -> BackendResult<()> {
        let changed_wc_commits = self
            .view()
            .wc_commit_ids()
            .iter()
            .filter_map(|(name, commit_id)| {
                let change = rewrite_mapping.get_key_value(commit_id)?;
                Some((name.to_owned(), change))
            })
            .collect_vec();
        let mut recreated_wc_commits: HashMap<&CommitId, Commit> = HashMap::new();
        for (name, (old_commit_id, new_commit_ids)) in changed_wc_commits {
            let abandoned_old_commit = matches!(
                self.parent_mapping.get(old_commit_id),
                Some(Rewrite::Abandoned(_))
            );
            let new_wc_commit = if !abandoned_old_commit {
                // We arbitrarily pick a new working-copy commit among the candidates.
                self.store().get_commit(&new_commit_ids[0])?
            } else if let Some(commit) = recreated_wc_commits.get(old_commit_id) {
                commit.clone()
            } else {
                let new_commits: Vec<_> = new_commit_ids
                    .iter()
                    .map(|id| self.store().get_commit(id))
                    .try_collect()?;
                let merged_parents_tree = merge_commit_trees(self, &new_commits).block_on()?;
                let commit = self
                    .new_commit(new_commit_ids.clone(), merged_parents_tree.id().clone())
                    .write()?;
                recreated_wc_commits.insert(old_commit_id, commit.clone());
                commit
            };
            self.edit(name, &new_wc_commit).map_err(|err| match err {
                EditCommitError::BackendError(backend_error) => backend_error,
                EditCommitError::WorkingCopyCommitNotFound(_)
                | EditCommitError::RewriteRootCommit(_) => panic!("unexpected error: {err:?}"),
            })?;
        }
        Ok(())
    }

    fn update_heads(&mut self) -> Result<(), RevsetEvaluationError> {
        let old_commits_expression =
            RevsetExpression::commits(self.parent_mapping.keys().cloned().collect());
        let heads_to_add_expression = old_commits_expression
            .parents()
            .minus(&old_commits_expression);
        let heads_to_add: Vec<_> = heads_to_add_expression
            .evaluate(self)?
            .iter()
            .try_collect()?;

        let mut view = self.view().store_view().clone();
        for commit_id in self.parent_mapping.keys() {
            view.head_ids.remove(commit_id);
        }
        view.head_ids.extend(heads_to_add);
        self.set_view(view);
        Ok(())
    }

    /// Find descendants of `roots`, unless they've already been rewritten
    /// (according to `parent_mapping`).
    pub fn find_descendants_for_rebase(&self, roots: Vec<CommitId>) -> BackendResult<Vec<Commit>> {
        let to_visit_revset = RevsetExpression::commits(roots)
            .descendants()
            .minus(&RevsetExpression::commits(
                self.parent_mapping.keys().cloned().collect(),
            ))
            .evaluate(self)
            .map_err(|err| err.into_backend_error())?;
        let to_visit = to_visit_revset
            .iter()
            .commits(self.store())
            .try_collect()
            .map_err(|err| err.into_backend_error())?;
        Ok(to_visit)
    }

    /// Orders a set of commits into the order they should be rebased in. The
    /// result is reversed so the next commit can be popped from the end.
    fn order_commits_for_rebase(
        &self,
        to_visit: Vec<Commit>,
        new_parents_map: &HashMap<CommitId, Vec<CommitId>>,
    ) -> BackendResult<Vec<Commit>> {
        let to_visit_set: HashSet<CommitId> =
            to_visit.iter().map(|commit| commit.id().clone()).collect();
        let mut visited = HashSet::new();
        // Calculate an order where we rebase parents first, but if the parents were
        // rewritten, make sure we rebase the rewritten parent first.
        let store = self.store();
        dag_walk::topo_order_reverse_ok(
            to_visit.into_iter().map(Ok),
            |commit| commit.id().clone(),
            |commit| -> Vec<BackendResult<Commit>> {
                visited.insert(commit.id().clone());
                let mut dependents = vec![];
                let parent_ids = new_parents_map
                    .get(commit.id())
                    .map_or(commit.parent_ids(), |parent_ids| parent_ids);
                for parent_id in parent_ids {
                    let parent = store.get_commit(parent_id);
                    let Ok(parent) = parent else {
                        dependents.push(parent);
                        continue;
                    };
                    if let Some(rewrite) = self.parent_mapping.get(parent.id()) {
                        for target in rewrite.new_parent_ids() {
                            if to_visit_set.contains(target) && !visited.contains(target) {
                                dependents.push(store.get_commit(target));
                            }
                        }
                    }
                    if to_visit_set.contains(parent.id()) {
                        dependents.push(Ok(parent));
                    }
                }
                dependents
            },
            |_| panic!("graph has cycle"),
        )
    }

    /// Rewrite descendants of the given roots.
    ///
    /// The callback will be called for each commit with the new parents
    /// prepopulated. The callback may change the parents and write the new
    /// commit, or it may abandon the commit, or it may leave the old commit
    /// unchanged.
    ///
    /// The set of commits to visit is determined at the start. If the callback
    /// adds new descendants, then the callback will not be called for those.
    /// Similarly, if the callback rewrites unrelated commits, then the callback
    /// will not be called for descendants of those commits.
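    ///
    /// A hedged sketch of a callback that simply reparents every visited
    /// commit, mirroring [`Self::reparent_descendants()`] below:
    ///
    /// ```ignore
    /// mut_repo.transform_descendants(roots, async |rewriter| {
    ///     if rewriter.parents_changed() {
    ///         rewriter.reparent().write()?;
    ///     }
    ///     Ok(())
    /// })?;
    /// ```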
    pub fn transform_descendants(
        &mut self,
        roots: Vec<CommitId>,
        callback: impl AsyncFnMut(CommitRewriter) -> BackendResult<()>,
    ) -> BackendResult<()> {
        let options = RewriteRefsOptions::default();
        self.transform_descendants_with_options(roots, &HashMap::new(), &options, callback)
    }

    /// Rewrite descendants of the given roots with options.
    ///
    /// If a commit is present in `new_parents_map`, it will be rebased onto
    /// the new parents provided in the map instead of its original parents.
    ///
    /// See [`Self::transform_descendants()`] for details.
    pub fn transform_descendants_with_options(
        &mut self,
        roots: Vec<CommitId>,
        new_parents_map: &HashMap<CommitId, Vec<CommitId>>,
        options: &RewriteRefsOptions,
        callback: impl AsyncFnMut(CommitRewriter) -> BackendResult<()>,
    ) -> BackendResult<()> {
        let descendants = self.find_descendants_for_rebase(roots)?;
        self.transform_commits(descendants, new_parents_map, options, callback)
    }

    /// Rewrite the given commits in reverse topological order.
    ///
    /// `commits` should be a connected range.
    ///
    /// This function is similar to
    /// [`Self::transform_descendants_with_options()`], but only rewrites the
    /// `commits` provided, and does not rewrite their descendants.
    pub fn transform_commits(
        &mut self,
        commits: Vec<Commit>,
        new_parents_map: &HashMap<CommitId, Vec<CommitId>>,
        options: &RewriteRefsOptions,
        mut callback: impl AsyncFnMut(CommitRewriter) -> BackendResult<()>,
    ) -> BackendResult<()> {
        let mut to_visit = self.order_commits_for_rebase(commits, new_parents_map)?;
        while let Some(old_commit) = to_visit.pop() {
            let parent_ids = new_parents_map
                .get(old_commit.id())
                .map_or(old_commit.parent_ids(), |parent_ids| parent_ids);
            let new_parent_ids = self.new_parents(parent_ids);
            let rewriter = CommitRewriter::new(self, old_commit, new_parent_ids);
            callback(rewriter).block_on()?;
        }
        self.update_rewritten_references(options)?;
        // Since we didn't necessarily visit all descendants of rewritten commits (e.g.
        // if they were rewritten in the callback), there can still be commits left to
        // rebase, so we don't clear `parent_mapping` here.
        // TODO: Should we make this stricter? We could check that there were no
        // rewrites before this function was called, and we can check that only
        // commits in the `to_visit` set were added by the callback. Then we
        // could clear `parent_mapping` here and not have to scan it again at
        // the end of the transaction when we call `rebase_descendants()`.

        Ok(())
    }

    /// Rebase descendants of the rewritten commits with options and callback.
    ///
    /// The descendants of the commits registered in `self.parent_mapping` will
    /// be recursively rebased onto the new version of their parents.
    ///
    /// If `options.empty` is the default (`EmptyBehavior::Keep`), all rebased
    /// descendant commits will be preserved even if they were emptied following
    /// the rebase operation. Otherwise, this function may rebase some commits
    /// and abandon others, based on the given `EmptyBehavior`. The behavior is
    /// such that only commits with a single parent will ever be abandoned. The
    /// parent will inherit the descendants and the bookmarks of the abandoned
    /// commit.
    ///
    /// The `progress` callback will be invoked for each rebase operation with
    /// `(old_commit, rebased_commit)` as arguments.
    pub fn rebase_descendants_with_options(
        &mut self,
        options: &RebaseOptions,
        mut progress: impl FnMut(Commit, RebasedCommit),
    ) -> BackendResult<()> {
        let roots = self.parent_mapping.keys().cloned().collect();
        self.transform_descendants_with_options(
            roots,
            &HashMap::new(),
            &options.rewrite_refs,
            async |rewriter| {
                if rewriter.parents_changed() {
                    let old_commit = rewriter.old_commit().clone();
                    let rebased_commit = rebase_commit_with_options(rewriter, options)?;
                    progress(old_commit, rebased_commit);
                }
                Ok(())
            },
        )?;
        self.parent_mapping.clear();
        Ok(())
    }

    /// Rebase descendants of the rewritten commits.
    ///
1398    /// The descendants of the commits registered in `self.parent_mappings` will
1399    /// be recursively rebased onto the new version of their parents.
1400    /// Returns the number of rebased descendants.
1401    ///
1402    /// All rebased descendant commits will be preserved even if they were
1403    /// emptied following the rebase operation. To customize the rebase
1404    /// behavior, use [`MutableRepo::rebase_descendants_with_options`].
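    ///
    /// A sketch (not compiled; assumes a started `Transaction` bound to `tx`):
    ///
    /// ```ignore
    /// let num_rebased = tx.repo_mut().rebase_descendants()?;
    /// ```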
    pub fn rebase_descendants(&mut self) -> BackendResult<usize> {
        let options = RebaseOptions::default();
        let mut num_rebased = 0;
        self.rebase_descendants_with_options(&options, |_old_commit, _rebased_commit| {
            num_rebased += 1;
        })?;
        Ok(num_rebased)
    }

    /// Reparent descendants of the rewritten commits.
    ///
    /// The descendants of the commits registered in `self.parent_mapping` will
    /// be recursively reparented onto the new version of their parents.
    /// The content of those descendants will remain untouched.
    /// Returns the number of reparented descendants.
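    ///
    /// A sketch (not compiled; assumes a started `Transaction` bound to `tx`)
    /// for the case where only commit metadata changed and rewriting the
    /// descendants' trees would be wasted work:
    ///
    /// ```ignore
    /// let num_reparented = tx.repo_mut().reparent_descendants()?;
    /// ```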
    pub fn reparent_descendants(&mut self) -> BackendResult<usize> {
        let roots = self.parent_mapping.keys().cloned().collect_vec();
        let mut num_reparented = 0;
        self.transform_descendants(roots, async |rewriter| {
            if rewriter.parents_changed() {
                let builder = rewriter.reparent();
                builder.write()?;
                num_reparented += 1;
            }
            Ok(())
        })?;
        self.parent_mapping.clear();
        Ok(num_reparented)
    }

    /// Sets the working-copy commit of the given workspace.
    ///
    /// Returns an error if `commit_id` is the root commit, which cannot be
    /// checked out.
    pub fn set_wc_commit(
        &mut self,
        name: WorkspaceNameBuf,
        commit_id: CommitId,
    ) -> Result<(), RewriteRootCommit> {
        if &commit_id == self.store().root_commit_id() {
            return Err(RewriteRootCommit);
        }
        self.view_mut().set_wc_commit(name, commit_id);
        Ok(())
    }

    /// Removes the working-copy commit of the given workspace, abandoning the
    /// old working-copy commit if it is discardable and not referenced
    /// elsewhere.
    pub fn remove_wc_commit(&mut self, name: &WorkspaceName) -> Result<(), EditCommitError> {
        self.maybe_abandon_wc_commit(name)?;
        self.view_mut().remove_wc_commit(name);
        Ok(())
    }

    /// Merges the working-copy commit of the given workspace. If there's a
    /// conflict and the workspace isn't removed on either side, we keep the
    /// self side.
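    ///
    /// A sketch of the outcomes (`A`/`B`/`C` are commit ids, `-` is a removed
    /// workspace), following the `trivial_merge` call below:
    ///
    /// ```text
    /// self  base  other  ->  result
    ///  A     A     B         B   (only the other side changed)
    ///  B     A     A         B   (only the self side changed)
    ///  B     A     C         B   (conflict: keep the self side)
    ///  B     A     -         -   (other side removed the workspace)
    ///  -     A     B         -   (self side removed the workspace)
    /// ```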
    fn merge_wc_commit(
        &mut self,
        name: &WorkspaceName,
        base_id: Option<&CommitId>,
        other_id: Option<&CommitId>,
    ) {
        let view = self.view.get_mut();
        let self_id = view.get_wc_commit_id(name);
        // Not using merge_ref_targets(). Since the working-copy pointer moves
        // in arbitrary directions, it doesn't make sense to resolve conflicts
        // based on ancestry.
        let new_id = if let Some(resolved) =
            trivial_merge(&[self_id, base_id, other_id], SameChange::Accept)
        {
            resolved.cloned()
        } else if self_id.is_none() || other_id.is_none() {
            // We want to remove the workspace even if the self side changed the
            // working-copy commit.
            None
        } else {
            self_id.cloned()
        };
        match new_id {
            Some(id) => view.set_wc_commit(name.to_owned(), id),
            None => view.remove_wc_commit(name),
        }
    }

    pub fn rename_workspace(
        &mut self,
        old_name: &WorkspaceName,
        new_name: WorkspaceNameBuf,
    ) -> Result<(), RenameWorkspaceError> {
        self.view_mut().rename_workspace(old_name, new_name)
    }

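    /// Creates a new commit on top of `commit`, with the same tree, and makes
    /// it the working-copy commit of the given workspace.
    ///
    /// A sketch of typical usage (not compiled; assumes a started
    /// `Transaction` bound to `tx`, a `Commit` bound to `commit`, and a
    /// workspace name bound to `name`):
    ///
    /// ```ignore
    /// let wc_commit = tx.repo_mut().check_out(name, &commit)?;
    /// ```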
    pub fn check_out(
        &mut self,
        name: WorkspaceNameBuf,
        commit: &Commit,
    ) -> Result<Commit, CheckOutCommitError> {
        let wc_commit = self
            .new_commit(vec![commit.id().clone()], commit.tree_id().clone())
            .write()?;
        self.edit(name, &wc_commit)?;
        Ok(wc_commit)
    }

    /// Makes `commit` the working-copy commit of the given workspace, possibly
    /// abandoning the working-copy commit we're leaving.
    pub fn edit(&mut self, name: WorkspaceNameBuf, commit: &Commit) -> Result<(), EditCommitError> {
        self.maybe_abandon_wc_commit(&name)?;
        self.add_head(commit)?;
        Ok(self.set_wc_commit(name, commit.id().clone())?)
    }

    fn maybe_abandon_wc_commit(
        &mut self,
        workspace_name: &WorkspaceName,
    ) -> Result<(), EditCommitError> {
        let is_commit_referenced = |view: &View, commit_id: &CommitId| -> bool {
            view.wc_commit_ids()
                .iter()
                .filter(|&(name, _)| name != workspace_name)
                .map(|(_, wc_id)| wc_id)
                .chain(
                    view.local_bookmarks()
                        .flat_map(|(_, target)| target.added_ids()),
                )
                .any(|id| id == commit_id)
        };

        let maybe_wc_commit_id = self
            .view
            .with_ref(|v| v.get_wc_commit_id(workspace_name).cloned());
        if let Some(wc_commit_id) = maybe_wc_commit_id {
            let wc_commit = self
                .store()
                .get_commit(&wc_commit_id)
                .map_err(EditCommitError::WorkingCopyCommitNotFound)?;
            if wc_commit.is_discardable(self)?
                && self
                    .view
                    .with_ref(|v| !is_commit_referenced(v, wc_commit.id()))
                && self.view().heads().contains(wc_commit.id())
            {
                // Abandon the working-copy commit we're leaving if it's
                // discardable, not pointed to by a local bookmark or another
                // working copy, and a head commit.
                self.record_abandoned_commit(&wc_commit);
            }
        }

        Ok(())
    }

    fn enforce_view_invariants(&self, view: &mut View) {
        let view = view.store_view_mut();
        let root_commit_id = self.store().root_commit_id();
        if view.head_ids.is_empty() {
            view.head_ids.insert(root_commit_id.clone());
        } else if view.head_ids.len() > 1 {
            // An empty head_ids set is padded with the root_commit_id, but the
            // root id is unwanted during heads resolution.
            view.head_ids.remove(root_commit_id);
            // It is unclear whether `heads` can ever fail for the default
            // implementation, but it can definitely fail for non-default
            // implementations.
            // TODO: propagate errors.
            view.head_ids = self
                .index()
                .heads(&mut view.head_ids.iter())
                .unwrap()
                .into_iter()
                .collect();
        }
        assert!(!view.head_ids.is_empty());
    }

    /// Ensures that the given `head` and ancestor commits are reachable from
    /// the visible heads.
    pub fn add_head(&mut self, head: &Commit) -> BackendResult<()> {
        self.add_heads(slice::from_ref(head))
    }

    /// Ensures that the given `heads` and ancestor commits are reachable from
    /// the visible heads.
    ///
    /// The `heads` may contain redundant commits such as already visible ones
    /// and ancestors of the other heads. The `heads` and ancestor commits
    /// should exist in the store.
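    ///
    /// A sketch (not compiled; assumes a started `Transaction` bound to `tx`
    /// and a `Vec<Commit>` of newly created or fetched commits bound to
    /// `commits`; redundant entries are fine):
    ///
    /// ```ignore
    /// tx.repo_mut().add_heads(&commits)?;
    /// ```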
    pub fn add_heads(&mut self, heads: &[Commit]) -> BackendResult<()> {
        let current_heads = self.view.get_mut().heads();
        // Use incremental update for the common case of adding a single commit
        // on top of a current head. TODO: Also use incremental update when
        // adding a single commit on top of a non-head.
        match heads {
            [] => {}
            [head]
                if head
                    .parent_ids()
                    .iter()
                    .all(|parent_id| current_heads.contains(parent_id)) =>
            {
                self.index
                    .add_commit(head)
                    // TODO: indexing error shouldn't be a "BackendError"
                    .map_err(|err| BackendError::Other(err.into()))?;
                self.view.get_mut().add_head(head.id());
                for parent_id in head.parent_ids() {
                    self.view.get_mut().remove_head(parent_id);
                }
            }
            _ => {
                let missing_commits = dag_walk::topo_order_reverse_ord_ok(
                    heads
                        .iter()
                        .cloned()
                        .map(CommitByCommitterTimestamp)
                        .map(Ok),
                    |CommitByCommitterTimestamp(commit)| commit.id().clone(),
                    |CommitByCommitterTimestamp(commit)| {
                        commit
                            .parent_ids()
                            .iter()
                            .filter(|id| !self.index().has_id(id))
                            .map(|id| self.store().get_commit(id))
                            .map_ok(CommitByCommitterTimestamp)
                            .collect_vec()
                    },
                    |_| panic!("graph has cycle"),
                )?;
                for CommitByCommitterTimestamp(missing_commit) in missing_commits.iter().rev() {
                    self.index
                        .add_commit(missing_commit)
                        // TODO: indexing error shouldn't be a "BackendError"
                        .map_err(|err| BackendError::Other(err.into()))?;
                }
                for head in heads {
                    self.view.get_mut().add_head(head.id());
                }
                self.view.mark_dirty();
            }
        }
        Ok(())
    }

    pub fn remove_head(&mut self, head: &CommitId) {
        self.view_mut().remove_head(head);
        self.view.mark_dirty();
    }

    pub fn get_local_bookmark(&self, name: &RefName) -> RefTarget {
        self.view.with_ref(|v| v.get_local_bookmark(name).clone())
    }

    /// Sets the local bookmark to point to the given target. Any commits the
    /// target points to are made visible.
    pub fn set_local_bookmark_target(&mut self, name: &RefName, target: RefTarget) {
        let view = self.view_mut();
        for id in target.added_ids() {
            view.add_head(id);
        }
        view.set_local_bookmark_target(name, target);
        self.view.mark_dirty();
    }

    pub fn merge_local_bookmark(
        &mut self,
        name: &RefName,
        base_target: &RefTarget,
        other_target: &RefTarget,
    ) {
        let view = self.view.get_mut();
        let index = self.index.as_index();
        let self_target = view.get_local_bookmark(name);
        let new_target = merge_ref_targets(index, self_target, base_target, other_target);
        self.set_local_bookmark_target(name, new_target);
    }

    pub fn get_remote_bookmark(&self, symbol: RemoteRefSymbol<'_>) -> RemoteRef {
        self.view
            .with_ref(|v| v.get_remote_bookmark(symbol).clone())
    }

    pub fn set_remote_bookmark(&mut self, symbol: RemoteRefSymbol<'_>, remote_ref: RemoteRef) {
        self.view_mut().set_remote_bookmark(symbol, remote_ref);
    }

    fn merge_remote_bookmark(
        &mut self,
        symbol: RemoteRefSymbol<'_>,
        base_ref: &RemoteRef,
        other_ref: &RemoteRef,
    ) {
        let view = self.view.get_mut();
        let index = self.index.as_index();
        let self_ref = view.get_remote_bookmark(symbol);
        let new_ref = merge_remote_refs(index, self_ref, base_ref, other_ref);
        view.set_remote_bookmark(symbol, new_ref);
    }

    /// Merges the specified remote bookmark into the local bookmark, and
    /// starts tracking it.
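    ///
    /// A sketch (not compiled; assumes a started `Transaction` bound to `tx`
    /// and a `RemoteRefSymbol` naming an existing remote bookmark, e.g.
    /// `main@origin`, bound to `symbol`):
    ///
    /// ```ignore
    /// tx.repo_mut().track_remote_bookmark(symbol);
    /// assert!(tx.repo_mut().get_remote_bookmark(symbol).is_tracked());
    /// ```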
    pub fn track_remote_bookmark(&mut self, symbol: RemoteRefSymbol<'_>) {
        let mut remote_ref = self.get_remote_bookmark(symbol);
        let base_target = remote_ref.tracked_target();
        self.merge_local_bookmark(symbol.name, base_target, &remote_ref.target);
        remote_ref.state = RemoteRefState::Tracked;
        self.set_remote_bookmark(symbol, remote_ref);
    }

    /// Stops tracking the specified remote bookmark.
    pub fn untrack_remote_bookmark(&mut self, symbol: RemoteRefSymbol<'_>) {
        let mut remote_ref = self.get_remote_bookmark(symbol);
        remote_ref.state = RemoteRefState::New;
        self.set_remote_bookmark(symbol, remote_ref);
    }

    pub fn remove_remote(&mut self, remote_name: &RemoteName) {
        self.view_mut().remove_remote(remote_name);
    }

    pub fn rename_remote(&mut self, old: &RemoteName, new: &RemoteName) {
        self.view_mut().rename_remote(old, new);
    }

    pub fn get_local_tag(&self, name: &RefName) -> RefTarget {
        self.view.with_ref(|v| v.get_local_tag(name).clone())
    }

    pub fn set_local_tag_target(&mut self, name: &RefName, target: RefTarget) {
        self.view_mut().set_local_tag_target(name, target);
    }

    pub fn merge_local_tag(
        &mut self,
        name: &RefName,
        base_target: &RefTarget,
        other_target: &RefTarget,
    ) {
        let view = self.view.get_mut();
        let index = self.index.as_index();
        let self_target = view.get_local_tag(name);
        let new_target = merge_ref_targets(index, self_target, base_target, other_target);
        view.set_local_tag_target(name, new_target);
    }

    pub fn get_remote_tag(&self, symbol: RemoteRefSymbol<'_>) -> RemoteRef {
        self.view.with_ref(|v| v.get_remote_tag(symbol).clone())
    }

    pub fn set_remote_tag(&mut self, symbol: RemoteRefSymbol<'_>, remote_ref: RemoteRef) {
        self.view_mut().set_remote_tag(symbol, remote_ref);
    }

    fn merge_remote_tag(
        &mut self,
        symbol: RemoteRefSymbol<'_>,
        base_ref: &RemoteRef,
        other_ref: &RemoteRef,
    ) {
        let view = self.view.get_mut();
        let index = self.index.as_index();
        let self_ref = view.get_remote_tag(symbol);
        let new_ref = merge_remote_refs(index, self_ref, base_ref, other_ref);
        view.set_remote_tag(symbol, new_ref);
    }

    pub fn get_git_ref(&self, name: &GitRefName) -> RefTarget {
        self.view.with_ref(|v| v.get_git_ref(name).clone())
    }

    pub fn set_git_ref_target(&mut self, name: &GitRefName, target: RefTarget) {
        self.view_mut().set_git_ref_target(name, target);
    }

    fn merge_git_ref(
        &mut self,
        name: &GitRefName,
        base_target: &RefTarget,
        other_target: &RefTarget,
    ) {
        let view = self.view.get_mut();
        let index = self.index.as_index();
        let self_target = view.get_git_ref(name);
        let new_target = merge_ref_targets(index, self_target, base_target, other_target);
        view.set_git_ref_target(name, new_target);
    }

    pub fn git_head(&self) -> RefTarget {
        self.view.with_ref(|v| v.git_head().clone())
    }

    pub fn set_git_head_target(&mut self, target: RefTarget) {
        self.view_mut().set_git_head_target(target);
    }

    pub fn set_view(&mut self, data: op_store::View) {
        self.view_mut().set_view(data);
        self.view.mark_dirty();
    }

    pub fn merge(
        &mut self,
        base_repo: &ReadonlyRepo,
        other_repo: &ReadonlyRepo,
    ) -> BackendResult<()> {
        // First, merge the index, so we can take advantage of a valid index when
        // merging the view. Merging in base_repo's index isn't typically
        // necessary, but it can be if base_repo is ahead of either self or other_repo
        // (e.g. because we're undoing an operation that hasn't been published).
        self.index.merge_in(base_repo.readonly_index());
        self.index.merge_in(other_repo.readonly_index());

        self.view.ensure_clean(|v| self.enforce_view_invariants(v));
        self.merge_view(&base_repo.view, &other_repo.view)?;
        self.view.mark_dirty();
        Ok(())
    }

    pub fn merge_index(&mut self, other_repo: &ReadonlyRepo) {
        self.index.merge_in(other_repo.readonly_index());
    }

    fn merge_view(&mut self, base: &View, other: &View) -> BackendResult<()> {
        let changed_wc_commits = diff_named_commit_ids(base.wc_commit_ids(), other.wc_commit_ids());
        for (name, (base_id, other_id)) in changed_wc_commits {
            self.merge_wc_commit(name, base_id, other_id);
        }

        let base_heads = base.heads().iter().cloned().collect_vec();
        let own_heads = self.view().heads().iter().cloned().collect_vec();
        let other_heads = other.heads().iter().cloned().collect_vec();

        // HACK: Don't walk long ranges of commits to find rewrites when using
        // custom index implementations. The only custom index implementation
        // we're currently aware of is Google's. That repo has too high a commit
        // rate for it to be feasible to walk all added and removed commits.
        // TODO: Fix this somehow. Maybe a method on `Index` to find rewritten
        // commits given `base_heads`, `own_heads` and `other_heads`?
        if self.is_backed_by_default_index() {
            self.record_rewrites(&base_heads, &own_heads)?;
            self.record_rewrites(&base_heads, &other_heads)?;
            // No need to remove heads removed by `other` because we already
            // marked them abandoned or rewritten.
        } else {
            for removed_head in base.heads().difference(other.heads()) {
                self.view_mut().remove_head(removed_head);
            }
        }
        for added_head in other.heads().difference(base.heads()) {
            self.view_mut().add_head(added_head);
        }

        let changed_local_bookmarks =
            diff_named_ref_targets(base.local_bookmarks(), other.local_bookmarks());
        for (name, (base_target, other_target)) in changed_local_bookmarks {
            self.merge_local_bookmark(name, base_target, other_target);
        }

        let changed_local_tags = diff_named_ref_targets(base.local_tags(), other.local_tags());
        for (name, (base_target, other_target)) in changed_local_tags {
            self.merge_local_tag(name, base_target, other_target);
        }

        let changed_git_refs = diff_named_ref_targets(base.git_refs(), other.git_refs());
        for (name, (base_target, other_target)) in changed_git_refs {
            self.merge_git_ref(name, base_target, other_target);
        }

        let changed_remote_bookmarks =
            diff_named_remote_refs(base.all_remote_bookmarks(), other.all_remote_bookmarks());
        for (symbol, (base_ref, other_ref)) in changed_remote_bookmarks {
            self.merge_remote_bookmark(symbol, base_ref, other_ref);
        }

        let changed_remote_tags =
            diff_named_remote_refs(base.all_remote_tags(), other.all_remote_tags());
        for (symbol, (base_ref, other_ref)) in changed_remote_tags {
            self.merge_remote_tag(symbol, base_ref, other_ref);
        }

        let new_git_head_target = merge_ref_targets(
            self.index(),
            self.view().git_head(),
            base.git_head(),
            other.git_head(),
        );
        self.set_git_head_target(new_git_head_target);

        Ok(())
    }

    /// Finds and records commits that were rewritten or abandoned between
    /// `old_heads` and `new_heads`.
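    ///
    /// A sketch with hypothetical ids: if change `X` resolves to commit `A`
    /// among `old_heads` and to commit `A2` among `new_heads`, `A` is recorded
    /// as rewritten to `A2` (or as divergent if several new commits carry
    /// `X`). If change `Y` resolves to commit `B` among `old_heads` and to
    /// nothing among `new_heads`, `B` is recorded as abandoned.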
    fn record_rewrites(
        &mut self,
        old_heads: &[CommitId],
        new_heads: &[CommitId],
    ) -> BackendResult<()> {
        let mut removed_changes: HashMap<ChangeId, Vec<CommitId>> = HashMap::new();
        for item in revset::walk_revs(self, old_heads, new_heads)
            .map_err(|err| err.into_backend_error())?
            .commit_change_ids()
        {
            let (commit_id, change_id) = item.map_err(|err| err.into_backend_error())?;
            removed_changes
                .entry(change_id)
                .or_default()
                .push(commit_id);
        }
        if removed_changes.is_empty() {
            return Ok(());
        }

        let mut rewritten_changes = HashSet::new();
        let mut rewritten_commits: HashMap<CommitId, Vec<CommitId>> = HashMap::new();
        for item in revset::walk_revs(self, new_heads, old_heads)
            .map_err(|err| err.into_backend_error())?
            .commit_change_ids()
        {
            let (commit_id, change_id) = item.map_err(|err| err.into_backend_error())?;
            if let Some(old_commits) = removed_changes.get(&change_id) {
                for old_commit in old_commits {
                    rewritten_commits
                        .entry(old_commit.clone())
                        .or_default()
                        .push(commit_id.clone());
                }
            }
            rewritten_changes.insert(change_id);
        }
        for (old_commit, new_commits) in rewritten_commits {
            if new_commits.len() == 1 {
                self.set_rewritten_commit(
                    old_commit.clone(),
                    new_commits.into_iter().next().unwrap(),
                );
            } else {
                self.set_divergent_rewrite(old_commit.clone(), new_commits);
            }
        }

        for (change_id, removed_commit_ids) in &removed_changes {
            if !rewritten_changes.contains(change_id) {
                for id in removed_commit_ids {
                    let commit = self.store().get_commit(id)?;
                    self.record_abandoned_commit(&commit);
                }
            }
        }

        Ok(())
    }
}

impl Repo for MutableRepo {
    fn base_repo(&self) -> &ReadonlyRepo {
        &self.base_repo
    }

    fn store(&self) -> &Arc<Store> {
        self.base_repo.store()
    }

    fn op_store(&self) -> &Arc<dyn OpStore> {
        self.base_repo.op_store()
    }

    fn index(&self) -> &dyn Index {
        self.index.as_index()
    }

    fn view(&self) -> &View {
        self.view
            .get_or_ensure_clean(|v| self.enforce_view_invariants(v))
    }

    fn submodule_store(&self) -> &Arc<dyn SubmoduleStore> {
        self.base_repo.submodule_store()
    }

    fn resolve_change_id_prefix(&self, prefix: &HexPrefix) -> PrefixResolution<Vec<CommitId>> {
        let change_id_index = self.index.change_id_index(&mut self.view().heads().iter());
        change_id_index.resolve_prefix(prefix)
    }

    fn shortest_unique_change_id_prefix_len(&self, target_id: &ChangeId) -> usize {
        let change_id_index = self.index.change_id_index(&mut self.view().heads().iter());
        change_id_index.shortest_unique_prefix_len(target_id)
    }
}

/// Error from attempts to check out the root commit for editing
#[derive(Debug, Error)]
#[error("Cannot rewrite the root commit")]
pub struct RewriteRootCommit;

/// Error from attempts to edit a commit
#[derive(Debug, Error)]
pub enum EditCommitError {
    #[error("Current working-copy commit not found")]
    WorkingCopyCommitNotFound(#[source] BackendError),
    #[error(transparent)]
    RewriteRootCommit(#[from] RewriteRootCommit),
    #[error(transparent)]
    BackendError(#[from] BackendError),
}

/// Error from attempts to check out a commit
#[derive(Debug, Error)]
pub enum CheckOutCommitError {
    #[error("Failed to create new working-copy commit")]
    CreateCommit(#[from] BackendError),
    #[error("Failed to edit commit")]
    EditCommit(#[from] EditCommitError),
}

mod dirty_cell {
    use std::cell::OnceCell;
    use std::cell::RefCell;

    /// Cell that lazily updates the value after `mark_dirty()`.
    ///
    /// A clean value can be immutably borrowed within the `self` lifetime.
    #[derive(Clone, Debug)]
    pub struct DirtyCell<T> {
        // Either the clean or the dirty value is set. The value is boxed to
        // reduce stack space and memcpy overhead.
        clean: OnceCell<Box<T>>,
        dirty: RefCell<Option<Box<T>>>,
    }

    impl<T> DirtyCell<T> {
        pub fn with_clean(value: T) -> Self {
            Self {
                clean: OnceCell::from(Box::new(value)),
                dirty: RefCell::new(None),
            }
        }

        pub fn get_or_ensure_clean(&self, f: impl FnOnce(&mut T)) -> &T {
            self.clean.get_or_init(|| {
                // Panics if ensure_clean() is invoked from within a with_ref()
                // callback, for example.
                let mut value = self.dirty.borrow_mut().take().unwrap();
                f(&mut value);
                value
            })
        }

        pub fn ensure_clean(&self, f: impl FnOnce(&mut T)) {
            self.get_or_ensure_clean(f);
        }

        pub fn into_inner(self) -> T {
            *self
                .clean
                .into_inner()
                .or_else(|| self.dirty.into_inner())
                .unwrap()
        }

        pub fn with_ref<R>(&self, f: impl FnOnce(&T) -> R) -> R {
            if let Some(value) = self.clean.get() {
                f(value)
            } else {
                f(self.dirty.borrow().as_ref().unwrap())
            }
        }

        pub fn get_mut(&mut self) -> &mut T {
            self.clean
                .get_mut()
                .or_else(|| self.dirty.get_mut().as_mut())
                .unwrap()
        }

        pub fn mark_dirty(&mut self) {
            if let Some(value) = self.clean.take() {
                *self.dirty.get_mut() = Some(value);
            }
        }
    }
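
    #[cfg(test)]
    mod tests {
        use super::DirtyCell;

        // A minimal illustrative sketch of the intended lifecycle, using only
        // the API above: mutate the value, mark the cell dirty, then the
        // cleanup callback runs once on the next read.
        #[test]
        fn lazy_clean_after_mark_dirty() {
            let mut cell = DirtyCell::with_clean(1_i32);
            *cell.get_mut() += 1;
            cell.mark_dirty();
            // The dirty value is cleaned lazily, by the first reader.
            let value = cell.get_or_ensure_clean(|v| *v += 10);
            assert_eq!(*value, 12);
            // Already clean: the callback must not run again.
            cell.ensure_clean(|_| unreachable!());
            assert_eq!(cell.into_inner(), 12);
        }
    }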
}