// Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![expect(missing_docs)]

use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::fmt::Formatter;
use std::fs;
use std::path::Path;
use std::slice;
use std::sync::Arc;

use itertools::Itertools as _;
use once_cell::sync::OnceCell;
use pollster::FutureExt as _;
use thiserror::Error;
use tracing::instrument;

use self::dirty_cell::DirtyCell;
use crate::backend::Backend;
use crate::backend::BackendError;
use crate::backend::BackendInitError;
use crate::backend::BackendLoadError;
use crate::backend::BackendResult;
use crate::backend::ChangeId;
use crate::backend::CommitId;
use crate::backend::MergedTreeId;
use crate::commit::Commit;
use crate::commit::CommitByCommitterTimestamp;
use crate::commit_builder::CommitBuilder;
use crate::commit_builder::DetachedCommitBuilder;
use crate::dag_walk;
use crate::default_index::DefaultIndexStore;
use crate::default_index::DefaultMutableIndex;
use crate::default_submodule_store::DefaultSubmoduleStore;
use crate::file_util::IoResultExt as _;
use crate::file_util::PathError;
use crate::index::ChangeIdIndex;
use crate::index::Index;
use crate::index::IndexError;
use crate::index::IndexResult;
use crate::index::IndexStore;
use crate::index::IndexStoreError;
use crate::index::MutableIndex;
use crate::index::ReadonlyIndex;
use crate::merge::MergeBuilder;
use crate::merge::SameChange;
use crate::merge::trivial_merge;
use crate::object_id::HexPrefix;
use crate::object_id::PrefixResolution;
use crate::op_heads_store;
use crate::op_heads_store::OpHeadResolutionError;
use crate::op_heads_store::OpHeadsStore;
use crate::op_heads_store::OpHeadsStoreError;
use crate::op_store;
use crate::op_store::OpStore;
use crate::op_store::OpStoreError;
use crate::op_store::OpStoreResult;
use crate::op_store::OperationId;
use crate::op_store::RefTarget;
use crate::op_store::RemoteRef;
use crate::op_store::RemoteRefState;
use crate::op_store::RootOperationData;
use crate::operation::Operation;
use crate::ref_name::GitRefName;
use crate::ref_name::RefName;
use crate::ref_name::RemoteName;
use crate::ref_name::RemoteRefSymbol;
use crate::ref_name::WorkspaceName;
use crate::ref_name::WorkspaceNameBuf;
use crate::refs::diff_named_commit_ids;
use crate::refs::diff_named_ref_targets;
use crate::refs::diff_named_remote_refs;
use crate::refs::merge_ref_targets;
use crate::refs::merge_remote_refs;
use crate::revset;
use crate::revset::RevsetEvaluationError;
use crate::revset::RevsetExpression;
use crate::revset::RevsetIteratorExt as _;
use crate::rewrite::CommitRewriter;
use crate::rewrite::RebaseOptions;
use crate::rewrite::RebasedCommit;
use crate::rewrite::RewriteRefsOptions;
use crate::rewrite::merge_commit_trees;
use crate::rewrite::rebase_commit_with_options;
use crate::settings::UserSettings;
use crate::signing::SignInitError;
use crate::signing::Signer;
use crate::simple_backend::SimpleBackend;
use crate::simple_op_heads_store::SimpleOpHeadsStore;
use crate::simple_op_store::SimpleOpStore;
use crate::store::Store;
use crate::submodule_store::SubmoduleStore;
use crate::transaction::Transaction;
use crate::transaction::TransactionCommitError;
use crate::tree_merge::MergeOptions;
use crate::view::RenameWorkspaceError;
use crate::view::View;

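/// Read access to the data in a repository at some operation: the commit and
/// operation stores, the commit index, and the view.
///
/// A minimal usage sketch, assuming a `repo: &dyn Repo` obtained from a loaded
/// `ReadonlyRepo` and a hypothetical `change_id`:
///
/// ```ignore
/// if let Some(commit_ids) = repo.resolve_change_id(&change_id)? {
///     for id in &commit_ids {
///         let commit = repo.store().get_commit(id)?;
///         println!("{}", commit.id().hex());
///     }
/// }
/// ```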
pub trait Repo {
    /// Base repository that contains all committed data. Returns `self` if
    /// this is a `ReadonlyRepo`.
    fn base_repo(&self) -> &ReadonlyRepo;

    fn store(&self) -> &Arc<Store>;

    fn op_store(&self) -> &Arc<dyn OpStore>;

    fn index(&self) -> &dyn Index;

    fn view(&self) -> &View;

    fn submodule_store(&self) -> &Arc<dyn SubmoduleStore>;

    fn resolve_change_id(&self, change_id: &ChangeId) -> IndexResult<Option<Vec<CommitId>>> {
        // Replace this if we add a more efficient lookup method.
        let prefix = HexPrefix::from_id(change_id);
        match self.resolve_change_id_prefix(&prefix)? {
            PrefixResolution::NoMatch => Ok(None),
            PrefixResolution::SingleMatch(entries) => Ok(Some(entries)),
            PrefixResolution::AmbiguousMatch => panic!("complete change_id should be unambiguous"),
        }
    }

    fn resolve_change_id_prefix(
        &self,
        prefix: &HexPrefix,
    ) -> IndexResult<PrefixResolution<Vec<CommitId>>>;

    fn shortest_unique_change_id_prefix_len(
        &self,
        target_id_bytes: &ChangeId,
    ) -> IndexResult<usize>;
}

pub struct ReadonlyRepo {
    loader: RepoLoader,
    operation: Operation,
    index: Box<dyn ReadonlyIndex>,
    change_id_index: OnceCell<Box<dyn ChangeIdIndex>>,
    // TODO: This should eventually become part of the index and not be stored fully in memory.
    view: View,
}

impl Debug for ReadonlyRepo {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
        f.debug_struct("ReadonlyRepo")
            .field("store", &self.loader.store)
            .finish_non_exhaustive()
    }
}

#[derive(Error, Debug)]
pub enum RepoInitError {
    #[error(transparent)]
    Backend(#[from] BackendInitError),
    #[error(transparent)]
    OpHeadsStore(#[from] OpHeadsStoreError),
    #[error(transparent)]
    Path(#[from] PathError),
}

impl ReadonlyRepo {
    pub fn default_op_store_initializer() -> &'static OpStoreInitializer<'static> {
        &|_settings, store_path, root_data| {
            Ok(Box::new(SimpleOpStore::init(store_path, root_data)?))
        }
    }

    pub fn default_op_heads_store_initializer() -> &'static OpHeadsStoreInitializer<'static> {
        &|_settings, store_path| Ok(Box::new(SimpleOpHeadsStore::init(store_path)?))
    }

    pub fn default_index_store_initializer() -> &'static IndexStoreInitializer<'static> {
        &|_settings, store_path| Ok(Box::new(DefaultIndexStore::init(store_path)?))
    }

    pub fn default_submodule_store_initializer() -> &'static SubmoduleStoreInitializer<'static> {
        &|_settings, store_path| Ok(Box::new(DefaultSubmoduleStore::init(store_path)))
    }

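    /// Initializes a new repository on disk at `repo_path`, creating the
    /// `store`, `op_store`, `op_heads`, `index`, and `submodule_store`
    /// directories and recording each store's type.
    ///
    /// A hedged sketch of calling this with the default initializers;
    /// `settings`, `repo_path`, `signer`, and `backend_initializer` are
    /// assumed to be supplied by the caller:
    ///
    /// ```ignore
    /// let repo = ReadonlyRepo::init(
    ///     &settings,
    ///     &repo_path,
    ///     &backend_initializer,
    ///     signer,
    ///     ReadonlyRepo::default_op_store_initializer(),
    ///     ReadonlyRepo::default_op_heads_store_initializer(),
    ///     ReadonlyRepo::default_index_store_initializer(),
    ///     ReadonlyRepo::default_submodule_store_initializer(),
    /// )?;
    /// ```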
    #[expect(clippy::too_many_arguments)]
    pub fn init(
        settings: &UserSettings,
        repo_path: &Path,
        backend_initializer: &BackendInitializer,
        signer: Signer,
        op_store_initializer: &OpStoreInitializer,
        op_heads_store_initializer: &OpHeadsStoreInitializer,
        index_store_initializer: &IndexStoreInitializer,
        submodule_store_initializer: &SubmoduleStoreInitializer,
    ) -> Result<Arc<Self>, RepoInitError> {
        let repo_path = dunce::canonicalize(repo_path).context(repo_path)?;

        let store_path = repo_path.join("store");
        fs::create_dir(&store_path).context(&store_path)?;
        let backend = backend_initializer(settings, &store_path)?;
        let backend_path = store_path.join("type");
        fs::write(&backend_path, backend.name()).context(&backend_path)?;
        let merge_options =
            MergeOptions::from_settings(settings).map_err(|err| BackendInitError(err.into()))?;
        let store = Store::new(backend, signer, merge_options);

        let op_store_path = repo_path.join("op_store");
        fs::create_dir(&op_store_path).context(&op_store_path)?;
        let root_op_data = RootOperationData {
            root_commit_id: store.root_commit_id().clone(),
        };
        let op_store = op_store_initializer(settings, &op_store_path, root_op_data)?;
        let op_store_type_path = op_store_path.join("type");
        fs::write(&op_store_type_path, op_store.name()).context(&op_store_type_path)?;
        let op_store: Arc<dyn OpStore> = Arc::from(op_store);

        let op_heads_path = repo_path.join("op_heads");
        fs::create_dir(&op_heads_path).context(&op_heads_path)?;
        let op_heads_store = op_heads_store_initializer(settings, &op_heads_path)?;
        let op_heads_type_path = op_heads_path.join("type");
        fs::write(&op_heads_type_path, op_heads_store.name()).context(&op_heads_type_path)?;
        op_heads_store
            .update_op_heads(&[], op_store.root_operation_id())
            .block_on()?;
        let op_heads_store: Arc<dyn OpHeadsStore> = Arc::from(op_heads_store);

        let index_path = repo_path.join("index");
        fs::create_dir(&index_path).context(&index_path)?;
        let index_store = index_store_initializer(settings, &index_path)?;
        let index_type_path = index_path.join("type");
        fs::write(&index_type_path, index_store.name()).context(&index_type_path)?;
        let index_store: Arc<dyn IndexStore> = Arc::from(index_store);

        let submodule_store_path = repo_path.join("submodule_store");
        fs::create_dir(&submodule_store_path).context(&submodule_store_path)?;
        let submodule_store = submodule_store_initializer(settings, &submodule_store_path)?;
        let submodule_store_type_path = submodule_store_path.join("type");
        fs::write(&submodule_store_type_path, submodule_store.name())
            .context(&submodule_store_type_path)?;
        let submodule_store = Arc::from(submodule_store);

        let loader = RepoLoader {
            settings: settings.clone(),
            store,
            op_store,
            op_heads_store,
            index_store,
            submodule_store,
        };

        let root_operation = loader.root_operation();
        let root_view = root_operation.view().expect("failed to read root view");
        assert!(!root_view.heads().is_empty());
        let index = loader
            .index_store
            .get_index_at_op(&root_operation, &loader.store)
            // If the root op index couldn't be read, the index backend wouldn't
            // be initialized properly.
            .map_err(|err| BackendInitError(err.into()))?;
        Ok(Arc::new(Self {
            loader,
            operation: root_operation,
            index,
            change_id_index: OnceCell::new(),
            view: root_view,
        }))
    }

    pub fn loader(&self) -> &RepoLoader {
        &self.loader
    }

    pub fn op_id(&self) -> &OperationId {
        self.operation.id()
    }

    pub fn operation(&self) -> &Operation {
        &self.operation
    }

    pub fn view(&self) -> &View {
        &self.view
    }

    pub fn readonly_index(&self) -> &dyn ReadonlyIndex {
        self.index.as_ref()
    }

    fn change_id_index(&self) -> &dyn ChangeIdIndex {
        self.change_id_index
            .get_or_init(|| {
                self.readonly_index()
                    .change_id_index(&mut self.view().heads().iter())
            })
            .as_ref()
    }

    pub fn op_heads_store(&self) -> &Arc<dyn OpHeadsStore> {
        self.loader.op_heads_store()
    }

    pub fn index_store(&self) -> &Arc<dyn IndexStore> {
        self.loader.index_store()
    }

    pub fn settings(&self) -> &UserSettings {
        self.loader.settings()
    }

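    /// Starts a transaction on top of this repo's view and index.
    ///
    /// A minimal sketch of the usual flow; committing via
    /// [`Transaction::commit`] is assumed to be the caller's next step:
    ///
    /// ```ignore
    /// let mut tx = repo.start_transaction();
    /// // ... mutate the repo through tx.repo_mut() ...
    /// let new_repo = tx.commit("my operation")?;
    /// ```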
    pub fn start_transaction(self: &Arc<Self>) -> Transaction {
        let mut_repo = MutableRepo::new(self.clone(), self.readonly_index(), &self.view);
        Transaction::new(mut_repo, self.settings())
    }

    pub fn reload_at_head(&self) -> Result<Arc<Self>, RepoLoaderError> {
        self.loader().load_at_head()
    }

    #[instrument]
    pub fn reload_at(&self, operation: &Operation) -> Result<Arc<Self>, RepoLoaderError> {
        self.loader().load_at(operation)
    }
}

impl Repo for ReadonlyRepo {
    fn base_repo(&self) -> &ReadonlyRepo {
        self
    }

    fn store(&self) -> &Arc<Store> {
        self.loader.store()
    }

    fn op_store(&self) -> &Arc<dyn OpStore> {
        self.loader.op_store()
    }

    fn index(&self) -> &dyn Index {
        self.readonly_index().as_index()
    }

    fn view(&self) -> &View {
        &self.view
    }

    fn submodule_store(&self) -> &Arc<dyn SubmoduleStore> {
        self.loader.submodule_store()
    }

    fn resolve_change_id_prefix(
        &self,
        prefix: &HexPrefix,
    ) -> IndexResult<PrefixResolution<Vec<CommitId>>> {
        self.change_id_index().resolve_prefix(prefix)
    }

    fn shortest_unique_change_id_prefix_len(&self, target_id: &ChangeId) -> IndexResult<usize> {
        self.change_id_index().shortest_unique_prefix_len(target_id)
    }
}

pub type BackendInitializer<'a> =
    dyn Fn(&UserSettings, &Path) -> Result<Box<dyn Backend>, BackendInitError> + 'a;
#[rustfmt::skip] // auto-formatted line would exceed the maximum width
pub type OpStoreInitializer<'a> =
    dyn Fn(&UserSettings, &Path, RootOperationData) -> Result<Box<dyn OpStore>, BackendInitError>
    + 'a;
pub type OpHeadsStoreInitializer<'a> =
    dyn Fn(&UserSettings, &Path) -> Result<Box<dyn OpHeadsStore>, BackendInitError> + 'a;
pub type IndexStoreInitializer<'a> =
    dyn Fn(&UserSettings, &Path) -> Result<Box<dyn IndexStore>, BackendInitError> + 'a;
pub type SubmoduleStoreInitializer<'a> =
    dyn Fn(&UserSettings, &Path) -> Result<Box<dyn SubmoduleStore>, BackendInitError> + 'a;

type BackendFactory =
    Box<dyn Fn(&UserSettings, &Path) -> Result<Box<dyn Backend>, BackendLoadError>>;
type OpStoreFactory = Box<
    dyn Fn(&UserSettings, &Path, RootOperationData) -> Result<Box<dyn OpStore>, BackendLoadError>,
>;
type OpHeadsStoreFactory =
    Box<dyn Fn(&UserSettings, &Path) -> Result<Box<dyn OpHeadsStore>, BackendLoadError>>;
type IndexStoreFactory =
    Box<dyn Fn(&UserSettings, &Path) -> Result<Box<dyn IndexStore>, BackendLoadError>>;
type SubmoduleStoreFactory =
    Box<dyn Fn(&UserSettings, &Path) -> Result<Box<dyn SubmoduleStore>, BackendLoadError>>;

pub fn merge_factories_map<F>(base: &mut HashMap<String, F>, ext: HashMap<String, F>) {
    for (name, factory) in ext {
        match base.entry(name) {
            Entry::Vacant(v) => {
                v.insert(factory);
            }
            Entry::Occupied(o) => {
                panic!("Conflicting factory definitions for '{}' factory", o.key())
            }
        }
    }
}

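/// Registry of constructors for each pluggable store, keyed by the store type
/// name recorded in the corresponding `type` file.
///
/// A sketch of extending the defaults with a custom backend; `MyBackend` and
/// its `load()` constructor are hypothetical:
///
/// ```ignore
/// let mut factories = StoreFactories::default();
/// factories.add_backend(
///     "my-backend",
///     Box::new(|_settings, store_path| Ok(Box::new(MyBackend::load(store_path)))),
/// );
/// ```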
pub struct StoreFactories {
    backend_factories: HashMap<String, BackendFactory>,
    op_store_factories: HashMap<String, OpStoreFactory>,
    op_heads_store_factories: HashMap<String, OpHeadsStoreFactory>,
    index_store_factories: HashMap<String, IndexStoreFactory>,
    submodule_store_factories: HashMap<String, SubmoduleStoreFactory>,
}

impl Default for StoreFactories {
    fn default() -> Self {
        let mut factories = Self::empty();

        // Backends
        factories.add_backend(
            SimpleBackend::name(),
            Box::new(|_settings, store_path| Ok(Box::new(SimpleBackend::load(store_path)))),
        );
        #[cfg(feature = "git")]
        factories.add_backend(
            crate::git_backend::GitBackend::name(),
            Box::new(|settings, store_path| {
                Ok(Box::new(crate::git_backend::GitBackend::load(
                    settings, store_path,
                )?))
            }),
        );
        #[cfg(feature = "testing")]
        factories.add_backend(
            crate::secret_backend::SecretBackend::name(),
            Box::new(|settings, store_path| {
                Ok(Box::new(crate::secret_backend::SecretBackend::load(
                    settings, store_path,
                )?))
            }),
        );

        // OpStores
        factories.add_op_store(
            SimpleOpStore::name(),
            Box::new(|_settings, store_path, root_data| {
                Ok(Box::new(SimpleOpStore::load(store_path, root_data)))
            }),
        );

        // OpHeadsStores
        factories.add_op_heads_store(
            SimpleOpHeadsStore::name(),
            Box::new(|_settings, store_path| Ok(Box::new(SimpleOpHeadsStore::load(store_path)))),
        );

        // Index
        factories.add_index_store(
            DefaultIndexStore::name(),
            Box::new(|_settings, store_path| Ok(Box::new(DefaultIndexStore::load(store_path)))),
        );

        // SubmoduleStores
        factories.add_submodule_store(
            DefaultSubmoduleStore::name(),
            Box::new(|_settings, store_path| Ok(Box::new(DefaultSubmoduleStore::load(store_path)))),
        );

        factories
    }
}

#[derive(Debug, Error)]
pub enum StoreLoadError {
    #[error("Unsupported {store} backend type '{store_type}'")]
    UnsupportedType {
        store: &'static str,
        store_type: String,
    },
    #[error("Failed to read {store} backend type")]
    ReadError {
        store: &'static str,
        source: PathError,
    },
    #[error(transparent)]
    Backend(#[from] BackendLoadError),
    #[error(transparent)]
    Signing(#[from] SignInitError),
}

impl StoreFactories {
    pub fn empty() -> Self {
        Self {
            backend_factories: HashMap::new(),
            op_store_factories: HashMap::new(),
            op_heads_store_factories: HashMap::new(),
            index_store_factories: HashMap::new(),
            submodule_store_factories: HashMap::new(),
        }
    }

    pub fn merge(&mut self, ext: Self) {
        let Self {
            backend_factories,
            op_store_factories,
            op_heads_store_factories,
            index_store_factories,
            submodule_store_factories,
        } = ext;

        merge_factories_map(&mut self.backend_factories, backend_factories);
        merge_factories_map(&mut self.op_store_factories, op_store_factories);
        merge_factories_map(&mut self.op_heads_store_factories, op_heads_store_factories);
        merge_factories_map(&mut self.index_store_factories, index_store_factories);
        merge_factories_map(
            &mut self.submodule_store_factories,
            submodule_store_factories,
        );
    }

    pub fn add_backend(&mut self, name: &str, factory: BackendFactory) {
        self.backend_factories.insert(name.to_string(), factory);
    }

    pub fn load_backend(
        &self,
        settings: &UserSettings,
        store_path: &Path,
    ) -> Result<Box<dyn Backend>, StoreLoadError> {
        let backend_type = read_store_type("commit", store_path.join("type"))?;
        let backend_factory = self.backend_factories.get(&backend_type).ok_or_else(|| {
            StoreLoadError::UnsupportedType {
                store: "commit",
                store_type: backend_type.clone(),
            }
        })?;
        Ok(backend_factory(settings, store_path)?)
    }

    pub fn add_op_store(&mut self, name: &str, factory: OpStoreFactory) {
        self.op_store_factories.insert(name.to_string(), factory);
    }

    pub fn load_op_store(
        &self,
        settings: &UserSettings,
        store_path: &Path,
        root_data: RootOperationData,
    ) -> Result<Box<dyn OpStore>, StoreLoadError> {
        let op_store_type = read_store_type("operation", store_path.join("type"))?;
        let op_store_factory = self.op_store_factories.get(&op_store_type).ok_or_else(|| {
            StoreLoadError::UnsupportedType {
                store: "operation",
                store_type: op_store_type.clone(),
            }
        })?;
        Ok(op_store_factory(settings, store_path, root_data)?)
    }

    pub fn add_op_heads_store(&mut self, name: &str, factory: OpHeadsStoreFactory) {
        self.op_heads_store_factories
            .insert(name.to_string(), factory);
    }

    pub fn load_op_heads_store(
        &self,
        settings: &UserSettings,
        store_path: &Path,
    ) -> Result<Box<dyn OpHeadsStore>, StoreLoadError> {
        let op_heads_store_type = read_store_type("operation heads", store_path.join("type"))?;
        let op_heads_store_factory = self
            .op_heads_store_factories
            .get(&op_heads_store_type)
            .ok_or_else(|| StoreLoadError::UnsupportedType {
                store: "operation heads",
                store_type: op_heads_store_type.clone(),
            })?;
        Ok(op_heads_store_factory(settings, store_path)?)
    }

    pub fn add_index_store(&mut self, name: &str, factory: IndexStoreFactory) {
        self.index_store_factories.insert(name.to_string(), factory);
    }

    pub fn load_index_store(
        &self,
        settings: &UserSettings,
        store_path: &Path,
    ) -> Result<Box<dyn IndexStore>, StoreLoadError> {
        let index_store_type = read_store_type("index", store_path.join("type"))?;
        let index_store_factory = self
            .index_store_factories
            .get(&index_store_type)
            .ok_or_else(|| StoreLoadError::UnsupportedType {
                store: "index",
                store_type: index_store_type.clone(),
            })?;
        Ok(index_store_factory(settings, store_path)?)
    }

    pub fn add_submodule_store(&mut self, name: &str, factory: SubmoduleStoreFactory) {
        self.submodule_store_factories
            .insert(name.to_string(), factory);
    }

    pub fn load_submodule_store(
        &self,
        settings: &UserSettings,
        store_path: &Path,
    ) -> Result<Box<dyn SubmoduleStore>, StoreLoadError> {
        let submodule_store_type = read_store_type("submodule_store", store_path.join("type"))?;
        let submodule_store_factory = self
            .submodule_store_factories
            .get(&submodule_store_type)
            .ok_or_else(|| StoreLoadError::UnsupportedType {
                store: "submodule_store",
                store_type: submodule_store_type.clone(),
            })?;

        Ok(submodule_store_factory(settings, store_path)?)
    }
}

pub fn read_store_type(
    store: &'static str,
    path: impl AsRef<Path>,
) -> Result<String, StoreLoadError> {
    let path = path.as_ref();
    fs::read_to_string(path)
        .context(path)
        .map_err(|source| StoreLoadError::ReadError { store, source })
}
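
// As an illustration (not exhaustive), a repo directory laid out by
// `ReadonlyRepo::init` contains one subdirectory per store, each with a
// `type` file holding the store's `name()`; `read_store_type` reads that
// file and `StoreFactories` dispatches on its contents:
//
//   <repo>/store/type
//   <repo>/op_store/type
//   <repo>/op_heads/type
//   <repo>/index/type
//   <repo>/submodule_store/type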

#[derive(Debug, Error)]
pub enum RepoLoaderError {
    #[error(transparent)]
    Backend(#[from] BackendError),
    #[error(transparent)]
    Index(#[from] IndexError),
    #[error(transparent)]
    IndexStore(#[from] IndexStoreError),
    #[error(transparent)]
    OpHeadResolution(#[from] OpHeadResolutionError),
    #[error(transparent)]
    OpHeadsStoreError(#[from] OpHeadsStoreError),
    #[error(transparent)]
    OpStore(#[from] OpStoreError),
    #[error(transparent)]
    TransactionCommit(#[from] TransactionCommitError),
}

/// Helps create `ReadonlyRepo` instances of a repo at the head operation or at
/// a given operation.
#[derive(Clone)]
pub struct RepoLoader {
    settings: UserSettings,
    store: Arc<Store>,
    op_store: Arc<dyn OpStore>,
    op_heads_store: Arc<dyn OpHeadsStore>,
    index_store: Arc<dyn IndexStore>,
    submodule_store: Arc<dyn SubmoduleStore>,
}

impl RepoLoader {
    pub fn new(
        settings: UserSettings,
        store: Arc<Store>,
        op_store: Arc<dyn OpStore>,
        op_heads_store: Arc<dyn OpHeadsStore>,
        index_store: Arc<dyn IndexStore>,
        submodule_store: Arc<dyn SubmoduleStore>,
    ) -> Self {
        Self {
            settings,
            store,
            op_store,
            op_heads_store,
            index_store,
            submodule_store,
        }
    }

    /// Creates a `RepoLoader` for the repo at `repo_path` by reading the
    /// various `.jj/repo/<backend>/type` files and loading the right
    /// backends from `store_factories`.
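    ///
    /// A sketch, assuming `repo_path` points at an existing `.jj/repo`
    /// directory:
    ///
    /// ```ignore
    /// let loader =
    ///     RepoLoader::init_from_file_system(&settings, &repo_path, &StoreFactories::default())?;
    /// let repo = loader.load_at_head()?;
    /// ```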
    pub fn init_from_file_system(
        settings: &UserSettings,
        repo_path: &Path,
        store_factories: &StoreFactories,
    ) -> Result<Self, StoreLoadError> {
        let merge_options =
            MergeOptions::from_settings(settings).map_err(|err| BackendLoadError(err.into()))?;
        let store = Store::new(
            store_factories.load_backend(settings, &repo_path.join("store"))?,
            Signer::from_settings(settings)?,
            merge_options,
        );
        let root_op_data = RootOperationData {
            root_commit_id: store.root_commit_id().clone(),
        };
        let op_store = Arc::from(store_factories.load_op_store(
            settings,
            &repo_path.join("op_store"),
            root_op_data,
        )?);
        let op_heads_store =
            Arc::from(store_factories.load_op_heads_store(settings, &repo_path.join("op_heads"))?);
        let index_store =
            Arc::from(store_factories.load_index_store(settings, &repo_path.join("index"))?);
        let submodule_store = Arc::from(
            store_factories.load_submodule_store(settings, &repo_path.join("submodule_store"))?,
        );
        Ok(Self {
            settings: settings.clone(),
            store,
            op_store,
            op_heads_store,
            index_store,
            submodule_store,
        })
    }

    pub fn settings(&self) -> &UserSettings {
        &self.settings
    }

    pub fn store(&self) -> &Arc<Store> {
        &self.store
    }

    pub fn index_store(&self) -> &Arc<dyn IndexStore> {
        &self.index_store
    }

    pub fn op_store(&self) -> &Arc<dyn OpStore> {
        &self.op_store
    }

    pub fn op_heads_store(&self) -> &Arc<dyn OpHeadsStore> {
        &self.op_heads_store
    }

    pub fn submodule_store(&self) -> &Arc<dyn SubmoduleStore> {
        &self.submodule_store
    }

    pub fn load_at_head(&self) -> Result<Arc<ReadonlyRepo>, RepoLoaderError> {
        let op = op_heads_store::resolve_op_heads(
            self.op_heads_store.as_ref(),
            &self.op_store,
            |op_heads| self.resolve_op_heads(op_heads),
        )?;
        let view = op.view()?;
        self.finish_load(op, view)
    }

    #[instrument(skip(self))]
    pub fn load_at(&self, op: &Operation) -> Result<Arc<ReadonlyRepo>, RepoLoaderError> {
        let view = op.view()?;
        self.finish_load(op.clone(), view)
    }

    pub fn create_from(
        &self,
        operation: Operation,
        view: View,
        index: Box<dyn ReadonlyIndex>,
    ) -> Arc<ReadonlyRepo> {
        let repo = ReadonlyRepo {
            loader: self.clone(),
            operation,
            index,
            change_id_index: OnceCell::new(),
            view,
        };
        Arc::new(repo)
    }

    // If we add a higher-level abstraction of OpStore, root_operation() and
    // load_operation() will be moved there.

    /// Returns the root operation.
    pub fn root_operation(&self) -> Operation {
        self.load_operation(self.op_store.root_operation_id())
            .expect("failed to read root operation")
    }

    /// Loads the specified operation from the operation store.
    pub fn load_operation(&self, id: &OperationId) -> OpStoreResult<Operation> {
        let data = self.op_store.read_operation(id).block_on()?;
        Ok(Operation::new(self.op_store.clone(), id.clone(), data))
    }

    /// Merges the given `operations` into a single operation. Returns the root
    /// operation if `operations` is empty.
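    ///
    /// A sketch, assuming `ops` were collected from divergent operation heads:
    ///
    /// ```ignore
    /// let merged_op = loader.merge_operations(ops, Some("reconcile divergent operations"))?;
    /// ```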
    pub fn merge_operations(
        &self,
        operations: Vec<Operation>,
        tx_description: Option<&str>,
    ) -> Result<Operation, RepoLoaderError> {
        let num_operations = operations.len();
        let mut operations = operations.into_iter();
        let Some(base_op) = operations.next() else {
            return Ok(self.root_operation());
        };
        let final_op = if num_operations > 1 {
            let base_repo = self.load_at(&base_op)?;
            let mut tx = base_repo.start_transaction();
            for other_op in operations {
                tx.merge_operation(other_op)?;
                tx.repo_mut().rebase_descendants()?;
            }
            let tx_description = tx_description.map_or_else(
                || format!("merge {num_operations} operations"),
                |tx_description| tx_description.to_string(),
            );
            let merged_repo = tx.write(tx_description)?.leave_unpublished();
            merged_repo.operation().clone()
        } else {
            base_op
        };

        Ok(final_op)
    }

    fn resolve_op_heads(&self, op_heads: Vec<Operation>) -> Result<Operation, RepoLoaderError> {
        assert!(!op_heads.is_empty());
        self.merge_operations(op_heads, Some("reconcile divergent operations"))
    }

    fn finish_load(
        &self,
        operation: Operation,
        view: View,
    ) -> Result<Arc<ReadonlyRepo>, RepoLoaderError> {
        let index = self.index_store.get_index_at_op(&operation, &self.store)?;
        let repo = ReadonlyRepo {
            loader: self.clone(),
            operation,
            index,
            change_id_index: OnceCell::new(),
            view,
        };
        Ok(Arc::new(repo))
    }
}

#[derive(Clone, Debug, PartialEq, Eq)]
enum Rewrite {
    /// The old commit was rewritten as this new commit. Children should be
    /// rebased onto the new commit.
    Rewritten(CommitId),
    /// The old commit was rewritten as multiple other commits. Children should
    /// not be rebased.
    Divergent(Vec<CommitId>),
    /// The old commit was abandoned. Children should be rebased onto the given
    /// commits (typically the parents of the old commit).
    Abandoned(Vec<CommitId>),
}

impl Rewrite {
    fn new_parent_ids(&self) -> &[CommitId] {
        match self {
            Self::Rewritten(new_parent_id) => std::slice::from_ref(new_parent_id),
            Self::Divergent(new_parent_ids) => new_parent_ids.as_slice(),
            Self::Abandoned(new_parent_ids) => new_parent_ids.as_slice(),
        }
    }
}

pub struct MutableRepo {
    base_repo: Arc<ReadonlyRepo>,
    index: Box<dyn MutableIndex>,
    view: DirtyCell<View>,
    /// Mapping from new commit to its predecessors.
    ///
    /// This is similar to (the reverse of) `parent_mapping`, but
    /// `commit_predecessors` will never be cleared on `rebase_descendants()`.
    commit_predecessors: BTreeMap<CommitId, Vec<CommitId>>,
    // The commit identified by the key has been replaced by all the ones in the value.
    // * Bookmarks pointing to the old commit should be updated to the new commit, resulting in a
    //   conflict if there are multiple new commits.
    // * Children of the old commit should be rebased onto the new commits. However, if the type is
    //   `Divergent`, they should be left in place.
    // * Working copies pointing to the old commit should be updated to the first of the new
    //   commits. However, if the type is `Abandoned`, a new working-copy commit should be created
    //   on top of all of the new commits instead.
    parent_mapping: HashMap<CommitId, Rewrite>,
}

impl MutableRepo {
    pub fn new(base_repo: Arc<ReadonlyRepo>, index: &dyn ReadonlyIndex, view: &View) -> Self {
        let mut_view = view.clone();
        let mut_index = index.start_modification();
        Self {
            base_repo,
            index: mut_index,
            view: DirtyCell::with_clean(mut_view),
            commit_predecessors: Default::default(),
            parent_mapping: Default::default(),
        }
    }

    pub fn base_repo(&self) -> &Arc<ReadonlyRepo> {
        &self.base_repo
    }

    fn view_mut(&mut self) -> &mut View {
        self.view.get_mut()
    }

    pub fn mutable_index(&self) -> &dyn MutableIndex {
        self.index.as_ref()
    }

    pub(crate) fn is_backed_by_default_index(&self) -> bool {
        self.index.downcast_ref::<DefaultMutableIndex>().is_some()
    }

    pub fn has_changes(&self) -> bool {
        self.view.ensure_clean(|v| self.enforce_view_invariants(v));
        !(self.commit_predecessors.is_empty()
            && self.parent_mapping.is_empty()
            && self.view() == &self.base_repo.view)
    }

    pub(crate) fn consume(
        self,
    ) -> (
        Box<dyn MutableIndex>,
        View,
        BTreeMap<CommitId, Vec<CommitId>>,
    ) {
        self.view.ensure_clean(|v| self.enforce_view_invariants(v));
        (self.index, self.view.into_inner(), self.commit_predecessors)
    }

    /// Returns a [`CommitBuilder`] to write a new commit to the repo.
    pub fn new_commit(
        &mut self,
        parents: Vec<CommitId>,
        tree_id: MergedTreeId,
    ) -> CommitBuilder<'_> {
        let settings = self.base_repo.settings();
        DetachedCommitBuilder::for_new_commit(self, settings, parents, tree_id).attach(self)
    }

    /// Returns a [`CommitBuilder`] to rewrite an existing commit in the repo.
    pub fn rewrite_commit(&mut self, predecessor: &Commit) -> CommitBuilder<'_> {
        let settings = self.base_repo.settings();
        DetachedCommitBuilder::for_rewrite_from(self, settings, predecessor).attach(self)
        // CommitBuilder::write will record the rewrite in
        // `self.parent_mapping`
    }

    pub(crate) fn set_predecessors(&mut self, id: CommitId, predecessors: Vec<CommitId>) {
        self.commit_predecessors.insert(id, predecessors);
    }

    /// Record a commit as having been rewritten to another commit in this
    /// transaction.
    ///
    /// This record is used by `rebase_descendants` to know which commits have
    /// children that need to be rebased, and where to rebase them to. See the
    /// comments on `parent_mapping` for details.
    pub fn set_rewritten_commit(&mut self, old_id: CommitId, new_id: CommitId) {
        assert_ne!(old_id, *self.store().root_commit_id());
        self.parent_mapping
            .insert(old_id, Rewrite::Rewritten(new_id));
    }

    /// Record a commit as being rewritten into multiple other commits in this
    /// transaction.
    ///
    /// A later call to `rebase_descendants()` will update bookmarks pointing
    /// to `old_id` to be conflicted and point to all of `new_ids`. Working
    /// copies pointing to `old_id` will be updated to point to the first
    /// commit in `new_ids`. Descendants of `old_id` will be left alone.
    pub fn set_divergent_rewrite(
        &mut self,
        old_id: CommitId,
        new_ids: impl IntoIterator<Item = CommitId>,
    ) {
        assert_ne!(old_id, *self.store().root_commit_id());
        self.parent_mapping.insert(
            old_id.clone(),
            Rewrite::Divergent(new_ids.into_iter().collect()),
        );
    }

    /// Record a commit as having been abandoned in this transaction.
    ///
    /// This record is used by `rebase_descendants` to know which commits have
    /// children that need to be rebased, and where to rebase the children to.
    ///
    /// The `rebase_descendants` logic will rebase the descendants of the old
    /// commit to become the descendants of parent(s) of the old commit. Any
    /// bookmarks at the old commit will be either moved to the parent(s) of the
    /// old commit or deleted depending on [`RewriteRefsOptions`].
    pub fn record_abandoned_commit(&mut self, old_commit: &Commit) {
        assert_ne!(old_commit.id(), self.store().root_commit_id());
        // Descendants should be rebased onto the commit's parents
        self.record_abandoned_commit_with_parents(
            old_commit.id().clone(),
            old_commit.parent_ids().iter().cloned(),
        );
    }

    /// Record a commit as having been abandoned in this transaction.
    ///
    /// A later `rebase_descendants()` will rebase children of `old_id` onto
    /// `new_parent_ids`. A working copy pointing to `old_id` will point to a
    /// new commit on top of `new_parent_ids`.
    pub fn record_abandoned_commit_with_parents(
        &mut self,
        old_id: CommitId,
        new_parent_ids: impl IntoIterator<Item = CommitId>,
    ) {
        assert_ne!(old_id, *self.store().root_commit_id());
        self.parent_mapping.insert(
            old_id,
            Rewrite::Abandoned(new_parent_ids.into_iter().collect()),
        );
    }

    pub fn has_rewrites(&self) -> bool {
        !self.parent_mapping.is_empty()
    }

    /// Calculates new parents for a commit that's currently based on the given
    /// parents. It does that by considering how previous commits have been
    /// rewritten and abandoned.
    ///
    /// If `parent_mapping` contains cycles, this function may either panic or
    /// drop parents that caused cycles.
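    ///
    /// For example, if commit `B` was recorded as rewritten to `B2`, a commit
    /// whose parents were `[B]` gets `[B2]`; if `B` was instead abandoned, it
    /// gets `B`'s (possibly rewritten) parents.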
    pub fn new_parents(&self, old_ids: &[CommitId]) -> Vec<CommitId> {
        self.rewritten_ids_with(old_ids, |rewrite| !matches!(rewrite, Rewrite::Divergent(_)))
    }

    fn rewritten_ids_with(
        &self,
        old_ids: &[CommitId],
        mut predicate: impl FnMut(&Rewrite) -> bool,
    ) -> Vec<CommitId> {
        assert!(!old_ids.is_empty());
        let mut new_ids = Vec::with_capacity(old_ids.len());
        let mut to_visit = old_ids.iter().rev().collect_vec();
        let mut visited = HashSet::new();
        while let Some(id) = to_visit.pop() {
            if !visited.insert(id) {
                continue;
            }
            match self.parent_mapping.get(id).filter(|&v| predicate(v)) {
                None => {
                    new_ids.push(id.clone());
                }
                Some(rewrite) => {
                    let replacements = rewrite.new_parent_ids();
                    assert!(
                        // Each commit must have a parent, so a parent can
                        // not just be mapped to nothing. This assertion
                        // could be removed if this function is used for
                        // mapping something other than a commit's parents.
                        !replacements.is_empty(),
                        "Found empty value for key {id:?} in the parent mapping",
                    );
                    to_visit.extend(replacements.iter().rev());
                }
            }
        }
        assert!(
            !new_ids.is_empty(),
            "new ids became empty because of a cycle in the parent mapping"
        );
        debug_assert!(new_ids.iter().all_unique());
        new_ids
    }

    /// Fully resolves transitive replacements in `parent_mapping`.
    ///
    /// Returns an error if `parent_mapping` contains cycles.
    fn resolve_rewrite_mapping_with(
        &self,
        mut predicate: impl FnMut(&Rewrite) -> bool,
    ) -> BackendResult<HashMap<CommitId, Vec<CommitId>>> {
        let sorted_ids = dag_walk::topo_order_forward(
            self.parent_mapping.keys(),
            |&id| id,
            |&id| match self.parent_mapping.get(id).filter(|&v| predicate(v)) {
                None => &[],
                Some(rewrite) => rewrite.new_parent_ids(),
            },
            |id| {
                BackendError::Other(
                    format!("Cycle between rewritten commits involving commit {id}").into(),
                )
            },
        )?;
        let mut new_mapping: HashMap<CommitId, Vec<CommitId>> = HashMap::new();
        for old_id in sorted_ids {
            let Some(rewrite) = self.parent_mapping.get(old_id).filter(|&v| predicate(v)) else {
                continue;
            };
            let lookup = |id| new_mapping.get(id).map_or(slice::from_ref(id), |ids| ids);
            let new_ids = match rewrite.new_parent_ids() {
                [id] => lookup(id).to_vec(), // unique() not needed
                ids => ids.iter().flat_map(lookup).unique().cloned().collect(),
            };
            debug_assert_eq!(
                new_ids,
                self.rewritten_ids_with(slice::from_ref(old_id), &mut predicate)
            );
            new_mapping.insert(old_id.clone(), new_ids);
        }
        Ok(new_mapping)
    }

    /// Updates bookmarks, working copies, and anonymous heads after rewriting
    /// and/or abandoning commits.
    pub fn update_rewritten_references(
        &mut self,
        options: &RewriteRefsOptions,
    ) -> BackendResult<()> {
        self.update_all_references(options)?;
        self.update_heads()
            .map_err(|err| err.into_backend_error())?;
        Ok(())
    }

    fn update_all_references(&mut self, options: &RewriteRefsOptions) -> BackendResult<()> {
        let rewrite_mapping = self.resolve_rewrite_mapping_with(|_| true)?;
        self.update_local_bookmarks(&rewrite_mapping, options)
            // TODO: indexing error shouldn't be a "BackendError"
            .map_err(|err| BackendError::Other(err.into()))?;
        self.update_wc_commits(&rewrite_mapping)?;
        Ok(())
    }

    fn update_local_bookmarks(
        &mut self,
        rewrite_mapping: &HashMap<CommitId, Vec<CommitId>>,
        options: &RewriteRefsOptions,
    ) -> IndexResult<()> {
        let changed_branches = self
            .view()
            .local_bookmarks()
            .flat_map(|(name, target)| {
                target.added_ids().filter_map(|id| {
                    let change = rewrite_mapping.get_key_value(id)?;
                    Some((name.to_owned(), change))
                })
            })
            .collect_vec();
        for (bookmark_name, (old_commit_id, new_commit_ids)) in changed_branches {
            let should_delete = options.delete_abandoned_bookmarks
                && matches!(
                    self.parent_mapping.get(old_commit_id),
                    Some(Rewrite::Abandoned(_))
                );
            let old_target = RefTarget::normal(old_commit_id.clone());
            let new_target = if should_delete {
                RefTarget::absent()
            } else {
                let ids = itertools::intersperse(new_commit_ids, old_commit_id)
                    .map(|id| Some(id.clone()));
                RefTarget::from_merge(MergeBuilder::from_iter(ids).build())
            };

            self.merge_local_bookmark(&bookmark_name, &old_target, &new_target)?;
        }
        Ok(())
    }

    fn update_wc_commits(
        &mut self,
        rewrite_mapping: &HashMap<CommitId, Vec<CommitId>>,
    ) -> BackendResult<()> {
        let changed_wc_commits = self
            .view()
            .wc_commit_ids()
            .iter()
            .filter_map(|(name, commit_id)| {
                let change = rewrite_mapping.get_key_value(commit_id)?;
                Some((name.to_owned(), change))
            })
            .collect_vec();
        let mut recreated_wc_commits: HashMap<&CommitId, Commit> = HashMap::new();
        for (name, (old_commit_id, new_commit_ids)) in changed_wc_commits {
            let abandoned_old_commit = matches!(
                self.parent_mapping.get(old_commit_id),
                Some(Rewrite::Abandoned(_))
            );
            let new_wc_commit = if !abandoned_old_commit {
                // We arbitrarily pick a new working-copy commit among the candidates.
                self.store().get_commit(&new_commit_ids[0])?
            } else if let Some(commit) = recreated_wc_commits.get(old_commit_id) {
                commit.clone()
            } else {
                let new_commits: Vec<_> = new_commit_ids
                    .iter()
                    .map(|id| self.store().get_commit(id))
                    .try_collect()?;
                let merged_parents_tree = merge_commit_trees(self, &new_commits).block_on()?;
                let commit = self
                    .new_commit(new_commit_ids.clone(), merged_parents_tree.id().clone())
                    .write()?;
                recreated_wc_commits.insert(old_commit_id, commit.clone());
                commit
            };
            self.edit(name, &new_wc_commit).map_err(|err| match err {
                EditCommitError::BackendError(backend_error) => backend_error,
                EditCommitError::WorkingCopyCommitNotFound(_)
                | EditCommitError::RewriteRootCommit(_) => panic!("unexpected error: {err:?}"),
            })?;
        }
        Ok(())
    }

    fn update_heads(&mut self) -> Result<(), RevsetEvaluationError> {
        let old_commits_expression =
            RevsetExpression::commits(self.parent_mapping.keys().cloned().collect());
        let heads_to_add_expression = old_commits_expression
            .parents()
            .minus(&old_commits_expression);
        let heads_to_add: Vec<_> = heads_to_add_expression
            .evaluate(self)?
            .iter()
            .try_collect()?;

        let mut view = self.view().store_view().clone();
        for commit_id in self.parent_mapping.keys() {
            view.head_ids.remove(commit_id);
        }
        view.head_ids.extend(heads_to_add);
        self.set_view(view);
        Ok(())
    }

    /// Find descendants of `roots`, unless they've already been rewritten
    /// (according to `parent_mapping`).
    pub fn find_descendants_for_rebase(&self, roots: Vec<CommitId>) -> BackendResult<Vec<Commit>> {
        let to_visit_revset = RevsetExpression::commits(roots)
            .descendants()
            .minus(&RevsetExpression::commits(
                self.parent_mapping.keys().cloned().collect(),
            ))
            .evaluate(self)
            .map_err(|err| err.into_backend_error())?;
        let to_visit = to_visit_revset
            .iter()
            .commits(self.store())
            .try_collect()
            .map_err(|err| err.into_backend_error())?;
        Ok(to_visit)
    }

    /// Order a set of commits into the order they should be rebased in. The
    /// result is in reverse order so the next value can be removed from the
    /// end.
    fn order_commits_for_rebase(
        &self,
        to_visit: Vec<Commit>,
        new_parents_map: &HashMap<CommitId, Vec<CommitId>>,
    ) -> BackendResult<Vec<Commit>> {
        let to_visit_set: HashSet<CommitId> =
            to_visit.iter().map(|commit| commit.id().clone()).collect();
        let mut visited = HashSet::new();
        // Calculate an order where we rebase parents first, but if the parents were
        // rewritten, make sure we rebase the rewritten parent first.
        let store = self.store();
        dag_walk::topo_order_reverse_ok(
            to_visit.into_iter().map(Ok),
            |commit| commit.id().clone(),
            |commit| -> Vec<BackendResult<Commit>> {
                visited.insert(commit.id().clone());
                let mut dependents = vec![];
                let parent_ids = new_parents_map
                    .get(commit.id())
                    .map_or(commit.parent_ids(), |parent_ids| parent_ids);
                for parent_id in parent_ids {
                    let parent = store.get_commit(parent_id);
                    let Ok(parent) = parent else {
                        dependents.push(parent);
                        continue;
                    };
                    if let Some(rewrite) = self.parent_mapping.get(parent.id()) {
                        for target in rewrite.new_parent_ids() {
                            if to_visit_set.contains(target) && !visited.contains(target) {
                                dependents.push(store.get_commit(target));
                            }
                        }
                    }
                    if to_visit_set.contains(parent.id()) {
                        dependents.push(Ok(parent));
                    }
                }
                dependents
            },
            |_| panic!("graph has cycle"),
        )
    }

    /// Rewrite descendants of the given roots.
    ///
    /// The callback will be called for each commit with the new parents
    /// prepopulated. The callback may change the parents and write the new
    /// commit, or it may abandon the commit, or it may leave the old commit
    /// unchanged.
    ///
    /// The set of commits to visit is determined at the start. If the callback
    /// adds new descendants, then the callback will not be called for those.
    /// Similarly, if the callback rewrites unrelated commits, then the callback
    /// will not be called for descendants of those commits.
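    ///
    /// A minimal sketch, mirroring the reparenting loop used in
    /// `reparent_descendants()` below:
    ///
    /// ```ignore
    /// mut_repo.transform_descendants(roots, async |rewriter| {
    ///     if rewriter.parents_changed() {
    ///         rewriter.reparent().write()?;
    ///     }
    ///     Ok(())
    /// })?;
    /// ```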
    pub fn transform_descendants(
        &mut self,
        roots: Vec<CommitId>,
        callback: impl AsyncFnMut(CommitRewriter) -> BackendResult<()>,
    ) -> BackendResult<()> {
        let options = RewriteRefsOptions::default();
        self.transform_descendants_with_options(roots, &HashMap::new(), &options, callback)
    }

    /// Rewrite descendants of the given roots with options.
    ///
    /// If a commit is present in the provided `new_parents_map`, it will be
    /// rebased onto the new parents in the map instead of its original
    /// parents.
    ///
    /// See [`Self::transform_descendants()`] for details.
    pub fn transform_descendants_with_options(
        &mut self,
        roots: Vec<CommitId>,
        new_parents_map: &HashMap<CommitId, Vec<CommitId>>,
        options: &RewriteRefsOptions,
        callback: impl AsyncFnMut(CommitRewriter) -> BackendResult<()>,
    ) -> BackendResult<()> {
        let descendants = self.find_descendants_for_rebase(roots)?;
        self.transform_commits(descendants, new_parents_map, options, callback)
    }

    /// Rewrite the given commits in reverse topological order.
    ///
    /// `commits` should be a connected range.
    ///
    /// This function is similar to
    /// [`Self::transform_descendants_with_options()`], but only rewrites the
    /// `commits` provided, and does not rewrite their descendants.
    pub fn transform_commits(
        &mut self,
        commits: Vec<Commit>,
        new_parents_map: &HashMap<CommitId, Vec<CommitId>>,
        options: &RewriteRefsOptions,
        mut callback: impl AsyncFnMut(CommitRewriter) -> BackendResult<()>,
    ) -> BackendResult<()> {
        let mut to_visit = self.order_commits_for_rebase(commits, new_parents_map)?;
        while let Some(old_commit) = to_visit.pop() {
            let parent_ids = new_parents_map
                .get(old_commit.id())
                .map_or(old_commit.parent_ids(), |parent_ids| parent_ids);
            let new_parent_ids = self.new_parents(parent_ids);
            let rewriter = CommitRewriter::new(self, old_commit, new_parent_ids);
            callback(rewriter).block_on()?;
        }
        self.update_rewritten_references(options)?;
        // Since we didn't necessarily visit all descendants of rewritten commits (e.g.
        // if they were rewritten in the callback), there can still be commits left to
        // rebase, so we don't clear `parent_mapping` here.
        // TODO: Should we make this stricter? We could check that there were no
        // rewrites before this function was called, and we can check that only
        // commits in the `to_visit` set were added by the callback. Then we
        // could clear `parent_mapping` here and not have to scan it again at
        // the end of the transaction when we call `rebase_descendants()`.

        Ok(())
    }

    /// Rebase descendants of the rewritten commits with options and callback.
    ///
    /// The descendants of the commits registered in `self.parent_mapping`
    /// will be recursively rebased onto the new version of their parents.
    ///
    /// If `options.empty` is the default (`EmptyBehavior::Keep`), all rebased
    /// descendant commits will be preserved even if they were emptied following
    /// the rebase operation. Otherwise, this function may rebase some commits
    /// and abandon others, based on the given `EmptyBehavior`. The behavior is
    /// such that only commits with a single parent will ever be abandoned. The
    /// parent will inherit the descendants and the bookmarks of the abandoned
    /// commit.
    ///
    /// The `progress` callback will be invoked for each rebase operation with
    /// `(old_commit, rebased_commit)` as arguments.
    pub fn rebase_descendants_with_options(
        &mut self,
        options: &RebaseOptions,
        mut progress: impl FnMut(Commit, RebasedCommit),
    ) -> BackendResult<()> {
        let roots = self.parent_mapping.keys().cloned().collect();
        self.transform_descendants_with_options(
            roots,
            &HashMap::new(),
            &options.rewrite_refs,
            async |rewriter| {
                if rewriter.parents_changed() {
                    let old_commit = rewriter.old_commit().clone();
                    let rebased_commit = rebase_commit_with_options(rewriter, options)?;
                    progress(old_commit, rebased_commit);
                }
                Ok(())
            },
        )?;
        self.parent_mapping.clear();
        Ok(())
    }

    /// Rebase descendants of the rewritten commits.
    ///
    /// The descendants of the commits registered in `self.parent_mapping` will
    /// be recursively rebased onto the new version of their parents.
    /// Returns the number of rebased descendants.
    ///
    /// All rebased descendant commits will be preserved even if they were
    /// emptied following the rebase operation. To customize the rebase
    /// behavior, use [`MutableRepo::rebase_descendants_with_options`].
    pub fn rebase_descendants(&mut self) -> BackendResult<usize> {
        let options = RebaseOptions::default();
        let mut num_rebased = 0;
        self.rebase_descendants_with_options(&options, |_old_commit, _rebased_commit| {
            num_rebased += 1;
        })?;
        Ok(num_rebased)
    }

    /// Reparent descendants of the rewritten commits.
    ///
    /// The descendants of the commits registered in `self.parent_mapping` will
    /// be recursively reparented onto the new version of their parents.
    /// The content of those descendants will remain untouched.
    /// Returns the number of reparented descendants.
    pub fn reparent_descendants(&mut self) -> BackendResult<usize> {
        let roots = self.parent_mapping.keys().cloned().collect_vec();
        let mut num_reparented = 0;
        self.transform_descendants(roots, async |rewriter| {
            if rewriter.parents_changed() {
                let builder = rewriter.reparent();
                builder.write()?;
                num_reparented += 1;
            }
            Ok(())
        })?;
        self.parent_mapping.clear();
        Ok(num_reparented)
    }

    pub fn set_wc_commit(
        &mut self,
        name: WorkspaceNameBuf,
        commit_id: CommitId,
    ) -> Result<(), RewriteRootCommit> {
        if &commit_id == self.store().root_commit_id() {
            return Err(RewriteRootCommit);
        }
        self.view_mut().set_wc_commit(name, commit_id);
        Ok(())
    }

    pub fn remove_wc_commit(&mut self, name: &WorkspaceName) -> Result<(), EditCommitError> {
        self.maybe_abandon_wc_commit(name)?;
        self.view_mut().remove_wc_commit(name);
        Ok(())
    }

    /// Merges the working-copy commit. If there's a conflict and the workspace
    /// isn't removed on either side, we keep the `self` side.
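    ///
    /// For example, if both sides moved the working copy to different commits,
    /// the `self` side's commit wins; if either side removed the workspace, it
    /// stays removed.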
    fn merge_wc_commit(
        &mut self,
        name: &WorkspaceName,
        base_id: Option<&CommitId>,
        other_id: Option<&CommitId>,
    ) {
        let view = self.view.get_mut();
        let self_id = view.get_wc_commit_id(name);
        // Not using merge_ref_targets(). Since the working-copy pointer moves
        // in arbitrary directions, it doesn't make sense to resolve conflicts
        // based on ancestry.
        let new_id = if let Some(resolved) =
            trivial_merge(&[self_id, base_id, other_id], SameChange::Accept)
        {
            resolved.cloned()
        } else if self_id.is_none() || other_id.is_none() {
            // We want to remove the workspace even if the self side changed the
            // working-copy commit.
            None
        } else {
            self_id.cloned()
        };
        match new_id {
            Some(id) => view.set_wc_commit(name.to_owned(), id),
            None => view.remove_wc_commit(name),
        }
    }

    pub fn rename_workspace(
        &mut self,
        old_name: &WorkspaceName,
        new_name: WorkspaceNameBuf,
    ) -> Result<(), RenameWorkspaceError> {
        self.view_mut().rename_workspace(old_name, new_name)
    }

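    /// Creates a new working-copy commit on top of `commit` and makes the
    /// given workspace edit it.
    ///
    /// A minimal usage sketch (`tx` is assumed to be a `Transaction` whose
    /// `repo_mut()` yields this `MutableRepo`):
    ///
    /// ```ignore
    /// let wc_commit = tx.repo_mut().check_out(workspace_name, &commit)?;
    /// ```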
    pub fn check_out(
        &mut self,
        name: WorkspaceNameBuf,
        commit: &Commit,
    ) -> Result<Commit, CheckOutCommitError> {
        let wc_commit = self
            .new_commit(vec![commit.id().clone()], commit.tree_id().clone())
            .write()?;
        self.edit(name, &wc_commit)?;
        Ok(wc_commit)
    }

    pub fn edit(&mut self, name: WorkspaceNameBuf, commit: &Commit) -> Result<(), EditCommitError> {
        self.maybe_abandon_wc_commit(&name)?;
        self.add_head(commit)?;
        Ok(self.set_wc_commit(name, commit.id().clone())?)
    }

    fn maybe_abandon_wc_commit(
        &mut self,
        workspace_name: &WorkspaceName,
    ) -> Result<(), EditCommitError> {
        let is_commit_referenced = |view: &View, commit_id: &CommitId| -> bool {
            view.wc_commit_ids()
                .iter()
                .filter(|&(name, _)| name != workspace_name)
                .map(|(_, wc_id)| wc_id)
                .chain(
                    view.local_bookmarks()
                        .flat_map(|(_, target)| target.added_ids()),
                )
                .any(|id| id == commit_id)
        };

        let maybe_wc_commit_id = self
            .view
            .with_ref(|v| v.get_wc_commit_id(workspace_name).cloned());
        if let Some(wc_commit_id) = maybe_wc_commit_id {
            let wc_commit = self
                .store()
                .get_commit(&wc_commit_id)
                .map_err(EditCommitError::WorkingCopyCommitNotFound)?;
            if wc_commit.is_discardable(self)?
                && self
                    .view
                    .with_ref(|v| !is_commit_referenced(v, wc_commit.id()))
                && self.view().heads().contains(wc_commit.id())
            {
                // Abandon the working-copy commit we're leaving if it's
                // discardable, not pointed to by any local bookmark or other
                // working copy, and is a head commit.
                self.record_abandoned_commit(&wc_commit);
            }
        }

        Ok(())
    }

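    /// Normalizes the set of visible heads: an empty set is replaced by the
    /// root commit, and heads that are ancestors of other heads are removed.
    /// For example, heads `{root, A, B}` where `A` is an ancestor of `B` are
    /// reduced to `{B}`.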
    fn enforce_view_invariants(&self, view: &mut View) {
        let view = view.store_view_mut();
        let root_commit_id = self.store().root_commit_id();
        if view.head_ids.is_empty() {
            view.head_ids.insert(root_commit_id.clone());
        } else if view.head_ids.len() > 1 {
            // An empty head_ids set is padded with the root_commit_id, but the
            // root id is unwanted during the heads resolution.
            view.head_ids.remove(root_commit_id);
            // It is unclear whether `heads` can fail for the default
            // implementation, but it can definitely fail for non-default
            // implementations.
            // TODO: propagate errors.
            view.head_ids = self
                .index()
                .heads(&mut view.head_ids.iter())
                .unwrap()
                .into_iter()
                .collect();
        }
        assert!(!view.head_ids.is_empty());
    }

    /// Ensures that the given `head` and ancestor commits are reachable from
    /// the visible heads.
    pub fn add_head(&mut self, head: &Commit) -> BackendResult<()> {
        self.add_heads(slice::from_ref(head))
    }

    /// Ensures that the given `heads` and ancestor commits are reachable from
    /// the visible heads.
    ///
    /// The `heads` may contain redundant commits such as already visible ones
    /// and ancestors of the other heads. The `heads` and ancestor commits
    /// should exist in the store.
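    ///
    /// A minimal sketch, assuming the commits already exist in this repo's
    /// backend store:
    ///
    /// ```ignore
    /// let head = mut_repo.store().get_commit(&head_id)?;
    /// mut_repo.add_heads(std::slice::from_ref(&head))?;
    /// ```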
    pub fn add_heads(&mut self, heads: &[Commit]) -> BackendResult<()> {
        let current_heads = self.view.get_mut().heads();
        // Use incremental update for the common case of adding a single commit
        // on top of a current head. TODO: Also use incremental update when
        // adding a single commit on top of a non-head.
        match heads {
            [] => {}
            [head]
                if head
                    .parent_ids()
                    .iter()
                    .all(|parent_id| current_heads.contains(parent_id)) =>
            {
                self.index
                    .add_commit(head)
                    // TODO: indexing error shouldn't be a "BackendError"
                    .map_err(|err| BackendError::Other(err.into()))?;
                self.view.get_mut().add_head(head.id());
                for parent_id in head.parent_ids() {
                    self.view.get_mut().remove_head(parent_id);
                }
            }
            _ => {
                let missing_commits = dag_walk::topo_order_reverse_ord_ok(
                    heads
                        .iter()
                        .cloned()
                        .map(CommitByCommitterTimestamp)
                        .map(Ok),
                    |CommitByCommitterTimestamp(commit)| commit.id().clone(),
                    |CommitByCommitterTimestamp(commit)| {
                        commit
                            .parent_ids()
                            .iter()
                            .filter_map(|id| match self.index().has_id(id) {
                                Ok(false) => Some(
                                    self.store().get_commit(id).map(CommitByCommitterTimestamp),
                                ),
                                Ok(true) => None,
                                // TODO: indexing error shouldn't be a "BackendError"
                                Err(err) => Some(Err(BackendError::Other(err.into()))),
                            })
                            .collect_vec()
                    },
                    |_| panic!("graph has cycle"),
                )?;
                for CommitByCommitterTimestamp(missing_commit) in missing_commits.iter().rev() {
                    self.index
                        .add_commit(missing_commit)
                        // TODO: indexing error shouldn't be a "BackendError"
                        .map_err(|err| BackendError::Other(err.into()))?;
                }
                for head in heads {
                    self.view.get_mut().add_head(head.id());
                }
                self.view.mark_dirty();
            }
        }
        Ok(())
    }

    pub fn remove_head(&mut self, head: &CommitId) {
        self.view_mut().remove_head(head);
        self.view.mark_dirty();
    }

    pub fn get_local_bookmark(&self, name: &RefName) -> RefTarget {
        self.view.with_ref(|v| v.get_local_bookmark(name).clone())
    }

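    /// Sets the local bookmark to point to the given target, and makes the
    /// target's added commits visible heads.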
    pub fn set_local_bookmark_target(&mut self, name: &RefName, target: RefTarget) {
        let view = self.view_mut();
        for id in target.added_ids() {
            view.add_head(id);
        }
        view.set_local_bookmark_target(name, target);
        self.view.mark_dirty();
    }

    pub fn merge_local_bookmark(
        &mut self,
        name: &RefName,
        base_target: &RefTarget,
        other_target: &RefTarget,
    ) -> IndexResult<()> {
        let view = self.view.get_mut();
        let index = self.index.as_index();
        let self_target = view.get_local_bookmark(name);
        let new_target = merge_ref_targets(index, self_target, base_target, other_target)?;
        self.set_local_bookmark_target(name, new_target);
        Ok(())
    }

    pub fn get_remote_bookmark(&self, symbol: RemoteRefSymbol<'_>) -> RemoteRef {
        self.view
            .with_ref(|v| v.get_remote_bookmark(symbol).clone())
    }

    pub fn set_remote_bookmark(&mut self, symbol: RemoteRefSymbol<'_>, remote_ref: RemoteRef) {
        self.view_mut().set_remote_bookmark(symbol, remote_ref);
    }

    fn merge_remote_bookmark(
        &mut self,
        symbol: RemoteRefSymbol<'_>,
        base_ref: &RemoteRef,
        other_ref: &RemoteRef,
    ) -> IndexResult<()> {
        let view = self.view.get_mut();
        let index = self.index.as_index();
        let self_ref = view.get_remote_bookmark(symbol);
        let new_ref = merge_remote_refs(index, self_ref, base_ref, other_ref)?;
        view.set_remote_bookmark(symbol, new_ref);
        Ok(())
    }

    /// Merges the specified remote bookmark into the local bookmark, and
    /// starts tracking it.
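    ///
    /// A minimal sketch (`name` and `remote` are assumed to be existing
    /// `&RefName` and `&RemoteName` values):
    ///
    /// ```ignore
    /// let symbol = RemoteRefSymbol { name, remote };
    /// mut_repo.track_remote_bookmark(symbol)?;
    /// ```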
    pub fn track_remote_bookmark(&mut self, symbol: RemoteRefSymbol<'_>) -> IndexResult<()> {
        let mut remote_ref = self.get_remote_bookmark(symbol);
        let base_target = remote_ref.tracked_target();
        self.merge_local_bookmark(symbol.name, base_target, &remote_ref.target)?;
        remote_ref.state = RemoteRefState::Tracked;
        self.set_remote_bookmark(symbol, remote_ref);
        Ok(())
    }

    /// Stops tracking the specified remote bookmark.
    pub fn untrack_remote_bookmark(&mut self, symbol: RemoteRefSymbol<'_>) {
        let mut remote_ref = self.get_remote_bookmark(symbol);
        remote_ref.state = RemoteRefState::New;
        self.set_remote_bookmark(symbol, remote_ref);
    }

    pub fn ensure_remote(&mut self, remote_name: &RemoteName) {
        self.view_mut().ensure_remote(remote_name);
    }

    pub fn remove_remote(&mut self, remote_name: &RemoteName) {
        self.view_mut().remove_remote(remote_name);
    }

    pub fn rename_remote(&mut self, old: &RemoteName, new: &RemoteName) {
        self.view_mut().rename_remote(old, new);
    }

    pub fn get_local_tag(&self, name: &RefName) -> RefTarget {
        self.view.with_ref(|v| v.get_local_tag(name).clone())
    }

    pub fn set_local_tag_target(&mut self, name: &RefName, target: RefTarget) {
        self.view_mut().set_local_tag_target(name, target);
    }

    pub fn merge_local_tag(
        &mut self,
        name: &RefName,
        base_target: &RefTarget,
        other_target: &RefTarget,
    ) -> IndexResult<()> {
        let view = self.view.get_mut();
        let index = self.index.as_index();
        let self_target = view.get_local_tag(name);
        let new_target = merge_ref_targets(index, self_target, base_target, other_target)?;
        view.set_local_tag_target(name, new_target);
        Ok(())
    }

    pub fn get_remote_tag(&self, symbol: RemoteRefSymbol<'_>) -> RemoteRef {
        self.view.with_ref(|v| v.get_remote_tag(symbol).clone())
    }

    pub fn set_remote_tag(&mut self, symbol: RemoteRefSymbol<'_>, remote_ref: RemoteRef) {
        self.view_mut().set_remote_tag(symbol, remote_ref);
    }

    fn merge_remote_tag(
        &mut self,
        symbol: RemoteRefSymbol<'_>,
        base_ref: &RemoteRef,
        other_ref: &RemoteRef,
    ) -> IndexResult<()> {
        let view = self.view.get_mut();
        let index = self.index.as_index();
        let self_ref = view.get_remote_tag(symbol);
        let new_ref = merge_remote_refs(index, self_ref, base_ref, other_ref)?;
        view.set_remote_tag(symbol, new_ref);
        Ok(())
    }

    pub fn get_git_ref(&self, name: &GitRefName) -> RefTarget {
        self.view.with_ref(|v| v.get_git_ref(name).clone())
    }

    pub fn set_git_ref_target(&mut self, name: &GitRefName, target: RefTarget) {
        self.view_mut().set_git_ref_target(name, target);
    }

    fn merge_git_ref(
        &mut self,
        name: &GitRefName,
        base_target: &RefTarget,
        other_target: &RefTarget,
    ) -> IndexResult<()> {
        let view = self.view.get_mut();
        let index = self.index.as_index();
        let self_target = view.get_git_ref(name);
        let new_target = merge_ref_targets(index, self_target, base_target, other_target)?;
        view.set_git_ref_target(name, new_target);
        Ok(())
    }

    pub fn git_head(&self) -> RefTarget {
        self.view.with_ref(|v| v.git_head().clone())
    }

    pub fn set_git_head_target(&mut self, target: RefTarget) {
        self.view_mut().set_git_head_target(target);
    }

    pub fn set_view(&mut self, data: op_store::View) {
        self.view_mut().set_view(data);
        self.view.mark_dirty();
    }

    pub fn merge(
        &mut self,
        base_repo: &ReadonlyRepo,
        other_repo: &ReadonlyRepo,
    ) -> Result<(), RepoLoaderError> {
        // First, merge the index, so we can take advantage of a valid index when
        // merging the view. Merging in base_repo's index isn't typically
        // necessary, but it can be if base_repo is ahead of either self or other_repo
        // (e.g. because we're undoing an operation that hasn't been published).
        self.index.merge_in(base_repo.readonly_index())?;
        self.index.merge_in(other_repo.readonly_index())?;

        self.view.ensure_clean(|v| self.enforce_view_invariants(v));
        self.merge_view(&base_repo.view, &other_repo.view)?;
        self.view.mark_dirty();
        Ok(())
    }

    pub fn merge_index(&mut self, other_repo: &ReadonlyRepo) -> IndexResult<()> {
        self.index.merge_in(other_repo.readonly_index())
    }

    fn merge_view(&mut self, base: &View, other: &View) -> Result<(), RepoLoaderError> {
        let changed_wc_commits = diff_named_commit_ids(base.wc_commit_ids(), other.wc_commit_ids());
        for (name, (base_id, other_id)) in changed_wc_commits {
            self.merge_wc_commit(name, base_id, other_id);
        }

        let base_heads = base.heads().iter().cloned().collect_vec();
        let own_heads = self.view().heads().iter().cloned().collect_vec();
        let other_heads = other.heads().iter().cloned().collect_vec();

        // HACK: Don't walk long ranges of commits to find rewrites when using
        // custom index implementations. The only custom index implementation
        // we're currently aware of is Google's. That repo has too high a commit
        // rate for it to be feasible to walk all added and removed commits.
        // TODO: Fix this somehow. Maybe a method on `Index` to find rewritten
        // commits given `base_heads`, `own_heads` and `other_heads`?
        if self.is_backed_by_default_index() {
            self.record_rewrites(&base_heads, &own_heads)?;
            self.record_rewrites(&base_heads, &other_heads)?;
            // No need to remove heads removed by `other` because we already
            // marked them abandoned or rewritten.
        } else {
            for removed_head in base.heads().difference(other.heads()) {
                self.view_mut().remove_head(removed_head);
            }
        }
        for added_head in other.heads().difference(base.heads()) {
            self.view_mut().add_head(added_head);
        }

        let changed_local_bookmarks =
            diff_named_ref_targets(base.local_bookmarks(), other.local_bookmarks());
        for (name, (base_target, other_target)) in changed_local_bookmarks {
            self.merge_local_bookmark(name, base_target, other_target)?;
        }

        let changed_local_tags = diff_named_ref_targets(base.local_tags(), other.local_tags());
        for (name, (base_target, other_target)) in changed_local_tags {
            self.merge_local_tag(name, base_target, other_target)?;
        }

        let changed_git_refs = diff_named_ref_targets(base.git_refs(), other.git_refs());
        for (name, (base_target, other_target)) in changed_git_refs {
            self.merge_git_ref(name, base_target, other_target)?;
        }

        let changed_remote_bookmarks =
            diff_named_remote_refs(base.all_remote_bookmarks(), other.all_remote_bookmarks());
        for (symbol, (base_ref, other_ref)) in changed_remote_bookmarks {
            self.merge_remote_bookmark(symbol, base_ref, other_ref)?;
        }

        let changed_remote_tags =
            diff_named_remote_refs(base.all_remote_tags(), other.all_remote_tags());
        for (symbol, (base_ref, other_ref)) in changed_remote_tags {
            self.merge_remote_tag(symbol, base_ref, other_ref)?;
        }

        let new_git_head_target = merge_ref_targets(
            self.index(),
            self.view().git_head(),
            base.git_head(),
            other.git_head(),
        )?;
        self.set_git_head_target(new_git_head_target);

        Ok(())
    }

    /// Finds and records commits that were rewritten or abandoned between
    /// `old_heads` and `new_heads`.
    fn record_rewrites(
        &mut self,
        old_heads: &[CommitId],
        new_heads: &[CommitId],
    ) -> BackendResult<()> {
        let mut removed_changes: HashMap<ChangeId, Vec<CommitId>> = HashMap::new();
        for item in revset::walk_revs(self, old_heads, new_heads)
            .map_err(|err| err.into_backend_error())?
            .commit_change_ids()
        {
            let (commit_id, change_id) = item.map_err(|err| err.into_backend_error())?;
            removed_changes
                .entry(change_id)
                .or_default()
                .push(commit_id);
        }
        if removed_changes.is_empty() {
            return Ok(());
        }

        let mut rewritten_changes = HashSet::new();
        let mut rewritten_commits: HashMap<CommitId, Vec<CommitId>> = HashMap::new();
        for item in revset::walk_revs(self, new_heads, old_heads)
            .map_err(|err| err.into_backend_error())?
            .commit_change_ids()
        {
            let (commit_id, change_id) = item.map_err(|err| err.into_backend_error())?;
            if let Some(old_commits) = removed_changes.get(&change_id) {
                for old_commit in old_commits {
                    rewritten_commits
                        .entry(old_commit.clone())
                        .or_default()
                        .push(commit_id.clone());
                }
            }
            rewritten_changes.insert(change_id);
        }
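        // For example, if change X had commit A on the old side and commits B
        // and C on the new side, A is recorded below as a divergent rewrite to
        // {B, C}. Changes present only on the old side are abandoned instead.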
        for (old_commit, new_commits) in rewritten_commits {
            if new_commits.len() == 1 {
                self.set_rewritten_commit(
                    old_commit.clone(),
                    new_commits.into_iter().next().unwrap(),
                );
            } else {
                self.set_divergent_rewrite(old_commit.clone(), new_commits);
            }
        }

        for (change_id, removed_commit_ids) in &removed_changes {
            if !rewritten_changes.contains(change_id) {
                for id in removed_commit_ids {
                    let commit = self.store().get_commit(id)?;
                    self.record_abandoned_commit(&commit);
                }
            }
        }

        Ok(())
    }
}

impl Repo for MutableRepo {
    fn base_repo(&self) -> &ReadonlyRepo {
        &self.base_repo
    }

    fn store(&self) -> &Arc<Store> {
        self.base_repo.store()
    }

    fn op_store(&self) -> &Arc<dyn OpStore> {
        self.base_repo.op_store()
    }

    fn index(&self) -> &dyn Index {
        self.index.as_index()
    }

    fn view(&self) -> &View {
        self.view
            .get_or_ensure_clean(|v| self.enforce_view_invariants(v))
    }

    fn submodule_store(&self) -> &Arc<dyn SubmoduleStore> {
        self.base_repo.submodule_store()
    }

    fn resolve_change_id_prefix(
        &self,
        prefix: &HexPrefix,
    ) -> IndexResult<PrefixResolution<Vec<CommitId>>> {
        let change_id_index = self.index.change_id_index(&mut self.view().heads().iter());
        change_id_index.resolve_prefix(prefix)
    }

    fn shortest_unique_change_id_prefix_len(&self, target_id: &ChangeId) -> IndexResult<usize> {
        let change_id_index = self.index.change_id_index(&mut self.view().heads().iter());
        change_id_index.shortest_unique_prefix_len(target_id)
    }
}

/// Error from attempts to check out the root commit for editing
#[derive(Debug, Error)]
#[error("Cannot rewrite the root commit")]
pub struct RewriteRootCommit;

/// Error from attempts to edit a commit
#[derive(Debug, Error)]
pub enum EditCommitError {
    #[error("Current working-copy commit not found")]
    WorkingCopyCommitNotFound(#[source] BackendError),
    #[error(transparent)]
    RewriteRootCommit(#[from] RewriteRootCommit),
    #[error(transparent)]
    BackendError(#[from] BackendError),
}

/// Error from attempts to check out a commit
#[derive(Debug, Error)]
pub enum CheckOutCommitError {
    #[error("Failed to create new working-copy commit")]
    CreateCommit(#[from] BackendError),
    #[error("Failed to edit commit")]
    EditCommit(#[from] EditCommitError),
}

mod dirty_cell {
    use std::cell::OnceCell;
    use std::cell::RefCell;

    /// Cell that lazily updates the value after `mark_dirty()`.
    ///
    /// A clean value can be immutably borrowed within the `self` lifetime.
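    ///
    /// A minimal lifecycle sketch:
    ///
    /// ```ignore
    /// let mut cell = DirtyCell::with_clean(1);
    /// *cell.get_mut() += 1; // mutate in place; the value stays clean
    /// cell.mark_dirty(); // move the value into the dirty slot
    /// let value = cell.get_or_ensure_clean(|v| *v += 1); // lazily re-cleaned
    /// assert_eq!(*value, 3);
    /// ```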
    #[derive(Clone, Debug)]
    pub struct DirtyCell<T> {
        // Either the clean or the dirty value is set. The value is boxed to
        // reduce stack space and memcpy overhead.
        clean: OnceCell<Box<T>>,
        dirty: RefCell<Option<Box<T>>>,
    }

    impl<T> DirtyCell<T> {
        pub fn with_clean(value: T) -> Self {
            Self {
                clean: OnceCell::from(Box::new(value)),
                dirty: RefCell::new(None),
            }
        }

        pub fn get_or_ensure_clean(&self, f: impl FnOnce(&mut T)) -> &T {
            self.clean.get_or_init(|| {
                // Panics if ensure_clean() is invoked from within a with_ref()
                // callback, for example.
                let mut value = self.dirty.borrow_mut().take().unwrap();
                f(&mut value);
                value
            })
        }

        pub fn ensure_clean(&self, f: impl FnOnce(&mut T)) {
            self.get_or_ensure_clean(f);
        }

        pub fn into_inner(self) -> T {
            *self
                .clean
                .into_inner()
                .or_else(|| self.dirty.into_inner())
                .unwrap()
        }

        pub fn with_ref<R>(&self, f: impl FnOnce(&T) -> R) -> R {
            if let Some(value) = self.clean.get() {
                f(value)
            } else {
                f(self.dirty.borrow().as_ref().unwrap())
            }
        }

        pub fn get_mut(&mut self) -> &mut T {
            self.clean
                .get_mut()
                .or_else(|| self.dirty.get_mut().as_mut())
                .unwrap()
        }

        pub fn mark_dirty(&mut self) {
            if let Some(value) = self.clean.take() {
                *self.dirty.get_mut() = Some(value);
            }
        }
    }
}