loro_internal/
undo.rs

use std::{collections::VecDeque, sync::Arc};

use crate::sync::{AtomicU64, Mutex};
use either::Either;
use fxhash::{FxHashMap, FxHashSet};
use loro_common::{
    ContainerID, Counter, CounterSpan, HasIdSpan, IdSpan, LoroError, LoroResult, LoroValue, PeerID,
};
use tracing::{debug_span, info_span, instrument};

use crate::{
    change::{get_sys_timestamp, Timestamp},
    cursor::{AbsolutePosition, Cursor},
    delta::TreeExternalDiff,
    event::{Diff, EventTriggerKind},
    version::Frontiers,
    ContainerDiff, DiffEvent, DocDiff, LoroDoc, Subscription,
};
/// A batch of diffs.
///
/// You can use `LoroDoc::apply_diff(diff)` to apply the diff to the document.
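///
/// # Example (sketch)
///
/// ```ignore
/// // A minimal sketch; assumes two docs and `Frontiers` snapshots taken
/// // around some local edits on `doc_a`. `LoroDoc::diff` is assumed here to
/// // be the diff source paired with `apply_diff`.
/// let diff: DiffBatch = doc_a.diff(&frontiers_before, &frontiers_after)?;
/// doc_b.apply_diff(diff)?;
/// ```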
#[derive(Debug, Clone, Default)]
pub struct DiffBatch {
    pub cid_to_events: FxHashMap<ContainerID, Diff>,
    pub order: Vec<ContainerID>,
}

impl DiffBatch {
    pub fn new(diff: Vec<DocDiff>) -> Self {
        let mut map: FxHashMap<ContainerID, Diff> = Default::default();
        let mut order: Vec<ContainerID> = Vec::with_capacity(diff.len());
        for d in diff.into_iter() {
            for item in d.diff.into_iter() {
                let old = map.insert(item.id.clone(), item.diff);
                assert!(old.is_none());
                order.push(item.id.clone());
            }
        }

        Self {
            cid_to_events: map,
            order,
        }
    }

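    /// Composes `other` into `self`, so that applying the result is equivalent
    /// to applying the original `self` first and then `other`.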
    pub fn compose(&mut self, other: &Self) {
        if other.cid_to_events.is_empty() {
            return;
        }

        for (id, diff) in other.iter() {
            if let Some(this_diff) = self.cid_to_events.get_mut(id) {
                this_diff.compose_ref(diff);
            } else {
                self.cid_to_events.insert(id.clone(), diff.clone());
                self.order.push(id.clone());
            }
        }
    }

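    /// Transforms each diff in `self` against the diff in `other` that targets
    /// the same container, as if `other` happened concurrently.
    /// `left_priority` decides which side wins on conflicting positions.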
    pub fn transform(&mut self, other: &Self, left_priority: bool) {
        if other.cid_to_events.is_empty() || self.cid_to_events.is_empty() {
            return;
        }

        for (idx, diff) in self.cid_to_events.iter_mut() {
            if let Some(b_diff) = other.cid_to_events.get(idx) {
                diff.transform(b_diff, left_priority);
            }
        }
    }

    pub fn clear(&mut self) {
        self.cid_to_events.clear();
        self.order.clear();
    }

    pub fn iter(&self) -> impl Iterator<Item = (&ContainerID, &Diff)> + '_ {
        self.order
            .iter()
            .map(|cid| (cid, self.cid_to_events.get(cid).unwrap()))
    }

    #[allow(clippy::should_implement_trait)]
    pub fn into_iter(self) -> impl Iterator<Item = (ContainerID, Diff)> {
        let mut cid_to_events = self.cid_to_events;
        self.order.into_iter().map(move |cid| {
            let d = cid_to_events.remove(&cid).unwrap();
            (cid, d)
        })
    }
}

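/// Transforms the cursor's absolute position over `remote_diff`, then re-binds
/// the cursor to a concrete anchor in the (possibly remapped) container so it
/// stays valid after undo/redo.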
fn transform_cursor(
    cursor_with_pos: &mut CursorWithPos,
    remote_diff: &DiffBatch,
    doc: &LoroDoc,
    container_remap: &FxHashMap<ContainerID, ContainerID>,
) {
    let mut cid = &cursor_with_pos.cursor.container;
    while let Some(new_cid) = container_remap.get(cid) {
        cid = new_cid;
    }

    if let Some(diff) = remote_diff.cid_to_events.get(cid) {
        let new_pos = diff.transform_cursor(cursor_with_pos.pos.pos, false);
        cursor_with_pos.pos.pos = new_pos;
    }

    let new_pos = cursor_with_pos.pos.pos;
    match doc.get_handler(cid.clone()).unwrap() {
        crate::handler::Handler::Text(h) => {
            let Some(new_cursor) = h.get_cursor_internal(new_pos, cursor_with_pos.pos.side, false)
            else {
                return;
            };

            cursor_with_pos.cursor = new_cursor;
        }
        crate::handler::Handler::List(h) => {
            let Some(new_cursor) = h.get_cursor(new_pos, cursor_with_pos.pos.side) else {
                return;
            };

            cursor_with_pos.cursor = new_cursor;
        }
        crate::handler::Handler::MovableList(h) => {
            let Some(new_cursor) = h.get_cursor(new_pos, cursor_with_pos.pos.side) else {
                return;
            };

            cursor_with_pos.cursor = new_cursor;
        }
        crate::handler::Handler::Map(_) => {}
        crate::handler::Handler::Tree(_) => {}
        crate::handler::Handler::Unknown(_) => {}
        #[cfg(feature = "counter")]
        crate::handler::Handler::Counter(_) => {}
    }
}

/// UndoManager is responsible for managing undo/redo from the current peer's perspective.
///
/// Undo/redo is local: it cannot be used to undo changes made by other peers.
/// If you want to undo changes made by other peers, you may need to use the time travel feature.
///
/// PeerID cannot be changed during the lifetime of the UndoManager.
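///
/// # Example (sketch)
///
/// ```ignore
/// // A minimal sketch of the intended usage.
/// let doc = LoroDoc::new_auto_commit();
/// let mut undo = UndoManager::new(&doc);
/// doc.get_text("text").insert(0, "hello")?;
/// doc.commit_then_renew();
/// undo.undo()?; // removes "hello"
/// undo.redo()?; // brings "hello" back
/// ```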
pub struct UndoManager {
    peer: Arc<AtomicU64>,
    container_remap: Arc<Mutex<FxHashMap<ContainerID, ContainerID>>>,
    inner: Arc<Mutex<UndoManagerInner>>,
    _peer_id_change_sub: Subscription,
    _undo_sub: Subscription,
    doc: LoroDoc,
}

impl std::fmt::Debug for UndoManager {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("UndoManager")
            .field("peer", &self.peer)
            .field("container_remap", &self.container_remap)
            .field("inner", &self.inner)
            .finish()
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum UndoOrRedo {
    Undo,
    Redo,
}

impl UndoOrRedo {
    fn opposite(&self) -> UndoOrRedo {
        match self {
            Self::Undo => Self::Redo,
            Self::Redo => Self::Undo,
        }
    }
}

/// When an undo/redo item is pushed, the undo manager calls the `on_push` callback to get the metadata of the undo item.
/// The returned cursors will be recorded for the newly pushed undo item.
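///
/// A sketch of a typical callback pair (the selection helpers here are
/// hypothetical):
///
/// ```ignore
/// undo_manager.set_on_push(Some(Box::new(|_kind, _span, _event| {
///     let mut meta = UndoItemMeta::new();
///     // Record the current selection so it can be restored when the
///     // matching item is popped.
///     for cursor in current_selection_cursors() {
///         meta.add_cursor(&cursor);
///     }
///     meta
/// })));
/// undo_manager.set_on_pop(Some(Box::new(|_kind, _span, meta| {
///     restore_selection(&meta.cursors);
/// })));
/// ```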
pub type OnPush = Box<
    dyn for<'a> Fn(UndoOrRedo, CounterSpan, Option<DiffEvent<'a>>) -> UndoItemMeta + Send + Sync,
>;
pub type OnPop = Box<dyn Fn(UndoOrRedo, CounterSpan, UndoItemMeta) + Send + Sync>;

struct UndoManagerInner {
    next_counter: Option<Counter>,
    undo_stack: Stack,
    redo_stack: Stack,
    processing_undo: bool,
    last_undo_time: i64,
    merge_interval_in_ms: i64,
    max_stack_size: usize,
    exclude_origin_prefixes: Vec<Box<str>>,
    last_popped_selection: Option<Vec<CursorWithPos>>,
    on_push: Option<OnPush>,
    on_pop: Option<OnPop>,
    group: Option<UndoGroup>,
}

impl std::fmt::Debug for UndoManagerInner {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("UndoManagerInner")
            .field("next_counter", &self.next_counter)
            .field("undo_stack", &self.undo_stack)
            .field("redo_stack", &self.redo_stack)
            .field("processing_undo", &self.processing_undo)
            .field("last_undo_time", &self.last_undo_time)
            .field("merge_interval_in_ms", &self.merge_interval_in_ms)
            .field("max_stack_size", &self.max_stack_size)
            .field("exclude_origin_prefixes", &self.exclude_origin_prefixes)
            .field("group", &self.group)
            .finish()
    }
}

#[derive(Debug, Clone, Default)]
struct UndoGroup {
    start_counter: Counter,
    affected_cids: FxHashSet<ContainerID>,
}

impl UndoGroup {
    pub fn new(start_counter: Counter) -> Self {
        Self {
            start_counter,
            affected_cids: Default::default(),
        }
    }
}

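/// An undo/redo stack organized as rows.
///
/// Each row pairs a queue of stack items with the batch of remote diffs that
/// arrived after those items were pushed; popped spans and their recorded
/// cursors are transformed against that remote diff.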
#[derive(Debug)]
struct Stack {
    stack: VecDeque<(VecDeque<StackItem>, Arc<Mutex<DiffBatch>>)>,
    size: usize,
}

#[derive(Debug, Clone)]
struct StackItem {
    span: CounterSpan,
    meta: UndoItemMeta,
}

/// The metadata of an undo item.
///
/// The cursors inside the metadata are transformed by remote operations as well,
/// so that when the item is popped, users can restore the cursor positions correctly.
#[derive(Debug, Default, Clone)]
pub struct UndoItemMeta {
    pub value: LoroValue,
    pub cursors: Vec<CursorWithPos>,
}

#[derive(Debug, Clone)]
pub struct CursorWithPos {
    pub cursor: Cursor,
    pub pos: AbsolutePosition,
}

impl UndoItemMeta {
    pub fn new() -> Self {
        Self {
            value: LoroValue::Null,
            cursors: Default::default(),
        }
    }

    /// It's assumed that the cursor was acquired just before the ops that
    /// need to be undone/redone.
    ///
    /// We rely on the validity of the original `pos` value.
    pub fn add_cursor(&mut self, cursor: &Cursor) {
        self.cursors.push(CursorWithPos {
            cursor: cursor.clone(),
            pos: AbsolutePosition {
                pos: cursor.origin_pos,
                side: cursor.side,
            },
        });
    }

    pub fn set_value(&mut self, value: LoroValue) {
        self.value = value;
    }
}

impl Stack {
    pub fn new() -> Self {
        let mut stack = VecDeque::new();
        stack.push_back((VecDeque::new(), Arc::new(Mutex::new(Default::default()))));
        Stack { stack, size: 0 }
    }

    pub fn pop(&mut self) -> Option<(StackItem, Arc<Mutex<DiffBatch>>)> {
        while self.stack.back().unwrap().0.is_empty() && self.stack.len() > 1 {
            let (_, diff) = self.stack.pop_back().unwrap();
            let diff = diff.lock().unwrap();
            if !diff.cid_to_events.is_empty() {
                self.stack
                    .back_mut()
                    .unwrap()
                    .1
                    .lock()
                    .unwrap()
                    .compose(&diff);
            }
        }

        if self.stack.len() == 1 && self.stack.back().unwrap().0.is_empty() {
            // If the stack is empty, we need to clear the remote diff
            self.stack.back_mut().unwrap().1.lock().unwrap().clear();
            return None;
        }

        self.size -= 1;
        let last = self.stack.back_mut().unwrap();
        last.0.pop_back().map(|x| (x, last.1.clone()))
        // Even if this row in the stack is now empty, we don't pop it right away,
        // because we still need its remote diff to be available:
        // cursor position transformation relies on the remote diff in the same row.
    }

    pub fn push(&mut self, span: CounterSpan, meta: UndoItemMeta) {
        self.push_with_merge(span, meta, false, None)
    }

    pub fn push_with_merge(
        &mut self,
        span: CounterSpan,
        meta: UndoItemMeta,
        can_merge: bool,
        group: Option<&UndoGroup>,
    ) {
        let last = self.stack.back_mut().unwrap();
        let last_remote_diff = last.1.lock().unwrap();

        // Check whether the remote diff is disjoint from the current undo group
        let is_disjoint_group = group.is_some_and(|g| {
            g.affected_cids.iter().all(|cid| {
                last_remote_diff
                    .cid_to_events
                    .get(cid)
                    .is_none_or(|diff| diff.is_empty())
            })
        });

        // Can't merge if remote diffs exist and they are not disjoint from the current undo group
        let should_create_new_entry =
            !last_remote_diff.cid_to_events.is_empty() && !is_disjoint_group;

        if should_create_new_entry {
            // Create a new entry in the stack
            drop(last_remote_diff);
            let mut v = VecDeque::new();
            v.push_back(StackItem { span, meta });
            self.stack
                .push_back((v, Arc::new(Mutex::new(DiffBatch::default()))));
            self.size += 1;
            return;
        }

        // Try to merge with the previous entry if allowed
        if can_merge {
            if let Some(last_span) = last.0.back_mut() {
                if last_span.span.end == span.start {
                    // Merge spans by extending the end of the last span
                    last_span.span.end = span.end;
                    return;
                }
            }
        }

        // Add as a new item to the existing entry
        self.size += 1;
        last.0.push_back(StackItem { span, meta });
    }

    pub fn compose_remote_event(&mut self, diff: &[&ContainerDiff]) {
        if self.is_empty() {
            return;
        }

        let remote_diff = &mut self.stack.back_mut().unwrap().1;
        let mut remote_diff = remote_diff.lock().unwrap();
        for e in diff {
            if let Some(d) = remote_diff.cid_to_events.get_mut(&e.id) {
                d.compose_ref(&e.diff);
            } else {
                remote_diff
                    .cid_to_events
                    .insert(e.id.clone(), e.diff.clone());
                remote_diff.order.push(e.id.clone());
            }
        }
    }

    pub fn transform_based_on_this_delta(&mut self, diff: &DiffBatch) {
        if self.is_empty() {
            return;
        }
        let remote_diff = &mut self.stack.back_mut().unwrap().1;
        remote_diff.lock().unwrap().transform(diff, false);
    }

    pub fn clear(&mut self) {
        self.stack = VecDeque::new();
        self.stack.push_back((VecDeque::new(), Default::default()));
        self.size = 0;
    }

    pub fn is_empty(&self) -> bool {
        self.size == 0
    }

    pub fn len(&self) -> usize {
        self.size
    }

    fn pop_front(&mut self) {
        if self.is_empty() {
            return;
        }

        self.size -= 1;
        let first = self.stack.front_mut().unwrap();
        let f = first.0.pop_front();
        assert!(f.is_some());
        if first.0.is_empty() {
            self.stack.pop_front();
        }
    }
}

impl Default for Stack {
    fn default() -> Self {
        Stack::new()
    }
}

impl UndoManagerInner {
    fn new(last_counter: Counter) -> Self {
        Self {
            next_counter: Some(last_counter),
            undo_stack: Default::default(),
            redo_stack: Default::default(),
            processing_undo: false,
            merge_interval_in_ms: 0,
            last_undo_time: 0,
            max_stack_size: usize::MAX,
            exclude_origin_prefixes: vec![],
            last_popped_selection: None,
            on_pop: None,
            on_push: None,
            group: None,
        }
    }

    /// Returns true if the given container diffs are disjoint from the current group,
    /// i.e. they share no changed container ids. Returns false if no group is active.
    fn is_disjoint_with_group(&self, diff: &[&ContainerDiff]) -> bool {
        let Some(group) = &self.group else {
            return false;
        };

        diff.iter().all(|d| !group.affected_cids.contains(&d.id))
    }

    fn record_checkpoint(&mut self, latest_counter: Counter, event: Option<DiffEvent>) {
        let previous_counter = self.next_counter;

        if Some(latest_counter) == self.next_counter {
            return;
        }

        if self.next_counter.is_none() {
            self.next_counter = Some(latest_counter);
            return;
        }

        if let Some(group) = &mut self.group {
            event.iter().for_each(|e| {
                e.events.iter().for_each(|e| {
                    group.affected_cids.insert(e.id.clone());
                })
            });
        }

        let now = get_sys_timestamp() as Timestamp;
        let span = CounterSpan::new(self.next_counter.unwrap(), latest_counter);
        let meta = self
            .on_push
            .as_ref()
            .map(|x| x(UndoOrRedo::Undo, span, event))
            .unwrap_or_default();

        // Whether the change is within the accepted merge interval
        let in_merge_interval = now - self.last_undo_time < self.merge_interval_in_ms;

        // If a group is active but nothing has been pushed to it yet, don't merge.
        // If a group is active and this is not the first push in the group, merge.
        let group_should_merge = self.group.is_some()
            && match (
                previous_counter,
                self.group.as_ref().map(|g| g.start_counter),
            ) {
                (Some(previous), Some(active)) => previous != active,
                _ => true,
            };

        let should_merge = !self.undo_stack.is_empty() && (in_merge_interval || group_should_merge);

        if should_merge {
            self.undo_stack
                .push_with_merge(span, meta, true, self.group.as_ref());
        } else {
            self.last_undo_time = now;
            self.undo_stack.push(span, meta);
        }

        self.next_counter = Some(latest_counter);
        self.redo_stack.clear();
        while self.undo_stack.len() > self.max_stack_size {
            self.undo_stack.pop_front();
        }
    }
}

fn get_counter_end(doc: &LoroDoc, peer: PeerID) -> Counter {
    doc.oplog()
        .lock()
        .unwrap()
        .vv()
        .get(&peer)
        .cloned()
        .unwrap_or(0)
}

impl UndoManager {
    pub fn new(doc: &LoroDoc) -> Self {
        let peer = Arc::new(AtomicU64::new(doc.peer_id()));
        let peer_clone = peer.clone();
        let peer_clone2 = peer.clone();
        let inner = Arc::new(Mutex::new(UndoManagerInner::new(get_counter_end(
            doc,
            doc.peer_id(),
        ))));
        let inner_clone = inner.clone();
        let inner_clone2 = inner.clone();
        let remap_containers = Arc::new(Mutex::new(FxHashMap::default()));
        let remap_containers_clone = remap_containers.clone();
        let undo_sub = doc.subscribe_root(Arc::new(move |event| match event.event_meta.by {
            EventTriggerKind::Local => {
                // TODO: PERF undo can be significantly faster if we can get
                // the DiffBatch for undo here
                let Ok(mut inner) = inner_clone.lock() else {
                    return;
                };
                if inner.processing_undo {
                    return;
                }
                if let Some(id) = event
                    .event_meta
                    .to
                    .iter()
                    .find(|x| x.peer == peer_clone.load(std::sync::atomic::Ordering::Relaxed))
                {
                    if inner
                        .exclude_origin_prefixes
                        .iter()
                        .any(|x| event.event_meta.origin.starts_with(&**x))
                    {
                        // If the event comes from an excluded origin, we don't record it
                        // in the undo stack, but we still need to record its effect as if
                        // it were a remote event.
                        inner.undo_stack.compose_remote_event(event.events);
                        inner.redo_stack.compose_remote_event(event.events);
                        inner.next_counter = Some(id.counter + 1);
                    } else {
                        inner.record_checkpoint(id.counter + 1, Some(event));
                    }
                }
            }
            EventTriggerKind::Import => {
                let mut inner = inner_clone.lock().unwrap();

                for e in event.events {
                    if let Diff::Tree(tree) = &e.diff {
                        for item in &tree.diff {
                            let target = item.target;
                            if let TreeExternalDiff::Create { .. } = &item.action {
                                // A concurrent create event may bring a deleted tree node back,
                                // so we need to remove it from the container remap.
                                remap_containers_clone
                                    .lock()
                                    .unwrap()
                                    .remove(&target.associated_meta_container());
                            }
                        }
                    }
                }

                let is_import_disjoint = inner.is_disjoint_with_group(event.events);

                inner.undo_stack.compose_remote_event(event.events);
                inner.redo_stack.compose_remote_event(event.events);

                // If the import is not disjoint from the active group, we end the group;
                // all subsequent changes will become new undo items.
                if !is_import_disjoint {
                    inner.group = None;
                }
            }
            EventTriggerKind::Checkout => {
                let mut inner = inner_clone.lock().unwrap();
                inner.undo_stack.clear();
                inner.redo_stack.clear();
                inner.next_counter = None;
            }
        }));

        let sub = doc.subscribe_peer_id_change(Box::new(move |id| {
            let mut inner = inner_clone2.lock().unwrap();
            inner.undo_stack.clear();
            inner.redo_stack.clear();
            inner.next_counter = Some(id.counter);
            peer_clone2.store(id.peer, std::sync::atomic::Ordering::Relaxed);
            true
        }));

        UndoManager {
            peer,
            container_remap: remap_containers,
            inner,
            _peer_id_change_sub: sub,
            _undo_sub: undo_sub,
            doc: doc.clone(),
        }
    }

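    /// Starts a new undo group: all checkpoints recorded until
    /// [`UndoManager::group_end`] is called are merged into a single undo item.
    ///
    /// Returns an error if a group has already been started.
    ///
    /// # Example (sketch)
    ///
    /// ```ignore
    /// undo.group_start()?;
    /// text.insert(0, "a")?;
    /// doc.commit_then_renew();
    /// text.insert(1, "b")?;
    /// doc.commit_then_renew();
    /// undo.group_end();
    /// undo.undo()?; // reverts both edits in one step
    /// ```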
    pub fn group_start(&mut self) -> LoroResult<()> {
        let mut inner = self.inner.lock().unwrap();

        if inner.group.is_some() {
            return Err(LoroError::UndoGroupAlreadyStarted);
        }

        inner.group = Some(UndoGroup::new(inner.next_counter.unwrap()));

        Ok(())
    }

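    /// Ends the active undo group, if any; subsequent checkpoints are no
    /// longer merged into it.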
    pub fn group_end(&mut self) {
        self.inner.lock().unwrap().group = None;
    }

    pub fn peer(&self) -> PeerID {
        self.peer.load(std::sync::atomic::Ordering::Relaxed)
    }

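    /// Sets the merge interval in milliseconds.
    ///
    /// Checkpoints recorded within `interval` ms of the last undo item are
    /// merged into it; `0` (the default) disables time-based merging.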
    pub fn set_merge_interval(&mut self, interval: i64) {
        self.inner.lock().unwrap().merge_interval_in_ms = interval;
    }

    pub fn set_max_undo_steps(&mut self, size: usize) {
        self.inner.lock().unwrap().max_stack_size = size;
    }

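    /// Excludes local commits whose origin starts with `prefix` from the undo
    /// stack; their effects are instead tracked as if they were remote events.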
    pub fn add_exclude_origin_prefix(&mut self, prefix: &str) {
        self.inner
            .lock()
            .unwrap()
            .exclude_origin_prefixes
            .push(prefix.into());
    }

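    /// Commits the pending transaction and records an undo checkpoint at the
    /// current end of this peer's op history.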
    pub fn record_new_checkpoint(&mut self) -> LoroResult<()> {
        self.doc.commit_then_renew();
        let counter = get_counter_end(&self.doc, self.peer());
        self.inner.lock().unwrap().record_checkpoint(counter, None);
        Ok(())
    }

    #[instrument(skip_all)]
    pub fn undo(&mut self) -> LoroResult<bool> {
        self.perform(
            |x| &mut x.undo_stack,
            |x| &mut x.redo_stack,
            UndoOrRedo::Undo,
        )
    }

    #[instrument(skip_all)]
    pub fn redo(&mut self) -> LoroResult<bool> {
        self.perform(
            |x| &mut x.redo_stack,
            |x| &mut x.undo_stack,
            UndoOrRedo::Redo,
        )
    }

    fn perform(
        &mut self,
        get_stack: impl Fn(&mut UndoManagerInner) -> &mut Stack,
        get_opposite: impl Fn(&mut UndoManagerInner) -> &mut Stack,
        kind: UndoOrRedo,
    ) -> LoroResult<bool> {
        let doc = &self.doc.clone();
        // When in the undo/redo loop, the new undo/redo stack item should restore the selection
        // to the state it was in before the item that was popped two steps ago from the stack.
        //
        //                          ┌────────────┐
        //                          │Selection 1 │
        //                          └─────┬──────┘
        //                                │   Some
        //                                ▼   ops
        //                          ┌────────────┐
        //                          │Selection 2 │
        //                          └─────┬──────┘
        //                                │   Some
        //                                ▼   ops
        //                          ┌────────────┐
        //                          │Selection 3 │◁ ─ ─ ─ ─ ─ ─ ─  Restore  ─ ─ ─
        //                          └─────┬──────┘                               │
        //                                │
        //                                │                                      │
        //                                │                              ┌ ─ ─ ─ ─ ─ ─ ─
        //           Enter the            │   Undo ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─▶   Push Redo   │
        //           undo/redo ─ ─ ─ ▶    ▼                              └ ─ ─ ─ ─ ─ ─ ─
        //             loop         ┌────────────┐                               │
        //                          │Selection 2 │◁─ ─ ─  Restore  ─
        //                          └─────┬──────┘                  │            │
        //                                │
        //                                │                         │            │
        //                                │                 ┌ ─ ─ ─ ─ ─ ─ ─
        //                                │   Undo ─ ─ ─ ─ ▶   Push Redo   │     │
        //                                ▼                 └ ─ ─ ─ ─ ─ ─ ─
        //                          ┌────────────┐                  │            │
        //                          │Selection 1 │
        //                          └─────┬──────┘                  │            │
        //                                │   Redo ◀ ─ ─ ─ ─ ─ ─ ─ ─
        //                                ▼                                      │
        //                          ┌────────────┐
        //         ┌   Restore   ─ ▷│Selection 2 │                               │
        //                          └─────┬──────┘
        //         │                      │                                      │
        // ┌ ─ ─ ─ ─ ─ ─ ─                │
        //    Push Undo   │◀─ ─ ─ ─ ─ ─ ─ │   Redo ◀ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┘
        // └ ─ ─ ─ ─ ─ ─ ─                ▼
        //         │                ┌────────────┐
        //                          │Selection 3 │
        //         │                └─────┬──────┘
        //          ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ▶ │   Undo
        //                                ▼
        //                          ┌────────────┐
        //                          │Selection 2 │
        //                          └────────────┘
        //
        // Because users may change the selections during the undo/redo loop, it's
        // more stable to keep the selection stored in the last stack item
        // rather than using the current selection directly.
        self.record_new_checkpoint()?;
        let end_counter = get_counter_end(doc, self.peer());
        let mut top = {
            let mut inner = self.inner.lock().unwrap();
            inner.processing_undo = true;
            get_stack(&mut inner).pop()
        };

        let mut executed = false;
        while let Some((mut span, remote_diff)) = top {
            let mut next_push_selection = None;
            {
                let inner = self.inner.clone();
                // We need to clone this because otherwise <transform_delta> will be applied to the same remote diff
                let remote_change_clone = remote_diff.lock().unwrap().clone();
                let commit = doc.undo_internal(
                    IdSpan {
                        peer: self.peer(),
                        counter: span.span,
                    },
                    &mut self.container_remap.lock().unwrap(),
                    Some(&remote_change_clone),
                    &mut |diff| {
                        info_span!("transform remote diff").in_scope(|| {
                            let mut inner = inner.lock().unwrap();
                            // <transform_delta>
                            get_stack(&mut inner).transform_based_on_this_delta(diff);
                        });
                    },
                )?;
                drop(commit);
                let mut inner = self.inner.lock().unwrap();
                if let Some(x) = inner.on_pop.as_ref() {
                    for cursor in span.meta.cursors.iter_mut() {
                        // <cursor_transform> We need to transform the cursor here.
                        // Note that <transform_delta> has already been applied at this point,
                        // so remote_diff has been transformed by it too (which is what we need).
                        transform_cursor(
                            cursor,
                            &remote_diff.lock().unwrap(),
                            doc,
                            &self.container_remap.lock().unwrap(),
                        );
                    }

                    x(kind, span.span, span.meta.clone());
                    let take = inner.last_popped_selection.take();
                    next_push_selection = take;
                    inner.last_popped_selection = Some(span.meta.cursors);
                }
            }
            let new_counter = get_counter_end(doc, self.peer());
            if end_counter != new_counter {
                let mut inner = self.inner.lock().unwrap();
                let mut meta = inner
                    .on_push
                    .as_ref()
                    .map(|x| {
                        x(
                            kind.opposite(),
                            CounterSpan::new(end_counter, new_counter),
                            None,
                        )
                    })
                    .unwrap_or_default();

                if matches!(kind, UndoOrRedo::Undo) && get_opposite(&mut inner).is_empty() {
                    // If it's the first undo, we use the cursors provided by the user
                } else if let Some(inner) = next_push_selection.take() {
                    // Otherwise, we use the cursors from the undo/redo loop
                    meta.cursors = inner;
                }

                get_opposite(&mut inner).push(CounterSpan::new(end_counter, new_counter), meta);
                inner.next_counter = Some(new_counter);
                executed = true;
                break;
            } else {
                // continue to pop the undo item as this undo is a no-op
                top = get_stack(&mut self.inner.lock().unwrap()).pop();
                continue;
            }
        }

        self.inner.lock().unwrap().processing_undo = false;
        Ok(executed)
    }

    pub fn can_undo(&self) -> bool {
        !self.inner.lock().unwrap().undo_stack.is_empty()
    }

    pub fn can_redo(&self) -> bool {
        !self.inner.lock().unwrap().redo_stack.is_empty()
    }

    pub fn undo_count(&self) -> usize {
        self.inner.lock().unwrap().undo_stack.len()
    }

    pub fn redo_count(&self) -> usize {
        self.inner.lock().unwrap().redo_stack.len()
    }

    pub fn set_on_push(&self, on_push: Option<OnPush>) {
        self.inner.lock().unwrap().on_push = on_push;
    }

    pub fn set_on_pop(&self, on_pop: Option<OnPop>) {
        self.inner.lock().unwrap().on_pop = on_pop;
    }

    pub fn clear(&self) {
        // Take the lock once for both stacks.
        let mut inner = self.inner.lock().unwrap();
        inner.undo_stack.clear();
        inner.redo_stack.clear();
    }
}

/// Undo the given spans of operations.
///
/// # Parameters
///
/// - `spans`: A vector of tuples where each tuple contains an `IdSpan` and its associated `Frontiers`.
///   - `IdSpan`: Represents a span of operations identified by an ID.
///   - `Frontiers`: Represents the deps of the given `IdSpan`.
/// - `last_frontiers_or_last_bi`: Either the latest frontiers of the document, or
///   a precomputed diff batch to use as the last span's B event.
/// - `calc_diff`: A closure that takes two `Frontiers` and calculates the difference between them, returning a `DiffBatch`.
/// - `on_last_event_a`: A callback invoked with the last span's A' event before it is transformed into C.
///
/// # Returns
///
/// - `DiffBatch`: Applying this batch on the latest frontiers will undo the ops in the given spans.
pub(crate) fn undo(
    spans: Vec<(IdSpan, Frontiers)>,
    last_frontiers_or_last_bi: Either<&Frontiers, &DiffBatch>,
    calc_diff: impl Fn(&Frontiers, &Frontiers) -> DiffBatch,
    on_last_event_a: &mut dyn FnMut(&DiffBatch),
) -> DiffBatch {
    // The process of performing undo is:
    //
    // 0. Split the span into a series of continuous spans. There is no external dep within each continuous span.
    //
    // For each continuous span_i:
    //
    // 1. a. Calculate the event of checkout from id_span.last to id_span.deps, call it Ai. It undoes the ops in the current span.
    //    b. Calculate A'i = Ai + T(Ci-1, Ai) if i > 0; otherwise A'i = Ai.
    //       NOTE: A'i can undo the ops in the current span and the previous spans, if it's applied on the id_span.last version.
    // 2. Calculate the event of checkout from id_span.last to [the next span's last id] or [the latest version], call it Bi.
    // 3. Transform event A'i based on Bi, call it Ci.
    // 4. If span_i is the last span, apply Ci to the current state.
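    //
    // For intuition, a hypothetical two-span text example: span_0 inserted "A"
    // at index 0, then span_1 inserted "B" at index 1.
    //   i = 0: A_0 deletes "A"@0; B_0 inserts "B"@1; C_0 = T(A_0, B_0) still
    //          deletes index 0.
    //   i = 1: A_1 deletes "B"@1; A'_1 = A_1 + T(C_0, A_1) deletes both
    //          characters; B_1 is empty (we are already at the latest version),
    //          so C_1 = A'_1, which undoes both spans when applied to the
    //          latest state.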

    // -------------------------------------------------------
    // 0. Split the span into a series of continuous spans
    // -------------------------------------------------------

    let mut last_ci: Option<DiffBatch> = None;
    for i in 0..spans.len() {
        debug_span!("Undo", ?i, "Undo span {:?}", &spans[i]).in_scope(|| {
            let (this_id_span, this_deps) = &spans[i];
            // ---------------------------------------
            // 1.a Calc event A_i
            // ---------------------------------------
            let mut event_a_i = debug_span!("1. Calc event A_i").in_scope(|| {
                // Checkout to the last id of the id_span
                calc_diff(&this_id_span.id_last().into(), this_deps)
            });

            // println!("event_a_i: {:?}", event_a_i);

            // ---------------------------------------
            // 2. Calc event B_i
            // ---------------------------------------
            let stack_diff_batch;
            let event_b_i = 'block: {
                let next = if i + 1 < spans.len() {
                    spans[i + 1].0.id_last().into()
                } else {
                    match last_frontiers_or_last_bi {
                        Either::Left(last_frontiers) => last_frontiers.clone(),
                        Either::Right(right) => break 'block right,
                    }
                };
                stack_diff_batch = Some(calc_diff(&this_id_span.id_last().into(), &next));
                stack_diff_batch.as_ref().unwrap()
            };

            // println!("event_b_i: {:?}", event_b_i);

            // event_a_prime can undo the ops in the current span and the previous spans
            let mut event_a_prime = if let Some(mut last_ci) = last_ci.take() {
                // ------------------------------------------------------------------------------
                // 1.b Transform and apply Ci-1 based on Ai, call it A'i
                // ------------------------------------------------------------------------------
                last_ci.transform(&event_a_i, true);

                event_a_i.compose(&last_ci);
                event_a_i
            } else {
                event_a_i
            };
            if i == spans.len() - 1 {
                on_last_event_a(&event_a_prime);
            }
            // --------------------------------------------------
            // 3. Transform event A'_i based on B_i, call it C_i
            // --------------------------------------------------
            event_a_prime.transform(event_b_i, true);

            // println!("event_a_prime: {:?}", event_a_prime);

            let c_i = event_a_prime;
            last_ci = Some(c_i);
        });
    }

    last_ci.unwrap()
}