//! Undo/redo manager for `loro_internal` (`undo.rs`).
1use std::{cell::RefCell, collections::VecDeque, sync::Arc};
2
3use crate::sync::{AtomicU64, Mutex};
4use either::Either;
5use loro_common::{
6    ContainerID, Counter, CounterSpan, HasIdSpan, IdSpan, LoroError, LoroResult, LoroValue, PeerID,
7};
8use parking_lot::lock_api::ReentrantMutex;
9use rustc_hash::{FxHashMap, FxHashSet};
10use tracing::{debug_span, info_span, instrument};
11
12use crate::{
13    change::{get_sys_timestamp, Timestamp},
14    cursor::{AbsolutePosition, Cursor},
15    delta::TreeExternalDiff,
16    event::{Diff, EventTriggerKind},
17    version::Frontiers,
18    ContainerDiff, DiffEvent, DocDiff, LoroDoc, Subscription,
19};
20
/// A batch of diffs.
///
/// You can use `loroDoc.apply_diff(diff)` to apply the diff to the document.
#[derive(Debug, Clone, Default)]
pub struct DiffBatch {
    /// One composed diff per container. Keys mirror the entries of `order`.
    pub cid_to_events: FxHashMap<ContainerID, Diff>,
    /// Container IDs in first-seen order; used for deterministic iteration.
    pub order: Vec<ContainerID>,
}
29
30impl DiffBatch {
31    pub fn new(diff: Vec<DocDiff>) -> Self {
32        let mut map: FxHashMap<ContainerID, Diff> = Default::default();
33        let mut order: Vec<ContainerID> = Vec::with_capacity(diff.len());
34        for d in diff.into_iter() {
35            for item in d.diff.into_iter() {
36                let old = map.insert(item.id.clone(), item.diff);
37                assert!(old.is_none(), "Duplicate container ID in diff events");
38                order.push(item.id.clone());
39            }
40        }
41
42        Self {
43            cid_to_events: map,
44            order,
45        }
46    }
47
48    pub fn compose(&mut self, other: &Self) {
49        if other.cid_to_events.is_empty() {
50            return;
51        }
52
53        for (id, diff) in other.iter() {
54            if let Some(this_diff) = self.cid_to_events.get_mut(id) {
55                this_diff.compose_ref(diff);
56            } else {
57                self.cid_to_events.insert(id.clone(), diff.clone());
58                self.order.push(id.clone());
59            }
60        }
61    }
62
63    pub fn transform(&mut self, other: &Self, left_priority: bool) {
64        if other.cid_to_events.is_empty() || self.cid_to_events.is_empty() {
65            return;
66        }
67
68        for (idx, diff) in self.cid_to_events.iter_mut() {
69            if let Some(b_diff) = other.cid_to_events.get(idx) {
70                diff.transform(b_diff, left_priority);
71            }
72        }
73    }
74
75    pub fn clear(&mut self) {
76        self.cid_to_events.clear();
77        self.order.clear();
78    }
79
80    pub fn iter(&self) -> impl Iterator<Item = (&ContainerID, &Diff)> + '_ {
81        self.order
82            .iter()
83            .map(|cid| (cid, self.cid_to_events.get(cid).unwrap()))
84    }
85
86    #[allow(clippy::should_implement_trait)]
87    pub fn into_iter(self) -> impl Iterator<Item = (ContainerID, Diff)> {
88        let mut cid_to_events = self.cid_to_events;
89        self.order.into_iter().map(move |cid| {
90            let d = cid_to_events.remove(&cid).unwrap();
91            (cid, d)
92        })
93    }
94}
95
/// Transforms a cursor so it remains valid after `remote_diff` has been
/// applied to `doc`.
///
/// `container_remap` redirects cursors whose container was replaced
/// (the remap chain is followed to the final container ID before any
/// position transformation happens).
fn transform_cursor(
    cursor_with_pos: &mut CursorWithPos,
    remote_diff: &DiffBatch,
    doc: &LoroDoc,
    container_remap: &FxHashMap<ContainerID, ContainerID>,
) {
    // Follow the remap chain to the most up-to-date container ID.
    let mut container_changed = false;
    let mut cid = &cursor_with_pos.cursor.container;
    while let Some(new_cid) = container_remap.get(cid) {
        cid = new_cid;
        container_changed = true;
    }

    if cursor_with_pos.cursor.id.is_none() {
        // We don't need to transform a cursor that always points to the leftmost or rightmost position
        if container_changed {
            cursor_with_pos.cursor.container = cid.clone();
        }
        return;
    }

    // Shift the absolute position by the remote diff recorded for this
    // container, if there is one.
    if let Some(diff) = remote_diff.cid_to_events.get(cid) {
        let new_pos = diff.transform_cursor(cursor_with_pos.pos.pos, false);
        cursor_with_pos.pos.pos = new_pos;
    };

    // Re-acquire a concrete cursor at the transformed position. Map, Tree,
    // Unknown and Counter handlers are left untouched here.
    let new_pos = cursor_with_pos.pos.pos;
    match doc.get_handler(cid.clone()).unwrap() {
        crate::handler::Handler::Text(h) => {
            let Some(new_cursor) = h.get_cursor_internal(new_pos, cursor_with_pos.pos.side, false)
            else {
                return;
            };

            cursor_with_pos.cursor = new_cursor;
        }
        crate::handler::Handler::List(h) => {
            let Some(new_cursor) = h.get_cursor(new_pos, cursor_with_pos.pos.side) else {
                return;
            };

            cursor_with_pos.cursor = new_cursor;
        }
        crate::handler::Handler::MovableList(h) => {
            let Some(new_cursor) = h.get_cursor(new_pos, cursor_with_pos.pos.side) else {
                return;
            };

            cursor_with_pos.cursor = new_cursor;
        }
        crate::handler::Handler::Map(_) => {}
        crate::handler::Handler::Tree(_) => {}
        crate::handler::Handler::Unknown(_) => {}
        #[cfg(feature = "counter")]
        crate::handler::Handler::Counter(_) => {}
    }
}
153
/// UndoManager is responsible for managing undo/redo from the current peer's perspective.
///
/// Undo/redo is local: it cannot be used to undo the changes made by other peers.
/// If you want to undo changes made by other peers, you may need to use the time travel feature.
///
/// PeerID cannot be changed during the lifetime of the UndoManager
pub struct UndoManager {
    /// Peer whose ops this manager records; updated via the peer-id-change subscription.
    peer: Arc<AtomicU64>,
    /// Redirects container IDs that were replaced (e.g. re-created tree-node meta containers).
    container_remap: Arc<Mutex<FxHashMap<ContainerID, ContainerID>>>,
    /// Shared mutable state; reentrant because event callbacks may fire while held.
    inner: Arc<parking_lot::ReentrantMutex<RefCell<UndoManagerInner>>>,
    // Subscriptions kept alive for the manager's lifetime.
    _peer_id_change_sub: Subscription,
    _undo_sub: Subscription,
    doc: LoroDoc,
}
168
169impl std::fmt::Debug for UndoManager {
170    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
171        f.debug_struct("UndoManager")
172            .field("peer", &self.peer)
173            .field("container_remap", &self.container_remap)
174            .field("inner", &self.inner)
175            .finish()
176    }
177}
178
/// Distinguishes whether a stack operation is an undo or a redo.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum UndoOrRedo {
    Undo,
    Redo,
}

impl UndoOrRedo {
    /// Returns the complementary kind: undo <-> redo.
    fn opposite(&self) -> UndoOrRedo {
        if matches!(self, Self::Undo) {
            Self::Redo
        } else {
            Self::Undo
        }
    }
}
193
/// When an undo/redo item is pushed, the undo manager will call the on_push callback to get the meta data of the undo item.
/// The returned cursors will be recorded for a new pushed undo item.
pub type OnPush = Box<
    dyn for<'a> Fn(UndoOrRedo, CounterSpan, Option<DiffEvent<'a>>) -> UndoItemMeta + Send + Sync,
>;
/// Called when an undo/redo item is popped; receives the item's span and its
/// (possibly cursor-transformed) metadata.
pub type OnPop = Box<dyn Fn(UndoOrRedo, CounterSpan, UndoItemMeta) + Send + Sync>;
200
struct UndoManagerInner {
    /// Counter where the next checkpoint span will start; `None` after a checkout.
    next_counter: Option<Counter>,
    undo_stack: Stack,
    redo_stack: Stack,
    /// True while an undo/redo is being applied, so the resulting local
    /// events are not themselves recorded as checkpoints.
    processing_undo: bool,
    /// Timestamp of the last non-merged push (same unit as `get_sys_timestamp`).
    last_undo_time: i64,
    /// Pushes closer together than this many ms are merged into one item.
    merge_interval_in_ms: i64,
    /// Oldest undo items are dropped beyond this limit.
    max_stack_size: usize,
    /// Events whose origin starts with one of these prefixes are not recorded.
    exclude_origin_prefixes: Vec<Box<str>>,
    /// Selection cursors taken from the previously popped item (undo/redo loop).
    last_popped_selection: Option<Vec<CursorWithPos>>,
    on_push: Option<OnPush>,
    on_pop: Option<OnPop>,
    /// Active undo group, if `group_start` was called.
    group: Option<UndoGroup>,
}
215
216impl std::fmt::Debug for UndoManagerInner {
217    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
218        f.debug_struct("UndoManagerInner")
219            .field("latest_counter", &self.next_counter)
220            .field("undo_stack", &self.undo_stack)
221            .field("redo_stack", &self.redo_stack)
222            .field("processing_undo", &self.processing_undo)
223            .field("last_undo_time", &self.last_undo_time)
224            .field("merge_interval", &self.merge_interval_in_ms)
225            .field("max_stack_size", &self.max_stack_size)
226            .field("exclude_origin_prefixes", &self.exclude_origin_prefixes)
227            .field("group", &self.group)
228            .finish()
229    }
230}
231
/// State of an active undo group started via `UndoManager::group_start`.
#[derive(Debug, Clone, Default)]
struct UndoGroup {
    /// Counter at which the group began; used to detect the first push in the group.
    start_counter: Counter,
    /// Containers touched by local events while the group is active.
    affected_cids: FxHashSet<ContainerID>,
}

impl UndoGroup {
    /// Creates a group starting at `start_counter` with no affected containers yet.
    pub fn new(start_counter: Counter) -> Self {
        Self {
            start_counter,
            affected_cids: Default::default(),
        }
    }
}
246
/// Undo/redo stack organized in rows: each row pairs a queue of items with
/// the remote `DiffBatch` accumulated while those items were on top.
#[derive(Debug)]
struct Stack {
    stack: VecDeque<(VecDeque<StackItem>, Arc<Mutex<DiffBatch>>)>,
    // Total number of items across all rows (not the number of rows).
    size: usize,
}
252
/// A single undoable/redoable unit: a span of this peer's ops plus the
/// metadata captured when it was pushed.
#[derive(Debug, Clone)]
struct StackItem {
    span: CounterSpan,
    meta: UndoItemMeta,
}
258
/// The metadata of an undo item.
///
/// The cursors inside the metadata will be transformed by remote operations as well.
/// So that when the item is popped, users can restore the cursors position correctly.
#[derive(Debug, Default, Clone)]
pub struct UndoItemMeta {
    /// Arbitrary user-supplied value attached to the item (defaults to `Null`).
    pub value: LoroValue,
    /// Cursor positions to restore when this item is popped.
    pub cursors: Vec<CursorWithPos>,
}
268
/// A cursor together with the absolute position it had when recorded.
#[derive(Debug, Clone)]
pub struct CursorWithPos {
    pub cursor: Cursor,
    pub pos: AbsolutePosition,
}
274
275impl UndoItemMeta {
276    pub fn new() -> Self {
277        Self {
278            value: LoroValue::Null,
279            cursors: Default::default(),
280        }
281    }
282
283    /// It's assumed that the cursor is just acquired before the ops that
284    /// need to be undo/redo.
285    ///
286    /// We need to rely on the validity of the original pos value
287    pub fn add_cursor(&mut self, cursor: &Cursor) {
288        self.cursors.push(CursorWithPos {
289            cursor: cursor.clone(),
290            pos: AbsolutePosition {
291                pos: cursor.origin_pos,
292                side: cursor.side,
293            },
294        });
295    }
296
297    pub fn set_value(&mut self, value: LoroValue) {
298        self.value = value;
299    }
300}
301
impl Stack {
    /// Creates a stack with a single empty row.
    ///
    /// Each row pairs a queue of `StackItem`s with the remote `DiffBatch`
    /// that accumulated while those items were on top of the stack.
    pub fn new() -> Self {
        let mut stack = VecDeque::new();
        stack.push_back((VecDeque::new(), Arc::new(Mutex::new(Default::default()))));
        Stack { stack, size: 0 }
    }

    /// Peek the top-most StackItem's metadata without modifying the stack.
    ///
    /// Returns None if the stack is empty.
    fn peek_top_meta(&self) -> Option<UndoItemMeta> {
        if self.is_empty() {
            return None;
        }

        // Rows may be empty (kept alive for their remote diff), so scan from
        // the back for the first row that still holds an item.
        for (items, _) in self.stack.iter().rev() {
            if let Some(item) = items.back() {
                return Some(item.meta.clone());
            }
        }

        None
    }

    /// Pops the top item together with the remote diff of its row.
    pub fn pop(&mut self) -> Option<(StackItem, Arc<Mutex<DiffBatch>>)> {
        // Drop exhausted trailing rows, folding their remote diffs into the
        // row below so that no remote-change information is lost.
        while self.stack.back().unwrap().0.is_empty() && self.stack.len() > 1 {
            let (_, diff) = self.stack.pop_back().unwrap();
            let diff = diff.lock();
            if !diff.cid_to_events.is_empty() {
                self.stack.back_mut().unwrap().1.lock().compose(&diff);
            }
        }

        if self.stack.len() == 1 && self.stack.back().unwrap().0.is_empty() {
            // If the stack is empty, we need to clear the remote diff
            self.stack.back_mut().unwrap().1.lock().clear();
            return None;
        }

        self.size -= 1;
        let last = self.stack.back_mut().unwrap();
        last.0.pop_back().map(|x| (x, last.1.clone()))
        // If this row in stack is empty, we don't pop it right away
        // Because we still need the remote diff to be available.
        // Cursor position transformation relies on the remote diff in the same row.
    }

    /// Pushes an item without attempting to merge it with the top item.
    pub fn push(&mut self, span: CounterSpan, meta: UndoItemMeta) {
        self.push_with_merge(span, meta, false, None)
    }

    /// Pushes an item, optionally merging it into the top item when the
    /// spans are contiguous.
    pub fn push_with_merge(
        &mut self,
        span: CounterSpan,
        meta: UndoItemMeta,
        can_merge: bool,
        group: Option<&UndoGroup>,
    ) {
        let last = self.stack.back_mut().unwrap();
        let last_remote_diff = last.1.lock();

        // Check if the remote diff is disjoint with the current undo group
        let is_disjoint_group = group.is_some_and(|g| {
            g.affected_cids.iter().all(|cid| {
                last_remote_diff
                    .cid_to_events
                    .get(cid)
                    .is_none_or(|diff| diff.is_empty())
            })
        });

        // Can't merge if remote diffs exist and it's not disjoint with the current undo group
        let should_create_new_entry =
            !last_remote_diff.cid_to_events.is_empty() && !is_disjoint_group;

        if should_create_new_entry {
            // Create a new entry in the stack
            drop(last_remote_diff);
            let mut v = VecDeque::new();
            v.push_back(StackItem { span, meta });
            self.stack
                .push_back((v, Arc::new(Mutex::new(DiffBatch::default()))));
            self.size += 1;
            return;
        }

        // Try to merge with the previous entry if allowed
        if can_merge {
            if let Some(last_span) = last.0.back_mut() {
                if last_span.span.end == span.start {
                    // Merge spans by extending the end of the last span
                    last_span.span.end = span.end;
                    return;
                }
            }
        }

        // Add as a new item to the existing entry
        self.size += 1;
        last.0.push_back(StackItem { span, meta });
    }

    /// Composes a remote event into the top row's remote diff, so stored
    /// items can later be transformed against it.
    pub fn compose_remote_event(&mut self, diff: &[&ContainerDiff]) {
        if self.is_empty() {
            return;
        }

        let remote_diff = &mut self.stack.back_mut().unwrap().1;
        let mut remote_diff = remote_diff.lock();
        for e in diff {
            if let Some(d) = remote_diff.cid_to_events.get_mut(&e.id) {
                d.compose_ref(&e.diff);
            } else {
                remote_diff
                    .cid_to_events
                    .insert(e.id.clone(), e.diff.clone());
                remote_diff.order.push(e.id.clone());
            }
        }
    }

    /// Transforms the top row's remote diff against `diff` (applied during
    /// an undo/redo; see the <transform_delta> call site in `perform`).
    pub fn transform_based_on_this_delta(&mut self, diff: &DiffBatch) {
        if self.is_empty() {
            return;
        }
        let remote_diff = &mut self.stack.back_mut().unwrap().1;
        remote_diff.lock().transform(diff, false);
    }

    /// Resets the stack to a single empty row.
    pub fn clear(&mut self) {
        self.stack = VecDeque::new();
        self.stack.push_back((VecDeque::new(), Default::default()));
        self.size = 0;
    }

    /// True when no items are stored (rows may still exist).
    pub fn is_empty(&self) -> bool {
        self.size == 0
    }

    /// Total number of items across all rows.
    pub fn len(&self) -> usize {
        self.size
    }

    /// Drops the oldest item (used to enforce `max_stack_size`).
    fn pop_front(&mut self) {
        if self.is_empty() {
            return;
        }

        self.size -= 1;
        let first = self.stack.front_mut().unwrap();
        let f = first.0.pop_front();
        assert!(f.is_some());
        if first.0.is_empty() {
            self.stack.pop_front();
        }
    }

    /// Overwrites the metadata of the top-most item, if any.
    fn set_top_meta(&mut self, meta: UndoItemMeta) {
        let Some(top) = self.stack.back_mut() else {
            return;
        };
        let Some(last) = top.0.back_mut() else {
            return;
        };
        last.meta = meta;
    }
}
469
470impl Default for Stack {
471    fn default() -> Self {
472        Stack::new()
473    }
474}
475
impl UndoManagerInner {
    /// Creates fresh inner state whose next checkpoint will start at
    /// `last_counter`.
    fn new(last_counter: Counter) -> Self {
        Self {
            next_counter: Some(last_counter),
            undo_stack: Default::default(),
            redo_stack: Default::default(),
            processing_undo: false,
            merge_interval_in_ms: 0,
            last_undo_time: 0,
            max_stack_size: usize::MAX,
            exclude_origin_prefixes: vec![],
            last_popped_selection: None,
            on_pop: None,
            on_push: None,
            group: None,
        }
    }

    /// Returns true if a given container diff is disjoint with the current group.
    /// They are disjoint if they have no overlap in changed container ids.
    fn is_disjoint_with_group(&self, diff: &[&ContainerDiff]) -> bool {
        let Some(group) = &self.group else {
            // No active group: report "not disjoint" so callers treat the
            // event conservatively.
            return false;
        };

        diff.iter().all(|d| !group.affected_cids.contains(&d.id))
    }

    /// Records the local ops in `[next_counter, latest_counter)` as an undo
    /// checkpoint, merging with the previous item when the merge interval or
    /// an active group allows it. Clears the redo stack.
    ///
    /// Takes the `RefCell` rather than `&mut self` because the `on_push`
    /// callback must be invoked without holding a mutable borrow.
    fn record_checkpoint(this: &RefCell<Self>, latest_counter: Counter, event: Option<DiffEvent>) {
        let previous_counter = this.borrow().next_counter;

        // Nothing new since the last checkpoint.
        if Some(latest_counter) == this.borrow().next_counter {
            return;
        }

        // After a checkout the counter is unknown; just re-anchor it.
        if this.borrow().next_counter.is_none() {
            this.borrow_mut().next_counter = Some(latest_counter);
            return;
        }

        // Track which containers the active group touches, used later to
        // decide whether remote events are disjoint from the group.
        if let Some(group) = &mut this.borrow_mut().group {
            event.iter().for_each(|e| {
                e.events.iter().for_each(|e| {
                    group.affected_cids.insert(e.id.clone());
                })
            });
        }

        let now = get_sys_timestamp() as Timestamp;
        let span = CounterSpan::new(this.borrow().next_counter.unwrap(), latest_counter);
        let meta = this
            .borrow()
            .on_push
            .as_ref()
            .map(|x| x(UndoOrRedo::Undo, span, event))
            .unwrap_or_default();

        let mut this = this.borrow_mut();
        let this: &mut Self = &mut this;
        // Whether the change is within the accepted merge interval
        let in_merge_interval = now - this.last_undo_time < this.merge_interval_in_ms;

        // If group is active, but there is nothing in the group, don't merge
        // If the group is active and it's not the first push in the group, merge
        let group_should_merge = this.group.is_some()
            && match (
                previous_counter,
                this.group.as_ref().map(|g| g.start_counter),
            ) {
                (Some(previous), Some(active)) => previous != active,
                _ => true,
            };

        let should_merge = !this.undo_stack.is_empty() && (in_merge_interval || group_should_merge);

        if should_merge {
            this.undo_stack
                .push_with_merge(span, meta, true, this.group.as_ref());
        } else {
            this.last_undo_time = now;
            this.undo_stack.push(span, meta);
        }

        this.next_counter = Some(latest_counter);
        // Any new local change invalidates the redo history.
        this.redo_stack.clear();
        // Enforce the stack-size limit by dropping the oldest items.
        while this.undo_stack.len() > this.max_stack_size {
            this.undo_stack.pop_front();
        }
    }
}
566
567fn get_counter_end(doc: &LoroDoc, peer: PeerID) -> Counter {
568    doc.oplog().lock().vv().get(&peer).cloned().unwrap_or(0)
569}
570
571impl UndoManager {
    /// Creates an UndoManager bound to `doc`'s current peer ID.
    ///
    /// Subscribes to the doc's events:
    /// - local events from this peer become undo checkpoints (unless their
    ///   origin matches an excluded prefix);
    /// - import events are composed into both stacks so stored items can
    ///   later be transformed against them;
    /// - checkout events invalidate both stacks.
    ///
    /// Also subscribes to peer-ID changes, which clear both stacks because
    /// recorded spans belong to the old peer.
    pub fn new(doc: &LoroDoc) -> Self {
        let peer = Arc::new(AtomicU64::new(doc.peer_id()));
        let peer_clone = peer.clone();
        let peer_clone2 = peer.clone();
        let inner = Arc::new(ReentrantMutex::new(RefCell::new(UndoManagerInner::new(
            get_counter_end(doc, doc.peer_id()),
        ))));
        let inner_clone = inner.clone();
        let inner_clone2 = inner.clone();
        let remap_containers = Arc::new(Mutex::new(FxHashMap::default()));
        let remap_containers_clone = remap_containers.clone();
        let undo_sub = doc.subscribe_root(Arc::new(move |event| match event.event_meta.by {
            EventTriggerKind::Local => {
                // TODO: PERF undo can be significantly faster if we can get
                // the DiffBatch for undo here
                let lock = inner_clone.lock();
                // Skip events generated by our own undo/redo application.
                if lock.borrow().processing_undo {
                    return;
                }
                if let Some(id) = event
                    .event_meta
                    .to
                    .iter()
                    .find(|x| x.peer == peer_clone.load(std::sync::atomic::Ordering::Relaxed))
                {
                    let should_exclude = lock
                        .borrow()
                        .exclude_origin_prefixes
                        .iter()
                        .any(|x| event.event_meta.origin.starts_with(&**x));
                    if should_exclude {
                        // If the event is from the excluded origin, we don't record it
                        // in the undo stack. But we need to record its effect like it's
                        // a remote event.
                        let mut inner = lock.borrow_mut();
                        inner.undo_stack.compose_remote_event(event.events);
                        inner.redo_stack.compose_remote_event(event.events);
                        inner.next_counter = Some(id.counter + 1);
                    } else {
                        UndoManagerInner::record_checkpoint(&lock, id.counter + 1, Some(event));
                    }
                }
            }
            EventTriggerKind::Import => {
                let lock = inner_clone.lock();
                let mut inner = lock.borrow_mut();

                for e in event.events {
                    if let Diff::Tree(tree) = &e.diff {
                        for item in &tree.diff {
                            let target = item.target;
                            if let TreeExternalDiff::Create { .. } = &item.action {
                                // If the concurrent event is a create event, it may bring the deleted tree node back,
                                // so we need to remove it from the remap of the container.
                                remap_containers_clone
                                    .lock()
                                    .remove(&target.associated_meta_container());
                            }
                        }
                    }
                }

                let is_import_disjoint = inner.is_disjoint_with_group(event.events);

                inner.undo_stack.compose_remote_event(event.events);
                inner.redo_stack.compose_remote_event(event.events);

                // If the import is not disjoint, we end the active group
                // all subsequent changes will be new undo items
                if !is_import_disjoint {
                    inner.group = None;
                }
            }
            EventTriggerKind::Checkout => {
                // The doc moved to a different version; existing undo/redo
                // items no longer apply.
                let lock = inner_clone.lock();
                let mut inner = lock.borrow_mut();
                inner.undo_stack.clear();
                inner.redo_stack.clear();
                inner.next_counter = None;
            }
        }));

        let sub = doc.subscribe_peer_id_change(Box::new(move |id| {
            let lock = inner_clone2.lock();
            let mut inner = lock.borrow_mut();
            inner.undo_stack.clear();
            inner.redo_stack.clear();
            inner.next_counter = Some(id.counter);
            peer_clone2.store(id.peer, std::sync::atomic::Ordering::Relaxed);
            true
        }));

        UndoManager {
            peer,
            container_remap: remap_containers,
            inner,
            _peer_id_change_sub: sub,
            _undo_sub: undo_sub,
            doc: doc.clone(),
        }
    }
673
674    pub fn group_start(&self) -> LoroResult<()> {
675        let lock = self.inner.lock();
676        let mut inner = lock.borrow_mut();
677
678        if inner.group.is_some() {
679            return Err(LoroError::UndoGroupAlreadyStarted);
680        }
681
682        inner.group =
683            Some(UndoGroup::new(inner.next_counter.ok_or_else(|| {
684                LoroError::Unknown("UndoManager is not ready".into())
685            })?));
686
687        Ok(())
688    }
689
690    pub fn group_end(&self) {
691        self.inner.lock().borrow_mut().group = None;
692    }
693
694    pub fn peer(&self) -> PeerID {
695        self.peer.load(std::sync::atomic::Ordering::Relaxed)
696    }
697
698    pub fn set_merge_interval(&self, interval: i64) {
699        self.inner.lock().borrow_mut().merge_interval_in_ms = interval;
700    }
701
702    pub fn set_max_undo_steps(&self, size: usize) {
703        self.inner.lock().borrow_mut().max_stack_size = size;
704    }
705
706    pub fn add_exclude_origin_prefix(&self, prefix: &str) {
707        self.inner
708            .lock()
709            .borrow_mut()
710            .exclude_origin_prefixes
711            .push(prefix.into());
712    }
713
    /// Commits any pending local ops and records them as a new undo
    /// checkpoint up to the current end of this peer's op log.
    pub fn record_new_checkpoint(&self) -> LoroResult<()> {
        // Use implicit-style barrier to preserve next-commit options across
        // an empty commit before undo/redo processing.
        self.doc.with_barrier(|| {});
        let counter = get_counter_end(&self.doc, self.peer());
        UndoManagerInner::record_checkpoint(&self.inner.lock(), counter, None);
        Ok(())
    }
722
    /// Undoes the top item of the undo stack, pushing the inverse onto the
    /// redo stack. The returned bool reports whether anything was undone
    /// (see `perform`).
    #[instrument(skip_all)]
    pub fn undo(&self) -> LoroResult<bool> {
        self.perform(
            |x| &mut x.undo_stack,
            |x| &mut x.redo_stack,
            UndoOrRedo::Undo,
        )
    }
731
    /// Redoes the top item of the redo stack, pushing the inverse onto the
    /// undo stack. The returned bool reports whether anything was redone
    /// (see `perform`).
    #[instrument(skip_all)]
    pub fn redo(&self) -> LoroResult<bool> {
        self.perform(
            |x| &mut x.redo_stack,
            |x| &mut x.undo_stack,
            UndoOrRedo::Redo,
        )
    }
740
741    fn perform(
742        &self,
743        get_stack: impl Fn(&mut UndoManagerInner) -> &mut Stack,
744        get_opposite: impl Fn(&mut UndoManagerInner) -> &mut Stack,
745        kind: UndoOrRedo,
746    ) -> LoroResult<bool> {
747        let doc = &self.doc.clone();
748        // When in the undo/redo loop, the new undo/redo stack item should restore the selection
749        // to the state it was in before the item that was popped two steps ago from the stack.
750        //
751        //                          ┌────────────┐
752        //                          │Selection 1 │
753        //                          └─────┬──────┘
754        //                                │   Some
755        //                                ▼   ops
756        //                          ┌────────────┐
757        //                          │Selection 2 │
758        //                          └─────┬──────┘
759        //                                │   Some
760        //                                ▼   ops
761        //                          ┌────────────┐
762        //                          │Selection 3 │◁ ─ ─ ─ ─ ─ ─ ─  Restore  ─ ─ ─
763        //                          └─────┬──────┘                               │
764        //                                │
765        //                                │                                      │
766        //                                │                              ┌ ─ ─ ─ ─ ─ ─ ─
767        //           Enter the            │   Undo ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─▶   Push Redo   │
768        //           undo/redo ─ ─ ─ ▶    ▼                              └ ─ ─ ─ ─ ─ ─ ─
769        //             loop         ┌────────────┐                               │
770        //                          │Selection 2 │◁─ ─ ─  Restore  ─
771        //                          └─────┬──────┘                  │            │
772        //                                │
773        //                                │                         │            │
774        //                                │                 ┌ ─ ─ ─ ─ ─ ─ ─
775        //                                │   Undo ─ ─ ─ ─ ▶   Push Redo   │     │
776        //                                ▼                 └ ─ ─ ─ ─ ─ ─ ─
777        //                          ┌────────────┐                  │            │
778        //                          │Selection 1 │
779        //                          └─────┬──────┘                  │            │
780        //                                │   Redo ◀ ─ ─ ─ ─ ─ ─ ─ ─
781        //                                ▼                                      │
782        //                          ┌────────────┐
783        //         ┌   Restore   ─ ▷│Selection 2 │                               │
784        //                          └─────┬──────┘
785        //         │                      │                                      │
786        // ┌ ─ ─ ─ ─ ─ ─ ─                │
787        //    Push Undo   │◀─ ─ ─ ─ ─ ─ ─ │   Redo ◀ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┘
788        // └ ─ ─ ─ ─ ─ ─ ─                ▼
789        //         │                ┌────────────┐
790        //                          │Selection 3 │
791        //         │                └─────┬──────┘
792        //          ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ▶ │   Undo
793        //                                ▼
794        //                          ┌────────────┐
795        //                          │Selection 2 │
796        //                          └────────────┘
797        //
798        // Because users may change the selections during the undo/redo loop, it's
799        // more stable to keep the selection stored in the last stack item
800        // rather than using the current selection directly.
801        self.record_new_checkpoint()?;
802        let end_counter = get_counter_end(doc, self.peer());
803        let mut top = {
804            let lock = self.inner.lock();
805            let mut inner = lock.borrow_mut();
806            inner.processing_undo = true;
807            get_stack(&mut inner).pop()
808        };
809
810        let mut executed = false;
811        while let Some((mut span, remote_diff)) = top {
812            let mut next_push_selection = None;
813            {
814                let inner = self.inner.clone();
815                // We need to clone this because otherwise <transform_delta> will be applied to the same remote diff
816                let remote_change_clone = remote_diff.lock().clone();
817                let commit = doc.undo_internal(
818                    IdSpan {
819                        peer: self.peer(),
820                        counter: span.span,
821                    },
822                    &mut self.container_remap.lock(),
823                    Some(&remote_change_clone),
824                    &mut |diff| {
825                        info_span!("transform remote diff").in_scope(|| {
826                            let inner = inner.lock();
827                            // <transform_delta>
828                            get_stack(&mut inner.borrow_mut()).transform_based_on_this_delta(diff);
829                        });
830                    },
831                )?;
832                drop(commit);
833                let inner = self.inner.lock();
834                let mut is_some = false;
835
836                if let Some(on_pop) = inner.borrow().on_pop.as_ref() {
837                    is_some = true;
838                    for cursor in span.meta.cursors.iter_mut() {
839                        // <cursor_transform> We need to transform cursor here.
840                        // Note that right now <transform_delta> is already done,
841                        // remote_diff is also transformed by it now (that's what we need).
842                        transform_cursor(
843                            cursor,
844                            &remote_diff.lock(),
845                            doc,
846                            &self.container_remap.lock(),
847                        );
848                    }
849
850                    on_pop(kind, span.span, span.meta.clone());
851                }
852                if is_some {
853                    let take = inner.borrow_mut().last_popped_selection.take();
854                    next_push_selection = take;
855                    inner.borrow_mut().last_popped_selection = Some(span.meta.cursors);
856                }
857            }
858            let new_counter = get_counter_end(doc, self.peer());
859            if end_counter != new_counter {
860                let inner = self.inner.lock();
861                let mut meta = inner
862                    .borrow()
863                    .on_push
864                    .as_ref()
865                    .map(|x| {
866                        x(
867                            kind.opposite(),
868                            CounterSpan::new(end_counter, new_counter),
869                            None,
870                        )
871                    })
872                    .unwrap_or_default();
873
874                if matches!(kind, UndoOrRedo::Undo)
875                    && get_opposite(&mut inner.borrow_mut()).is_empty()
876                {
877                    // If it's the first undo, we use the cursors from the users
878                } else if let Some(inner) = next_push_selection.take() {
879                    // Otherwise, we use the cursors from the undo/redo loop
880                    meta.cursors = inner;
881                }
882
883                get_opposite(&mut inner.borrow_mut())
884                    .push(CounterSpan::new(end_counter, new_counter), meta);
885                inner.borrow_mut().next_counter = Some(new_counter);
886                executed = true;
887                break;
888            } else {
889                // continue to pop the undo item as this undo is a no-op
890                top = get_stack(&mut self.inner.lock().borrow_mut()).pop();
891                continue;
892            }
893        }
894
895        self.inner.lock().borrow_mut().processing_undo = false;
896        Ok(executed)
897    }
898
899    pub fn can_undo(&self) -> bool {
900        !self.inner.lock().borrow().undo_stack.is_empty()
901    }
902
903    pub fn can_redo(&self) -> bool {
904        !self.inner.lock().borrow().redo_stack.is_empty()
905    }
906
907    pub fn undo_count(&self) -> usize {
908        self.inner.lock().borrow().undo_stack.len()
909    }
910
911    pub fn redo_count(&self) -> usize {
912        self.inner.lock().borrow().redo_stack.len()
913    }
914
915    /// Get the metadata of the top undo stack item, if any.
916    pub fn top_undo_meta(&self) -> Option<UndoItemMeta> {
917        self.inner.lock().borrow().undo_stack.peek_top_meta()
918    }
919
920    /// Get the metadata of the top redo stack item, if any.
921    pub fn top_redo_meta(&self) -> Option<UndoItemMeta> {
922        self.inner.lock().borrow().redo_stack.peek_top_meta()
923    }
924
925    /// Get the value associated with the top undo stack item, if any.
926    pub fn top_undo_value(&self) -> Option<LoroValue> {
927        self.top_undo_meta().map(|m| m.value)
928    }
929
930    /// Get the value associated with the top redo stack item, if any.
931    pub fn top_redo_value(&self) -> Option<LoroValue> {
932        self.top_redo_meta().map(|m| m.value)
933    }
934
935    pub fn set_on_push(&self, on_push: Option<OnPush>) {
936        self.inner.lock().borrow_mut().on_push = on_push;
937    }
938
939    pub fn set_on_pop(&self, on_pop: Option<OnPop>) {
940        self.inner.lock().borrow_mut().on_pop = on_pop;
941    }
942
943    pub fn clear(&self) {
944        self.inner.lock().borrow_mut().undo_stack.clear();
945        self.inner.lock().borrow_mut().redo_stack.clear();
946    }
947
948    /// Clear only the redo stack, preserving the undo stack.
949    pub fn clear_redo(&self) {
950        self.inner.lock().borrow_mut().redo_stack.clear();
951    }
952
953    /// Clear only the undo stack, preserving the redo stack.
954    pub fn clear_undo(&self) {
955        self.inner.lock().borrow_mut().undo_stack.clear();
956    }
957
958    pub fn set_top_undo_meta(&self, meta: UndoItemMeta) {
959        self.inner.lock().borrow_mut().undo_stack.set_top_meta(meta);
960    }
961
962    pub fn set_top_redo_meta(&self, meta: UndoItemMeta) {
963        self.inner.lock().borrow_mut().redo_stack.set_top_meta(meta);
964    }
965}
966
967/// Undo the given spans of operations.
968///
969/// # Parameters
970///
971/// - `spans`: A vector of tuples where each tuple contains an `IdSpan` and its associated `Frontiers`.
972///   - `IdSpan`: Represents a span of operations identified by an ID.
973///   - `Frontiers`: Represents the deps of the given id_span
974/// - `latest_frontiers`: The latest frontiers of the document
975/// - `calc_diff`: A closure that takes two `Frontiers` and calculates the difference between them, returning a `DiffBatch`.
976///
977/// # Returns
978///
979/// - `DiffBatch`: Applying this batch on the `latest_frontiers` will undo the ops in the given spans.
980pub(crate) fn undo(
981    spans: Vec<(IdSpan, Frontiers)>,
982    last_frontiers_or_last_bi: Either<&Frontiers, &DiffBatch>,
983    calc_diff: impl Fn(&Frontiers, &Frontiers) -> DiffBatch,
984    on_last_event_a: &mut dyn FnMut(&DiffBatch),
985) -> DiffBatch {
986    // The process of performing undo is:
987    //
988    // 0. Split the span into a series of continuous spans. There is no external dep within each continuous span.
989    //
990    // For each continuous span_i:
991    //
992    // 1. a. Calculate the event of checkout from id_span.last to id_span.deps, call it Ai. It undo the ops in the current span.
993    //    b. Calculate A'i = Ai + T(Ci-1, Ai) if i > 0, otherwise A'i = Ai.
994    //       NOTE: A'i can undo the ops in the current span and the previous spans, if it's applied on the id_span.last version.
995    // 2. Calculate the event of checkout from id_span.last to [the next span's last id] or [the latest version], call it Bi.
996    // 3. Transform event A'i based on Bi, call it Ci
997    // 4. If span_i is the last span, apply Ci to the current state.
998
999    // -------------------------------------------------------
1000    // 0. Split the span into a series of continuous spans
1001    // -------------------------------------------------------
1002
1003    let mut last_ci: Option<DiffBatch> = None;
1004    for i in 0..spans.len() {
1005        debug_span!("Undo", ?i, "Undo span {:?}", &spans[i]).in_scope(|| {
1006            let (this_id_span, this_deps) = &spans[i];
1007            // ---------------------------------------
1008            // 1.a Calc event A_i
1009            // ---------------------------------------
1010            let mut event_a_i = debug_span!("1. Calc event A_i").in_scope(|| {
1011                // Checkout to the last id of the id_span
1012                calc_diff(&this_id_span.id_last().into(), this_deps)
1013            });
1014
1015            // println!("event_a_i: {:?}", event_a_i);
1016
1017            // ---------------------------------------
1018            // 2. Calc event B_i
1019            // ---------------------------------------
1020            let stack_diff_batch;
1021            let event_b_i = 'block: {
1022                let next = if i + 1 < spans.len() {
1023                    spans[i + 1].0.id_last().into()
1024                } else {
1025                    match last_frontiers_or_last_bi {
1026                        Either::Left(last_frontiers) => last_frontiers.clone(),
1027                        Either::Right(right) => break 'block right,
1028                    }
1029                };
1030                stack_diff_batch = Some(calc_diff(&this_id_span.id_last().into(), &next));
1031                stack_diff_batch.as_ref().unwrap()
1032            };
1033
1034            // println!("event_b_i: {:?}", event_b_i);
1035
1036            // event_a_prime can undo the ops in the current span and the previous spans
1037            let mut event_a_prime = if let Some(mut last_ci) = last_ci.take() {
1038                // ------------------------------------------------------------------------------
1039                // 1.b Transform and apply Ci-1 based on Ai, call it A'i
1040                // ------------------------------------------------------------------------------
1041                last_ci.transform(&event_a_i, true);
1042
1043                event_a_i.compose(&last_ci);
1044                event_a_i
1045            } else {
1046                event_a_i
1047            };
1048            if i == spans.len() - 1 {
1049                on_last_event_a(&event_a_prime);
1050            }
1051            // --------------------------------------------------
1052            // 3. Transform event A'_i based on B_i, call it C_i
1053            // --------------------------------------------------
1054            event_a_prime.transform(event_b_i, true);
1055
1056            // println!("event_a_prime: {:?}", event_a_prime);
1057
1058            let c_i = event_a_prime;
1059            last_ci = Some(c_i);
1060        });
1061    }
1062
1063    last_ci.unwrap()
1064}