use std::{collections::VecDeque, sync::Arc};

use crate::sync::{AtomicU64, Mutex};
use either::Either;
use fxhash::{FxHashMap, FxHashSet};
use loro_common::{
    ContainerID, Counter, CounterSpan, HasIdSpan, IdSpan, LoroError, LoroResult, LoroValue, PeerID,
};
use tracing::{debug_span, info_span, instrument};

use crate::{
    change::{get_sys_timestamp, Timestamp},
    cursor::{AbsolutePosition, Cursor},
    delta::TreeExternalDiff,
    event::{Diff, EventTriggerKind},
    version::Frontiers,
    ContainerDiff, DiffEvent, DocDiff, LoroDoc, Subscription,
};

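/// An ordered batch of [`Diff`]s keyed by the container they apply to.
///
/// `order` records the order in which containers were first touched so the
/// batch can be iterated deterministically.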
#[derive(Debug, Clone, Default)]
pub struct DiffBatch {
    pub cid_to_events: FxHashMap<ContainerID, Diff>,
    pub order: Vec<ContainerID>,
}

impl DiffBatch {
    pub fn new(diff: Vec<DocDiff>) -> Self {
        let mut map: FxHashMap<ContainerID, Diff> = Default::default();
        let mut order: Vec<ContainerID> = Vec::with_capacity(diff.len());
        for d in diff.into_iter() {
            for item in d.diff.into_iter() {
                let old = map.insert(item.id.clone(), item.diff);
                assert!(old.is_none());
                order.push(item.id.clone());
            }
        }

        Self {
            cid_to_events: map,
            order,
        }
    }

    pub fn compose(&mut self, other: &Self) {
        if other.cid_to_events.is_empty() {
            return;
        }

        for (id, diff) in other.iter() {
            if let Some(this_diff) = self.cid_to_events.get_mut(id) {
                this_diff.compose_ref(diff);
            } else {
                self.cid_to_events.insert(id.clone(), diff.clone());
                self.order.push(id.clone());
            }
        }
    }

    pub fn transform(&mut self, other: &Self, left_priority: bool) {
        if other.cid_to_events.is_empty() || self.cid_to_events.is_empty() {
            return;
        }

        for (idx, diff) in self.cid_to_events.iter_mut() {
            if let Some(b_diff) = other.cid_to_events.get(idx) {
                diff.transform(b_diff, left_priority);
            }
        }
    }

    pub fn clear(&mut self) {
        self.cid_to_events.clear();
        self.order.clear();
    }

    pub fn iter(&self) -> impl Iterator<Item = (&ContainerID, &Diff)> + '_ {
        self.order
            .iter()
            .map(|cid| (cid, self.cid_to_events.get(cid).unwrap()))
    }

    #[allow(clippy::should_implement_trait)]
    pub fn into_iter(self) -> impl Iterator<Item = (ContainerID, Diff)> {
        let mut cid_to_events = self.cid_to_events;
        self.order.into_iter().map(move |cid| {
            let d = cid_to_events.remove(&cid).unwrap();
            (cid, d)
        })
    }
}
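
// Relationship between `compose` and `transform`, sketched (illustrative;
// `doc_diffs_a`/`doc_diffs_b` are hypothetical `Vec<DocDiff>` values built
// elsewhere):
//
//     let mut a = DiffBatch::new(doc_diffs_a);
//     let b = DiffBatch::new(doc_diffs_b);
//     a.compose(&b);             // "a then b" as a single batch
//     // vs.
//     // a.transform(&b, false); // rebase `a` so it applies after `b`,
//     //                         // letting `b` win on conflicting positions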

/// Transform a cursor so that it still points at the intended position after
/// the remote diff has been applied, following container remappings first.
fn transform_cursor(
    cursor_with_pos: &mut CursorWithPos,
    remote_diff: &DiffBatch,
    doc: &LoroDoc,
    container_remap: &FxHashMap<ContainerID, ContainerID>,
) {
    // Follow the remap chain to the container's current identity.
    let mut container_changed = false;
    let mut cid = &cursor_with_pos.cursor.container;
    while let Some(new_cid) = container_remap.get(cid) {
        cid = new_cid;
        container_changed = true;
    }

    // A cursor without an op id cannot be transformed by position; it only
    // needs its container updated if that was remapped.
    if cursor_with_pos.cursor.id.is_none() {
        if container_changed {
            cursor_with_pos.cursor.container = cid.clone();
        }
        return;
    }

    // Shift the cursor's absolute position across the remote diff.
    if let Some(diff) = remote_diff.cid_to_events.get(cid) {
        let new_pos = diff.transform_cursor(cursor_with_pos.pos.pos, false);
        cursor_with_pos.pos.pos = new_pos;
    }

    // Re-anchor the cursor in the (possibly remapped) container.
    let new_pos = cursor_with_pos.pos.pos;
    match doc.get_handler(cid.clone()).unwrap() {
        crate::handler::Handler::Text(h) => {
            let Some(new_cursor) = h.get_cursor_internal(new_pos, cursor_with_pos.pos.side, false)
            else {
                return;
            };

            cursor_with_pos.cursor = new_cursor;
        }
        crate::handler::Handler::List(h) => {
            let Some(new_cursor) = h.get_cursor(new_pos, cursor_with_pos.pos.side) else {
                return;
            };

            cursor_with_pos.cursor = new_cursor;
        }
        crate::handler::Handler::MovableList(h) => {
            let Some(new_cursor) = h.get_cursor(new_pos, cursor_with_pos.pos.side) else {
                return;
            };

            cursor_with_pos.cursor = new_cursor;
        }
        crate::handler::Handler::Map(_) => {}
        crate::handler::Handler::Tree(_) => {}
        crate::handler::Handler::Unknown(_) => {}
        #[cfg(feature = "counter")]
        crate::handler::Handler::Counter(_) => {}
    }
}

/// Undo/redo manager for local edits on a [`LoroDoc`].
///
/// It tracks the local peer's op spans on an undo stack and a redo stack and
/// transforms them against remote or concurrent changes as they arrive.
pub struct UndoManager {
    peer: Arc<AtomicU64>,
    container_remap: Arc<Mutex<FxHashMap<ContainerID, ContainerID>>>,
    inner: Arc<Mutex<UndoManagerInner>>,
    _peer_id_change_sub: Subscription,
    _undo_sub: Subscription,
    doc: LoroDoc,
}

impl std::fmt::Debug for UndoManager {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("UndoManager")
            .field("peer", &self.peer)
            .field("container_remap", &self.container_remap)
            .field("inner", &self.inner)
            .finish()
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum UndoOrRedo {
    Undo,
    Redo,
}

impl UndoOrRedo {
    fn opposite(&self) -> UndoOrRedo {
        match self {
            Self::Undo => Self::Redo,
            Self::Redo => Self::Undo,
        }
    }
}

/// Callback invoked when an undo/redo item is pushed; it returns the metadata
/// (e.g. the selection cursors) to store with the item.
pub type OnPush = Box<
    dyn for<'a> Fn(UndoOrRedo, CounterSpan, Option<DiffEvent<'a>>) -> UndoItemMeta + Send + Sync,
>;
/// Callback invoked when an undo/redo item is popped, receiving its stored metadata.
pub type OnPop = Box<dyn Fn(UndoOrRedo, CounterSpan, UndoItemMeta) + Send + Sync>;

struct UndoManagerInner {
    next_counter: Option<Counter>,
    undo_stack: Stack,
    redo_stack: Stack,
    processing_undo: bool,
    last_undo_time: i64,
    merge_interval_in_ms: i64,
    max_stack_size: usize,
    exclude_origin_prefixes: Vec<Box<str>>,
    last_popped_selection: Option<Vec<CursorWithPos>>,
    on_push: Option<OnPush>,
    on_pop: Option<OnPop>,
    group: Option<UndoGroup>,
}

impl std::fmt::Debug for UndoManagerInner {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("UndoManagerInner")
            .field("next_counter", &self.next_counter)
            .field("undo_stack", &self.undo_stack)
            .field("redo_stack", &self.redo_stack)
            .field("processing_undo", &self.processing_undo)
            .field("last_undo_time", &self.last_undo_time)
            .field("merge_interval_in_ms", &self.merge_interval_in_ms)
            .field("max_stack_size", &self.max_stack_size)
            .field("exclude_origin_prefixes", &self.exclude_origin_prefixes)
            .field("group", &self.group)
            .finish()
    }
}

/// A grouping session: checkpoints recorded while a group is active are merged
/// into a single undo item, and the containers they touch are tracked.
#[derive(Debug, Clone, Default)]
struct UndoGroup {
    start_counter: Counter,
    affected_cids: FxHashSet<ContainerID>,
}

impl UndoGroup {
    pub fn new(start_counter: Counter) -> Self {
        Self {
            start_counter,
            affected_cids: Default::default(),
        }
    }
}

/// Undo/redo stack, split into segments whose items share the remote diff
/// accumulated since they were pushed.
#[derive(Debug)]
struct Stack {
    stack: VecDeque<(VecDeque<StackItem>, Arc<Mutex<DiffBatch>>)>,
    size: usize,
}

#[derive(Debug, Clone)]
struct StackItem {
    span: CounterSpan,
    meta: UndoItemMeta,
}

/// Metadata stored alongside an undo/redo item, e.g. the selection to restore.
#[derive(Debug, Default, Clone)]
pub struct UndoItemMeta {
    pub value: LoroValue,
    pub cursors: Vec<CursorWithPos>,
}

#[derive(Debug, Clone)]
pub struct CursorWithPos {
    pub cursor: Cursor,
    pub pos: AbsolutePosition,
}

impl UndoItemMeta {
    pub fn new() -> Self {
        Self {
            value: LoroValue::Null,
            cursors: Default::default(),
        }
    }

    pub fn add_cursor(&mut self, cursor: &Cursor) {
        // Record the cursor together with its current absolute position so
        // that it can be transformed when the item is popped.
        self.cursors.push(CursorWithPos {
            cursor: cursor.clone(),
            pos: AbsolutePosition {
                pos: cursor.origin_pos,
                side: cursor.side,
            },
        });
    }

    pub fn set_value(&mut self, value: LoroValue) {
        self.value = value;
    }
}
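
// A minimal sketch of wiring the push/pop callbacks to save and restore a
// selection (illustrative; `undo` is an `UndoManager`, and `get_selection`/
// `set_selection` are hypothetical host-app functions):
//
//     undo.set_on_push(Some(Box::new(|_kind, _span, _event| {
//         let mut meta = UndoItemMeta::new();
//         for cursor in get_selection() {
//             meta.add_cursor(&cursor);
//         }
//         meta
//     })));
//     undo.set_on_pop(Some(Box::new(|_kind, _span, meta| {
//         set_selection(&meta.cursors);
//     })));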

impl Stack {
    pub fn new() -> Self {
        let mut stack = VecDeque::new();
        stack.push_back((VecDeque::new(), Arc::new(Mutex::new(Default::default()))));
        Stack { stack, size: 0 }
    }

    pub fn pop(&mut self) -> Option<(StackItem, Arc<Mutex<DiffBatch>>)> {
        // Drop exhausted segments, folding their accumulated remote diffs into
        // the previous segment so that no remote change is lost.
        while self.stack.back().unwrap().0.is_empty() && self.stack.len() > 1 {
            let (_, diff) = self.stack.pop_back().unwrap();
            let diff = diff.lock().unwrap();
            if !diff.cid_to_events.is_empty() {
                self.stack
                    .back_mut()
                    .unwrap()
                    .1
                    .lock()
                    .unwrap()
                    .compose(&diff);
            }
        }

        if self.stack.len() == 1 && self.stack.back().unwrap().0.is_empty() {
            // The stack is empty; the leftover remote diff is no longer needed.
            self.stack.back_mut().unwrap().1.lock().unwrap().clear();
            return None;
        }

        self.size -= 1;
        let last = self.stack.back_mut().unwrap();
        last.0.pop_back().map(|x| (x, last.1.clone()))
    }

    pub fn push(&mut self, span: CounterSpan, meta: UndoItemMeta) {
        self.push_with_merge(span, meta, false, None)
    }

    pub fn push_with_merge(
        &mut self,
        span: CounterSpan,
        meta: UndoItemMeta,
        can_merge: bool,
        group: Option<&UndoGroup>,
    ) {
        let last = self.stack.back_mut().unwrap();
        let last_remote_diff = last.1.lock().unwrap();

        // The active group is "disjoint" if none of the containers it touched
        // were affected by the remote diff accumulated in the current segment.
        let is_disjoint_group = group.is_some_and(|g| {
            g.affected_cids.iter().all(|cid| {
                last_remote_diff
                    .cid_to_events
                    .get(cid)
                    .is_none_or(|diff| diff.is_empty())
            })
        });

        // Start a new segment when remote changes have interleaved with local
        // ops, unless the active group is unaffected by them.
        let should_create_new_entry =
            !last_remote_diff.cid_to_events.is_empty() && !is_disjoint_group;

        if should_create_new_entry {
            drop(last_remote_diff);
            let mut v = VecDeque::new();
            v.push_back(StackItem { span, meta });
            self.stack
                .push_back((v, Arc::new(Mutex::new(DiffBatch::default()))));
            self.size += 1;
            return;
        }

        if can_merge {
            // Extend the previous item when the new span continues it directly.
            if let Some(last_span) = last.0.back_mut() {
                if last_span.span.end == span.start {
                    last_span.span.end = span.end;
                    return;
                }
            }
        }

        self.size += 1;
        last.0.push_back(StackItem { span, meta });
    }

    pub fn compose_remote_event(&mut self, diff: &[&ContainerDiff]) {
        if self.is_empty() {
            return;
        }

        let remote_diff = &mut self.stack.back_mut().unwrap().1;
        let mut remote_diff = remote_diff.lock().unwrap();
        for e in diff {
            if let Some(d) = remote_diff.cid_to_events.get_mut(&e.id) {
                d.compose_ref(&e.diff);
            } else {
                remote_diff
                    .cid_to_events
                    .insert(e.id.clone(), e.diff.clone());
                remote_diff.order.push(e.id.clone());
            }
        }
    }

    pub fn transform_based_on_this_delta(&mut self, diff: &DiffBatch) {
        if self.is_empty() {
            return;
        }
        let remote_diff = &mut self.stack.back_mut().unwrap().1;
        remote_diff.lock().unwrap().transform(diff, false);
    }

    pub fn clear(&mut self) {
        self.stack = VecDeque::new();
        self.stack.push_back((VecDeque::new(), Default::default()));
        self.size = 0;
    }

    pub fn is_empty(&self) -> bool {
        self.size == 0
    }

    pub fn len(&self) -> usize {
        self.size
    }

    fn pop_front(&mut self) {
        if self.is_empty() {
            return;
        }

        self.size -= 1;
        let first = self.stack.front_mut().unwrap();
        let f = first.0.pop_front();
        assert!(f.is_some());
        if first.0.is_empty() {
            self.stack.pop_front();
        }
    }
}
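
// Shape of the stack after local items i1, i2, then a remote change R, then a
// local item i3 (an illustration of the segmenting above, not an API):
//
//     [ ([i1, i2], diff{R}), ([i3], diff{}) ]
//
// Popping i3 sees an empty remote diff; once that segment is exhausted its
// diff is folded backwards, so popping i2 transforms it across R.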

impl Default for Stack {
    fn default() -> Self {
        Stack::new()
    }
}

impl UndoManagerInner {
    fn new(last_counter: Counter) -> Self {
        Self {
            next_counter: Some(last_counter),
            undo_stack: Default::default(),
            redo_stack: Default::default(),
            processing_undo: false,
            merge_interval_in_ms: 0,
            last_undo_time: 0,
            max_stack_size: usize::MAX,
            exclude_origin_prefixes: vec![],
            last_popped_selection: None,
            on_pop: None,
            on_push: None,
            group: None,
        }
    }

    /// Whether `diff` touches none of the containers recorded by the active
    /// group. Returns `false` when no group is active.
    fn is_disjoint_with_group(&self, diff: &[&ContainerDiff]) -> bool {
        let Some(group) = &self.group else {
            return false;
        };

        diff.iter().all(|d| !group.affected_cids.contains(&d.id))
    }

    fn record_checkpoint(&mut self, latest_counter: Counter, event: Option<DiffEvent>) {
        let previous_counter = self.next_counter;

        if Some(latest_counter) == self.next_counter {
            // Nothing new to record.
            return;
        }

        if self.next_counter.is_none() {
            self.next_counter = Some(latest_counter);
            return;
        }

        // Track which containers the active group has touched.
        if let Some(group) = &mut self.group {
            event.iter().for_each(|e| {
                e.events.iter().for_each(|e| {
                    group.affected_cids.insert(e.id.clone());
                })
            });
        }

        let now = get_sys_timestamp() as Timestamp;
        let span = CounterSpan::new(self.next_counter.unwrap(), latest_counter);
        let meta = self
            .on_push
            .as_ref()
            .map(|x| x(UndoOrRedo::Undo, span, event))
            .unwrap_or_default();

        // Merge with the previous item if this checkpoint falls within the
        // merge interval.
        let in_merge_interval = now - self.last_undo_time < self.merge_interval_in_ms;

        // While a group is active, everything after the group's first
        // checkpoint is merged into a single undo item.
        let group_should_merge = self.group.is_some()
            && match (
                previous_counter,
                self.group.as_ref().map(|g| g.start_counter),
            ) {
                (Some(previous), Some(active)) => previous != active,
                _ => true,
            };

        let should_merge = !self.undo_stack.is_empty() && (in_merge_interval || group_should_merge);

        if should_merge {
            self.undo_stack
                .push_with_merge(span, meta, true, self.group.as_ref());
        } else {
            self.last_undo_time = now;
            self.undo_stack.push(span, meta);
        }

        self.next_counter = Some(latest_counter);
        self.redo_stack.clear();
        while self.undo_stack.len() > self.max_stack_size {
            self.undo_stack.pop_front();
        }
    }
}
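
// Merge-interval example (illustrative): with `set_merge_interval(1000)`, a
// checkpoint recorded 500 ms after the previous push merges into the same undo
// item, while one recorded 1500 ms later starts a new item. Note that
// `last_undo_time` is only refreshed when a new item is pushed, so a run of
// rapid edits keeps merging until a pause longer than the interval occurs.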

fn get_counter_end(doc: &LoroDoc, peer: PeerID) -> Counter {
    doc.oplog()
        .lock()
        .unwrap()
        .vv()
        .get(&peer)
        .cloned()
        .unwrap_or(0)
}

impl UndoManager {
    pub fn new(doc: &LoroDoc) -> Self {
        let peer = Arc::new(AtomicU64::new(doc.peer_id()));
        let peer_clone = peer.clone();
        let peer_clone2 = peer.clone();
        let inner = Arc::new(Mutex::new(UndoManagerInner::new(get_counter_end(
            doc,
            doc.peer_id(),
        ))));
        let inner_clone = inner.clone();
        let inner_clone2 = inner.clone();
        let remap_containers = Arc::new(Mutex::new(FxHashMap::default()));
        let remap_containers_clone = remap_containers.clone();
        let undo_sub = doc.subscribe_root(Arc::new(move |event| match event.event_meta.by {
            EventTriggerKind::Local => {
                let Ok(mut inner) = inner_clone.lock() else {
                    return;
                };
                if inner.processing_undo {
                    return;
                }
                if let Some(id) = event
                    .event_meta
                    .to
                    .iter()
                    .find(|x| x.peer == peer_clone.load(std::sync::atomic::Ordering::Relaxed))
                {
                    if inner
                        .exclude_origin_prefixes
                        .iter()
                        .any(|x| event.event_meta.origin.starts_with(&**x))
                    {
                        // Excluded-origin edits are not recorded as undoable;
                        // fold their effects into both stacks like remote
                        // changes and skip past their ops.
                        inner.undo_stack.compose_remote_event(event.events);
                        inner.redo_stack.compose_remote_event(event.events);
                        inner.next_counter = Some(id.counter + 1);
                    } else {
                        inner.record_checkpoint(id.counter + 1, Some(event));
                    }
                }
            }
            EventTriggerKind::Import => {
                let mut inner = inner_clone.lock().unwrap();

                for e in event.events {
                    if let Diff::Tree(tree) = &e.diff {
                        for item in &tree.diff {
                            let target = item.target;
                            if let TreeExternalDiff::Create { .. } = &item.action {
                                // The node was (re)created remotely, so any
                                // stale remapping of its associated meta
                                // container must be dropped.
                                remap_containers_clone
                                    .lock()
                                    .unwrap()
                                    .remove(&target.associated_meta_container());
                            }
                        }
                    }
                }

                let is_import_disjoint = inner.is_disjoint_with_group(event.events);

                inner.undo_stack.compose_remote_event(event.events);
                inner.redo_stack.compose_remote_event(event.events);

                // Remote changes that touch the active group's containers end
                // the group.
                if !is_import_disjoint {
                    inner.group = None;
                }
            }
            EventTriggerKind::Checkout => {
                let mut inner = inner_clone.lock().unwrap();
                inner.undo_stack.clear();
                inner.redo_stack.clear();
                inner.next_counter = None;
            }
        }));

        let sub = doc.subscribe_peer_id_change(Box::new(move |id| {
            let mut inner = inner_clone2.lock().unwrap();
            inner.undo_stack.clear();
            inner.redo_stack.clear();
            inner.next_counter = Some(id.counter);
            peer_clone2.store(id.peer, std::sync::atomic::Ordering::Relaxed);
            true
        }));

        UndoManager {
            peer,
            container_remap: remap_containers,
            inner,
            _peer_id_change_sub: sub,
            _undo_sub: undo_sub,
            doc: doc.clone(),
        }
    }

    pub fn group_start(&mut self) -> LoroResult<()> {
        let mut inner = self.inner.lock().unwrap();

        if inner.group.is_some() {
            return Err(LoroError::UndoGroupAlreadyStarted);
        }

        inner.group = Some(UndoGroup::new(inner.next_counter.unwrap()));

        Ok(())
    }

    pub fn group_end(&mut self) {
        self.inner.lock().unwrap().group = None;
    }
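
    // A minimal grouping sketch (illustrative; `undo` is this manager and
    // `text` is assumed to be a text container of the same doc):
    //
    //     undo.group_start()?;
    //     text.insert(0, "hello")?;
    //     text.insert(5, " world")?;
    //     undo.group_end();
    //     undo.undo()?; // reverts both insertions as one step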

    pub fn peer(&self) -> PeerID {
        self.peer.load(std::sync::atomic::Ordering::Relaxed)
    }

    pub fn set_merge_interval(&mut self, interval: i64) {
        self.inner.lock().unwrap().merge_interval_in_ms = interval;
    }

    pub fn set_max_undo_steps(&mut self, size: usize) {
        self.inner.lock().unwrap().max_stack_size = size;
    }

    pub fn add_exclude_origin_prefix(&mut self, prefix: &str) {
        self.inner
            .lock()
            .unwrap()
            .exclude_origin_prefixes
            .push(prefix.into());
    }
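
    // Origin-exclusion sketch: after `undo.add_exclude_origin_prefix("sys:")`,
    // local commits whose event origin starts with "sys:" are not pushed onto
    // the undo stack; their effects are instead composed into both stacks like
    // remote changes (see the `Local` branch in `new` above).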

    pub fn record_new_checkpoint(&mut self) -> LoroResult<()> {
        self.doc.commit_then_renew();
        let counter = get_counter_end(&self.doc, self.peer());
        self.inner.lock().unwrap().record_checkpoint(counter, None);
        Ok(())
    }

    #[instrument(skip_all)]
    pub fn undo(&mut self) -> LoroResult<bool> {
        self.perform(
            |x| &mut x.undo_stack,
            |x| &mut x.redo_stack,
            UndoOrRedo::Undo,
        )
    }

    #[instrument(skip_all)]
    pub fn redo(&mut self) -> LoroResult<bool> {
        self.perform(
            |x| &mut x.redo_stack,
            |x| &mut x.undo_stack,
            UndoOrRedo::Redo,
        )
    }

    fn perform(
        &mut self,
        get_stack: impl Fn(&mut UndoManagerInner) -> &mut Stack,
        get_opposite: impl Fn(&mut UndoManagerInner) -> &mut Stack,
        kind: UndoOrRedo,
    ) -> LoroResult<bool> {
        let doc = &self.doc.clone();
        self.record_new_checkpoint()?;
        let end_counter = get_counter_end(doc, self.peer());
        let mut top = {
            let mut inner = self.inner.lock().unwrap();
            inner.processing_undo = true;
            get_stack(&mut inner).pop()
        };

        // Keep popping items until one of them actually changes the document.
        let mut executed = false;
        while let Some((mut span, remote_diff)) = top {
            let mut next_push_selection = None;
            {
                let inner = self.inner.clone();
                let remote_change_clone = remote_diff.lock().unwrap().clone();
                let commit = doc.undo_internal(
                    IdSpan {
                        peer: self.peer(),
                        counter: span.span,
                    },
                    &mut self.container_remap.lock().unwrap(),
                    Some(&remote_change_clone),
                    &mut |diff| {
                        info_span!("transform remote diff").in_scope(|| {
                            let mut inner = inner.lock().unwrap();
                            get_stack(&mut inner).transform_based_on_this_delta(diff);
                        });
                    },
                )?;
                drop(commit);
                let mut inner = self.inner.lock().unwrap();
                if let Some(x) = inner.on_pop.as_ref() {
                    for cursor in span.meta.cursors.iter_mut() {
                        // Bring the stored cursors up to date with remote
                        // changes and container remappings before reporting.
                        transform_cursor(
                            cursor,
                            &remote_diff.lock().unwrap(),
                            doc,
                            &self.container_remap.lock().unwrap(),
                        );
                    }

                    x(kind, span.span, span.meta.clone());
                    let take = inner.last_popped_selection.take();
                    next_push_selection = take;
                    inner.last_popped_selection = Some(span.meta.cursors);
                }
            }
            let new_counter = get_counter_end(doc, self.peer());
            if end_counter != new_counter {
                let mut inner = self.inner.lock().unwrap();
                let mut meta = inner
                    .on_push
                    .as_ref()
                    .map(|x| {
                        x(
                            kind.opposite(),
                            CounterSpan::new(end_counter, new_counter),
                            None,
                        )
                    })
                    .unwrap_or_default();

                // When an undo seeds the very first redo item, keep the meta's
                // own cursors; otherwise restore the previously popped
                // selection.
                let seeds_first_redo =
                    matches!(kind, UndoOrRedo::Undo) && get_opposite(&mut inner).is_empty();
                if !seeds_first_redo {
                    if let Some(selection) = next_push_selection.take() {
                        meta.cursors = selection;
                    }
                }

                get_opposite(&mut inner).push(CounterSpan::new(end_counter, new_counter), meta);
                inner.next_counter = Some(new_counter);
                executed = true;
                break;
            } else {
                // This item turned out to be a no-op (e.g. its effect was
                // already overwritten by remote changes); try the next one.
                top = get_stack(&mut self.inner.lock().unwrap()).pop();
                continue;
            }
        }

        self.inner.lock().unwrap().processing_undo = false;
        Ok(executed)
    }

    pub fn can_undo(&self) -> bool {
        !self.inner.lock().unwrap().undo_stack.is_empty()
    }

    pub fn can_redo(&self) -> bool {
        !self.inner.lock().unwrap().redo_stack.is_empty()
    }

    pub fn undo_count(&self) -> usize {
        self.inner.lock().unwrap().undo_stack.len()
    }

    pub fn redo_count(&self) -> usize {
        self.inner.lock().unwrap().redo_stack.len()
    }

    pub fn set_on_push(&self, on_push: Option<OnPush>) {
        self.inner.lock().unwrap().on_push = on_push;
    }

    pub fn set_on_pop(&self, on_pop: Option<OnPop>) {
        self.inner.lock().unwrap().on_pop = on_pop;
    }

    pub fn clear(&self) {
        let mut inner = self.inner.lock().unwrap();
        inner.undo_stack.clear();
        inner.redo_stack.clear();
    }
}
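
// A minimal end-to-end sketch (illustrative; written against the public API of
// this module, assuming a doc with a text container):
//
//     let doc = LoroDoc::new_auto_commit();
//     let mut undo = UndoManager::new(&doc);
//     undo.set_merge_interval(500); // merge edits made within 500 ms
//     let text = doc.get_text("text");
//     text.insert(0, "hello")?;
//     doc.commit_then_renew();
//     assert!(undo.can_undo());
//     undo.undo()?; // "hello" is removed
//     undo.redo()?; // "hello" is back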

/// Compute the [`DiffBatch`] that undoes the given `spans`.
///
/// Walking the spans from oldest to newest:
/// - `A_i` is the inverse effect of span `i` on its own: the diff from the
///   span's last op back to the span's dependencies;
/// - `B_i` is everything that happened after span `i`: the diff from the
///   span's last op to the next span (or, for the last span, to
///   `last_frontiers_or_last_bi`);
/// - `C_i` composes `A_i` with `C_{i-1}` transformed over `A_i`, then
///   transforms the result over `B_i` so that it applies on top of the later
///   changes.
///
/// `on_last_event_a` observes the last span's composed `A` just before it is
/// transformed over `B`. The final `C` is returned.
pub(crate) fn undo(
    spans: Vec<(IdSpan, Frontiers)>,
    last_frontiers_or_last_bi: Either<&Frontiers, &DiffBatch>,
    calc_diff: impl Fn(&Frontiers, &Frontiers) -> DiffBatch,
    on_last_event_a: &mut dyn FnMut(&DiffBatch),
) -> DiffBatch {
    let mut last_ci: Option<DiffBatch> = None;
    for i in 0..spans.len() {
        debug_span!("Undo", ?i, "Undo span {:?}", &spans[i]).in_scope(|| {
            let (this_id_span, this_deps) = &spans[i];
            // 1. A_i: the inverse of this span's own effect.
            let mut event_a_i = debug_span!("1. Calc event A_i")
                .in_scope(|| calc_diff(&this_id_span.id_last().into(), this_deps));

            // 2. B_i: everything that happened after this span.
            let stack_diff_batch;
            let event_b_i = 'block: {
                let next = if i + 1 < spans.len() {
                    spans[i + 1].0.id_last().into()
                } else {
                    match last_frontiers_or_last_bi {
                        Either::Left(last_frontiers) => last_frontiers.clone(),
                        Either::Right(right) => break 'block right,
                    }
                };
                stack_diff_batch = Some(calc_diff(&this_id_span.id_last().into(), &next));
                stack_diff_batch.as_ref().unwrap()
            };

            // 3. Fold the accumulated undo effect C_{i-1} into A_i.
            let mut event_a_prime = if let Some(mut last_ci) = last_ci.take() {
                last_ci.transform(&event_a_i, true);

                event_a_i.compose(&last_ci);
                event_a_i
            } else {
                event_a_i
            };
            if i == spans.len() - 1 {
                on_last_event_a(&event_a_prime);
            }
            // 4. Rebase over B_i to obtain C_i.
            event_a_prime.transform(event_b_i, true);

            let c_i = event_a_prime;
            last_ci = Some(c_i);
        });
    }

    last_ci.unwrap()
}