use std::{collections::VecDeque, sync::Arc};

use crate::sync::{AtomicU64, Mutex};
use either::Either;
use fxhash::{FxHashMap, FxHashSet};
use loro_common::{
    ContainerID, Counter, CounterSpan, HasIdSpan, IdSpan, LoroError, LoroResult, LoroValue, PeerID,
};
use tracing::{debug_span, info_span, instrument};

use crate::{
    change::{get_sys_timestamp, Timestamp},
    cursor::{AbsolutePosition, Cursor},
    delta::TreeExternalDiff,
    event::{Diff, EventTriggerKind},
    version::Frontiers,
    ContainerDiff, DiffEvent, DocDiff, LoroDoc, Subscription,
};
/// A batch of container diffs keyed by container ID, with the IDs also kept
/// in `order` (first-seen order) so events can be replayed deterministically.
#[derive(Debug, Clone, Default)]
pub struct DiffBatch {
    pub cid_to_events: FxHashMap<ContainerID, Diff>,
    pub order: Vec<ContainerID>,
}

impl DiffBatch {
    pub fn new(diff: Vec<DocDiff>) -> Self {
        let mut map: FxHashMap<ContainerID, Diff> = Default::default();
        let mut order: Vec<ContainerID> = Vec::with_capacity(diff.len());
        for d in diff.into_iter() {
            for item in d.diff.into_iter() {
                // Every container should appear at most once across the
                // given `DocDiff`s.
                let old = map.insert(item.id.clone(), item.diff);
                assert!(old.is_none());
                order.push(item.id.clone());
            }
        }

        Self {
            cid_to_events: map,
            order,
        }
    }

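    /// Compose `other` on top of `self`: diffs for containers already present
    /// are composed in place, and containers only present in `other` are
    /// appended, preserving `other`'s order.
    ///
    /// A minimal sketch of the intended semantics (hypothetical batches; the
    /// concrete `Diff` values depend on the container types involved):
    ///
    /// ```ignore
    /// // `a` inserts "he" into a text container, `b` then inserts "llo".
    /// a.compose(&b);
    /// // `a` now holds a single text diff equivalent to inserting "hello".
    /// ```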
    pub fn compose(&mut self, other: &Self) {
        if other.cid_to_events.is_empty() {
            return;
        }

        for (id, diff) in other.iter() {
            if let Some(this_diff) = self.cid_to_events.get_mut(id) {
                this_diff.compose_ref(diff);
            } else {
                self.cid_to_events.insert(id.clone(), diff.clone());
                self.order.push(id.clone());
            }
        }
    }

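    /// Transform (rebase) every diff in `self` over the concurrent diffs in
    /// `other`; `left_priority` decides which side wins when both edit the
    /// same position.
    ///
    /// A sketch, assuming two concurrent insertions at index 0 of the same
    /// text container (`mine` and `theirs` are hypothetical batches):
    ///
    /// ```ignore
    /// // `mine` inserts "a" at 0; `theirs` inserts "b" at 0.
    /// mine.transform(&theirs, false);
    /// // Without priority, `mine`'s insertion is shifted to index 1, i.e.
    /// // it now applies after "b".
    /// ```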
    pub fn transform(&mut self, other: &Self, left_priority: bool) {
        if other.cid_to_events.is_empty() || self.cid_to_events.is_empty() {
            return;
        }

        for (cid, diff) in self.cid_to_events.iter_mut() {
            if let Some(b_diff) = other.cid_to_events.get(cid) {
                diff.transform(b_diff, left_priority);
            }
        }
    }

    pub fn clear(&mut self) {
        self.cid_to_events.clear();
        self.order.clear();
    }

    pub fn iter(&self) -> impl Iterator<Item = (&ContainerID, &Diff)> + '_ {
        self.order
            .iter()
            .map(|cid| (cid, self.cid_to_events.get(cid).unwrap()))
    }

    #[allow(clippy::should_implement_trait)]
    pub fn into_iter(self) -> impl Iterator<Item = (ContainerID, Diff)> {
        let mut cid_to_events = self.cid_to_events;
        self.order.into_iter().map(move |cid| {
            let d = cid_to_events.remove(&cid).unwrap();
            (cid, d)
        })
    }
}

/// Transform `cursor_with_pos` by the remote diff, then re-resolve it against
/// the current document state so that its position stays valid.
fn transform_cursor(
    cursor_with_pos: &mut CursorWithPos,
    remote_diff: &DiffBatch,
    doc: &LoroDoc,
    container_remap: &FxHashMap<ContainerID, ContainerID>,
) {
    let mut cid = &cursor_with_pos.cursor.container;
    // Follow the remap chain: the container may have been re-created by
    // undo/redo, in which case the cursor should target the newest one.
    while let Some(new_cid) = container_remap.get(cid) {
        cid = new_cid;
    }

    if let Some(diff) = remote_diff.cid_to_events.get(cid) {
        let new_pos = diff.transform_cursor(cursor_with_pos.pos.pos, false);
        cursor_with_pos.pos.pos = new_pos;
    }

    let new_pos = cursor_with_pos.pos.pos;
    match doc.get_handler(cid.clone()).unwrap() {
        crate::handler::Handler::Text(h) => {
            let Some(new_cursor) = h.get_cursor_internal(new_pos, cursor_with_pos.pos.side, false)
            else {
                return;
            };

            cursor_with_pos.cursor = new_cursor;
        }
        crate::handler::Handler::List(h) => {
            let Some(new_cursor) = h.get_cursor(new_pos, cursor_with_pos.pos.side) else {
                return;
            };

            cursor_with_pos.cursor = new_cursor;
        }
        crate::handler::Handler::MovableList(h) => {
            let Some(new_cursor) = h.get_cursor(new_pos, cursor_with_pos.pos.side) else {
                return;
            };

            cursor_with_pos.cursor = new_cursor;
        }
        crate::handler::Handler::Map(_) => {}
        crate::handler::Handler::Tree(_) => {}
        crate::handler::Handler::Unknown(_) => {}
        #[cfg(feature = "counter")]
        crate::handler::Handler::Counter(_) => {}
    }
}

/// `UndoManager` records the changes made by the local peer and provides
/// undo/redo on top of them. Remote changes are never undone; instead they
/// are tracked so that local undo items can be transformed over them.
pub struct UndoManager {
    peer: Arc<AtomicU64>,
    container_remap: Arc<Mutex<FxHashMap<ContainerID, ContainerID>>>,
    inner: Arc<Mutex<UndoManagerInner>>,
    _peer_id_change_sub: Subscription,
    _undo_sub: Subscription,
    doc: LoroDoc,
}

impl std::fmt::Debug for UndoManager {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("UndoManager")
            .field("peer", &self.peer)
            .field("container_remap", &self.container_remap)
            .field("inner", &self.inner)
            .finish()
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum UndoOrRedo {
    Undo,
    Redo,
}

impl UndoOrRedo {
    fn opposite(&self) -> UndoOrRedo {
        match self {
            Self::Undo => Self::Redo,
            Self::Redo => Self::Undo,
        }
    }
}

/// Callback fired when a new item is pushed onto the undo or redo stack. It
/// receives the stack kind, the item's counter span, and the triggering event
/// (if any), and returns the metadata to store with the item.
pub type OnPush = Box<
    dyn for<'a> Fn(UndoOrRedo, CounterSpan, Option<DiffEvent<'a>>) -> UndoItemMeta + Send + Sync,
>;
/// Callback fired when an item is popped, receiving the metadata recorded by
/// `OnPush` (with its cursors transformed up to the present).
pub type OnPop = Box<dyn Fn(UndoOrRedo, CounterSpan, UndoItemMeta) + Send + Sync>;

/// Mutable state of the `UndoManager`, shared with the document
/// subscriptions.
struct UndoManagerInner {
    next_counter: Option<Counter>,
    undo_stack: Stack,
    redo_stack: Stack,
    processing_undo: bool,
    last_undo_time: i64,
    merge_interval_in_ms: i64,
    max_stack_size: usize,
    exclude_origin_prefixes: Vec<Box<str>>,
    last_popped_selection: Option<Vec<CursorWithPos>>,
    on_push: Option<OnPush>,
    on_pop: Option<OnPop>,
    group: Option<UndoGroup>,
}

impl std::fmt::Debug for UndoManagerInner {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("UndoManagerInner")
            .field("next_counter", &self.next_counter)
            .field("undo_stack", &self.undo_stack)
            .field("redo_stack", &self.redo_stack)
            .field("processing_undo", &self.processing_undo)
            .field("last_undo_time", &self.last_undo_time)
            .field("merge_interval", &self.merge_interval_in_ms)
            .field("max_stack_size", &self.max_stack_size)
            .field("exclude_origin_prefixes", &self.exclude_origin_prefixes)
            .field("group", &self.group)
            .finish()
    }
}

/// An open undo group.
///
/// While a group is open, local changes merge into a single undo item;
/// `affected_cids` records which containers the group has touched so that
/// conflicts with incoming remote events can be detected.
#[derive(Debug, Clone, Default)]
struct UndoGroup {
    start_counter: Counter,
    affected_cids: FxHashSet<ContainerID>,
}

impl UndoGroup {
    pub fn new(start_counter: Counter) -> Self {
        Self {
            start_counter,
            affected_cids: Default::default(),
        }
    }
}

/// An undo or redo stack.
///
/// Items are grouped into segments; each segment shares one `DiffBatch` that
/// accumulates the remote events which arrived after the segment's items were
/// pushed. Popping an item also yields that batch, which the item's span must
/// be transformed against.
#[derive(Debug)]
struct Stack {
    stack: VecDeque<(VecDeque<StackItem>, Arc<Mutex<DiffBatch>>)>,
    size: usize,
}

#[derive(Debug, Clone)]
struct StackItem {
    span: CounterSpan,
    meta: UndoItemMeta,
}

/// Metadata attached to an undo/redo item.
///
/// The recorded cursors are transformed by remote operations while the item
/// sits on the stack, so popping the item can restore the selection to a
/// still-valid position.
#[derive(Debug, Default, Clone)]
pub struct UndoItemMeta {
    pub value: LoroValue,
    pub cursors: Vec<CursorWithPos>,
}

#[derive(Debug, Clone)]
pub struct CursorWithPos {
    pub cursor: Cursor,
    pub pos: AbsolutePosition,
}

impl UndoItemMeta {
    pub fn new() -> Self {
        Self {
            value: LoroValue::Null,
            cursors: Default::default(),
        }
    }

    pub fn add_cursor(&mut self, cursor: &Cursor) {
        // The cursor is assumed to have been acquired just before the ops to
        // be undone/redone, so its `origin_pos` is still a valid absolute
        // position here.
        self.cursors.push(CursorWithPos {
            cursor: cursor.clone(),
            pos: AbsolutePosition {
                pos: cursor.origin_pos,
                side: cursor.side,
            },
        });
    }

    pub fn set_value(&mut self, value: LoroValue) {
        self.value = value;
    }
}

impl Stack {
    pub fn new() -> Self {
        let mut stack = VecDeque::new();
        stack.push_back((VecDeque::new(), Arc::new(Mutex::new(Default::default()))));
        Stack { stack, size: 0 }
    }

    pub fn pop(&mut self) -> Option<(StackItem, Arc<Mutex<DiffBatch>>)> {
        // Drop empty segments, folding their remote diffs into the previous
        // segment so that no remote events are lost.
        while self.stack.back().unwrap().0.is_empty() && self.stack.len() > 1 {
            let (_, diff) = self.stack.pop_back().unwrap();
            let diff = diff.lock().unwrap();
            if !diff.cid_to_events.is_empty() {
                self.stack
                    .back_mut()
                    .unwrap()
                    .1
                    .lock()
                    .unwrap()
                    .compose(&diff);
            }
        }

        if self.stack.len() == 1 && self.stack.back().unwrap().0.is_empty() {
            // The stack is empty; clear the leftover remote diff as well.
            self.stack.back_mut().unwrap().1.lock().unwrap().clear();
            return None;
        }

        self.size -= 1;
        let last = self.stack.back_mut().unwrap();
        last.0.pop_back().map(|x| (x, last.1.clone()))
    }

    pub fn push(&mut self, span: CounterSpan, meta: UndoItemMeta) {
        self.push_with_merge(span, meta, false, None)
    }

    pub fn push_with_merge(
        &mut self,
        span: CounterSpan,
        meta: UndoItemMeta,
        can_merge: bool,
        group: Option<&UndoGroup>,
    ) {
        let last = self.stack.back_mut().unwrap();
        let last_remote_diff = last.1.lock().unwrap();

        // A group keeps merging across remote events as long as those events
        // touch none of the containers the group has affected.
        let is_disjoint_group = group.is_some_and(|g| {
            g.affected_cids.iter().all(|cid| {
                last_remote_diff
                    .cid_to_events
                    .get(cid)
                    .is_none_or(|diff| diff.is_empty())
            })
        });

        // Once relevant remote events have arrived, new items can no longer
        // join the current segment.
        let should_create_new_entry =
            !last_remote_diff.cid_to_events.is_empty() && !is_disjoint_group;

        if should_create_new_entry {
            drop(last_remote_diff);
            // Start a fresh segment with its own (empty) remote diff.
            let mut v = VecDeque::new();
            v.push_back(StackItem { span, meta });
            self.stack
                .push_back((v, Arc::new(Mutex::new(DiffBatch::default()))));
            self.size += 1;
            return;
        }

        if can_merge {
            if let Some(last_span) = last.0.back_mut() {
                if last_span.span.end == span.start {
                    // Merge the new span into the top item.
                    last_span.span.end = span.end;
                    return;
                }
            }
        }

        self.size += 1;
        last.0.push_back(StackItem { span, meta });
    }

    /// Fold a remote event into the remote diff of the top segment.
    pub fn compose_remote_event(&mut self, diff: &[&ContainerDiff]) {
        if self.is_empty() {
            return;
        }

        let remote_diff = &mut self.stack.back_mut().unwrap().1;
        let mut remote_diff = remote_diff.lock().unwrap();
        for e in diff {
            if let Some(d) = remote_diff.cid_to_events.get_mut(&e.id) {
                d.compose_ref(&e.diff);
            } else {
                remote_diff
                    .cid_to_events
                    .insert(e.id.clone(), e.diff.clone());
                remote_diff.order.push(e.id.clone());
            }
        }
    }

    /// Transform the top segment's remote diff over `diff`, the delta that an
    /// undo/redo has just applied locally.
    pub fn transform_based_on_this_delta(&mut self, diff: &DiffBatch) {
        if self.is_empty() {
            return;
        }
        let remote_diff = &mut self.stack.back_mut().unwrap().1;
        remote_diff.lock().unwrap().transform(diff, false);
    }

    pub fn clear(&mut self) {
        self.stack = VecDeque::new();
        self.stack.push_back((VecDeque::new(), Default::default()));
        self.size = 0;
    }

    pub fn is_empty(&self) -> bool {
        self.size == 0
    }

    pub fn len(&self) -> usize {
        self.size
    }

    /// Drop the oldest item; used to enforce `max_stack_size`.
    fn pop_front(&mut self) {
        if self.is_empty() {
            return;
        }

        self.size -= 1;
        let first = self.stack.front_mut().unwrap();
        let f = first.0.pop_front();
        assert!(f.is_some());
        if first.0.is_empty() {
            self.stack.pop_front();
        }
    }
}
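
// A worked sketch of how stack segments evolve (hypothetical call sequence):
//
//   push(A); push(B)           // one segment [A, B]; its remote diff is empty
//   compose_remote_event(R)    // the segment's remote diff becomes R
//   push(C)                    // R is non-empty => a fresh segment [C]
//   pop() -> (C, empty diff)   // popped from the newest segment
//   pop() -> (B, R)            // the empty segment is dropped, its diff is
//                              // folded back; B must be transformed over R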

impl Default for Stack {
    fn default() -> Self {
        Stack::new()
    }
}

impl UndoManagerInner {
    fn new(last_counter: Counter) -> Self {
        Self {
            next_counter: Some(last_counter),
            undo_stack: Default::default(),
            redo_stack: Default::default(),
            processing_undo: false,
            merge_interval_in_ms: 0,
            last_undo_time: 0,
            max_stack_size: usize::MAX,
            exclude_origin_prefixes: vec![],
            last_popped_selection: None,
            on_pop: None,
            on_push: None,
            group: None,
        }
    }

    /// Whether the given remote diff touches none of the containers the open
    /// group has affected. Returns `false` when no group is open.
    fn is_disjoint_with_group(&self, diff: &[&ContainerDiff]) -> bool {
        let Some(group) = &self.group else {
            return false;
        };

        diff.iter().all(|d| !group.affected_cids.contains(&d.id))
    }

    fn record_checkpoint(&mut self, latest_counter: Counter, event: Option<DiffEvent>) {
        let previous_counter = self.next_counter;

        if Some(latest_counter) == self.next_counter {
            return;
        }

        if self.next_counter.is_none() {
            self.next_counter = Some(latest_counter);
            return;
        }

        if let Some(group) = &mut self.group {
            event.iter().for_each(|e| {
                e.events.iter().for_each(|e| {
                    group.affected_cids.insert(e.id.clone());
                })
            });
        }

        let now = get_sys_timestamp() as Timestamp;
        let span = CounterSpan::new(self.next_counter.unwrap(), latest_counter);
        let meta = self
            .on_push
            .as_ref()
            .map(|x| x(UndoOrRedo::Undo, span, event))
            .unwrap_or_default();

        let in_merge_interval = now - self.last_undo_time < self.merge_interval_in_ms;

        // Merge into the open group, except for the group's very first
        // change, which starts a fresh undo item.
        let group_should_merge = self.group.is_some()
            && match (
                previous_counter,
                self.group.as_ref().map(|g| g.start_counter),
            ) {
                (Some(previous), Some(active)) => previous != active,
                _ => true,
            };

        let should_merge = !self.undo_stack.is_empty() && (in_merge_interval || group_should_merge);

        if should_merge {
            self.undo_stack
                .push_with_merge(span, meta, true, self.group.as_ref());
        } else {
            self.last_undo_time = now;
            self.undo_stack.push(span, meta);
        }

        self.next_counter = Some(latest_counter);
        self.redo_stack.clear();
        while self.undo_stack.len() > self.max_stack_size {
            self.undo_stack.pop_front();
        }
    }
}

/// The end counter of `peer` in the doc's current version vector, i.e. the
/// counter at which the peer's next op will start.
fn get_counter_end(doc: &LoroDoc, peer: PeerID) -> Counter {
    doc.oplog()
        .lock()
        .unwrap()
        .vv()
        .get(&peer)
        .cloned()
        .unwrap_or(0)
}

impl UndoManager {
    pub fn new(doc: &LoroDoc) -> Self {
        let peer = Arc::new(AtomicU64::new(doc.peer_id()));
        let peer_clone = peer.clone();
        let peer_clone2 = peer.clone();
        let inner = Arc::new(Mutex::new(UndoManagerInner::new(get_counter_end(
            doc,
            doc.peer_id(),
        ))));
        let inner_clone = inner.clone();
        let inner_clone2 = inner.clone();
        let remap_containers = Arc::new(Mutex::new(FxHashMap::default()));
        let remap_containers_clone = remap_containers.clone();
        let undo_sub = doc.subscribe_root(Arc::new(move |event| match event.event_meta.by {
            EventTriggerKind::Local => {
                let Ok(mut inner) = inner_clone.lock() else {
                    return;
                };
                if inner.processing_undo {
                    return;
                }
                if let Some(id) = event
                    .event_meta
                    .to
                    .iter()
                    .find(|x| x.peer == peer_clone.load(std::sync::atomic::Ordering::Relaxed))
                {
                    if inner
                        .exclude_origin_prefixes
                        .iter()
                        .any(|x| event.event_meta.origin.starts_with(&**x))
                    {
                        // Changes from excluded origins are not recorded as
                        // undoable; treat them like remote events so that
                        // existing stack items are still transformed over
                        // them.
                        inner.undo_stack.compose_remote_event(event.events);
                        inner.redo_stack.compose_remote_event(event.events);
                        inner.next_counter = Some(id.counter + 1);
                    } else {
                        inner.record_checkpoint(id.counter + 1, Some(event));
                    }
                }
            }
            EventTriggerKind::Import => {
                let mut inner = inner_clone.lock().unwrap();

                for e in event.events {
                    if let Diff::Tree(tree) = &e.diff {
                        for item in &tree.diff {
                            let target = item.target;
                            if let TreeExternalDiff::Create { .. } = &item.action {
                                // A concurrent create may reuse the id of a
                                // container we remapped after deletion; drop
                                // the stale remap entry.
                                remap_containers_clone
                                    .lock()
                                    .unwrap()
                                    .remove(&target.associated_meta_container());
                            }
                        }
                    }
                }

                let is_import_disjoint = inner.is_disjoint_with_group(event.events);

                inner.undo_stack.compose_remote_event(event.events);
                inner.redo_stack.compose_remote_event(event.events);

                // A remote change touching the group's containers closes the
                // group; later local changes start new undo items.
                if !is_import_disjoint {
                    inner.group = None;
                }
            }
            EventTriggerKind::Checkout => {
                // Checkout moves the doc to a different version, so the
                // recorded counter spans are no longer meaningful.
                let mut inner = inner_clone.lock().unwrap();
                inner.undo_stack.clear();
                inner.redo_stack.clear();
                inner.next_counter = None;
            }
        }));

        let sub = doc.subscribe_peer_id_change(Box::new(move |id| {
            let mut inner = inner_clone2.lock().unwrap();
            inner.undo_stack.clear();
            inner.redo_stack.clear();
            inner.next_counter = Some(id.counter);
            peer_clone2.store(id.peer, std::sync::atomic::Ordering::Relaxed);
            true
        }));

        UndoManager {
            peer,
            container_remap: remap_containers,
            inner,
            _peer_id_change_sub: sub,
            _undo_sub: undo_sub,
            doc: doc.clone(),
        }
    }
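
    // A minimal end-to-end sketch (hypothetical setup; the text-editing calls
    // are the usual `LoroDoc` APIs):
    //
    //   let doc = LoroDoc::new_auto_commit();
    //   let mut undo = UndoManager::new(&doc);
    //   doc.get_text("text").insert(0, "hello")?;
    //   doc.commit_then_renew();
    //   assert!(undo.can_undo());
    //   undo.undo()?;   // the text is empty again
    //   undo.redo()?;   // the text is "hello" again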

    /// Start an undo group: until `group_end` is called, consecutive local
    /// changes are merged into a single undo item (unless a conflicting
    /// remote change arrives in between).
    pub fn group_start(&mut self) -> LoroResult<()> {
        let mut inner = self.inner.lock().unwrap();

        if inner.group.is_some() {
            return Err(LoroError::UndoGroupAlreadyStarted);
        }

        inner.group = Some(UndoGroup::new(inner.next_counter.unwrap()));

        Ok(())
    }

    /// End the current undo group.
    pub fn group_end(&mut self) {
        self.inner.lock().unwrap().group = None;
    }
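
    // Sketch of grouping (hypothetical edits; both changes land in one undo
    // item as long as no conflicting remote change arrives in between):
    //
    //   undo.group_start()?;
    //   doc.get_text("text").insert(0, "a")?;
    //   doc.get_text("text").insert(1, "b")?;
    //   undo.group_end();
    //   undo.undo()?;   // removes "a" and "b" in a single step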

    pub fn peer(&self) -> PeerID {
        self.peer.load(std::sync::atomic::Ordering::Relaxed)
    }

    /// Merge two consecutive undo items if they are recorded less than
    /// `interval` milliseconds apart.
    pub fn set_merge_interval(&mut self, interval: i64) {
        self.inner.lock().unwrap().merge_interval_in_ms = interval;
    }

    /// Cap the number of undo items; the oldest items are dropped first.
    pub fn set_max_undo_steps(&mut self, size: usize) {
        self.inner.lock().unwrap().max_stack_size = size;
    }

    /// Local changes whose commit origin starts with `prefix` are not
    /// recorded as undoable.
    pub fn add_exclude_origin_prefix(&mut self, prefix: &str) {
        self.inner
            .lock()
            .unwrap()
            .exclude_origin_prefixes
            .push(prefix.into());
    }

    /// Commit any pending ops and record an undo checkpoint up to the current
    /// end of the local peer's ops.
    pub fn record_new_checkpoint(&mut self) -> LoroResult<()> {
        self.doc.commit_then_renew();
        let counter = get_counter_end(&self.doc, self.peer());
        self.inner.lock().unwrap().record_checkpoint(counter, None);
        Ok(())
    }

    /// Undo the most recent local change. Returns `Ok(true)` if an undo was
    /// applied.
    #[instrument(skip_all)]
    pub fn undo(&mut self) -> LoroResult<bool> {
        self.perform(
            |x| &mut x.undo_stack,
            |x| &mut x.redo_stack,
            UndoOrRedo::Undo,
        )
    }

    /// Redo the most recently undone change. Returns `Ok(true)` if a redo was
    /// applied.
    #[instrument(skip_all)]
    pub fn redo(&mut self) -> LoroResult<bool> {
        self.perform(
            |x| &mut x.redo_stack,
            |x| &mut x.undo_stack,
            UndoOrRedo::Redo,
        )
    }

    fn perform(
        &mut self,
        get_stack: impl Fn(&mut UndoManagerInner) -> &mut Stack,
        get_opposite: impl Fn(&mut UndoManagerInner) -> &mut Stack,
        kind: UndoOrRedo,
    ) -> LoroResult<bool> {
        let doc = &self.doc.clone();
        self.record_new_checkpoint()?;
        // Pop items until one actually produces ops. Each new item pushed
        // onto the opposite stack should restore the selection to the state
        // it was in before the previously popped item was applied;
        // `next_push_selection` carries that selection across iterations via
        // `last_popped_selection`.
        let end_counter = get_counter_end(doc, self.peer());
        let mut top = {
            let mut inner = self.inner.lock().unwrap();
            inner.processing_undo = true;
            get_stack(&mut inner).pop()
        };

        let mut executed = false;
        while let Some((mut span, remote_diff)) = top {
            let mut next_push_selection = None;
            {
                let inner = self.inner.clone();
                let remote_change_clone = remote_diff.lock().unwrap().clone();
                let commit = doc.undo_internal(
                    IdSpan {
                        peer: self.peer(),
                        counter: span.span,
                    },
                    &mut self.container_remap.lock().unwrap(),
                    Some(&remote_change_clone),
                    &mut |diff| {
                        info_span!("transform remote diff").in_scope(|| {
                            let mut inner = inner.lock().unwrap();
                            // Keep the remaining stack items consistent with
                            // the delta this undo/redo just applied.
                            get_stack(&mut inner).transform_based_on_this_delta(diff);
                        });
                    },
                )?;
                drop(commit);
                let mut inner = self.inner.lock().unwrap();
                if let Some(x) = inner.on_pop.as_ref() {
                    for cursor in span.meta.cursors.iter_mut() {
                        // Bring the recorded cursors up to date with the
                        // remote changes that arrived while the item sat on
                        // the stack.
                        transform_cursor(
                            cursor,
                            &remote_diff.lock().unwrap(),
                            doc,
                            &self.container_remap.lock().unwrap(),
                        );
                    }

                    x(kind, span.span, span.meta.clone());
                    let take = inner.last_popped_selection.take();
                    next_push_selection = take;
                    inner.last_popped_selection = Some(span.meta.cursors);
                }
            }
            let new_counter = get_counter_end(doc, self.peer());
            if end_counter != new_counter {
                let mut inner = self.inner.lock().unwrap();
                let mut meta = inner
                    .on_push
                    .as_ref()
                    .map(|x| {
                        x(
                            kind.opposite(),
                            CounterSpan::new(end_counter, new_counter),
                            None,
                        )
                    })
                    .unwrap_or_default();

                if matches!(kind, UndoOrRedo::Undo) && get_opposite(&mut inner).is_empty() {
                    // The first undo keeps the cursors recorded on push.
                } else if let Some(inner) = next_push_selection.take() {
                    // Otherwise restore the selection recorded before the
                    // previously popped item.
                    meta.cursors = inner;
                }

                get_opposite(&mut inner).push(CounterSpan::new(end_counter, new_counter), meta);
                inner.next_counter = Some(new_counter);
                executed = true;
                break;
            } else {
                // This span produced no ops (its effects were already undone
                // by remote changes); try the next item.
                top = get_stack(&mut self.inner.lock().unwrap()).pop();
                continue;
            }
        }

        self.inner.lock().unwrap().processing_undo = false;
        Ok(executed)
    }

    pub fn can_undo(&self) -> bool {
        !self.inner.lock().unwrap().undo_stack.is_empty()
    }

    pub fn can_redo(&self) -> bool {
        !self.inner.lock().unwrap().redo_stack.is_empty()
    }

    pub fn undo_count(&self) -> usize {
        self.inner.lock().unwrap().undo_stack.len()
    }

    pub fn redo_count(&self) -> usize {
        self.inner.lock().unwrap().redo_stack.len()
    }

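    // Sketch of the callback hooks (hypothetical closures; a real handler
    // would capture the current selection as `Cursor`s):
    //
    //   undo.set_on_push(Some(Box::new(|_kind, _span, _event| {
    //       let mut meta = UndoItemMeta::new();
    //       // meta.add_cursor(&cursor);  // remember the selection
    //       meta
    //   })));
    //   undo.set_on_pop(Some(Box::new(|_kind, _span, _meta| {
    //       // restore the selection from `_meta.cursors`
    //   })));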
    pub fn set_on_push(&self, on_push: Option<OnPush>) {
        self.inner.lock().unwrap().on_push = on_push;
    }

    pub fn set_on_pop(&self, on_pop: Option<OnPop>) {
        self.inner.lock().unwrap().on_pop = on_pop;
    }

    pub fn clear(&self) {
        self.inner.lock().unwrap().undo_stack.clear();
        self.inner.lock().unwrap().redo_stack.clear();
    }
}

/// Compute a `DiffBatch` that undoes the given op spans.
///
/// - `spans`: continuous op spans to undo, each paired with its deps
///   (`Frontiers`); within each span there is no external dependency.
/// - `last_frontiers_or_last_bi`: either the frontiers the doc is currently
///   at, or the last `B_i` batch if the caller has already computed it.
/// - `calc_diff`: computes the diff between two versions.
/// - `on_last_event_a`: invoked with the final undo event before it is
///   rebased over the trailing ops (used to transform the stacks).
///
/// Applying the returned batch at the current version undoes the spans.
pub(crate) fn undo(
    spans: Vec<(IdSpan, Frontiers)>,
    last_frontiers_or_last_bi: Either<&Frontiers, &DiffBatch>,
    calc_diff: impl Fn(&Frontiers, &Frontiers) -> DiffBatch,
    on_last_event_a: &mut dyn FnMut(&DiffBatch),
) -> DiffBatch {
    let mut last_ci: Option<DiffBatch> = None;
    // For each continuous span_i:
    //   1. A_i  = calc_diff(span_i.last, span_i.deps): the event that undoes
    //      the ops in span_i.
    //   2. B_i  = calc_diff(span_i.last, next): everything between span_i and
    //      the next span (or the final version / the supplied last B).
    //   3. A_i' = A_i ∘ T(C_{i-1}, A_i): fold in the previous result so that
    //      A_i' undoes this span and all the previous ones.
    //   4. C_i  = T(A_i', B_i): rebase the undo event over the later ops.
    for i in 0..spans.len() {
        debug_span!("Undo", ?i, "Undo span {:?}", &spans[i]).in_scope(|| {
            let (this_id_span, this_deps) = &spans[i];
            // 1. Calculate event A_i.
            let mut event_a_i = debug_span!("1. Calc event A_i").in_scope(|| {
                calc_diff(&this_id_span.id_last().into(), this_deps)
            });

            // 2. Calculate event B_i.
            let stack_diff_batch;
            let event_b_i = 'block: {
                let next = if i + 1 < spans.len() {
                    spans[i + 1].0.id_last().into()
                } else {
                    match last_frontiers_or_last_bi {
                        Either::Left(last_frontiers) => last_frontiers.clone(),
                        Either::Right(right) => break 'block right,
                    }
                };
                stack_diff_batch = Some(calc_diff(&this_id_span.id_last().into(), &next));
                stack_diff_batch.as_ref().unwrap()
            };

            // 3. A_i' = A_i ∘ T(C_{i-1}, A_i).
            let mut event_a_prime = if let Some(mut last_ci) = last_ci.take() {
                last_ci.transform(&event_a_i, true);
                event_a_i.compose(&last_ci);
                event_a_i
            } else {
                event_a_i
            };
            if i == spans.len() - 1 {
                on_last_event_a(&event_a_prime);
            }
            // 4. C_i = T(A_i', B_i).
            event_a_prime.transform(event_b_i, true);

            let c_i = event_a_prime;
            last_ci = Some(c_i);
        });
    }

    last_ci.unwrap()
}
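
// In operational-transformation notation, the loop above computes (∘ composes
// diffs; T(x, y) transforms x over y):
//
//   A_i' = A_i ∘ T(C_{i-1}, A_i)     (with A_0' = A_0)
//   C_i  = T(A_i', B_i)
//
// so each C_i undoes spans 0..=i rebased over everything that happened after
// them, and the final C_i is returned.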