xi_rope/
engine.rs

1// Copyright 2016 The xi-editor Authors.
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//     http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15//! An engine for handling edits (possibly from async sources) and undo. It
16//! conceptually represents the current text and all edit history for that
17//! text.
18//!
19//! This module actually implements a mini Conflict-free Replicated Data Type
20//! under `Engine::edit_rev`, which is considerably simpler than the usual
21//! CRDT implementation techniques, because all operations are serialized in
22//! this central engine. It provides the ability to apply edits that depend on
23//! a previously committed version of the text rather than the current text,
24//! which is sufficient for asynchronous plugins that can only have one
25//! pending edit in flight each.
26//!
27//! There is also a full CRDT merge operation implemented under
28//! `Engine::merge`, which is more powerful but considerably more complex.
29//! It enables support for full asynchronous and even peer-to-peer editing.
30
31use std;
32use std::borrow::Cow;
33use std::collections::hash_map::DefaultHasher;
34use std::collections::BTreeSet;
35
36use crate::delta::{Delta, InsertDelta};
37use crate::interval::Interval;
38use crate::multiset::{CountMatcher, Subset};
39use crate::rope::{Rope, RopeInfo};
40
/// Represents the current state of a document and all of its history
#[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Engine {
    /// The session ID used to create new `RevId`s for edits made on this device
    #[cfg_attr(feature = "serde", serde(default = "default_session", skip_serializing))]
    session: SessionId,
    /// The incrementing revision number counter for this session used for `RevId`s
    #[cfg_attr(feature = "serde", serde(default = "initial_revision_counter", skip_serializing))]
    rev_id_counter: u32,
    /// The current contents of the document as would be displayed on screen
    text: Rope,
    /// Storage for all the characters that have been deleted but could
    /// return if a delete is undone or an insert is redone.
    tombstones: Rope,
    /// Imagine a "union string" that contained all the characters ever
    /// inserted, including the ones that were later deleted, in the locations
    /// they would be if they hadn't been deleted.
    ///
    /// This is a `Subset` of the "union string" representing the characters
    /// that are currently deleted, and thus in `tombstones` rather than
    /// `text`. The count of a character in `deletes_from_union` represents
    /// how many times it has been deleted, so if a character is deleted twice
    /// concurrently it will have count `2` so that undoing one delete but not
    /// the other doesn't make it re-appear.
    ///
    /// You could construct the "union string" from `text`, `tombstones` and
    /// `deletes_from_union` by splicing a segment of `tombstones` into `text`
    /// wherever there's a non-zero-count segment in `deletes_from_union`.
    deletes_from_union: Subset,
    // TODO: switch to a persistent Set representation to avoid O(n) copying
    /// The set of undo-group ids that are currently in the undone state.
    undone_groups: BTreeSet<usize>, // set of undo_group id's
    /// The revision history of the document
    revs: Vec<Revision>,
}
76
// The advantage of using a session ID over random numbers is that it can be
// easily delta-compressed later.
#[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct RevId {
    // 96 bits has a 10^(-12) chance of collision with 400 million sessions and 10^(-6) with 100 billion.
    // `session1==session2==0` is reserved for initialization which is the same on all sessions.
    // A colliding session will break merge invariants and the document will start crashing Xi.
    session1: u64,
    // if this was a tuple field instead of two fields, alignment padding would add 8 more bytes.
    session2: u32,
    // There will probably never be a document with more than 4 billion edits
    // in a single session.
    num: u32,
}
92
/// One entry in the `Engine`'s revision history.
#[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
struct Revision {
    /// This uniquely represents the identity of this revision and it stays
    /// the same even if it is rebased or merged between devices.
    rev_id: RevId,
    /// The largest undo group number of any edit in the history up to this
    /// point. Used to optimize undo to not look further back.
    max_undo_so_far: usize,
    /// What this revision did: a normal edit or an undo-state change.
    edit: Contents,
}
104
/// A compact identifier for a `Revision`: the hash of its `RevId`
/// (see `RevId::token`).
///
/// Valid within a session. If there's a collision the most recent matching
/// Revision will be used, which means only the (small) set of concurrent edits
/// could trigger incorrect behavior if they collide, so u64 is safe.
pub type RevToken = u64;

/// the session ID component of a `RevId`
pub type SessionId = (u64, u32);
112
/// Type for errors that occur during CRDT operations.
#[derive(Clone)]
pub enum Error {
    /// An edit specified a revision that did not exist. The revision may
    /// have been GC'd, or it may have been specified incorrectly.
    MissingRevision(RevToken),
    /// A delta was applied which had a `base_len` that did not match the length
    /// of the revision it was applied to.
    MalformedDelta { rev_len: usize, delta_len: usize },
}
123
/// A globally-comparable edit priority: the user-supplied priority broken
/// ties by session ID. The derived `Ord` compares `priority` first and
/// `session_id` second (field order is lexicographic), so two concurrent
/// edits from different sessions always order deterministically.
#[derive(Clone, Copy, PartialOrd, Ord, PartialEq, Eq)]
struct FullPriority {
    priority: usize,
    session_id: SessionId,
}
129
130use self::Contents::*;
131
/// The payload of a `Revision`: either a normal edit, or a change to the set
/// of undone groups.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
enum Contents {
    Edit {
        /// Used to order concurrent inserts, for example auto-indentation
        /// should go before typed text.
        priority: usize,
        /// Groups related edits together so that they are undone and re-done
        /// together. For example, an auto-indent insertion would be un-done
        /// along with the newline that triggered it.
        undo_group: usize,
        /// The subset of the characters of the union string from after this
        /// revision that were added by this revision.
        inserts: Subset,
        /// The subset of the characters of the union string from after this
        /// revision that were deleted by this revision.
        deletes: Subset,
    },
    Undo {
        /// The set of groups toggled between undone and done.
        /// Just the `symmetric_difference` (XOR) of the two sets.
        toggled_groups: BTreeSet<usize>, // set of undo_group id's
        /// Used to store a reversible difference between the old
        /// and new deletes_from_union
        deletes_bitxor: Subset,
    },
}
159
/// The session ID used when no explicit one is set: for single-user cases,
/// by serde deserialization and by `Engine::empty`.
///
/// `(1, 0)` rather than `(0, 0)`, since the all-zero session is reserved
/// for the shared initial revision.
fn default_session() -> (u64, u32) {
    (1, 0)
}
164
/// Revision 0 is always an Undo of the empty set of groups, so a freshly
/// deserialized `Engine` (the counter is skipped during serialization)
/// resumes numbering at 1.
#[cfg(feature = "serde")]
fn initial_revision_counter() -> u32 {
    1
}
170
171impl RevId {
172    /// Returns a u64 that will be equal for equivalent revision IDs and
173    /// should be as unlikely to collide as two random u64s.
174    pub fn token(&self) -> RevToken {
175        use std::hash::{Hash, Hasher};
176        // Rust is unlikely to break the property that this hash is strongly collision-resistant
177        // and it only needs to be consistent over one execution.
178        let mut hasher = DefaultHasher::new();
179        self.hash(&mut hasher);
180        hasher.finish()
181    }
182
183    pub fn session_id(&self) -> SessionId {
184        (self.session1, self.session2)
185    }
186}
187
impl Engine {
    /// Create a new Engine with a single edit that inserts `initial_contents`
    /// if it is non-empty. It needs to be a separate commit rather than just
    /// part of the initial contents since any two `Engine`s need a common
    /// ancestor in order to be mergeable.
    pub fn new(initial_contents: Rope) -> Engine {
        let mut engine = Engine::empty();
        if !initial_contents.is_empty() {
            let first_rev = engine.get_head_rev_id().token();
            let delta = Delta::simple_edit(Interval::new(0, 0), initial_contents, 0);
            engine.edit_rev(0, 0, first_rev, delta);
        }
        engine
    }

    /// Create an empty Engine containing only revision 0: a no-op `Undo`
    /// with the reserved all-zero `RevId`, which is identical on every
    /// session so that any two engines share a common ancestor.
    pub fn empty() -> Engine {
        let deletes_from_union = Subset::new(0);
        let rev = Revision {
            rev_id: RevId { session1: 0, session2: 0, num: 0 },
            edit: Undo {
                toggled_groups: BTreeSet::new(),
                deletes_bitxor: deletes_from_union.clone(),
            },
            max_undo_so_far: 0,
        };
        Engine {
            session: default_session(),
            rev_id_counter: 1,
            text: Rope::default(),
            tombstones: Rope::default(),
            deletes_from_union,
            undone_groups: BTreeSet::new(),
            revs: vec![rev],
        }
    }

    /// The `RevId` the next revision created by this session will receive.
    /// Does not advance the counter; callers bump `rev_id_counter` when they
    /// actually push a revision.
    fn next_rev_id(&self) -> RevId {
        RevId { session1: self.session.0, session2: self.session.1, num: self.rev_id_counter }
    }

    /// Find the index into `revs` of the revision with the given `RevId`,
    /// scanning backwards since recent revisions are looked up most often.
    fn find_rev(&self, rev_id: RevId) -> Option<usize> {
        self.revs
            .iter()
            .enumerate()
            .rev()
            .find(|&(_, ref rev)| rev.rev_id == rev_id)
            .map(|(i, _)| i)
    }

    /// Like `find_rev`, but looks the revision up by its hashed token.
    /// On a token collision the most recent match wins (see `RevToken`).
    fn find_rev_token(&self, rev_token: RevToken) -> Option<usize> {
        self.revs
            .iter()
            .enumerate()
            .rev()
            .find(|&(_, ref rev)| rev.rev_id.token() == rev_token)
            .map(|(i, _)| i)
    }

    // TODO: does Cow really help much here? It certainly won't after making Subsets a rope.
    /// Find what the `deletes_from_union` field in Engine would have been at the time
    /// of a certain `rev_index`. In other words, the deletes from the union string at that time.
    fn deletes_from_union_for_index(&self, rev_index: usize) -> Cow<Subset> {
        self.deletes_from_union_before_index(rev_index + 1, true)
    }

    /// Garbage collection means undo can sometimes need to replay the very first
    /// revision, and so needs a way to get the deletion set before then.
    fn deletes_from_union_before_index(&self, rev_index: usize, invert_undos: bool) -> Cow<Subset> {
        let mut deletes_from_union = Cow::Borrowed(&self.deletes_from_union);
        let mut undone_groups = Cow::Borrowed(&self.undone_groups);

        // invert the changes to deletes_from_union starting in the present and working backwards
        for rev in self.revs[rev_index..].iter().rev() {
            deletes_from_union = match rev.edit {
                Edit { ref inserts, ref deletes, ref undo_group, .. } => {
                    if undone_groups.contains(undo_group) {
                        // no need to un-delete undone inserts since we'll just shrink them out
                        Cow::Owned(deletes_from_union.transform_shrink(inserts))
                    } else {
                        let un_deleted = deletes_from_union.subtract(deletes);
                        Cow::Owned(un_deleted.transform_shrink(inserts))
                    }
                }
                Undo { ref toggled_groups, ref deletes_bitxor } => {
                    if invert_undos {
                        // XOR is its own inverse, so applying `deletes_bitxor`
                        // again reverses the undo's effect.
                        let new_undone =
                            undone_groups.symmetric_difference(toggled_groups).cloned().collect();
                        undone_groups = Cow::Owned(new_undone);
                        Cow::Owned(deletes_from_union.bitxor(deletes_bitxor))
                    } else {
                        deletes_from_union
                    }
                }
            }
        }
        deletes_from_union
    }

    /// Get the contents of the document at a given revision number
    fn rev_content_for_index(&self, rev_index: usize) -> Rope {
        let old_deletes_from_union = self.deletes_from_cur_union_for_index(rev_index);
        let delta =
            Delta::synthesize(&self.tombstones, &self.deletes_from_union, &old_deletes_from_union);
        delta.apply(&self.text)
    }

    /// Get the Subset to delete from the current union string in order to obtain a revision's content
    fn deletes_from_cur_union_for_index(&self, rev_index: usize) -> Cow<Subset> {
        let mut deletes_from_union = self.deletes_from_union_for_index(rev_index);
        // Characters inserted after this revision are also absent from its
        // content, so mark them deleted as well.
        for rev in &self.revs[rev_index + 1..] {
            if let Edit { ref inserts, .. } = rev.edit {
                if !inserts.is_empty() {
                    deletes_from_union = Cow::Owned(deletes_from_union.transform_union(inserts));
                }
            }
        }
        deletes_from_union
    }

    /// Returns the largest undo group ID used so far
    pub fn max_undo_group_id(&self) -> usize {
        self.revs.last().unwrap().max_undo_so_far
    }

    /// Get revision id of head revision.
    pub fn get_head_rev_id(&self) -> RevId {
        self.revs.last().unwrap().rev_id
    }

    /// Get text of head revision.
    pub fn get_head(&self) -> &Rope {
        &self.text
    }

    /// Get text of a given revision, if it can be found.
    pub fn get_rev(&self, rev: RevToken) -> Option<Rope> {
        self.find_rev_token(rev).map(|rev_index| self.rev_content_for_index(rev_index))
    }

    /// A delta that, when applied to `base_rev`, results in the current head. Returns
    /// an error if there is not at least one edit.
    pub fn try_delta_rev_head(&self, base_rev: RevToken) -> Result<Delta<RopeInfo>, Error> {
        let ix = self.find_rev_token(base_rev).ok_or_else(|| Error::MissingRevision(base_rev))?;
        let prev_from_union = self.deletes_from_cur_union_for_index(ix);
        // TODO: this does 2 calls to Delta::synthesize and 1 to apply, this probably could be better.
        let old_tombstones = shuffle_tombstones(
            &self.text,
            &self.tombstones,
            &self.deletes_from_union,
            &prev_from_union,
        );
        Ok(Delta::synthesize(&old_tombstones, &prev_from_union, &self.deletes_from_union))
    }

    // TODO: don't construct transform if subsets are empty
    // TODO: maybe switch to using a revision index for `base_rev` once we disable GC
    /// Returns a tuple of a new `Revision` representing the edit based on the
    /// current head, a new text `Rope`, a new tombstones `Rope` and a new `deletes_from_union`.
    /// Returns an [`Error`] if `base_rev` cannot be found, or `delta.base_len`
    /// does not equal the length of the text at `base_rev`.
    fn mk_new_rev(
        &self,
        new_priority: usize,
        undo_group: usize,
        base_rev: RevToken,
        delta: Delta<RopeInfo>,
    ) -> Result<(Revision, Rope, Rope, Subset), Error> {
        let ix = self.find_rev_token(base_rev).ok_or_else(|| Error::MissingRevision(base_rev))?;

        let (ins_delta, deletes) = delta.factor();

        // rebase delta to be on the base_rev union instead of the text
        let deletes_at_rev = self.deletes_from_union_for_index(ix);

        // validate delta
        if ins_delta.base_len != deletes_at_rev.len_after_delete() {
            return Err(Error::MalformedDelta {
                delta_len: ins_delta.base_len,
                rev_len: deletes_at_rev.len_after_delete(),
            });
        }

        let mut union_ins_delta = ins_delta.transform_expand(&deletes_at_rev, true);
        let mut new_deletes = deletes.transform_expand(&deletes_at_rev);

        // rebase the delta to be on the head union instead of the base_rev union
        let new_full_priority = FullPriority { priority: new_priority, session_id: self.session };
        for r in &self.revs[ix + 1..] {
            if let Edit { priority, ref inserts, .. } = r.edit {
                if !inserts.is_empty() {
                    let full_priority =
                        FullPriority { priority, session_id: r.rev_id.session_id() };
                    let after = new_full_priority >= full_priority; // should never be ==
                    union_ins_delta = union_ins_delta.transform_expand(inserts, after);
                    new_deletes = new_deletes.transform_expand(inserts);
                }
            }
        }

        // rebase the deletion to be after the inserts instead of directly on the head union
        let new_inserts = union_ins_delta.inserted_subset();
        if !new_inserts.is_empty() {
            new_deletes = new_deletes.transform_expand(&new_inserts);
        }

        // rebase insertions on text and apply
        let cur_deletes_from_union = &self.deletes_from_union;
        let text_ins_delta = union_ins_delta.transform_shrink(cur_deletes_from_union);
        let text_with_inserts = text_ins_delta.apply(&self.text);
        let rebased_deletes_from_union = cur_deletes_from_union.transform_expand(&new_inserts);

        // is the new edit in an undo group that was already undone due to concurrency?
        let undone = self.undone_groups.contains(&undo_group);
        let new_deletes_from_union = {
            // if the edit was already undone, its own inserts count as deleted
            let to_delete = if undone { &new_inserts } else { &new_deletes };
            rebased_deletes_from_union.union(to_delete)
        };

        // move deleted or undone-inserted things from text to tombstones
        let (new_text, new_tombstones) = shuffle(
            &text_with_inserts,
            &self.tombstones,
            &rebased_deletes_from_union,
            &new_deletes_from_union,
        );

        let head_rev = &self.revs.last().unwrap();
        Ok((
            Revision {
                rev_id: self.next_rev_id(),
                max_undo_so_far: std::cmp::max(undo_group, head_rev.max_undo_so_far),
                edit: Edit {
                    priority: new_priority,
                    undo_group,
                    inserts: new_inserts,
                    deletes: new_deletes,
                },
            },
            new_text,
            new_tombstones,
            new_deletes_from_union,
        ))
    }
    // NOTE: maybe just deprecate this? we can panic on the other side of
    // the call if/when that makes sense.
    /// Create a new edit based on `base_rev`.
    ///
    /// # Panics
    ///
    /// Panics if `base_rev` does not exist, or if `delta` is poorly formed.
    pub fn edit_rev(
        &mut self,
        priority: usize,
        undo_group: usize,
        base_rev: RevToken,
        delta: Delta<RopeInfo>,
    ) {
        self.try_edit_rev(priority, undo_group, base_rev, delta).unwrap();
    }

    // TODO: have `base_rev` be an index so that it can be used maximally
    // efficiently with the head revision, a token or a revision ID.
    // Efficiency loss of token is negligible but unfortunate.
    /// Attempts to apply a new edit based on the [`Revision`] specified by `base_rev`,
    /// Returning an [`Error`] if the `Revision` cannot be found.
    pub fn try_edit_rev(
        &mut self,
        priority: usize,
        undo_group: usize,
        base_rev: RevToken,
        delta: Delta<RopeInfo>,
    ) -> Result<(), Error> {
        let (new_rev, new_text, new_tombstones, new_deletes_from_union) =
            self.mk_new_rev(priority, undo_group, base_rev, delta)?;
        self.rev_id_counter += 1;
        self.revs.push(new_rev);
        self.text = new_text;
        self.tombstones = new_tombstones;
        self.deletes_from_union = new_deletes_from_union;
        Ok(())
    }

    // since undo and gc replay history with transforms, we need an empty set
    // of the union string length *before* the first revision.
    fn empty_subset_before_first_rev(&self) -> Subset {
        let first_rev = &self.revs.first().unwrap();
        // it will be immediately transform_expanded by inserts if it is an Edit, so length must be before
        let len = match first_rev.edit {
            Edit { ref inserts, .. } => inserts.count(CountMatcher::Zero),
            Undo { ref deletes_bitxor, .. } => deletes_bitxor.count(CountMatcher::All),
        };
        Subset::new(len)
    }

    /// Find the first revision that could be affected by toggling a set of undo groups
    fn find_first_undo_candidate_index(&self, toggled_groups: &BTreeSet<usize>) -> usize {
        // find the lowest toggled undo group number
        // (BTreeSet iterates in ascending order, so `next()` is the minimum)
        if let Some(lowest_group) = toggled_groups.iter().cloned().next() {
            for (i, rev) in self.revs.iter().enumerate().rev() {
                if rev.max_undo_so_far < lowest_group {
                    return i + 1; // +1 since we know the one we just found doesn't have it
                }
            }
            return 0;
        } else {
            // no toggled groups, return past end
            return self.revs.len();
        }
    }

    // This computes undo all the way from the beginning. An optimization would be to not
    // recompute the prefix up to where the history diverges, but it's not clear that's
    // even worth the code complexity.
    /// Compute the `Undo` revision and the resulting `deletes_from_union`
    /// for making `groups` be exactly the set of undone groups.
    fn compute_undo(&self, groups: &BTreeSet<usize>) -> (Revision, Subset) {
        let toggled_groups = self.undone_groups.symmetric_difference(&groups).cloned().collect();
        let first_candidate = self.find_first_undo_candidate_index(&toggled_groups);
        // the `false` below: don't invert undos since our first_candidate is based on the current undo set, not past
        let mut deletes_from_union =
            self.deletes_from_union_before_index(first_candidate, false).into_owned();

        // replay history from the first affected revision, re-deciding for
        // each edit whether its group is now undone
        for rev in &self.revs[first_candidate..] {
            if let Edit { ref undo_group, ref inserts, ref deletes, .. } = rev.edit {
                if groups.contains(undo_group) {
                    if !inserts.is_empty() {
                        deletes_from_union = deletes_from_union.transform_union(inserts);
                    }
                } else {
                    if !inserts.is_empty() {
                        deletes_from_union = deletes_from_union.transform_expand(inserts);
                    }
                    if !deletes.is_empty() {
                        deletes_from_union = deletes_from_union.union(deletes);
                    }
                }
            }
        }

        let deletes_bitxor = self.deletes_from_union.bitxor(&deletes_from_union);
        let max_undo_so_far = self.revs.last().unwrap().max_undo_so_far;
        (
            Revision {
                rev_id: self.next_rev_id(),
                max_undo_so_far,
                edit: Undo { toggled_groups, deletes_bitxor },
            },
            deletes_from_union,
        )
    }

    // TODO: maybe refactor this API to take a toggle set
    /// Make `groups` be exactly the set of undone groups, updating the text,
    /// tombstones and history accordingly.
    pub fn undo(&mut self, groups: BTreeSet<usize>) {
        let (new_rev, new_deletes_from_union) = self.compute_undo(&groups);

        let (new_text, new_tombstones) = shuffle(
            &self.text,
            &self.tombstones,
            &self.deletes_from_union,
            &new_deletes_from_union,
        );

        self.text = new_text;
        self.tombstones = new_tombstones;
        self.deletes_from_union = new_deletes_from_union;
        self.undone_groups = groups;
        self.revs.push(new_rev);
        self.rev_id_counter += 1;
    }

    /// Returns true when both revisions can be found and they delete the same
    /// subset of the current union string, i.e. they describe identical content.
    pub fn is_equivalent_revision(&self, base_rev: RevId, other_rev: RevId) -> bool {
        let base_subset = self
            .find_rev(base_rev)
            .map(|rev_index| self.deletes_from_cur_union_for_index(rev_index));
        let other_subset = self
            .find_rev(other_rev)
            .map(|rev_index| self.deletes_from_cur_union_for_index(rev_index));

        base_subset.is_some() && base_subset == other_subset
    }

    // Note: this function would need some work to handle retaining arbitrary revisions,
    // partly because the reachability calculation would become more complicated (a
    // revision might hold content from an undo group that would otherwise be gc'ed),
    // and partly because you need to retain more undo history, to supply input to the
    // reachability calculation.
    //
    // Thus, it's easiest to defer gc to when all plugins quiesce, but it's certainly
    // possible to fix it so that's not necessary.
    /// Garbage-collect the state belonging to the undo groups in `gc_groups`,
    /// shrinking `tombstones`, `deletes_from_union` and every stored revision
    /// `Subset` accordingly, and dropping revisions that become empty.
    pub fn gc(&mut self, gc_groups: &BTreeSet<usize>) {
        // `gc_dels` accumulates the union-string characters to remove entirely.
        let mut gc_dels = self.empty_subset_before_first_rev();
        // TODO: want to let caller retain more rev_id's.
        let mut retain_revs = BTreeSet::new();
        if let Some(last) = self.revs.last() {
            retain_revs.insert(last.rev_id);
        }
        {
            // first pass: compute which characters are collectable
            for rev in &self.revs {
                if let Edit { ref undo_group, ref inserts, ref deletes, .. } = rev.edit {
                    if !retain_revs.contains(&rev.rev_id) && gc_groups.contains(undo_group) {
                        if self.undone_groups.contains(undo_group) {
                            if !inserts.is_empty() {
                                gc_dels = gc_dels.transform_union(inserts);
                            }
                        } else {
                            if !inserts.is_empty() {
                                gc_dels = gc_dels.transform_expand(inserts);
                            }
                            if !deletes.is_empty() {
                                gc_dels = gc_dels.union(deletes);
                            }
                        }
                    } else if !inserts.is_empty() {
                        gc_dels = gc_dels.transform_expand(inserts);
                    }
                }
            }
        }
        if !gc_dels.is_empty() {
            // drop collected characters from tombstones and shrink the deletion set
            let not_in_tombstones = self.deletes_from_union.complement();
            let dels_from_tombstones = gc_dels.transform_shrink(&not_in_tombstones);
            self.tombstones = dels_from_tombstones.delete_from(&self.tombstones);
            self.deletes_from_union = self.deletes_from_union.transform_shrink(&gc_dels);
        }
        // second pass: rewrite history (newest first) with the collected
        // characters shrunk out of every revision's subsets
        let old_revs = std::mem::replace(&mut self.revs, Vec::new());
        for rev in old_revs.into_iter().rev() {
            match rev.edit {
                Edit { priority, undo_group, inserts, deletes } => {
                    let new_gc_dels = if inserts.is_empty() {
                        None
                    } else {
                        // going further back, gc_dels must be re-expressed
                        // without this revision's inserts
                        Some(gc_dels.transform_shrink(&inserts))
                    };
                    if retain_revs.contains(&rev.rev_id) || !gc_groups.contains(&undo_group) {
                        let (inserts, deletes) = if gc_dels.is_empty() {
                            (inserts, deletes)
                        } else {
                            (inserts.transform_shrink(&gc_dels), deletes.transform_shrink(&gc_dels))
                        };
                        self.revs.push(Revision {
                            rev_id: rev.rev_id,
                            max_undo_so_far: rev.max_undo_so_far,
                            edit: Edit { priority, undo_group, inserts, deletes },
                        });
                    }
                    if let Some(new_gc_dels) = new_gc_dels {
                        gc_dels = new_gc_dels;
                    }
                }
                Undo { toggled_groups, deletes_bitxor } => {
                    // We're super-aggressive about dropping these; after gc, the history
                    // of which undos were used to compute deletes_from_union in edits may be lost.
                    if retain_revs.contains(&rev.rev_id) {
                        let new_deletes_bitxor = if gc_dels.is_empty() {
                            deletes_bitxor
                        } else {
                            deletes_bitxor.transform_shrink(&gc_dels)
                        };
                        self.revs.push(Revision {
                            rev_id: rev.rev_id,
                            max_undo_so_far: rev.max_undo_so_far,
                            edit: Undo {
                                toggled_groups: &toggled_groups - gc_groups,
                                deletes_bitxor: new_deletes_bitxor,
                            },
                        })
                    }
                }
            }
        }
        // we pushed newest-first, so restore chronological order
        self.revs.reverse();
    }

    /// Merge the new content from another Engine into this one with a CRDT merge
    pub fn merge(&mut self, other: &Engine) {
        let (mut new_revs, text, tombstones, deletes_from_union) = {
            let base_index = find_base_index(&self.revs, &other.revs);
            let a_to_merge = &self.revs[base_index..];
            let b_to_merge = &other.revs[base_index..];

            let common = find_common(a_to_merge, b_to_merge);

            let a_new = rearrange(a_to_merge, &common, self.deletes_from_union.len());
            let b_new = rearrange(b_to_merge, &common, other.deletes_from_union.len());

            let b_deltas =
                compute_deltas(&b_new, &other.text, &other.tombstones, &other.deletes_from_union);
            let expand_by = compute_transforms(a_new);

            let max_undo = self.max_undo_group_id();
            rebase(
                expand_by,
                b_deltas,
                self.text.clone(),
                self.tombstones.clone(),
                self.deletes_from_union.clone(),
                max_undo,
            )
        };

        self.text = text;
        self.tombstones = tombstones;
        self.deletes_from_union = deletes_from_union;
        self.revs.append(&mut new_revs);
    }

    /// When merging between multiple concurrently-editing sessions, each session should have a unique ID
    /// set with this function, which will make the revisions they create not have colliding IDs.
    /// For safety, this will panic if any revisions have already been added to the Engine.
    ///
    /// Merge may panic or return incorrect results if session IDs collide, which is why they can be
    /// 96 bits which is more than sufficient for this to never happen.
    pub fn set_session_id(&mut self, session: SessionId) {
        assert_eq!(
            1,
            self.revs.len(),
            "Revisions were added to an Engine before set_session_id, these may collide."
        );
        self.session = session;
    }
}
707
708// ======== Generic helpers
709
710/// Move sections from text to tombstones and out of tombstones based on a new and old set of deletions
711fn shuffle_tombstones(
712    text: &Rope,
713    tombstones: &Rope,
714    old_deletes_from_union: &Subset,
715    new_deletes_from_union: &Subset,
716) -> Rope {
717    // Taking the complement of deletes_from_union leads to an interleaving valid for swapped text and tombstones,
718    // allowing us to use the same method to insert the text into the tombstones.
719    let inverse_tombstones_map = old_deletes_from_union.complement();
720    let move_delta =
721        Delta::synthesize(text, &inverse_tombstones_map, &new_deletes_from_union.complement());
722    move_delta.apply(tombstones)
723}
724
725/// Move sections from text to tombstones and vice versa based on a new and old set of deletions.
726/// Returns a tuple of a new text `Rope` and a new `Tombstones` rope described by `new_deletes_from_union`.
727fn shuffle(
728    text: &Rope,
729    tombstones: &Rope,
730    old_deletes_from_union: &Subset,
731    new_deletes_from_union: &Subset,
732) -> (Rope, Rope) {
733    // Delta that deletes the right bits from the text
734    let del_delta = Delta::synthesize(tombstones, old_deletes_from_union, new_deletes_from_union);
735    let new_text = del_delta.apply(text);
736    // println!("shuffle: old={:?} new={:?} old_text={:?} new_text={:?} old_tombstones={:?}",
737    //     old_deletes_from_union, new_deletes_from_union, text, new_text, tombstones);
738    (new_text, shuffle_tombstones(text, tombstones, old_deletes_from_union, new_deletes_from_union))
739}
740
741// ======== Merge helpers
742
/// Find an index before which everything is the same
///
/// Currently always returns 1, relying on the invariant (asserted below) that
/// both histories share the same initial revision. This is always correct but
/// pessimal: revisions past the true divergence point get reprocessed by the
/// rest of the merge.
fn find_base_index(a: &[Revision], b: &[Revision]) -> usize {
    assert!(!a.is_empty() && !b.is_empty());
    assert!(a[0].rev_id == b[0].rev_id);
    // TODO find the maximum base revision.
    // this should have the same behavior, but worse performance
    1
}
751
752/// Find a set of revisions common to both lists
753fn find_common(a: &[Revision], b: &[Revision]) -> BTreeSet<RevId> {
754    // TODO make this faster somehow?
755    let a_ids: BTreeSet<RevId> = a.iter().map(|r| r.rev_id).collect();
756    let b_ids: BTreeSet<RevId> = b.iter().map(|r| r.rev_id).collect();
757    a_ids.intersection(&b_ids).cloned().collect()
758}
759
760/// Returns the operations in `revs` that don't have their `rev_id` in
761/// `base_revs`, but modified so that they are in the same order but based on
762/// the `base_revs`. This allows the rest of the merge to operate on only
763/// revisions not shared by both sides.
764///
765/// Conceptually, see the diagram below, with `.` being base revs and `n` being
766/// non-base revs, `N` being transformed non-base revs, and rearranges it:
767/// .n..n...nn..  -> ........NNNN -> returns vec![N,N,N,N]
768fn rearrange(revs: &[Revision], base_revs: &BTreeSet<RevId>, head_len: usize) -> Vec<Revision> {
769    // transform representing the characters added by common revisions after a point.
770    let mut s = Subset::new(head_len);
771
772    let mut out = Vec::with_capacity(revs.len() - base_revs.len());
773    for rev in revs.iter().rev() {
774        let is_base = base_revs.contains(&rev.rev_id);
775        let contents = match rev.edit {
776            Contents::Edit { priority, undo_group, ref inserts, ref deletes } => {
777                if is_base {
778                    s = inserts.transform_union(&s);
779                    None
780                } else {
781                    // fast-forward this revision over all common ones after it
782                    let transformed_inserts = inserts.transform_expand(&s);
783                    let transformed_deletes = deletes.transform_expand(&s);
784                    // we don't want new revisions before this to be transformed after us
785                    s = s.transform_shrink(&transformed_inserts);
786                    Some(Contents::Edit {
787                        inserts: transformed_inserts,
788                        deletes: transformed_deletes,
789                        priority,
790                        undo_group,
791                    })
792                }
793            }
794            Contents::Undo { .. } => panic!("can't merge undo yet"),
795        };
796        if let Some(edit) = contents {
797            out.push(Revision { edit, rev_id: rev.rev_id, max_undo_so_far: rev.max_undo_so_far });
798        }
799    }
800
801    out.as_mut_slice().reverse();
802    out
803}
804
/// A concrete representation of a single edit revision, produced by
/// `compute_deltas`: unlike `Revision`, it carries the actual inserted text
/// as an `InsertDelta` rather than only a `Subset` of inserted positions.
#[derive(Clone, Debug)]
struct DeltaOp {
    // Id of the revision this op was derived from.
    rev_id: RevId,
    // Priority of the original edit, used for ordering concurrent inserts
    // (combined with the session id into a FullPriority in `rebase`).
    priority: usize,
    // Undo group the original edit belonged to.
    undo_group: usize,
    // The inserted content. NOTE(review): coordinates appear to be relative
    // to the state just before this revision — confirm against compute_deltas.
    inserts: InsertDelta<RopeInfo>,
    // Subset of characters deleted by this revision (cloned from the Revision).
    deletes: Subset,
}
813
814/// Transform `revs`, which doesn't include information on the actual content of the operations,
815/// into an `InsertDelta`-based representation that does by working backward from the text and tombstones.
816fn compute_deltas(
817    revs: &[Revision],
818    text: &Rope,
819    tombstones: &Rope,
820    deletes_from_union: &Subset,
821) -> Vec<DeltaOp> {
822    let mut out = Vec::with_capacity(revs.len());
823
824    let mut cur_all_inserts = Subset::new(deletes_from_union.len());
825    for rev in revs.iter().rev() {
826        match rev.edit {
827            Contents::Edit { priority, undo_group, ref inserts, ref deletes } => {
828                let older_all_inserts = inserts.transform_union(&cur_all_inserts);
829
830                // TODO could probably be more efficient by avoiding shuffling from head every time
831                let tombstones_here =
832                    shuffle_tombstones(text, tombstones, deletes_from_union, &older_all_inserts);
833                let delta =
834                    Delta::synthesize(&tombstones_here, &older_all_inserts, &cur_all_inserts);
835                // TODO create InsertDelta directly and more efficiently instead of factoring
836                let (ins, _) = delta.factor();
837                out.push(DeltaOp {
838                    rev_id: rev.rev_id,
839                    priority,
840                    undo_group,
841                    inserts: ins,
842                    deletes: deletes.clone(),
843                });
844
845                cur_all_inserts = older_all_inserts;
846            }
847            Contents::Undo { .. } => panic!("can't merge undo yet"),
848        }
849    }
850
851    out.as_mut_slice().reverse();
852    out
853}
854
855/// Computes a series of priorities and transforms for the deltas on the right
856/// from the new revisions on the left.
857///
858/// Applies an optimization where it combines sequential revisions with the
859/// same priority into one transform to decrease the number of transforms that
860/// have to be considered in `rebase` substantially for normal editing
861/// patterns. Any large runs of typing in the same place by the same user (e.g
862/// typing a paragraph) will be combined into a single segment in a transform
863/// as opposed to thousands of revisions.
864fn compute_transforms(revs: Vec<Revision>) -> Vec<(FullPriority, Subset)> {
865    let mut out = Vec::new();
866    let mut last_priority: Option<usize> = None;
867    for r in revs {
868        if let Contents::Edit { priority, inserts, .. } = r.edit {
869            if inserts.is_empty() {
870                continue;
871            }
872            if Some(priority) == last_priority {
873                let last: &mut (FullPriority, Subset) = out.last_mut().unwrap();
874                last.1 = last.1.transform_union(&inserts);
875            } else {
876                last_priority = Some(priority);
877                let prio = FullPriority { priority, session_id: r.rev_id.session_id() };
878                out.push((prio, inserts));
879            }
880        }
881    }
882    out
883}
884
/// Rebase `b_new` on top of `expand_by` and return revision contents that can be appended as new
/// revisions on top of the revisions represented by `expand_by`.
///
/// Returns the rebased revisions together with the updated text, tombstones
/// and `deletes_from_union` after applying them.
fn rebase(
    mut expand_by: Vec<(FullPriority, Subset)>,
    b_new: Vec<DeltaOp>,
    mut text: Rope,
    mut tombstones: Rope,
    mut deletes_from_union: Subset,
    mut max_undo_so_far: usize,
) -> (Vec<Revision>, Rope, Rope, Subset) {
    let mut out = Vec::with_capacity(b_new.len());

    let mut next_expand_by = Vec::with_capacity(expand_by.len());
    for op in b_new {
        let DeltaOp { rev_id, priority, undo_group, mut inserts, mut deletes } = op;
        let full_priority = FullPriority { priority, session_id: rev_id.session_id() };
        // expand by each in expand_by
        for &(trans_priority, ref trans_inserts) in &expand_by {
            // Tie-break concurrent inserts by full priority; equality should
            // never happen, presumably because the session id component
            // disambiguates — TODO confirm.
            let after = full_priority >= trans_priority; // should never be ==
            // d-expand this op's inserts by the other side's inserts
            inserts = inserts.transform_expand(trans_inserts, after);
            // trans-expand other by expanded so they have the same context
            let inserted = inserts.inserted_subset();
            let new_trans_inserts = trans_inserts.transform_expand(&inserted);
            // The deletes are already after our inserts, but we need to include the other inserts
            deletes = deletes.transform_expand(&new_trans_inserts);
            // On the next step we want things in expand_by to have op in the context
            next_expand_by.push((trans_priority, new_trans_inserts));
        }

        // Apply the (now fully transformed) inserts to the visible text.
        let text_inserts = inserts.transform_shrink(&deletes_from_union);
        let text_with_inserts = text_inserts.apply(&text);
        let inserted = inserts.inserted_subset();

        // Grow the deletion set to the new coordinate space, add this op's
        // deletes, and shuffle the deleted characters into the tombstones.
        let expanded_deletes_from_union = deletes_from_union.transform_expand(&inserted);
        let new_deletes_from_union = expanded_deletes_from_union.union(&deletes);
        let (new_text, new_tombstones) = shuffle(
            &text_with_inserts,
            &tombstones,
            &expanded_deletes_from_union,
            &new_deletes_from_union,
        );

        text = new_text;
        tombstones = new_tombstones;
        deletes_from_union = new_deletes_from_union;

        max_undo_so_far = std::cmp::max(max_undo_so_far, undo_group);
        out.push(Revision {
            rev_id,
            max_undo_so_far,
            edit: Contents::Edit { priority, undo_group, deletes, inserts: inserted },
        });

        // Swap in the transforms rebuilt during this iteration for the next op.
        expand_by = next_expand_by;
        next_expand_by = Vec::with_capacity(expand_by.len());
    }

    (out, text, tombstones, deletes_from_union)
}
945
946impl std::fmt::Display for Error {
947    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
948        match self {
949            Error::MissingRevision(_) => write!(f, "Revision not found"),
950            Error::MalformedDelta { delta_len, rev_len } => {
951                write!(f, "Delta base_len {} does not match revision length {}", delta_len, rev_len)
952            }
953        }
954    }
955}
956
// `Debug` deliberately delegates to `Display`; calling `Display::fmt` with the
// same formatter also forwards any width/precision flags unchanged.
impl std::fmt::Debug for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        std::fmt::Display::fmt(self, f)
    }
}
962
// Marker impl: `Error` participates in the standard error trait using only
// the default method implementations.
impl std::error::Error for Error {}
964
965#[cfg(test)]
966#[rustfmt::skip]
967mod tests {
968    use crate::engine::*;
969    use crate::rope::{Rope, RopeInfo};
970    use crate::delta::{Builder, Delta, DeltaElement};
971    use crate::multiset::Subset;
972    use crate::interval::Interval;
973    use std::collections::BTreeSet;
974    use crate::test_helpers::{parse_subset_list, parse_subset, parse_delta, debug_subsets};
975
    // 62-character fixture: digits 0-9, then A-Z, then a-z.
    const TEST_STR: &'static str = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
977
    // Fixture edit on TEST_STR: delete "A..Z", replace "def"->"DEEF",
    // insert "999" before "s", delete "wxy".
    fn build_delta_1() -> Delta<RopeInfo> {
        let mut d_builder = Builder::new(TEST_STR.len());
        d_builder.delete(Interval::new(10, 36));
        d_builder.replace(Interval::new(39, 42), Rope::from("DEEF"));
        d_builder.replace(Interval::new(54, 54), Rope::from("999"));
        d_builder.delete(Interval::new(58, 61));
        d_builder.build()
    }

    // Second fixture edit, overlapping delta_1 in the A..Z deletion but
    // otherwise touching different spans of TEST_STR.
    fn build_delta_2() -> Delta<RopeInfo> {
        let mut d_builder = Builder::new(TEST_STR.len());
        d_builder.replace(Interval::new(1, 3), Rope::from("!"));
        d_builder.delete(Interval::new(10, 36));
        d_builder.replace(Interval::new(42, 45), Rope::from("GI"));
        d_builder.replace(Interval::new(54, 54), Rope::from("888"));
        d_builder.replace(Interval::new(59, 60), Rope::from("HI"));
        d_builder.build()
    }
996
    // A single edit against the initial revision produces the expected head.
    #[test]
    fn edit_rev_simple() {
        let mut engine = Engine::new(Rope::from(TEST_STR));
        let first_rev = engine.get_head_rev_id().token();
        engine.edit_rev(0, 1, first_rev, build_delta_1());
        assert_eq!("0123456789abcDEEFghijklmnopqr999stuvz", String::from(engine.get_head()));
    }

    // Identity deltas (a single full-length Copy) leave the head unchanged,
    // even when applied twice against the same base revision.
    #[test]
    fn edit_rev_empty() {
        let mut engine = Engine::new(Rope::from(TEST_STR));
        let first_rev = engine.get_head_rev_id().token();
        let delta = Delta {
            base_len: TEST_STR.len(),
            els: vec![DeltaElement::Copy(0, TEST_STR.len())],
        };
        engine.edit_rev(0, 1, first_rev, delta.clone());
        assert_eq!(TEST_STR, String::from(engine.get_head()));
        engine.edit_rev(0, 1, first_rev, delta.clone());
        assert_eq!(TEST_STR, String::from(engine.get_head()));
    }

    // Two edits based on the SAME revision (concurrent edits) are both
    // applied, with the second transformed over the first.
    #[test]
    fn edit_rev_concurrent() {
        let mut engine = Engine::new(Rope::from(TEST_STR));
        let first_rev = engine.get_head_rev_id().token();
        engine.edit_rev(1, 1, first_rev, build_delta_1());
        engine.edit_rev(0, 2, first_rev, build_delta_2());
        assert_eq!("0!3456789abcDEEFGIjklmnopqr888999stuvHIz", String::from(engine.get_head()));
    }

    // A delta whose base_len doesn't match the referenced revision's length
    // must panic with the MalformedDelta message.
    #[test]
    #[should_panic(expected = "Delta base_len 5 does not match revision length 6")]
    fn edit_rev_bad_delta_len() {
        let test_str = "hello";
        let mut engine = Engine::new(Rope::from(test_str));
        let iv = Interval::new(1, 1);

        let mut builder = Builder::new(test_str.len());
        builder.replace(iv, "1".into());
        let delta1 = builder.build();

        let mut builder = Builder::new(test_str.len());
        builder.replace(iv, "2".into());
        let delta2 = builder.build();

        let rev = engine.get_head_rev_id().token();
        engine.edit_rev(1, 1, rev, delta1);

        // this second delta now has an incorrect length for the engine
        let rev = engine.get_head_rev_id().token();
        engine.edit_rev(1, 2, rev, delta2);
    }
1050
    // Shared scenario: apply delta_1 and delta_2 concurrently, undoing the
    // given groups either before or after the edits; the head must come out
    // the same either way (`output`).
    fn undo_test(before: bool, undos : BTreeSet<usize>, output: &str) {
        let mut engine = Engine::new(Rope::from(TEST_STR));
        let first_rev = engine.get_head_rev_id().token();
        if before {
            engine.undo(undos.clone());
        }
        engine.edit_rev(1, 1, first_rev, build_delta_1());
        engine.edit_rev(0, 2, first_rev, build_delta_2());
        if !before {
            engine.undo(undos);
        }
        assert_eq!(output, String::from(engine.get_head()));
    }

    // Undo-before-edit: undoing both groups restores the original text.
    #[test]
    fn edit_rev_undo() {
        undo_test(true, [1,2].iter().cloned().collect(), TEST_STR);
    }

    // Undo-before-edit of group 2 leaves only delta_1's effect.
    #[test]
    fn edit_rev_undo_2() {
        undo_test(true, [2].iter().cloned().collect(), "0123456789abcDEEFghijklmnopqr999stuvz");
    }

    // Undo-before-edit of group 1 leaves only delta_2's effect.
    #[test]
    fn edit_rev_undo_3() {
        undo_test(true, [1].iter().cloned().collect(), "0!3456789abcdefGIjklmnopqr888stuvwHIyz");
    }
1079
    // The delta from an old revision to head, applied to that revision's
    // text, must reproduce the head text.
    #[test]
    fn try_delta_rev_head() {
        let mut engine = Engine::new(Rope::from(TEST_STR));
        let first_rev = engine.get_head_rev_id().token();
        engine.edit_rev(1, 1, first_rev, build_delta_1());
        let d = engine.try_delta_rev_head(first_rev).unwrap();
        assert_eq!(String::from(engine.get_head()), d.apply_to_string(TEST_STR));
    }

    // Same, but spanning two (concurrent) revisions back to the initial text.
    #[test]
    fn try_delta_rev_head_2() {
        let mut engine = Engine::new(Rope::from(TEST_STR));
        let first_rev = engine.get_head_rev_id().token();
        engine.edit_rev(1, 1, first_rev, build_delta_1());
        engine.edit_rev(0, 2, first_rev, build_delta_2());
        let d = engine.try_delta_rev_head(first_rev).unwrap();
        assert_eq!(String::from(engine.get_head()), d.apply_to_string(TEST_STR));
    }

    // Same, but starting from an intermediate (post-delta_1) revision.
    #[test]
    fn try_delta_rev_head_3() {
        let mut engine = Engine::new(Rope::from(TEST_STR));
        let first_rev = engine.get_head_rev_id().token();
        engine.edit_rev(1, 1, first_rev, build_delta_1());
        let after_first_edit = engine.get_head_rev_id().token();
        engine.edit_rev(0, 2, first_rev, build_delta_2());
        let d = engine.try_delta_rev_head(after_first_edit).unwrap();
        assert_eq!(String::from(engine.get_head()), d.apply_to_string("0123456789abcDEEFghijklmnopqr999stuvz"));
    }

    // An unknown revision token yields an Err rather than panicking.
    #[test]
    fn try_delta_rev_head_missing_token() {
        let mut engine = Engine::new(Rope::from(TEST_STR));
        let first_rev = engine.get_head_rev_id().token();
        let bad_rev = RevToken::default();
        engine.edit_rev(1, 1, first_rev, build_delta_1());
        let d = engine.try_delta_rev_head(bad_rev);
        assert!(d.is_err());
    }
1119
    // Undo-after-edit variants of the edit_rev_undo* cases above; results
    // must match the undo-before-edit outcomes.
    #[test]
    fn undo() {
        undo_test(false, [1,2].iter().cloned().collect(), TEST_STR);
    }

    #[test]
    fn undo_2() {
        undo_test(false, [2].iter().cloned().collect(), "0123456789abcDEEFghijklmnopqr999stuvz");
    }

    #[test]
    fn undo_3() {
        undo_test(false, [1].iter().cloned().collect(), "0!3456789abcdefGIjklmnopqr888stuvwHIyz");
    }
1134
    // Editing on top of an undone revision, then undoing a later group: the
    // edit based on the pre-undo head must still apply cleanly.
    #[test]
    fn undo_4() {
        let mut engine = Engine::new(Rope::from(TEST_STR));
        let d1 = Delta::simple_edit(Interval::new(0,0), Rope::from("a"), TEST_STR.len());
        let first_rev = engine.get_head_rev_id().token();
        engine.edit_rev(1, 1, first_rev, d1.clone());
        let new_head = engine.get_head_rev_id().token();
        engine.undo([1].iter().cloned().collect());
        let d2 = Delta::simple_edit(Interval::new(0,0), Rope::from("a"), TEST_STR.len()+1);
        engine.edit_rev(1, 2, new_head, d2); // note this is based on d1 before, not the undo
        let new_head_2 = engine.get_head_rev_id().token();
        let d3 = Delta::simple_edit(Interval::new(0,0), Rope::from("b"), TEST_STR.len()+1);
        engine.edit_rev(1, 3, new_head_2, d3);
        engine.undo([1,3].iter().cloned().collect());
        assert_eq!("a0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", String::from(engine.get_head()));
    }

    // Two identical deletes in different undo groups: the undo set passed to
    // `undo` fully replaces the previous set, so the text toggles accordingly.
    #[test]
    fn undo_5() {
        let mut engine = Engine::new(Rope::from(TEST_STR));
        let d1 = Delta::simple_edit(Interval::new(0,10), Rope::from(""), TEST_STR.len());
        let first_rev = engine.get_head_rev_id().token();
        engine.edit_rev(1, 1, first_rev, d1.clone());
        engine.edit_rev(1, 2, first_rev, d1.clone());
        engine.undo([1].iter().cloned().collect());
        assert_eq!("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", String::from(engine.get_head()));
        engine.undo([1,2].iter().cloned().collect());
        assert_eq!(TEST_STR, String::from(engine.get_head()));
        engine.undo([].iter().cloned().collect());
        assert_eq!("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", String::from(engine.get_head()));
    }
1166
    // GC'ing an undone group, then editing and undoing a later group: the
    // collected group stays undone and the rest behaves normally.
    #[test]
    fn gc() {
        let mut engine = Engine::new(Rope::from(TEST_STR));
        let d1 = Delta::simple_edit(Interval::new(0,0), Rope::from("c"), TEST_STR.len());
        let first_rev = engine.get_head_rev_id().token();
        engine.edit_rev(1, 1, first_rev, d1);
        let new_head = engine.get_head_rev_id().token();
        engine.undo([1].iter().cloned().collect());
        let d2 = Delta::simple_edit(Interval::new(0,0), Rope::from("a"), TEST_STR.len()+1);
        engine.edit_rev(1, 2, new_head, d2);
        let gc : BTreeSet<usize> = [1].iter().cloned().collect();
        engine.gc(&gc);
        let d3 = Delta::simple_edit(Interval::new(0,0), Rope::from("b"), TEST_STR.len()+1);
        let new_head_2 = engine.get_head_rev_id().token();
        engine.edit_rev(1, 3, new_head_2, d3);
        engine.undo([3].iter().cloned().collect());
        assert_eq!("a0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", String::from(engine.get_head()));
    }
1185
    /// This case is a regression test reproducing a panic I found while using the UI.
    /// It does undos and gcs in a pattern that can actually happen when using the editor.
    ///
    /// `edits` insertions are made in separate undo groups while groups older
    /// than `max_undos` are continually GC'd, then the remaining history is
    /// undone, edited, GC'd and edited again.
    fn gc_scenario(edits: usize, max_undos: usize) {
        let mut engine = Engine::new(Rope::from(""));

        // insert `edits` letter "b"s in separate undo groups
        for i in 0..edits {
            let d = Delta::simple_edit(Interval::new(0,0), Rope::from("b"), i);
            let head = engine.get_head_rev_id().token();
            engine.edit_rev(1, i+1, head, d);
            if i >= max_undos {
                let to_gc : BTreeSet<usize> = [i-max_undos].iter().cloned().collect();
                engine.gc(&to_gc)
            }
        }

        // spam cmd+z until the available undo history is exhausted
        let mut to_undo = BTreeSet::new();
        for i in ((edits-max_undos)..edits).rev() {
            to_undo.insert(i+1);
            engine.undo(to_undo.clone());
        }

        // insert a character at the beginning
        let d1 = Delta::simple_edit(Interval::new(0,0), Rope::from("h"), engine.get_head().len());
        let head = engine.get_head_rev_id().token();
        engine.edit_rev(1, edits+1, head, d1);

        // since character was inserted after gc, editor gcs all undone things
        engine.gc(&to_undo);

        // insert character at end, when this test was added, it panic'd here
        let chars_left = (edits-max_undos)+1;
        let d2 = Delta::simple_edit(Interval::new(chars_left, chars_left), Rope::from("f"), engine.get_head().len());
        let head2 = engine.get_head_rev_id().token();
        engine.edit_rev(1, edits+1, head2, d2);

        let mut soln = String::from("h");
        for _ in 0..(edits-max_undos) {
            soln.push('b');
        }
        soln.push('f');
        assert_eq!(soln, String::from(engine.get_head()));
    }

    #[test]
    fn gc_2() {
        // the smallest values with which it still fails:
        gc_scenario(4,3);
    }

    #[test]
    fn gc_3() {
        // original values this test was created/found with in the UI:
        gc_scenario(35,20);
    }
1242
    // Double-delete where one group is GC'd: undoing both should be a no-op
    // since the surviving delete still applies.
    #[test]
    fn gc_4() {
        let mut engine = Engine::new(Rope::from(TEST_STR));
        let d1 = Delta::simple_edit(Interval::new(0,10), Rope::from(""), TEST_STR.len());
        let first_rev = engine.get_head_rev_id().token();
        engine.edit_rev(1, 1, first_rev, d1.clone());
        engine.edit_rev(1, 2, first_rev, d1.clone());
        let gc : BTreeSet<usize> = [1].iter().cloned().collect();
        engine.gc(&gc);
        // shouldn't do anything since it was double-deleted and one was GC'd
        engine.undo([1,2].iter().cloned().collect());
        assert_eq!("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", String::from(engine.get_head()));
    }

    // Group 1 is undone before its edit arrives, then GC'd; undoing group 2
    // afterwards must restore the original text.
    #[test]
    fn gc_5() {
        let mut engine = Engine::new(Rope::from(TEST_STR));
        let d1 = Delta::simple_edit(Interval::new(0,10), Rope::from(""), TEST_STR.len());
        let initial_rev = engine.get_head_rev_id().token();
        engine.undo([1].iter().cloned().collect());
        engine.edit_rev(1, 1, initial_rev, d1.clone());
        engine.edit_rev(1, 2, initial_rev, d1.clone());
        let gc : BTreeSet<usize> = [1].iter().cloned().collect();
        engine.gc(&gc);
        // only one of the deletes was gc'd, the other should still be in effect
        assert_eq!("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", String::from(engine.get_head()));
        // since one of the two deletes was gc'd this should undo the one that wasn't
        engine.undo([2].iter().cloned().collect());
        assert_eq!(TEST_STR, String::from(engine.get_head()));
    }

    // Both groups undone before group 2's edit arrives; GC'ing group 1 and
    // then clearing the undo set redoes the surviving delete.
    #[test]
    fn gc_6() {
        let mut engine = Engine::new(Rope::from(TEST_STR));
        let d1 = Delta::simple_edit(Interval::new(0,10), Rope::from(""), TEST_STR.len());
        let initial_rev = engine.get_head_rev_id().token();
        engine.edit_rev(1, 1, initial_rev, d1.clone());
        engine.undo([1,2].iter().cloned().collect());
        engine.edit_rev(1, 2, initial_rev, d1.clone());
        let gc : BTreeSet<usize> = [1].iter().cloned().collect();
        engine.gc(&gc);
        assert_eq!(TEST_STR, String::from(engine.get_head()));
        // since one of the two deletes was gc'd this should re-do the one that wasn't
        engine.undo([].iter().cloned().collect());
        assert_eq!("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", String::from(engine.get_head()));
    }
1289
    // Make a RevId in session (1, 0) with the given revision number.
    fn basic_rev(i: usize) -> RevId {
        RevId { session1: 1, session2: 0, num: i as u32 }
    }

    // Build insert-only Revisions (no deletes) from a list of insert subsets,
    // numbering revision ids and undo groups from 1.
    fn basic_insert_ops(inserts: Vec<Subset>, priority: usize) -> Vec<Revision> {
        inserts.into_iter().enumerate().map(|(i, inserts)| {
            let deletes = Subset::new(inserts.len());
            Revision {
                rev_id: basic_rev(i+1),
                max_undo_so_far: i+1,
                edit: Contents::Edit {
                    priority, inserts, deletes,
                    undo_group: i+1,
                }
            }
        }).collect()
    }
1307
    // Rearranging six insert-only revisions over base revs {3, 5} keeps the
    // non-base revisions in order but rebased past the base inserts.
    #[test]
    fn rearrange_1() {
        let inserts = parse_subset_list("
        ##
        -#-
        #---
        ---#-
        -----#
        #------
        ");
        let revs = basic_insert_ops(inserts, 1);
        let base: BTreeSet<RevId> = [3,5].iter().cloned().map(basic_rev).collect();

        let rearranged = rearrange(&revs, &base, 7);
        let rearranged_inserts: Vec<Subset> = rearranged.into_iter().map(|c| {
            match c.edit {
                Contents::Edit {inserts, ..} => inserts,
                Contents::Undo { .. } => panic!(),
            }
        }).collect();

        debug_subsets(&rearranged_inserts);
        let correct = parse_subset_list("
        -##-
        --#--
        ---#--
        #------
        ");
        assert_eq!(correct, rearranged_inserts);
    }
1338
    // Build empty-edit Revisions carrying only the given revision numbers,
    // for tests that care about ids rather than contents.
    fn ids_to_fake_revs(ids: &[usize]) -> Vec<Revision> {
        let contents = Contents::Edit {
            priority: 0,
            undo_group: 0,
            inserts: Subset::new(0),
            deletes: Subset::new(0),
        };

        ids.iter().cloned().map(|i| {
            Revision {
                rev_id: basic_rev(i),
                max_undo_so_far: i,
                edit: contents.clone()
            }
        }).collect()
    }

    // find_common returns exactly the ids present in both histories.
    #[test]
    fn find_common_1() {
        let a: Vec<Revision> = ids_to_fake_revs(&[0,2,4,6,8,10,12]);
        let b: Vec<Revision> = ids_to_fake_revs(&[0,1,2,4,5,8,9]);
        let res = find_common(&a, &b);

        let correct: BTreeSet<RevId> = [0,2,4,8].iter().cloned().map(basic_rev).collect();
        assert_eq!(correct, res);
    }
1365
1366
    // find_base_index currently always reports 1 (shared initial revision).
    #[test]
    fn find_base_1() {
        let a: Vec<Revision> = ids_to_fake_revs(&[0,2,4,6,8,10,12]);
        let b: Vec<Revision> = ids_to_fake_revs(&[0,1,2,4,5,8,9]);
        let res = find_base_index(&a, &b);

        assert_eq!(1, res);
    }
1375
    // Replaying the InsertDeltas recovered by compute_deltas on top of the
    // tombstones reconstructs the full document "1234567".
    #[test]
    fn compute_deltas_1() {
        let inserts = parse_subset_list("
        -##-
        --#--
        ---#--
        #------
        ");
        let revs = basic_insert_ops(inserts, 1);

        let text = Rope::from("13456");
        let tombstones = Rope::from("27");
        let deletes_from_union = parse_subset("-#----#");
        let delta_ops = compute_deltas(&revs, &text, &tombstones, &deletes_from_union);

        println!("{:#?}", delta_ops);

        let mut r = Rope::from("27");
        for op in &delta_ops {
            r = op.inserts.apply(&r);
        }
        assert_eq!("1234567", String::from(r));
    }
1399
    // Four same-priority revisions collapse into a single combined transform.
    #[test]
    fn compute_transforms_1() {
        let inserts = parse_subset_list("
        -##-
        --#--
        ---#--
        #------
        ");
        let revs = basic_insert_ops(inserts, 1);

        let expand_by = compute_transforms(revs);
        assert_eq!(1, expand_by.len());
        assert_eq!(1, expand_by[0].0.priority);
        let subset_str = format!("{:#?}", expand_by[0].1);
        assert_eq!("#-####-", &subset_str);
    }

    // Runs are split on priority changes; empty inserts (the priority-4
    // revision) are dropped entirely and do not break or start a run.
    #[test]
    fn compute_transforms_2() {
        let inserts_1 = parse_subset_list("
        -##-
        --#--
        ");
        let mut revs = basic_insert_ops(inserts_1, 1);
        let inserts_2 = parse_subset_list("
        ----
        ");
        let mut revs_2 = basic_insert_ops(inserts_2, 4);
        revs.append(&mut revs_2);
        let inserts_3 = parse_subset_list("
        ---#--
        #------
        ");
        let mut revs_3 = basic_insert_ops(inserts_3, 2);
        revs.append(&mut revs_3);

        let expand_by = compute_transforms(revs);
        assert_eq!(2, expand_by.len());
        assert_eq!(1, expand_by[0].0.priority);
        assert_eq!(2, expand_by[1].0.priority);

        let subset_str = format!("{:#?}", expand_by[0].1);
        assert_eq!("-###-", &subset_str);
        let subset_str = format!("{:#?}", expand_by[1].1);
        assert_eq!("#---#--", &subset_str);
    }
1446
    // End-to-end rebase: b's ops (priority 2) are rebased over a's transforms
    // (priority 1), producing interleaved text "zcpbdj" and shifted inserts.
    #[test]
    fn rebase_1() {
        let inserts = parse_subset_list("
        --#-
        ----#
        ");
        let a_revs = basic_insert_ops(inserts.clone(), 1);
        let b_revs = basic_insert_ops(inserts, 2);

        let text_b = Rope::from("zpbj");
        let tombstones_b = Rope::from("a");
        let deletes_from_union_b = parse_subset("-#---");
        let b_delta_ops = compute_deltas(&b_revs, &text_b, &tombstones_b, &deletes_from_union_b);

        println!("{:#?}", b_delta_ops);

        let text_a = Rope::from("zcbd");
        let tombstones_a = Rope::from("a");
        let deletes_from_union_a = parse_subset("-#---");
        let expand_by = compute_transforms(a_revs);

        let (revs, text_2, tombstones_2, deletes_from_union_2) =
            rebase(expand_by, b_delta_ops, text_a, tombstones_a, deletes_from_union_a, 0);

        let rebased_inserts: Vec<Subset> = revs.into_iter().map(|c| {
            match c.edit {
                Contents::Edit {inserts, ..} => inserts,
                Contents::Undo { .. } => panic!(),
            }
        }).collect();

        debug_subsets(&rebased_inserts);
        let correct = parse_subset_list("
        ---#--
        ------#
        ");
        assert_eq!(correct, rebased_inserts);


        assert_eq!("zcpbdj", String::from(&text_2));
        assert_eq!("a", String::from(&tombstones_2));
        assert_eq!("-#-----", format!("{:#?}", deletes_from_union_2));
    }
1490
1491    // ============== Merge script tests
1492
    /// One step of a scripted merge test, executed against a set of peers
    /// indexed by `usize`.
    #[derive(Clone, Debug)]
    enum MergeTestOp {
        /// Merge the second peer's revisions into the first peer.
        Merge(usize, usize),
        /// Assert that the given peer's head text equals the string.
        Assert(usize, String),
        /// Assert that every peer's head text equals the string.
        AssertAll(String),
        /// Assert a peer's `max_undo_group_id()` equals the given value.
        AssertMaxUndoSoFar(usize, usize),
        /// Apply delta `d` on peer `ei` with priority `p` and undo group `u`.
        Edit { ei: usize, p: usize, u: usize, d: Delta<RopeInfo> },
    }
1501
    /// The collection of engines ("peers") that a merge script drives.
    #[derive(Debug)]
    struct MergeTestState {
        /// One `Engine` per simulated peer; `MergeTestOp` indices refer here.
        peers: Vec<Engine>,
    }
1506
1507    impl MergeTestState {
1508        fn new(count: usize) -> MergeTestState {
1509            let mut peers = Vec::with_capacity(count);
1510            for i in 0..count {
1511                let mut peer = Engine::new(Rope::from(""));
1512                peer.set_session_id(((i*1000) as u64, 0));
1513                peers.push(peer);
1514            }
1515            MergeTestState { peers }
1516        }
1517
1518        fn run_op(&mut self, op: &MergeTestOp) {
1519            match *op {
1520                MergeTestOp::Merge(ai, bi) => {
1521                    let (start, end) = self.peers.split_at_mut(ai);
1522                    let (a, rest) = end.split_first_mut().unwrap();
1523                    let b = if bi < ai {
1524                        &mut start[bi]
1525                    } else {
1526                        &mut rest[bi - ai - 1]
1527                    };
1528                    a.merge(b);
1529                },
1530                MergeTestOp::Assert(ei, ref correct) => {
1531                    let e = &mut self.peers[ei];
1532                    assert_eq!(correct, &String::from(e.get_head()), "for peer {}", ei);
1533                },
1534                MergeTestOp::AssertMaxUndoSoFar(ei, correct) => {
1535                    let e = &mut self.peers[ei];
1536                    assert_eq!(correct, e.max_undo_group_id(), "for peer {}", ei);
1537                },
1538                MergeTestOp::AssertAll(ref correct) => {
1539                    for (ei, e) in self.peers.iter().enumerate() {
1540                        assert_eq!(correct, &String::from(e.get_head()), "for peer {}", ei);
1541                    }
1542                },
1543                MergeTestOp::Edit { ei, p, u, d: ref delta } => {
1544                    let e = &mut self.peers[ei];
1545                    let head = e.get_head_rev_id().token();
1546                    e.edit_rev(p, u, head, delta.clone());
1547                },
1548            }
1549        }
1550
1551        fn run_script(&mut self, script: &[MergeTestOp]) {
1552            for (i, op) in script.iter().enumerate() {
1553                println!("running {:?} at index {}", op, i);
1554                self.run_op(op);
1555            }
1556        }
1557    }
1558
    /// Like the scanned whiteboard diagram (see `merge_whiteboard`), but
    /// without deleting 'a': three peers make concurrent inserts that must
    /// merge into one consistent text.
    #[test]
    fn merge_insert_only_whiteboard() {
        use self::MergeTestOp::*;
        let script = vec![
            // Seed every peer with the same base text "ab".
            Edit { ei: 2, p: 1, u: 1, d: parse_delta("ab") },
            Merge(0,2), Merge(1, 2),
            Assert(0, "ab".to_owned()),
            Assert(1, "ab".to_owned()),
            Assert(2, "ab".to_owned()),
            // Peers 0 and 1 diverge with concurrent inserts.
            Edit { ei: 0, p: 3, u: 1, d: parse_delta("-c-") },
            Edit { ei: 0, p: 3, u: 1, d: parse_delta("---d") },
            Assert(0, "acbd".to_owned()),
            Edit { ei: 1, p: 5, u: 1, d: parse_delta("-p-") },
            Edit { ei: 1, p: 5, u: 1, d: parse_delta("---j") },
            Assert(1, "apbj".to_owned()),
            // Peer 2 prepends 'z'; both divergent peers pick it up.
            Edit { ei: 2, p: 1, u: 1, d: parse_delta("z--") },
            Merge(0,2), Merge(1, 2),
            Assert(0, "zacbd".to_owned()),
            Assert(1, "zapbj".to_owned()),
            // Final cross-merge interleaves both peers' inserts.
            Merge(0,1),
            Assert(0, "zacpbdj".to_owned()),
        ];
        MergeTestState::new(3).run_script(&script[..]);
    }
1584
    /// Tests that priorities are used to break ties correctly
    #[test]
    fn merge_priorities() {
        use self::MergeTestOp::*;
        let script = vec![
            // Common base "ab" on all peers.
            Edit { ei: 2, p: 1, u: 1, d: parse_delta("ab") },
            Merge(0,2), Merge(1, 2),
            Assert(0, "ab".to_owned()),
            Assert(1, "ab".to_owned()),
            Assert(2, "ab".to_owned()),
            Edit { ei: 0, p: 3, u: 1, d: parse_delta("-c-") },
            Edit { ei: 0, p: 3, u: 1, d: parse_delta("---d") },
            Assert(0, "acbd".to_owned()),
            Edit { ei: 1, p: 5, u: 1, d: parse_delta("-p-") },
            Assert(1, "apb".to_owned()),
            // Peer 2 inserts 'r' at the same spot with priority 4, which sits
            // between peer 0's (3) and peer 1's (5) — the asserts below pin
            // where 'r' lands relative to 'c' and 'p' after merging.
            Edit { ei: 2, p: 4, u: 1, d: parse_delta("-r-") },
            Merge(0,2), Merge(1, 2),
            Assert(0, "acrbd".to_owned()),
            Assert(1, "arpb".to_owned()),
            Edit { ei: 1, p: 5, u: 1, d: parse_delta("----j") },
            Assert(1, "arpbj".to_owned()),
            Edit { ei: 2, p: 4, u: 1, d: parse_delta("---z") },
            Merge(0,2), Merge(1, 2),
            Assert(0, "acrbdz".to_owned()),
            Assert(1, "arpbzj".to_owned()),
            Merge(0,1),
            Assert(0, "acrpbdzj".to_owned()),
        ];
        MergeTestState::new(3).run_script(&script[..]);
    }
1615
    /// Tests that merging again when there are no new revisions does nothing
    #[test]
    fn merge_idempotent() {
        use self::MergeTestOp::*;
        let script = vec![
            Edit { ei: 2, p: 1, u: 1, d: parse_delta("ab") },
            Merge(0,2), Merge(1, 2),
            Assert(0, "ab".to_owned()),
            Assert(1, "ab".to_owned()),
            Assert(2, "ab".to_owned()),
            Edit { ei: 0, p: 3, u: 1, d: parse_delta("-c-") },
            Edit { ei: 0, p: 3, u: 1, d: parse_delta("---d") },
            Assert(0, "acbd".to_owned()),
            Edit { ei: 1, p: 5, u: 1, d: parse_delta("-p-") },
            Edit { ei: 1, p: 5, u: 1, d: parse_delta("---j") },
            Merge(0,1),
            Assert(0, "acpbdj".to_owned()),
            // Repeated merges in both directions must not change anything.
            Merge(0,1), Merge(1,0), Merge(0,1), Merge(1,0),
            Assert(0, "acpbdj".to_owned()),
            Assert(1, "acpbdj".to_owned()),
        ];
        MergeTestState::new(3).run_script(&script[..]);
    }
1639
    /// Merging the same divergent edits in different orders (and repeatedly)
    /// must converge every peer to the same text.
    #[test]
    fn merge_associative() {
        use self::MergeTestOp::*;
        let script = vec![
            Edit { ei: 2, p: 1, u: 1, d: parse_delta("ab") },
            Merge(0,2), Merge(1, 2),
            Edit { ei: 0, p: 3, u: 1, d: parse_delta("-c-") },
            Edit { ei: 1, p: 5, u: 1, d: parse_delta("-p-") },
            Edit { ei: 2, p: 2, u: 1, d: parse_delta("z--") },
            // copy the current state
            Merge(3, 0), Merge(4, 1), Merge(5, 2),
            // Do the merge one direction
            Merge(1,2),
            Merge(0,1),
            Assert(0, "zacpb".to_owned()),
            // Do it the other way on the copy
            Merge(4,3),
            Merge(5,4),
            Assert(5, "zacpb".to_owned()),
            // Go crazy
            Merge(0,5), Merge(2,5), Merge(4,5), Merge(1,4),
            Merge(3,1), Merge(5,3),
            AssertAll("zacpb".to_owned()),
        ];
        MergeTestState::new(6).run_script(&script[..]);
    }
1666
    /// Concurrent delete ('!' in `parse_delta`) on one peer plus inserts on
    /// the other: the delete must survive the merge.
    #[test]
    fn merge_simple_delete_1() {
        use self::MergeTestOp::*;
        let script = vec![
            Edit { ei: 0, p: 1, u: 1, d: parse_delta("abc") },
            Merge(1,0),
            Assert(0, "abc".to_owned()),
            Assert(1, "abc".to_owned()),
            // Peer 0 deletes 'a' and inserts 'd'.
            Edit { ei: 0, p: 1, u: 1, d: parse_delta("!-d-") },
            Assert(0, "bdc".to_owned()),
            // Peer 1 concurrently inserts "efg" and deletes 'c'.
            Edit { ei: 1, p: 3, u: 1, d: parse_delta("--efg!") },
            Assert(1, "abefg".to_owned()),
            Merge(1,0),
            Assert(1, "bdefg".to_owned()),
        ];
        MergeTestState::new(2).run_script(&script[..]);
    }
1684
    /// A delete on one peer merged with an insert adjacent to the deleted
    /// character on the other peer.
    #[test]
    fn merge_simple_delete_2() {
        use self::MergeTestOp::*;
        let script = vec![
            Edit { ei: 0, p: 1, u: 1, d: parse_delta("ab") },
            Merge(1,0),
            Assert(0, "ab".to_owned()),
            Assert(1, "ab".to_owned()),
            // Peer 0 deletes 'a'.
            Edit { ei: 0, p: 1, u: 1, d: parse_delta("!-") },
            Assert(0, "b".to_owned()),
            // Peer 1 concurrently inserts 'c' after 'a'.
            Edit { ei: 1, p: 3, u: 1, d: parse_delta("-c-") },
            Assert(1, "acb".to_owned()),
            Merge(1,0),
            Assert(1, "cb".to_owned()),
        ];
        MergeTestState::new(2).run_script(&script[..]);
    }
1702
    /// I have a scanned whiteboard diagram of doing this merge by hand, good for reference
    #[test]
    fn merge_whiteboard() {
        use self::MergeTestOp::*;
        let script = vec![
            // Seed all four peers with "ab".
            Edit { ei: 2, p: 1, u: 1, d: parse_delta("ab") },
            Merge(0,2), Merge(1, 2), Merge(3, 2),
            Assert(0, "ab".to_owned()),
            Assert(1, "ab".to_owned()),
            Assert(2, "ab".to_owned()),
            Assert(3, "ab".to_owned()),
            // Peer 2 deletes 'a', turning it into a tombstone.
            Edit { ei: 2, p: 1, u: 1, d: parse_delta("!-") },
            Assert(2, "b".to_owned()),
            Edit { ei: 0, p: 3, u: 1, d: parse_delta("-c-") },
            Edit { ei: 0, p: 3, u: 1, d: parse_delta("---d") },
            Assert(0, "acbd".to_owned()),
            Merge(0,2),
            Assert(0, "cbd".to_owned()),
            Edit { ei: 1, p: 5, u: 1, d: parse_delta("-p-") },
            Merge(1,2),
            Assert(1, "pb".to_owned()),
            Edit { ei: 1, p: 5, u: 1, d: parse_delta("--j") },
            Assert(1, "pbj".to_owned()),
            // to replicate whiteboard, z must be before a tombstone
            // which we can do with another peer that inserts before a and merges.
            Edit { ei: 3, p: 7, u: 1, d: parse_delta("z--") },
            Merge(2,3),
            Merge(0,2), Merge(1, 2),
            Assert(0, "zcbd".to_owned()),
            Assert(1, "zpbj".to_owned()),
            Merge(0,1), // the merge from the whiteboard scan
            Assert(0, "zcpbdj".to_owned()),
        ];
        MergeTestState::new(4).run_script(&script[..]);
    }
1738
    /// Checks that the maximum undo group id seen so far propagates across
    /// merges in both directions.
    #[test]
    fn merge_max_undo_so_far() {
        use self::MergeTestOp::*;
        let script = vec![
            Edit { ei: 0, p: 1, u: 1, d: parse_delta("ab") },
            Merge(1,0), Merge(2,0),
            AssertMaxUndoSoFar(1,1),
            // Divergent edits with undo groups 2 and 3.
            Edit { ei: 0, p: 1, u: 2, d: parse_delta("!-") },
            Edit { ei: 1, p: 3, u: 3, d: parse_delta("-!") },
            Merge(1,0),
            AssertMaxUndoSoFar(1,3),
            AssertMaxUndoSoFar(0,2),
            Merge(0,1),
            // Merging back raises peer 0's max to peer 1's group 3.
            AssertMaxUndoSoFar(0,3),
            // A lower group id merged in must not lower the maximum.
            Edit { ei: 2, p: 1, u: 1, d: parse_delta("!!") },
            Merge(1,2),
            AssertMaxUndoSoFar(1,3),
        ];
        MergeTestState::new(3).run_script(&script[..]);
    }
1759
    /// This is a regression test to ensure that session IDs are used to break
    /// ties in edit priorities. Otherwise the results may be inconsistent.
    #[test]
    fn merge_session_priorities() {
        use self::MergeTestOp::*;
        let script = vec![
            Edit { ei: 0, p: 1, u: 1, d: parse_delta("ac") },
            Merge(1,0),
            Merge(2,0),
            AssertAll("ac".to_owned()),
            // Peers 0 and 1 insert at the same spot with the SAME priority;
            // only the session id can order 'd' vs 'f' consistently.
            Edit { ei: 0, p: 1, u: 1, d: parse_delta("-d-") },
            Assert(0, "adc".to_owned()),
            Edit { ei: 1, p: 1, u: 1, d: parse_delta("-f-") },
            Merge(2,1),
            Assert(1, "afc".to_owned()),
            Assert(2, "afc".to_owned()),
            Merge(2,0),
            Merge(0,1),
            // These two will be different without using session IDs
            Assert(2, "adfc".to_owned()),
            Assert(0, "adfc".to_owned()),
        ];
        MergeTestState::new(3).run_script(&script[..]);
    }
1784}