mod cell; pub use cell::*;
mod context; pub use context::*;
mod counter; pub use counter::*;
mod dict; pub use dict::*;
mod sequence; pub use sequence::*;
mod time; pub use time::*;
mod util; pub(crate) use util::*;

#[cfg(test)] mod test;

/** A value that can reconcile divergent copies of itself by exchanging diffs. */
pub trait Mergable: Sized {
	/** The type that describes the difference between two values. */
	type Diff: Diff;
	/** The type used to order changes, for example a timestamp. */
	type Seq: Ord;

	/** Merge `other` into `self`.
	 *
	 * This is equivalent to applying `other.diff(self)` to `self`. It panics if the
	 * implementation produces a diff that `self` cannot apply.
	 */
	fn merge(&mut self, other: Self) {
		self.apply(Mergable::diff(&other, self))
			.expect("merge produced a diff that could not be applied")
	}

	/** Produce a diff.
	 *
	 * The result will contain all of the information in `self` that isn't present in `that`.
	 */
	fn diff(&self, that: &Self) -> Self::Diff;

	/** Apply a diff produced by [`Mergable::diff`], merging its information into `self`. */
	fn apply(&mut self, diff: Self::Diff) -> Result<(), ApplyError>;

	/** Clean up history.
	 *
	 * WARNING: This is easy to use incorrectly and doing so may cause unexpected results.
	 *
	 * This allows optimizing the internal representation of `Mergable` objects by trimming
	 * history that is no longer necessary. For example, `Dict` objects remember deleted
	 * entries so that merging with a version that still has a deleted entry won't cause it
	 * to reappear. `clean`ing allows them to eventually forget about deleted entries so
	 * that they don't grow forever, even when the live set is not growing.
	 *
	 * The ordering discussed here is `Sequence` order, which may not be an absolute
	 * ordering across clients. For example, the default `TimeSequenceFactory` relies on
	 * the client's clock, so it may be unreliable. When choosing a `cutoff` for `clean`
	 * you must take this into account. This is usually done by allowing a generous margin
	 * and then, at some point, rejecting stale changes.
	 *
	 * This method recursively cleans any `Mergable` children of the object, in the same
	 * sense that `diff` and `merge` operate recursively.
	 *
	 * Note that merging an object that hasn't been `clean`ed to `cutoff` into an object
	 * that has is safe, but may re-introduce some of the history.
	 *
	 * There are two requirements:
	 * 1. No new changes from sessions started before `cutoff` will ever be seen.
	 *    - For example, a client that has been working offline since before `cutoff` must
	 *      not be allowed to sync its changes into the network when it comes back online.
	 *      Either `clean` calls must be held until the client returns, or the changes must
	 *      be dropped.
	 * 2. This object will never be merged into an object that hasn't seen all of the
	 *    changes from before `cutoff` that this object has seen.
	 *    - For example, if a client has been offline since before `cutoff`, this object
	 *      must not be merged into that client's object.
	 *
	 * This method must still provide the following guarantees, even in the face of
	 * "inaccurate" `cutoff` values:
	 * 1. It must not put the object into an invalid state, crash, or cause undefined
	 *    behaviour.
	 * 2. It must still converge.
	 *
	 * However, all other forms of "reasonable" behaviour may be violated. For example, a
	 * counter object may report any value instead of accurately counting some set of
	 * events.
	 */
	fn clean(&mut self, cutoff: &Self::Seq);
}

/** A set of changes produced by [`Mergable::diff`]. */
pub trait Diff: Sized {
	/** Returns `true` if applying this diff would change nothing. */
	fn is_empty(&self) -> bool;

	/** Produce a diff that undoes this one. */
	fn revert(self) -> Result<Self, RevertError>;
}

#[derive(Debug,thiserror::Error)]
#[non_exhaustive]
pub enum ApplyError {
	#[error("Apply target missing critical information: {0:?}")]
	Missing(String),
}

#[derive(Debug,thiserror::Error)]
#[non_exhaustive]
pub enum RevertError {
	#[error("Cannot revert, value has been undone and redone too many times.")]
	TooManyRedos,
	#[error("Apply target missing critical information: {0:?}")]
	Mismatch(String),
}
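
// The module below is a minimal, self-contained sketch of how the `Mergable` and `Diff`
// contracts fit together. `TombstoneSet`, `SetDiff` and their behaviour are illustrative
// assumptions for this example only, not part of the crate's API. It shows the scheme the
// docs above describe: `diff` collects what one replica knows that the other doesn't,
// `merge` is `apply(diff)`, deletions survive merges as tombstones, and `clean` forgets
// tombstones from before the cutoff.
#[cfg(test)]
mod example_sketch {
	use super::*;
	use std::collections::HashMap;

	/// Each key maps to the sequence number of its last change and whether it is
	/// currently present (`true`) or deleted and kept as a tombstone (`false`).
	#[derive(Clone, Debug, Default)]
	struct TombstoneSet {
		entries: HashMap<String, (u64, bool)>,
	}

	impl TombstoneSet {
		fn insert(&mut self, key: &str, seq: u64) {
			self.entries.insert(key.into(), (seq, true));
		}
		fn remove(&mut self, key: &str, seq: u64) {
			self.entries.insert(key.into(), (seq, false));
		}
		fn contains(&self, key: &str) -> bool {
			matches!(self.entries.get(key), Some((_, true)))
		}
	}

	/// The entries of one replica that are newer than the other replica's copies.
	#[derive(Clone, Debug)]
	struct SetDiff {
		entries: Vec<(String, (u64, bool))>,
	}

	impl Diff for SetDiff {
		fn is_empty(&self) -> bool {
			self.entries.is_empty()
		}

		fn revert(self) -> Result<Self, RevertError> {
			// This sketch doesn't record the entries a diff overwrites, so it cannot
			// build an inverse diff; `Mismatch` stands in for that failure here.
			Err(RevertError::Mismatch("overwritten entries not recorded".into()))
		}
	}

	impl Mergable for TombstoneSet {
		type Diff = SetDiff;
		type Seq = u64;

		fn diff(&self, that: &Self) -> Self::Diff {
			// Everything `self` knows that `that` doesn't: entries that `that` is
			// missing entirely or only has an older version of.
			let entries = self
				.entries
				.iter()
				.filter(|(key, (seq, _))| {
					that.entries.get(*key).map_or(true, |(theirs, _)| seq > theirs)
				})
				.map(|(key, entry)| (key.clone(), *entry))
				.collect();
			SetDiff { entries }
		}

		fn apply(&mut self, diff: Self::Diff) -> Result<(), ApplyError> {
			for (key, (seq, live)) in diff.entries {
				// Last writer wins per key, so applying older information is a no-op.
				if self.entries.get(&key).map_or(true, |(ours, _)| seq > *ours) {
					self.entries.insert(key, (seq, live));
				}
			}
			Ok(())
		}

		fn clean(&mut self, cutoff: &Self::Seq) {
			// Forget tombstones from before `cutoff`; per the requirements on `clean`,
			// changes that old must never be merged in again.
			self.entries.retain(|_, (seq, live)| *live || *seq >= *cutoff);
		}
	}

	#[test]
	fn merge_respects_deletions_and_clean_forgets_them() {
		let mut a = TombstoneSet::default();
		a.insert("x", 1);
		let mut b = a.clone();
		b.remove("x", 2);
		// The tombstone in `b` is newer than the live entry in `a`, so the merge
		// removes the key rather than resurrecting it.
		a.merge(b);
		assert!(!a.contains("x"));
		// Once every replica is known to have seen the changes before sequence 3, the
		// tombstone itself can be dropped.
		a.clean(&3);
		assert!(a.entries.is_empty());
	}
}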