// matrix_sdk/event_cache/caches/event_linked_chunk.rs
1// Copyright 2024 The Matrix.org Foundation C.I.C.
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//     http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15#[cfg(feature = "e2e-encryption")]
16use std::collections::BTreeSet;
17
18use as_variant::as_variant;
19use eyeball_im::VectorDiff;
20#[cfg(feature = "e2e-encryption")]
21use matrix_sdk_base::deserialized_responses::TimelineEventKind;
22pub use matrix_sdk_base::event_cache::{Event, Gap};
23use matrix_sdk_base::{
24    event_cache::store::DEFAULT_CHUNK_CAPACITY,
25    linked_chunk::{
26        ChunkContent, ChunkIdentifierGenerator, ChunkMetadata, OrderTracker, RawChunk,
27        lazy_loader::{self, LazyLoaderError},
28    },
29};
30use matrix_sdk_common::linked_chunk::{
31    AsVector, Chunk, ChunkIdentifier, Error, Iter, IterBackward, LinkedChunk, ObservableUpdates,
32    Position,
33};
34use tracing::{instrument, trace};
35
36#[cfg(feature = "e2e-encryption")]
37use crate::event_cache::redecryptor::ResolvedUtd;
38
39/// This type represents a linked chunk of events for a single room or thread.
40#[derive(Debug)]
41pub(in crate::event_cache) struct EventLinkedChunk {
42    /// The real in-memory storage for all the events.
43    chunks: LinkedChunk<DEFAULT_CHUNK_CAPACITY, Event, Gap>,
44
45    /// Type mapping [`Update`]s from [`Self::chunks`] to [`VectorDiff`]s.
46    ///
47    /// [`Update`]: matrix_sdk_base::linked_chunk::Update
48    chunks_updates_as_vectordiffs: AsVector<Event, Gap>,
49
50    /// Tracker of the events ordering in this room.
51    pub order_tracker: OrderTracker<Event, Gap>,
52}
53
54impl Default for EventLinkedChunk {
55    fn default() -> Self {
56        Self::new()
57    }
58}
59
60impl EventLinkedChunk {
    /// Build a new [`EventLinkedChunk`] struct with zero events.
    ///
    /// Equivalent to [`Self::with_initial_linked_chunk`] with no prior chunks
    /// and no metadata.
    pub fn new() -> Self {
        Self::with_initial_linked_chunk(None, None)
    }
65
    /// Build a new [`EventLinkedChunk`] struct with prior chunks knowledge.
    ///
    /// The provided [`LinkedChunk`] must have been built with update history
    /// (both `expect`s below rely on that invariant).
    ///
    /// `full_linked_chunk_metadata` seeds the order tracker with knowledge of
    /// the *full* (possibly not fully loaded) linked chunk, if available.
    pub fn with_initial_linked_chunk(
        linked_chunk: Option<LinkedChunk<DEFAULT_CHUNK_CAPACITY, Event, Gap>>,
        full_linked_chunk_metadata: Option<Vec<ChunkMetadata>>,
    ) -> Self {
        let mut linked_chunk = linked_chunk.unwrap_or_else(LinkedChunk::new_with_update_history);

        let chunks_updates_as_vectordiffs = linked_chunk
            .as_vector()
            .expect("`LinkedChunk` must have been built with `new_with_update_history`");

        let order_tracker = linked_chunk
            .order_tracker(full_linked_chunk_metadata)
            .expect("`LinkedChunk` must have been built with `new_with_update_history`");

        Self { chunks: linked_chunk, chunks_updates_as_vectordiffs, order_tracker }
    }
85
    /// Clear all events.
    ///
    /// All events, all gaps, everything is dropped, move into the void, into
    /// the ether, forever.
    ///
    /// Delegates to [`LinkedChunk::clear`] on the in-memory storage.
    pub fn reset(&mut self) {
        self.chunks.clear();
    }
93
    /// Push events after all events or gaps.
    ///
    /// The last event in `events` is the most recent one.
    ///
    /// Test-only helper; production code goes through the higher-level `push_*`
    /// methods instead.
    #[cfg(test)]
    pub(in crate::event_cache) fn push_events<I>(&mut self, events: I)
    where
        I: IntoIterator<Item = Event>,
        I::IntoIter: ExactSizeIterator,
    {
        self.chunks.push_items_back(events);
    }
105
    /// Replace the gap identified by `gap_identifier`, by events.
    ///
    /// Because the `gap_identifier` can represent non-gap chunk, this method
    /// returns a `Result`.
    ///
    /// This method returns the position of the (first if many) newly created
    /// `Chunk` that contains the `items`; `None` when the gap was removed
    /// outright (empty `events`) and no chunk follows it.
    #[instrument(err, skip_all, fields(gap_identifier, sentry = true))]
    fn replace_gap_at(
        &mut self,
        gap_identifier: ChunkIdentifier,
        events: Vec<Event>,
    ) -> Result<Option<Position>, Error> {
        // As an optimization, we'll remove the chunk if it's a gap that would be
        // replaced with no events.
        //
        // However, our linked chunk requires that it includes at least one chunk in the
        // in-memory representation. We could tweak this invariant, but in the
        // meanwhile, don't remove the gap chunk if it's the only one we know
        // about.
        let has_only_one_chunk = {
            let mut it = self.chunks.chunks();

            // If there's no chunks at all, then we won't be able to find the gap chunk.
            let _ =
                it.next().ok_or(Error::InvalidChunkIdentifier { identifier: gap_identifier })?;

            // If there's no next chunk, we can conclude there's only one.
            it.next().is_none()
        };

        let next_pos = if events.is_empty() && !has_only_one_chunk {
            // There are no new events, so there's no need to create a new empty items
            // chunk; instead, remove the gap.
            self.chunks.remove_empty_chunk_at(gap_identifier)?
        } else {
            // Replace the gap by new events.
            Some(self.chunks.replace_gap_at(events, gap_identifier)?.first_position())
        };

        Ok(next_pos)
    }
148
149    /// Remove some events from the linked chunk.
150    ///
151    /// If a chunk becomes empty, it's going to be removed.
152    #[instrument(err, skip_all, fields(positions, sentry = true))]
153    pub fn remove_events_by_position(&mut self, mut positions: Vec<Position>) -> Result<(), Error> {
154        sort_positions_descending(&mut positions);
155
156        for position in positions {
157            self.chunks.remove_item_at(position)?;
158        }
159
160        Ok(())
161    }
162
    /// Replace event at a specified position.
    ///
    /// `position` must point to a valid item, otherwise the method returns an
    /// error.
    ///
    /// Delegates to [`LinkedChunk::replace_item_at`].
    #[instrument(err, skip_all, fields(position, sentry = true))]
    pub fn replace_event_at(&mut self, position: Position, event: Event) -> Result<(), Error> {
        self.chunks.replace_item_at(position, event)
    }
171
    /// Search for a chunk, and return its identifier.
    ///
    /// Returns `None` when no chunk satisfies `predicate`.
    pub fn chunk_identifier<'a, P>(&'a self, predicate: P) -> Option<ChunkIdentifier>
    where
        P: FnMut(&'a Chunk<DEFAULT_CHUNK_CAPACITY, Event, Gap>) -> bool,
    {
        self.chunks.chunk_identifier(predicate)
    }
179
    /// Iterate over the chunks, forward.
    ///
    /// The oldest chunk comes first. Yields gap chunks as well as item chunks.
    pub fn chunks(&self) -> Iter<'_, DEFAULT_CHUNK_CAPACITY, Event, Gap> {
        self.chunks.chunks()
    }
186
    /// Iterate over the chunks, backward.
    ///
    /// The most recent chunk comes first. Yields gap chunks as well as item
    /// chunks.
    pub fn rchunks(&self) -> IterBackward<'_, DEFAULT_CHUNK_CAPACITY, Event, Gap> {
        self.chunks.rchunks()
    }
193
    /// Iterate over the events, backward.
    ///
    /// The most recent event comes first. Gap chunks contribute nothing: only
    /// `(Position, &Event)` pairs are yielded.
    pub fn revents(&self) -> impl Iterator<Item = (Position, &Event)> {
        self.chunks.ritems()
    }
200
    /// Iterate over the events, forward.
    ///
    /// The oldest event comes first. Gap chunks contribute nothing: only
    /// `(Position, &Event)` pairs are yielded.
    pub fn events(&self) -> impl Iterator<Item = (Position, &Event)> {
        self.chunks.items()
    }
207
    /// Return the order of an event in the room linked chunk.
    ///
    /// Can return `None` if the event can't be found in the linked chunk.
    ///
    /// The order is an index into the *full* linked chunk, so it may be
    /// non-zero even for the first loaded event (see
    /// [`Self::assert_event_ordering`]).
    pub fn event_order(&self, event_pos: Position) -> Option<usize> {
        self.order_tracker.ordering(event_pos)
    }
214
    /// Debug-only check that the order tracker's orderings for the loaded
    /// events form one contiguous, increasing run starting at the first
    /// event's ordering.
    #[cfg(any(test, debug_assertions))]
    #[allow(dead_code)] // Temporarily, until we figure out why it's crashing production builds.
    fn assert_event_ordering(&self) {
        let mut iter = self.chunks.items().enumerate();
        // Nothing to check when no event is loaded.
        let Some((i, (first_event_pos, _))) = iter.next() else {
            return;
        };

        // Sanity check.
        assert_eq!(i, 0);

        // That's the offset in the full linked chunk. Will be 0 if the linked chunk is
        // entirely loaded, may be non-zero otherwise.
        let offset =
            self.event_order(first_event_pos).expect("first event's ordering must be known");

        for (i, (next_pos, _)) in iter {
            let next_index =
                self.event_order(next_pos).expect("next event's ordering must be known");
            // Each loaded event must sit exactly `i` places after the first.
            assert_eq!(offset + i, next_index, "event ordering must be continuous");
        }
    }
237
    /// Get all updates from the room events as [`VectorDiff`].
    ///
    /// Be careful that each `VectorDiff` is returned only once!
    ///
    /// See [`AsVector`] to learn more.
    pub fn updates_as_vector_diffs(&mut self) -> Vec<VectorDiff<Event>> {
        let updates = self.chunks_updates_as_vectordiffs.take();

        // Keep the order tracker in sync with the updates we just drained
        // (`false` = apply the pending updates rather than ignoring them).
        self.order_tracker.flush_updates(false);

        updates
    }
250
    /// Get a mutable reference to the [`LinkedChunk`] updates, aka
    /// [`ObservableUpdates`] to be consumed by the store.
    ///
    /// These updates are expected to be *only* forwarded to storage, as they
    /// might hide some underlying updates to the in-memory chunk; those
    /// updates should be reflected with manual updates to
    /// [`Self::chunks_updates_as_vectordiffs`].
    pub(in super::super) fn store_updates(&mut self) -> &mut ObservableUpdates<Event, Gap> {
        // Safe to `expect`: every constructor goes through
        // `new_with_update_history` (see `with_initial_linked_chunk`).
        self.chunks.updates().expect("this is always built with an update history in the ctor")
    }
261
262    /// Return a nice debug string (a vector of lines) for the linked chunk of
263    /// events for this room.
264    pub fn debug_string(&self) -> Vec<String> {
265        let mut result = Vec::new();
266
267        for chunk in self.chunks.chunks() {
268            let content =
269                chunk_debug_string(chunk.identifier(), chunk.content(), &self.order_tracker);
270            let lazy_previous = if let Some(cid) = chunk.lazy_previous() {
271                format!(" (lazy previous = {})", cid.index())
272            } else {
273                "".to_owned()
274            };
275            let line = format!("chunk #{}{lazy_previous}: {content}", chunk.identifier().index());
276
277            result.push(line);
278        }
279
280        result
281    }
282
283    /// Return the latest gap, if any.
284    ///
285    /// Latest means "closest to the end", or, since events are ordered
286    /// according to the sync ordering, this means "the most recent one".
287    pub fn rgap(&self) -> Option<Gap> {
288        self.rchunks()
289            .find_map(|chunk| as_variant!(chunk.content(), ChunkContent::Gap(gap) => gap.clone()))
290    }
291
292    /// Add a gap (i.e. pagination token) to the end of the linked chunk.
293    ///
294    /// Also make sure to get rid of empty event chunks before the gap, as they
295    /// wouldn't be useful to keep.
296    pub fn push_gap(&mut self, gap: Gap) {
297        // As a tiny optimization: remove the last chunk if it's an empty event
298        // one, as it's not useful to keep it before a gap.
299        let prev_chunk_to_remove = self.rchunks().next().and_then(|chunk| {
300            (chunk.is_items() && chunk.num_items() == 0).then_some(chunk.identifier())
301        });
302
303        self.chunks.push_gap_back(gap);
304
305        if let Some(prev_chunk_to_remove) = prev_chunk_to_remove {
306            self.chunks
307                .remove_empty_chunk_at(prev_chunk_to_remove)
308                .expect("we just checked the chunk is there, and it's an empty item chunk");
309        }
310    }
311
312    /// Add the previous back-pagination token (if present), followed by the
313    /// timeline events themselves.
314    pub fn push_live_events(&mut self, new_gap: Option<Gap>, events: &[Event]) {
315        if let Some(new_gap) = new_gap {
316            self.push_gap(new_gap);
317        }
318        self.chunks.push_items_back(events.iter().cloned());
319    }
320
    /// Add events from a backwards pagination for this linked chunk by updating
    /// the in-memory linked chunk with the results.
    ///
    /// ## Arguments
    ///
    /// - `prev_gap_id`: the identifier of the previous gap, if any.
    /// - `new_gap`: the new gap to insert, if any. If missing, we've likely
    ///   reached the start of the timeline.
    /// - `events`: new events to insert, in the topological ordering (i.e. from
    ///   oldest to most recent).
    ///
    /// ## Returns
    ///
    /// Returns a boolean indicating whether we've hit the start of the
    /// timeline/linked chunk.
    pub fn push_backwards_pagination_events(
        &mut self,
        prev_gap_id: Option<ChunkIdentifier>,
        new_gap: Option<Gap>,
        events: &[Event],
    ) -> bool {
        // Position of the oldest event currently known, used to prepend the
        // paginated events when there's no gap to replace.
        let first_event_pos = self.events().next().map(|(item_pos, _)| item_pos);

        // First, insert events.
        let insert_new_gap_pos = if let Some(gap_id) = prev_gap_id {
            // There is a prior gap, let's replace it with the new events!
            trace!("replacing previous gap with the back-paginated events");

            // Replace the gap with the events we just deduplicated. This might get rid of
            // the underlying gap, if the conditions are favorable to
            // us.
            self.replace_gap_at(gap_id, events.to_vec())
                .expect("gap_identifier is a valid chunk id we read previously")
        } else if let Some(pos) = first_event_pos {
            // No prior gap, but we had some events: assume we need to prepend events
            // before those.
            trace!("inserted events before the first known event");

            self.chunks
                .insert_items_at(pos, events.to_vec())
                .expect("pos is a valid position we just read above");

            Some(pos)
        } else {
            // No prior gap, and no prior events: push the events.
            trace!("pushing events received from back-pagination");

            self.chunks.push_items_back(events.to_vec());

            // A new gap may be inserted before the new events, if there are any.
            self.events().next().map(|(item_pos, _)| item_pos)
        };

        // And insert the new gap if needs be.
        //
        // We only do this when at least one new, non-duplicated event, has been added
        // to the chunk. Otherwise it means we've back-paginated all the
        // known events.
        let has_new_gap = new_gap.is_some();
        if let Some(new_gap) = new_gap {
            if let Some(new_pos) = insert_new_gap_pos {
                self.chunks
                    .insert_gap_at(new_gap, new_pos)
                    .expect("events_chunk_pos represents a valid chunk position");
            } else {
                self.chunks.push_gap_back(new_gap);
            }
        }

        // There could be an inconsistency between the network (which thinks we hit the
        // start of the timeline) and the disk (which has the initial empty
        // chunks), so tweak the `reached_start` value so that it reflects the
        // disk state in priority instead.

        let has_gaps = self.chunks().any(|chunk| chunk.is_gap());

        // Whether the first chunk has no predecessors or not.
        let first_chunk_is_definitive_head =
            self.chunks().next().map(|chunk| chunk.is_definitive_head());

        // The network believes we reached the start iff it didn't return a new gap.
        let network_reached_start = !has_new_gap;
        let reached_start =
            !has_gaps && first_chunk_is_definitive_head.unwrap_or(network_reached_start);

        trace!(
            ?network_reached_start,
            ?has_gaps,
            ?first_chunk_is_definitive_head,
            ?reached_start,
            "finished handling network back-pagination"
        );

        reached_start
    }
415
    /// Add events from a forwards pagination for this linked chunk by updating
    /// the in-memory linked chunk with the results.
    ///
    /// This is similar to [`Self::push_backwards_pagination_events`] but for
    /// forward pagination where new events are appended at the end.
    ///
    /// ## Arguments
    ///
    /// - `next_gap_id`: the identifier of the next gap (at the back), if any.
    /// - `new_gap`: the new gap to insert at the back, if any. If missing,
    ///   we've likely reached the end of the timeline.
    /// - `events`: new events to insert, in topological order (oldest to
    ///   newest).
    ///
    /// ## Returns
    ///
    /// Returns a boolean indicating whether we've hit the end of the timeline.
    pub fn push_forwards_pagination_events(
        &mut self,
        next_gap_id: Option<ChunkIdentifier>,
        new_gap: Option<Gap>,
        events: &[Event],
    ) -> bool {
        // First, replace the gap (if any) or append events.
        if let Some(gap_id) = next_gap_id {
            // There is a gap at the back, replace it with the new events.
            trace!("replacing next gap with forward-paginated events");

            self.replace_gap_at(gap_id, events.to_vec())
                .expect("gap_identifier is a valid chunk id we read previously");
        } else if !events.is_empty() {
            // No prior gap, just push the events at the back.
            trace!("pushing events received from forward-pagination");
            self.chunks.push_items_back(events.to_vec());
        }

        // Insert the new gap at the back if needed. No new gap means there is
        // nothing left to paginate forwards.
        let reached_end = new_gap.is_none();
        if let Some(new_gap) = new_gap {
            self.chunks.push_gap_back(new_gap);
        }

        trace!(?reached_end, "finished handling network forward-pagination");

        reached_end
    }
462
463    /// Find an event in the event linked chunk by its event ID, and return its
464    /// location.
465    #[cfg(feature = "e2e-encryption")]
466    pub fn find_event(&self, event_id: &ruma::EventId) -> Option<(Position, Event)> {
467        for (position, event) in self.revents() {
468            if event.event_id().as_deref() == Some(event_id) {
469                return Some((position, event.clone()));
470            }
471        }
472        None
473    }
474
    /// Try to locate the events in the linked chunk corresponding to the given
    /// list of decrypted events, and replace them.
    ///
    /// Returns true if at least one event has been replaced, false otherwise.
    #[cfg(feature = "e2e-encryption")]
    pub fn replace_utds(&mut self, events: &[ResolvedUtd]) -> bool {
        // Collect all event ids currently in the linked chunk, once.
        let event_set =
            self.events().filter_map(|(_pos, ev)| ev.event_id()).collect::<BTreeSet<_>>();

        let mut replaced_some = false;

        for (event_id, decrypted, actions) in events {
            // As a performance optimization, do a cheap membership check in the
            // precomputed id set before scanning the linked chunk for the event.

            if !event_set.contains(event_id) {
                continue;
            }

            // The event should be in the linked chunk.
            let Some((position, mut target_event)) = self.find_event(event_id) else {
                continue;
            };

            // Swap the still-encrypted payload for the decrypted one.
            target_event.kind = TimelineEventKind::Decrypted(decrypted.clone());

            if let Some(actions) = actions {
                target_event.set_push_actions(actions.clone());
            }

            self.replace_event_at(position, target_event.clone())
                .expect("position should be valid");

            replaced_some = true;
        }

        replaced_some
    }
513
514    /// Return the first chunk as a gap, if it's one.
515    pub fn first_chunk_as_gap(&self) -> Option<(ChunkIdentifier, Gap)> {
516        self.chunks().next().and_then(|chunk| {
517            if let ChunkContent::Gap(gap) = chunk.content() {
518                Some((chunk.identifier(), gap.clone()))
519            } else {
520                None
521            }
522        })
523    }
524
525    /// Return the last chunk as a gap, if it's one.
526    pub fn last_chunk_as_gap(&self) -> Option<(ChunkIdentifier, Gap)> {
527        self.rchunks().next().and_then(|chunk| {
528            if let ChunkContent::Gap(gap) = chunk.content() {
529                Some((chunk.identifier(), gap.clone()))
530            } else {
531                None
532            }
533        })
534    }
535}
536
537// Methods related to lazy-loading.
538impl EventLinkedChunk {
    /// Inhibits all the linked chunk updates caused by the function `f` on the
    /// ordering tracker.
    ///
    /// Updates to the linked chunk that happen because of lazy loading must not
    /// be taken into account by the order tracker, otherwise the
    /// fully-loaded state (tracked by the order tracker) wouldn't match
    /// reality anymore. This provides a facility to help applying such
    /// updates.
    fn inhibit_updates_to_ordering_tracker<F: FnOnce(&mut Self) -> R, R>(&mut self, f: F) -> R {
        // Start by flushing previous pending updates to the chunk ordering, if any.
        // (`false` = apply them; they pre-date `f` and must not be lost.)
        self.order_tracker.flush_updates(false);

        // Call the function.
        let r = f(self);

        // Now, flush other pending updates which have been caused by the function, and
        // ignore them (`true` = discard instead of applying).
        self.order_tracker.flush_updates(true);

        r
    }
560
    /// Replace the events with the given last chunk of events and generator.
    ///
    /// Happens only during lazy loading.
    ///
    /// This clears all the chunks in memory before resetting to the new chunk,
    /// if provided.
    ///
    /// Errors are propagated from [`lazy_loader::replace_with`].
    #[instrument(err, skip_all, fields(sentry = true))]
    pub(in super::super) fn replace_with(
        &mut self,
        last_chunk: Option<RawChunk<Event, Gap>>,
        chunk_identifier_generator: ChunkIdentifierGenerator,
    ) -> Result<(), LazyLoaderError> {
        // Since `replace_with` is used only to unload some chunks, we don't want it to
        // affect the chunk ordering.
        self.inhibit_updates_to_ordering_tracker(move |this| {
            lazy_loader::replace_with(&mut this.chunks, last_chunk, chunk_identifier_generator)
        })
    }
579
    /// Prepends a lazily-loaded chunk at the beginning of the linked chunk.
    ///
    /// Errors are propagated from [`lazy_loader::insert_new_first_chunk`].
    #[instrument(err, skip_all, fields(sentry = true))]
    pub(in super::super) fn insert_new_chunk_as_first(
        &mut self,
        raw_new_first_chunk: RawChunk<Event, Gap>,
    ) -> Result<(), LazyLoaderError> {
        // This is only used when reinserting a chunk that was in persisted storage, so
        // we don't need to touch the chunk ordering for this.
        self.inhibit_updates_to_ordering_tracker(move |this| {
            lazy_loader::insert_new_first_chunk(&mut this.chunks, raw_new_first_chunk)
        })
    }
592}
593
/// Create a debug string for a [`ChunkContent`] for an event/gap pair.
///
/// Gaps render as `gap['<token>']`; item chunks render as
/// `events[#<order>: <truncated event id>, …]`.
fn chunk_debug_string(
    chunk_id: ChunkIdentifier,
    content: &ChunkContent<Event, Gap>,
    order_tracker: &OrderTracker<Event, Gap>,
) -> String {
    match content {
        ChunkContent::Gap(Gap { token: prev_token }) => {
            format!("gap['{prev_token}']")
        }
        ChunkContent::Items(vec) => {
            let items = vec
                .iter()
                .enumerate()
                .map(|(i, event)| {
                    event.event_id().map_or_else(
                        || "<no event id>".to_owned(),
                        |id| {
                            let pos = Position::new(chunk_id, i);
                            // NOTE(review): unwrap assumes the tracker knows the
                            // ordering of every loaded event — matches the
                            // invariant checked by `assert_event_ordering`.
                            let order = format!("#{}: ", order_tracker.ordering(pos).unwrap());

                            // Limit event ids to 8 chars *after* the $.
                            let event_id = id.as_str().chars().take(1 + 8).collect::<String>();

                            format!("{order}{event_id}")
                        },
                    )
                })
                .collect::<Vec<_>>()
                .join(", ");

            format!("events[{items}]")
        }
    }
}
629
630/// Sort positions of events so that events can be removed safely without
631/// messing their position.
632///
633/// Events must be sorted by their position index, from greatest to lowest, so
634/// that all positions remain valid inside the same chunk while they are being
635/// removed. For the sake of debugability, we also sort by position chunk
636/// identifier, but this is not required.
637pub(in super::super) fn sort_positions_descending(positions: &mut [Position]) {
638    positions.sort_by(|a, b| {
639        b.chunk_identifier()
640            .cmp(&a.chunk_identifier())
641            .then_with(|| a.index().cmp(&b.index()).reverse())
642    });
643}
644
645#[cfg(test)]
646mod tests {
647    use assert_matches::assert_matches;
648    use assert_matches2::assert_let;
649    use matrix_sdk_test::{ALICE, DEFAULT_TEST_ROOM_ID, event_factory::EventFactory};
650    use ruma::{EventId, OwnedEventId, event_id, user_id};
651
652    use super::*;
653
    /// Assert that `$events_iterator` yields exactly the listed events, each at
    /// the expected `(chunk identifier, index)` position and in order, with no
    /// trailing events.
    macro_rules! assert_events_eq {
        ( $events_iterator:expr, [ $( ( $event_id:ident at ( $chunk_identifier:literal, $index:literal ) ) ),* $(,)? ] ) => {
            {
                let mut events = $events_iterator;

                $(
                    assert_let!(Some((position, event)) = events.next());
                    assert_eq!(position.chunk_identifier(), $chunk_identifier );
                    assert_eq!(position.index(), $index );
                    assert_eq!(event.event_id().unwrap(), $event_id );
                )*

                assert!(events.next().is_none(), "No more events are expected");
            }
        };
    }
670
671    fn new_event(event_id: &str) -> (OwnedEventId, Event) {
672        let event_id = EventId::parse(event_id).unwrap();
673        let event = EventFactory::new()
674            .text_msg("")
675            .sender(user_id!("@mnt_io:matrix.org"))
676            .event_id(&event_id)
677            .into_event();
678
679        (event_id, event)
680    }
681
682    #[test]
683    fn test_new_event_linked_chunk_has_zero_events() {
684        let linked_chunk = EventLinkedChunk::new();
685
686        assert_eq!(linked_chunk.events().count(), 0);
687    }
688
    #[test]
    fn test_replace_gap_at() {
        let (event_id_0, event_0) = new_event("$ev0");
        let (event_id_1, event_1) = new_event("$ev1");
        let (event_id_2, event_2) = new_event("$ev2");

        let mut linked_chunk = EventLinkedChunk::new();

        // Shape: [ev0] - gap("hello").
        linked_chunk.chunks.push_items_back([event_0]);
        linked_chunk.chunks.push_gap_back(Gap { token: "hello".to_owned() });

        let gap_chunk_id = linked_chunk
            .chunks()
            .find_map(|chunk| chunk.is_gap().then_some(chunk.identifier()))
            .unwrap();

        // Replacing the gap creates a fresh items chunk holding ev1 and ev2.
        linked_chunk.replace_gap_at(gap_chunk_id, vec![event_1, event_2]).unwrap();

        assert_events_eq!(
            linked_chunk.events(),
            [
                (event_id_0 at (0, 0)),
                (event_id_1 at (2, 0)),
                (event_id_2 at (2, 1)),
            ]
        );

        // Only two items chunks remain: the gap is gone.
        {
            let mut chunks = linked_chunk.chunks();

            assert_let!(Some(chunk) = chunks.next());
            assert!(chunk.is_items());

            assert_let!(Some(chunk) = chunks.next());
            assert!(chunk.is_items());

            assert!(chunks.next().is_none());
        }
    }
728
    #[test]
    fn test_replace_gap_at_with_no_new_events() {
        let (_, event_0) = new_event("$ev0");
        let (_, event_1) = new_event("$ev1");
        let (_, event_2) = new_event("$ev2");

        let mut linked_chunk = EventLinkedChunk::new();

        // Shape: [ev0, ev1] - gap("middle") - [ev2] - gap("end").
        linked_chunk.chunks.push_items_back([event_0, event_1]);
        linked_chunk.chunks.push_gap_back(Gap { token: "middle".to_owned() });
        linked_chunk.chunks.push_items_back([event_2]);
        linked_chunk.chunks.push_gap_back(Gap { token: "end".to_owned() });

        // Remove the first gap.
        let first_gap_id = linked_chunk
            .chunks()
            .find_map(|chunk| chunk.is_gap().then_some(chunk.identifier()))
            .unwrap();

        // The next insert position is the next chunk's start.
        let pos = linked_chunk.replace_gap_at(first_gap_id, vec![]).unwrap();
        assert_eq!(pos, Some(Position::new(ChunkIdentifier::new(2), 0)));

        // Remove the second gap.
        let second_gap_id = linked_chunk
            .chunks()
            .find_map(|chunk| chunk.is_gap().then_some(chunk.identifier()))
            .unwrap();

        // No next insert position: the gap was the last chunk.
        let pos = linked_chunk.replace_gap_at(second_gap_id, vec![]).unwrap();
        assert!(pos.is_none());
    }
762
    #[test]
    fn test_remove_events() {
        let (event_id_0, event_0) = new_event("$ev0");
        let (event_id_1, event_1) = new_event("$ev1");
        let (event_id_2, event_2) = new_event("$ev2");
        let (event_id_3, event_3) = new_event("$ev3");

        // Push some events. Shape: [ev0, ev1] - gap - [ev2, ev3].
        let mut linked_chunk = EventLinkedChunk::new();
        linked_chunk.chunks.push_items_back([event_0, event_1]);
        linked_chunk.chunks.push_gap_back(Gap { token: "hello".to_owned() });
        linked_chunk.chunks.push_items_back([event_2, event_3]);

        assert_events_eq!(
            linked_chunk.events(),
            [
                (event_id_0 at (0, 0)),
                (event_id_1 at (0, 1)),
                (event_id_2 at (2, 0)),
                (event_id_3 at (2, 1)),
            ]
        );
        assert_eq!(linked_chunk.chunks().count(), 3);

        // Remove some events (one per items chunk).
        linked_chunk
            .remove_events_by_position(vec![
                Position::new(ChunkIdentifier::new(2), 1),
                Position::new(ChunkIdentifier::new(0), 1),
            ])
            .unwrap();

        assert_events_eq!(
            linked_chunk.events(),
            [
                (event_id_0 at (0, 0)),
                (event_id_2 at (2, 0)),
            ]
        );

        // Ensure chunks are removed once empty.
        linked_chunk
            .remove_events_by_position(vec![Position::new(ChunkIdentifier::new(2), 0)])
            .unwrap();

        assert_events_eq!(
            linked_chunk.events(),
            [
                (event_id_0 at (0, 0)),
            ]
        );
        assert_eq!(linked_chunk.chunks().count(), 2);
    }
816
817    #[test]
818    fn test_remove_events_unknown_event() {
819        // Push ZERO event.
820        let mut linked_chunk = EventLinkedChunk::new();
821
822        assert_events_eq!(linked_chunk.events(), []);
823
824        // Remove one undefined event.
825        // An error is expected.
826        linked_chunk
827            .remove_events_by_position(vec![Position::new(ChunkIdentifier::new(42), 153)])
828            .unwrap_err();
829
830        assert_events_eq!(linked_chunk.events(), []);
831
832        let mut events = linked_chunk.events();
833        assert!(events.next().is_none());
834    }
835
836    #[test]
837    fn test_reset() {
838        let (event_id_0, event_0) = new_event("$ev0");
839        let (event_id_1, event_1) = new_event("$ev1");
840        let (event_id_2, event_2) = new_event("$ev2");
841        let (event_id_3, event_3) = new_event("$ev3");
842
843        // Push some events.
844        let mut linked_chunk = EventLinkedChunk::new();
845        linked_chunk.chunks.push_items_back([event_0, event_1]);
846        linked_chunk.chunks.push_gap_back(Gap { token: "raclette".to_owned() });
847        linked_chunk.chunks.push_items_back([event_2]);
848
849        // Read the updates as `VectorDiff`.
850        let diffs = linked_chunk.updates_as_vector_diffs();
851
852        assert_eq!(diffs.len(), 2);
853
854        assert_matches!(
855            &diffs[0],
856            VectorDiff::Append { values } => {
857                assert_eq!(values.len(), 2);
858                assert_eq!(values[0].event_id(), Some(event_id_0));
859                assert_eq!(values[1].event_id(), Some(event_id_1));
860            }
861        );
862        assert_matches!(
863            &diffs[1],
864            VectorDiff::Append { values } => {
865                assert_eq!(values.len(), 1);
866                assert_eq!(values[0].event_id(), Some(event_id_2));
867            }
868        );
869
870        // Now we can reset and see what happens.
871        linked_chunk.reset();
872        linked_chunk.chunks.push_items_back([event_3]);
873
874        // Read the updates as `VectorDiff`.
875        let diffs = linked_chunk.updates_as_vector_diffs();
876
877        assert_eq!(diffs.len(), 2);
878
879        assert_matches!(&diffs[0], VectorDiff::Clear);
880        assert_matches!(
881            &diffs[1],
882            VectorDiff::Append { values } => {
883                assert_eq!(values.len(), 1);
884                assert_eq!(values[0].event_id(), Some(event_id_3));
885            }
886        );
887    }
888
889    #[test]
890    fn test_debug_string() {
891        let event_factory = EventFactory::new().room(&DEFAULT_TEST_ROOM_ID).sender(*ALICE);
892
893        let mut linked_chunk = EventLinkedChunk::new();
894        linked_chunk.chunks.push_items_back(vec![
895            event_factory
896                .text_msg("hey")
897                .event_id(event_id!("$123456789101112131415617181920"))
898                .into_event(),
899            event_factory.text_msg("you").event_id(event_id!("$2")).into_event(),
900        ]);
901        linked_chunk.chunks.push_gap_back(Gap { token: "raclette".to_owned() });
902
903        // Flush updates to the order tracker.
904        let _ = linked_chunk.updates_as_vector_diffs();
905
906        let output = linked_chunk.debug_string();
907
908        assert_eq!(output.len(), 2);
909        assert_eq!(&output[0], "chunk #0: events[#0: $12345678, #1: $2]");
910        assert_eq!(&output[1], "chunk #1: gap['raclette']");
911    }
912
913    #[test]
914    fn test_sort_positions_descending() {
915        let mut positions = vec![
916            Position::new(ChunkIdentifier::new(2), 1),
917            Position::new(ChunkIdentifier::new(1), 0),
918            Position::new(ChunkIdentifier::new(2), 0),
919            Position::new(ChunkIdentifier::new(1), 1),
920            Position::new(ChunkIdentifier::new(0), 0),
921        ];
922
923        sort_positions_descending(&mut positions);
924
925        assert_eq!(
926            positions,
927            &[
928                Position::new(ChunkIdentifier::new(2), 1),
929                Position::new(ChunkIdentifier::new(2), 0),
930                Position::new(ChunkIdentifier::new(1), 1),
931                Position::new(ChunkIdentifier::new(1), 0),
932                Position::new(ChunkIdentifier::new(0), 0),
933            ]
934        );
935    }
936}