// differential_dataflow/trace/mod.rs
//! Traits and data structures representing a collection trace.
//!
//! A collection trace is a set of updates of the form `(key, val, time, diff)`, which determine the contents
//! of a collection at given times by accumulating updates whose time field is less or equal to the target field.
//!
//! The `Trace` trait describes those types and methods that a data structure must implement to be viewed as a
//! collection trace. This trait allows operator implementations to be generic with respect to the type of trace,
//! and allows various data structures to be interpretable as multiple different types of trace.
pub mod cursor;
pub mod description;
pub mod implementations;
pub mod wrappers;

use timely::progress::Timestamp;
use timely::progress::{Antichain, frontier::AntichainRef};

use crate::logging::Logger;
use crate::trace::implementations::LayoutExt;

pub use self::cursor::Cursor;
pub use self::description::Description;
23
/// A type used to express how much effort a trace should exert even in the absence of updates.
///
/// The argument is a list of `(level, batch count, total length)` triples describing the trace's
/// batches (see `Trace::set_exert_logic`); the result is the number of updates to process, or
/// `None` if no work is required.
pub type ExertionLogic = std::sync::Arc<dyn for<'a> Fn(&'a [(usize, usize, usize)])->Option<usize>+Send+Sync>;
26
// The traces and batch and cursors want the flexibility to appear as if they manage certain types of keys and
// values and such, while perhaps using other representations, I'm thinking mostly of wrappers around the keys
// and vals that change the `Ord` implementation, or stash hash codes, or the like.
//
// This complicates what requirements we make so that the trace is still usable by someone who knows only about
// the base key and value types. For example, the complex types should likely dereference to the simpler types,
// so that the user can make sense of the result as if they were given references to the simpler types. At the
// same time, the collection should be formable from base types (perhaps we need an `Into` or `From` constraint)
// and we should, somehow, be able to take a reference to the simple types to compare against the more complex
// types. This second one is also like an `Into` or `From` constraint, except that we start with a reference and
// really don't need anything more complex than a reference, but we can't form an owned copy of the complex type
// without cloning it.
//
// We could just start by cloning things. Worry about wrapping references later on.
/// A trace whose contents may be read.
///
/// This is a restricted interface to the more general `Trace` trait, which extends this trait with further methods
/// to update the contents of the trace. These methods are used to examine the contents, and to update the reader's
/// capabilities (which may release restrictions on the mutations to the underlying trace and cause work to happen).
pub trait TraceReader : LayoutExt {

    /// The type of an immutable collection of updates.
    ///
    /// The `LayoutExt` equalities pin the batch's key, value, time, and diff types (and their
    /// containers) to those of the trace itself, so generic operator code can use them
    /// interchangeably with the trace's own associated types.
    type Batch:
        'static +
        Clone +
        BatchReader +
        WithLayout<Layout = Self::Layout> +
        for<'a> LayoutExt<
            Key<'a> = Self::Key<'a>,
            Val<'a> = Self::Val<'a>,
            ValOwn = Self::ValOwn,
            Time = Self::Time,
            TimeGat<'a> = Self::TimeGat<'a>,
            Diff = Self::Diff,
            DiffGat<'a> = Self::DiffGat<'a>,
            KeyContainer = Self::KeyContainer,
            ValContainer = Self::ValContainer,
            TimeContainer = Self::TimeContainer,
            DiffContainer = Self::DiffContainer,
        >;

    /// Storage type for `Self::Cursor`. Likely related to `Self::Batch`.
    type Storage;

    /// The type used to enumerate the collection's contents.
    ///
    /// As with `Self::Batch`, the cursor's layout is pinned to the trace's own layout.
    type Cursor:
        Cursor<Storage=Self::Storage> +
        WithLayout<Layout = Self::Layout> +
        for<'a> LayoutExt<
            Key<'a> = Self::Key<'a>,
            Val<'a> = Self::Val<'a>,
            ValOwn = Self::ValOwn,
            Time = Self::Time,
            TimeGat<'a> = Self::TimeGat<'a>,
            Diff = Self::Diff,
            DiffGat<'a> = Self::DiffGat<'a>,
            KeyContainer = Self::KeyContainer,
            ValContainer = Self::ValContainer,
            TimeContainer = Self::TimeContainer,
            DiffContainer = Self::DiffContainer,
        >;

    /// Provides a cursor over updates contained in the trace.
    ///
    /// This is `cursor_through` with an empty frontier, which imposes no upper restriction.
    ///
    /// # Panics
    ///
    /// Panics if a complete cursor cannot be acquired, for example if the trace is closed.
    fn cursor(&mut self) -> (Self::Cursor, Self::Storage) {
        if let Some(cursor) = self.cursor_through(Antichain::new().borrow()) {
            cursor
        }
        else {
            panic!("unable to acquire complete cursor for trace; is it closed?");
        }
    }

    /// Acquires a cursor to the restriction of the collection's contents to updates at times not greater or
    /// equal to an element of `upper`.
    ///
    /// This method is expected to work if called with an `upper` that (i) was an observed bound in batches from
    /// the trace, and (ii) the trace has not been advanced beyond `upper`. Practically, the implementation should
    /// be expected to look for a "clean cut" using `upper`, and if it finds such a cut can return a cursor. This
    /// should allow `upper` such as `&[]` as used by `self.cursor()`, though it is difficult to imagine other uses.
    fn cursor_through(&mut self, upper: AntichainRef<Self::Time>) -> Option<(Self::Cursor, Self::Storage)>;

    /// Advances the frontier that constrains logical compaction.
    ///
    /// Logical compaction is the ability of the trace to change the times of the updates it contains.
    /// Update times may be changed as long as their comparison to all query times beyond the logical compaction
    /// frontier remains unchanged. Practically, this means that groups of timestamps not beyond the frontier can
    /// be coalesced into fewer representative times.
    ///
    /// Logical compaction is important, as it allows the trace to forget historical distinctions between update
    /// times, and maintain a compact memory footprint over an unbounded update history.
    ///
    /// By advancing the logical compaction frontier, the caller unblocks merging of otherwise equivalent updates,
    /// but loses the ability to observe historical detail that is not beyond `frontier`.
    ///
    /// It is an error to call this method with a frontier not equal to or beyond the most recent arguments to
    /// this method, or the initial value of `get_logical_compaction()` if this method has not yet been called.
    fn set_logical_compaction(&mut self, frontier: AntichainRef<Self::Time>);

    /// Reports the logical compaction frontier.
    ///
    /// All update times beyond this frontier will be presented with their original times, and all update times
    /// not beyond this frontier will present as a time that compares identically with all query times beyond
    /// this frontier. Practically, update times not beyond this frontier should not be taken to be accurate as
    /// presented, and should be used carefully, only in accumulation to times that are beyond the frontier.
    fn get_logical_compaction(&mut self) -> AntichainRef<'_, Self::Time>;

    /// Advances the frontier that constrains physical compaction.
    ///
    /// Physical compaction is the ability of the trace to merge the batches of updates it maintains. Physical
    /// compaction does not change the updates or their timestamps, although it is also the moment at which
    /// logical compaction is most likely to happen.
    ///
    /// Physical compaction allows the trace to maintain a logarithmic number of batches of updates, which is
    /// what allows the trace to provide efficient random access by keys and values.
    ///
    /// By advancing the physical compaction frontier, the caller unblocks the merging of batches of updates,
    /// but loses the ability to create a cursor through any frontier not beyond `frontier`.
    ///
    /// It is an error to call this method with a frontier not equal to or beyond the most recent arguments to
    /// this method, or the initial value of `get_physical_compaction()` if this method has not yet been called.
    fn set_physical_compaction(&mut self, frontier: AntichainRef<'_, Self::Time>);

    /// Reports the physical compaction frontier.
    ///
    /// All batches containing updates beyond this frontier will not be merged with other batches. This allows
    /// the caller to create a cursor through any frontier beyond the physical compaction frontier, with the
    /// `cursor_through()` method. This functionality is primarily of interest to the `join` operator, and any
    /// other operators who need to take notice of the physical structure of update batches.
    fn get_physical_compaction(&mut self) -> AntichainRef<'_, Self::Time>;

    /// Maps logic across the non-empty sequence of batches in the trace.
    ///
    /// This is currently used only to extract historical data to prime late-starting operators who want to reproduce
    /// the stream of batches moving past the trace. It could also be a fine basis for a default implementation of the
    /// cursor methods, as they (by default) just move through batches accumulating cursors into a cursor list.
    fn map_batches<F: FnMut(&Self::Batch)>(&self, f: F);

    /// Reads the upper frontier of committed times.
    ///
    /// Clears `target` and seeds it with the minimum timestamp, then overwrites it with the upper
    /// frontier of each batch visited by `map_batches`; the final value is the upper frontier of
    /// the last batch visited, or the minimum frontier when the trace contains no batches.
    #[inline]
    fn read_upper(&mut self, target: &mut Antichain<Self::Time>) {
        target.clear();
        // Seed with the minimum time so an empty trace reports the minimal upper frontier.
        target.insert(<Self::Time as timely::progress::Timestamp>::minimum());
        self.map_batches(|batch| {
            target.clone_from(batch.upper());
        });
    }

    /// Advances `upper` by any empty batches.
    ///
    /// An empty batch whose `batch.lower` bound equals the current
    /// contents of `upper` will advance `upper` to `batch.upper`.
    /// Taken across all batches, this should advance `upper` across
    /// empty batch regions.
    fn advance_upper(&mut self, upper: &mut Antichain<Self::Time>) {
        self.map_batches(|batch| {
            if batch.is_empty() && batch.lower() == upper {
                upper.clone_from(batch.upper());
            }
        });
    }

}
194
/// An append-only collection of `(key, val, time, diff)` tuples.
///
/// The trace must pretend to look like a collection of `(Key, Val, Time, isize)` tuples, but is permitted
/// to introduce new types `KeyRef`, `ValRef`, and `TimeRef` which can be dereferenced to the types above.
///
/// The trace must be constructible from, and navigable by the `Key`, `Val`, `Time` types, but does not need
/// to return them.
pub trait Trace : TraceReader<Batch: Batch> {

    /// Allocates a new empty trace.
    ///
    /// NOTE(review): how `info`, `logging`, and `activator` are used is up to the implementor;
    /// confirm against concrete implementations before relying on specific semantics.
    fn new(
        info: ::timely::dataflow::operators::generic::OperatorInfo,
        logging: Option<crate::logging::Logger>,
        activator: Option<timely::scheduling::activate::Activator>,
    ) -> Self;

    /// Exert merge effort, even without updates.
    fn exert(&mut self);

    /// Sets the logic for exertion in the absence of updates.
    ///
    /// The function receives an iterator over batch levels, from large to small, as triples `(level, count, length)`,
    /// indicating the level, the number of batches, and their total length in updates. It should return a number of
    /// updates to perform, or `None` if no work is required.
    fn set_exert_logic(&mut self, logic: ExertionLogic);

    /// Introduces a batch of updates to the trace.
    ///
    /// Batches describe the time intervals they contain, and they should be added to the trace in contiguous
    /// intervals. If a batch arrives with a lower bound that does not equal the upper bound of the most recent
    /// addition, the trace will add an empty batch. It is an error to then try to populate that region of time.
    ///
    /// This restriction could be relaxed, especially if we discover ways in which batch interval order could
    /// commute. For now, the trace should complain, to the extent that it cares about contiguous intervals.
    fn insert(&mut self, batch: Self::Batch);

    /// Introduces an empty batch concluding the trace.
    ///
    /// This method should be logically equivalent to introducing an empty batch whose lower frontier equals
    /// the upper frontier of the most recently introduced batch, and whose upper frontier is empty.
    fn close(&mut self);
}
237
use crate::trace::implementations::WithLayout;

/// A batch of updates whose contents may be read.
///
/// This is a restricted interface to batches of updates, which support the reading of the batch's contents,
/// but do not expose ways to construct the batches. This trait is appropriate for views of the batch, and is
/// especially useful for views derived from other sources in ways that prevent the construction of batches
/// from the type of data in the view (for example, filtered views, or views with extended time coordinates).
pub trait BatchReader : LayoutExt + Sized {

    /// The type used to enumerate the batch's contents.
    ///
    /// The cursor's layout is pinned to the batch's own layout, and the batch itself serves
    /// as the cursor's storage.
    type Cursor:
        Cursor<Storage=Self> +
        WithLayout<Layout = Self::Layout> +
        for<'a> LayoutExt<
            Key<'a> = Self::Key<'a>,
            Val<'a> = Self::Val<'a>,
            ValOwn = Self::ValOwn,
            Time = Self::Time,
            TimeGat<'a> = Self::TimeGat<'a>,
            Diff = Self::Diff,
            DiffGat<'a> = Self::DiffGat<'a>,
            KeyContainer = Self::KeyContainer,
            ValContainer = Self::ValContainer,
            TimeContainer = Self::TimeContainer,
            DiffContainer = Self::DiffContainer,
        >;

    /// Acquires a cursor to the batch's contents.
    fn cursor(&self) -> Self::Cursor;
    /// The number of updates in the batch.
    fn len(&self) -> usize;
    /// True if the batch is empty (contains zero updates).
    fn is_empty(&self) -> bool { self.len() == 0 }
    /// Describes the times of the updates in the batch.
    fn description(&self) -> &Description<Self::Time>;

    /// All times in the batch are greater or equal to an element of `lower`.
    fn lower(&self) -> &Antichain<Self::Time> { self.description().lower() }
    /// All times in the batch are not greater or equal to any element of `upper`.
    fn upper(&self) -> &Antichain<Self::Time> { self.description().upper() }
}
280
/// An immutable collection of updates.
pub trait Batch : BatchReader + Sized {
    /// A type used to progressively merge batches.
    type Merger: Merger<Self>;

    /// Initiates the merging of consecutive batches.
    ///
    /// The result of this method can be exercised to eventually produce the same result
    /// that a call to `self.merge(other)` would produce, but it can be done in a measured
    /// fashion. This can help to avoid latency spikes where a large merge needs to happen.
    ///
    /// The `compaction_frontier` is forwarded to `Merger::new`, which may compact times up to it.
    fn begin_merge(&self, other: &Self, compaction_frontier: AntichainRef<Self::Time>) -> Self::Merger {
        Self::Merger::new(self, other, compaction_frontier)
    }

    /// Produce an empty batch over the indicated interval.
    fn empty(lower: Antichain<Self::Time>, upper: Antichain<Self::Time>) -> Self;
}
298
/// Functionality for collecting and batching updates.
pub trait Batcher {
    /// Type pushed into the batcher.
    type Input;
    /// Type produced by the batcher.
    type Output;
    /// Times at which batches are formed.
    type Time: Timestamp;
    /// Allocates a new empty batcher.
    ///
    /// NOTE(review): `logger` and `operator_id` presumably identify the batcher for event
    /// logging; confirm against concrete implementations.
    fn new(logger: Option<Logger>, operator_id: usize) -> Self;
    /// Adds an unordered container of elements to the batcher.
    fn push_container(&mut self, batch: &mut Self::Input);
    /// Returns all updates not greater or equal to an element of `upper`, sealed via builder `B`.
    fn seal<B: Builder<Input=Self::Output, Time=Self::Time>>(&mut self, upper: Antichain<Self::Time>) -> B::Output;
    /// Returns the lower envelope of contained update times.
    fn frontier(&mut self) -> AntichainRef<'_, Self::Time>;
}
316
/// Functionality for building batches from ordered update sequences.
pub trait Builder: Sized {
    /// Input item type.
    type Input;
    /// Timestamp type.
    type Time: Timestamp;
    /// Output batch type.
    type Output;

    /// Allocates an empty builder.
    ///
    /// Ideally we deprecate this and insist all non-trivial building happens via `with_capacity()`.
    // #[deprecated]
    fn new() -> Self { Self::with_capacity(0, 0, 0) }
    /// Allocates an empty builder with capacity for the specified keys, values, and updates.
    ///
    /// They represent respectively the number of distinct `key`, `(key, val)`, and total updates.
    fn with_capacity(keys: usize, vals: usize, upds: usize) -> Self;
    /// Adds a chunk of elements to the batch.
    ///
    /// Adds all elements from `chunk` to the builder and leaves `chunk` in an undefined state.
    fn push(&mut self, chunk: &mut Self::Input);
    /// Completes building and returns the batch.
    fn done(self, description: Description<Self::Time>) -> Self::Output;

    /// Builds a batch from a chain of updates corresponding to the indicated lower and upper bounds.
    ///
    /// This method relies on the chain only containing updates greater or equal to the lower frontier,
    /// and not greater or equal to the upper frontier, as encoded in the description. Chains must also
    /// be sorted and consolidated.
    fn seal(chain: &mut Vec<Self::Input>, description: Description<Self::Time>) -> Self::Output;
}
349
/// Represents a merge in progress.
pub trait Merger<Output: Batch> {
    /// Creates a new merger to merge the supplied batches, optionally compacting
    /// up to the supplied frontier.
    fn new(source1: &Output, source2: &Output, compaction_frontier: AntichainRef<Output::Time>) -> Self;
    /// Perform some amount of work, decrementing `fuel`.
    ///
    /// If `fuel` is non-zero after the call, the merging is complete and
    /// one should call `done` to extract the merged results.
    fn work(&mut self, source1: &Output, source2: &Output, fuel: &mut isize);
    /// Extracts merged results.
    ///
    /// This method should only be called after `work` has been called and
    /// has not brought `fuel` to zero. Otherwise, the merge is still in
    /// progress.
    fn done(self) -> Output;
}
367
368
369/// Blanket implementations for reference counted batches.
370pub mod rc_blanket_impls {
371
372 use std::rc::Rc;
373
374 use timely::progress::{Antichain, frontier::AntichainRef};
375 use super::{Batch, BatchReader, Builder, Merger, Cursor, Description};
376
377 impl<B: BatchReader> WithLayout for Rc<B> {
378 type Layout = B::Layout;
379 }
380
381 impl<B: BatchReader> BatchReader for Rc<B> {
382
383 /// The type used to enumerate the batch's contents.
384 type Cursor = RcBatchCursor<B::Cursor>;
385 /// Acquires a cursor to the batch's contents.
386 fn cursor(&self) -> Self::Cursor {
387 RcBatchCursor::new((**self).cursor())
388 }
389
390 /// The number of updates in the batch.
391 fn len(&self) -> usize { (**self).len() }
392 /// Describes the times of the updates in the batch.
393 fn description(&self) -> &Description<Self::Time> { (**self).description() }
394 }
395
396 /// Wrapper to provide cursor to nested scope.
397 pub struct RcBatchCursor<C> {
398 cursor: C,
399 }
400
401 use crate::trace::implementations::WithLayout;
402 impl<C: Cursor> WithLayout for RcBatchCursor<C> {
403 type Layout = C::Layout;
404 }
405
406 impl<C> RcBatchCursor<C> {
407 fn new(cursor: C) -> Self {
408 RcBatchCursor {
409 cursor,
410 }
411 }
412 }
413
414 impl<C: Cursor> Cursor for RcBatchCursor<C> {
415
416 type Storage = Rc<C::Storage>;
417
418 #[inline] fn key_valid(&self, storage: &Self::Storage) -> bool { self.cursor.key_valid(storage) }
419 #[inline] fn val_valid(&self, storage: &Self::Storage) -> bool { self.cursor.val_valid(storage) }
420
421 #[inline] fn key<'a>(&self, storage: &'a Self::Storage) -> Self::Key<'a> { self.cursor.key(storage) }
422 #[inline] fn val<'a>(&self, storage: &'a Self::Storage) -> Self::Val<'a> { self.cursor.val(storage) }
423
424 #[inline] fn get_key<'a>(&self, storage: &'a Self::Storage) -> Option<Self::Key<'a>> { self.cursor.get_key(storage) }
425 #[inline] fn get_val<'a>(&self, storage: &'a Self::Storage) -> Option<Self::Val<'a>> { self.cursor.get_val(storage) }
426
427 #[inline]
428 fn map_times<L: FnMut(Self::TimeGat<'_>, Self::DiffGat<'_>)>(&mut self, storage: &Self::Storage, logic: L) {
429 self.cursor.map_times(storage, logic)
430 }
431
432 #[inline] fn step_key(&mut self, storage: &Self::Storage) { self.cursor.step_key(storage) }
433 #[inline] fn seek_key(&mut self, storage: &Self::Storage, key: Self::Key<'_>) { self.cursor.seek_key(storage, key) }
434
435 #[inline] fn step_val(&mut self, storage: &Self::Storage) { self.cursor.step_val(storage) }
436 #[inline] fn seek_val(&mut self, storage: &Self::Storage, val: Self::Val<'_>) { self.cursor.seek_val(storage, val) }
437
438 #[inline] fn rewind_keys(&mut self, storage: &Self::Storage) { self.cursor.rewind_keys(storage) }
439 #[inline] fn rewind_vals(&mut self, storage: &Self::Storage) { self.cursor.rewind_vals(storage) }
440 }
441
442 /// An immutable collection of updates.
443 impl<B: Batch> Batch for Rc<B> {
444 type Merger = RcMerger<B>;
445 fn empty(lower: Antichain<Self::Time>, upper: Antichain<Self::Time>) -> Self {
446 Rc::new(B::empty(lower, upper))
447 }
448 }
449
450 /// Wrapper type for building reference counted batches.
451 pub struct RcBuilder<B: Builder> { builder: B }
452
453 /// Functionality for building batches from ordered update sequences.
454 impl<B: Builder> Builder for RcBuilder<B> {
455 type Input = B::Input;
456 type Time = B::Time;
457 type Output = Rc<B::Output>;
458 fn with_capacity(keys: usize, vals: usize, upds: usize) -> Self { RcBuilder { builder: B::with_capacity(keys, vals, upds) } }
459 fn push(&mut self, input: &mut Self::Input) { self.builder.push(input) }
460 fn done(self, description: Description<Self::Time>) -> Rc<B::Output> { Rc::new(self.builder.done(description)) }
461 fn seal(chain: &mut Vec<Self::Input>, description: Description<Self::Time>) -> Self::Output {
462 Rc::new(B::seal(chain, description))
463 }
464 }
465
466 /// Wrapper type for merging reference counted batches.
467 pub struct RcMerger<B:Batch> { merger: B::Merger }
468
469 /// Represents a merge in progress.
470 impl<B:Batch> Merger<Rc<B>> for RcMerger<B> {
471 fn new(source1: &Rc<B>, source2: &Rc<B>, compaction_frontier: AntichainRef<B::Time>) -> Self { RcMerger { merger: B::begin_merge(source1, source2, compaction_frontier) } }
472 fn work(&mut self, source1: &Rc<B>, source2: &Rc<B>, fuel: &mut isize) { self.merger.work(source1, source2, fuel) }
473 fn done(self) -> Rc<B> { Rc::new(self.merger.done()) }
474 }
475}