// evmap/write.rs — write-side handle for the eventually consistent map.
1use crate::inner::{Entry, Inner};
2use crate::read::ReadHandle;
3use crate::values::ValuesInner;
4use left_right::{aliasing::Aliased, Absorb};
5
6use std::collections::hash_map::RandomState;
7use std::fmt;
8use std::hash::{BuildHasher, Hash};
9
/// A handle that may be used to modify the eventually consistent map.
///
/// Note that any changes made to the map will not be made visible to readers until
/// [`publish`](Self::publish) is called.
///
/// When the `WriteHandle` is dropped, the map is immediately (but safely) taken away from all
/// readers, causing all future lookups to return `None`.
///
/// # Examples
/// ```
/// let x = ('x', 42);
///
/// let (mut w, r) = evmap::new();
///
/// // the map is uninitialized, so all lookups should return None
/// assert_eq!(r.get(&x.0).map(|rs| rs.len()), None);
///
/// w.publish();
///
/// // after the first publish, it is empty, but ready
/// assert_eq!(r.get(&x.0).map(|rs| rs.len()), None);
///
/// w.insert(x.0, x);
///
/// // it is empty even after an add (we haven't published yet)
/// assert_eq!(r.get(&x.0).map(|rs| rs.len()), None);
///
/// w.publish();
///
/// // but after the swap, the record is there!
/// assert_eq!(r.get(&x.0).map(|rs| rs.len()), Some(1));
/// assert_eq!(r.get(&x.0).map(|rs| rs.iter().any(|v| v.0 == x.0 && v.1 == x.1)), Some(true));
/// ```
pub struct WriteHandle<K, V, M = (), S = RandomState>
where
    K: Eq + Hash + Clone,
    S: BuildHasher + Clone,
    V: Eq + Hash,
    M: 'static + Clone,
{
    // The underlying left-right write handle; all mutations are appended to its oplog.
    handle: left_right::WriteHandle<Inner<K, V, M, S>, Operation<K, V, M>>,
    // A read handle cloned from `handle` at construction, so the writer can also
    // serve reads (exposed through the `Deref` impl in this file).
    r_handle: ReadHandle<K, V, M, S>,
}
53
54impl<K, V, M, S> fmt::Debug for WriteHandle<K, V, M, S>
55where
56 K: Eq + Hash + Clone + fmt::Debug,
57 S: BuildHasher + Clone,
58 V: Eq + Hash + fmt::Debug,
59 M: 'static + Clone + fmt::Debug,
60{
61 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
62 f.debug_struct("WriteHandle")
63 .field("handle", &self.handle)
64 .finish()
65 }
66}
67
impl<K, V, M, S> WriteHandle<K, V, M, S>
where
    K: Eq + Hash + Clone,
    S: BuildHasher + Clone,
    V: Eq + Hash,
    M: 'static + Clone,
{
    /// Wrap a raw left-right write handle.
    ///
    /// Also clones out a `ReadHandle` so this writer can serve reads (see the `Deref`
    /// impl in this file).
    pub(crate) fn new(
        handle: left_right::WriteHandle<Inner<K, V, M, S>, Operation<K, V, M>>,
    ) -> Self {
        let r_handle = ReadHandle::new(left_right::ReadHandle::clone(&*handle));
        Self { handle, r_handle }
    }

    /// Publish all changes since the last call to `publish` to make them visible to readers.
    ///
    /// This can take some time, especially if readers are executing slow operations, or if there
    /// are many of them.
    pub fn publish(&mut self) -> &mut Self {
        self.handle.publish();
        self
    }

    /// Returns true if there are changes to the map that have not yet been exposed to readers.
    pub fn has_pending(&self) -> bool {
        self.handle.has_pending_operations()
    }

    /// Set the metadata.
    ///
    /// Will only be visible to readers after the next call to [`publish`](Self::publish).
    pub fn set_meta(&mut self, meta: M) {
        self.add_op(Operation::SetMeta(meta));
    }

    // Append an operation to the left-right oplog. It is applied (twice, once per map
    // copy) by the `Absorb` impl below when the handle publishes.
    fn add_op(&mut self, op: Operation<K, V, M>) -> &mut Self {
        self.handle.append(op);
        self
    }

    /// Add the given value to the value-bag of the given key.
    ///
    /// The updated value-bag will only be visible to readers after the next call to
    /// [`publish`](Self::publish).
    pub fn insert(&mut self, k: K, v: V) -> &mut Self {
        self.add_op(Operation::Add(k, Aliased::from(v)))
    }

    /// Replace the value-bag of the given key with the given value.
    ///
    /// Replacing the value will automatically deallocate any heap storage and place the new value
    /// back into the `SmallVec` inline storage. This can improve cache locality for common
    /// cases where the value-bag is only ever a single element.
    ///
    /// See [the doc section on this](./index.html#small-vector-optimization) for more information.
    ///
    /// The new value will only be visible to readers after the next call to
    /// [`publish`](Self::publish).
    pub fn update(&mut self, k: K, v: V) -> &mut Self {
        self.add_op(Operation::Replace(k, Aliased::from(v)))
    }

    /// Clear the value-bag of the given key, without removing it.
    ///
    /// If a value-bag already exists, this will clear it but leave the
    /// allocated memory intact for reuse, or if no associated value-bag exists
    /// an empty value-bag will be created for the given key.
    ///
    /// The new value will only be visible to readers after the next call to
    /// [`publish`](Self::publish).
    pub fn clear(&mut self, k: K) -> &mut Self {
        self.add_op(Operation::Clear(k))
    }

    /// Remove the given value from the value-bag of the given key.
    ///
    /// The updated value-bag will only be visible to readers after the next call to
    /// [`publish`](Self::publish).
    #[deprecated(since = "11.0.0", note = "Renamed to remove_value")]
    pub fn remove(&mut self, k: K, v: V) -> &mut Self {
        self.remove_value(k, v)
    }

    /// Remove the given value from the value-bag of the given key.
    ///
    /// The updated value-bag will only be visible to readers after the next call to
    /// [`publish`](Self::publish).
    pub fn remove_value(&mut self, k: K, v: V) -> &mut Self {
        self.add_op(Operation::RemoveValue(k, v))
    }

    /// Remove the value-bag for the given key.
    ///
    /// The value-bag will only disappear from readers after the next call to
    /// [`publish`](Self::publish).
    #[deprecated(since = "11.0.0", note = "Renamed to remove_entry")]
    pub fn empty(&mut self, k: K) -> &mut Self {
        self.remove_entry(k)
    }

    /// Remove the value-bag for the given key.
    ///
    /// The value-bag will only disappear from readers after the next call to
    /// [`publish`](Self::publish).
    pub fn remove_entry(&mut self, k: K) -> &mut Self {
        self.add_op(Operation::RemoveEntry(k))
    }

    /// Purge all value-bags from the map.
    ///
    /// The map will only appear empty to readers after the next call to
    /// [`publish`](Self::publish).
    ///
    /// Note that this will iterate once over all the keys internally.
    pub fn purge(&mut self) -> &mut Self {
        self.add_op(Operation::Purge)
    }

    /// Retain elements for the given key using the provided predicate function.
    ///
    /// The remaining value-bag will only be visible to readers after the next call to
    /// [`publish`](Self::publish)
    ///
    /// # Safety
    ///
    /// The given closure is called _twice_ for each element, once when called, and once
    /// on swap. It _must_ retain the same elements each time, otherwise a value may exist in one
    /// map, but not the other, leaving the two maps permanently out-of-sync. This is _really_ bad,
    /// as values are aliased between the maps, and are assumed safe to free when they leave the
    /// map during a `publish`. Returning `true` when `retain` is first called for a value, and
    /// `false` the second time would free the value, but leave an aliased pointer to it in the
    /// other side of the map.
    ///
    /// The arguments to the predicate function are the current value in the value-bag, and `true`
    /// if this is the first value in the value-bag on the second map, or `false` otherwise. Use
    /// the second argument to know when to reset any closure-local state to ensure deterministic
    /// operation.
    ///
    /// So, stated plainly, the given closure _must_ return the same order of true/false for each
    /// of the two iterations over the value-bag. That is, the sequence of returned booleans before
    /// the second argument is true must be exactly equal to the sequence of returned booleans
    /// at and beyond when the second argument is true.
    pub unsafe fn retain<F>(&mut self, k: K, f: F) -> &mut Self
    where
        F: FnMut(&V, bool) -> bool + 'static + Send,
    {
        self.add_op(Operation::Retain(k, Predicate(Box::new(f))))
    }

    /// Shrinks a value-bag to its minimum necessary size, freeing memory
    /// and potentially improving cache locality by switching to inline storage.
    ///
    /// The optimized value-bag will only be visible to readers after the next call to
    /// [`publish`](Self::publish)
    pub fn fit(&mut self, k: K) -> &mut Self {
        self.add_op(Operation::Fit(Some(k)))
    }

    /// Like [`WriteHandle::fit`](#method.fit), but shrinks <b>all</b> value-bags in the map.
    ///
    /// The optimized value-bags will only be visible to readers after the next call to
    /// [`publish`](Self::publish)
    pub fn fit_all(&mut self) -> &mut Self {
        self.add_op(Operation::Fit(None))
    }

    /// Reserves capacity for some number of additional elements in a value-bag,
    /// or creates an empty value-bag for this key with the given capacity if
    /// it doesn't already exist.
    ///
    /// Readers are unaffected by this operation, but it can improve performance
    /// by pre-allocating space for large value-bags.
    pub fn reserve(&mut self, k: K, additional: usize) -> &mut Self {
        self.add_op(Operation::Reserve(k, additional))
    }

    #[cfg(feature = "eviction")]
    #[cfg_attr(docsrs, doc(cfg(feature = "eviction")))]
    /// Remove the value-bag for `n` randomly chosen keys.
    ///
    /// This method immediately calls [`publish`](Self::publish) to ensure that the keys and values
    /// it returns match the elements that will be emptied on the next call to
    /// [`publish`](Self::publish). The value-bags will only disappear from readers after the next
    /// call to [`publish`](Self::publish).
    pub fn empty_random<'a>(
        &'a mut self,
        rng: &mut impl rand::Rng,
        n: usize,
    ) -> impl ExactSizeIterator<Item = (&'a K, &'a crate::values::Values<V, S>)> {
        // force a publish so that our view into self.r_handle matches the indices we choose.
        // if we didn't do this, the `i`th element of r_handle may be a completely different
        // element than the one that _will_ be evicted when `EmptyAt([.. i ..])` is applied.
        // this would be bad since we are telling the caller which elements we are evicting!
        // note also that we _must_ use `r_handle`, not `w_handle`, since `w_handle` may have
        // pending operations even after a publish!
        self.publish();

        let inner = self
            .r_handle
            .handle
            .raw_handle()
            .expect("WriteHandle has not been dropped");
        // safety: the writer cannot publish until 'a ends, so we know that reading from the read
        // map is safe for the duration of 'a.
        let inner: &'a Inner<K, V, M, S> =
            unsafe { std::mem::transmute::<&Inner<K, V, M, S>, _>(inner.as_ref()) };
        let inner = &inner.data;

        // let's pick some (distinct) indices to evict!
        let n = n.min(inner.len());
        let indices = rand::seq::index::sample(rng, inner.len(), n);

        // we need to sort the indices so that, later, we can make sure to swap remove from last to
        // first (and so not accidentally remove the wrong index).
        let mut to_remove = indices.clone().into_vec();
        to_remove.sort_unstable();
        self.add_op(Operation::EmptyAt(to_remove));

        indices.into_iter().map(move |i| {
            let (k, vs) = inner.get_index(i).expect("in-range");
            (k, vs.as_ref())
        })
    }
}
292
impl<K, V, M, S> Absorb<Operation<K, V, M>> for Inner<K, V, M, S>
where
    K: Eq + Hash + Clone,
    S: BuildHasher + Clone,
    V: Eq + Hash,
    M: 'static + Clone,
{
    /// Apply ops in such a way that no values are dropped, only forgotten.
    ///
    /// This is the first of the two applications of every operation (each op is applied
    /// once to each of the two map copies). Values removed here must not be freed, since
    /// the other map copy still holds an alias of them.
    fn absorb_first(&mut self, op: &mut Operation<K, V, M>, other: &Self) {
        // Safety note for calls to .alias():
        //
        // it is safe to alias this value here because if it is ever removed, one alias is always
        // first dropped with NoDrop (in absorb_first), and _then_ the other (and only remaining)
        // alias is dropped with DoDrop (in absorb_second). we won't drop the aliased value until
        // _after_ absorb_second is called on this operation, so leaving an alias in the oplog is
        // also safe.

        let hasher = other.data.hasher();
        match *op {
            Operation::Replace(ref key, ref mut value) => {
                let vs = self
                    .data
                    .entry(key.clone())
                    .or_insert_with(ValuesInner::new);

                // truncate vector
                vs.clear();

                // implicit shrink_to_fit on replace op
                // so it will switch back to inline allocation for the subsequent push.
                vs.shrink_to_fit();

                vs.push(unsafe { value.alias() }, hasher);
            }
            Operation::Clear(ref key) => {
                self.data
                    .entry(key.clone())
                    .or_insert_with(ValuesInner::new)
                    .clear();
            }
            Operation::Add(ref key, ref mut value) => {
                self.data
                    .entry(key.clone())
                    .or_insert_with(ValuesInner::new)
                    .push(unsafe { value.alias() }, hasher);
            }
            Operation::RemoveEntry(ref key) => {
                #[cfg(not(feature = "indexed"))]
                self.data.remove(key);
                #[cfg(feature = "indexed")]
                self.data.swap_remove(key);
            }
            Operation::Purge => {
                self.data.clear();
            }
            #[cfg(feature = "eviction")]
            Operation::EmptyAt(ref indices) => {
                // indices are sorted ascending (see `empty_random`), so removing from the
                // back keeps the not-yet-removed indices valid across swap_removes.
                for &index in indices.iter().rev() {
                    self.data.swap_remove_index(index);
                }
            }
            Operation::RemoveValue(ref key, ref value) => {
                if let Some(e) = self.data.get_mut(key) {
                    e.swap_remove(&value);
                }
            }
            Operation::Retain(ref key, ref mut predicate) => {
                if let Some(e) = self.data.get_mut(key) {
                    // `first` stays false here: the "reset" flag is only raised on the
                    // second application of the op (see absorb_second / retain's docs).
                    let mut first = true;
                    e.retain(move |v| {
                        let retain = predicate.eval(v, first);
                        first = false;
                        retain
                    });
                }
            }
            Operation::Fit(ref key) => match key {
                Some(ref key) => {
                    if let Some(e) = self.data.get_mut(key) {
                        e.shrink_to_fit();
                    }
                }
                None => {
                    for value_set in self.data.values_mut() {
                        value_set.shrink_to_fit();
                    }
                }
            },
            Operation::Reserve(ref key, additional) => match self.data.entry(key.clone()) {
                Entry::Occupied(mut entry) => {
                    entry.get_mut().reserve(additional, hasher);
                }
                Entry::Vacant(entry) => {
                    entry.insert(ValuesInner::with_capacity_and_hasher(additional, hasher));
                }
            },
            Operation::MarkReady => {
                self.ready = true;
            }
            Operation::SetMeta(ref m) => {
                self.meta = m.clone();
            }
        }
    }

    /// Apply operations while allowing dropping of values.
    ///
    /// This is the second application of each operation; absorb_first has already removed
    /// the other alias of any value dropped here, so freeing is now sound.
    fn absorb_second(&mut self, op: Operation<K, V, M>, other: &Self) {
        // # Safety (for cast):
        //
        // See the module-level documentation for left_right::aliasing.
        // NoDrop and DoDrop are both private, therefore this cast is (likely) sound.
        //
        // # Safety (for NoDrop -> DoDrop):
        //
        // It is safe for us to drop values the second time each operation has been
        // performed, since if they are dropped here, they were also dropped in the first
        // application of the operation, which removed the only other alias.
        //
        // FIXME: This is where the non-determinism of Hash and PartialEq hits us (#78).
        let inner: &mut Inner<K, V, M, S, crate::aliasing::DoDrop> =
            unsafe { &mut *(self as *mut _ as *mut _) };

        // Safety note for calls to .change_drop():
        //
        // we're turning a NoDrop into DoDrop, so we must be prepared for a drop.
        // if absorb_first dropped its alias, then `value` is the only alias
        // if absorb_first did not drop its alias, then `value` will not be dropped here either,
        // and at the end of scope we revert to `NoDrop`, so all is well.
        let hasher = other.data.hasher();
        match op {
            Operation::Replace(key, value) => {
                let v = inner.data.entry(key).or_insert_with(ValuesInner::new);
                v.clear();
                // mirror absorb_first's implicit shrink_to_fit so the two copies stay
                // structurally identical.
                v.shrink_to_fit();

                v.push(unsafe { value.change_drop() }, hasher);
            }
            Operation::Clear(key) => {
                inner
                    .data
                    .entry(key)
                    .or_insert_with(ValuesInner::new)
                    .clear();
            }
            Operation::Add(key, value) => {
                // safety (below):
                // we're turning a NoDrop into DoDrop, so we must be prepared for a drop.
                // if absorb_first dropped the value, then `value` is the only alias
                // if absorb_first did not drop the value, then `value` will not be dropped here
                // either, and at the end of scope we revert to `NoDrop`, so all is well.
                inner
                    .data
                    .entry(key)
                    .or_insert_with(ValuesInner::new)
                    .push(unsafe { value.change_drop() }, hasher);
            }
            Operation::RemoveEntry(key) => {
                #[cfg(not(feature = "indexed"))]
                inner.data.remove(&key);
                #[cfg(feature = "indexed")]
                inner.data.swap_remove(&key);
            }
            Operation::Purge => {
                inner.data.clear();
            }
            #[cfg(feature = "eviction")]
            Operation::EmptyAt(indices) => {
                // same back-to-front removal as in absorb_first.
                for &index in indices.iter().rev() {
                    inner.data.swap_remove_index(index);
                }
            }
            Operation::RemoveValue(key, value) => {
                if let Some(e) = inner.data.get_mut(&key) {
                    // find the first entry that matches all fields
                    e.swap_remove(&value);
                }
            }
            Operation::Retain(key, mut predicate) => {
                if let Some(e) = inner.data.get_mut(&key) {
                    let mut first = true;
                    e.retain(move |v| {
                        let retain = predicate.eval(&*v, first);
                        first = false;
                        retain
                    });
                }
            }
            Operation::Fit(key) => match key {
                Some(ref key) => {
                    if let Some(e) = inner.data.get_mut(key) {
                        e.shrink_to_fit();
                    }
                }
                None => {
                    for value_set in inner.data.values_mut() {
                        value_set.shrink_to_fit();
                    }
                }
            },
            Operation::Reserve(key, additional) => match inner.data.entry(key) {
                Entry::Occupied(mut entry) => {
                    entry.get_mut().reserve(additional, hasher);
                }
                Entry::Vacant(entry) => {
                    entry.insert(ValuesInner::with_capacity_and_hasher(additional, hasher));
                }
            },
            Operation::MarkReady => {
                inner.ready = true;
            }
            Operation::SetMeta(m) => {
                inner.meta = m;
            }
        }
    }

    /// Drop the first of the two map copies without freeing any values.
    fn drop_first(self: Box<Self>) {
        // since the two copies are exactly equal, we need to make sure that we *don't* call the
        // destructors of any of the values that are in our map, as they'll all be called when the
        // last read handle goes out of scope. that's easy enough since none of them will be
        // dropped by default.
    }

    /// Drop the second (last) map copy, actually freeing all values.
    fn drop_second(self: Box<Self>) {
        // when the second copy is dropped is where we want to _actually_ drop all the values in
        // the map. we do this by setting the generic type to the one that causes drops to happen.
        //
        // safety: since we're going second, we know that all the aliases in the first map have
        // gone away, so all of our aliases must be the only ones.
        let inner: Box<Inner<K, V, M, S, crate::aliasing::DoDrop>> =
            unsafe { Box::from_raw(Box::into_raw(self) as *mut _ as *mut _) };
        drop(inner);
    }

    /// Bring this (second) map copy up to date with the first by aliasing every value.
    fn sync_with(&mut self, first: &Self) {
        let inner: &mut Inner<K, V, M, S, crate::aliasing::DoDrop> =
            unsafe { &mut *(self as *mut _ as *mut _) };
        inner.data.extend(first.data.iter().map(|(k, vs)| {
            // # Safety (for aliasing):
            //
            // We are aliasing every value in the read map, and the oplog has no other
            // pending operations (by the semantics of JustCloneRHandle). For any of the
            // values we alias to be dropped, the operation that drops it must first be
            // enqueued to the oplog, at which point it will _first_ go through
            // absorb_first, which will remove the alias and leave only one alias left.
            // Only after that, when that operation eventually goes through absorb_second,
            // will the alias be dropped, and by that time it is the only value.
            //
            // # Safety (for hashing):
            //
            // Due to `RandomState` there can be subtle differences between the iteration order
            // of two `HashMap` instances. We prevent this by using `left_right::new_with_empty`,
            // which `clone`s the first map, making them use the same hasher.
            //
            // # Safety (for NoDrop -> DoDrop):
            //
            // The oplog has only this one operation in it for the first call to `publish`,
            // so we are about to turn the alias back into NoDrop.
            (k.clone(), unsafe {
                ValuesInner::alias(vs, first.data.hasher())
            })
        }));
        self.ready = true;
    }
}
558
559impl<K, V, M, S> Extend<(K, V)> for WriteHandle<K, V, M, S>
560where
561 K: Eq + Hash + Clone,
562 S: BuildHasher + Clone,
563 V: Eq + Hash,
564 M: 'static + Clone,
565{
566 fn extend<I: IntoIterator<Item = (K, V)>>(&mut self, iter: I) {
567 for (k, v) in iter {
568 self.insert(k, v);
569 }
570 }
571}
572
// allow using write handle for reads
use std::ops::Deref;
impl<K, V, M, S> Deref for WriteHandle<K, V, M, S>
where
    K: Eq + Hash + Clone,
    S: BuildHasher + Clone,
    V: Eq + Hash,
    M: 'static + Clone,
{
    type Target = ReadHandle<K, V, M, S>;
    // Expose the internally held `ReadHandle`, so all read methods are callable
    // directly on the writer.
    fn deref(&self) -> &Self::Target {
        &self.r_handle
    }
}
587
/// A pending map operation.
///
/// Operations are appended to the left-right oplog by `WriteHandle` and applied to
/// each of the two map copies by the `Absorb` impl in this file.
#[non_exhaustive]
pub(super) enum Operation<K, V, M> {
    /// Replace the set of entries for this key with this value.
    Replace(K, Aliased<V, crate::aliasing::NoDrop>),
    /// Add this value to the set of entries for this key.
    Add(K, Aliased<V, crate::aliasing::NoDrop>),
    /// Remove this value from the set of entries for this key.
    RemoveValue(K, V),
    /// Remove the value set for this key.
    RemoveEntry(K),
    #[cfg(feature = "eviction")]
    /// Drop keys at the given indices.
    ///
    /// The list of indices must be sorted in ascending order.
    EmptyAt(Vec<usize>),
    /// Remove all values in the value set for this key.
    Clear(K),
    /// Remove all values for all keys.
    ///
    /// Note that this will iterate once over all the keys internally.
    Purge,
    /// Retains all values matching the given predicate.
    Retain(K, Predicate<V>),
    /// Shrinks [`Values`] to their minimum necessary size, freeing memory
    /// and potentially improving cache locality.
    ///
    /// If no key is given, all `Values` will shrink to fit.
    Fit(Option<K>),
    /// Reserves capacity for some number of additional elements in [`Values`]
    /// for the given key. If the given key does not exist, allocate an empty
    /// `Values` with the given capacity.
    ///
    /// This can improve performance by pre-allocating space for large bags of values.
    Reserve(K, usize),
    /// Mark the map as ready to be consumed for readers.
    MarkReady,
    /// Set the value of the map meta.
    SetMeta(M),
}
628
629impl<K, V, M> fmt::Debug for Operation<K, V, M>
630where
631 K: fmt::Debug,
632 V: fmt::Debug,
633 M: fmt::Debug,
634{
635 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
636 match *self {
637 Operation::Replace(ref a, ref b) => f.debug_tuple("Replace").field(a).field(b).finish(),
638 Operation::Add(ref a, ref b) => f.debug_tuple("Add").field(a).field(b).finish(),
639 Operation::RemoveValue(ref a, ref b) => {
640 f.debug_tuple("RemoveValue").field(a).field(b).finish()
641 }
642 Operation::RemoveEntry(ref a) => f.debug_tuple("RemoveEntry").field(a).finish(),
643 #[cfg(feature = "eviction")]
644 Operation::EmptyAt(ref a) => f.debug_tuple("EmptyAt").field(a).finish(),
645 Operation::Clear(ref a) => f.debug_tuple("Clear").field(a).finish(),
646 Operation::Purge => f.debug_tuple("Purge").finish(),
647 Operation::Retain(ref a, ref b) => f.debug_tuple("Retain").field(a).field(b).finish(),
648 Operation::Fit(ref a) => f.debug_tuple("Fit").field(a).finish(),
649 Operation::Reserve(ref a, ref b) => f.debug_tuple("Reserve").field(a).field(b).finish(),
650 Operation::MarkReady => f.debug_tuple("MarkReady").finish(),
651 Operation::SetMeta(ref a) => f.debug_tuple("SetMeta").field(a).finish(),
652 }
653 }
654}
655
/// Unary predicate used to retain elements.
///
/// The predicate function is called once for each distinct value, and `true` if this is the
/// _first_ call to the predicate on the _second_ application of the operation.
// Boxed so the closure fits in the sized `Operation` enum; `Send` so ops can cross threads.
pub(super) struct Predicate<V: ?Sized>(Box<dyn FnMut(&V, bool) -> bool + Send>);
661
662impl<V: ?Sized> Predicate<V> {
663 /// Evaluate the predicate for the given element
664 #[inline]
665 fn eval(&mut self, value: &V, reset: bool) -> bool {
666 (*self.0)(value, reset)
667 }
668}
669
impl<V: ?Sized> PartialEq for Predicate<V> {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        // only compare data, not vtable: https://stackoverflow.com/q/47489449/472927
        // casting the fat `&dyn FnMut` pointer to a thin `*const ()` discards the vtable
        // pointer, so two predicates are equal iff they box the very same closure instance.
        &*self.0 as *const _ as *const () == &*other.0 as *const _ as *const ()
    }
}

impl<V: ?Sized> Eq for Predicate<V> {}
679
680impl<V: ?Sized> fmt::Debug for Predicate<V> {
681 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
682 f.debug_tuple("Predicate")
683 .field(&format_args!("{:p}", &*self.0 as *const _))
684 .finish()
685 }
686}