total_order_multi_map/lib.rs
//! A multi-map implementation which also keeps the
//! total order of inserted elements, i.e. if you
//! insert `(k1, v1)`, then `(k2, v2)`, then `(k1, v3)`,
//! iterating over the map yields the elements in exactly
//! this insertion order. Additionally there is a
//! `grouped_values` method returning an iterator over the
//! values grouped by key instead of in iteration order.
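//!
//! A minimal sketch of the above (the crate name `total_order_multi_map` is
//! assumed from the file path; `String` values are used as they deref to `str`):
//!
//! ```
//! // assumption: the crate is available as `total_order_multi_map`
//! use total_order_multi_map::TotalOrderMultiMap;
//!
//! let mut map = TotalOrderMultiMap::new();
//! map.add("k1", "v1".to_owned());
//! map.add("k2", "v2".to_owned());
//! map.add("k1", "v3".to_owned());
//!
//! // full iteration keeps the total insertion order
//! assert_eq!(
//!     vec![("k1", "v1"), ("k2", "v2"), ("k1", "v3")],
//!     map.iter().collect::<Vec<_>>()
//! );
//!
//! // `get` groups the values by key, still in insertion order
//! assert_eq!(vec!["v1", "v3"], map.get("k1").collect::<Vec<_>>());
//! ```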
//!
//! The map makes sure that normal iteration is roughly
//! as fast as iterating over a vector, while using `get` to
//! get a (group of) values is also roughly as fast as
//! using `get` on a `HashMap`. The drawback is that
//! insertion is a bit slower.
//!
//! Note that this implementation is made for values
//! which dereference to the actually relevant values.
//! It is also temporarily limited to values which implement
//! `DerefMut`; this can be lifted in the future (i.e.
//! currently `Box<T>` is supported and `Rc<T>` can be
//! supported in the future, just not for the `_mut` methods).
//! When accessing the map, references to the inner values
//! are returned (e.g. with a `Box<T>`, `&T`/`&mut T` references
//! are returned and the `Box` itself is not accessible).
//!
//! Because of implementation details it is required that
//! the value containers implement `StableDeref`. Note that
//! this multi-map can, or more precisely is made to, handle
//! unsized values like trait objects or slices.
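//!
//! As a small sketch of the unsized-value support (mirroring the
//! `works_with_trait_objects` test at the bottom of this file):
//!
//! ```
//! use std::fmt::Debug;
//! use total_order_multi_map::TotalOrderMultiMap;
//!
//! // the inner values are unsized trait objects; iteration yields `&Debug`
//! let mut map = TotalOrderMultiMap::<&'static str, Box<Debug>>::new();
//! map.add("hy", Box::new("h".to_owned()));
//! map.add("hy", Box::new(2));
//! assert_eq!(2, map.get("hy").len());
//! ```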
//!
//! # State of Implementation
//!
//! Currently a lot of possible and useful methods are
//! missing; take a look at the README for more details.
//! Though core methods like `add`, `get`, `get_mut`
//! and multiple iterators are implemented.
//!
//! Also it is currently limited to `StableDeref + DerefMut`,
//! but `DerefMut` is only needed for the `_mut` methods.
//!
//! # Example
//!
//! See the `from_readme.rs` example in the examples directory,
//! which is also present in the README.
//!
//! # Implementation Details
//!
//! This implementation internally has a `Vec` and
//! a `HashMap`. The `Vec` contains key-value pairs
//! and "owns" the values. It is used for simple
//! iteration and keeps the insertion order. The
//! `HashMap` is a map from keys to vectors of
//! pointers to the inner values and represents
//! the multi-map part. The code makes sure that
//! a pointer is in the hash map iff its
//! "owned" value is in the `Vec`, at _any_ point
//! of execution, even during insertion. This is
//! needed to make sure that this type is unwind
//! safe. Also, to make sure that the pointers to
//! the inner values are always valid, the values
//! have to implement `StableDeref`: even if
//! the vec is moved/resized the pointers stay
//! valid, as they don't point into the vec but
//! to the inner value the value in the vec points
//! to. In turn this also means that mutable references
//! to the containers/values should _never_ be exposed,
//! though mutable references to the inner values
//! can be exposed.
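//!
//! Schematically, after inserting `(k1, v1)`, `(k2, v2)`, `(k1, v3)` the two
//! internal containers look roughly like this (a simplified sketch; the hash
//! map buckets really store `*mut` pointers to the inner values):
//!
//! ```text
//! vec_data:   [ (k1, v1), (k2, v2), (k1, v3) ]      // owns the values, keeps total order
//! map_access: { k1 => [ *v1, *v3 ], k2 => [ *v2 ] } // pointers to inner values, grouped by key
//! ```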
//!
//! Note that for ease of implementation only `Copy`
//! keys are allowed; this should be improved on in
//! later versions.
//!

extern crate stable_deref_trait;
extern crate vec_drain_where;

use std::collections::HashMap;
use std::{vec, slice};
use std::hash::Hash;
use std::cmp::{Eq, PartialEq};
use std::iter::{Extend, FromIterator};
use std::fmt::{self, Debug};
use std::ops::DerefMut;

use stable_deref_trait::StableDeref;

use self::utils::DebugIterableOpaque;
pub use self::iter::*;
pub use self::entry::*;
pub use self::map_iter::*;

#[macro_use]
mod utils;
mod iter;
mod entry;
mod map_iter;


// # SAFETY constraints (internal):
//
// - the ptrs contained in `map_access` always have to be valid;
//   code adding elements should first add them to `vec_data`
//   and then to `map_access`, code removing elements should
//   first remove them from `map_access` and then from `vec_data`.
//
// - giving out references to data always requires `self` to be
//   borrowed by the same kind of reference; a function giving
//   out references (either directly or transitively) should either
//   only use `vec_data` or `map_access` but not both, especially
//   so wrt. `&mut` (i.e. to use the contained `*mut T`'s as
//   `&mut T` it's required to `&mut` borrow the map).
//
// - UNDER ANY CIRCUMSTANCE NEVER return a mutable reference to the
//   data container (`&mut V`); in difference to a `&mut T`/`&mut V::Target`
//   it can overwrite the container, invalidating the `StableDeref` assumptions.
//
//
// ## StableDeref assumptions
//
// - reminder: containers implementing `StableDeref` always deref to the same
//   memory address as long as the container itself is not mutated (but
//   even if the data at that address is mutated)
//
// - reminder: as we keep pointers directly to the data we can't allow any
//   mutation of the container
//
// - reminder: implementing `StableDeref` for a type which on a safety level
//   relies on side-effects (e.g. using interior mutability) in deref is unsafe
//
/// A multi-map which keeps the total ordering of inserted elements
///
/// The map is meant to contain values implementing `StableDeref`;
/// methods like `get` and `iter` will iterate over the inner values
/// referred to when dereferencing the values.
///
/// The key is currently limited to values implementing `Copy`.
///
/// See the module/crate level documentation for more details.
///
/// # Unwind Safety
///
/// This type is unwind + resume safe.
/// Though in the unlikely case that a panic happens inside of
/// a function like `add` or `get`, the resulting state might be
/// inconsistent in a safe way, i.e. some values might be in the map and
/// accessible during iteration, but they won't appear when using `get`.
///
/// Note that this can only happen in a few rare cases:
///
/// 1. `Vec::push`/`Vec::reserve` panics
/// 2. `HashMap::insert`/`HashMap::reserve` panics
/// 3. for some reason `oom` does panic instead of aborting
///
/// Which can mainly happen in the following cases:
///
/// - the vector of key-value pairs tries to contain more than `isize::MAX` bytes
/// - you use zero-sized values and overflow `usize` (which can't really happen as
///   we store at least some data per insertion, i.e. you would overflow the vec first)
///
/// Generally speaking you won't run into any of this in normal circumstances, and if
/// you do it's likely that you are close to a system-wide `oom` anyway.
pub struct TotalOrderMultiMap<K, V>
    where V: StableDeref + DerefMut, K: Hash + Eq + Copy
{
    vec_data: Vec<(K, V)>,
    map_access: HashMap<K, Vec<*mut V::Target>>,
}

impl<K, V> Default for TotalOrderMultiMap<K, V>
    where K: Hash + Eq + Copy,
          V: StableDeref + DerefMut
{
    fn default() -> Self {
        TotalOrderMultiMap {
            vec_data: Default::default(),
            map_access: Default::default()
        }
    }
}

// Note: further implementations live in sub-modules (entry.rs, iter.rs, ...)
impl<K, V> TotalOrderMultiMap<K, V>
    where K: Hash + Eq + Copy,
          V: StableDeref + DerefMut
{
    /// Create a new empty map.
    pub fn new() -> Self {
        Default::default()
    }

    /// Creates a new map with a given `capacity`.
    ///
    /// Note that this method will reserve the given
    /// capacity for the internal `Vec` and `HashMap`,
    /// but as it's a multi-map it cannot know how the
    /// elements will be distributed, so using
    /// `with_capacity` does _not_ guarantee that there
    /// are no allocations if fewer than `capacity` elements
    /// are inserted. Though it can still reduce the
    /// number of allocations needed.
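    ///
    /// A minimal usage sketch (the crate name `total_order_multi_map`
    /// is assumed from the file path):
    ///
    /// ```
    /// use total_order_multi_map::TotalOrderMultiMap;
    ///
    /// let mut map = TotalOrderMultiMap::with_capacity(10);
    /// map.add("key", Box::new(1u8));
    /// // `Vec::with_capacity` guarantees at least the requested capacity
    /// assert!(map.capacity() >= 10);
    /// ```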
    pub fn with_capacity(capacity: usize) -> Self {
        TotalOrderMultiMap {
            vec_data: Vec::with_capacity(capacity),
            map_access: HashMap::with_capacity(capacity),
        }
    }

    /// Returns the capacity (unreliable).
    ///
    /// This is not reliable as it only returns
    /// the capacity of the underlying `Vec`, not
    /// considering the underlying `HashMap` or
    /// the `Vec`s used to turn the map into a
    /// multi-map.
    pub fn capacity(&self) -> usize {
        self.vec_data.capacity()
    }

    /// Reserves space for `additional` more elements.
    ///
    /// The reservation is done in both the internal
    /// `Vec` and `HashMap`, but as the map is a multi-map
    /// this method is less helpful than e.g. on a
    /// plain `Vec` or `HashMap`.
    ///
    /// # Panics
    /// if the new allocation size overflows `usize`
    pub fn reserve(&mut self, additional: usize) {
        self.vec_data.reserve(additional);
        self.map_access.reserve(additional);
    }

    /// Reverses the insertion order.
    ///
    /// After calling this the map will contain values
    /// as if they had been inserted in reversed order.
    ///
    /// This affects both the iteration order of
    /// the full map as well as the iteration order of
    /// the values returned by `get`.
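    ///
    /// A minimal sketch (mirroring the `reverse` test below):
    ///
    /// ```
    /// use total_order_multi_map::TotalOrderMultiMap;
    ///
    /// let mut map = TotalOrderMultiMap::new();
    /// map.add("k1", "ok".to_owned());
    /// map.add("k2", "why not?".to_owned());
    /// map.reverse();
    ///
    /// assert_eq!(
    ///     vec![("k2", "why not?"), ("k1", "ok")],
    ///     map.iter().collect::<Vec<_>>()
    /// );
    /// ```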
    pub fn reverse(&mut self) {
        self.vec_data.reverse();
        for (_, val) in self.map_access.iter_mut() {
            val.reverse()
        }
    }

    /// Shrinks all internal containers to not contain any additional capacity.
    ///
    /// Whether or not memory is freed depends in the end on the `shrink_to_fit`
    /// implementations of `Vec` and `HashMap`.
    pub fn shrink_to_fit(&mut self) {
        self.vec_data.shrink_to_fit();
        self.map_access.shrink_to_fit();
        for (_, val) in self.map_access.iter_mut() {
            val.shrink_to_fit()
        }
    }

    /// Returns the total number of elements.
    pub fn len(&self) -> usize {
        self.vec_data.len()
    }

    /// Returns the total number of different keys.
    pub fn key_count(&self) -> usize {
        self.map_access.len()
    }

    /// Returns `true` if the map is empty.
    pub fn is_empty(&self) -> bool {
        self.vec_data.is_empty()
    }

    /// Empties this map.
    pub fn clear(&mut self) {
        self.map_access.clear();
        self.vec_data.clear();
    }

    /// Returns `true` if the key is contained in the map.
    ///
    /// Does not state how many values are associated with it.
    pub fn contains_key(&self, k: K) -> bool {
        self.map_access.contains_key(&k)
    }

    /// Returns the values associated with the given key.
    ///
    /// If the key is not in the map the returned `EntryValues`
    /// iterator is empty; if the key is in the map it yields
    /// at least one element.
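    ///
    /// A minimal sketch (mirroring the `get_set` test below):
    ///
    /// ```
    /// use total_order_multi_map::TotalOrderMultiMap;
    ///
    /// let mut map = TotalOrderMultiMap::new();
    /// map.add("k1", "a".to_owned());
    /// map.add("k2", "b".to_owned());
    /// map.add("k1", "e".to_owned());
    ///
    /// // values for a key are yielded in insertion order
    /// assert_eq!(vec!["a", "e"], map.get("k1").collect::<Vec<_>>());
    /// // a key which was never inserted yields an empty iterator
    /// assert_eq!(0, map.get("missing").len());
    /// ```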
    pub fn get(&self, k: K) -> EntryValues<V::Target> {
        self.map_access.get(&k)
            .map(|vec| EntryValues::new(vec.iter()))
            .unwrap_or_else(|| EntryValues::empty())
    }

    /// Returns mutable references to the values associated with the given key.
    ///
    /// If the key is not in the map the returned `EntryValuesMut`
    /// iterator is empty; otherwise it yields at least one element.
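    ///
    /// A minimal sketch (mirroring the `get_mut` test below):
    ///
    /// ```
    /// use total_order_multi_map::TotalOrderMultiMap;
    ///
    /// let mut map = TotalOrderMultiMap::new();
    /// map.add("k1", Box::new(12u32));
    ///
    /// {
    ///     let mut values = map.get_mut("k1");
    ///     *values.next().unwrap() = 44;
    /// }
    ///
    /// assert_eq!(vec![44], map.get("k1").map(|v| *v).collect::<Vec<_>>());
    /// ```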
    pub fn get_mut(&mut self, k: K) -> EntryValuesMut<V::Target> {
        self.map_access.get_mut(&k)
            .map(|vec| EntryValuesMut::new(vec.iter_mut()))
            .unwrap_or_else(|| EntryValuesMut::empty())
    }

    /// Adds a value for the given key to the multi-map.
    ///
    /// Returns access, through `EntryValuesMut`, to all values
    /// previously added for the key plus the value added by
    /// this call.
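    ///
    /// A minimal sketch based on the documented return value:
    ///
    /// ```
    /// use total_order_multi_map::TotalOrderMultiMap;
    ///
    /// let mut map = TotalOrderMultiMap::new();
    /// map.add("k1", "a".to_owned());
    /// // the returned `EntryValuesMut` covers the old and the new value
    /// assert_eq!(2, map.add("k1", "b".to_owned()).len());
    /// ```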
    pub fn add(&mut self, key: K, value: V) -> EntryValuesMut<V::Target> {
        self.entry(key).add(value)
    }

    /// Sets the value associated with the given key.
    ///
    /// Values previously associated with the key are
    /// removed and returned.
    pub fn set(&mut self, key: K, value: V) -> Vec<V> {
        self.entry(key).set(value)
    }

    /// Removes and returns the last inserted element.
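    ///
    /// A minimal sketch (mirroring the `pop` test below):
    ///
    /// ```
    /// use total_order_multi_map::TotalOrderMultiMap;
    ///
    /// let mut map = TotalOrderMultiMap::new();
    /// map.add("k1", "hy".to_owned());
    /// map.add("k2", "last".to_owned());
    ///
    /// assert_eq!(Some(("k2", "last".to_owned())), map.pop());
    /// assert_eq!(1, map.len());
    /// ```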
    pub fn pop(&mut self) -> Option<(K, V)> {
        if let Some(&(k, ref val)) = self.vec_data.last() {
            Self::delete_last_inserted_from_map_with_same_ptr(
                &mut self.map_access, k, val);
        } else {
            return None;
        }

        self.vec_data.pop()
    }

    /// Keeps the first `to_len` inserted elements, removing the remaining ones.
    ///
    /// If `to_len` is equal to or larger than the current length nothing will happen.
    ///
    /// This won't affect the capacity.
    ///
    /// Which elements are kept/removed depends only on the insertion order of
    /// the elements in the map and is independent of their keys or of whether
    /// other elements with the same key had been inserted before/after them.
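    ///
    /// A minimal sketch (mirroring the truncate tests below):
    ///
    /// ```
    /// use total_order_multi_map::TotalOrderMultiMap;
    ///
    /// let mut map = TotalOrderMultiMap::new();
    /// map.add("a", "hy".to_owned());
    /// map.add("b", "ho".to_owned());
    /// map.add("a", "urgs".to_owned());
    ///
    /// map.truncate(2);
    ///
    /// assert_eq!(
    ///     vec![("a", "hy"), ("b", "ho")],
    ///     map.iter().collect::<Vec<_>>()
    /// );
    /// ```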
    pub fn truncate(&mut self, to_len: usize) {
        if to_len >= self.len() {
            return;
        }

        {
            let mut to_delete_iter = self.vec_data[to_len..].iter();
            while let Some(&(key, ref val)) = to_delete_iter.next_back() {
                Self::delete_last_inserted_from_map_with_same_ptr(
                    &mut self.map_access, key, &val);
            }
        }

        self.vec_data.truncate(to_len);
    }

    /// Removes the last inserted value from the `map_access` bucket for the given key.
    ///
    /// # Panics
    ///
    /// If the key doesn't correspond to a key in `map`, or if
    /// there is no ptr in the map equal to the ptr gotten
    /// from the given value, this will panic. As this function
    /// is only used in places where the key/value were taken from
    /// the `vec_data` array, this situation can only
    /// appear if the map is in an inconsistent state.
    ///
    /// # Design Notes
    ///
    /// The first pointer match in the relevant bucket _from the back_
    /// is removed. Wrt. this it's good to be aware of two things:
    ///
    /// - for trait objects two pointers might point to the same address
    ///   but be different things, _but_ if they are different things
    ///   their vtable part is not the same and as such they won't be
    ///   equal under pointer equality
    ///
    /// - all things later in the same bucket had been inserted after
    ///   this match (in total insertion order), which means that even
    ///   if the first statement doesn't hold it's okay to use this
    ///   to remove the last "n" entries from the back/front.
    ///
    fn delete_last_inserted_from_map_with_same_ptr(
        map: &mut HashMap<K, Vec<*mut V::Target>>,
        key: K,
        val: &V::Target
    )
    {
        let exp_ptr: *const V::Target = val;
        let bucket = map.get_mut(&key)
            .expect("[BUG] key in vec_data but not map_access");

        let to_remove_idx = bucket.iter()
            .rposition(|ptr| *ptr as *const _ == exp_ptr)
            .expect("[BUG] no ptr for value in map_access");

        bucket.remove(to_remove_idx);
    }

    //FIXME(UPSTREAM): use drain_filter instead of retain once stable, then return Vec<V>
    //  currently it returns true as long as at least one element is removed;
    //  once `drain_where` (or `drain_filter`) is stable it should be changed
    //  to return the removed values
    /// Removes all values associated with the given key.
    ///
    /// I.e. this removes all key-value pairs whose key
    /// is equal to the given key.
    ///
    /// Returns `true` if at least one value was removed.
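    ///
    /// A minimal sketch (mirroring the `remove_all` test below):
    ///
    /// ```
    /// use total_order_multi_map::TotalOrderMultiMap;
    ///
    /// let mut map = TotalOrderMultiMap::new();
    /// map.add("k1", "ok".to_owned());
    /// map.add("k2", "why not?".to_owned());
    /// map.add("k1", "run".to_owned());
    ///
    /// assert_eq!(true, map.remove_all("k1"));
    /// // the key is gone now, so a second call removes nothing
    /// assert_eq!(false, map.remove_all("k1"));
    ///
    /// assert_eq!(
    ///     vec![("k2", "why not?")],
    ///     map.iter().collect::<Vec<_>>()
    /// );
    /// ```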
    pub fn remove_all(&mut self, key_to_remove: K) -> bool {
        if let Some(_) = self.map_access.remove(&key_to_remove) {
            self.vec_data.retain(|&(key, _)| key != key_to_remove);
            true
        } else {
            false
        }
    }

    //TODO use iter_mut(), &mut V::Target
    /// Retains only the key-value pairs for which `func` returns `true`.
    ///
    /// All key-value pairs for which the predicate `func` returns
    /// `false` will be removed.
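    ///
    /// A minimal sketch (mirroring the `retain` test below):
    ///
    /// ```
    /// use total_order_multi_map::TotalOrderMultiMap;
    ///
    /// let mut map = TotalOrderMultiMap::new();
    /// map.add("k1", "ok".to_owned());
    /// map.add("k2", "why not?".to_owned());
    /// map.add("k1", "run".to_owned());
    ///
    /// // keep only values longer than two characters
    /// map.retain(|_key, val| val.len() > 2);
    ///
    /// assert_eq!(
    ///     vec![("k2", "why not?"), ("k1", "run")],
    ///     map.iter().collect::<Vec<_>>()
    /// );
    /// ```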
    pub fn retain<FN>(&mut self, mut func: FN)
        where FN: FnMut(K, &V::Target) -> bool
    {
        let mut to_remove_ptr = Vec::new();
        let mut to_remove_idx = Vec::new();

        // first pass: collect the positions (in `vec_data`) and pointers
        // (for `map_access`) of all values the predicate rejects
        for (idx, (key, val)) in self.iter().enumerate() {
            if !func(key, val) {
                let vptr: *const V::Target = val;
                to_remove_idx.push(idx);
                to_remove_ptr.push((key, vptr));
            }
        }

        if to_remove_idx.is_empty() {
            return
        }

        // second pass: remove the collected pointers from `map_access`
        // (dropping buckets which became empty)
        for (key, ptr) in to_remove_ptr.into_iter() {
            let needs_key_removal;
            {
                if let Some(values) = self.map_access.get_mut(&key) {
                    //TODO use remove_item once stable (rustc #40062) [inlined unstable def]
                    match values.iter().position(|x| *x as *const _ == ptr) {
                        Some(idx) => {
                            values.remove(idx);
                        },
                        None => unreachable!(
                            "[BUG] inconsistent state, value is not in map_access but in vec_data")
                    }
                    needs_key_removal = values.is_empty();
                } else {
                    unreachable!(
                        "[BUG] inconsistent state, value is not in map_access but in vec_data")
                }
            }
            if needs_key_removal {
                self.map_access.remove(&key);
            }
        }

        // third pass: drop the rejected entries from `vec_data`, walking the
        // (sorted) list of indices collected in the first pass
        let mut idx = 0;
        //INDEX_SAFE: we short-circuited on empty above
        let mut next_removal = to_remove_idx[0];
        let mut to_remove_idx = &to_remove_idx[1..];
        self.vec_data.retain(|_| {
            let retain =
                if idx == next_removal {
                    if to_remove_idx.is_empty() {
                        // we won't get to idx == 0 again
                        next_removal = 0;
                    } else {
                        next_removal = to_remove_idx[0];
                        to_remove_idx = &to_remove_idx[1..];
                    }
                    false
                } else {
                    true
                };
            idx += 1;
            retain
        })
    }
}

impl<K, V> Debug for TotalOrderMultiMap<K, V>
    where K: Hash + Eq + Copy + Debug,
          V: StableDeref + DerefMut + Debug
{
    fn fmt(&self, fter: &mut fmt::Formatter) -> fmt::Result {
        write!(fter, "TotalOrderMultiMap {{ ")?;
        for &(key, ref val_cont) in self.vec_data.iter() {
            write!(fter, "{:?} => {:?},", key, val_cont)?;
        }
        write!(fter, " }}")
    }
}

impl<K, V> Clone for TotalOrderMultiMap<K, V>
    where K: Hash + Eq + Copy,
          V: StableDeref + DerefMut + Clone
{
    fn clone(&self) -> Self {
        let vec_data = Vec::with_capacity(self.vec_data.len());
        let map_access = HashMap::with_capacity(self.map_access.len());
        let mut map = TotalOrderMultiMap { map_access, vec_data };

        for &(k, ref val) in self.vec_data.iter() {
            map.add(k, val.clone());
        }

        map
    }
}

/// Equality comparison which does consider the insertion order.
impl<K, V> PartialEq<Self> for TotalOrderMultiMap<K, V>
    where K: Hash + Eq + Copy,
          V: StableDeref + DerefMut + PartialEq<V>,
{
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.vec_data.eq(&other.vec_data)
    }
}

/// Equality comparison which does consider the insertion order.
impl<K, V> Eq for TotalOrderMultiMap<K, V>
    where K: Hash + Eq + Copy,
          V: StableDeref + DerefMut + Eq,
{}

impl<K, V> IntoIterator for TotalOrderMultiMap<K, V>
    where K: Hash + Eq + Copy,
          V: StableDeref + DerefMut,
{
    type Item = (K, V);
    type IntoIter = vec::IntoIter<(K, V)>;

    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        let TotalOrderMultiMap { vec_data, map_access } = self;
        drop(map_access);
        vec_data.into_iter()
    }
}

impl<K, V> FromIterator<(K, V)> for TotalOrderMultiMap<K, V>
    where K: Hash + Eq + Copy,
          V: StableDeref + DerefMut,
{
    fn from_iter<I: IntoIterator<Item=(K, V)>>(src: I) -> Self {
        let src_iter = src.into_iter();

        let mut out = {
            let (min, _) = src_iter.size_hint();
            if min > 0 {
                Self::with_capacity(min)
            } else {
                Self::default()
            }
        };
        <Self as Extend<(K, V)>>::extend(&mut out, src_iter);
        out
    }
}

impl<K, V> Extend<(K, V)> for TotalOrderMultiMap<K, V>
    where K: Hash + Eq + Copy,
          V: StableDeref + DerefMut
{
    fn extend<I>(&mut self, src: I)
        where I: IntoIterator<Item=(K, V)>
    {
        for (key, value) in src.into_iter() {
            self.add(key, value);
        }
    }
}

/// A type providing access to all values associated with "some key".
///
/// This is mainly an iterator over the values, or more precisely over
/// references to the inner values of the value containers.
///
/// This is returned by `TotalOrderMultiMap::get`, so it does not
/// contain the key, as the key should be known in any context where this
/// type appears.
pub struct EntryValues<'a, T: ?Sized + 'a> {
    /// Note: we might have `*mut T` values but we are only allowed to
    /// use them as `*const T` in this context!!
    inner_iter: Option<slice::Iter<'a, *mut T>>,
}

impl<'a, T> EntryValues<'a, T>
    where T: ?Sized + 'a
{
    pub fn empty() -> Self {
        EntryValues { inner_iter: None }
    }

    fn new(inner_iter: slice::Iter<'a, *mut T>) -> Self {
        EntryValues { inner_iter: Some(inner_iter) }
    }
}

// This iterator can be cloned cheaply.
impl<'a, T: ?Sized + 'a> Clone for EntryValues<'a, T> {
    fn clone(&self) -> Self {
        EntryValues {
            inner_iter: self.inner_iter.clone(),
        }
    }
}

impl<'a, T: ?Sized + 'a> Iterator for EntryValues<'a, T> {
    type Item = &'a T;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        //SAFE: the pointers are guaranteed to be valid, at least for lifetime 'a
        self.inner_iter
            .as_mut()
            .map(|iter| iter.next().map(|&ptr| unsafe { &*ptr }))
            .unwrap_or(None)
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner_iter
            .as_ref()
            .map(|iter| iter.size_hint())
            .unwrap_or((0, Some(0)))
    }
}

impl<'a, T: ?Sized + 'a> ExactSizeIterator for EntryValues<'a, T> {

    #[inline]
    fn len(&self) -> usize {
        self.inner_iter
            .as_ref()
            .map(|iter| iter.len())
            .unwrap_or(0)
    }
}

impl<'a, T> Debug for EntryValues<'a, T>
    where T: ?Sized + Debug + 'a
{
    fn fmt(&self, fter: &mut fmt::Formatter) -> fmt::Result {
        let metoo = DebugIterableOpaque::new(self.clone());
        fter.debug_struct("EntryValues")
            .field("inner_iter", &metoo)
            .finish()
    }
}

/// A type providing mutable access to all values associated with "some key".
///
/// This is mainly an iterator over the values, or more precisely over
/// mutable references to the inner values of the value containers.
///
/// This is returned by `TotalOrderMultiMap::get_mut`, so it does not
/// contain the key, as the key should be known in any context where this
/// type appears.
pub struct EntryValuesMut<'a, T: ?Sized + 'a> {
    inner_iter: Option<slice::IterMut<'a, *mut T>>,
}

impl<'a, T: ?Sized + 'a> From<EntryValuesMut<'a, T>> for EntryValues<'a, T> {
    fn from(valmut: EntryValuesMut<'a, T>) -> Self {
        let EntryValuesMut { inner_iter } = valmut;
        let inner_iter = inner_iter.map(|iter_mut| {
            let as_slice = iter_mut.into_slice();
            as_slice.iter()
        });
        EntryValues { inner_iter }
    }
}

impl<'a, T> EntryValuesMut<'a, T>
    where T: ?Sized + 'a
{
    pub fn empty() -> Self {
        EntryValuesMut { inner_iter: None }
    }

    fn new(inner_iter: slice::IterMut<'a, *mut T>) -> Self {
        EntryValuesMut { inner_iter: Some(inner_iter) }
    }
}

impl<'a, T: ?Sized + 'a> Iterator for EntryValuesMut<'a, T> {
    type Item = &'a mut T;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        //SAFE: the pointers are guaranteed to be valid, at least for lifetime 'a
        self.inner_iter
            .as_mut()
            .map(|iter| iter.next().map(|&mut ptr| unsafe { &mut *ptr }))
            .unwrap_or(None)
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner_iter
            .as_ref()
            .map(|iter| iter.size_hint())
            .unwrap_or((0, Some(0)))
    }
}

impl<'a, T: ?Sized + 'a> ExactSizeIterator for EntryValuesMut<'a, T> {

    #[inline]
    fn len(&self) -> usize {
        self.inner_iter
            .as_ref()
            .map(|iter| iter.len())
            .unwrap_or(0)
    }
}

impl<'a, T> Debug for EntryValuesMut<'a, T>
    where T: ?Sized + Debug + 'a
{
    fn fmt(&self, fter: &mut fmt::Formatter) -> fmt::Result {
        fter.write_str("EntryValuesMut(..)")
    }
}

/// see `SyncSendHelper`
unsafe impl<K, V> Send for TotalOrderMultiMap<K, V>
    where SyncSendHelper<K, V>: Send, V: StableDeref + DerefMut, K: Hash + Eq + Copy {}

/// see `SyncSendHelper`
unsafe impl<K, V> Sync for TotalOrderMultiMap<K, V>
    where SyncSendHelper<K, V>: Sync, V: StableDeref + DerefMut, K: Hash + Eq + Copy {}

/// Delegates the job of deciding about Send/Sync to rustc (ignore this).
///
/// Only the `*mut V::Target` in `TotalOrderMultiMap` is not Send/Sync by default, as
/// it's a pointer, but we can ignore it: whenever we access a value through
/// it we can argue that we could have accessed the value "just" through safe code.
/// It would just have been slower. And using the fast path doesn't circumvent any
/// safety mechanisms like e.g. lock guards. As such, if this struct is `Send`/`Sync`
/// then `TotalOrderMultiMap` can be `Send`/`Sync`, too.
pub struct SyncSendHelper<K, V> { _p: ::std::marker::PhantomData<(K, V)> }

#[cfg(test)]
mod test {
    use std::mem;
    use std::collections::HashSet;
    use super::*;

    // use std::sync::Arc;
    // fn arc_str(s: &str) -> Arc<str> {
    //     <Arc<str> as From<String>>::from(s.to_owned())
    // }

    #[test]
    fn convert_entry_values_mut_to_non_mut() {
        let mut map = TotalOrderMultiMap::new();
        map.add("k1", "v1".to_owned());
        let iter: EntryValues<_> = map.get_mut("k1").into();
        assert_eq!(
            vec!["v1"],
            iter.collect::<Vec<_>>()
        );
    }

    #[test]
    fn clone_works_fine() {
        let obj_single = Box::new("hy".to_owned());
        let obj_multi_a = Box::new("there".to_owned());
        let obj_multi_b = Box::new("so".to_owned());

        // remember the original addresses, to make sure the clone points to new ones
        let mut used_addresses = HashSet::new();
        used_addresses.insert(&*obj_single as *const String as usize);
        used_addresses.insert(&*obj_multi_a as *const String as usize);
        used_addresses.insert(&*obj_multi_b as *const String as usize);

        let mut map = TotalOrderMultiMap::with_capacity(10);
        map.add("k1", obj_single);
        map.add("k2", obj_multi_a);
        map.add("k2", obj_multi_b);

        let map2 = map.clone();
        // "hide" map to make sure there
        // is no accidental misuse
        let __map = map;

        // check that the addresses are "new" addresses
        for val in map2.get("k1") {
            let ptr = val as *const String as usize;
            assert!(!used_addresses.contains(&ptr));
        }
        for val in map2.get("k2") {
            let ptr = val as *const String as usize;
            assert!(!used_addresses.contains(&ptr));
        }

        for (_k, v) in map2.iter() {
            let ptr = v as *const String as usize;
            assert!(!used_addresses.contains(&ptr));
            // here we also make sure there are no collisions
            used_addresses.insert(ptr);
        }

        assert_eq!(used_addresses.len(), 2 * map2.len());

        mem::drop(__map);
        mem::drop(map2);
    }

    #[test]
    fn aux_fns() {
        let mut map = TotalOrderMultiMap::with_capacity(10);

        assert_eq!(true, map.is_empty());

        map.add("key1", Box::new(13u32));

        assert_eq!(false, map.is_empty());
        map.add("key2", Box::new(1));

        assert_eq!(10, map.capacity());
        assert_eq!(2, map.len());

        map.shrink_to_fit();

        assert_eq!(2, map.capacity());

        // this is not reserve_exact so it can reserve more
        // than space for one element
        map.reserve(1);

        assert!(map.capacity() >= 3);

        map.add("key1", Box::new(44));
        map.add("key1", Box::new(44));

        assert_eq!(4, map.len());
        assert!(map.capacity() >= 4);

        map.clear();
        assert_eq!(true, map.is_empty());
        assert_eq!(0, map.len());
    }

    #[test]
    fn works_with_trait_objects() {
        use std::fmt::Debug;
        let mut map = TotalOrderMultiMap::<&'static str, Box<Debug>>::new();
        map.add("hy", Box::new("h".to_owned()));
        map.add("hy", Box::new(2));
        map.add("ho", Box::new("o".to_owned()));

        let view = map.values().collect::<Vec<_>>();
        assert_eq!(
            "\"h\" 2 \"o\"",
            format!("{:?} {:?} {:?}", view[0], view[1], view[2])
        )
    }

    #[test]
    fn get_set() {
        let mut map = TotalOrderMultiMap::new();
        let a = "a".to_owned();
        map.add("k1", a.clone());
        map.add("k1", a.clone());
        map.add("k2", a.clone());
        map.add("k3", "y".to_owned());
        map.add("k4", "z".to_owned());
        map.add("k4", "a".to_owned());
        map.add("k1", "e".to_owned());

        let val_k1 = map.get("k1");
        assert_eq!(3, val_k1.len());
        assert_eq!((3, Some(3)), val_k1.size_hint());
        assert_eq!(
            ["a", "a", "e"],
            val_k1.collect::<Vec<_>>().as_slice()
        );
    }

    #[test]
    fn get_mut() {
        let ka = "aa";
        let kb = "bb";

        let a: Box<u32> = Box::new(12u32);
        let b: Box<u32> = Box::new(13u32);

        let mut map = TotalOrderMultiMap::new();
        map.add(ka, a);
        map.add(kb, b);

        {
            let mut a_vals = map.get_mut(ka);
            let ref_a = a_vals.next().unwrap();
            *ref_a = 44;
        }
        assert_eq!(2, map.len());
        assert_eq!(vec![44], map.get(ka).map(|v| *v).collect::<Vec<_>>());
        assert_eq!(vec![13], map.get(kb).map(|v| *v).collect::<Vec<_>>());
        assert_eq!(0, map.get(&ka[..1]).len());
    }

    #[test]
    fn truncate_longer() {
        let mut map = TotalOrderMultiMap::new();
        map.add("a", "hy".to_owned());
        map.add("b", "ho".to_owned());
        map.add("a", "urgs".to_owned());

        map.truncate(10);

        assert_eq!(
            vec![("a", "hy"), ("b", "ho"), ("a", "urgs")],
            map.iter().collect::<Vec<_>>()
        );

        assert_eq!(
            vec!["hy", "urgs"],
            map.get("a").collect::<Vec<_>>()
        );

        assert_eq!(
            vec!["ho"],
            map.get("b").collect::<Vec<_>>()
        );
    }

    #[test]
    fn truncate_a_few_elements() {
        let mut map = TotalOrderMultiMap::new();
        map.add("a", "hy".to_owned());
        map.add("b", "ho".to_owned());
        map.add("a", "urgs".to_owned());
        map.add("c", "cirgs".to_owned());

        map.truncate(2);

        assert_eq!(
            vec![("a", "hy"), ("b", "ho")],
            map.iter().collect::<Vec<_>>()
        );

        assert_eq!(
            vec!["hy"],
            map.get("a").collect::<Vec<_>>()
        );

        assert_eq!(
            vec!["ho"],
            map.get("b").collect::<Vec<_>>()
        );

        assert_eq!(
            Vec::<&'static str>::new(),
            map.get("c").collect::<Vec<_>>()
        );
    }

    // Re-enable once non-DerefMut values can be used again
    // #[test]
    // fn get_set() {
    //     let mut map = TotalOrderMultiMap::new();
    //     let a = arc_str("a");
    //     let co_a = a.clone();
    //     let eq_a = arc_str("a");
    //     map.add("k1", a);
    //     map.add("k1", co_a);
    //     map.add("k1", eq_a.clone());
    //     map.add("k2", eq_a);
    //     map.add("k3", arc_str("y"));
    //     map.add("k4", arc_str("z"));
    //     map.add("k4", arc_str("a"));
    //     map.add("k1", arc_str("e"));

    //     let val_k1 = map.get("k1");
    //     assert_eq!(true, val_k1.is_some());
    //     let val_k1 = val_k1.unwrap();
    //     assert_eq!(4, val_k1.len());
    //     assert_eq!((4, Some(4)), val_k1.size_hint());
    //     assert_eq!(["a", "a", "a", "e"], val_k1.collect::<Vec<_>>().as_slice());

    //     let val_k2 = map.get("k2");
    //     assert_eq!(true, val_k2.is_some());
    //     let val_k2 = val_k2.unwrap();
    //     assert_eq!(1, val_k2.len());
    //     assert_eq!((1, Some(1)), val_k2.size_hint());
    //     assert_eq!(["a"], val_k2.collect::<Vec<_>>().as_slice());

    //     assert_eq!(
    //         ["a", "a", "a", "a", "y", "z", "a", "e"],
    //         map.values().collect::<Vec<_>>().as_slice()
    //     );

    //     let mut expected = HashSet::new();
    //     expected.insert(("k1", vec!["a", "a", "a", "e"]));
    //     expected.insert(("k2", vec!["a"]));
    //     expected.insert(("k3", vec!["y"]));
    //     expected.insert(("k4", vec!["z", "a"]));
    //     assert_eq!(
    //         expected,
    //         map.group_iter()
    //             .map(|giter| (giter.key(), giter.collect::<Vec<_>>()))
    //             .collect::<HashSet<_>>()
    //     );

    //     assert_eq!(
    //         [("k1", "a"), ("k1", "a"), ("k1", "a"), ("k2", "a"),
    //          ("k3", "y"), ("k4", "z"), ("k4", "a"), ("k1", "e")],
    //         map.iter().collect::<Vec<_>>().as_slice()
    //     );
    // }

    #[test]
    fn pop() {
        let mut map = TotalOrderMultiMap::new();
        map.add("k1", "hy".to_owned());
        map.add("k2", "ho".to_owned());
        map.add("k1", "last".to_owned());

        let last = map.pop();
        assert_eq!(
            Some(("k1", "last".to_owned())),
            last
        );
    }

//    #[test]
//    fn get_mut() {
//        use std::sync::Arc;
//        use std::cell::RefCell;
//        let mut map = TotalOrderMultiMap::new();
//        map.add("k1", Arc::new(RefCell::new(12)));
//
//        let cell = map.get_mut("k1").unwrap().next().unwrap();
//        *cell.borrow_mut() = 55;
//
//        let exp_cell = RefCell::new(55);
//        assert_eq!(
//            [ ("k1", &exp_cell) ],
//            map.iter().collect::<Vec<_>>().as_slice()
//        )
//    }

    #[test]
    fn reverse() {
        let mut map = TotalOrderMultiMap::new();
        map.add("k1", "ok".to_owned());
        map.add("k2", "why not?".to_owned());
        map.reverse();

        assert_eq!(
            [("k2", "why not?"), ("k1", "ok")],
            map.iter().collect::<Vec<_>>().as_slice()
        );
    }

    #[test]
    fn remove_all() {
        let mut map = TotalOrderMultiMap::new();
        map.add("k1", "ok".to_owned());
        map.add("k2", "why not?".to_owned());
        map.add("k1", "run".to_owned());
        map.add("k2", "jump".to_owned());
        let did_rm = map.remove_all("k1");
        assert_eq!(true, did_rm);
        assert_eq!(false, map.remove_all("not_a_key"));

        assert_eq!(
            [("k2", "why not?"), ("k2", "jump")],
            map.iter().collect::<Vec<_>>().as_slice()
        );
    }

    #[test]
    fn retain() {
        let mut map = TotalOrderMultiMap::new();
        map.add("k1", "ok".to_owned());
        map.add("k2", "why not?".to_owned());
        map.add("k1", "run".to_owned());
        map.add("k2", "uh".to_owned());

        map.retain(|key, val| {
            assert!(key.len() == 2);
            val.len() > 2
        });

        assert_eq!(
            [("k2", "why not?"), ("k1", "run")],
            map.iter().collect::<Vec<_>>().as_slice()
        );
    }

    // Test can only be re-enabled once non-DerefMut values are supported again.
    // #[test]
    // fn retain_with_equal_pointers() {
    //     let mut map = TotalOrderMultiMap::new();
    //     let v1 = arc_str("v1");
    //     map.add("k1", v1.clone());
    //     map.add("k2", v1.clone());
    //     map.add("k1", arc_str("v2"));
    //     map.add("k1", v1);

    //     let mut rem_count = 0;
    //     map.retain(|_key, val| {
    //         if rem_count >= 2 { return true; }
    //         if &*val == "v1" {
    //             rem_count += 1;
    //             false
    //         } else {
    //             true
    //         }
    //     });

    //     assert_eq!(
    //         [("k1", "v2"), ("k1", "v1")],
    //         map.iter().collect::<Vec<_>>().as_slice()
    //     );
    // }

    trait AssertSend: Send {}
    impl<K: Send, V: Send> AssertSend for TotalOrderMultiMap<K, V>
        where V: StableDeref + DerefMut, K: Hash + Eq + Copy, V::Target: Sync {}

    trait AssertSync: Sync {}
    impl<K: Sync, V: Sync> AssertSync for TotalOrderMultiMap<K, V>
        where V: StableDeref + DerefMut, K: Hash + Eq + Copy, V::Target: Sync {}
}