use alloc::heap::{EMPTY, allocate, deallocate};
use core::cmp;
use core::hash::{BuildHasher, Hash, Hasher};
use core::intrinsics::needs_drop;
use core::marker;
use core::mem::{align_of, size_of};
use core::mem;
use core::ops::{Deref, DerefMut};
use core::ptr::{self, Unique, Shared};
use self::BucketState::*;
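// A raw open-addressing hash table: hashes, keys and values live in three
// parallel arrays inside a single allocation. Probing is linear, and the
// `displacement`/`GapThenFull` machinery below supports Robin Hood-style
// insertion and backward-shift deletion.

/// A hash value of zero marks an empty bucket; `SafeHash` is never zero.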
const EMPTY_BUCKET: u64 = 0;
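/// The backing storage of a hash table: a single allocation holding
/// `capacity` hashes, keys and values laid out as three consecutive arrays.
/// `capacity` is always zero or a power of two, and `size` counts the
/// occupied buckets. `hashes` points at the start of the hash array (or at
/// `EMPTY` for a zero-capacity table).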
#[unsafe_no_drop_flag]
pub struct RawTable<K, V> {
capacity: usize,
size: usize,
hashes: Unique<u64>,
marker: marker::PhantomData<(K, V)>,
}
unsafe impl<K: Send, V: Send> Send for RawTable<K, V> {}
unsafe impl<K: Sync, V: Sync> Sync for RawTable<K, V> {}
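/// Raw pointers to one bucket's hash, key and value. The key and value
/// pointers are stored as `*const` and only cast to `*mut` where mutation is
/// actually permitted.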
struct RawBucket<K, V> {
hash: *mut u64,
key: *const K,
val: *const V,
_marker: marker::PhantomData<(K, V)>,
}
impl<K, V> Copy for RawBucket<K, V> {}
impl<K, V> Clone for RawBucket<K, V> {
fn clone(&self) -> RawBucket<K, V> {
*self
}
}
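/// A bucket position (raw pointers plus index) together with a handle `M` to
/// the table it belongs to. A plain `Bucket` says nothing about whether the
/// slot is occupied; `peek` refines it into `EmptyBucket` or `FullBucket`.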
pub struct Bucket<K, V, M> {
raw: RawBucket<K, V>,
idx: usize,
table: M,
}
impl<K, V, M: Copy> Copy for Bucket<K, V, M> {}
impl<K, V, M: Copy> Clone for Bucket<K, V, M> {
fn clone(&self) -> Bucket<K, V, M> {
*self
}
}
pub struct EmptyBucket<K, V, M> {
raw: RawBucket<K, V>,
idx: usize,
table: M,
}
pub struct FullBucket<K, V, M> {
raw: RawBucket<K, V>,
idx: usize,
table: M,
}
pub type EmptyBucketImm<'table, K, V> = EmptyBucket<K, V, &'table RawTable<K, V>>;
pub type FullBucketImm<'table, K, V> = FullBucket<K, V, &'table RawTable<K, V>>;
pub type EmptyBucketMut<'table, K, V> = EmptyBucket<K, V, &'table mut RawTable<K, V>>;
pub type FullBucketMut<'table, K, V> = FullBucket<K, V, &'table mut RawTable<K, V>>;
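/// The result of `Bucket::peek`: the slot is known to be empty or known to
/// be full.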
pub enum BucketState<K, V, M> {
Empty(EmptyBucket<K, V, M>),
Full(FullBucket<K, V, M>),
}
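/// An empty bucket (the gap) whose immediately following bucket is full;
/// used by `shift` to move entries backwards after a removal.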
pub struct GapThenFull<K, V, M> {
gap: EmptyBucket<K, V, ()>,
full: FullBucket<K, V, M>,
}
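/// A hash value that is guaranteed not to equal `EMPTY_BUCKET`, and can
/// therefore be stored directly in the hash array.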
#[derive(PartialEq, Copy, Clone)]
pub struct SafeHash {
hash: u64,
}
impl SafeHash {
#[inline(always)]
pub fn inspect(&self) -> u64 {
self.hash
}
}
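/// Hashes `t` with the given `BuildHasher` and forces the top bit of the
/// result to 1, so it can never collide with `EMPTY_BUCKET` (zero). This
/// sacrifices one bit of the hash.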
pub fn make_hash<T: ?Sized, S>(hash_state: &S, t: &T) -> SafeHash
where T: Hash,
S: BuildHasher
{
let mut state = hash_state.build_hasher();
t.hash(&mut state);
SafeHash { hash: 0x8000_0000_0000_0000 | state.finish() }
}
#[test]
fn can_alias_safehash_as_u64() {
assert_eq!(size_of::<SafeHash>(), size_of::<u64>())
}
impl<K, V> RawBucket<K, V> {
unsafe fn offset(self, count: isize) -> RawBucket<K, V> {
RawBucket {
hash: self.hash.offset(count),
key: self.key.offset(count),
val: self.val.offset(count),
_marker: marker::PhantomData,
}
}
}
impl<K, V, M> FullBucket<K, V, M> {
pub fn table(&self) -> &M {
&self.table
}
pub fn into_table(self) -> M {
self.table
}
pub fn index(&self) -> usize {
self.idx
}
}
impl<K, V, M> EmptyBucket<K, V, M> {
pub fn table(&self) -> &M {
&self.table
}
}
impl<K, V, M> Bucket<K, V, M> {
pub fn index(&self) -> usize {
self.idx
}
}
impl<K, V, M> Deref for FullBucket<K, V, M>
where M: Deref<Target = RawTable<K, V>>
{
type Target = RawTable<K, V>;
fn deref(&self) -> &RawTable<K, V> {
&self.table
}
}
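/// Implemented by table handles through which a bucket may be filled.
/// `borrow_table_mut` is unsafe because the returned reference must not be
/// used to invalidate other buckets pointing into the same table.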
pub trait Put<K, V> {
unsafe fn borrow_table_mut(&mut self) -> &mut RawTable<K, V>;
}
impl<'t, K, V> Put<K, V> for &'t mut RawTable<K, V> {
unsafe fn borrow_table_mut(&mut self) -> &mut RawTable<K, V> {
*self
}
}
impl<K, V, M> Put<K, V> for Bucket<K, V, M>
where M: Put<K, V>
{
unsafe fn borrow_table_mut(&mut self) -> &mut RawTable<K, V> {
self.table.borrow_table_mut()
}
}
impl<K, V, M> Put<K, V> for FullBucket<K, V, M>
where M: Put<K, V>
{
unsafe fn borrow_table_mut(&mut self) -> &mut RawTable<K, V> {
self.table.borrow_table_mut()
}
}
impl<K, V, M: Deref<Target = RawTable<K, V>>> Bucket<K, V, M> {
pub fn new(table: M, hash: SafeHash) -> Bucket<K, V, M> {
Bucket::at_index(table, hash.inspect() as usize)
}
pub fn at_index(table: M, ib_index: usize) -> Bucket<K, V, M> {
debug_assert!(table.capacity() > 0,
"Table should have capacity at this point");
let ib_index = ib_index & (table.capacity() - 1);
Bucket {
raw: unsafe { table.first_bucket_raw().offset(ib_index as isize) },
idx: ib_index,
table: table,
}
}
pub fn first(table: M) -> Bucket<K, V, M> {
Bucket {
raw: table.first_bucket_raw(),
idx: 0,
table: table,
}
}
pub fn peek(self) -> BucketState<K, V, M> {
match unsafe { *self.raw.hash } {
EMPTY_BUCKET => {
Empty(EmptyBucket {
raw: self.raw,
idx: self.idx,
table: self.table,
})
}
_ => {
Full(FullBucket {
raw: self.raw,
idx: self.idx,
table: self.table,
})
}
}
}
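/// Advances to the next bucket, treating the table as a ring: after the last
/// slot it wraps back to slot 0.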
pub fn next(&mut self) {
self.idx += 1;
let range = self.table.capacity();
// Capacity is a power of two, so the mask is zero exactly when `idx` has
// stepped past the last slot; in that case jump back to the first one.
let dist = if self.idx & (range - 1) == 0 {
1 - range as isize
} else {
1
};
unsafe {
self.raw = self.raw.offset(dist);
}
}
}
impl<K, V, M: Deref<Target = RawTable<K, V>>> EmptyBucket<K, V, M> {
#[inline]
pub fn next(self) -> Bucket<K, V, M> {
let mut bucket = self.into_bucket();
bucket.next();
bucket
}
#[inline]
pub fn into_bucket(self) -> Bucket<K, V, M> {
Bucket {
raw: self.raw,
idx: self.idx,
table: self.table,
}
}
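/// If the bucket after this one is full, returns the (gap, full) pair used
/// for backward shifting; otherwise returns `None`.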
pub fn gap_peek(self) -> Option<GapThenFull<K, V, M>> {
let gap = EmptyBucket {
raw: self.raw,
idx: self.idx,
table: (),
};
match self.next().peek() {
Full(bucket) => {
Some(GapThenFull {
gap: gap,
full: bucket,
})
}
Empty(..) => None,
}
}
}
impl<K, V, M> EmptyBucket<K, V, M>
where M: Put<K, V>
{
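/// Writes the hash, key and value into this empty bucket, increments the
/// table's size, and returns the now-full bucket.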
pub fn put(mut self, hash: SafeHash, key: K, value: V) -> FullBucket<K, V, M> {
unsafe {
*self.raw.hash = hash.inspect();
ptr::write(self.raw.key as *mut K, key);
ptr::write(self.raw.val as *mut V, value);
self.table.borrow_table_mut().size += 1;
}
FullBucket {
raw: self.raw,
idx: self.idx,
table: self.table,
}
}
}
impl<K, V, M: Deref<Target = RawTable<K, V>>> FullBucket<K, V, M> {
#[inline]
pub fn next(self) -> Bucket<K, V, M> {
let mut bucket = self.into_bucket();
bucket.next();
bucket
}
#[inline]
pub fn into_bucket(self) -> Bucket<K, V, M> {
Bucket {
raw: self.raw,
idx: self.idx,
table: self.table,
}
}
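/// Re-packages this bucket so that it itself becomes the table handle of the
/// returned bucket, letting the caller remember this position while
/// continuing to probe through it.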
pub fn stash(self) -> FullBucket<K, V, Self> {
FullBucket {
raw: self.raw,
idx: self.idx,
table: self,
}
}
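/// How far this entry sits from the bucket its hash maps to, i.e. its probe
/// distance.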
pub fn displacement(&self) -> usize {
(self.idx.wrapping_sub(self.hash().inspect() as usize)) & (self.table.capacity() - 1)
}
#[inline]
pub fn hash(&self) -> SafeHash {
unsafe { SafeHash { hash: *self.raw.hash } }
}
pub fn read(&self) -> (&K, &V) {
unsafe { (&*self.raw.key, &*self.raw.val) }
}
}
impl<'t, K, V> FullBucket<K, V, &'t mut RawTable<K, V>> {
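/// Removes this bucket's entry, returning the emptied bucket along with the
/// key and value that were moved out. Decrements the table's size.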
pub fn take(mut self) -> (EmptyBucket<K, V, &'t mut RawTable<K, V>>, K, V) {
self.table.size -= 1;
unsafe {
*self.raw.hash = EMPTY_BUCKET;
(EmptyBucket {
raw: self.raw,
idx: self.idx,
table: self.table,
},
ptr::read(self.raw.key),
ptr::read(self.raw.val))
}
}
}
impl<K, V, M> FullBucket<K, V, M>
where M: Put<K, V>
{
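/// Overwrites the hash, key and value in place and returns the old ones.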
pub fn replace(&mut self, h: SafeHash, k: K, v: V) -> (SafeHash, K, V) {
unsafe {
let old_hash = ptr::replace(self.raw.hash as *mut SafeHash, h);
let old_key = ptr::replace(self.raw.key as *mut K, k);
let old_val = ptr::replace(self.raw.val as *mut V, v);
(old_hash, old_key, old_val)
}
}
}
impl<K, V, M> FullBucket<K, V, M>
where M: Deref<Target = RawTable<K, V>> + DerefMut
{
pub fn read_mut(&mut self) -> (&mut K, &mut V) {
unsafe { (&mut *(self.raw.key as *mut K), &mut *(self.raw.val as *mut V)) }
}
}
impl<'t, K, V, M> FullBucket<K, V, M>
where M: Deref<Target = RawTable<K, V>> + 't
{
pub fn into_refs(self) -> (&'t K, &'t V) {
unsafe { (&*self.raw.key, &*self.raw.val) }
}
}
impl<'t, K, V, M> FullBucket<K, V, M>
where M: Deref<Target = RawTable<K, V>> + DerefMut + 't
{
pub fn into_mut_refs(self) -> (&'t mut K, &'t mut V) {
unsafe { (&mut *(self.raw.key as *mut K), &mut *(self.raw.val as *mut V)) }
}
}
impl<K, V, M> GapThenFull<K, V, M>
where M: Deref<Target = RawTable<K, V>>
{
#[inline]
pub fn full(&self) -> &FullBucket<K, V, M> {
&self.full
}
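/// Moves the full entry back into the gap. The vacated slot becomes the new
/// gap; if the bucket after it is also full, returns the new pair, otherwise
/// `None` (the run of displaced entries has ended).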
pub fn shift(mut self) -> Option<GapThenFull<K, V, M>> {
unsafe {
*self.gap.raw.hash = mem::replace(&mut *self.full.raw.hash, EMPTY_BUCKET);
ptr::copy_nonoverlapping(self.full.raw.key, self.gap.raw.key as *mut K, 1);
ptr::copy_nonoverlapping(self.full.raw.val, self.gap.raw.val as *mut V, 1);
}
let FullBucket { raw: prev_raw, idx: prev_idx, .. } = self.full;
match self.full.next().peek() {
Full(bucket) => {
self.gap.raw = prev_raw;
self.gap.idx = prev_idx;
self.full = bucket;
Some(self)
}
Empty(..) => None,
}
}
}
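/// Rounds `unrounded` up to the next multiple of `target_alignment`, which
/// must be a power of two.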
#[inline]
fn round_up_to_next(unrounded: usize, target_alignment: usize) -> usize {
assert!(target_alignment.is_power_of_two());
(unrounded + target_alignment - 1) & !(target_alignment - 1)
}
#[test]
fn test_rounding() {
assert_eq!(round_up_to_next(0, 4), 0);
assert_eq!(round_up_to_next(1, 4), 4);
assert_eq!(round_up_to_next(2, 4), 4);
assert_eq!(round_up_to_next(3, 4), 4);
assert_eq!(round_up_to_next(4, 4), 4);
assert_eq!(round_up_to_next(5, 4), 8);
}
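/// Given the byte size of the hash array and the size/alignment of the key
/// and value arrays, returns the offsets at which the key and value arrays
/// start within one shared allocation, plus an overflow flag.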
#[inline]
fn calculate_offsets(hashes_size: usize,
keys_size: usize,
keys_align: usize,
vals_align: usize)
-> (usize, usize, bool) {
let keys_offset = round_up_to_next(hashes_size, keys_align);
let (end_of_keys, oflo) = keys_offset.overflowing_add(keys_size);
let vals_offset = round_up_to_next(end_of_keys, vals_align);
(keys_offset, vals_offset, oflo)
}
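/// Returns the required alignment of the shared allocation, the offset of
/// the hash array (always 0), the total size in bytes, and an overflow flag.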
fn calculate_allocation(hash_size: usize,
hash_align: usize,
keys_size: usize,
keys_align: usize,
vals_size: usize,
vals_align: usize)
-> (usize, usize, usize, bool) {
let hash_offset = 0;
let (_, vals_offset, oflo) = calculate_offsets(hash_size, keys_size, keys_align, vals_align);
let (end_of_vals, oflo2) = vals_offset.overflowing_add(vals_size);
let align = cmp::max(hash_align, cmp::max(keys_align, vals_align));
(align, hash_offset, end_of_vals, oflo || oflo2)
}
#[test]
fn test_offset_calculation() {
assert_eq!(calculate_allocation(128, 8, 15, 1, 4, 4),
(8, 0, 148, false));
assert_eq!(calculate_allocation(3, 1, 2, 1, 1, 1), (1, 0, 6, false));
assert_eq!(calculate_allocation(6, 2, 12, 4, 24, 8), (8, 0, 48, false));
assert_eq!(calculate_offsets(128, 15, 1, 4), (128, 144, false));
assert_eq!(calculate_offsets(3, 2, 1, 1), (3, 5, false));
assert_eq!(calculate_offsets(6, 12, 4, 8), (8, 24, false));
}
impl<K, V> RawTable<K, V> {
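/// Allocates space for `capacity` buckets without initialising the hash
/// array. Unsafe: the hashes must be zeroed (or otherwise written) before
/// the table is used or dropped.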
unsafe fn new_uninitialized(capacity: usize) -> RawTable<K, V> {
if capacity == 0 {
return RawTable {
size: 0,
capacity: 0,
hashes: Unique::new(EMPTY as *mut u64),
marker: marker::PhantomData,
};
}
let hashes_size = capacity * size_of::<u64>();
let keys_size = capacity * size_of::<K>();
let vals_size = capacity * size_of::<V>();
let (malloc_alignment, hash_offset, size, oflo) = calculate_allocation(hashes_size,
align_of::<u64>(),
keys_size,
align_of::<K>(),
vals_size,
align_of::<V>());
assert!(!oflo, "capacity overflow");
// One more overflow check, covering the multiplications and rounding that
// went into `size` above.
let size_of_bucket = size_of::<u64>()
.checked_add(size_of::<K>())
.unwrap()
.checked_add(size_of::<V>())
.unwrap();
assert!(size >=
capacity.checked_mul(size_of_bucket)
.expect("capacity overflow"),
"capacity overflow");
let buffer = allocate(size, malloc_alignment);
if buffer.is_null() {
::alloc::oom()
}
let hashes = buffer.offset(hash_offset as isize) as *mut u64;
RawTable {
capacity: capacity,
size: 0,
hashes: Unique::new(hashes),
marker: marker::PhantomData,
}
}
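/// Computes where the key and value arrays start inside the allocation and
/// returns a `RawBucket` pointing at index 0.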
fn first_bucket_raw(&self) -> RawBucket<K, V> {
let hashes_size = self.capacity * size_of::<u64>();
let keys_size = self.capacity * size_of::<K>();
let buffer = *self.hashes as *const u8;
let (keys_offset, vals_offset, oflo) = calculate_offsets(hashes_size,
keys_size,
align_of::<K>(),
align_of::<V>());
debug_assert!(!oflo, "capacity overflow");
unsafe {
RawBucket {
hash: *self.hashes,
key: buffer.offset(keys_offset as isize) as *const K,
val: buffer.offset(vals_offset as isize) as *const V,
_marker: marker::PhantomData,
}
}
}
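/// Creates a table with `capacity` buckets, all marked empty.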
pub fn new(capacity: usize) -> RawTable<K, V> {
unsafe {
let ret = RawTable::new_uninitialized(capacity);
ptr::write_bytes(*ret.hashes, 0, capacity);
ret
}
}
pub fn capacity(&self) -> usize {
self.capacity
}
pub fn size(&self) -> usize {
self.size
}
fn raw_buckets(&self) -> RawBuckets<K, V> {
RawBuckets {
raw: self.first_bucket_raw(),
hashes_end: unsafe { self.hashes.offset(self.capacity as isize) },
marker: marker::PhantomData,
}
}
pub fn iter(&self) -> Iter<K, V> {
Iter {
iter: self.raw_buckets(),
elems_left: self.size(),
}
}
pub fn iter_mut(&mut self) -> IterMut<K, V> {
IterMut {
iter: self.raw_buckets(),
elems_left: self.size(),
_marker: marker::PhantomData,
}
}
pub fn into_iter(self) -> IntoIter<K, V> {
let RawBuckets { raw, hashes_end, .. } = self.raw_buckets();
IntoIter {
iter: RawBuckets {
raw: raw,
hashes_end: hashes_end,
marker: marker::PhantomData,
},
table: self,
}
}
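/// Returns an iterator that moves every entry out of the table, marking each
/// drained bucket empty, while keeping the allocation for reuse.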
pub fn drain(&mut self) -> Drain<K, V> {
let RawBuckets { raw, hashes_end, .. } = self.raw_buckets();
Drain {
iter: RawBuckets {
raw: raw,
hashes_end: hashes_end,
marker: marker::PhantomData,
},
table: unsafe { Shared::new(self) },
marker: marker::PhantomData,
}
}
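/// Returns an iterator that moves the remaining entries out back-to-front
/// without resetting their hashes. Only sound when the table's memory is
/// about to be freed, as in `Drop` below.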
unsafe fn rev_move_buckets(&mut self) -> RevMoveBuckets<K, V> {
let raw_bucket = self.first_bucket_raw();
RevMoveBuckets {
raw: raw_bucket.offset(self.capacity as isize),
hashes_end: raw_bucket.hash,
elems_left: self.size,
marker: marker::PhantomData,
}
}
}
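/// An iterator over the occupied raw buckets of a table; the lifetime ties
/// it to a borrow of the table without storing a reference to it.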
struct RawBuckets<'a, K, V> {
raw: RawBucket<K, V>,
hashes_end: *mut u64,
marker: marker::PhantomData<&'a ()>,
}
impl<'a, K, V> Clone for RawBuckets<'a, K, V> {
fn clone(&self) -> RawBuckets<'a, K, V> {
RawBuckets {
raw: self.raw,
hashes_end: self.hashes_end,
marker: marker::PhantomData,
}
}
}
impl<'a, K, V> Iterator for RawBuckets<'a, K, V> {
type Item = RawBucket<K, V>;
fn next(&mut self) -> Option<RawBucket<K, V>> {
while self.raw.hash != self.hashes_end {
unsafe {
let prev = ptr::replace(&mut self.raw, self.raw.offset(1));
if *prev.hash != EMPTY_BUCKET {
return Some(prev);
}
}
}
None
}
}
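/// An iterator that moves entries out of a table in reverse order; used only
/// while dropping the table.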
struct RevMoveBuckets<'a, K, V> {
raw: RawBucket<K, V>,
hashes_end: *mut u64,
elems_left: usize,
marker: marker::PhantomData<&'a ()>,
}
impl<'a, K, V> Iterator for RevMoveBuckets<'a, K, V> {
type Item = (K, V);
fn next(&mut self) -> Option<(K, V)> {
if self.elems_left == 0 {
return None;
}
loop {
debug_assert!(self.raw.hash != self.hashes_end);
unsafe {
self.raw = self.raw.offset(-1);
if *self.raw.hash != EMPTY_BUCKET {
self.elems_left -= 1;
return Some((ptr::read(self.raw.key), ptr::read(self.raw.val)));
}
}
}
}
}
pub struct Iter<'a, K: 'a, V: 'a> {
iter: RawBuckets<'a, K, V>,
elems_left: usize,
}
unsafe impl<'a, K: Sync, V: Sync> Sync for Iter<'a, K, V> {}
// `Iter` only hands out shared references, so its `Send` impl needs `Sync`
// bounds, just as `&T` is `Send` only when `T: Sync`.
unsafe impl<'a, K: Sync, V: Sync> Send for Iter<'a, K, V> {}
impl<'a, K, V> Clone for Iter<'a, K, V> {
fn clone(&self) -> Iter<'a, K, V> {
Iter {
iter: self.iter.clone(),
elems_left: self.elems_left,
}
}
}
pub struct IterMut<'a, K: 'a, V: 'a> {
iter: RawBuckets<'a, K, V>,
elems_left: usize,
_marker: marker::PhantomData<&'a mut V>,
}
unsafe impl<'a, K: Sync, V: Sync> Sync for IterMut<'a, K, V> {}
unsafe impl<'a, K: Send, V: Send> Send for IterMut<'a, K, V> {}
pub struct IntoIter<K, V> {
table: RawTable<K, V>,
iter: RawBuckets<'static, K, V>,
}
unsafe impl<K: Sync, V: Sync> Sync for IntoIter<K, V> {}
unsafe impl<K: Send, V: Send> Send for IntoIter<K, V> {}
pub struct Drain<'a, K: 'a, V: 'a> {
table: Shared<RawTable<K, V>>,
iter: RawBuckets<'static, K, V>,
marker: marker::PhantomData<&'a RawTable<K, V>>,
}
unsafe impl<'a, K: Sync, V: Sync> Sync for Drain<'a, K, V> {}
unsafe impl<'a, K: Send, V: Send> Send for Drain<'a, K, V> {}
impl<'a, K, V> Iterator for Iter<'a, K, V> {
type Item = (&'a K, &'a V);
fn next(&mut self) -> Option<(&'a K, &'a V)> {
self.iter.next().map(|bucket| {
self.elems_left -= 1;
unsafe { (&*bucket.key, &*bucket.val) }
})
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.elems_left, Some(self.elems_left))
}
}
impl<'a, K, V> ExactSizeIterator for Iter<'a, K, V> {
fn len(&self) -> usize {
self.elems_left
}
}
impl<'a, K, V> Iterator for IterMut<'a, K, V> {
type Item = (&'a K, &'a mut V);
fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
self.iter.next().map(|bucket| {
self.elems_left -= 1;
unsafe { (&*bucket.key, &mut *(bucket.val as *mut V)) }
})
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.elems_left, Some(self.elems_left))
}
}
impl<'a, K, V> ExactSizeIterator for IterMut<'a, K, V> {
fn len(&self) -> usize {
self.elems_left
}
}
impl<K, V> Iterator for IntoIter<K, V> {
type Item = (SafeHash, K, V);
fn next(&mut self) -> Option<(SafeHash, K, V)> {
self.iter.next().map(|bucket| {
self.table.size -= 1;
unsafe {
(SafeHash { hash: *bucket.hash }, ptr::read(bucket.key), ptr::read(bucket.val))
}
})
}
fn size_hint(&self) -> (usize, Option<usize>) {
let size = self.table.size();
(size, Some(size))
}
}
impl<K, V> ExactSizeIterator for IntoIter<K, V> {
fn len(&self) -> usize {
self.table.size()
}
}
impl<'a, K, V> Iterator for Drain<'a, K, V> {
type Item = (SafeHash, K, V);
#[inline]
fn next(&mut self) -> Option<(SafeHash, K, V)> {
self.iter.next().map(|bucket| {
unsafe {
(**self.table).size -= 1;
(SafeHash { hash: ptr::replace(bucket.hash, EMPTY_BUCKET) },
ptr::read(bucket.key),
ptr::read(bucket.val))
}
})
}
fn size_hint(&self) -> (usize, Option<usize>) {
let size = unsafe { (**self.table).size() };
(size, Some(size))
}
}
impl<'a, K, V> ExactSizeIterator for Drain<'a, K, V> {
fn len(&self) -> usize {
unsafe {
(**self.table).size()
}
}
}
impl<'a, K: 'a, V: 'a> Drop for Drain<'a, K, V> {
fn drop(&mut self) {
for _ in self {}
}
}
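// Cloning allocates a fresh table of the same capacity and copies it
// bucket-by-bucket, cloning each key and value.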
impl<K: Clone, V: Clone> Clone for RawTable<K, V> {
fn clone(&self) -> RawTable<K, V> {
unsafe {
let mut new_ht = RawTable::new_uninitialized(self.capacity());
{
let cap = self.capacity();
let mut new_buckets = Bucket::first(&mut new_ht);
let mut buckets = Bucket::first(self);
while buckets.index() != cap {
match buckets.peek() {
Full(full) => {
let (h, k, v) = {
let (k, v) = full.read();
(full.hash(), k.clone(), v.clone())
};
*new_buckets.raw.hash = h.inspect();
ptr::write(new_buckets.raw.key as *mut K, k);
ptr::write(new_buckets.raw.val as *mut V, v);
}
Empty(..) => {
*new_buckets.raw.hash = EMPTY_BUCKET;
}
}
new_buckets.next();
buckets.next();
}
};
new_ht.size = self.size();
new_ht
}
}
}
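// Dropping the table drops any remaining keys and values (when they need it)
// and then frees the single backing allocation.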
impl<K, V> Drop for RawTable<K, V> {
#[unsafe_destructor_blind_to_params]
fn drop(&mut self) {
// Nothing to free for a zero-capacity table, and nothing to do if this table
// was already dropped (`POST_DROP_USIZE` is the sentinel left behind when a
// `#[unsafe_no_drop_flag]` type has been dropped).
if self.capacity == 0 || self.capacity == mem::POST_DROP_USIZE {
return;
}
unsafe {
// Drop the remaining keys and values, but only if they actually need
// dropping; the hashes are plain `u64`s.
if needs_drop::<(K, V)>() {
for _ in self.rev_move_buckets() {}
}
}
let hashes_size = self.capacity * size_of::<u64>();
let keys_size = self.capacity * size_of::<K>();
let vals_size = self.capacity * size_of::<V>();
let (align, _, size, oflo) = calculate_allocation(hashes_size,
align_of::<u64>(),
keys_size,
align_of::<K>(),
vals_size,
align_of::<V>());
debug_assert!(!oflo, "should be impossible");
unsafe {
deallocate(*self.hashes as *mut u8, size, align);
}
}
}