use std::borrow::Borrow;
use std::fmt::{self, Debug, Formatter};
use std::hash::{BuildHasher, Hash};
use std::mem;
use hashbrown::hash_map::DefaultHashBuilder;
use hashbrown::raw::RawTable;
use hashbrown::TryReserveError;
use entry::{Entry, EntryPtr, UnhingedEntry};
pub use entry::entry_size;
pub use error::{InsertError, MutateError, TryInsertError};
pub use iter::{Drain, IntoIter, IntoKeys, IntoValues, Iter, Keys, Values};
pub use mem_size::{HeapSize, MemSize, ValueSize};
mod entry;
mod error;
mod iter;
mod mem_size;
/// An LRU cache that tracks the (approximate) memory usage of its entries and
/// evicts least-recently-used entries once a configured size budget
/// (`max_size`) would be exceeded.
///
/// Entries live in a hash table and are additionally linked into an intrusive
/// doubly linked list ordered by recency of use. The list is anchored by a
/// sentinel ("seal") entry that is not stored in the table: `seal.next` is the
/// most-recently-used entry and `seal.prev` the least-recently-used one.
pub struct LruCache<K, V, S = DefaultHashBuilder> {
    /// Hash table that owns the entries.
    table: RawTable<Entry<K, V>>,
    /// Sentinel node anchoring the recency list.
    seal: EntryPtr<K, V>,
    /// Estimated total size of all entries currently stored.
    current_size: usize,
    /// Budget for `current_size`; exceeding it triggers LRU eviction.
    max_size: usize,
    /// Hash builder used to hash keys for table operations.
    hash_builder: S
}
impl<K, V> LruCache<K, V> {
    /// Creates an empty cache with the given maximum total entry size and
    /// the default hash builder. No table capacity is allocated up front.
    pub fn new(max_size: usize) -> LruCache<K, V> {
        let table = RawTable::new();
        let hash_builder = DefaultHashBuilder::default();
        LruCache::with_table_and_hasher(max_size, table, hash_builder)
    }

    /// Creates an empty cache with the given maximum total entry size and
    /// room for at least `capacity` entries before the table must grow.
    pub fn with_capacity(max_size: usize, capacity: usize) -> LruCache<K, V> {
        let table = RawTable::with_capacity(capacity);
        let hash_builder = DefaultHashBuilder::default();
        LruCache::with_table_and_hasher(max_size, table, hash_builder)
    }
}
impl<K, V, S> LruCache<K, V, S> {
    /// Shared constructor: wires the given table and hash builder together
    /// and allocates the sentinel entry anchoring the recency list.
    fn with_table_and_hasher(max_size: usize, table: RawTable<Entry<K, V>>,
            hash_builder: S) -> LruCache<K, V, S> {
        let seal = EntryPtr::new_seal();
        LruCache {
            table,
            seal,
            current_size: 0,
            max_size,
            hash_builder
        }
    }

    /// Creates an empty cache with the given size budget and hash builder.
    pub fn with_hasher(max_size: usize, hash_builder: S) -> LruCache<K, V, S> {
        LruCache::with_table_and_hasher(max_size, RawTable::new(),
            hash_builder)
    }

    /// Creates an empty cache with the given size budget, initial table
    /// capacity and hash builder.
    pub fn with_capacity_and_hasher(max_size: usize, capacity: usize,
            hash_builder: S) -> LruCache<K, V, S> {
        LruCache::with_table_and_hasher(max_size,
            RawTable::with_capacity(capacity), hash_builder)
    }

    /// The maximum total entry size this cache may hold before evicting.
    pub fn max_size(&self) -> usize {
        self.max_size
    }

    /// The estimated total size of the entries currently stored.
    pub fn current_size(&self) -> usize {
        self.current_size
    }

    /// The number of entries currently stored.
    pub fn len(&self) -> usize {
        self.table.len()
    }

    /// Indicates whether the cache contains no entries.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// The number of entries the table can hold before reallocating.
    pub fn capacity(&self) -> usize {
        self.table.capacity()
    }

    /// A reference to the cache's hash builder.
    pub fn hasher(&self) -> &S {
        &self.hash_builder
    }

    /// Removes all entries, resetting the size accounting and the recency
    /// list while keeping the allocated table capacity.
    pub fn clear(&mut self) {
        for entry in self.table.drain() {
            // Entries apparently do not release their key/value on their own
            // (the same manual `entry.drop()` appears in `Drop`); free each
            // one explicitly here — see `entry` module. TODO confirm.
            unsafe { entry.drop(); }
        }
        self.current_size = 0;
        // An empty recency list is represented by the seal pointing at
        // itself in both directions.
        self.seal.get_mut().next = self.seal;
        self.seal.get_mut().prev = self.seal;
    }

    /// Iterates over `(key, value)` pairs without changing recency order.
    pub fn iter(&self) -> Iter<'_, K, V> {
        Iter::new(self)
    }

    /// Iterates over keys without changing recency order.
    pub fn keys(&self) -> Keys<'_, K, V> {
        Keys::new(self)
    }

    /// Iterates over values without changing recency order.
    pub fn values(&self) -> Values<'_, K, V> {
        Values::new(self)
    }

    /// Removes all entries, yielding them as owned `(key, value)` pairs.
    pub fn drain(&mut self) -> Drain<'_, K, V, S> {
        Drain::new(self)
    }

    /// Consumes the cache, yielding owned keys.
    pub fn into_keys(self) -> IntoKeys<K, V, S> {
        IntoKeys::new(self)
    }

    /// Consumes the cache, yielding owned values.
    pub fn into_values(self) -> IntoValues<K, V, S> {
        IntoValues::new(self)
    }
}
/// Computes the 64-bit hash of `val` using a hasher from `hash_builder`.
///
/// Accepts unsized key types (e.g. `str`) so it can also be used for
/// borrowed lookups.
fn make_hash<K, S>(hash_builder: &S, val: &K) -> u64
where
    K: Hash + ?Sized,
    S: BuildHasher,
{
    use core::hash::Hasher;
    let mut hasher = hash_builder.build_hasher();
    Hash::hash(val, &mut hasher);
    hasher.finish()
}
fn make_insert_hash<K, S>(hash_builder: &S, val: &K) -> u64
where
K: Hash,
S: BuildHasher,
{
use core::hash::Hasher;
let mut state = hash_builder.build_hasher();
val.hash(&mut state);
state.finish()
}
/// Returns a closure that hashes a table entry by its key. Used as the
/// rehash function when hashbrown needs to relocate entries.
fn make_hasher<K, V, S>(hash_builder: &S) -> impl Fn(&Entry<K, V>) -> u64 + '_
where
    K: Hash,
    S: BuildHasher
{
    move |entry| {
        // Same safety assumption as the rest of the module: `entry.key()`
        // is valid for any entry stored in the table.
        let key = unsafe { entry.key() };
        make_hash::<K, S>(hash_builder, key)
    }
}
/// Returns a predicate matching table entries whose key, viewed through
/// `Borrow<Q>`, equals `k`. Used as the equality check in table lookups.
fn equivalent_key<Q, K, V>(k: &Q) -> impl Fn(&Entry<K, V>) -> bool + '_
where
    K: Borrow<Q>,
    Q: ?Sized + Eq,
{
    move |entry| {
        let entry_key = unsafe { entry.key() };
        k.eq(entry_key.borrow())
    }
}
impl<K, V, S> LruCache<K, V, S>
where
    K: Eq + Hash,
    S: BuildHasher
{
    /// Removes the entry for `key` from the hash table only. The entry is
    /// still linked into the recency list and `current_size` is untouched;
    /// callers pair this with `remove_metadata`.
    fn remove_from_table<Q>(&mut self, key: &Q) -> Option<Entry<K, V>>
    where
        K: Borrow<Q>,
        Q: Eq + Hash + ?Sized
    {
        let hash = make_hash::<Q, S>(&self.hash_builder, key);
        self.table.remove_entry(hash, equivalent_key(key))
    }

    /// Shared-reference table lookup; recency order unchanged.
    fn get_from_table<Q>(&self, key: &Q) -> Option<&Entry<K, V>>
    where
        K: Borrow<Q>,
        Q: Eq + Hash + ?Sized
    {
        let hash = make_hash::<Q, S>(&self.hash_builder, key);
        self.table.get(hash, equivalent_key(key))
    }

    /// Mutable table lookup; recency order unchanged.
    fn get_mut_from_table<Q>(&mut self, key: &Q) -> Option<&mut Entry<K, V>>
    where
        K: Borrow<Q>,
        Q: Eq + Hash + ?Sized
    {
        let hash = make_hash::<Q, S>(&self.hash_builder, key);
        self.table.get_mut(hash, equivalent_key(key))
    }

    /// Inserts `entry` with a precomputed `hash` without growing the table.
    /// Returns a pointer to the stored entry, or gives the entry back if
    /// the table has no free slot.
    #[inline]
    fn insert_into_table_with_hash(&mut self, hash: u64, entry: Entry<K, V>)
            -> Result<EntryPtr<K, V>, Entry<K, V>> {
        match self.table.try_insert_no_grow(hash, entry) {
            Ok(bucket) => Ok(EntryPtr::new(bucket.as_ptr())),
            Err(entry) => Err(entry)
        }
    }

    /// Hashes the entry's key, then inserts without growing the table.
    fn insert_into_table(&mut self, entry: Entry<K, V>)
            -> Result<EntryPtr<K, V>, Entry<K, V>> {
        let key = unsafe { entry.key() };
        let hash = make_insert_hash::<K, S>(&self.hash_builder, key);
        self.insert_into_table_with_hash(hash, entry)
    }

    /// Links `entry` directly after the seal, i.e. at the head of the
    /// recency list, making it the most-recently-used entry.
    fn set_head(&mut self, mut entry: EntryPtr<K, V>) {
        entry.insert(self.seal, self.seal.get().next);
    }

    /// Moves an already-linked entry to the head of the recency list.
    fn touch_ptr(&mut self, entry: EntryPtr<K, V>) {
        unsafe { entry.unhinge(); }
        self.set_head(entry);
    }

    /// Unlinks `entry` from the recency list, subtracts its size from the
    /// running total and returns its key and value. The entry must already
    /// have been removed from the hash table.
    unsafe fn remove_metadata(&mut self, entry: Entry<K, V>) -> (K, V) {
        let entry = entry.unhinge();
        self.current_size -= entry.size();
        entry.into_key_value()
    }

    /// Fully removes the entry behind `entry` (table, list, and size
    /// accounting). The pointer must refer to a live entry of this cache,
    /// hence the `unwrap` on the table removal.
    #[inline]
    unsafe fn remove_ptr(&mut self, entry: EntryPtr<K, V>) -> (K, V) {
        let entry = self.remove_from_table(entry.get().key()).unwrap();
        self.remove_metadata(entry)
    }

    /// Evicts least-recently-used entries until `current_size <= target`.
    fn eject_to_target(&mut self, target: usize) {
        while self.current_size > target {
            self.remove_lru();
        }
    }

    /// Inserts an entry without updating `current_size` (the caller
    /// accounts for it, e.g. `Clone`). Assumes the table has a free slot —
    /// hence `unwrap_unchecked` on the no-grow insertion.
    fn insert_untracked(&mut self, entry: Entry<K, V>) {
        let entry_ptr = unsafe {
            self.insert_into_table(entry).unwrap_unchecked()
        };
        self.set_head(entry_ptr);
    }

    /// Moves all entries into a freshly allocated table of `new_capacity`,
    /// then patches each entry's list neighbors (which may include the
    /// seal) to point at the entry's new address.
    fn try_reallocate(&mut self, new_capacity: usize) -> Result<(), TryReserveError> {
        let hasher = make_hasher(&self.hash_builder);
        let mut old_table = RawTable::try_with_capacity(new_capacity)?;
        mem::swap(&mut self.table, &mut old_table);
        for entry in old_table.into_iter() {
            // Remember the neighbors before the entry is moved.
            let mut prev_entry = entry.prev;
            let mut next_entry = entry.next;
            let bucket = self.table.insert(hasher(&entry), entry, &hasher);
            let entry_ptr = EntryPtr::new(bucket.as_ptr());
            // Re-point both neighbors at the entry's new location.
            prev_entry.get_mut().next = entry_ptr;
            next_entry.get_mut().prev = entry_ptr;
        }
        Ok(())
    }

    /// Like `try_reallocate`, but panics on allocation failure.
    fn reallocate(&mut self, new_capacity: usize) {
        self.try_reallocate(new_capacity).unwrap()
    }

    /// Pointer to the least-recently-used entry (`seal.prev`), or `None`
    /// when the list is empty (seal points at itself).
    fn lru_ptr(&self) -> Option<EntryPtr<K, V>> {
        let lru = self.seal.get().prev;
        if lru == self.seal {
            None
        }
        else {
            Some(lru)
        }
    }

    /// Pointer to the most-recently-used entry (`seal.next`), or `None`
    /// when the list is empty.
    fn mru_ptr(&self) -> Option<EntryPtr<K, V>> {
        let mru = self.seal.get().next;
        if mru == self.seal {
            None
        }
        else {
            Some(mru)
        }
    }

    /// Removes and returns the least-recently-used entry, if any.
    pub fn remove_lru(&mut self) -> Option<(K, V)> {
        self.lru_ptr().map(|ptr| unsafe { self.remove_ptr(ptr) })
    }

    /// Returns the least-recently-used entry and promotes it to
    /// most-recently-used (hence `&mut self`).
    pub fn get_lru(&mut self) -> Option<(&K, &V)> {
        self.lru_ptr().map(|entry| {
            unsafe {
                self.touch_ptr(entry);
                // Re-borrow through the raw pointer so the returned
                // references outlive the `&mut self` use above.
                let entry = entry.get_extended();
                (entry.key(), entry.value())
            }
        })
    }

    /// Returns the least-recently-used entry without changing recency.
    pub fn peek_lru(&self) -> Option<(&K, &V)> {
        self.lru_ptr().map(|ptr|
            unsafe {
                let entry = ptr.get_extended();
                (entry.key(), entry.value())
            })
    }

    /// Removes and returns the most-recently-used entry, if any.
    pub fn remove_mru(&mut self) -> Option<(K, V)> {
        self.mru_ptr().map(|ptr| unsafe { self.remove_ptr(ptr) })
    }

    /// Returns the most-recently-used entry without changing recency.
    pub fn peek_mru(&self) -> Option<(&K, &V)> {
        self.mru_ptr().map(|ptr|
            unsafe {
                let entry = ptr.get_extended();
                (entry.key(), entry.value())
            })
    }

    /// Computes `len + additional`, mapping arithmetic overflow to
    /// `TryReserveError::CapacityOverflow`.
    fn new_capacity(&self, additional: usize)
            -> Result<usize, TryReserveError> {
        self.len().checked_add(additional)
            .ok_or(TryReserveError::CapacityOverflow)
    }

    /// Ensures capacity for at least `additional` further entries; panics
    /// on overflow or allocation failure.
    pub fn reserve(&mut self, additional: usize) {
        let new_capacity = self.new_capacity(additional).unwrap();
        if self.capacity() < new_capacity {
            self.reallocate(new_capacity);
        }
    }

    /// Fallible version of `reserve`; leaves the cache unchanged on error.
    pub fn try_reserve(&mut self, additional: usize)
            -> Result<(), TryReserveError> {
        let new_capacity = self.new_capacity(additional)?;
        if self.capacity() < new_capacity {
            self.try_reallocate(new_capacity)
        }
        else {
            Ok(())
        }
    }

    /// Shrinks the table's capacity towards `min_capacity`, but never below
    /// the current number of entries.
    pub fn shrink_to(&mut self, min_capacity: usize) {
        let new_capacity = self.len().max(min_capacity);
        if self.capacity() > new_capacity {
            self.reallocate(new_capacity);
        }
    }

    /// Shrinks the table's capacity as much as possible.
    pub fn shrink_to_fit(&mut self) {
        self.shrink_to(0)
    }

    /// Sets a new size budget, first evicting LRU entries so the current
    /// contents fit within it.
    pub fn set_max_size(&mut self, max_size: usize) {
        self.eject_to_target(max_size);
        self.max_size = max_size;
    }

    /// Marks the entry for `key` (if present) as most-recently-used.
    pub fn touch<Q>(&mut self, key: &Q)
    where
        K: Borrow<Q>,
        Q: Eq + Hash + ?Sized
    {
        if let Some(entry) = self.get_mut_from_table(key) {
            let entry_ptr = EntryPtr::new(entry as *mut Entry<K, V>);
            self.touch_ptr(entry_ptr);
        }
    }

    /// Returns the entry for `key` and marks it as most-recently-used.
    pub fn get_entry<Q>(&mut self, key: &Q) -> Option<(&K, &V)>
    where
        K: Borrow<Q>,
        Q: Eq + Hash + ?Sized
    {
        if let Some(entry) = self.get_mut_from_table(key) {
            let entry_ptr = EntryPtr::new(entry as *mut Entry<K, V>);
            self.touch_ptr(entry_ptr);
            // Re-borrow through the raw pointer to decouple the returned
            // references from the exclusive borrow used for touching.
            let entry = unsafe { entry_ptr.get_extended() };
            Some(unsafe { (entry.key(), entry.value()) })
        }
        else {
            None
        }
    }

    /// Returns the value for `key` and marks it as most-recently-used.
    pub fn get<Q>(&mut self, key: &Q) -> Option<&V>
    where
        K: Borrow<Q>,
        Q: Eq + Hash + ?Sized
    {
        self.get_entry(key).map(|(_, v)| v)
    }

    /// Returns the entry for `key` without changing its recency.
    pub fn peek_entry<Q>(&self, key: &Q) -> Option<(&K, &V)>
    where
        K: Borrow<Q>,
        Q: Eq + Hash + ?Sized
    {
        self.get_from_table(key).map(|e| unsafe { (e.key(), e.value()) })
    }

    /// Returns the value for `key` without changing its recency.
    pub fn peek<Q>(&self, key: &Q) -> Option<&V>
    where
        K: Borrow<Q>,
        Q: Eq + Hash + ?Sized
    {
        self.get_from_table(key).map(|e| unsafe { e.value() })
    }

    /// Indicates whether an entry for `key` exists (recency unchanged).
    pub fn contains<Q>(&self, key: &Q) -> bool
    where
        K: Borrow<Q>,
        Q: Eq + Hash + ?Sized
    {
        let hash = make_hash::<Q, S>(&self.hash_builder, key);
        self.table.find(hash, equivalent_key(key)).is_some()
    }

    /// Removes the entry for `key`, returning its key and value.
    pub fn remove_entry<Q>(&mut self, key: &Q) -> Option<(K, V)>
    where
        K: Borrow<Q>,
        Q: Eq + Hash + ?Sized
    {
        self.remove_from_table(key)
            .map(|entry| unsafe { self.remove_metadata(entry) })
    }

    /// Removes the entry for `key`, returning its value.
    pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: Eq + Hash + ?Sized
    {
        self.remove_entry(key).map(|(_, v)| v)
    }

    /// Keeps only the entries for which `pred` returns `true`, iterating
    /// from least- to most-recently-used.
    pub fn retain<F>(&mut self, mut pred: F)
    where
        F: FnMut(&K, &V) -> bool
    {
        let mut tail = self.seal.get().prev;
        while tail != self.seal {
            unsafe {
                let entry = tail.get();
                let key = entry.key();
                if !pred(key, entry.value()) {
                    self.remove_entry(key);
                }
                // NOTE(review): when the entry was just removed, this reads
                // `prev` out of the vacated table slot after the `Entry`
                // value was moved out — it appears to rely on the old bytes
                // still being intact. Worth confirming soundness / reading
                // `prev` before the removal instead.
                tail = entry.prev;
            }
        }
    }
}
/// Internal carrier for the data of an entry that was rejected because its
/// size alone exceeds the cache's budget. Converted into the corresponding
/// public `InsertError`/`TryInsertError` variants by the `From` impls below.
struct EntryTooLarge<K, V> {
    // Key and value are handed back to the caller through the error.
    key: K,
    value: V,
    // Measured size of the rejected entry.
    entry_size: usize,
    // The cache's budget at the time of rejection.
    max_size: usize
}
impl<K, V> From<EntryTooLarge<K, V>> for InsertError<K, V> {
fn from(data: EntryTooLarge<K, V>) -> InsertError<K, V> {
InsertError::EntryTooLarge {
key: data.key,
value: data.value,
entry_size: data.entry_size,
max_size: data.max_size
}
}
}
impl<K, V> From<EntryTooLarge<K, V>> for TryInsertError<K, V> {
fn from(data: EntryTooLarge<K, V>) -> TryInsertError<K, V> {
TryInsertError::EntryTooLarge {
key: data.key,
value: data.value,
entry_size: data.entry_size,
max_size: data.max_size
}
}
}
impl<K, V, S> LruCache<K, V, S>
where
    K: Eq + Hash + MemSize,
    V: MemSize,
    S: BuildHasher
{
    /// Wraps `key` and `value` in a not-yet-linked entry, rejecting it when
    /// its size alone exceeds the budget (such an entry could never be
    /// stored, even in an otherwise empty cache).
    fn prepare_insert(&mut self, key: K, value: V)
            -> Result<UnhingedEntry<K, V>, EntryTooLarge<K, V>> {
        let entry = UnhingedEntry::new(key, value);
        let entry_size = entry.size();
        if entry_size > self.max_size {
            // Hand key and value back to the caller inside the error.
            let (key, value) = entry.into_key_value();
            Err(EntryTooLarge {
                key,
                value,
                entry_size,
                max_size: self.max_size
            })
        }
        else {
            Ok(entry)
        }
    }

    /// Inserts an entry that is known to fit, growing the table if needed.
    ///
    /// `try_insert_no_grow` (via `insert_into_table_with_hash`) fails when
    /// the table is full; the capacity is then doubled (at least 1) and the
    /// insertion retried. After a reallocation the list head may have
    /// moved, so the entry's `next` pointer is refreshed; its `prev` is the
    /// seal, which is stable across reallocations.
    fn insert_unchecked(&mut self, entry: UnhingedEntry<K, V>, hash: u64) {
        let size = entry.size();
        let mut entry = Entry::new(entry, self.seal, self.seal.get().next);
        loop {
            match self.insert_into_table_with_hash(hash, entry) {
                Ok(entry_ptr) => {
                    self.current_size += size;
                    self.set_head(entry_ptr);
                    return;
                },
                Err(returned_entry) => {
                    entry = returned_entry;
                    self.reallocate((self.table.capacity() * 2).max(1));
                    entry.next = self.seal.get().next;
                }
            }
        }
    }

    /// Inserts `key -> value`, evicting LRU entries as needed to fit.
    ///
    /// Returns the previous value stored under an equal key, or an error if
    /// the new entry by itself exceeds the budget.
    pub fn insert(&mut self, key: K, value: V)
            -> Result<Option<V>, InsertError<K, V>> {
        let entry = self.prepare_insert(key, value)?;
        let key = entry.key();
        let hash = make_insert_hash::<K, S>(&self.hash_builder, key);
        // Displace any existing entry with an equal key first so its size
        // no longer counts against the budget.
        let result = self.table.remove_entry(hash, equivalent_key(key))
            .map(|e| unsafe { self.remove_metadata(e).1 });
        // No underflow: prepare_insert guarantees entry.size() <= max_size.
        self.eject_to_target(self.max_size - entry.size());
        self.insert_unchecked(entry, hash);
        Ok(result)
    }

    /// Inserts `key -> value` only if it fits without evicting anything and
    /// no entry with an equal key exists; otherwise an error carries the
    /// key and value back to the caller. Note the check order: size-vs-
    /// budget first, then free memory, then occupancy.
    pub fn try_insert(&mut self, key: K, value: V)
            -> Result<(), TryInsertError<K, V>> {
        let entry = self.prepare_insert(key, value)?;
        let free_memory = self.max_size - self.current_size;
        let entry_size = entry.size();
        if entry_size > free_memory {
            let (key, value) = entry.into_key_value();
            return Err(TryInsertError::WouldEjectLru {
                key,
                value,
                entry_size,
                free_memory
            })
        }
        let key = entry.key();
        let hash = make_insert_hash::<K, S>(&self.hash_builder, key);
        if self.table.find(hash, equivalent_key(key)).is_some() {
            let (key, value) = entry.into_key_value();
            return Err(TryInsertError::OccupiedEntry {
                key,
                value
            })
        }
        self.insert_unchecked(entry, hash);
        Ok(())
    }

    /// Applies `op` to the value stored under `key`, marking the entry as
    /// most-recently-used and re-measuring its size afterwards.
    ///
    /// If the mutation grew the entry beyond the budget, the entry is
    /// removed and returned inside `MutateError::EntryTooLarge`. If it grew
    /// within bounds, LRU entries are evicted as needed. Returns `Ok(None)`
    /// without calling `op` when the key is absent.
    pub fn mutate<Q, R, F>(&mut self, key: &Q, op: F)
            -> Result<Option<R>, MutateError<K, V>>
    where
        K: Borrow<Q>,
        Q: Eq + Hash + ?Sized,
        F: FnOnce(&mut V) -> R
    {
        let max_size = self.max_size;
        if let Some(entry) = self.get_mut_from_table(key) {
            let new_value_size;
            let result;
            let old_value_size;
            // Measure the value's size before and after the mutation to
            // compute the size delta.
            unsafe {
                old_value_size = entry.value().mem_size();
                result = op(entry.value_mut());
                new_value_size = entry.value().mem_size();
            }
            if new_value_size > old_value_size {
                let diff = new_value_size - old_value_size;
                let new_entry_size = entry.size + diff;
                if new_entry_size > max_size {
                    // The entry can no longer be stored at all: remove it
                    // and hand the key and (mutated) value to the caller.
                    let old_entry_size = entry.size;
                    let (key, value) = self.remove_entry(key).unwrap();
                    return Err(MutateError::EntryTooLarge {
                        key,
                        value,
                        old_entry_size,
                        new_entry_size,
                        max_size
                    });
                }
                entry.size = new_entry_size;
                let entry_ptr = EntryPtr::new(entry as *mut Entry<K, V>);
                self.current_size += diff;
                self.touch_ptr(entry_ptr);
                // Growth may have pushed the cache over budget; evict LRU
                // entries. The mutated entry is now MRU and evicted last.
                self.eject_to_target(max_size);
            }
            else {
                // Shrunk (or unchanged): only update the bookkeeping.
                let diff = old_value_size - new_value_size;
                entry.size -= diff;
                let entry_ptr = EntryPtr::new(entry as *mut Entry<K, V>);
                self.current_size -= diff;
                self.touch_ptr(entry_ptr);
            }
            Ok(Some(result))
        }
        else {
            Ok(None)
        }
    }
}
impl<K, V, S> IntoIterator for LruCache<K, V, S> {
    type Item = (K, V);
    type IntoIter = IntoIter<K, V, S>;

    /// Consumes the cache, yielding owned `(key, value)` pairs.
    fn into_iter(self) -> IntoIter<K, V, S> {
        IntoIter::new(self)
    }
}
impl<K, V, S> Clone for LruCache<K, V, S>
where
    K: Clone + Eq + Hash,
    V: Clone,
    S: BuildHasher + Clone
{
    /// Clones the cache, preserving size accounting, capacity, and recency
    /// order.
    fn clone(&self) -> LruCache<K, V, S> {
        let max_size = self.max_size;
        let capacity = self.capacity();
        let hash_builder = self.hash_builder.clone();
        let mut clone = LruCache::with_capacity_and_hasher(
            max_size, capacity, hash_builder);
        // Copy the size total up front; insert_untracked below deliberately
        // does not touch current_size.
        clone.current_size = self.current_size;
        // Walk the original from LRU towards MRU, inserting each clone at
        // the head, so the clone ends in the same recency order. The cloned
        // entry apparently still carries the ORIGINAL's list pointers,
        // which is what advances the walk — TODO confirm against `entry`.
        let mut next = self.seal.get().prev;
        while next != self.seal {
            let entry = unsafe { next.get().clone() };
            next = entry.prev;
            clone.insert_untracked(entry);
        }
        clone
    }
}
impl<K, V, S> Drop for LruCache<K, V, S> {
    fn drop(&mut self) {
        // Entries apparently do not release their key/value automatically
        // (mirroring `clear`); free each one explicitly.
        for entry in self.table.drain() {
            unsafe { entry.drop() };
        }
        // Release the sentinel entry of the recency list (allocated in
        // `EntryPtr::new_seal`).
        unsafe {
            self.seal.drop_seal();
        }
    }
}
impl<K: Debug, V: Debug, S> Debug for LruCache<K, V, S> {
    /// Formats the cache like a map, with entries in the order produced by
    /// `iter()`.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let mut map = f.debug_map();
        for (key, value) in self.iter() {
            map.entry(key, value);
        }
        map.finish()
    }
}
// SAFETY(review): the cache owns its entries and the seal; the raw
// `EntryPtr` links only reference memory owned by this cache, so moving the
// whole cache to another thread moves everything it points at — assuming
// `EntryPtr` never aliases data outside the cache (TODO confirm).
unsafe impl<K: Send, V: Send, S: Send> Send for LruCache<K, V, S> { }
// SAFETY(review): all methods that modify the table or recency list take
// `&mut self`; `&self` methods only read — assuming `Entry`/`EntryPtr` have
// no interior mutability (TODO confirm).
unsafe impl<K: Sync, V: Sync, S: Sync> Sync for LruCache<K, V, S> { }
#[cfg(test)]
mod tests {
use std::hash::Hasher;
use std::sync::{Arc, Mutex};
use super::*;
    /// Builds a cache containing only `"hello" -> "world"`.
    pub(crate) fn singleton_test_cache() -> LruCache<&'static str, &'static str> {
        let mut cache = LruCache::new(1024);
        cache.insert("hello", "world").unwrap();
        cache
    }

    /// Builds a cache with five entries, inserted from `"hello"` (LRU) to
    /// `"good morning"` (MRU).
    pub(crate) fn large_test_cache() -> LruCache<&'static str, &'static str> {
        let mut cache = LruCache::new(1024);
        cache.insert("hello", "world").unwrap();
        cache.insert("greetings", "moon").unwrap();
        cache.insert("ahoy", "mars").unwrap();
        cache.insert("hi", "venus").unwrap();
        cache.insert("good morning", "jupiter").unwrap();
        cache
    }

    // Insertions within the budget must not evict, must grow len and
    // current_size, and must keep all entries retrievable.
    #[test]
    fn cache_correctly_inserts_with_sufficient_capacity() {
        let mut cache = LruCache::new(1024);
        assert_eq!(0, cache.len());
        assert_eq!(0, cache.current_size());
        assert_eq!(1024, cache.max_size());
        assert_eq!(Ok(None),
            cache.insert("key1".to_owned(), "value1".to_owned()));
        let new_size = cache.current_size();
        assert_eq!(1, cache.len());
        assert!(new_size > 0);
        assert_eq!(Some(&"value1".to_owned()), cache.get("key1"));
        assert_eq!(Ok(None),
            cache.insert("key2".to_owned(), "value2".to_owned()));
        assert_eq!(2, cache.len());
        assert!(cache.current_size() > new_size);
        assert_eq!(Some(&"value1".to_owned()), cache.get("key1"));
        assert_eq!(Some(&"value2".to_owned()), cache.get("key2"));
    }

    // Re-inserting an equal key replaces the entry (returning the old
    // value) rather than adding a second one.
    #[test]
    fn cache_deduplicates_keys() {
        let mut cache = LruCache::new(1024);
        assert_eq!(Ok(None),
            cache.insert("key".to_owned(), "value".to_owned()));
        let expected_size = cache.current_size;
        assert_eq!(Ok(Some("value".to_owned())),
            cache.insert("key".to_owned(), "value".to_owned()));
        assert_eq!(expected_size, cache.current_size());
        assert_eq!(1, cache.len());
    }

    /// Creates a string of exactly `size` '0' characters.
    fn string_with_size(size: usize) -> String {
        let mut s = String::with_capacity(size);
        for _ in 0..size {
            s.push('0');
        }
        s
    }
#[test]
fn cache_ejects_lru_if_overflowing() {
let mut cache = LruCache::new(2048);
cache.insert("a".to_owned(), string_with_size(676)).unwrap();
cache.insert("b".to_owned(), string_with_size(676)).unwrap();
let expected_size = cache.current_size();
cache.insert("c".to_owned(), string_with_size(676)).unwrap();
assert_eq!(expected_size, cache.current_size());
assert_eq!(2, cache.len());
assert!(cache.peek("a").is_none());
assert!(cache.peek("b").is_some());
assert!(cache.peek("c").is_some());
assert_eq!(Some("b"), cache.peek_lru().map(|(key, _)| key.as_str()));
assert_eq!(Some("c"), cache.peek_mru().map(|(key, _)| key.as_str()));
}
#[test]
fn getting_sets_most_recently_used() {
let mut cache = LruCache::new(2048);
cache.insert("a".to_owned(), string_with_size(674)).unwrap();
cache.insert("b".to_owned(), string_with_size(674)).unwrap();
cache.get(&"a".to_owned());
cache.insert("c".to_owned(), string_with_size(674)).unwrap();
assert!(cache.get("a").is_some());
assert!(cache.get("b").is_none());
assert!(cache.get("c").is_some());
assert_eq!("a", unsafe { cache.seal.get().prev.get().key() });
assert_eq!("c", unsafe { cache.seal.get().next.get().key() });
cache.get("a");
assert_eq!("c", unsafe { cache.seal.get().prev.get().key() });
assert_eq!("a", unsafe { cache.seal.get().next.get().key() });
}
#[test]
fn empty_cache_has_no_lru_and_mru() {
let cache = LruCache::<&str, &str>::new(1024);
assert!(cache.peek_lru().is_none());
assert!(cache.peek_mru().is_none());
}
#[test]
fn get_lru_sets_most_recently_used() {
let mut cache = LruCache::new(2048);
cache.insert("hello", "world").unwrap();
cache.insert("greetings", "moon").unwrap();
let lru = cache.get_lru();
assert_eq!(Some((&"hello", &"world")), lru);
cache.set_max_size(cache.current_size() - 1);
assert!(cache.contains("hello"));
assert!(!cache.contains("greetings"));
}
#[test]
fn peeking_does_not_set_most_recently_used() {
let mut cache = LruCache::new(2048);
cache.insert("a".to_owned(), string_with_size(674)).unwrap();
cache.insert("b".to_owned(), string_with_size(674)).unwrap();
cache.peek(&"a".to_owned());
cache.insert("c".to_owned(), string_with_size(674)).unwrap();
assert!(cache.get("a").is_none());
assert!(cache.get("b").is_some());
assert!(cache.get("c").is_some());
cache.peek_entry(&"b".to_owned());
cache.insert("d".to_owned(), string_with_size(674)).unwrap();
assert!(cache.get("b").is_none());
assert!(cache.get("c").is_some());
assert!(cache.get("d").is_some());
}
#[test]
fn cache_rejects_too_large_entry() {
let mut cache = LruCache::new(256);
let key = "This is a pretty long key, especially considering that \
keys should normally be rather small to avoid long hashing times."
.to_owned();
let value = "Although the key alone has insufficient size, together \
with this string it pushes pushes the total memory requirement of the \
entry over the capacity of the cache.".to_owned();
assert!(matches!(cache.insert(key, value),
Err(InsertError::EntryTooLarge { .. })));
}
#[test]
fn precisely_fitting_entry_does_not_eject_lru() {
let mut cache = LruCache::new(1024);
cache.insert("hello".to_owned(), "world".to_owned()).unwrap();
cache.insert("greetings".to_owned(), "moon".to_owned()).unwrap();
let key = "ahoy".to_owned();
let mut value = "mars".to_owned();
value.shrink_to_fit();
let required_size = cache.max_size() - cache.current_size();
let additional_bytes = required_size - entry_size(&key, &value);
let additional_data = vec![b's'; additional_bytes];
value.push_str(&String::from_utf8(additional_data).unwrap());
value.shrink_to_fit();
assert_eq!(required_size, entry_size(&key, &value));
cache.insert(key, value).unwrap();
assert_eq!(3, cache.len());
}
#[test]
fn auto_reallocation_doubles_capacity() {
let mut cache = LruCache::with_capacity(4096, 10);
let capacity = cache.capacity();
for index in 0..capacity {
cache.insert(index, 0).unwrap();
}
cache.insert(capacity, 0).unwrap();
assert_eq!(2 * capacity, cache.capacity());
}
#[test]
fn try_insert_works_as_insert_if_ok() {
let mut cache = LruCache::new(1024);
cache.insert("hello".to_owned(), "world".to_owned()).unwrap();
cache.try_insert("greetings".to_owned(), "moon".to_owned()).unwrap();
assert_eq!(2, cache.len());
assert_eq!("world".to_owned(), cache.remove_lru().unwrap().1);
assert_eq!("moon".to_owned(), cache.remove_lru().unwrap().1);
}
#[test]
fn try_insert_fails_on_duplication() {
let mut cache = LruCache::new(1024);
cache.insert("hello".to_owned(), "world".to_owned()).unwrap();
let result = cache.try_insert("hello".to_owned(), "moon".to_owned());
assert!(matches!(result, Err(TryInsertError::OccupiedEntry { .. })));
assert_eq!(1, cache.len());
assert_eq!(&"world".to_owned(), cache.get("hello").unwrap());
}
#[test]
fn try_insert_fails_if_eject_required() {
let mut cache = LruCache::new(1024);
cache.insert("hello".to_owned(), "world".to_owned()).unwrap();
cache.insert("greetings".to_owned(), "moon".to_owned()).unwrap();
cache.set_max_size(cache.current_size());
let result = cache.try_insert("ahoy".to_owned(), "mars".to_owned());
assert!(matches!(result, Err(TryInsertError::WouldEjectLru { .. })));
assert_eq!(2, cache.len());
assert!(!cache.contains("ahoy"));
}
#[test]
fn try_insert_fails_if_too_large() {
let mut cache = LruCache::new(1024);
cache.insert("hello".to_owned(), "world".to_owned()).unwrap();
let value = String::from_utf8(vec![b'0'; 1024]).unwrap();
let result = cache.try_insert("key".to_owned(), value);
assert!(matches!(result, Err(TryInsertError::EntryTooLarge { .. })));
assert_eq!(1, cache.len());
assert!(!cache.contains("key"));
}
#[test]
fn removing_works() {
let mut cache = LruCache::new(1024);
cache.insert("hello", "world").unwrap();
cache.insert("greetings", "moon").unwrap();
cache.insert("ahoy", "mars").unwrap();
assert_eq!(Some(("hello", "world")), cache.remove_entry("hello"));
assert_eq!(None, cache.remove("hello"));
}
#[test]
fn removing_mru_works() {
let mut cache = LruCache::new(1024);
cache.insert("hello", "world").unwrap();
cache.insert("greetings", "moon").unwrap();
cache.insert("ahoy", "mars").unwrap();
cache.remove_mru();
assert_eq!(2, cache.len());
assert!(cache.contains("hello"));
assert!(cache.contains("greetings"));
}
#[test]
fn clearing_works() {
let mut cache = large_test_cache();
cache.clear();
assert!(cache.is_empty());
assert!(cache.peek_lru().is_none());
assert!(cache.peek_mru().is_none());
assert!(cache.iter().next().is_none());
}
#[test]
fn retain_does_not_affect_empty_cache() {
let mut cache: LruCache<u64, Vec<u8>> = LruCache::new(1024);
cache.retain(|k, v| v.len() as u64 == *k);
assert_eq!(0, cache.len());
}
#[test]
fn retain_works_if_all_match() {
let mut cache: LruCache<u64, Vec<u8>> = LruCache::new(1024);
cache.insert(1, vec![0]).unwrap();
cache.insert(5, vec![2, 3, 5, 7, 11]).unwrap();
cache.insert(4, vec![1, 4, 9, 16]).unwrap();
cache.retain(|k, v| v.len() as u64 == *k);
assert_eq!(3, cache.len());
assert!(cache.contains(&1));
assert!(cache.contains(&4));
assert!(cache.contains(&5));
}
#[test]
fn retain_works_if_none_match() {
let mut cache: LruCache<u64, Vec<u8>> = LruCache::new(1024);
cache.insert(1, vec![0, 1]).unwrap();
cache.insert(5, vec![2, 3, 5, 7, 11, 13]).unwrap();
cache.insert(4, vec![1, 4, 9, 16, 25]).unwrap();
cache.retain(|k, v| v.len() as u64 == *k);
assert_eq!(0, cache.len());
assert!(!cache.contains(&1));
assert!(!cache.contains(&4));
assert!(!cache.contains(&5));
}
#[test]
fn retain_works_if_some_match() {
let mut cache: LruCache<u64, Vec<u8>> = LruCache::new(1024);
cache.insert(1, vec![0, 1]).unwrap();
cache.insert(5, vec![2, 3, 5, 7, 11]).unwrap();
cache.insert(4, vec![1, 4, 9, 16]).unwrap();
cache.retain(|k, v| v.len() as u64 == *k);
assert_eq!(2, cache.len());
assert!(!cache.contains(&1));
assert!(cache.contains(&4));
assert!(cache.contains(&5));
}
#[test]
fn retain_sets_lru_and_mru_if_necessary() {
let mut cache = LruCache::new(1024);
cache.insert(1, 0).unwrap();
cache.insert(2, 0).unwrap();
cache.insert(3, 0).unwrap();
cache.insert(4, 0).unwrap();
cache.retain(|&k, _| k != 1 && k != 4);
assert_eq!(Some((&2, &0)), cache.peek_lru());
assert_eq!(Some((&3, &0)), cache.peek_mru());
}
#[test]
fn contains_works() {
let mut cache = LruCache::new(1024);
cache.insert("hello", "world").unwrap();
assert!(cache.contains("hello"));
assert!(!cache.contains("greetings"));
}
#[test]
fn increasing_max_size_keeps_all_elements() {
let mut cache = LruCache::new(1024);
cache.insert("hello", "world").unwrap();
cache.insert("greetings", "moon").unwrap();
cache.set_max_size(2048);
assert_eq!(2, cache.len());
assert_eq!(2048, cache.max_size());
}
#[test]
fn decreasing_max_size_below_current_size_drops_elements() {
let mut cache = LruCache::new(1024);
cache.insert("hello", "world").unwrap();
cache.insert("greetings", "moon").unwrap();
cache.set_max_size(cache.current_size() - 1);
assert_eq!(1, cache.len());
assert!(cache.current_size() < cache.max_size());
assert!(cache.max_size() < 1024);
}
#[test]
fn cache_correctly_applies_mutation() {
let mut cache = LruCache::new(1024);
cache.insert("hello".to_owned(), "world".to_owned()).unwrap();
cache.insert("greetings".to_owned(), "moon".to_owned()).unwrap();
let old_size = cache.current_size();
let result = cache.mutate("greetings", |s| {
s.push_str(", from 384400 km away");
s.shrink_to_fit();
s.len()
});
assert_eq!(Ok(Some(25)), result);
assert_eq!(2, cache.len());
assert_eq!(old_size + 21, cache.current_size());
assert_eq!(Some(&"moon, from 384400 km away".to_owned()),
cache.get("greetings"));
}
#[test]
fn cache_rejects_too_expanding_mutation() {
let mut cache = LruCache::new(1024);
cache.insert("hello", vec![0u8; 32]).unwrap();
cache.insert("greetings", vec![0u8; 32]).unwrap();
let old_size = cache.current_size();
let result = cache.mutate("hello", |v| {
v.append(&mut vec![0u8; 1000]);
});
assert!(matches!(result, Err(MutateError::EntryTooLarge { .. })));
assert_eq!(1, cache.len());
assert!(cache.current_size() < old_size);
assert_eq!(None, cache.get("hello"));
}
#[test]
fn non_expanding_mutation_works() {
let mut cache = LruCache::new(1024);
cache.insert("hello", vec![0u8; 32]).unwrap();
cache.insert("greetings", vec![0u8; 32]).unwrap();
let old_size = cache.current_size();
let result = cache.mutate("hello", |v| {
*v = Vec::new();
});
assert!(matches!(result, Ok(Some(_))));
assert!(cache.current_size() < old_size);
assert_eq!(2, cache.len());
}
#[test]
fn mutation_on_non_existent_element_is_never_called() {
let mut cache = LruCache::<&str, &str>::new(1024);
let result = cache.mutate("hello", |_| { panic!("mutation was called") });
assert_eq!(Ok(None), result);
}
#[test]
fn reserving_adds_capacity() {
let mut cache = LruCache::new(1024);
cache.insert("hello", "world").unwrap();
cache.insert("greetings", "moon").unwrap();
let additional = cache.capacity() - cache.len() + 5;
cache.reserve(additional);
assert!(cache.capacity() >= additional + 2);
assert_eq!(2, cache.len());
assert_eq!(Some((&"hello", &"world")), cache.peek_lru());
assert_eq!(Some((&"greetings", &"moon")), cache.peek_mru());
let additional = additional + 10;
assert!(cache.try_reserve(additional).is_ok());
assert!(cache.capacity() >= additional + 2);
}
#[test]
fn reserving_does_not_change_anything_when_capacity_is_not_exceeded() {
let mut cache = LruCache::with_capacity(1024, 5);
cache.insert("hello", "world").unwrap();
cache.insert("greetings", "moon").unwrap();
let capacity_before = cache.capacity();
assert!(cache.try_reserve(3).is_ok());
assert_eq!(capacity_before, cache.capacity());
}
#[test]
fn reserving_fails_on_internal_overflow() {
let mut cache = LruCache::new(1024);
cache.insert("hello", "world").unwrap();
let try_reserve_result = cache.try_reserve(usize::MAX);
assert_eq!(Err(TryReserveError::CapacityOverflow), try_reserve_result);
}
#[test]
fn reserving_fails_on_external_overflow() {
let mut cache = LruCache::new(1024);
cache.insert("hello", "world").unwrap();
let additional = usize::MAX / mem::size_of::<Entry<&str, &str>>();
let try_reserve_result = cache.try_reserve(additional);
assert_eq!(Err(TryReserveError::CapacityOverflow), try_reserve_result);
}
#[test]
fn shrinking_does_not_drop_below_len() {
let mut cache = large_test_cache();
cache.shrink_to(4);
assert!(cache.capacity() >= 5);
assert_eq!(5, cache.len());
cache.insert("hey", "mercury").unwrap();
cache.insert("what's up", "saturn").unwrap();
cache.shrink_to_fit();
assert!(cache.capacity() >= 7);
assert_eq!(7, cache.len());
}
#[test]
fn shrinking_large_cache_decreases_capacity() {
let mut cache = LruCache::with_capacity(1024, 256);
cache.shrink_to(64);
assert!(cache.capacity() < 256);
assert!(cache.capacity() >= 64);
cache.insert("hey", "mercury").unwrap();
cache.insert("what's up", "saturn").unwrap();
cache.shrink_to_fit();
assert!(cache.capacity() < 64);
assert!(cache.capacity() >= 2);
}
#[test]
fn cache_created_with_capacity_does_not_reallocate_before_capacity_is_reached() {
let mut cache = LruCache::with_capacity(4096, 10);
let capacity_before = cache.capacity();
assert!(capacity_before >= 10);
for i in 0..10 {
cache.insert(i, "value").unwrap();
}
assert_eq!(capacity_before, cache.capacity());
}
    /// Hasher that always returns 0 and counts how often `finish` is
    /// called, via a shared counter.
    struct MockHasher {
        hash_requests: Arc<Mutex<u32>>
    }

    impl Hasher for MockHasher {
        fn finish(&self) -> u64 {
            // Record that a hash was requested; the constant result is
            // irrelevant for this test.
            *self.hash_requests.lock().unwrap() += 1;
            0
        }

        fn write(&mut self, _bytes: &[u8]) { }
    }

    /// BuildHasher handing out `MockHasher`s that share one counter.
    struct MockBuildHasher {
        hash_requests: Arc<Mutex<u32>>
    }

    impl BuildHasher for MockBuildHasher {
        type Hasher = MockHasher;

        fn build_hasher(&self) -> Self::Hasher {
            MockHasher {
                hash_requests: Arc::clone(&self.hash_requests)
            }
        }
    }

    // The cache must route all hashing through the user-supplied hasher:
    // one `finish` call per insertion here.
    #[test]
    fn cache_uses_given_hasher() {
        let build_hasher = MockBuildHasher {
            hash_requests: Arc::new(Mutex::new(0))
        };
        let mut cache = LruCache::with_hasher(1024, build_hasher);
        assert_eq!(0, *cache.hasher().hash_requests.lock().unwrap());
        cache.insert("hello", "world").unwrap();
        assert_eq!(1, *cache.hasher().hash_requests.lock().unwrap());
        cache.insert("greetings", "moon").unwrap();
        cache.insert("ahoy", "mars").unwrap();
        assert_eq!(3, *cache.hasher().hash_requests.lock().unwrap());
    }
#[test]
fn clone_creates_independent_cache() {
let mut cache = LruCache::new(1024);
cache.insert("hello", "world").unwrap();
cache.insert("greetings", "moon").unwrap();
let mut clone = cache.clone();
clone.insert("ahoy", "mars").unwrap();
cache.remove(&"hello");
cache.touch(&"greetings");
assert_eq!(1, cache.len());
assert_eq!(None, cache.get(&"hello"));
assert_eq!(Some(&"moon"), cache.get(&"greetings"));
assert_eq!(None, cache.get(&"ahoy"));
assert_eq!(3, clone.len());
assert_eq!(Some(&"world"), clone.get(&"hello"));
assert_eq!(Some(&"moon"), clone.get(&"greetings"));
assert_eq!(Some(&"mars"), clone.get(&"ahoy"));
assert!(clone.current_size() > cache.current_size());
let cache_drained = cache.drain().collect::<Vec<_>>();
let cache_drained_expected = vec![("greetings", "moon")];
let clone_drained = clone.drain().collect::<Vec<_>>();
let clone_drained_expected = vec![
("hello", "world"),
("greetings", "moon"),
("ahoy", "mars")
];
assert_eq!(cache_drained_expected, cache_drained);
assert_eq!(clone_drained_expected, clone_drained);
}
#[test]
fn touching_in_singleton_works() {
let mut cache = LruCache::new(1024);
cache.insert("hello", "world").unwrap();
cache.touch("hello");
let mut iter = cache.keys();
assert_eq!(Some(&"hello"), iter.next());
assert_eq!(None, iter.next());
}
#[test]
fn empty_cache_formats_for_debug_correctly() {
let cache = LruCache::<&str, &str>::new(1024);
assert_eq!("{}", format!("{:?}", cache));
}
#[test]
fn singleton_cache_formats_for_debug_correctly() {
let mut cache = LruCache::new(1024);
cache.insert("hello", "world").unwrap();
assert_eq!("{\"hello\": \"world\"}", format!("{:?}", cache));
}
#[test]
fn larger_cache_formats_for_debug_correctly() {
let mut cache = LruCache::new(1024);
cache.insert(2, 4).unwrap();
cache.insert(3, 9).unwrap();
cache.insert(5, 25).unwrap();
cache.insert(7, 49).unwrap();
cache.insert(11, 121).unwrap();
cache.touch(&2);
assert_eq!("{3: 9, 5: 25, 7: 49, 11: 121, 2: 4}", format!("{:?}", cache));
}
}