foyer_memory/
raw.rs

1// Copyright 2025 foyer Project Authors
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//     http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15use std::{
16    collections::hash_map::{Entry as HashMapEntry, HashMap},
17    fmt::Debug,
18    future::Future,
19    hash::Hash,
20    ops::Deref,
21    pin::Pin,
22    sync::Arc,
23    task::{Context, Poll},
24};
25
26use arc_swap::ArcSwap;
27use equivalent::Equivalent;
28#[cfg(feature = "tracing")]
29use fastrace::{
30    future::{FutureExt, InSpan},
31    Span,
32};
33use foyer_common::{
34    code::HashBuilder,
35    event::{Event, EventListener},
36    future::{Diversion, DiversionFuture},
37    metrics::Metrics,
38    properties::{Location, Properties, Source},
39    runtime::SingletonHandle,
40    strict_assert,
41    utils::scope::Scope,
42};
43use itertools::Itertools;
44use parking_lot::{Mutex, RwLock};
45use pin_project::pin_project;
46use tokio::{sync::oneshot, task::JoinHandle};
47
48use crate::{
49    error::{Error, Result},
50    eviction::{Eviction, Op},
51    indexer::{hash_table::HashTableIndexer, sentry::Sentry, Indexer},
52    pipe::NoopPipe,
53    record::{Data, Record},
54    Piece, Pipe,
55};
56
/// The weighter for the in-memory cache.
///
/// The weighter calculates the weight of a cache entry from its key and value.
/// The cache compares the total weight of its entries, not the entry count, against the capacity when deciding to evict.
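///
/// # Example
///
/// A minimal sketch of a weighter (the key and value types here are illustrative; any
/// closure with a matching signature implements this trait via the blanket impl below):
///
/// ```ignore
/// use std::sync::Arc;
///
/// // Illustrative types: weigh an entry by the byte length of its key and value.
/// let weighter: Arc<dyn Weighter<String, Vec<u8>>> =
///     Arc::new(|key: &String, value: &Vec<u8>| key.len() + value.len());
/// ```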
60pub trait Weighter<K, V>: Fn(&K, &V) -> usize + Send + Sync + 'static {}
61impl<K, V, T> Weighter<K, V> for T where T: Fn(&K, &V) -> usize + Send + Sync + 'static {}
62
/// Configuration for [`RawCache`].
pub struct RawCacheConfig<E, S>
where
    E: Eviction,
    S: HashBuilder,
{
    /// Capacity of the cache, in the unit returned by the weighter.
    pub capacity: usize,
    /// Number of shards the cache is divided into.
    pub shards: usize,
    /// Configuration for the eviction algorithm.
    pub eviction_config: E::Config,
    /// Hash builder used to hash keys.
    pub hash_builder: S,
    /// Weighter used to calculate the weight of each entry.
    pub weighter: Arc<dyn Weighter<E::Key, E::Value>>,
    /// Optional listener for cache entry events.
    pub event_listener: Option<Arc<dyn EventListener<Key = E::Key, Value = E::Value>>>,
    /// Metrics collector for the cache.
    pub metrics: Arc<Metrics>,
}
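
// A minimal construction sketch (illustrative; it mirrors the `fifo_cache_for_test` helper
// in this file's tests):
//
//     let cache: RawCache<Fifo<u64, u64, TestProperties>, ModHasher> = RawCache::new(RawCacheConfig {
//         capacity: 256,
//         shards: 4,
//         eviction_config: FifoConfig::default(),
//         hash_builder: Default::default(),
//         weighter: Arc::new(|_, _| 1),
//         event_listener: None,
//         metrics: Arc::new(Metrics::noop()),
//     });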
76
77struct RawCacheShard<E, S, I>
78where
79    E: Eviction,
80    S: HashBuilder,
81    I: Indexer<Eviction = E>,
82{
83    eviction: E,
84    indexer: Sentry<I>,
85
86    usage: usize,
87    capacity: usize,
88
89    #[expect(clippy::type_complexity)]
90    waiters: Mutex<HashMap<E::Key, Vec<oneshot::Sender<RawCacheEntry<E, S, I>>>>>,
91
92    metrics: Arc<Metrics>,
93    _event_listener: Option<Arc<dyn EventListener<Key = E::Key, Value = E::Value>>>,
94}
95
96impl<E, S, I> RawCacheShard<E, S, I>
97where
98    E: Eviction,
99    S: HashBuilder,
100    I: Indexer<Eviction = E>,
101{
102    /// Evict entries to fit the target usage.
103    fn evict(&mut self, target: usize, garbages: &mut Vec<(Event, Arc<Record<E>>)>) {
104        // Evict overflow records.
105        while self.usage > target {
106            let evicted = match self.eviction.pop() {
107                Some(evicted) => evicted,
108                None => break,
109            };
110            self.metrics.memory_evict.increase(1);
111
112            let e = self.indexer.remove(evicted.hash(), evicted.key()).unwrap();
113            assert_eq!(Arc::as_ptr(&evicted), Arc::as_ptr(&e));
114
115            strict_assert!(!evicted.as_ref().is_in_indexer());
116            strict_assert!(!evicted.as_ref().is_in_eviction());
117
118            self.usage -= evicted.weight();
119
120            garbages.push((Event::Evict, evicted));
121        }
122    }
123
124    fn emplace(
125        &mut self,
126        data: Data<E>,
127        garbages: &mut Vec<(Event, Arc<Record<E>>)>,
128        waiters: &mut Vec<oneshot::Sender<RawCacheEntry<E, S, I>>>,
129    ) -> Arc<Record<E>> {
130        *waiters = self.waiters.lock().remove(&data.key).unwrap_or_default();
131
132        let weight = data.weight;
133        let old_usage = self.usage;
134
135        let record = Arc::new(Record::new(data));
136
137        // Evict overflow records.
138        self.evict(self.capacity.saturating_sub(weight), garbages);
139
140        // Insert new record
141        if let Some(old) = self.indexer.insert(record.clone()) {
142            self.metrics.memory_replace.increase(1);
143
144            strict_assert!(!old.is_in_indexer());
145
146            if old.is_in_eviction() {
147                self.eviction.remove(&old);
148            }
149            strict_assert!(!old.is_in_eviction());
150
151            self.usage -= old.weight();
152
153            garbages.push((Event::Replace, old));
154        } else {
155            self.metrics.memory_insert.increase(1);
156        }
157        strict_assert!(record.is_in_indexer());
158
159        let ephemeral = record.properties().ephemeral().unwrap_or_default();
160        record.set_ephemeral(ephemeral);
161        if !ephemeral {
162            self.eviction.push(record.clone());
163            strict_assert!(record.is_in_eviction());
164        }
165
166        self.usage += weight;
        // Increase the reference count within the lock section.
        // At this moment, the only holders of the new record are the waiters and the entry
        // returned to the caller, so the reference count must be exactly `waiters.len() + 1`.
169        let refs = waiters.len() + 1;
170        let inc = record.inc_refs(refs);
171        assert_eq!(refs, inc);
172
173        match self.usage.cmp(&old_usage) {
174            std::cmp::Ordering::Greater => self.metrics.memory_usage.increase((self.usage - old_usage) as _),
175            std::cmp::Ordering::Less => self.metrics.memory_usage.decrease((old_usage - self.usage) as _),
176            std::cmp::Ordering::Equal => {}
177        }
178
179        record
180    }
181
182    #[cfg_attr(feature = "tracing", fastrace::trace(name = "foyer::memory::raw::shard::remove"))]
183    fn remove<Q>(&mut self, hash: u64, key: &Q) -> Option<Arc<Record<E>>>
184    where
185        Q: Hash + Equivalent<E::Key> + ?Sized,
186    {
187        let record = self.indexer.remove(hash, key)?;
188
189        if record.is_in_eviction() {
190            self.eviction.remove(&record);
191        }
192        strict_assert!(!record.is_in_indexer());
193        strict_assert!(!record.is_in_eviction());
194
195        self.usage -= record.weight();
196
197        self.metrics.memory_remove.increase(1);
198        self.metrics.memory_usage.decrease(record.weight() as _);
199
200        record.inc_refs(1);
201
202        Some(record)
203    }
204
205    #[cfg_attr(feature = "tracing", fastrace::trace(name = "foyer::memory::raw::shard::get_noop"))]
206    fn get_noop<Q>(&self, hash: u64, key: &Q) -> Option<Arc<Record<E>>>
207    where
208        Q: Hash + Equivalent<E::Key> + ?Sized,
209    {
210        self.get_inner(hash, key)
211    }
212
213    #[cfg_attr(
214        feature = "tracing",
215        fastrace::trace(name = "foyer::memory::raw::shard::get_immutable")
216    )]
217    fn get_immutable<Q>(&self, hash: u64, key: &Q) -> Option<Arc<Record<E>>>
218    where
219        Q: Hash + Equivalent<E::Key> + ?Sized,
220    {
221        self.get_inner(hash, key)
222            .inspect(|record| self.acquire_immutable(record))
223    }
224
225    #[cfg_attr(
226        feature = "tracing",
227        fastrace::trace(name = "foyer::memory::raw::shard::get_mutable")
228    )]
229    fn get_mutable<Q>(&mut self, hash: u64, key: &Q) -> Option<Arc<Record<E>>>
230    where
231        Q: Hash + Equivalent<E::Key> + ?Sized,
232    {
233        self.get_inner(hash, key).inspect(|record| self.acquire_mutable(record))
234    }
235
236    #[cfg_attr(feature = "tracing", fastrace::trace(name = "foyer::memory::raw::shard::get_inner"))]
237    fn get_inner<Q>(&self, hash: u64, key: &Q) -> Option<Arc<Record<E>>>
238    where
239        Q: Hash + Equivalent<E::Key> + ?Sized,
240    {
241        let record = match self.indexer.get(hash, key).cloned() {
242            Some(record) => {
243                self.metrics.memory_hit.increase(1);
244                record
245            }
246            None => {
247                self.metrics.memory_miss.increase(1);
248                return None;
249            }
250        };
251
252        strict_assert!(record.is_in_indexer());
253
254        record.set_ephemeral(false);
255
256        record.inc_refs(1);
257
258        Some(record)
259    }
260
261    #[cfg_attr(feature = "tracing", fastrace::trace(name = "foyer::memory::raw::shard::clear"))]
262    fn clear(&mut self, garbages: &mut Vec<Arc<Record<E>>>) {
263        let records = self.indexer.drain().collect_vec();
264        self.eviction.clear();
265
266        let mut count = 0;
267
268        for record in records {
269            count += 1;
270            strict_assert!(!record.is_in_indexer());
271            strict_assert!(!record.is_in_eviction());
272
273            garbages.push(record);
274        }
275
276        self.metrics.memory_remove.increase(count);
277    }
278
279    #[cfg_attr(
280        feature = "tracing",
281        fastrace::trace(name = "foyer::memory::raw::shard::acquire_immutable")
282    )]
283    fn acquire_immutable(&self, record: &Arc<Record<E>>) {
284        match E::acquire() {
285            Op::Immutable(f) => f(&self.eviction, record),
286            _ => unreachable!(),
287        }
288    }
289
290    #[cfg_attr(
291        feature = "tracing",
292        fastrace::trace(name = "foyer::memory::raw::shard::acquire_mutable")
293    )]
294    fn acquire_mutable(&mut self, record: &Arc<Record<E>>) {
295        match E::acquire() {
296            Op::Mutable(mut f) => f(&mut self.eviction, record),
297            _ => unreachable!(),
298        }
299    }
300
301    #[cfg_attr(
302        feature = "tracing",
303        fastrace::trace(name = "foyer::memory::raw::shard::release_immutable")
304    )]
305    fn release_immutable(&self, record: &Arc<Record<E>>) {
306        match E::release() {
307            Op::Immutable(f) => f(&self.eviction, record),
308            _ => unreachable!(),
309        }
310    }
311
312    #[cfg_attr(
313        feature = "tracing",
314        fastrace::trace(name = "foyer::memory::raw::shard::release_mutable")
315    )]
316    fn release_mutable(&mut self, record: &Arc<Record<E>>) {
317        match E::release() {
318            Op::Mutable(mut f) => f(&mut self.eviction, record),
319            _ => unreachable!(),
320        }
321    }
322
323    #[cfg_attr(feature = "tracing", fastrace::trace(name = "foyer::memory::raw::shard::fetch_noop"))]
324    fn fetch_noop(&self, hash: u64, key: &E::Key) -> RawShardFetch<E, S, I>
325    where
326        E::Key: Clone,
327    {
328        if let Some(record) = self.get_noop(hash, key) {
329            return RawShardFetch::Hit(record);
330        }
331
332        self.fetch_queue(key.clone())
333    }
334
335    #[cfg_attr(
336        feature = "tracing",
337        fastrace::trace(name = "foyer::memory::raw::shard::fetch_immutable")
338    )]
339    fn fetch_immutable(&self, hash: u64, key: &E::Key) -> RawShardFetch<E, S, I>
340    where
341        E::Key: Clone,
342    {
343        if let Some(record) = self.get_immutable(hash, key) {
344            return RawShardFetch::Hit(record);
345        }
346
347        self.fetch_queue(key.clone())
348    }
349
350    #[cfg_attr(
351        feature = "tracing",
352        fastrace::trace(name = "foyer::memory::raw::shard::fetch_mutable")
353    )]
354    fn fetch_mutable(&mut self, hash: u64, key: &E::Key) -> RawShardFetch<E, S, I>
355    where
356        E::Key: Clone,
357    {
358        if let Some(record) = self.get_mutable(hash, key) {
359            return RawShardFetch::Hit(record);
360        }
361
362        self.fetch_queue(key.clone())
363    }
364
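    // `fetch_queue` deduplicates concurrent fetches for the same key: the first caller gets
    // `Miss` and is expected to run the fetch, while later callers get `Wait` with a oneshot
    // receiver that is resolved when the fetched entry is inserted (the waiters drained in
    // `emplace` are notified by `insert_with_properties`).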
365    #[cfg_attr(
366        feature = "tracing",
367        fastrace::trace(name = "foyer::memory::raw::shard::fetch_queue")
368    )]
369    fn fetch_queue(&self, key: E::Key) -> RawShardFetch<E, S, I> {
370        match self.waiters.lock().entry(key) {
371            HashMapEntry::Occupied(mut o) => {
372                let (tx, rx) = oneshot::channel();
373                o.get_mut().push(tx);
374                self.metrics.memory_queue.increase(1);
375                #[cfg(feature = "tracing")]
376                let wait = rx.in_span(Span::enter_with_local_parent(
377                    "foyer::memory::raw::fetch_with_runtime::wait",
378                ));
379                #[cfg(not(feature = "tracing"))]
380                let wait = rx;
381                RawShardFetch::Wait(wait)
382            }
383            HashMapEntry::Vacant(v) => {
384                v.insert(vec![]);
385                self.metrics.memory_fetch.increase(1);
386                RawShardFetch::Miss
387            }
388        }
389    }
390}
391
392#[expect(clippy::type_complexity)]
393struct RawCacheInner<E, S, I>
394where
395    E: Eviction,
396    S: HashBuilder,
397    I: Indexer<Eviction = E>,
398{
399    shards: Vec<RwLock<RawCacheShard<E, S, I>>>,
400
401    capacity: usize,
402
403    hash_builder: Arc<S>,
404    weighter: Arc<dyn Weighter<E::Key, E::Value>>,
405
406    metrics: Arc<Metrics>,
407    event_listener: Option<Arc<dyn EventListener<Key = E::Key, Value = E::Value>>>,
408    pipe: ArcSwap<Box<dyn Pipe<Key = E::Key, Value = E::Value, Properties = E::Properties>>>,
409}
410
411impl<E, S, I> RawCacheInner<E, S, I>
412where
413    E: Eviction,
414    S: HashBuilder,
415    I: Indexer<Eviction = E>,
416{
417    #[cfg_attr(feature = "tracing", fastrace::trace(name = "foyer::memory::raw::inner::clear"))]
418    fn clear(&self) {
419        let mut garbages = vec![];
420
421        self.shards
422            .iter()
423            .map(|shard| shard.write())
424            .for_each(|mut shard| shard.clear(&mut garbages));
425
426        // Do not deallocate data within the lock section.
427        if let Some(listener) = self.event_listener.as_ref() {
428            for record in garbages {
429                listener.on_leave(Event::Clear, record.key(), record.value());
430            }
431        }
432    }
433}
434
435pub struct RawCache<E, S, I = HashTableIndexer<E>>
436where
437    E: Eviction,
438    S: HashBuilder,
439    I: Indexer<Eviction = E>,
440{
441    inner: Arc<RawCacheInner<E, S, I>>,
442}
443
444impl<E, S, I> Drop for RawCacheInner<E, S, I>
445where
446    E: Eviction,
447    S: HashBuilder,
448    I: Indexer<Eviction = E>,
449{
450    fn drop(&mut self) {
451        self.clear();
452    }
453}
454
455impl<E, S, I> Clone for RawCache<E, S, I>
456where
457    E: Eviction,
458    S: HashBuilder,
459    I: Indexer<Eviction = E>,
460{
461    fn clone(&self) -> Self {
462        Self {
463            inner: self.inner.clone(),
464        }
465    }
466}
467
468impl<E, S, I> RawCache<E, S, I>
469where
470    E: Eviction,
471    S: HashBuilder,
472    I: Indexer<Eviction = E>,
473{
474    pub fn new(config: RawCacheConfig<E, S>) -> Self {
475        let shard_capacity = config.capacity / config.shards;
476
477        let shards = (0..config.shards)
478            .map(|_| RawCacheShard {
479                eviction: E::new(shard_capacity, &config.eviction_config),
480                indexer: Sentry::default(),
481                usage: 0,
482                capacity: shard_capacity,
483                waiters: Mutex::default(),
484                metrics: config.metrics.clone(),
485                _event_listener: config.event_listener.clone(),
486            })
487            .map(RwLock::new)
488            .collect_vec();
489
490        let pipe: Box<dyn Pipe<Key = E::Key, Value = E::Value, Properties = E::Properties>> =
491            Box::new(NoopPipe::default());
492
493        let inner = RawCacheInner {
494            shards,
495            capacity: config.capacity,
496            hash_builder: Arc::new(config.hash_builder),
497            weighter: config.weighter,
498            metrics: config.metrics,
499            event_listener: config.event_listener,
500            pipe: ArcSwap::new(Arc::new(pipe)),
501        };
502
503        Self { inner: Arc::new(inner) }
504    }
505
506    #[cfg_attr(feature = "tracing", fastrace::trace(name = "foyer::memory::raw::resize"))]
507    pub fn resize(&self, capacity: usize) -> Result<()> {
508        let shards = self.inner.shards.len();
509        let shard_capacity = capacity / shards;
510
511        let handles = (0..shards)
512            .map(|i| {
513                let inner = self.inner.clone();
514                std::thread::spawn(move || {
515                    let mut garbages = vec![];
516                    let res = inner.shards[i].write().with(|mut shard| {
517                        shard.eviction.update(shard_capacity, None).inspect(|_| {
518                            shard.capacity = shard_capacity;
519                            shard.evict(shard_capacity, &mut garbages)
520                        })
521                    });
522                    // Deallocate data out of the lock critical section.
523                    let pipe = inner.pipe.load();
524                    let piped = pipe.is_enabled();
525                    if inner.event_listener.is_some() || piped {
526                        for (event, record) in garbages {
527                            if let Some(listener) = inner.event_listener.as_ref() {
528                                listener.on_leave(event, record.key(), record.value())
529                            }
530                            if piped && event == Event::Evict {
531                                pipe.send(Piece::new(record));
532                            }
533                        }
534                    }
535                    res
536                })
537            })
538            .collect_vec();
539
540        let errs = handles
541            .into_iter()
542            .map(|handle| handle.join().unwrap())
543            .filter(|res| res.is_err())
544            .map(|res| res.unwrap_err())
545            .collect_vec();
546        if !errs.is_empty() {
547            return Err(Error::multiple(errs));
548        }
549
550        Ok(())
551    }
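
    // Resizing applies per shard and immediately evicts entries down to the new capacity.
    // A minimal sketch (illustrative, mirroring `test_resize` in the tests below):
    //
    //     let half = cache.capacity() / 2;
    //     cache.resize(half).unwrap();
    //     assert!(cache.usage() <= half);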
552
553    #[cfg_attr(feature = "tracing", fastrace::trace(name = "foyer::memory::raw::insert"))]
554    pub fn insert(&self, key: E::Key, value: E::Value) -> RawCacheEntry<E, S, I> {
555        self.insert_with_properties(key, value, Default::default())
556    }
557
558    #[cfg_attr(
559        feature = "tracing",
560        fastrace::trace(name = "foyer::memory::raw::insert_with_properties")
561    )]
562    pub fn insert_with_properties(
563        &self,
564        key: E::Key,
565        value: E::Value,
566        properties: E::Properties,
567    ) -> RawCacheEntry<E, S, I> {
568        let hash = self.inner.hash_builder.hash_one(&key);
569        let weight = (self.inner.weighter)(&key, &value);
570
571        let mut garbages = vec![];
572        let mut waiters = vec![];
573
574        let record = self.inner.shards[self.shard(hash)].write().with(|mut shard| {
575            shard.emplace(
576                Data {
577                    key,
578                    value,
579                    properties,
580                    hash,
581                    weight,
582                },
583                &mut garbages,
584                &mut waiters,
585            )
586        });
587
588        // Notify waiters out of the lock critical section.
589        for waiter in waiters {
590            let _ = waiter.send(RawCacheEntry {
591                record: record.clone(),
592                inner: self.inner.clone(),
593            });
594        }
595
596        // Deallocate data out of the lock critical section.
597        let pipe = self.inner.pipe.load();
598        let piped = pipe.is_enabled();
599        if self.inner.event_listener.is_some() || piped {
600            for (event, record) in garbages {
601                if let Some(listener) = self.inner.event_listener.as_ref() {
602                    listener.on_leave(event, record.key(), record.value())
603                }
604                if piped && event == Event::Evict {
605                    pipe.send(Piece::new(record));
606                }
607            }
608        }
609
610        RawCacheEntry {
611            record,
612            inner: self.inner.clone(),
613        }
614    }
615
616    /// Evict all entries in the cache and offload them into the disk cache via the pipe if needed.
617    #[cfg_attr(feature = "tracing", fastrace::trace(name = "foyer::memory::raw::evict_all"))]
618    pub fn evict_all(&self) {
619        let mut garbages = vec![];
620        for shard in self.inner.shards.iter() {
621            shard.write().evict(0, &mut garbages);
622        }
623
624        // Deallocate data out of the lock critical section.
625        let pipe = self.inner.pipe.load();
626        let piped = pipe.is_enabled();
627        if self.inner.event_listener.is_some() || piped {
628            for (event, record) in garbages {
629                if let Some(listener) = self.inner.event_listener.as_ref() {
630                    listener.on_leave(event, record.key(), record.value())
631                }
632                if piped && event == Event::Evict {
633                    pipe.send(Piece::new(record));
634                }
635            }
636        }
637    }
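
    // A minimal sketch of offloading via a pipe (illustrative; `PiecePipe` is the test-only
    // pipe used by `test_evict_all` below):
    //
    //     cache.set_pipe(Box::new(PiecePipe::default()));
    //     cache.evict_all(); // evicted entries are forwarded to the pipe as `Piece`s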
638
639    /// Evict all entries in the cache and offload them into the disk cache via the pipe if needed.
640    ///
    /// This function obeys the I/O throttler of the disk cache and makes sure all entries are offloaded.
642    /// Therefore, this function is asynchronous.
643    #[cfg_attr(feature = "tracing", fastrace::trace(name = "foyer::memory::raw::flush"))]
644    pub async fn flush(&self) {
645        let mut garbages = vec![];
646        for shard in self.inner.shards.iter() {
647            shard.write().evict(0, &mut garbages);
648        }
649
650        // Deallocate data out of the lock critical section.
651        let pipe = self.inner.pipe.load();
652        let piped = pipe.is_enabled();
653
654        if let Some(listener) = self.inner.event_listener.as_ref() {
655            for (event, record) in garbages.iter() {
656                listener.on_leave(*event, record.key(), record.value());
657            }
658        }
659        if piped {
660            let pieces = garbages.into_iter().map(|(_, record)| Piece::new(record)).collect_vec();
661            pipe.flush(pieces).await;
662        }
663    }
664
665    #[cfg_attr(feature = "tracing", fastrace::trace(name = "foyer::memory::raw::remove"))]
666    pub fn remove<Q>(&self, key: &Q) -> Option<RawCacheEntry<E, S, I>>
667    where
668        Q: Hash + Equivalent<E::Key> + ?Sized,
669    {
670        let hash = self.inner.hash_builder.hash_one(key);
671
672        self.inner.shards[self.shard(hash)]
673            .write()
674            .with(|mut shard| {
675                shard.remove(hash, key).map(|record| RawCacheEntry {
676                    inner: self.inner.clone(),
677                    record,
678                })
679            })
680            .inspect(|record| {
681                // Deallocate data out of the lock critical section.
682                if let Some(listener) = self.inner.event_listener.as_ref() {
683                    listener.on_leave(Event::Remove, record.key(), record.value());
684                }
685            })
686    }
687
688    #[cfg_attr(feature = "tracing", fastrace::trace(name = "foyer::memory::raw::get"))]
689    pub fn get<Q>(&self, key: &Q) -> Option<RawCacheEntry<E, S, I>>
690    where
691        Q: Hash + Equivalent<E::Key> + ?Sized,
692    {
693        let hash = self.inner.hash_builder.hash_one(key);
694
695        let record = match E::acquire() {
696            Op::Noop => self.inner.shards[self.shard(hash)].read().get_noop(hash, key),
697            Op::Immutable(_) => self.inner.shards[self.shard(hash)]
698                .read()
699                .with(|shard| shard.get_immutable(hash, key)),
700            Op::Mutable(_) => self.inner.shards[self.shard(hash)]
701                .write()
702                .with(|mut shard| shard.get_mutable(hash, key)),
703        }?;
704
705        Some(RawCacheEntry {
706            inner: self.inner.clone(),
707            record,
708        })
709    }
710
711    #[cfg_attr(feature = "tracing", fastrace::trace(name = "foyer::memory::raw::contains"))]
712    pub fn contains<Q>(&self, key: &Q) -> bool
713    where
714        Q: Hash + Equivalent<E::Key> + ?Sized,
715    {
716        let hash = self.inner.hash_builder.hash_one(key);
717
718        self.inner.shards[self.shard(hash)]
719            .read()
720            .with(|shard| shard.indexer.get(hash, key).is_some())
721    }
722
723    #[cfg_attr(feature = "tracing", fastrace::trace(name = "foyer::memory::raw::touch"))]
724    pub fn touch<Q>(&self, key: &Q) -> bool
725    where
726        Q: Hash + Equivalent<E::Key> + ?Sized,
727    {
728        let hash = self.inner.hash_builder.hash_one(key);
729
730        match E::acquire() {
731            Op::Noop => self.inner.shards[self.shard(hash)].read().get_noop(hash, key),
732            Op::Immutable(_) => self.inner.shards[self.shard(hash)]
733                .read()
734                .with(|shard| shard.get_immutable(hash, key)),
735            Op::Mutable(_) => self.inner.shards[self.shard(hash)]
736                .write()
737                .with(|mut shard| shard.get_mutable(hash, key)),
738        }
739        .is_some()
740    }
741
742    #[cfg_attr(feature = "tracing", fastrace::trace(name = "foyer::memory::raw::clear"))]
743    pub fn clear(&self) {
744        self.inner.clear();
745    }
746
747    pub fn capacity(&self) -> usize {
748        self.inner.capacity
749    }
750
751    pub fn usage(&self) -> usize {
752        self.inner.shards.iter().map(|shard| shard.read().usage).sum()
753    }
754
755    pub fn metrics(&self) -> &Metrics {
756        &self.inner.metrics
757    }
758
759    pub fn hash_builder(&self) -> &Arc<S> {
760        &self.inner.hash_builder
761    }
762
763    pub fn shards(&self) -> usize {
764        self.inner.shards.len()
765    }
766
767    pub fn set_pipe(&self, pipe: Box<dyn Pipe<Key = E::Key, Value = E::Value, Properties = E::Properties>>) {
768        self.inner.pipe.store(Arc::new(pipe));
769    }
770
771    fn shard(&self, hash: u64) -> usize {
772        hash as usize % self.inner.shards.len()
773    }
774}
775
776pub struct RawCacheEntry<E, S, I = HashTableIndexer<E>>
777where
778    E: Eviction,
779    S: HashBuilder,
780    I: Indexer<Eviction = E>,
781{
782    inner: Arc<RawCacheInner<E, S, I>>,
783    record: Arc<Record<E>>,
784}
785
786impl<E, S, I> Debug for RawCacheEntry<E, S, I>
787where
788    E: Eviction,
789    S: HashBuilder,
790    I: Indexer<Eviction = E>,
791{
792    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
793        f.debug_struct("RawCacheEntry").field("record", &self.record).finish()
794    }
795}
796
797impl<E, S, I> Drop for RawCacheEntry<E, S, I>
798where
799    E: Eviction,
800    S: HashBuilder,
801    I: Indexer<Eviction = E>,
802{
803    fn drop(&mut self) {
804        let hash = self.record.hash();
805        let shard = &self.inner.shards[hash as usize % self.inner.shards.len()];
806
807        if self.record.dec_refs(1) == 0 {
808            match E::release() {
809                Op::Noop => {}
810                Op::Immutable(_) => shard.read().with(|shard| shard.release_immutable(&self.record)),
811                Op::Mutable(_) => shard.write().with(|mut shard| shard.release_mutable(&self.record)),
812            }
813
            if self.record.is_ephemeral() {
                shard
                    .write()
                    .with(|mut shard| shard.remove(hash, self.key()))
                    .inspect(|record| {
                        // Deallocate data out of the lock critical section.
                        if let Some(listener) = self.inner.event_listener.as_ref() {
                            listener.on_leave(Event::Evict, record.key(), record.value());
                        }
                        let pipe = self.inner.pipe.load();
                        if pipe.is_enabled() {
                            pipe.send(self.piece());
                        }
                    });
            }
833        }
834    }
835}
836
837impl<E, S, I> Clone for RawCacheEntry<E, S, I>
838where
839    E: Eviction,
840    S: HashBuilder,
841    I: Indexer<Eviction = E>,
842{
843    fn clone(&self) -> Self {
844        self.record.inc_refs(1);
845        Self {
846            inner: self.inner.clone(),
847            record: self.record.clone(),
848        }
849    }
850}
851
852impl<E, S, I> Deref for RawCacheEntry<E, S, I>
853where
854    E: Eviction,
855    S: HashBuilder,
856    I: Indexer<Eviction = E>,
857{
858    type Target = E::Value;
859
860    fn deref(&self) -> &Self::Target {
861        self.value()
862    }
863}
864
865unsafe impl<E, S, I> Send for RawCacheEntry<E, S, I>
866where
867    E: Eviction,
868    S: HashBuilder,
869    I: Indexer<Eviction = E>,
870{
871}
872
873unsafe impl<E, S, I> Sync for RawCacheEntry<E, S, I>
874where
875    E: Eviction,
876    S: HashBuilder,
877    I: Indexer<Eviction = E>,
878{
879}
880
881impl<E, S, I> RawCacheEntry<E, S, I>
882where
883    E: Eviction,
884    S: HashBuilder,
885    I: Indexer<Eviction = E>,
886{
887    pub fn hash(&self) -> u64 {
888        self.record.hash()
889    }
890
891    pub fn key(&self) -> &E::Key {
892        self.record.key()
893    }
894
895    pub fn value(&self) -> &E::Value {
896        self.record.value()
897    }
898
899    pub fn properties(&self) -> &E::Properties {
900        self.record.properties()
901    }
902
903    pub fn weight(&self) -> usize {
904        self.record.weight()
905    }
906
907    pub fn refs(&self) -> usize {
908        self.record.refs()
909    }
910
911    pub fn is_outdated(&self) -> bool {
912        !self.record.is_in_indexer()
913    }
914
915    pub fn piece(&self) -> Piece<E::Key, E::Value, E::Properties> {
916        Piece::new(self.record.clone())
917    }
918}
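
// Entry handles are reference counted: cloning an entry bumps the record's refcount, and the
// eviction state machine is notified when the last handle drops (see the `Drop` impl above).
// A minimal usage sketch (illustrative):
//
//     let entry = cache.insert(1u64, 1u64);
//     assert_eq!(*entry, 1);       // `Deref` yields the value
//     assert_eq!(entry.key(), &1);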
919
920/// The state of `fetch`.
921#[derive(Debug, Clone, Copy, PartialEq, Eq)]
922pub enum FetchState {
923    /// Cache hit.
924    Hit,
    /// Cache miss, but another fetch for the same key is already in flight, so this one waits in the queue.
    Wait,
    /// Cache miss, and there are no other waiters at the moment.
    Miss,
929}
930
931/// Context for fetch calls.
932#[derive(Debug)]
933pub struct FetchContext {
    /// Whether this fetch was triggered because the disk cache was throttled.
    pub throttled: bool,
    /// The source of the fetched entry.
    pub source: Source,
938}
939
940enum RawShardFetch<E, S, I>
941where
942    E: Eviction,
943    S: HashBuilder,
944    I: Indexer<Eviction = E>,
945{
946    Hit(Arc<Record<E>>),
947    Wait(RawFetchWait<E, S, I>),
948    Miss,
949}
950
951pub type RawFetch<E, ER, S, I = HashTableIndexer<E>> =
952    DiversionFuture<RawFetchInner<E, ER, S, I>, std::result::Result<RawCacheEntry<E, S, I>, ER>, FetchContext>;
953
954type RawFetchHit<E, S, I> = Option<RawCacheEntry<E, S, I>>;
955#[cfg(feature = "tracing")]
956type RawFetchWait<E, S, I> = InSpan<oneshot::Receiver<RawCacheEntry<E, S, I>>>;
957#[cfg(not(feature = "tracing"))]
958type RawFetchWait<E, S, I> = oneshot::Receiver<RawCacheEntry<E, S, I>>;
959type RawFetchMiss<E, I, S, ER, DFS> = JoinHandle<Diversion<std::result::Result<RawCacheEntry<E, S, I>, ER>, DFS>>;
960
961#[pin_project(project = RawFetchInnerProj)]
962pub enum RawFetchInner<E, ER, S, I>
963where
964    E: Eviction,
965    S: HashBuilder,
966    I: Indexer<Eviction = E>,
967{
968    Hit(RawFetchHit<E, S, I>),
969    Wait(#[pin] RawFetchWait<E, S, I>),
970    Miss(#[pin] RawFetchMiss<E, I, S, ER, FetchContext>),
971}
972
973impl<E, ER, S, I> RawFetchInner<E, ER, S, I>
974where
975    E: Eviction,
976    S: HashBuilder,
977    I: Indexer<Eviction = E>,
978{
979    pub fn state(&self) -> FetchState {
980        match self {
981            RawFetchInner::Hit(_) => FetchState::Hit,
982            RawFetchInner::Wait(_) => FetchState::Wait,
983            RawFetchInner::Miss(_) => FetchState::Miss,
984        }
985    }
986}
987
988impl<E, ER, S, I> Future for RawFetchInner<E, ER, S, I>
989where
990    E: Eviction,
991    ER: From<Error>,
992    S: HashBuilder,
993    I: Indexer<Eviction = E>,
994{
995    type Output = Diversion<std::result::Result<RawCacheEntry<E, S, I>, ER>, FetchContext>;
996
997    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
998        match self.project() {
999            RawFetchInnerProj::Hit(opt) => Poll::Ready(Ok(opt.take().unwrap()).into()),
1000            RawFetchInnerProj::Wait(waiter) => waiter.poll(cx).map_err(|e| Error::wait(e).into()).map(Diversion::from),
1001            RawFetchInnerProj::Miss(handle) => handle.poll(cx).map(|join| join.unwrap()),
1002        }
1003    }
1004}
1005
1006impl<E, S, I> RawCache<E, S, I>
1007where
1008    E: Eviction,
1009    S: HashBuilder,
1010    I: Indexer<Eviction = E>,
1011    E::Key: Clone,
1012{
1013    #[cfg_attr(feature = "tracing", fastrace::trace(name = "foyer::memory::raw::fetch"))]
1014    pub fn fetch<F, FU, ER>(&self, key: E::Key, fetch: F) -> RawFetch<E, ER, S, I>
1015    where
1016        F: FnOnce() -> FU,
1017        FU: Future<Output = std::result::Result<E::Value, ER>> + Send + 'static,
1018        ER: Send + 'static + Debug,
1019    {
1020        self.fetch_inner(
1021            key,
1022            Default::default(),
1023            fetch,
1024            &tokio::runtime::Handle::current().into(),
1025        )
1026    }
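
    // A minimal usage sketch (illustrative; assumes a tokio runtime and a cache with `u64`
    // keys and values). Concurrent `fetch` calls for the same key are deduplicated: the first
    // caller runs the closure while the others wait in the queue and share the inserted entry.
    //
    //     let entry = cache
    //         .fetch(42, || async move { Ok::<_, Error>(42) })
    //         .await
    //         .unwrap();
    //     assert_eq!(*entry, 42);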
1027
1028    #[cfg_attr(
1029        feature = "tracing",
1030        fastrace::trace(name = "foyer::memory::raw::fetch_with_properties")
1031    )]
1032    pub fn fetch_with_properties<F, FU, ER, ID>(
1033        &self,
1034        key: E::Key,
1035        properties: E::Properties,
1036        fetch: F,
1037    ) -> RawFetch<E, ER, S, I>
1038    where
1039        F: FnOnce() -> FU,
1040        FU: Future<Output = ID> + Send + 'static,
1041        ER: Send + 'static + Debug,
1042        ID: Into<Diversion<std::result::Result<E::Value, ER>, FetchContext>>,
1043    {
1044        self.fetch_inner(key, properties, fetch, &tokio::runtime::Handle::current().into())
1045    }
1046
    /// Advanced fetch with a specified runtime.
    ///
    /// This function is for internal use, so its documentation is hidden.
1050    #[doc(hidden)]
1051    #[cfg_attr(feature = "tracing", fastrace::trace(name = "foyer::memory::raw::fetch_inner"))]
1052    pub fn fetch_inner<F, FU, ER, ID>(
1053        &self,
1054        key: E::Key,
1055        mut properties: E::Properties,
1056        fetch: F,
1057        runtime: &SingletonHandle,
1058    ) -> RawFetch<E, ER, S, I>
1059    where
1060        F: FnOnce() -> FU,
1061        FU: Future<Output = ID> + Send + 'static,
1062        ER: Send + 'static + Debug,
1063        ID: Into<Diversion<std::result::Result<E::Value, ER>, FetchContext>>,
1064    {
1065        let hash = self.inner.hash_builder.hash_one(&key);
1066
1067        let raw = match E::acquire() {
1068            Op::Noop => self.inner.shards[self.shard(hash)].read().fetch_noop(hash, &key),
1069            Op::Immutable(_) => self.inner.shards[self.shard(hash)].read().fetch_immutable(hash, &key),
1070            Op::Mutable(_) => self.inner.shards[self.shard(hash)].write().fetch_mutable(hash, &key),
1071        };
1072
1073        match raw {
1074            RawShardFetch::Hit(record) => {
1075                return RawFetch::new(RawFetchInner::Hit(Some(RawCacheEntry {
1076                    record,
1077                    inner: self.inner.clone(),
1078                })))
1079            }
1080            RawShardFetch::Wait(future) => return RawFetch::new(RawFetchInner::Wait(future)),
1081            RawShardFetch::Miss => {}
1082        }
1083
1084        let cache = self.clone();
1085        let future = fetch();
1086        let join = runtime.spawn({
1087            let task = async move {
1088                #[cfg(feature = "tracing")]
1089                let Diversion { target, store } = future
1090                    .in_span(Span::enter_with_local_parent("foyer::memory::raw::fetch_inner::fn"))
1091                    .await
1092                    .into();
1093                #[cfg(not(feature = "tracing"))]
1094                let Diversion { target, store } = future.await.into();
1095
1096                let value = match target {
1097                    Ok(value) => value,
1098                    Err(e) => {
1099                        cache.inner.shards[cache.shard(hash)].read().waiters.lock().remove(&key);
                        tracing::debug!("[fetch]: error raised while fetching, all waiters are dropped, err: {e:?}");
1101                        return Diversion { target: Err(e), store };
1102                    }
1103                };
1104                if let Some(ctx) = store.as_ref() {
1105                    if ctx.throttled {
1106                        properties = properties.with_location(Location::InMem)
1107                    }
1108                    properties = properties.with_source(ctx.source)
1109                };
                // Entries destined for the disk cache are inserted as ephemeral, so they are
                // dropped from memory (and offloaded via the pipe) once the last handle is released.
                let location = properties.location().unwrap_or_default();
                properties = properties.with_ephemeral(matches!(location, Location::OnDisk));
1112                let entry = cache.insert_with_properties(key, value, properties);
1113                Diversion {
1114                    target: Ok(entry),
1115                    store,
1116                }
1117            };
1118            #[cfg(feature = "tracing")]
1119            let task = task.in_span(Span::enter_with_local_parent(
1120                "foyer::memory::generic::fetch_with_runtime::spawn",
1121            ));
1122            task
1123        });
1124
1125        RawFetch::new(RawFetchInner::Miss(join))
1126    }
1127}
1128
1129#[cfg(test)]
1130mod tests {
1131    use foyer_common::hasher::ModHasher;
1132    use rand::{rngs::SmallRng, seq::IndexedRandom, RngCore, SeedableRng};
1133
1134    use super::*;
1135    use crate::{
1136        eviction::{
1137            fifo::{Fifo, FifoConfig},
1138            lfu::{Lfu, LfuConfig},
1139            lru::{Lru, LruConfig},
1140            s3fifo::{S3Fifo, S3FifoConfig},
1141            sieve::{Sieve, SieveConfig},
1142            test_utils::TestProperties,
1143        },
1144        test_utils::PiecePipe,
1145    };
1146
1147    fn is_send_sync_static<T: Send + Sync + 'static>() {}
1148
1149    #[test]
1150    fn test_send_sync_static() {
1151        is_send_sync_static::<RawCache<Fifo<(), (), TestProperties>, ModHasher>>();
1152        is_send_sync_static::<RawCache<S3Fifo<(), (), TestProperties>, ModHasher>>();
1153        is_send_sync_static::<RawCache<Lfu<(), (), TestProperties>, ModHasher>>();
1154        is_send_sync_static::<RawCache<Lru<(), (), TestProperties>, ModHasher>>();
1155        is_send_sync_static::<RawCache<Sieve<(), (), TestProperties>, ModHasher>>();
1156    }
1157
1158    #[expect(clippy::type_complexity)]
1159    fn fifo_cache_for_test(
1160    ) -> RawCache<Fifo<u64, u64, TestProperties>, ModHasher, HashTableIndexer<Fifo<u64, u64, TestProperties>>> {
1161        RawCache::new(RawCacheConfig {
1162            capacity: 256,
1163            shards: 4,
1164            eviction_config: FifoConfig::default(),
1165            hash_builder: Default::default(),
1166            weighter: Arc::new(|_, _| 1),
1167            event_listener: None,
1168            metrics: Arc::new(Metrics::noop()),
1169        })
1170    }
1171
1172    #[expect(clippy::type_complexity)]
1173    fn s3fifo_cache_for_test(
1174    ) -> RawCache<S3Fifo<u64, u64, TestProperties>, ModHasher, HashTableIndexer<S3Fifo<u64, u64, TestProperties>>> {
1175        RawCache::new(RawCacheConfig {
1176            capacity: 256,
1177            shards: 4,
1178            eviction_config: S3FifoConfig::default(),
1179            hash_builder: Default::default(),
1180            weighter: Arc::new(|_, _| 1),
1181            event_listener: None,
1182            metrics: Arc::new(Metrics::noop()),
1183        })
1184    }
1185
1186    #[expect(clippy::type_complexity)]
1187    fn lru_cache_for_test(
1188    ) -> RawCache<Lru<u64, u64, TestProperties>, ModHasher, HashTableIndexer<Lru<u64, u64, TestProperties>>> {
1189        RawCache::new(RawCacheConfig {
1190            capacity: 256,
1191            shards: 4,
1192            eviction_config: LruConfig::default(),
1193            hash_builder: Default::default(),
1194            weighter: Arc::new(|_, _| 1),
1195            event_listener: None,
1196            metrics: Arc::new(Metrics::noop()),
1197        })
1198    }
1199
1200    #[expect(clippy::type_complexity)]
1201    fn lfu_cache_for_test(
1202    ) -> RawCache<Lfu<u64, u64, TestProperties>, ModHasher, HashTableIndexer<Lfu<u64, u64, TestProperties>>> {
1203        RawCache::new(RawCacheConfig {
1204            capacity: 256,
1205            shards: 4,
1206            eviction_config: LfuConfig::default(),
1207            hash_builder: Default::default(),
1208            weighter: Arc::new(|_, _| 1),
1209            event_listener: None,
1210            metrics: Arc::new(Metrics::noop()),
1211        })
1212    }
1213
1214    #[expect(clippy::type_complexity)]
1215    fn sieve_cache_for_test(
1216    ) -> RawCache<Sieve<u64, u64, TestProperties>, ModHasher, HashTableIndexer<Sieve<u64, u64, TestProperties>>> {
1217        RawCache::new(RawCacheConfig {
1218            capacity: 256,
1219            shards: 4,
1220            eviction_config: SieveConfig {},
1221            hash_builder: Default::default(),
1222            weighter: Arc::new(|_, _| 1),
1223            event_listener: None,
1224            metrics: Arc::new(Metrics::noop()),
1225        })
1226    }
1227
1228    #[test_log::test]
1229    fn test_insert_ephemeral() {
1230        let fifo = fifo_cache_for_test();
1231
1232        let e1 = fifo.insert_with_properties(1, 1, TestProperties::default().with_ephemeral(true));
1233        assert_eq!(fifo.usage(), 1);
1234        drop(e1);
1235        assert_eq!(fifo.usage(), 0);
1236
1237        let e2a = fifo.insert_with_properties(2, 2, TestProperties::default().with_ephemeral(true));
1238        assert_eq!(fifo.usage(), 1);
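        // `get` clears the ephemeral flag, so entry 2 survives even after all handles are dropped.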
1239        let e2b = fifo.get(&2).expect("entry 2 should exist");
1240        drop(e2a);
1241        assert_eq!(fifo.usage(), 1);
1242        drop(e2b);
1243        assert_eq!(fifo.usage(), 1);
1244    }
1245
1246    #[test]
1247    fn test_evict_all() {
1248        let pipe = Box::new(PiecePipe::default());
1249
1250        let fifo = fifo_cache_for_test();
1251        fifo.set_pipe(pipe.clone());
1252        for i in 0..fifo.capacity() as _ {
1253            fifo.insert(i, i);
1254        }
1255        assert_eq!(fifo.usage(), fifo.capacity());
1256
1257        fifo.evict_all();
1258        let mut pieces = pipe
1259            .pieces()
1260            .iter()
1261            .map(|p| (p.hash(), *p.key(), *p.value()))
1262            .collect_vec();
1263        pieces.sort_by_key(|t| t.0);
1264        let expected = (0..fifo.capacity() as u64).map(|i| (i, i, i)).collect_vec();
1265        assert_eq!(pieces, expected);
1266    }
1267
1268    #[test]
1269    fn test_insert_size_over_capacity() {
1270        let cache: RawCache<Fifo<Vec<u8>, Vec<u8>, TestProperties>, ModHasher> = RawCache::new(RawCacheConfig {
1271            capacity: 4 * 1024, // 4KB
1272            shards: 1,
1273            eviction_config: FifoConfig::default(),
1274            hash_builder: Default::default(),
1275            weighter: Arc::new(|k, v| k.len() + v.len()),
1276            event_listener: None,
1277            metrics: Arc::new(Metrics::noop()),
1278        });
1279
1280        let key = vec![b'k'; 1024]; // 1KB
1281        let value = vec![b'v'; 5 * 1024]; // 5KB
1282
1283        cache.insert(key.clone(), value.clone());
1284        assert_eq!(cache.usage(), 6 * 1024);
1285        assert_eq!(cache.get(&key).unwrap().value(), &value);
1286    }
1287
1288    fn test_resize<E>(cache: &RawCache<E, ModHasher, HashTableIndexer<E>>)
1289    where
1290        E: Eviction<Key = u64, Value = u64>,
1291    {
1292        let capacity = cache.capacity();
1293        for i in 0..capacity as u64 * 2 {
1294            cache.insert(i, i);
1295        }
1296        assert_eq!(cache.usage(), capacity);
1297        cache.resize(capacity / 2).unwrap();
1298        assert_eq!(cache.usage(), capacity / 2);
1299        for i in 0..capacity as u64 * 2 {
1300            cache.insert(i, i);
1301        }
1302        assert_eq!(cache.usage(), capacity / 2);
1303    }
1304
1305    #[test]
1306    fn test_fifo_cache_resize() {
1307        let cache = fifo_cache_for_test();
1308        test_resize(&cache);
1309    }
1310
1311    #[test]
1312    fn test_s3fifo_cache_resize() {
1313        let cache = s3fifo_cache_for_test();
1314        test_resize(&cache);
1315    }
1316
1317    #[test]
1318    fn test_lru_cache_resize() {
1319        let cache = lru_cache_for_test();
1320        test_resize(&cache);
1321    }
1322
1323    #[test]
1324    fn test_lfu_cache_resize() {
1325        let cache = lfu_cache_for_test();
1326        test_resize(&cache);
1327    }
1328
1329    #[test]
1330    fn test_sieve_cache_resize() {
1331        let cache = sieve_cache_for_test();
1332        test_resize(&cache);
1333    }
1334
1335    mod fuzzy {
1336        use foyer_common::properties::Hint;
1337
1338        use super::*;
1339
1340        fn fuzzy<E, S>(cache: RawCache<E, S>, hints: Vec<Hint>)
1341        where
1342            E: Eviction<Key = u64, Value = u64, Properties = TestProperties>,
1343            S: HashBuilder,
1344        {
1345            let handles = (0..8)
1346                .map(|i| {
1347                    let c = cache.clone();
1348                    let hints = hints.clone();
1349                    std::thread::spawn(move || {
1350                        let mut rng = SmallRng::seed_from_u64(i);
1351                        for _ in 0..100000 {
1352                            let key = rng.next_u64();
1353                            if let Some(entry) = c.get(&key) {
1354                                assert_eq!(key, *entry);
1355                                drop(entry);
1356                                continue;
1357                            }
1358                            let hint = hints.choose(&mut rng).cloned().unwrap();
1359                            c.insert_with_properties(key, key, TestProperties::default().with_hint(hint));
1360                        }
1361                    })
1362                })
1363                .collect_vec();
1364
1365            handles.into_iter().for_each(|handle| handle.join().unwrap());
1366
1367            assert_eq!(cache.usage(), cache.capacity());
1368        }
1369
1370        #[test_log::test]
1371        fn test_fifo_cache_fuzzy() {
1372            let cache: RawCache<Fifo<u64, u64, TestProperties>, ModHasher> = RawCache::new(RawCacheConfig {
1373                capacity: 256,
1374                shards: 4,
1375                eviction_config: FifoConfig::default(),
1376                hash_builder: Default::default(),
1377                weighter: Arc::new(|_, _| 1),
1378                event_listener: None,
1379                metrics: Arc::new(Metrics::noop()),
1380            });
1381            let hints = vec![Hint::Normal];
1382            fuzzy(cache, hints);
1383        }
1384
1385        #[test_log::test]
1386        fn test_s3fifo_cache_fuzzy() {
1387            let cache: RawCache<S3Fifo<u64, u64, TestProperties>, ModHasher> = RawCache::new(RawCacheConfig {
1388                capacity: 256,
1389                shards: 4,
1390                eviction_config: S3FifoConfig::default(),
1391                hash_builder: Default::default(),
1392                weighter: Arc::new(|_, _| 1),
1393                event_listener: None,
1394                metrics: Arc::new(Metrics::noop()),
1395            });
1396            let hints = vec![Hint::Normal];
1397            fuzzy(cache, hints);
1398        }
1399
1400        #[test_log::test]
1401        fn test_lru_cache_fuzzy() {
1402            let cache: RawCache<Lru<u64, u64, TestProperties>, ModHasher> = RawCache::new(RawCacheConfig {
1403                capacity: 256,
1404                shards: 4,
1405                eviction_config: LruConfig::default(),
1406                hash_builder: Default::default(),
1407                weighter: Arc::new(|_, _| 1),
1408                event_listener: None,
1409                metrics: Arc::new(Metrics::noop()),
1410            });
1411            let hints = vec![Hint::Normal, Hint::Low];
1412            fuzzy(cache, hints);
1413        }
1414
1415        #[test_log::test]
1416        fn test_lfu_cache_fuzzy() {
1417            let cache: RawCache<Lfu<u64, u64, TestProperties>, ModHasher> = RawCache::new(RawCacheConfig {
1418                capacity: 256,
1419                shards: 4,
1420                eviction_config: LfuConfig::default(),
1421                hash_builder: Default::default(),
1422                weighter: Arc::new(|_, _| 1),
1423                event_listener: None,
1424                metrics: Arc::new(Metrics::noop()),
1425            });
1426            let hints = vec![Hint::Normal];
1427            fuzzy(cache, hints);
1428        }
1429
1430        #[test_log::test]
1431        fn test_sieve_cache_fuzzy() {
1432            let cache: RawCache<Sieve<u64, u64, TestProperties>, ModHasher> = RawCache::new(RawCacheConfig {
1433                capacity: 256,
1434                shards: 4,
1435                eviction_config: SieveConfig {},
1436                hash_builder: Default::default(),
1437                weighter: Arc::new(|_, _| 1),
1438                event_listener: None,
1439                metrics: Arc::new(Metrics::noop()),
1440            });
1441            let hints = vec![Hint::Normal];
1442            fuzzy(cache, hints);
1443        }
1444    }
1445}