rust_rocksdb/
cache.rs

1use crate::{ffi, LruCacheOptions};
2use libc::size_t;
3use std::ptr::NonNull;
4use std::sync::Arc;
5
/// Internal owner of the raw RocksDB cache handle.
///
/// Wrapped in an `Arc` by [`Cache`]; the handle is released via
/// `rocksdb_cache_destroy` when the last reference is dropped.
pub(crate) struct CacheWrapper {
    // Non-null pointer to the C-side cache object; freed in `Drop`.
    pub(crate) inner: NonNull<ffi::rocksdb_cache_t>,
}
9
// SAFETY: the wrapper only holds a raw pointer to a RocksDB cache object,
// which RocksDB documents as internally synchronized (safe for concurrent
// use from multiple threads), so the handle may be sent and shared freely.
unsafe impl Send for CacheWrapper {}
unsafe impl Sync for CacheWrapper {}
12
impl Drop for CacheWrapper {
    /// Releases the underlying C cache object.
    fn drop(&mut self) {
        // SAFETY: `inner` is a valid handle produced by one of the
        // `rocksdb_cache_create_*` functions, and it is destroyed exactly
        // once here — `drop` runs only when the last `Arc<CacheWrapper>`
        // reference goes away.
        unsafe {
            ffi::rocksdb_cache_destroy(self.inner.as_ptr());
        }
    }
}
20
/// A shared, reference-counted handle to a RocksDB block cache.
///
/// Cloning is cheap (an `Arc` clone): all clones refer to the same
/// underlying cache, which is destroyed when the last clone is dropped.
#[derive(Clone)]
pub struct Cache(pub(crate) Arc<CacheWrapper>);
23
24impl Cache {
25    /// Creates an LRU cache with capacity in bytes.
26    pub fn new_lru_cache(capacity: size_t) -> Cache {
27        let inner = NonNull::new(unsafe { ffi::rocksdb_cache_create_lru(capacity) }).unwrap();
28        Cache(Arc::new(CacheWrapper { inner }))
29    }
30
31    /// Creates an LRU cache with custom options.
32    pub fn new_lru_cache_opts(opts: &LruCacheOptions) -> Cache {
33        let inner =
34            NonNull::new(unsafe { ffi::rocksdb_cache_create_lru_opts(opts.inner) }).unwrap();
35        Cache(Arc::new(CacheWrapper { inner }))
36    }
37
38    /// Creates a HyperClockCache with capacity in bytes.
39    ///
40    /// `estimated_entry_charge` is an important tuning parameter. The optimal
41    /// choice at any given time is
42    /// `(cache.get_usage() - 64 * cache.get_table_address_count()) /
43    /// cache.get_occupancy_count()`, or approximately `cache.get_usage() /
44    /// cache.get_occupancy_count()`.
45    ///
46    /// However, the value cannot be changed dynamically, so as the cache
47    /// composition changes at runtime, the following tradeoffs apply:
48    ///
49    /// * If the estimate is substantially too high (e.g., 25% higher),
50    ///   the cache may have to evict entries to prevent load factors that
51    ///   would dramatically affect lookup times.
52    /// * If the estimate is substantially too low (e.g., less than half),
53    ///   then meta data space overhead is substantially higher.
54    ///
55    /// The latter is generally preferable, and picking the larger of
56    /// block size and meta data block size is a reasonable choice that
57    /// errs towards this side.
58    pub fn new_hyper_clock_cache(capacity: size_t, estimated_entry_charge: size_t) -> Cache {
59        Cache(Arc::new(CacheWrapper {
60            inner: NonNull::new(unsafe {
61                ffi::rocksdb_cache_create_hyper_clock(capacity, estimated_entry_charge)
62            })
63            .unwrap(),
64        }))
65    }
66
67    /// Returns the cache memory usage in bytes.
68    pub fn get_usage(&self) -> usize {
69        unsafe { ffi::rocksdb_cache_get_usage(self.0.inner.as_ptr()) }
70    }
71
72    /// Returns the pinned memory usage in bytes.
73    pub fn get_pinned_usage(&self) -> usize {
74        unsafe { ffi::rocksdb_cache_get_pinned_usage(self.0.inner.as_ptr()) }
75    }
76
77    /// Sets cache capacity in bytes.
78    pub fn set_capacity(&mut self, capacity: size_t) {
79        unsafe {
80            ffi::rocksdb_cache_set_capacity(self.0.inner.as_ptr(), capacity);
81        }
82    }
83}