// ember_core/memory.rs
//! Memory tracking for the keyspace.
//!
//! Provides byte-level accounting of memory used by entries. Updated
//! on every mutation so the engine can enforce memory limits and
//! report stats without scanning the entire keyspace.

7use crate::types::Value;
8
/// Estimated overhead per entry in the HashMap, in bytes.
///
/// Accounts for: HashMap bucket pointer (8), Entry struct fields
/// (Option<Instant> = 16, last_access Instant = 8, Value enum tag + padding),
/// plus HashMap per-entry bookkeeping.
///
/// This is an approximation measured empirically on x86-64 linux. The exact
/// value varies by platform and compiler version, but precision isn't critical —
/// we use this for eviction triggers and memory reporting, not for correctness.
/// Overestimating is fine (triggers eviction earlier); underestimating could
/// theoretically let memory grow slightly beyond the configured limit.
pub(crate) const ENTRY_OVERHEAD: usize = 96;
21
/// Tracks memory usage for a single keyspace.
///
/// All updates are explicit — callers must call `add` / `remove` on every
/// mutation. This avoids any hidden scanning cost.
#[derive(Debug)]
pub struct MemoryTracker {
    // Estimated total bytes for all tracked entries (keys + values + overhead).
    used_bytes: usize,
    // Number of live keys currently tracked.
    key_count: usize,
}
31
32impl MemoryTracker {
33    /// Creates a tracker with zero usage.
34    pub fn new() -> Self {
35        Self {
36            used_bytes: 0,
37            key_count: 0,
38        }
39    }
40
41    /// Resets tracking to zero. Used by FLUSHDB.
42    pub fn reset(&mut self) {
43        self.used_bytes = 0;
44        self.key_count = 0;
45    }
46
47    /// Returns the current estimated memory usage in bytes.
48    pub fn used_bytes(&self) -> usize {
49        self.used_bytes
50    }
51
52    /// Returns the number of tracked keys.
53    pub fn key_count(&self) -> usize {
54        self.key_count
55    }
56
57    /// Records the addition of a new entry.
58    pub fn add(&mut self, key: &str, value: &Value) {
59        self.used_bytes += entry_size(key, value);
60        self.key_count += 1;
61    }
62
63    /// Records the removal of an entry.
64    pub fn remove(&mut self, key: &str, value: &Value) {
65        let size = entry_size(key, value);
66        self.used_bytes = self.used_bytes.saturating_sub(size);
67        self.key_count = self.key_count.saturating_sub(1);
68    }
69
70    /// Adjusts tracking when a key's value is overwritten.
71    ///
72    /// Removes the old value's contribution and adds the new one.
73    /// Key count stays the same.
74    pub fn replace(&mut self, key: &str, old_value: &Value, new_value: &Value) {
75        let old_size = entry_size(key, old_value);
76        let new_size = entry_size(key, new_value);
77        self.used_bytes = self
78            .used_bytes
79            .saturating_sub(old_size)
80            .saturating_add(new_size);
81    }
82
83    /// Adjusts used bytes for an in-place mutation (e.g. list push/pop)
84    /// without changing the key count.
85    ///
86    /// `old_entry_size` and `new_entry_size` are the full entry sizes
87    /// (as returned by `entry_size`) before and after the mutation.
88    pub fn adjust(&mut self, old_entry_size: usize, new_entry_size: usize) {
89        self.used_bytes = self
90            .used_bytes
91            .saturating_sub(old_entry_size)
92            .saturating_add(new_entry_size);
93    }
94
95    /// Removes an entry with an explicit size, useful when the value has
96    /// already been mutated and the original size was captured beforehand.
97    pub fn remove_with_size(&mut self, size: usize) {
98        self.used_bytes = self.used_bytes.saturating_sub(size);
99        self.key_count = self.key_count.saturating_sub(1);
100    }
101}
102
103impl Default for MemoryTracker {
104    fn default() -> Self {
105        Self::new()
106    }
107}
108
109/// Estimates the total memory footprint of a single entry.
110///
111/// key heap allocation + value bytes + fixed per-entry overhead.
112pub fn entry_size(key: &str, value: &Value) -> usize {
113    key.len() + value_size(value) + ENTRY_OVERHEAD
114}
115
/// Estimated overhead per element in a VecDeque, in bytes.
///
/// Each slot holds a `Bytes` (pointer + len + capacity = 24 bytes on 64-bit)
/// plus VecDeque's internal bookkeeping per slot.
pub(crate) const VECDEQUE_ELEMENT_OVERHEAD: usize = 32;

/// Base overhead for an empty VecDeque (internal buffer pointer + head/len).
pub(crate) const VECDEQUE_BASE_OVERHEAD: usize = 24;
124
125/// Returns the byte size of a value's payload.
126pub fn value_size(value: &Value) -> usize {
127    match value {
128        Value::String(data) => data.len(),
129        Value::List(deque) => {
130            let element_bytes: usize = deque
131                .iter()
132                .map(|b| b.len() + VECDEQUE_ELEMENT_OVERHEAD)
133                .sum();
134            VECDEQUE_BASE_OVERHEAD + element_bytes
135        }
136        Value::SortedSet(ss) => ss.memory_usage(),
137    }
138}
139
#[cfg(test)]
mod tests {
    use super::*;
    use bytes::Bytes;

    /// Builds a `Value::String` fixture from a literal.
    fn make_string(s: &str) -> Value {
        Value::String(Bytes::from(s.to_string()))
    }

    #[test]
    fn new_tracker_is_empty() {
        let tracker = MemoryTracker::new();
        assert_eq!(tracker.key_count(), 0);
        assert_eq!(tracker.used_bytes(), 0);
    }

    #[test]
    fn add_increases_usage() {
        let mut tracker = MemoryTracker::new();
        let value = make_string("hello");
        tracker.add("key", &value);
        assert_eq!(tracker.used_bytes(), entry_size("key", &value));
        assert_eq!(tracker.key_count(), 1);
    }

    #[test]
    fn remove_decreases_usage() {
        let mut tracker = MemoryTracker::new();
        let value = make_string("data");
        tracker.add("k", &value);
        tracker.remove("k", &value);
        assert_eq!(tracker.key_count(), 0);
        assert_eq!(tracker.used_bytes(), 0);
    }

    #[test]
    fn replace_adjusts_usage() {
        let mut tracker = MemoryTracker::new();
        let old = make_string("short");
        let new = make_string("a much longer value here");
        tracker.add("k", &old);

        let usage_before = tracker.used_bytes();
        tracker.replace("k", &old, &new);

        assert_eq!(tracker.key_count(), 1);
        // Replacement is longer than the original, so usage must grow.
        assert!(tracker.used_bytes() > usage_before);
        assert_eq!(tracker.used_bytes(), entry_size("k", &new));
    }

    #[test]
    fn remove_saturates_at_zero() {
        let mut tracker = MemoryTracker::new();
        let value = make_string("x");
        // Removing a key that was never added must not underflow counters.
        tracker.remove("k", &value);
        assert_eq!(tracker.key_count(), 0);
        assert_eq!(tracker.used_bytes(), 0);
    }

    #[test]
    fn entry_size_accounts_for_key_and_value() {
        let value = make_string("test");
        // 5 bytes of key + 4 bytes of value + fixed per-entry overhead.
        assert_eq!(entry_size("mykey", &value), 5 + 4 + ENTRY_OVERHEAD);
    }

    #[test]
    fn list_value_size() {
        let mut items = std::collections::VecDeque::new();
        items.push_back(Bytes::from("hello"));
        items.push_back(Bytes::from("world"));
        let value = Value::List(items);

        // Base overhead plus, per element, its data length and slot overhead.
        let expected = VECDEQUE_BASE_OVERHEAD + 2 * (5 + VECDEQUE_ELEMENT_OVERHEAD);
        assert_eq!(value_size(&value), expected);
    }

    #[test]
    fn empty_list_value_size() {
        let empty = Value::List(std::collections::VecDeque::new());
        assert_eq!(value_size(&empty), VECDEQUE_BASE_OVERHEAD);
    }

    #[test]
    fn multiple_entries() {
        let mut tracker = MemoryTracker::new();
        let first = make_string("aaa");
        let second = make_string("bbbbb");
        tracker.add("k1", &first);
        tracker.add("k2", &second);

        assert_eq!(tracker.key_count(), 2);
        let combined = entry_size("k1", &first) + entry_size("k2", &second);
        assert_eq!(tracker.used_bytes(), combined);

        tracker.remove("k1", &first);
        assert_eq!(tracker.key_count(), 1);
        assert_eq!(tracker.used_bytes(), entry_size("k2", &second));
    }
}