torrust_index_backend/cache/
mod.rs

pub mod image;

use bytes::Bytes;
use indexmap::IndexMap;

#[derive(Debug)]
pub enum Error {
    EntrySizeLimitExceedsTotalCapacity,
    BytesExceedEntrySizeLimit,
    CacheCapacityIsTooSmall,
}

/// An individual entry destined for the byte cache.
#[derive(Debug, Clone)]
pub struct BytesCacheEntry {
    pub bytes: Bytes,
}

impl BytesCacheEntry {
    pub fn new(bytes: Bytes) -> Self {
        Self { bytes }
    }
}

#[allow(clippy::module_name_repetitions)]
pub struct BytesCache {
    bytes_table: IndexMap<String, BytesCacheEntry>,
    total_capacity: usize,
    entry_size_limit: usize,
}

impl BytesCache {
    #[must_use]
    pub fn new() -> Self {
        Self {
            bytes_table: IndexMap::new(),
            total_capacity: 0,
            entry_size_limit: 0,
        }
    }

    /// Creates a new cache with a total capacity in bytes.
    #[must_use]
    pub fn with_capacity(capacity: usize) -> Self {
        let mut new = Self::new();

        new.total_capacity = capacity;

        new
    }

    /// Creates a new cache with a size limit in bytes for individual entries.
    #[must_use]
    pub fn with_entry_size_limit(entry_size_limit: usize) -> Self {
        let mut new = Self::new();

        new.entry_size_limit = entry_size_limit;

        new
    }

    /// Helper to create a new bytes cache with both a total capacity and an individual entry size limit.
    ///
    /// # Errors
    ///
    /// This function will return `Error::EntrySizeLimitExceedsTotalCapacity` if the entry size limit is larger than the total capacity.
    ///
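    /// A minimal usage sketch (marked `ignore`, so it is not compiled as a doc-test):
    ///
    /// ```ignore
    /// // 1 MiB cache, at most 128 KiB per entry.
    /// let cache = BytesCache::with_capacity_and_entry_size_limit(1_048_576, 131_072).unwrap();
    ///
    /// // An entry size limit larger than the total capacity is rejected.
    /// assert!(BytesCache::with_capacity_and_entry_size_limit(10, 100).is_err());
    /// ```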
    pub fn with_capacity_and_entry_size_limit(capacity: usize, entry_size_limit: usize) -> Result<Self, Error> {
        if entry_size_limit > capacity {
            return Err(Error::EntrySizeLimitExceedsTotalCapacity);
        }

        let mut new = Self::new();

        new.total_capacity = capacity;
        new.entry_size_limit = entry_size_limit;

        Ok(new)
    }

    #[allow(clippy::unused_async)]
    pub async fn get(&self, key: &str) -> Option<BytesCacheEntry> {
        self.bytes_table.get(key).cloned()
    }

    /// Returns the number of entries in the cache.
    #[allow(clippy::unused_async)]
    pub async fn len(&self) -> usize {
        self.bytes_table.len()
    }

    #[allow(clippy::unused_async)]
    pub async fn is_empty(&self) -> bool {
        self.bytes_table.is_empty()
    }

    /// Returns the size in bytes of all the entries combined.
    #[must_use]
    pub fn total_size(&self) -> usize {
        self.bytes_table.values().map(|entry| entry.bytes.len()).sum()
    }

    /// Adds an image to the cache.
    ///
    /// # Errors
    ///
    /// This function will return `Error::BytesExceedEntrySizeLimit` if the bytes are larger than the entry size limit, or `Error::CacheCapacityIsTooSmall` if they cannot fit within the total capacity.
    ///
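    /// A call sketch (marked `ignore`, so it is not compiled as a doc-test; assumes an async context such as a tokio task):
    ///
    /// ```ignore
    /// let mut cache = BytesCache::with_capacity_and_entry_size_limit(16, 8).unwrap();
    /// cache.set("avatar".to_string(), Bytes::from("pixels")).await.unwrap();
    /// ```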
    // Insert bytes using key.
    // TODO: Freed space might need to be reserved. Hold and pass a write lock between functions?
    // For the TODO above, a semaphore (`Arc<tokio::sync::Semaphore>`) might be a solution.
    #[allow(clippy::unused_async)]
    pub async fn set(&mut self, key: String, bytes: Bytes) -> Result<Option<BytesCacheEntry>, Error> {
        if bytes.len() > self.entry_size_limit {
            return Err(Error::BytesExceedEntrySizeLimit);
        }

        // Remove the old entry so that the new entry will be added as the last in the queue.
        let _ = self.bytes_table.shift_remove(&key);

        let bytes_cache_entry = BytesCacheEntry::new(bytes);

        self.free_size(bytes_cache_entry.bytes.len())?;

        Ok(self.bytes_table.insert(key, bytes_cache_entry))
    }

    // Frees up `size` bytes by evicting the oldest entries as needed.
    fn free_size(&mut self, size: usize) -> Result<(), Error> {
        // The requested size may not exceed the total capacity of the cache.
        if size > self.total_capacity {
            return Err(Error::CacheCapacityIsTooSmall);
        }

        let cache_size = self.total_size();
        let size_to_be_freed = size.saturating_sub(self.total_capacity - cache_size);
        let mut size_freed: usize = 0;

        while size_freed < size_to_be_freed {
            let oldest_entry = self
                .pop()
                .expect("bytes cache has no more entries, yet there isn't enough space");

            size_freed += oldest_entry.bytes.len();
        }

        Ok(())
    }

    /// Removes and returns the oldest entry.
    pub fn pop(&mut self) -> Option<BytesCacheEntry> {
        self.bytes_table.shift_remove_index(0).map(|(_, entry)| entry)
    }
}

impl Default for BytesCache {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use bytes::Bytes;

    use crate::cache::BytesCache;

    #[tokio::test]
    async fn set_bytes_cache_with_capacity_and_entry_size_limit_should_succeed() {
        let mut bytes_cache = BytesCache::with_capacity_and_entry_size_limit(6, 6).unwrap();
        let bytes: Bytes = Bytes::from("abcdef");

        assert!(bytes_cache.set("1".to_string(), bytes).await.is_ok());
    }

    #[tokio::test]
    async fn given_a_bytes_cache_with_a_capacity_and_entry_size_limit_it_should_allow_adding_new_entries_if_the_limit_is_not_exceeded(
    ) {
        let bytes: Bytes = Bytes::from("abcdef");

        let mut bytes_cache = BytesCache::with_capacity_and_entry_size_limit(bytes.len() * 2, bytes.len()).unwrap();

        // Add first entry (6 bytes)
        assert!(bytes_cache.set("key1".to_string(), bytes.clone()).await.is_ok());

        // Add second entry (6 bytes)
        assert!(bytes_cache.set("key2".to_string(), bytes).await.is_ok());

        // Both entries were added because we did not reach the limit
        assert_eq!(bytes_cache.len().await, 2);
    }

    #[tokio::test]
    async fn given_a_bytes_cache_with_a_capacity_and_entry_size_limit_it_should_not_allow_adding_new_entries_if_the_capacity_is_exceeded(
    ) {
        let bytes: Bytes = Bytes::from("abcdef");

        let mut bytes_cache = BytesCache::with_capacity_and_entry_size_limit(bytes.len() * 2 - 1, bytes.len()).unwrap();

        // Add first entry (6 bytes)
        assert!(bytes_cache.set("key1".to_string(), bytes.clone()).await.is_ok());

        // Add second entry (6 bytes); the first entry is evicted to make room
        assert!(bytes_cache.set("key2".to_string(), bytes).await.is_ok());

        // Only one entry is in the cache, because otherwise the total capacity would have been exceeded
        assert_eq!(bytes_cache.len().await, 1);
    }
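
    // A sketch of the FIFO eviction behavior implied by `free_size` and `pop`:
    // once the total capacity is reached, inserting a new entry should evict
    // the oldest one. The key names here are illustrative.
    #[tokio::test]
    async fn given_a_full_bytes_cache_it_should_evict_the_oldest_entry_when_a_new_one_is_added() {
        let bytes: Bytes = Bytes::from("abcdef");

        let mut bytes_cache = BytesCache::with_capacity_and_entry_size_limit(bytes.len() * 2, bytes.len()).unwrap();

        assert!(bytes_cache.set("key1".to_string(), bytes.clone()).await.is_ok());
        assert!(bytes_cache.set("key2".to_string(), bytes.clone()).await.is_ok());

        // Inserting a third entry exceeds the capacity, so the oldest entry ("key1") should be evicted.
        assert!(bytes_cache.set("key3".to_string(), bytes).await.is_ok());

        assert_eq!(bytes_cache.len().await, 2);
        assert!(bytes_cache.get("key1").await.is_none());
        assert!(bytes_cache.get("key2").await.is_some());
        assert!(bytes_cache.get("key3").await.is_some());
    }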

    #[tokio::test]
    async fn set_bytes_cache_with_capacity_and_entry_size_limit_should_fail() {
        let mut bytes_cache = BytesCache::with_capacity_and_entry_size_limit(6, 5).unwrap();
        let bytes: Bytes = Bytes::from("abcdef");

        assert!(bytes_cache.set("1".to_string(), bytes).await.is_err());
    }
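
    // A sketch of the refresh behavior noted in `set`: re-inserting an existing
    // key moves it to the back of the eviction queue, so a later eviction
    // removes the other entry first. The key names here are illustrative.
    #[tokio::test]
    async fn given_a_bytes_cache_resetting_an_existing_key_should_move_it_to_the_back_of_the_eviction_queue() {
        let bytes: Bytes = Bytes::from("abcdef");

        let mut bytes_cache = BytesCache::with_capacity_and_entry_size_limit(bytes.len() * 2, bytes.len()).unwrap();

        assert!(bytes_cache.set("key1".to_string(), bytes.clone()).await.is_ok());
        assert!(bytes_cache.set("key2".to_string(), bytes.clone()).await.is_ok());

        // Re-inserting "key1" makes "key2" the oldest entry.
        assert!(bytes_cache.set("key1".to_string(), bytes.clone()).await.is_ok());

        // Adding a third entry now evicts "key2" instead of "key1".
        assert!(bytes_cache.set("key3".to_string(), bytes).await.is_ok());

        assert!(bytes_cache.get("key1").await.is_some());
        assert!(bytes_cache.get("key2").await.is_none());
        assert!(bytes_cache.get("key3").await.is_some());
    }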
}