hitbox_moka/builder.rs
//! Builder for configuring [`MokaBackend`].

use std::time::{Duration, Instant};

use chrono::Utc;
use moka::Expiry;
use moka::future::{Cache, CacheBuilder};
use moka::policy::EvictionPolicy;

use crate::backend::MokaBackend;
use hitbox::{BackendLabel, CacheKey, CacheValue, Raw};
use hitbox_backend::format::{Format, JsonFormat};
use hitbox_backend::{CacheKeyFormat, Compressor, PassthroughCompressor};

/// Custom expiration policy that calculates TTL from [`CacheValue::expire`] timestamps.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct Expiration;

impl Expiry<CacheKey, CacheValue<Raw>> for Expiration {
    fn expire_after_create(
        &self,
        _key: &CacheKey,
        value: &CacheValue<Raw>,
        _created_at: Instant,
    ) -> Option<Duration> {
        Self::calculate_ttl(value)
    }

    fn expire_after_update(
        &self,
        _key: &CacheKey,
        value: &CacheValue<Raw>,
        _updated_at: Instant,
        _duration_until_expiry: Option<Duration>,
    ) -> Option<Duration> {
        // IMPORTANT: Always use the NEW value's expiration time.
        //
        // Moka's default `expire_after_update` returns `duration_until_expiry`,
        // which preserves the OLD expiration time. This causes premature expiration
        // when updating a cache entry with a new (longer) TTL.
        Self::calculate_ttl(value)
    }
}

impl Expiration {
    fn calculate_ttl(value: &CacheValue<Raw>) -> Option<Duration> {
        value.expire().map(|expiration| {
            let delta = expiration - Utc::now();
            let millis = delta.num_milliseconds();
            if millis <= 0 {
                Duration::ZERO
            } else {
                Duration::from_millis(millis as u64)
            }
        })
    }
}

/// Marker type: capacity has not been configured yet.
///
/// This is the initial state of a [`MokaBackendBuilder`]. You must call either
/// [`max_entries()`](MokaBackendBuilder::max_entries) or
/// [`max_bytes()`](MokaBackendBuilder::max_bytes) before calling `build()`.
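///
/// The missing-capacity state is enforced at compile time; a minimal sketch of
/// what the typestate rejects:
///
/// ```compile_fail
/// use hitbox_moka::MokaBackend;
///
/// // Does not compile: `build()` only exists after `max_entries()` or `max_bytes()`.
/// let backend = MokaBackend::builder().build();
/// ```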
#[derive(Debug, Clone, Copy, Default)]
pub struct NoCapacity;

/// Marker type: entry-count capacity has been configured.
///
/// The cache will hold at most `n` entries, evicting least recently used
/// entries when capacity is exceeded.
#[derive(Debug, Clone, Copy)]
pub struct EntryCapacity(pub(crate) u64);

/// Marker type: byte-based capacity has been configured.
///
/// The cache will use at most `n` bytes (approximate), evicting least recently
/// used entries when the memory budget is exceeded.
#[derive(Debug, Clone, Copy)]
pub struct ByteCapacity(pub(crate) u64);

/// Builder for creating and configuring a [`MokaBackend`].
///
/// Use [`MokaBackend::builder`] to create a new builder instance.
///
/// # Capacity Configuration (Required)
///
/// You must configure capacity using exactly one of:
/// - [`max_entries(n)`](Self::max_entries) - limit by entry count
/// - [`max_bytes(n)`](Self::max_bytes) - limit by approximate memory usage
///
/// These methods use the typestate pattern to enforce compile-time guarantees:
/// - `build()` is only available after setting capacity
/// - You cannot set both entry and byte limits
///
/// # Examples
///
/// Entry-based capacity:
///
/// ```
/// use hitbox_moka::MokaBackend;
///
/// let backend = MokaBackend::builder()
///     .max_entries(10_000)
///     .build();
/// ```
///
/// Byte-based capacity (100 MB):
///
/// ```
/// use hitbox_moka::MokaBackend;
///
/// let backend = MokaBackend::builder()
///     .max_bytes(100 * 1024 * 1024)
///     .build();
/// ```
///
/// With custom configuration:
///
/// ```ignore
/// use hitbox_moka::MokaBackend;
/// use hitbox_backend::format::BincodeFormat;
/// use hitbox_backend::{CacheKeyFormat, GzipCompressor};
///
/// let backend = MokaBackend::builder()
///     .label("sessions")
///     .max_bytes(50_000_000)
///     .key_format(CacheKeyFormat::UrlEncoded)
///     .value_format(BincodeFormat)
///     .compressor(GzipCompressor::default())
///     .build();
/// ```
///
/// **Note:** `GzipCompressor` and `ZstdCompressor` require enabling the `gzip`
/// or `zstd` feature on `hitbox-backend`.
pub struct MokaBackendBuilder<Cap, S = JsonFormat, C = PassthroughCompressor>
where
    S: Format,
    C: Compressor,
{
    capacity: Cap,
    key_format: CacheKeyFormat,
    serializer: S,
    compressor: C,
    label: BackendLabel,
    eviction_policy: Option<EvictionPolicy>,
}

impl MokaBackendBuilder<NoCapacity, JsonFormat, PassthroughCompressor> {
    /// Creates a new builder with no capacity configured.
    ///
    /// You must call [`max_entries()`](Self::max_entries) or
    /// [`max_bytes()`](Self::max_bytes) before calling `build()`.
    pub fn new() -> Self {
        Self {
            capacity: NoCapacity,
            key_format: CacheKeyFormat::Bitcode,
            serializer: JsonFormat,
            compressor: PassthroughCompressor,
            label: BackendLabel::new_static("moka"),
            eviction_policy: None,
        }
    }
}

impl Default for MokaBackendBuilder<NoCapacity, JsonFormat, PassthroughCompressor> {
    fn default() -> Self {
        Self::new()
    }
}

impl<S, C> MokaBackendBuilder<NoCapacity, S, C>
where
    S: Format,
    C: Compressor,
{
    /// Sets the maximum number of entries the cache can hold.
    ///
    /// When the cache exceeds this capacity, least recently used entries are
    /// evicted.
    ///
    /// # Example
    ///
    /// ```
    /// use hitbox_moka::MokaBackend;
    ///
    /// let backend = MokaBackend::builder()
    ///     .max_entries(10_000)
    ///     .build();
    /// ```
    pub fn max_entries(self, capacity: u64) -> MokaBackendBuilder<EntryCapacity, S, C> {
        MokaBackendBuilder {
            capacity: EntryCapacity(capacity),
            key_format: self.key_format,
            serializer: self.serializer,
            compressor: self.compressor,
            label: self.label,
            eviction_policy: self.eviction_policy,
        }
    }

    /// Sets the maximum memory budget in bytes.
    ///
    /// The cache will use approximately this many bytes for stored values,
    /// evicting least recently used entries when the budget is exceeded.
    ///
    /// The byte count includes:
    /// - Serialized value data
    /// - Fixed overhead estimate for keys and metadata (~112 bytes per entry)
    ///
    /// # Example
    ///
    /// ```
    /// use hitbox_moka::MokaBackend;
    ///
    /// // 100 MB cache
    /// let backend = MokaBackend::builder()
    ///     .max_bytes(100 * 1024 * 1024)
    ///     .build();
    /// ```
    pub fn max_bytes(self, bytes: u64) -> MokaBackendBuilder<ByteCapacity, S, C> {
        MokaBackendBuilder {
            capacity: ByteCapacity(bytes),
            key_format: self.key_format,
            serializer: self.serializer,
            compressor: self.compressor,
            label: self.label,
            eviction_policy: self.eviction_policy,
        }
    }
}

impl<Cap, S, C> MokaBackendBuilder<Cap, S, C>
where
    S: Format,
    C: Compressor,
{
    /// Sets a custom label for this backend.
    ///
    /// The label identifies this backend in multi-tier cache compositions and
    /// appears in metrics and debug output.
    ///
    /// # Default
    ///
    /// `"moka"`
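    ///
    /// # Example
    ///
    /// A minimal sketch giving the backend a custom label:
    ///
    /// ```ignore
    /// use hitbox_moka::MokaBackend;
    ///
    /// let backend = MokaBackend::builder()
    ///     .label("sessions")
    ///     .max_entries(1_000)
    ///     .build();
    /// ```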
    pub fn label(mut self, label: impl Into<BackendLabel>) -> Self {
        self.label = label.into();
        self
    }

    /// Sets the cache key serialization format.
    ///
    /// The key format determines how [`CacheKey`] values are serialized for
    /// storage. This affects key size and debuggability.
    ///
    /// # Default
    ///
    /// [`CacheKeyFormat::Bitcode`]
    ///
    /// # Options
    ///
    /// | Format | Size | Human-readable |
    /// |--------|------|----------------|
    /// | [`Bitcode`](CacheKeyFormat::Bitcode) | Compact | No |
    /// | [`UrlEncoded`](CacheKeyFormat::UrlEncoded) | Larger | Yes |
    ///
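    /// # Example
    ///
    /// A sketch switching to human-readable keys, which can help when
    /// inspecting cache contents during debugging:
    ///
    /// ```ignore
    /// use hitbox_backend::CacheKeyFormat;
    /// use hitbox_moka::MokaBackend;
    ///
    /// let backend = MokaBackend::builder()
    ///     .max_entries(10_000)
    ///     .key_format(CacheKeyFormat::UrlEncoded)
    ///     .build();
    /// ```
    ///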
    /// [`CacheKey`]: hitbox_core::CacheKey
    pub fn key_format(mut self, format: CacheKeyFormat) -> Self {
        self.key_format = format;
        self
    }

    /// Sets the eviction policy for the cache.
    ///
    /// The eviction policy determines how entries are selected for removal when
    /// the cache reaches capacity.
    ///
    /// # Default
    ///
    /// - **Entry-based capacity** ([`max_entries`]): [`EvictionPolicy::tiny_lfu()`] -
    ///   combines LRU eviction with LFU admission for optimal hit rates
    /// - **Byte-based capacity** ([`max_bytes`]): [`EvictionPolicy::lru()`] -
    ///   pure LRU for predictable eviction behavior with weighted entries
    ///
    /// # Options
    ///
    /// | Policy | Description | Best for |
    /// |--------|-------------|----------|
    /// | [`tiny_lfu()`](EvictionPolicy::tiny_lfu) | LRU eviction + LFU admission | General caching, web workloads |
    /// | [`lru()`](EvictionPolicy::lru) | Pure least-recently-used | Recency-biased, streaming data |
    ///
    /// # Example
    ///
    /// ```
    /// use hitbox_moka::{MokaBackend, EvictionPolicy};
    ///
    /// // Use TinyLFU with byte-based capacity (overriding default LRU)
    /// let backend = MokaBackend::builder()
    ///     .max_bytes(100 * 1024 * 1024)
    ///     .eviction_policy(EvictionPolicy::tiny_lfu())
    ///     .build();
    /// ```
    ///
    /// [`max_entries`]: Self::max_entries
    /// [`max_bytes`]: Self::max_bytes
    pub fn eviction_policy(mut self, policy: EvictionPolicy) -> Self {
        self.eviction_policy = Some(policy);
        self
    }

    /// Sets the cache value serialization format.
    ///
    /// The value format determines how cached data is serialized before storage.
    ///
    /// # Default
    ///
    /// [`JsonFormat`]
    ///
    /// # Options
    ///
    /// | Format | Speed | Size | Human-readable |
    /// |--------|-------|------|----------------|
    /// | [`JsonFormat`] | Slow | Large | Yes |
    /// | [`BincodeFormat`](hitbox_backend::format::BincodeFormat) | Fast | Compact | No |
    /// | [`RonFormat`](hitbox_backend::format::RonFormat) | Medium | Medium | Yes |
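    ///
    /// # Example
    ///
    /// A sketch swapping the default JSON format for the compact binary
    /// `BincodeFormat` linked above:
    ///
    /// ```ignore
    /// use hitbox_backend::format::BincodeFormat;
    /// use hitbox_moka::MokaBackend;
    ///
    /// let backend = MokaBackend::builder()
    ///     .max_entries(10_000)
    ///     .value_format(BincodeFormat)
    ///     .build();
    /// ```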
    pub fn value_format<NewS>(self, serializer: NewS) -> MokaBackendBuilder<Cap, NewS, C>
    where
        NewS: Format,
    {
        MokaBackendBuilder {
            capacity: self.capacity,
            key_format: self.key_format,
            serializer,
            compressor: self.compressor,
            label: self.label,
            eviction_policy: self.eviction_policy,
        }
    }

    /// Sets the compression strategy for cache values.
    ///
    /// Compression reduces memory usage at the cost of CPU time. For in-memory
    /// caches like Moka, compression is typically **not recommended** since
    /// memory access is fast and compression adds latency.
    ///
    /// # Default
    ///
    /// [`PassthroughCompressor`] (no compression)
    ///
    /// # Options
    ///
    /// | Compressor | Ratio | Speed | Feature flag |
    /// |------------|-------|-------|--------------|
    /// | [`PassthroughCompressor`] | None | Fastest | — |
    /// | [`GzipCompressor`] | Good | Medium | `gzip` |
    /// | [`ZstdCompressor`] | Best | Fast | `zstd` |
    ///
    /// # When to Use Compression
    ///
    /// - Large cached values (>10KB)
    /// - Memory-constrained environments
    /// - When composing with network backends (compression done once, reused)
    ///
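    /// # Example
    ///
    /// A sketch enabling gzip compression (requires the `gzip` feature on
    /// `hitbox-backend`):
    ///
    /// ```ignore
    /// use hitbox_backend::GzipCompressor;
    /// use hitbox_moka::MokaBackend;
    ///
    /// let backend = MokaBackend::builder()
    ///     .max_bytes(100 * 1024 * 1024)
    ///     .compressor(GzipCompressor::default())
    ///     .build();
    /// ```
    ///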
    /// [`PassthroughCompressor`]: hitbox_backend::PassthroughCompressor
    /// [`GzipCompressor`]: https://docs.rs/hitbox-backend/latest/hitbox_backend/struct.GzipCompressor.html
    /// [`ZstdCompressor`]: https://docs.rs/hitbox-backend/latest/hitbox_backend/struct.ZstdCompressor.html
    pub fn compressor<NewC>(self, compressor: NewC) -> MokaBackendBuilder<Cap, S, NewC>
    where
        NewC: Compressor,
    {
        MokaBackendBuilder {
            capacity: self.capacity,
            key_format: self.key_format,
            serializer: self.serializer,
            compressor,
            label: self.label,
            eviction_policy: self.eviction_policy,
        }
    }
}

impl<S, C> MokaBackendBuilder<EntryCapacity, S, C>
where
    S: Format,
    C: Compressor,
{
    /// Builds the [`MokaBackend`] with entry-count based capacity.
    ///
    /// Consumes the builder and returns a fully configured backend ready for use.
    pub fn build(self) -> MokaBackend<S, C> {
        let policy = self
            .eviction_policy
            .unwrap_or_else(EvictionPolicy::tiny_lfu);
        let cache: Cache<CacheKey, CacheValue<Raw>> = CacheBuilder::new(self.capacity.0)
            .eviction_policy(policy)
            .expire_after(Expiration)
            .build();

        MokaBackend {
            cache,
            key_format: self.key_format,
            serializer: self.serializer,
            compressor: self.compressor,
            label: self.label,
        }
    }
}

impl<S, C> MokaBackendBuilder<ByteCapacity, S, C>
where
    S: Format + 'static,
    C: Compressor + 'static,
{
    /// Builds the [`MokaBackend`] with byte-based capacity.
    ///
    /// Consumes the builder and returns a fully configured backend ready for use.
    /// The cache will use a weigher function to track approximate memory usage.
    ///
    /// Note: Default eviction policy is LRU (not TinyLFU) to ensure predictable
    /// eviction behavior with weighted capacity. TinyLFU's admission policy can
    /// reject new entries even when eviction could make room. Override with
    /// [`eviction_policy()`](MokaBackendBuilder::eviction_policy) if needed.
    pub fn build(self) -> MokaBackend<S, C> {
        let policy = self.eviction_policy.unwrap_or_else(EvictionPolicy::lru);
        let cache: Cache<CacheKey, CacheValue<Raw>> = CacheBuilder::new(self.capacity.0)
            .weigher(Self::byte_weigher)
            .eviction_policy(policy)
            .expire_after(Expiration)
            .build();

        MokaBackend {
            cache,
            key_format: self.key_format,
            serializer: self.serializer,
            compressor: self.compressor,
            label: self.label,
        }
    }

    /// Weigher function that calculates the approximate byte cost of a cache entry.
    ///
    /// Uses the precalculated `memory_size()` methods on `CacheKey` and `CacheValue`
    /// which account for struct overhead and variable-length content.
    fn byte_weigher(key: &CacheKey, value: &CacheValue<Raw>) -> u32 {
        (key.memory_size() + value.memory_size()).min(u32::MAX as usize) as u32
    }
}