// ruvix_types/proof_cache_optimized.rs
//! Optimized proof cache with hash table for O(1) lookup (ADR-087 SEC-002).
//!
//! This module implements an optimized proof cache using:
//! - Hash table with open addressing for O(1) lookup
//! - Generation counters for cache line invalidation
//! - Cache-line aligned entries for optimal memory access
//!
//! Performance targets:
//! - Proof cache hit: <100ns
//! - `verify_and_consume`: O(1) with single cache line access
//!
//! Security constraints (inherited from `proof_cache`):
//! - 100ms TTL for cache entries
//! - Single-use nonces
//! - Maximum 64 entries

use crate::proof_cache::{CacheError, PROOF_CACHE_MAX_ENTRIES, PROOF_CACHE_TTL_NS};

/// Cache line size in bytes; entries are sized/aligned to this
/// (see `OptimizedProofEntry` and the compile-time assert below).
const CACHE_LINE_SIZE: usize = 64;

/// Number of hash buckets. Must stay a power of two so bucket selection
/// can use `& BUCKET_MASK` instead of the slower `%`.
const HASH_BUCKETS: usize = 64;

/// Bit mask reducing a hash value to a bucket index
/// (valid only because `HASH_BUCKETS` is a power of two).
const BUCKET_MASK: usize = HASH_BUCKETS - 1;

/// Optimized cache entry, sized and aligned to exactly one cache line so a
/// lookup touches a single line of memory.
#[derive(Debug, Clone, Copy)]
#[repr(C, align(64))]
pub struct OptimizedProofEntry {
    /// Hash of the mutation this proof authorizes.
    pub mutation_hash: [u8; 32],

    /// Single-use nonce for this proof.
    pub nonce: u64,

    /// Timestamp when the entry was inserted (nanoseconds).
    pub inserted_at: u64,

    /// Unique proof identifier returned on successful verification.
    pub proof_id: u32,

    /// Entry state flags (`FLAG_VALID` / `FLAG_CONSUMED` bits).
    pub flags: u32,

    /// Padding so the struct fills the full 64-byte cache line
    /// (32 + 8 + 8 + 4 + 4 + 8 = 64; enforced by a compile-time assert).
    _padding: [u8; 8],
}

51impl OptimizedProofEntry {
52    /// Flag indicating entry is valid.
53    pub const FLAG_VALID: u32 = 1 << 0;
54
55    /// Flag indicating entry has been consumed.
56    pub const FLAG_CONSUMED: u32 = 1 << 1;
57
58    /// Creates a new empty entry.
59    #[inline]
60    #[must_use]
61    pub const fn empty() -> Self {
62        Self {
63            mutation_hash: [0; 32],
64            nonce: 0,
65            inserted_at: 0,
66            proof_id: 0,
67            flags: 0,
68            _padding: [0; 8],
69        }
70    }
71
72    /// Creates a new valid entry.
73    #[inline]
74    #[must_use]
75    pub const fn new(
76        mutation_hash: [u8; 32],
77        nonce: u64,
78        proof_id: u32,
79        inserted_at: u64,
80    ) -> Self {
81        Self {
82            mutation_hash,
83            nonce,
84            inserted_at,
85            proof_id,
86            flags: Self::FLAG_VALID,
87            _padding: [0; 8],
88        }
89    }
90
91    /// Returns true if entry is valid.
92    #[inline]
93    #[must_use]
94    pub const fn is_valid(&self) -> bool {
95        (self.flags & Self::FLAG_VALID) != 0
96    }
97
98    /// Returns true if entry has been consumed.
99    #[inline]
100    #[must_use]
101    pub const fn is_consumed(&self) -> bool {
102        (self.flags & Self::FLAG_CONSUMED) != 0
103    }
104
105    /// Returns true if entry has expired.
106    #[inline]
107    #[must_use]
108    pub const fn is_expired(&self, current_time_ns: u64) -> bool {
109        if current_time_ns < self.inserted_at {
110            return false;
111        }
112        current_time_ns - self.inserted_at > PROOF_CACHE_TTL_NS
113    }
114
115    /// Checks if entry matches hash and nonce.
116    #[inline]
117    #[must_use]
118    pub fn matches(&self, mutation_hash: &[u8; 32], nonce: u64) -> bool {
119        self.nonce == nonce && self.mutation_hash == *mutation_hash
120    }
121
122    /// Marks entry as consumed (single atomic write).
123    #[inline]
124    pub fn consume(&mut self) {
125        self.flags |= Self::FLAG_CONSUMED;
126    }
127
128    /// Invalidates the entry.
129    #[inline]
130    pub fn invalidate(&mut self) {
131        self.flags = 0;
132    }
133}
134
// Compile-time proof that an entry occupies exactly one cache line; a field
// change that alters the size fails the build here instead of silently
// degrading lookup performance.
const _: () = assert!(core::mem::size_of::<OptimizedProofEntry>() == CACHE_LINE_SIZE);

138/// Fast hash function for (mutation_hash, nonce) pairs.
139///
140/// Uses FNV-1a style hashing with good distribution properties.
141#[inline]
142fn hash_key(mutation_hash: &[u8; 32], nonce: u64) -> usize {
143    // FNV-1a constants
144    const FNV_PRIME: u64 = 0x00000100000001B3;
145    const FNV_OFFSET: u64 = 0xcbf29ce484222325;
146
147    let mut hash = FNV_OFFSET;
148
149    // Hash first 8 bytes of mutation_hash (most entropy)
150    for &byte in &mutation_hash[..8] {
151        hash ^= byte as u64;
152        hash = hash.wrapping_mul(FNV_PRIME);
153    }
154
155    // Mix in nonce
156    hash ^= nonce;
157    hash = hash.wrapping_mul(FNV_PRIME);
158
159    // Final mixing
160    hash ^= hash >> 33;
161    hash = hash.wrapping_mul(0xff51afd7ed558ccd);
162    hash ^= hash >> 33;
163
164    (hash as usize) & BUCKET_MASK
165}
166
/// Optimized proof cache with hash table for O(1) operations.
///
/// Uses open addressing with linear probing for collision resolution.
/// Cache-line aligned entries minimize memory access overhead.
#[derive(Debug)]
pub struct OptimizedProofCache {
    /// Fixed-size entry array; each slot is one cache line.
    entries: [OptimizedProofEntry; HASH_BUCKETS],

    /// Number of live (valid, unconsumed, not-yet-reclaimed) entries.
    active_count: usize,

    /// Generation counter, bumped by `clear` for bulk invalidation.
    generation: u64,
}

183impl OptimizedProofCache {
184    /// Creates a new empty optimized proof cache.
185    #[must_use]
186    pub const fn new() -> Self {
187        Self {
188            entries: [OptimizedProofEntry::empty(); HASH_BUCKETS],
189            active_count: 0,
190            generation: 0,
191        }
192    }
193
194    /// Returns the number of active entries.
195    #[inline]
196    #[must_use]
197    pub const fn len(&self) -> usize {
198        self.active_count
199    }
200
201    /// Returns true if cache is empty.
202    #[inline]
203    #[must_use]
204    pub const fn is_empty(&self) -> bool {
205        self.active_count == 0
206    }
207
208    /// Returns true if cache is full.
209    #[inline]
210    #[must_use]
211    pub const fn is_full(&self) -> bool {
212        self.active_count >= PROOF_CACHE_MAX_ENTRIES
213    }
214
215    /// Inserts a new proof into the cache (O(1) amortized).
216    ///
217    /// Uses open addressing with linear probing.
218    #[must_use]
219    pub fn insert(
220        &mut self,
221        mutation_hash: [u8; 32],
222        nonce: u64,
223        proof_id: u32,
224        current_time_ns: u64,
225    ) -> Result<(), CacheError> {
226        // Evict expired entries periodically
227        if self.active_count > HASH_BUCKETS / 2 {
228            self.evict_expired(current_time_ns);
229        }
230
231        if self.active_count >= PROOF_CACHE_MAX_ENTRIES {
232            return Err(CacheError::CacheFull);
233        }
234
235        let start_idx = hash_key(&mutation_hash, nonce);
236
237        // Linear probing
238        for offset in 0..HASH_BUCKETS {
239            let idx = (start_idx + offset) & BUCKET_MASK;
240            let entry = &mut self.entries[idx];
241
242            // Check for duplicate
243            if entry.is_valid() && !entry.is_consumed() && entry.matches(&mutation_hash, nonce) {
244                return Err(CacheError::DuplicateEntry);
245            }
246
247            // Find empty or expired slot
248            if !entry.is_valid() || entry.is_consumed() || entry.is_expired(current_time_ns) {
249                let was_active = entry.is_valid() && !entry.is_consumed();
250                *entry = OptimizedProofEntry::new(mutation_hash, nonce, proof_id, current_time_ns);
251
252                if !was_active {
253                    self.active_count += 1;
254                }
255
256                return Ok(());
257            }
258        }
259
260        Err(CacheError::CacheFull)
261    }
262
263    /// Verifies and consumes a proof (O(1) amortized, <100ns target).
264    ///
265    /// This is the hot path - optimized for:
266    /// - Single hash computation
267    /// - Minimal cache line accesses
268    /// - Single atomic write for consumption
269    #[must_use]
270    pub fn verify_and_consume(
271        &mut self,
272        mutation_hash: &[u8; 32],
273        nonce: u64,
274        current_time_ns: u64,
275    ) -> Result<u32, CacheError> {
276        let start_idx = hash_key(mutation_hash, nonce);
277
278        // Linear probing
279        for offset in 0..HASH_BUCKETS {
280            let idx = (start_idx + offset) & BUCKET_MASK;
281            let entry = &mut self.entries[idx];
282
283            if !entry.is_valid() {
284                // Empty slot means key doesn't exist
285                // (assuming no deletions, which is true for consumed entries)
286                continue;
287            }
288
289            if entry.matches(mutation_hash, nonce) {
290                // Found matching entry
291                if entry.is_consumed() {
292                    entry.invalidate();
293                    self.active_count = self.active_count.saturating_sub(1);
294                    return Err(CacheError::NonceConsumed);
295                }
296
297                if entry.is_expired(current_time_ns) {
298                    entry.invalidate();
299                    self.active_count = self.active_count.saturating_sub(1);
300                    return Err(CacheError::Expired);
301                }
302
303                // Success - consume and return
304                let proof_id = entry.proof_id;
305                entry.invalidate();
306                self.active_count = self.active_count.saturating_sub(1);
307
308                return Ok(proof_id);
309            }
310        }
311
312        Err(CacheError::NotFound)
313    }
314
315    /// Checks if a proof exists without consuming it.
316    #[must_use]
317    pub fn exists(&self, mutation_hash: &[u8; 32], nonce: u64, current_time_ns: u64) -> bool {
318        let start_idx = hash_key(mutation_hash, nonce);
319
320        for offset in 0..HASH_BUCKETS {
321            let idx = (start_idx + offset) & BUCKET_MASK;
322            let entry = &self.entries[idx];
323
324            if !entry.is_valid() {
325                continue;
326            }
327
328            if entry.matches(mutation_hash, nonce) {
329                return !entry.is_consumed() && !entry.is_expired(current_time_ns);
330            }
331        }
332
333        false
334    }
335
336    /// Evicts all expired entries.
337    pub fn evict_expired(&mut self, current_time_ns: u64) {
338        for entry in &mut self.entries {
339            if entry.is_valid() && (entry.is_expired(current_time_ns) || entry.is_consumed()) {
340                entry.invalidate();
341                self.active_count = self.active_count.saturating_sub(1);
342            }
343        }
344    }
345
346    /// Clears all entries.
347    pub fn clear(&mut self) {
348        for entry in &mut self.entries {
349            entry.invalidate();
350        }
351        self.active_count = 0;
352        self.generation += 1;
353    }
354
355    /// Returns the current generation counter.
356    #[inline]
357    #[must_use]
358    pub const fn generation(&self) -> u64 {
359        self.generation
360    }
361}
362
impl Default for OptimizedProofCache {
    /// Equivalent to [`OptimizedProofCache::new`]: an empty cache at generation 0.
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // An entry must be exactly one cache line in both size and alignment.
    #[test]
    fn test_entry_size() {
        assert_eq!(core::mem::size_of::<OptimizedProofEntry>(), 64);
        assert_eq!(core::mem::align_of::<OptimizedProofEntry>(), 64);
    }

    // Happy path: insert, existence check, one successful consume, and
    // rejection of a replayed (already consumed) nonce.
    #[test]
    fn test_insert_and_verify() {
        let mut cache = OptimizedProofCache::new();

        let hash = [42u8; 32];
        let nonce = 12345u64;
        let proof_id = 1u32;
        let time = 1_000_000u64;

        cache.insert(hash, nonce, proof_id, time).unwrap();
        assert_eq!(cache.len(), 1);

        assert!(cache.exists(&hash, nonce, time));

        let result = cache.verify_and_consume(&hash, nonce, time);
        assert_eq!(result, Ok(proof_id));
        assert_eq!(cache.len(), 0);

        // Second verification should fail
        let result = cache.verify_and_consume(&hash, nonce, time);
        assert_eq!(result, Err(CacheError::NotFound));
    }

    // Entries are invisible to `exists` and fail verification with
    // `Expired` once the TTL has elapsed.
    #[test]
    fn test_ttl_expiry() {
        let mut cache = OptimizedProofCache::new();

        let hash = [1u8; 32];
        let nonce = 1u64;
        let insert_time = 1_000_000u64;

        cache.insert(hash, nonce, 1, insert_time).unwrap();

        // Within TTL
        assert!(cache.exists(&hash, nonce, insert_time + 50_000_000));

        // After TTL
        assert!(!cache.exists(&hash, nonce, insert_time + PROOF_CACHE_TTL_NS + 1));

        let result = cache.verify_and_consume(&hash, nonce, insert_time + PROOF_CACHE_TTL_NS + 1);
        assert_eq!(result, Err(CacheError::Expired));
    }

    // Re-inserting a live (hash, nonce) pair must be rejected.
    #[test]
    fn test_duplicate_rejection() {
        let mut cache = OptimizedProofCache::new();

        let hash = [5u8; 32];
        let nonce = 100u64;

        cache.insert(hash, nonce, 1, 0).unwrap();

        let result = cache.insert(hash, nonce, 2, 0);
        assert_eq!(result, Err(CacheError::DuplicateEntry));
    }

    // Sanity check that hash_key spreads varied keys over the bucket array.
    #[test]
    fn test_hash_distribution() {
        // Verify hash function distributes keys reasonably well
        let mut buckets_used = [false; HASH_BUCKETS];

        // Use more varied inputs for better distribution testing
        for i in 0..HASH_BUCKETS * 2 {
            let mut hash = [0u8; 32];
            // Spread entropy across multiple bytes
            hash[0] = (i & 0xFF) as u8;
            hash[1] = ((i >> 8) & 0xFF) as u8;
            hash[2] = (i.wrapping_mul(17)) as u8;
            let bucket = hash_key(&hash, i as u64);
            buckets_used[bucket] = true;
        }

        let used_count = buckets_used.iter().filter(|&&x| x).count();
        // Should use at least 50% of buckets with varied inputs
        // (relaxed from 75% since FNV with small variations may cluster)
        assert!(
            used_count >= HASH_BUCKETS / 2,
            "Hash distribution too clustered: {} of {} buckets used",
            used_count,
            HASH_BUCKETS
        );
    }

    // Colliding keys are placed via linear probing and all remain findable.
    #[test]
    fn test_collision_handling() {
        let mut cache = OptimizedProofCache::new();

        // Insert multiple entries that may hash to same bucket
        for i in 0..32 {
            let mut hash = [0u8; 32];
            hash[0] = i as u8;
            cache.insert(hash, i as u64, i as u32, 0).unwrap();
        }

        assert_eq!(cache.len(), 32);

        // Verify all can be found
        for i in 0..32 {
            let mut hash = [0u8; 32];
            hash[0] = i as u8;
            assert!(cache.exists(&hash, i as u64, 0));
        }
    }

    // evict_expired reclaims every entry past the TTL and resets the count.
    #[test]
    fn test_evict_expired() {
        let mut cache = OptimizedProofCache::new();

        for i in 0..10 {
            let mut hash = [0u8; 32];
            hash[0] = i as u8;
            cache.insert(hash, i as u64, i as u32, 0).unwrap();
        }

        assert_eq!(cache.len(), 10);

        cache.evict_expired(PROOF_CACHE_TTL_NS + 1);
        assert_eq!(cache.len(), 0);
    }
}