zipora 3.1.5

High-performance Rust implementation providing advanced data structures and compression algorithms with memory safety guarantees. Features an LRU page cache with a sophisticated caching layer, fiber-based concurrency, real-time compression, secure memory pools, SIMD optimizations, and a complete C FFI for migration from C++.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
//! Cache-aware blob store implementation

use super::BlobStore;
use crate::RecordId;
use crate::cache::{LruPageCache, CacheBuffer, PageCacheConfig, FileId};
use crate::error::Result;
use std::sync::Arc;
//use std::io::{Read, Seek, SeekFrom};

/// Strategy controlling how blob writes interact with the page cache.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum CacheWriteStrategy {
    /// Write-through: write to store and cache simultaneously
    WriteThrough,
    /// Write-back: write to cache first, defer store writes
    WriteBack,
    /// Write-around: write to store only, bypass cache
    WriteAround,
}

impl Default for CacheWriteStrategy {
    /// Write-through is the default strategy, matching the behavior of
    /// `CachedBlobStore::new` and `CachedBlobStore::with_cache`.
    fn default() -> Self {
        Self::WriteThrough
    }
}

/// Cache-aware blob store wrapper
///
/// Wraps any [`BlobStore`] and layers an [`LruPageCache`] in front of it.
/// Blob placement is tracked in an in-memory offset/size table so reads can
/// be served from cached pages when possible.
pub struct CachedBlobStore<T> {
    /// Underlying blob store
    inner: T,
    
    /// Page cache for performance
    cache: Arc<LruPageCache>,
    
    /// File ID for cache operations
    file_id: FileId,
    
    /// Whether the cache layer is consulted; when false, all operations go
    /// straight to `inner`.
    cache_enabled: bool,
    
    /// Write strategy for cache operations
    write_strategy: CacheWriteStrategy,
    
    /// Cache of blob metadata (offset, size) for better integration
    blob_metadata: std::sync::Mutex<std::collections::HashMap<RecordId, (u64, usize)>>,
    
    /// Next offset for new blobs (simple allocation strategy)
    next_offset: std::sync::atomic::AtomicU64,
}

impl<T: BlobStore> CachedBlobStore<T> {
    /// Create new cached blob store with write-through strategy
    pub fn new(inner: T, cache_config: PageCacheConfig) -> Result<Self> {
        Self::with_write_strategy(inner, cache_config, CacheWriteStrategy::WriteThrough)
    }
    
    /// Create new cached blob store with specified write strategy
    ///
    /// Builds a dedicated [`LruPageCache`] from `cache_config`; fails if the
    /// cache cannot be constructed or file registration is rejected.
    pub fn with_write_strategy(inner: T, cache_config: PageCacheConfig, strategy: CacheWriteStrategy) -> Result<Self> {
        let cache = Arc::new(LruPageCache::new(cache_config)?);
        
        // Register with cache (use dummy file descriptor for now)
        // NOTE(review): -1 is a placeholder, not a real fd — confirm the
        // cache tolerates it on the read/prefetch/flush paths used below.
        let file_id = cache.register_file(-1)?;
        
        Ok(Self {
            inner,
            cache,
            file_id,
            cache_enabled: true, // caching starts enabled
            write_strategy: strategy,
            blob_metadata: std::sync::Mutex::new(std::collections::HashMap::new()),
            next_offset: std::sync::atomic::AtomicU64::new(0),
        })
    }
    
    /// Create with existing cache
    ///
    /// Uses the write-through strategy; see [`Self::with_cache_and_strategy`].
    pub fn with_cache(inner: T, cache: Arc<LruPageCache>) -> Result<Self> {
        Self::with_cache_and_strategy(inner, cache, CacheWriteStrategy::WriteThrough)
    }
    
    /// Create with existing cache and write strategy
    ///
    /// Registers this store as a new "file" in the shared cache so its pages
    /// are tracked independently of other users of the same cache.
    pub fn with_cache_and_strategy(inner: T, cache: Arc<LruPageCache>, strategy: CacheWriteStrategy) -> Result<Self> {
        // Same placeholder fd as `with_write_strategy` — see note there.
        let file_id = cache.register_file(-1)?;
        
        Ok(Self {
            inner,
            cache,
            file_id,
            cache_enabled: true,
            write_strategy: strategy,
            blob_metadata: std::sync::Mutex::new(std::collections::HashMap::new()),
            next_offset: std::sync::atomic::AtomicU64::new(0),
        })
    }
    
    /// Disable caching for this store
    ///
    /// Subsequent reads/writes bypass the cache; the cache itself is kept.
    pub fn disable_cache(&mut self) {
        self.cache_enabled = false;
    }
    
    /// Enable caching for this store
    pub fn enable_cache(&mut self) {
        self.cache_enabled = true;
    }
    
    /// Set write strategy
    ///
    /// Takes effect for subsequent writes; already-written blobs are unaffected.
    pub fn set_write_strategy(&mut self, strategy: CacheWriteStrategy) {
        self.write_strategy = strategy;
    }
    
    /// Get current write strategy
    pub fn write_strategy(&self) -> CacheWriteStrategy {
        self.write_strategy
    }
    
    /// Get cache statistics
    pub fn cache_stats(&self) -> crate::cache::CacheStatsSnapshot {
        self.cache.stats()
    }
    
    /// Get underlying blob store
    pub fn inner(&self) -> &T {
        &self.inner
    }
    
    /// Get mutable reference to underlying store
    ///
    /// Note: writes made directly through this reference are NOT reflected
    /// in the blob metadata table or the cache.
    pub fn inner_mut(&mut self) -> &mut T {
        &mut self.inner
    }
    
    /// Prefetch data for better cache performance
    ///
    /// No-op when caching is disabled.
    pub fn prefetch_range(&self, start_offset: u64, length: usize) -> Result<()> {
        if self.cache_enabled {
            self.cache.prefetch(self.file_id, start_offset, length)?;
        }
        Ok(())
    }
    
    /// Read with cache awareness
    ///
    /// Returns a [`CacheBuffer`]; callers must check `has_data()` before
    /// trusting the contents.
    fn read_cached(&self, offset: u64, length: usize) -> Result<CacheBuffer> {
        if self.cache_enabled {
            self.cache.read(self.file_id, offset, length)
        } else {
            // Fallback to direct read
            // NOTE(review): this returns a zero-filled buffer, not real data.
            // Callers in this file only reach read_cached when caching is
            // enabled, so the branch is currently dead — verify before reuse.
            let data = vec![0u8; length];
            // TODO: This would need actual file I/O integration
            Ok(CacheBuffer::from_data(data))
        }
    }
    
    /// Write data to cache and/or store based on strategy
    ///
    /// NOTE(review): this helper is not called anywhere in this file — the
    /// `BlobStore::put` implementation below re-implements the same logic
    /// (and obtains the RecordId from `inner.put` instead of trusting a
    /// caller-supplied `id`). Candidate for removal or consolidation.
    fn write_cached(&mut self, id: RecordId, data: &[u8]) -> Result<()> {
        // Reserve a contiguous offset range for cache addressing; note the
        // range is consumed even if the store write below fails.
        let offset = self.next_offset.fetch_add(data.len() as u64, std::sync::atomic::Ordering::Relaxed);
        
        match self.write_strategy {
            CacheWriteStrategy::WriteThrough => {
                // Write to both store and cache
                self.inner.put(data)?;
                if self.cache_enabled {
                    // Cache the written data
                    self.cache_data_at_offset(offset, data)?;
                }
            }
            CacheWriteStrategy::WriteBack => {
                // Write to cache first, defer store write
                if self.cache_enabled {
                    self.cache_data_at_offset(offset, data)?;
                    // Mark as dirty (would need dirty tracking in real implementation)
                }
                // In a real implementation, store write would be deferred;
                // here the store write still happens immediately.
                self.inner.put(data)?;
            }
            CacheWriteStrategy::WriteAround => {
                // Write to store only, bypass cache
                self.inner.put(data)?;
                // Don't cache the data
            }
        }
        
        // Update metadata
        {
            let mut metadata = self.blob_metadata.lock()
                .map_err(|_| crate::error::ZiporaError::invalid_data("Metadata lock poisoned".to_string()))?;
            metadata.insert(id, (offset, data.len()));
        }
        
        Ok(())
    }
    
    /// Cache data at specific offset (helper method)
    ///
    /// Marks every page overlapped by `[offset, offset + data.len())` dirty.
    /// `saturating_sub(1)` makes the end offset inclusive; a zero-length
    /// write still marks the page containing `offset`.
    fn cache_data_at_offset(&self, offset: u64, data: &[u8]) -> Result<()> {
        // Mark affected pages as dirty since we're writing new data
        let start_page = crate::cache::FileManager::offset_to_page_id(offset);
        let end_offset = offset + data.len() as u64;
        let end_page = crate::cache::FileManager::offset_to_page_id(end_offset.saturating_sub(1));
        
        for page_id in start_page..=end_page {
            self.cache.mark_dirty(self.file_id, page_id)?;
        }
        
        // In a real implementation, we'd write the data to the cache pages
        // For now, this marks the operation as successful
        Ok(())
    }
    
    /// Invalidate cached data for a blob
    ///
    /// No-op when the blob's location is unknown (no metadata entry).
    fn invalidate_cached_blob(&self, id: RecordId) -> Result<()> {
        if let Some((offset, size)) = self.get_blob_metadata(id)? {
            self.invalidate_range(offset, size)?;
        }
        Ok(())
    }
    
    /// Invalidate cache range
    fn invalidate_range(&self, offset: u64, size: usize) -> Result<()> {
        if !self.cache_enabled {
            return Ok(());
        }
        
        // Use the cache's built-in invalidation functionality
        self.cache.invalidate_range(self.file_id, offset, size)?;
        
        Ok(())
    }
    
    /// Flush dirty pages to ensure data persistence
    pub fn flush(&self) -> Result<()> {
        if self.cache_enabled {
            self.cache.flush_file(self.file_id)?;
        }
        Ok(())
    }
    
    /// Get cache invalidation statistics
    ///
    /// NOTE(review): stub — always returns (0, 0) regardless of activity.
    pub fn invalidation_stats(&self) -> Result<(usize, usize)> {
        // Return (invalidated_count, dirty_count) - simplified implementation
        // In a real implementation, we'd get this from the cache
        Ok((0, 0))
    }
    
    /// Get blob metadata (offset and size)
    ///
    /// Errors only if the metadata mutex was poisoned by a panic.
    fn get_blob_metadata(&self, id: RecordId) -> Result<Option<(u64, usize)>> {
        let metadata = self.blob_metadata.lock()
            .map_err(|_| crate::error::ZiporaError::invalid_data("Metadata lock poisoned".to_string()))?;
        Ok(metadata.get(&id).copied())
    }
}

impl<T: BlobStore> BlobStore for CachedBlobStore<T> {
    /// Delegate size lookup to the underlying store.
    fn size(&self, id: RecordId) -> Result<Option<usize>> {
        self.inner.size(id)
    }

    /// Store a blob, populating the cache according to the write strategy.
    fn put(&mut self, data: &[u8]) -> Result<RecordId> {
        // Write to the underlying store first: a store failure must not leak
        // a reserved offset range or leave stale metadata behind.
        let id = self.inner.put(data)?;

        // Reserve a contiguous offset range for cache addressing only after
        // the store write succeeded.
        let offset = self.next_offset.fetch_add(data.len() as u64, std::sync::atomic::Ordering::Relaxed);

        // Write-through and write-back both populate the cache after the
        // store write (the arms were identical); write-around deliberately
        // bypasses the cache.
        if self.cache_enabled && self.write_strategy != CacheWriteStrategy::WriteAround {
            self.cache_data_at_offset(offset, data)?;
        }

        // Record where this blob lives so future reads can hit the cache.
        {
            let mut metadata = self.blob_metadata.lock()
                .map_err(|_| crate::error::ZiporaError::invalid_data("Metadata lock poisoned".to_string()))?;
            metadata.insert(id, (offset, data.len()));
        }

        Ok(id)
    }

    /// Fetch a blob, preferring cached pages when available.
    fn get(&self, id: RecordId) -> Result<Vec<u8>> {
        if !self.cache_enabled {
            return self.inner.get(id);
        }

        // Serve from the cache when we know where the blob was placed and
        // the cached pages actually contain data. A cache error is not
        // fatal; we simply fall through to the underlying store.
        if let Some((offset, size)) = self.get_blob_metadata(id)? {
            if let Ok(buffer) = self.read_cached(offset, size) {
                if buffer.has_data() {
                    return Ok(buffer.data().to_vec());
                }
            }
        }

        // Cache miss or no metadata: fall back to the underlying store.
        // NOTE(review): populating the cache on read (for write-back) is
        // still unimplemented.
        self.inner.get(id)
    }

    /// Remove a blob, invalidating any cached pages first.
    fn remove(&mut self, id: RecordId) -> Result<()> {
        // Invalidate cached pages before the blob disappears from the store
        // so stale cached data cannot be observed afterwards.
        self.invalidate_cached_blob(id)?;

        self.inner.remove(id)?;

        // Drop the location entry only after the store removal succeeded.
        {
            let mut metadata = self.blob_metadata.lock()
                .map_err(|_| crate::error::ZiporaError::invalid_data("Metadata lock poisoned".to_string()))?;
            metadata.remove(&id);
        }

        Ok(())
    }

    /// Delegate existence check to the underlying store.
    fn contains(&self, id: RecordId) -> bool {
        self.inner.contains(id)
    }

    /// Delegate blob count to the underlying store.
    fn len(&self) -> usize {
        self.inner.len()
    }

    /// Delegate emptiness check to the underlying store.
    fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }
}
/// Cache statistics for blob store operations
#[derive(Debug, Clone, Default, PartialEq)]
pub struct BlobCacheStats {
    /// Total blob reads
    pub total_reads: u64,

    /// Cache hits for blob operations
    pub cache_hits: u64,

    /// Cache misses for blob operations
    pub cache_misses: u64,

    /// Total bytes read through cache
    pub bytes_cached: u64,

    /// Total bytes read directly
    pub bytes_direct: u64,

    /// Cache hit ratio (hits / total reads); 0.0 until the first read
    pub hit_ratio: f64,
}

impl BlobCacheStats {
    /// Create a zeroed statistics record.
    ///
    /// Delegates to the derived `Default` (all counters zero, ratio 0.0)
    /// instead of duplicating the field list.
    pub fn new() -> Self {
        Self::default()
    }

    /// Record a read served from the cache, adding `bytes` to the cached total.
    pub fn record_hit(&mut self, bytes: usize) {
        self.total_reads += 1;
        self.cache_hits += 1;
        self.bytes_cached += bytes as u64;
        self.update_hit_ratio();
    }

    /// Record a read that missed the cache, adding `bytes` to the direct total.
    pub fn record_miss(&mut self, bytes: usize) {
        self.total_reads += 1;
        self.cache_misses += 1;
        self.bytes_direct += bytes as u64;
        self.update_hit_ratio();
    }

    /// Recompute `hit_ratio`; left at 0.0 until the first recorded read.
    fn update_hit_ratio(&mut self) {
        if self.total_reads > 0 {
            self.hit_ratio = self.cache_hits as f64 / self.total_reads as f64;
        }
    }

    /// Bytes that were served from the cache rather than read directly.
    pub fn bytes_saved(&self) -> u64 {
        self.bytes_cached
    }

    /// Fraction of all transferred bytes that went through the cache
    /// (0.0 when nothing has been transferred).
    pub fn efficiency_ratio(&self) -> f64 {
        let total_bytes = self.bytes_cached + self.bytes_direct;
        if total_bytes > 0 {
            self.bytes_cached as f64 / total_bytes as f64
        } else {
            0.0
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::blob_store::MemoryBlobStore;
    use crate::cache::PageCacheConfig;
    
    #[test]
    fn test_cached_blob_store_creation() {
        let inner = MemoryBlobStore::new();
        let config = PageCacheConfig::balanced();
        
        let cached_store = CachedBlobStore::new(inner, config);
        assert!(cached_store.is_ok());
    }
    
    #[test]
    fn test_cache_disable_enable() {
        let inner = MemoryBlobStore::new();
        let config = PageCacheConfig::balanced();
        let mut cached_store = CachedBlobStore::new(inner, config).unwrap();
        
        // Should start enabled
        assert!(cached_store.cache_enabled);
        
        cached_store.disable_cache();
        assert!(!cached_store.cache_enabled);
        
        cached_store.enable_cache();
        assert!(cached_store.cache_enabled);
    }
    
    #[test]
    fn test_write_strategies() {
        // Test write-through strategy
        let inner1 = MemoryBlobStore::new();
        let config1 = PageCacheConfig::balanced();
        let cached_store = CachedBlobStore::with_write_strategy(
            inner1, config1, CacheWriteStrategy::WriteThrough
        ).unwrap();
        assert_eq!(cached_store.write_strategy(), CacheWriteStrategy::WriteThrough);
        
        // Test write-back strategy
        let inner2 = MemoryBlobStore::new();
        let config2 = PageCacheConfig::balanced();
        let cached_store = CachedBlobStore::with_write_strategy(
            inner2, config2, CacheWriteStrategy::WriteBack
        ).unwrap();
        assert_eq!(cached_store.write_strategy(), CacheWriteStrategy::WriteBack);
        
        // Test write-around strategy
        let inner3 = MemoryBlobStore::new();
        let config3 = PageCacheConfig::balanced();
        let cached_store = CachedBlobStore::with_write_strategy(
            inner3, config3, CacheWriteStrategy::WriteAround
        ).unwrap();
        assert_eq!(cached_store.write_strategy(), CacheWriteStrategy::WriteAround);
    }
    
    #[test]
    fn test_write_strategy_modification() {
        let inner = MemoryBlobStore::new();
        let config = PageCacheConfig::balanced();
        let mut cached_store = CachedBlobStore::new(inner, config).unwrap();
        
        // Should start with write-through
        assert_eq!(cached_store.write_strategy(), CacheWriteStrategy::WriteThrough);
        
        // Change to write-back
        cached_store.set_write_strategy(CacheWriteStrategy::WriteBack);
        assert_eq!(cached_store.write_strategy(), CacheWriteStrategy::WriteBack);
        
        // Change to write-around
        cached_store.set_write_strategy(CacheWriteStrategy::WriteAround);
        assert_eq!(cached_store.write_strategy(), CacheWriteStrategy::WriteAround);
    }
    
    #[test]
    fn test_blob_cache_stats() {
        let mut stats = BlobCacheStats::new();
        
        assert_eq!(stats.total_reads, 0);
        assert_eq!(stats.hit_ratio, 0.0);
        
        stats.record_hit(1024);
        assert_eq!(stats.total_reads, 1);
        assert_eq!(stats.cache_hits, 1);
        assert_eq!(stats.hit_ratio, 1.0);
        
        stats.record_miss(512);
        assert_eq!(stats.total_reads, 2);
        assert_eq!(stats.cache_misses, 1);
        assert_eq!(stats.hit_ratio, 0.5);
    }
    
    #[test]
    fn test_basic_blob_operations() {
        let inner = MemoryBlobStore::new();
        let config = PageCacheConfig::memory_optimized();
        let mut cached_store = CachedBlobStore::new(inner, config).unwrap();
        
        // Test basic blob store operations
        let data = b"Hello, cached world!";
        let id = cached_store.put(data).unwrap();
        
        assert!(cached_store.contains(id));
        assert_eq!(cached_store.len(), 1);
        assert!(!cached_store.is_empty());
        
        let retrieved = cached_store.get(id).unwrap();
        assert_eq!(retrieved, data);
        
        cached_store.remove(id).unwrap();
        assert!(!cached_store.contains(id));
        assert_eq!(cached_store.len(), 0);
    }
    
    #[test]
    fn test_write_through_operations() {
        let inner = MemoryBlobStore::new();
        let config = PageCacheConfig::performance_optimized();
        let mut cached_store = CachedBlobStore::with_write_strategy(
            inner, config, CacheWriteStrategy::WriteThrough
        ).unwrap();
        
        let data1 = b"Write-through data 1";
        let data2 = b"Write-through data 2";
        
        let id1 = cached_store.put(data1).unwrap();
        let id2 = cached_store.put(data2).unwrap();
        
        // Both should be available immediately
        assert_eq!(cached_store.get(id1).unwrap(), data1);
        assert_eq!(cached_store.get(id2).unwrap(), data2);
        
        // Test metadata is stored
        assert!(cached_store.get_blob_metadata(id1).unwrap().is_some());
        assert!(cached_store.get_blob_metadata(id2).unwrap().is_some());
    }
    
    #[test]
    fn test_write_back_operations() {
        let inner = MemoryBlobStore::new();
        let config = PageCacheConfig::performance_optimized();
        let mut cached_store = CachedBlobStore::with_write_strategy(
            inner, config, CacheWriteStrategy::WriteBack
        ).unwrap();
        
        let data = b"Write-back test data";
        let id = cached_store.put(data).unwrap();
        
        // Should be available for read
        assert_eq!(cached_store.get(id).unwrap(), data);
        
        // Test that metadata is properly maintained
        assert!(cached_store.get_blob_metadata(id).unwrap().is_some());
    }
    
    #[test]
    fn test_cache_invalidation() {
        let inner = MemoryBlobStore::new();
        let config = PageCacheConfig::balanced();
        let mut cached_store = CachedBlobStore::new(inner, config).unwrap();
        
        let data1 = b"Data to be invalidated";
        let data2 = b"Replacement data";
        
        // Store initial data
        let id = cached_store.put(data1).unwrap();
        assert_eq!(cached_store.get(id).unwrap(), data1);
        
        // Test invalidation by removing and re-adding
        cached_store.remove(id).unwrap();
        
        // Should not contain the removed item
        assert!(!cached_store.contains(id));
        
        // Replacement data should be stored and retrievable after removal
        // (previously `id2` was unused, triggering an unused-variable warning)
        let id2 = cached_store.put(data2).unwrap();
        assert_eq!(cached_store.get(id2).unwrap(), data2);
        
        // Test flush functionality
        assert!(cached_store.flush().is_ok());
    }
    
    #[test]
    fn test_invalidation_stats() {
        let inner = MemoryBlobStore::new();
        let config = PageCacheConfig::memory_optimized();
        let cached_store = CachedBlobStore::new(inner, config).unwrap();
        
        // Test that invalidation stats can be retrieved
        let stats = cached_store.invalidation_stats().unwrap();
        assert_eq!(stats, (0, 0)); // Should start with no invalidations or dirty pages
    }
    
    #[test]
    fn test_prefetch_functionality() {
        let inner = MemoryBlobStore::new();
        let config = PageCacheConfig::performance_optimized();
        let cached_store = CachedBlobStore::new(inner, config).unwrap();
        
        // Test prefetch range - should not fail
        assert!(cached_store.prefetch_range(0, 4096).is_ok());
        assert!(cached_store.prefetch_range(4096, 8192).is_ok());
    }
}