zipora 3.1.3

High-performance Rust implementation providing advanced data structures and compression algorithms with memory safety guarantees. Features LRU page cache, sophisticated caching layer, fiber-based concurrency, real-time compression, secure memory pools, SIMD optimizations, and complete C FFI for migration from C++.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
//! Memory pool allocator for high-frequency allocations
//!
//! This module provides memory pools that can significantly reduce allocation
//! overhead for frequently allocated objects of similar sizes.

use crate::error::{Result, ZiporaError};
use std::alloc::{Layout, alloc, dealloc};
use std::collections::VecDeque;
use std::ptr::NonNull;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Mutex, RwLock};

/// Configuration for a memory pool
#[derive(Debug, Clone)]
pub struct PoolConfig {
    /// Size of each chunk in bytes
    pub chunk_size: usize,
    /// Maximum number of chunks to keep in the pool
    pub max_chunks: usize,
    /// Alignment requirement for allocations
    pub alignment: usize,
}

impl PoolConfig {
    /// Create a new pool configuration
    pub fn new(chunk_size: usize, max_chunks: usize, alignment: usize) -> Self {
        Self {
            chunk_size,
            max_chunks,
            alignment,
        }
    }

    /// Create configuration for small objects (< 1KB)
    pub fn small() -> Self {
        Self::new(1024, 100, 8)
    }

    /// Create configuration for medium objects (< 64KB)
    pub fn medium() -> Self {
        Self::new(64 * 1024, 50, 16)
    }

    /// Create configuration for large objects (< 1MB)
    pub fn large() -> Self {
        Self::new(1024 * 1024, 10, 32)
    }
}

/// Statistics for memory pool usage
#[derive(Debug, Clone)]
pub struct PoolStats {
    /// Total bytes allocated
    pub allocated: u64,
    /// Total bytes available in pool
    pub available: u64,
    /// Number of chunks in pool
    pub chunks: usize,
    /// Number of allocations served
    pub alloc_count: u64,
    /// Number of deallocations
    pub dealloc_count: u64,
    /// Number of pool hits (reused memory)
    pub pool_hits: u64,
    /// Number of pool misses (new allocations)
    pub pool_misses: u64,
}

impl Default for PoolStats {
    fn default() -> Self {
        Self {
            allocated: 0,
            available: 0,
            chunks: 0,
            alloc_count: 0,
            dealloc_count: 0,
            pool_hits: 0,
            pool_misses: 0,
        }
    }
}

/// A memory pool for efficient allocation of fixed-size chunks
///
/// Freed chunks are cached in an internal free list (bounded by
/// `PoolConfig::max_chunks`) and handed back out on later allocations,
/// avoiding round-trips to the system allocator for hot allocation sites.
///
/// # Synchronization
/// - `free_chunks`: `Mutex<VecDeque<*mut u8>>` — the list of reusable chunks
/// - `stats`: `RwLock<PoolStats>` — byte-level allocation statistics
/// - `alloc_count` / `dealloc_count` / `pool_hits` / `pool_misses`:
///   lock-free `AtomicU64` event counters (sampled with relaxed ordering)
///
/// # Caller contract
/// A pointer returned by `allocate()` must be passed back to `deallocate()`
/// exactly once, and only to the pool that produced it.
///
/// NOTE(review): `Send`/`Sync` are implemented manually below because the
/// stored `*mut u8` values are raw pointers; see the safety argument there.
/// NOTE(review): `deallocate()` performs no double-free detection — handing
/// the same pointer back twice corrupts the free list. Consider tracking
/// outstanding chunks in debug builds; verify concurrent use with Miri/TSan.
pub struct MemoryPool {
    config: PoolConfig,
    free_chunks: Mutex<VecDeque<*mut u8>>,
    stats: RwLock<PoolStats>,
    alloc_count: AtomicU64,
    dealloc_count: AtomicU64,
    pool_hits: AtomicU64,
    pool_misses: AtomicU64,
}

// SAFETY: the raw `*mut u8` values stored in `free_chunks` make this type
// `!Send + !Sync` by default. Sharing the pool across threads is sound only
// under the following invariants, which this module upholds:
// - Pointers on the free list are exclusively owned by the pool; nothing
//   reads or writes through them while they sit in the list.
// - All access to the list goes through the `Mutex`, so no two threads can
//   pop the same pointer.
// - Chunks are allocated and freed through the single global allocator, so
//   ownership of a chunk may migrate between threads.
//
// NOTE(review): these invariants additionally rely on callers never passing
// the same chunk to `deallocate()` twice — there is no runtime double-free
// check. Re-verify with `cargo miri test` when modifying this module.
unsafe impl Send for MemoryPool {}
unsafe impl Sync for MemoryPool {}

impl MemoryPool {
    /// Create a new memory pool with the given configuration
    ///
    /// # Errors
    /// Fails if `chunk_size` is zero or `alignment` is not a power of two.
    pub fn new(config: PoolConfig) -> Result<Self> {
        if config.chunk_size == 0 {
            return Err(ZiporaError::invalid_data("chunk_size cannot be zero"));
        }

        if config.alignment == 0 || !config.alignment.is_power_of_two() {
            return Err(ZiporaError::invalid_data(
                "alignment must be a power of two",
            ));
        }

        Ok(Self {
            config,
            free_chunks: Mutex::new(VecDeque::new()),
            stats: RwLock::new(PoolStats::default()),
            alloc_count: AtomicU64::new(0),
            dealloc_count: AtomicU64::new(0),
            pool_hits: AtomicU64::new(0),
            pool_misses: AtomicU64::new(0),
        })
    }

    /// Acquire the free-chunk list, recovering from mutex poisoning.
    ///
    /// A poisoned lock only means another thread panicked while holding it;
    /// the `VecDeque` itself remains structurally valid, so continuing to use
    /// it is safe. Blocking here (instead of the previous `try_lock()`)
    /// guarantees pool bookkeeping is never silently skipped under contention.
    fn free_chunks_guard(&self) -> std::sync::MutexGuard<'_, VecDeque<*mut u8>> {
        self.free_chunks
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner())
    }

    /// Acquire the stats lock for writing, recovering from poisoning.
    fn stats_write_guard(&self) -> std::sync::RwLockWriteGuard<'_, PoolStats> {
        self.stats
            .write()
            .unwrap_or_else(|poisoned| poisoned.into_inner())
    }

    /// Allocate a chunk from the pool
    ///
    /// Reuses a cached chunk when one is available, otherwise requests a
    /// fresh chunk from the system allocator. Unlike the old `try_lock()`
    /// fast path, this always takes the free-list lock, so a cached chunk is
    /// never missed under contention and hit/miss statistics stay accurate.
    ///
    /// # Errors
    /// Returns an error if the system allocator is out of memory.
    pub fn allocate(&self) -> Result<NonNull<u8>> {
        self.alloc_count.fetch_add(1, Ordering::Relaxed);

        // The guard is a temporary dropped at the end of this statement, so
        // the list lock is released before the stats lock is touched — no
        // thread ever holds both locks at once (deadlock-free by ordering).
        let reused = self.free_chunks_guard().pop_front();
        if let Some(chunk) = reused {
            self.pool_hits.fetch_add(1, Ordering::Relaxed);
            self.update_stats_on_alloc(true);
            // SAFETY: only non-null pointers produced by allocate_new_chunk()
            // are ever pushed onto the free list.
            return Ok(unsafe { NonNull::new_unchecked(chunk) });
        }

        // Pool is empty: fall back to a fresh system allocation.
        self.pool_misses.fetch_add(1, Ordering::Relaxed);
        self.allocate_new_chunk()
    }

    /// Deallocate a chunk back to the pool
    ///
    /// Caches the chunk for reuse while the pool is below `max_chunks`,
    /// otherwise returns it to the system allocator immediately. Blocking on
    /// the lock (rather than `try_lock()`) means a cacheable chunk is never
    /// pushed past the pool merely because of momentary contention.
    ///
    /// # Caller contract
    /// `chunk` must have been returned by `allocate()` on this pool and must
    /// not be passed in more than once; double-frees are not detected.
    pub fn deallocate(&self, chunk: NonNull<u8>) -> Result<()> {
        self.dealloc_count.fetch_add(1, Ordering::Relaxed);

        {
            let mut free_chunks = self.free_chunks_guard();
            if free_chunks.len() < self.config.max_chunks {
                free_chunks.push_back(chunk.as_ptr());
                // Release the list lock before taking the stats lock.
                drop(free_chunks);
                self.update_stats_on_dealloc(true);
                return Ok(());
            }
        }

        // Pool is full: hand the memory straight back to the allocator.
        self.deallocate_chunk(chunk);
        self.update_stats_on_dealloc(false);
        Ok(())
    }

    /// Get current pool statistics
    ///
    /// Byte counters and the free-list size are each read under their
    /// respective locks, so every value is individually consistent. The
    /// atomic event counters are sampled with `Relaxed` ordering and may lag
    /// concurrent operations by a few events; the snapshot as a whole is not
    /// a single atomic observation.
    pub fn stats(&self) -> PoolStats {
        // Clone under the read lock; recover from poisoning since PoolStats
        // is plain data and stays valid after a panic on another thread.
        let mut stats = self
            .stats
            .read()
            .unwrap_or_else(|poisoned| poisoned.into_inner())
            .clone();

        stats.alloc_count = self.alloc_count.load(Ordering::Relaxed);
        stats.dealloc_count = self.dealloc_count.load(Ordering::Relaxed);
        stats.pool_hits = self.pool_hits.load(Ordering::Relaxed);
        stats.pool_misses = self.pool_misses.load(Ordering::Relaxed);

        // Blocking lock: the previous try_lock() silently returned stale
        // chunk counts whenever the list was contended.
        let free_chunks = self.free_chunks_guard();
        stats.chunks = free_chunks.len();
        stats.available = (free_chunks.len() * self.config.chunk_size) as u64;

        stats
    }

    /// Clear all cached chunks, returning their memory to the system
    /// allocator.
    ///
    /// Recovers from lock poisoning instead of failing, so `Drop` can always
    /// release cached memory even after a panic on another thread (the old
    /// behavior returned an error and leaked every cached chunk).
    pub fn clear(&self) -> Result<()> {
        let mut free_chunks = self.free_chunks_guard();
        while let Some(chunk_ptr) = free_chunks.pop_front() {
            // SAFETY: every pointer on the free list came from
            // allocate_new_chunk() and was verified non-null there.
            let chunk = unsafe { NonNull::new_unchecked(chunk_ptr) };
            self.deallocate_chunk(chunk);
        }
        // Release the list lock before taking the stats lock.
        drop(free_chunks);

        let mut stats = self.stats_write_guard();
        stats.chunks = 0;
        stats.available = 0;

        Ok(())
    }

    /// Get pool configuration
    pub fn config(&self) -> &PoolConfig {
        &self.config
    }

    /// Allocate a brand-new chunk from the system allocator.
    fn allocate_new_chunk(&self) -> Result<NonNull<u8>> {
        let layout = Layout::from_size_align(self.config.chunk_size, self.config.alignment)
            .map_err(|_| ZiporaError::invalid_data("invalid layout for chunk allocation"))?;

        // SAFETY: layout has non-zero size (chunk_size > 0 enforced in new())
        // and a power-of-two alignment (also enforced in new()).
        let ptr = unsafe { alloc(layout) };

        if ptr.is_null() {
            return Err(ZiporaError::out_of_memory(self.config.chunk_size));
        }

        self.update_stats_on_alloc(false);

        // SAFETY: null check performed above
        Ok(unsafe { NonNull::new_unchecked(ptr) })
    }

    /// Return a chunk's memory to the system allocator.
    fn deallocate_chunk(&self, chunk: NonNull<u8>) {
        // Layout construction cannot fail here: the same size/alignment pair
        // was validated in new() and used successfully for the allocation.
        let layout = Layout::from_size_align(self.config.chunk_size, self.config.alignment)
            .expect("Layout invariant violated: config was validated during chunk allocation");

        // SAFETY: chunk was allocated by this pool with this exact layout;
        // the caller contract forbids foreign or already-freed pointers.
        unsafe {
            dealloc(chunk.as_ptr(), layout);
        }
    }

    /// Record the effect of an allocation on the `allocated` byte counter.
    ///
    /// Uses a blocking write lock so updates are never dropped; the old
    /// `try_write()` silently lost updates under contention, letting
    /// `allocated` drift from reality.
    fn update_stats_on_alloc(&self, from_pool: bool) {
        if !from_pool {
            self.stats_write_guard().allocated += self.config.chunk_size as u64;
        }
    }

    /// Record the effect of a deallocation on the `allocated` byte counter.
    fn update_stats_on_dealloc(&self, to_pool: bool) {
        if !to_pool {
            let mut stats = self.stats_write_guard();
            stats.allocated = stats
                .allocated
                .saturating_sub(self.config.chunk_size as u64);
        }
    }
}

impl Drop for MemoryPool {
    fn drop(&mut self) {
        // Best-effort release of cached chunks; drop cannot surface errors.
        self.clear().ok();
    }
}

/// Global memory pool instances, created lazily on first use.
///
/// The function item is passed directly instead of wrapping it in a
/// redundant closure (clippy::redundant_closure).
static GLOBAL_POOLS: once_cell::sync::Lazy<GlobalPools> =
    once_cell::sync::Lazy::new(GlobalPools::new);

/// Size-classed global pools: one shared pool per chunk-size tier.
struct GlobalPools {
    /// Serves requests up to 1 KiB (built from `PoolConfig::small`).
    small_pool: Arc<MemoryPool>,
    /// Serves requests up to 64 KiB (built from `PoolConfig::medium`).
    medium_pool: Arc<MemoryPool>,
    /// Serves all larger requests (built from `PoolConfig::large`).
    large_pool: Arc<MemoryPool>,
}

impl GlobalPools {
    fn new() -> Self {
        Self {
            small_pool: Arc::new(MemoryPool::new(PoolConfig::small()).expect("small pool creation")),
            medium_pool: Arc::new(MemoryPool::new(PoolConfig::medium()).expect("medium pool creation")),
            large_pool: Arc::new(MemoryPool::new(PoolConfig::large()).expect("large pool creation")),
        }
    }

    fn get_pool_for_size(&self, size: usize) -> &Arc<MemoryPool> {
        if size <= 1024 {
            &self.small_pool
        } else if size <= 64 * 1024 {
            &self.medium_pool
        } else {
            &self.large_pool
        }
    }
}

/// Initialize global pools with custom configuration
/// Initialize global pools with custom configuration
///
/// Currently this only validates the parameters; the pools themselves are
/// created lazily with the built-in presets.
///
/// # Errors
/// Returns an error when either `chunk_size` or `max_memory` is zero.
pub fn init_global_pools(chunk_size: usize, max_memory: usize) -> Result<()> {
    match (chunk_size, max_memory) {
        (0, _) => Err(ZiporaError::invalid_data("chunk_size cannot be zero")),
        (_, 0) => Err(ZiporaError::invalid_data("max_memory cannot be zero")),
        _ => {
            log::debug!(
                "Global pools initialized with chunk_size={}, max_memory={}",
                chunk_size,
                max_memory
            );
            Ok(())
        }
    }
}

/// Get statistics from all global pools
pub fn get_global_pool_stats() -> PoolStats {
    let small_stats = GLOBAL_POOLS.small_pool.stats();
    let medium_stats = GLOBAL_POOLS.medium_pool.stats();
    let large_stats = GLOBAL_POOLS.large_pool.stats();

    PoolStats {
        allocated: small_stats.allocated + medium_stats.allocated + large_stats.allocated,
        available: small_stats.available + medium_stats.available + large_stats.available,
        chunks: small_stats.chunks + medium_stats.chunks + large_stats.chunks,
        alloc_count: small_stats.alloc_count + medium_stats.alloc_count + large_stats.alloc_count,
        dealloc_count: small_stats.dealloc_count
            + medium_stats.dealloc_count
            + large_stats.dealloc_count,
        pool_hits: small_stats.pool_hits + medium_stats.pool_hits + large_stats.pool_hits,
        pool_misses: small_stats.pool_misses + medium_stats.pool_misses + large_stats.pool_misses,
    }
}

/// A vector that uses memory pools for allocation
///
/// Capacity is fixed at construction (one pool chunk); `push` returns an
/// error once the chunk is full rather than reallocating.
pub struct PooledVec<T> {
    /// Start of the pool chunk backing the elements.
    ptr: NonNull<T>,
    /// Number of initialized elements (always <= `capacity`).
    len: usize,
    /// Maximum number of elements the chunk can hold.
    capacity: usize,
    /// Pool the chunk is returned to on drop.
    pool: Arc<MemoryPool>,
}

impl<T> PooledVec<T> {
    /// Create a new pooled vector backed by one chunk from the global pools.
    ///
    /// # Errors
    /// Returns an error if `T` is zero-sized, if `T`'s alignment exceeds the
    /// selected pool's chunk alignment, or if the pool allocation fails.
    pub fn new() -> Result<Self> {
        let element_size = std::mem::size_of::<T>();

        // Guard zero-sized types: the capacity computation below would
        // divide by zero (previously a panic), and a pool chunk cannot
        // meaningfully back ZSTs.
        if element_size == 0 {
            return Err(ZiporaError::invalid_data(
                "PooledVec does not support zero-sized types",
            ));
        }

        let pool = GLOBAL_POOLS.get_pool_for_size(element_size).clone();

        // Pool chunks are only guaranteed to be aligned to
        // config().alignment; storing a more strictly aligned T in them
        // would be undefined behavior, so reject it up front.
        if std::mem::align_of::<T>() > pool.config().alignment {
            return Err(ZiporaError::invalid_data(
                "element alignment exceeds pool chunk alignment",
            ));
        }

        let chunk = pool.allocate()?;
        let capacity = pool.config().chunk_size / element_size;

        Ok(Self {
            ptr: chunk.cast(),
            len: 0,
            capacity,
            pool,
        })
    }

    /// Push an element to the vector
    ///
    /// # Errors
    /// Fails when the fixed chunk capacity is exhausted; the vector never
    /// reallocates.
    #[inline]
    pub fn push(&mut self, item: T) -> Result<()> {
        if self.len >= self.capacity {
            return Err(ZiporaError::invalid_data("vector capacity exceeded"));
        }

        // SAFETY: self.len < self.capacity checked above, so ptr + len is
        // within the allocated region.
        unsafe {
            self.ptr.as_ptr().add(self.len).write(item);
        }
        self.len += 1;
        Ok(())
    }

    /// Get the number of initialized elements.
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Check if the vector is empty
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Get the fixed capacity of the vector (elements per chunk).
    #[inline]
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// Get a slice of the vector's contents
    #[inline]
    pub fn as_slice(&self) -> &[T] {
        // SAFETY: ptr points to the chunk allocated in new(); len <= capacity
        // is maintained by push(), and all elements in 0..len were written
        // before len was incremented.
        unsafe { std::slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
    }
}

impl<T> Drop for PooledVec<T> {
    fn drop(&mut self) {
        // Run the destructor of every initialized element first; the backing
        // chunk must stay alive while element destructors may still read
        // their own storage.
        for i in 0..self.len {
            // SAFETY: i < self.len, so ptr + i is within the initialized region
            unsafe {
                self.ptr.as_ptr().add(i).drop_in_place();
            }
        }

        // Then return the chunk to its pool; errors cannot propagate out of
        // drop, so the result is deliberately ignored (best-effort).
        let _ = self.pool.deallocate(self.ptr.cast());
    }
}

/// A buffer that uses memory pools for allocation
///
/// The usable length is fixed at construction; the backing chunk may be
/// larger than `len` (see `capacity`).
pub struct PooledBuffer {
    /// Start of the pool chunk backing this buffer.
    ptr: NonNull<u8>,
    /// Number of bytes exposed through the slice accessors.
    len: usize,
    /// Full chunk size as reported by the pool's config.
    #[allow(dead_code)]
    capacity: usize,
    /// Pool the chunk is returned to on drop.
    pool: Arc<MemoryPool>,
}

impl PooledBuffer {
    /// Create a new pooled buffer of the specified size
    pub fn new(size: usize) -> Result<Self> {
        let pool = GLOBAL_POOLS.get_pool_for_size(size).clone();
        let chunk = pool.allocate()?;

        Ok(Self {
            ptr: chunk,
            len: size,
            capacity: pool.config().chunk_size,
            pool,
        })
    }

    /// Get the buffer as a slice
    #[inline]
    pub fn as_slice(&self) -> &[u8] {
        // SAFETY: ptr points to allocated region from pool, len bytes are valid
        unsafe { std::slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
    }

    /// Get the buffer as a mutable slice
    #[inline]
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        // SAFETY: ptr points to allocated region from pool, len bytes are valid
        // &mut self ensures exclusive access
        unsafe { std::slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
    }

    /// Get the length of the buffer
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Check if the buffer is empty
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }
}

impl Drop for PooledBuffer {
    fn drop(&mut self) {
        // Hand the chunk back to its pool; drop cannot surface errors, so
        // the result is deliberately discarded.
        self.pool.deallocate(self.ptr).ok();
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_pool_config() {
        let cfg = PoolConfig::new(1024, 100, 8);
        assert_eq!(cfg.chunk_size, 1024);
        assert_eq!(cfg.max_chunks, 100);
        assert_eq!(cfg.alignment, 8);

        let preset = PoolConfig::small();
        assert_eq!((preset.chunk_size, preset.max_chunks), (1024, 100));
    }

    #[test]
    fn test_memory_pool_creation() {
        let pool = MemoryPool::new(PoolConfig::new(1024, 10, 8)).unwrap();

        let snapshot = pool.stats();
        assert_eq!(snapshot.chunks, 0);
        assert_eq!(snapshot.allocated, 0);
    }

    #[test]
    fn test_memory_pool_allocation() {
        let pool = MemoryPool::new(PoolConfig::new(1024, 10, 8)).unwrap();

        let first = pool.allocate().unwrap();
        let second = pool.allocate().unwrap();

        // Distinct live allocations must never alias.
        assert_ne!(first.as_ptr(), second.as_ptr());

        pool.deallocate(first).unwrap();
        pool.deallocate(second).unwrap();

        let snapshot = pool.stats();
        assert_eq!(snapshot.alloc_count, 2);
        assert_eq!(snapshot.dealloc_count, 2);
    }

    #[test]
    fn test_memory_pool_reuse() {
        let pool = MemoryPool::new(PoolConfig::new(1024, 10, 8)).unwrap();

        let first = pool.allocate().unwrap();
        let first_addr = first.as_ptr();
        pool.deallocate(first).unwrap();

        // The freed chunk should be served again on the next allocation.
        let second = pool.allocate().unwrap();
        assert_eq!(first_addr, second.as_ptr());

        pool.deallocate(second).unwrap();
        assert!(pool.stats().pool_hits > 0);
    }

    #[test]
    fn test_pooled_vec() {
        let mut values = PooledVec::<i32>::new().unwrap();

        assert!(values.is_empty());
        assert_eq!(values.len(), 0);
        assert!(values.capacity() > 0);

        values.push(42).unwrap();
        values.push(84).unwrap();

        assert!(!values.is_empty());
        assert_eq!(values.len(), 2);
        assert_eq!(values.as_slice(), &[42, 84]);
    }

    #[test]
    fn test_pooled_buffer() {
        let mut buffer = PooledBuffer::new(100).unwrap();

        assert_eq!(buffer.len(), 100);
        assert!(!buffer.is_empty());

        {
            let bytes = buffer.as_mut_slice();
            bytes[0] = 42;
            bytes[99] = 84;
        }

        assert_eq!(buffer.as_slice()[0], 42);
        assert_eq!(buffer.as_slice()[99], 84);
    }

    #[test]
    fn test_global_pool_stats() {
        // Must not panic and must produce a well-formed snapshot.
        let _stats = get_global_pool_stats();
    }

    #[test]
    fn test_invalid_pool_config() {
        // Zero chunk size is rejected.
        assert!(MemoryPool::new(PoolConfig::new(0, 10, 8)).is_err());
        // Zero alignment is rejected.
        assert!(MemoryPool::new(PoolConfig::new(1024, 10, 0)).is_err());
        // Non-power-of-two alignment is rejected.
        assert!(MemoryPool::new(PoolConfig::new(1024, 10, 3)).is_err());
    }
}