velesdb_core/perf_optimizations.rs

//! Performance optimizations module for ultra-fast vector operations.
//!
//! This module provides:
//! - **Contiguous vector storage**: Cache-friendly memory layout
//! - **Prefetch hints**: CPU cache warming for HNSW traversal
//! - **Batch distance computation**: SIMD-optimized batch operations
//!
//! # Performance Targets
//!
//! - Bulk import: 50K+ vectors/sec at 768D
//! - Search latency: < 1ms for 1M vectors
//! - Memory efficiency: 50% reduction with FP16

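//! # Example
//!
//! A minimal usage sketch. The fence is marked `ignore` because the public
//! re-export path `velesdb_core::perf_optimizations` is an assumption here:
//!
//! ```rust,ignore
//! use velesdb_core::perf_optimizations::ContiguousVectors;
//!
//! let mut store = ContiguousVectors::new(768, 1024);
//! let v = vec![0.25_f32; 768];
//! store.push(&v);
//!
//! let scores = store.batch_dot_products(&[0], &v);
//! assert_eq!(scores.len(), 1);
//! ```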
use std::alloc::{alloc, dealloc, Layout};
use std::fmt;
use std::ptr;

// =============================================================================
// Contiguous Vector Storage (Cache-Optimized)
// =============================================================================

/// Contiguous memory layout for vectors (cache-friendly).
///
/// Stores all vectors in a single contiguous buffer to maximize
/// cache locality and enable prefetching and SIMD batch operations.
///
/// # Memory Layout
///
/// ```text
/// [v0_d0, v0_d1, ..., v0_dn, v1_d0, v1_d1, ..., v1_dn, ...]
/// ```
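///
/// The vector at `index` therefore starts at element offset
/// `index * dimension` (byte offset `index * dimension * size_of::<f32>()`),
/// which is the invariant every accessor below relies on.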
pub struct ContiguousVectors {
    /// Raw contiguous data buffer
    data: *mut f32,
    /// Vector dimension
    dimension: usize,
    /// Number of vectors stored
    count: usize,
    /// Allocated capacity (number of vectors)
    capacity: usize,
}

// SAFETY: ContiguousVectors owns its data and doesn't share mutable access
unsafe impl Send for ContiguousVectors {}
unsafe impl Sync for ContiguousVectors {}

impl fmt::Debug for ContiguousVectors {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ContiguousVectors")
            .field("dimension", &self.dimension)
            .field("count", &self.count)
            .field("capacity", &self.capacity)
            .finish_non_exhaustive()
    }
}

impl ContiguousVectors {
    /// Creates a new `ContiguousVectors` with the given dimension and initial capacity.
    ///
    /// # Arguments
    ///
    /// * `dimension` - Vector dimension
    /// * `capacity` - Initial capacity (number of vectors); clamped to at least 16
    ///
    /// # Panics
    ///
    /// Panics if `dimension` is 0 or allocation fails.
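    ///
    /// # Examples
    ///
    /// A minimal sketch, marked `ignore` since the import path is assumed:
    ///
    /// ```rust,ignore
    /// let cv = ContiguousVectors::new(768, 100);
    /// assert_eq!(cv.dimension(), 768);
    /// assert!(cv.is_empty());
    /// assert!(cv.capacity() >= 100); // capacity is clamped to at least 16
    /// ```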
    #[must_use]
    #[allow(clippy::cast_ptr_alignment)] // Layout is 64-byte aligned
    pub fn new(dimension: usize, capacity: usize) -> Self {
        assert!(dimension > 0, "Dimension must be > 0");

        let capacity = capacity.max(16); // Minimum 16 vectors
        let layout = Self::layout(dimension, capacity);

        // SAFETY: Layout is valid (non-zero, aligned)
        let data = unsafe { alloc(layout).cast::<f32>() };

        assert!(!data.is_null(), "Failed to allocate ContiguousVectors");

        Self {
            data,
            dimension,
            count: 0,
            capacity,
        }
    }

    /// Returns the memory layout for the given dimension and capacity.
    fn layout(dimension: usize, capacity: usize) -> Layout {
        let size = dimension * capacity * std::mem::size_of::<f32>();
        let align = 64; // Cache line alignment for optimal prefetch
        Layout::from_size_align(size.max(64), align).expect("Invalid layout")
    }

    /// Returns the dimension of stored vectors.
    #[inline]
    #[must_use]
    pub const fn dimension(&self) -> usize {
        self.dimension
    }

    /// Returns the number of vectors stored.
    #[inline]
    #[must_use]
    pub const fn len(&self) -> usize {
        self.count
    }

    /// Returns `true` if no vectors are stored.
    #[inline]
    #[must_use]
    pub const fn is_empty(&self) -> bool {
        self.count == 0
    }

    /// Returns the capacity (maximum number of vectors before reallocation).
    #[inline]
    #[must_use]
    pub const fn capacity(&self) -> usize {
        self.capacity
    }

    /// Returns total memory usage of the allocated buffer in bytes.
    #[inline]
    #[must_use]
    pub const fn memory_bytes(&self) -> usize {
        self.capacity * self.dimension * std::mem::size_of::<f32>()
    }

    /// Ensures the storage has capacity for at least `required_capacity` vectors.
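    ///
    /// When growth is needed, the new capacity is at least double the
    /// current one, so a long run of `push` calls triggers only O(log n)
    /// reallocations (amortized O(1) per push).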
    pub fn ensure_capacity(&mut self, required_capacity: usize) {
        if required_capacity > self.capacity {
            let new_capacity = required_capacity.max(self.capacity * 2);
            self.resize(new_capacity);
        }
    }

    /// Inserts a vector at a specific index.
    ///
    /// Automatically grows capacity if needed.
    ///
    /// Note: this allows sparse population. Slots below `index` that were
    /// never written contain uninitialized memory (`alloc` does not zero its
    /// allocations), so reading them, e.g. via `get`, is undefined behavior.
    ///
    /// # Panics
    ///
    /// Panics if the vector dimension doesn't match.
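    ///
    /// A sparse-population sketch (illustrative, hence `ignore`d):
    ///
    /// ```rust,ignore
    /// let mut cv = ContiguousVectors::new(3, 4);
    /// cv.insert_at(5, &[1.0, 2.0, 3.0]); // grows; len() becomes 6
    /// assert_eq!(cv.get(5).unwrap(), &[1.0, 2.0, 3.0]);
    /// // Slots 0..5 were never written; reading them is undefined behavior.
    /// ```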
    pub fn insert_at(&mut self, index: usize, vector: &[f32]) {
        assert_eq!(
            vector.len(),
            self.dimension,
            "Vector dimension mismatch: expected {}, got {}",
            self.dimension,
            vector.len()
        );

        self.ensure_capacity(index + 1);

        let offset = index * self.dimension;
        // SAFETY: We ensured capacity covers index
        unsafe {
            ptr::copy_nonoverlapping(vector.as_ptr(), self.data.add(offset), self.dimension);
        }

        // Update count if we're extending the "used" range
        if index >= self.count {
            self.count = index + 1;
        }
    }

    /// Adds a vector to the storage.
    ///
    /// # Panics
    ///
    /// Panics if the vector dimension doesn't match.
    pub fn push(&mut self, vector: &[f32]) {
        self.insert_at(self.count, vector);
    }

    /// Adds multiple vectors in batch.
    ///
    /// # Arguments
    ///
    /// * `vectors` - Iterator of vectors to add
    ///
    /// # Returns
    ///
    /// The number of vectors added.
    pub fn push_batch<'a>(&mut self, vectors: impl Iterator<Item = &'a [f32]>) -> usize {
        let mut added = 0;
        for vector in vectors {
            self.push(vector);
            added += 1;
        }
        added
    }

    /// Gets a vector by index.
    ///
    /// # Returns
    ///
    /// A slice of the vector data, or `None` if the index is out of bounds.
    #[inline]
    #[must_use]
    pub fn get(&self, index: usize) -> Option<&[f32]> {
        if index >= self.count {
            // Note: in sparse mode, index < count doesn't guarantee the slot
            // was initialized, but for HNSW's dense IDs it typically does.
            return None;
        }

        let offset = index * self.dimension;
        // SAFETY: Index is within bounds (checked against count, which is <= capacity)
        Some(unsafe { std::slice::from_raw_parts(self.data.add(offset), self.dimension) })
    }

    /// Gets a vector by index (unchecked).
    ///
    /// # Safety
    ///
    /// Caller must ensure `index < self.len()`.
    ///
    /// # Debug Assertions
    ///
    /// In debug builds, this function will panic if `index >= self.len()`.
    /// This catches bugs early during development without impacting release performance.
    #[inline]
    #[must_use]
    pub unsafe fn get_unchecked(&self, index: usize) -> &[f32] {
        debug_assert!(
            index < self.count,
            "index out of bounds: index={index}, count={}",
            self.count
        );
        let offset = index * self.dimension;
        std::slice::from_raw_parts(self.data.add(offset), self.dimension)
    }

    /// Prefetches a vector for upcoming access.
    ///
    /// This hints the CPU to load the vector into the L2 cache.
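    ///
    /// A prefetch-ahead sketch over HNSW candidate IDs (illustrative, hence
    /// `ignore`d; `neighbors` and the distance step are assumed):
    ///
    /// ```rust,ignore
    /// for (i, &id) in neighbors.iter().enumerate() {
    ///     if let Some(&ahead) = neighbors.get(i + 4) {
    ///         cv.prefetch(ahead); // warm the cache a few iterations early
    ///     }
    ///     if let Some(v) = cv.get(id) {
    ///         // ... compute the distance against `v` here ...
    ///     }
    /// }
    /// ```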
    #[inline]
    pub fn prefetch(&self, index: usize) {
        if index < self.count {
            let offset = index * self.dimension;
            let ptr = unsafe { self.data.add(offset) };

            #[cfg(target_arch = "x86_64")]
            unsafe {
                use std::arch::x86_64::{_mm_prefetch, _MM_HINT_T1};
                // Prefetch for read, into the L2 cache (the hint is a const
                // generic parameter in current std::arch)
                _mm_prefetch::<_MM_HINT_T1>(ptr.cast::<i8>());
            }

            // aarch64 prefetch requires nightly (stdarch_aarch64_prefetch)
            // For now, we skip prefetch on ARM64 until the feature is stabilized
            #[cfg(not(target_arch = "x86_64"))]
            let _ = ptr;
        }
    }

    /// Prefetches multiple vectors for batch processing.
    #[inline]
    pub fn prefetch_batch(&self, indices: &[usize]) {
        for &idx in indices {
            self.prefetch(idx);
        }
    }

    /// Resizes the internal buffer.
    ///
    /// # P2 Audit + PERF-002: Panic-Safety with RAII Guard
    ///
    /// This function uses `AllocGuard` for panic-safe allocation:
    /// 1. New buffer is allocated via RAII guard (auto-freed on panic)
    /// 2. Data is copied to the new buffer
    /// 3. Guard ownership is transferred (no auto-free)
    /// 4. Old buffer is deallocated
    /// 5. State is updated all at once
    ///
    /// If a panic occurs during the copy, the guard ensures the new buffer
    /// is freed. A minimal guard shape is sketched below.
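    ///
    /// A minimal shape such a guard might have (an assumption for
    /// illustration; the real `crate::alloc_guard::AllocGuard` may differ):
    ///
    /// ```rust,ignore
    /// struct AllocGuard { ptr: *mut u8, layout: Layout }
    ///
    /// impl AllocGuard {
    ///     /// Returns `None` if allocation fails.
    ///     fn new(layout: Layout) -> Option<Self> {
    ///         let ptr = unsafe { std::alloc::alloc(layout) };
    ///         if ptr.is_null() { None } else { Some(Self { ptr, layout }) }
    ///     }
    ///     fn cast<T>(&self) -> *mut T { self.ptr.cast() }
    ///     /// Releases ownership so `Drop` no longer frees the buffer.
    ///     fn into_raw(self) -> *mut u8 {
    ///         let ptr = self.ptr;
    ///         std::mem::forget(self);
    ///         ptr
    ///     }
    /// }
    ///
    /// impl Drop for AllocGuard {
    ///     fn drop(&mut self) {
    ///         unsafe { std::alloc::dealloc(self.ptr, self.layout) }
    ///     }
    /// }
    /// ```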
    #[allow(clippy::cast_ptr_alignment)] // Layout is 64-byte aligned
    fn resize(&mut self, new_capacity: usize) {
        use crate::alloc_guard::AllocGuard;

        if new_capacity <= self.capacity {
            return;
        }

        let old_layout = Self::layout(self.dimension, self.capacity);
        let new_layout = Self::layout(self.dimension, new_capacity);

        // Step 1: Allocate new buffer with RAII guard (PERF-002)
        // If a panic occurs before into_raw(), the memory is automatically freed
        let guard = AllocGuard::new(new_layout).unwrap_or_else(|| {
            panic!(
                "Failed to allocate {} bytes for ContiguousVectors resize",
                new_layout.size()
            )
        });

        let new_data: *mut f32 = guard.cast();

        // Step 2: Copy existing data to the new buffer
        // If this panics, the guard drops and frees new_data automatically
        let copy_count = self.count;
        if copy_count > 0 {
            let copy_size = copy_count * self.dimension;
            // SAFETY: Both pointers are valid, non-overlapping, and properly aligned
            unsafe {
                ptr::copy_nonoverlapping(self.data, new_data, copy_size);
            }
        }

        // Step 3: Transfer ownership - the guard won't free on drop anymore
        let _ = guard.into_raw();

        // Step 4: Deallocate old buffer
        // SAFETY: self.data was allocated with old_layout
        unsafe {
            dealloc(self.data.cast::<u8>(), old_layout);
        }

        // Step 5: Update state (all-or-nothing)
        self.data = new_data;
        self.capacity = new_capacity;
    }

    /// Computes the dot product of the vector at `index` with `query` using SIMD.
    #[inline]
    #[must_use]
    pub fn dot_product(&self, index: usize, query: &[f32]) -> Option<f32> {
        let vector = self.get(index)?;
        Some(crate::simd_avx512::dot_product_auto(vector, query))
    }

    /// Prefetch distance for cache warming.
    const PREFETCH_DISTANCE: usize = 4;

    /// Computes batch dot products with a query vector.
    ///
    /// This is optimized for HNSW search: each vector `PREFETCH_DISTANCE`
    /// positions ahead is prefetched while the current distance is computed.
    /// Out-of-range indices are silently skipped, so the result may be
    /// shorter than `indices`.
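    ///
    /// Usage sketch (illustrative, hence `ignore`d):
    ///
    /// ```rust,ignore
    /// let candidate_ids: Vec<usize> = vec![3, 17, 42];
    /// let scores = cv.batch_dot_products(&candidate_ids, &query);
    /// assert_eq!(scores.len(), candidate_ids.len()); // all ids in range
    /// ```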
    #[must_use]
    pub fn batch_dot_products(&self, indices: &[usize], query: &[f32]) -> Vec<f32> {
        let mut results = Vec::with_capacity(indices.len());

        for (i, &idx) in indices.iter().enumerate() {
            // Prefetch upcoming vectors
            if i + Self::PREFETCH_DISTANCE < indices.len() {
                self.prefetch(indices[i + Self::PREFETCH_DISTANCE]);
            }

            if let Some(score) = self.dot_product(idx, query) {
                results.push(score);
            }
        }

        results
    }
}

impl Drop for ContiguousVectors {
    fn drop(&mut self) {
        if !self.data.is_null() {
            let layout = Self::layout(self.dimension, self.capacity);
            // SAFETY: data was allocated with this layout
            unsafe {
                dealloc(self.data.cast::<u8>(), layout);
            }
        }
    }
}

// =============================================================================
// Batch Distance Computation
// =============================================================================

/// Computes multiple dot products in a single pass (cache-optimized).
///
/// Uses the auto-dispatched SIMD kernel for throughput.
#[must_use]
pub fn batch_dot_products_simd(vectors: &[&[f32]], query: &[f32]) -> Vec<f32> {
    vectors
        .iter()
        .map(|v| crate::simd_avx512::dot_product_auto(v, query))
        .collect()
}

/// Computes multiple cosine similarities in a single pass.
#[must_use]
pub fn batch_cosine_similarities(vectors: &[&[f32]], query: &[f32]) -> Vec<f32> {
    vectors
        .iter()
        .map(|v| crate::simd_avx512::cosine_similarity_auto(v, query))
        .collect()
}

// =============================================================================
// Tests (TDD - Tests First!)
// =============================================================================

#[cfg(test)]
#[allow(clippy::cast_precision_loss)]
mod tests {
    use super::*;

    const EPSILON: f32 = 1e-5;

    // =========================================================================
    // ContiguousVectors Tests
    // =========================================================================

    #[test]
    fn test_contiguous_vectors_new() {
        let cv = ContiguousVectors::new(768, 100);
        assert_eq!(cv.dimension(), 768);
        assert_eq!(cv.len(), 0);
        assert!(cv.is_empty());
        assert!(cv.capacity() >= 100);
    }

    #[test]
    fn test_contiguous_vectors_push() {
        let mut cv = ContiguousVectors::new(3, 10);
        let v1 = vec![1.0, 2.0, 3.0];
        let v2 = vec![4.0, 5.0, 6.0];

        cv.push(&v1);
        assert_eq!(cv.len(), 1);

        cv.push(&v2);
        assert_eq!(cv.len(), 2);

        let retrieved = cv.get(0).unwrap();
        assert_eq!(retrieved, &v1[..]);

        let retrieved = cv.get(1).unwrap();
        assert_eq!(retrieved, &v2[..]);
    }

    #[test]
    fn test_contiguous_vectors_push_batch() {
        let mut cv = ContiguousVectors::new(128, 100);
        let vectors: Vec<Vec<f32>> = (0..50)
            .map(|i| (0..128).map(|j| (i * 128 + j) as f32).collect())
            .collect();

        let refs: Vec<&[f32]> = vectors.iter().map(Vec::as_slice).collect();
        let added = cv.push_batch(refs.into_iter());

        assert_eq!(added, 50);
        assert_eq!(cv.len(), 50);
    }

    #[test]
    fn test_contiguous_vectors_grow() {
        let mut cv = ContiguousVectors::new(64, 16);
        let vector: Vec<f32> = (0..64).map(|i| i as f32).collect();

        // Push more than initial capacity
        for _ in 0..50 {
            cv.push(&vector);
        }

        assert_eq!(cv.len(), 50);
        assert!(cv.capacity() >= 50);

        // Verify data integrity
        for i in 0..50 {
            let retrieved = cv.get(i).unwrap();
            assert_eq!(retrieved, &vector[..]);
        }
    }

    #[test]
    fn test_contiguous_vectors_get_out_of_bounds() {
        let cv = ContiguousVectors::new(3, 10);
        assert!(cv.get(0).is_none());
        assert!(cv.get(100).is_none());
    }

    #[test]
    #[should_panic(expected = "dimension mismatch")]
    fn test_contiguous_vectors_dimension_mismatch() {
        let mut cv = ContiguousVectors::new(3, 10);
        cv.push(&[1.0, 2.0]); // Wrong dimension
    }

    #[test]
    fn test_contiguous_vectors_memory_bytes() {
        let cv = ContiguousVectors::new(768, 1000);
        let expected = 1000 * 768 * 4; // capacity * dimension * sizeof(f32)
        assert!(cv.memory_bytes() >= expected);
    }

    #[test]
    fn test_contiguous_vectors_prefetch() {
        let mut cv = ContiguousVectors::new(64, 100);
        for i in 0..50 {
            let v: Vec<f32> = (0..64).map(|j| (i * 64 + j) as f32).collect();
            cv.push(&v);
        }

        // Should not panic
        cv.prefetch(0);
        cv.prefetch(25);
        cv.prefetch(49);
        cv.prefetch(100); // Out of bounds - should be no-op
    }

    #[test]
    fn test_contiguous_vectors_dot_product() {
        let mut cv = ContiguousVectors::new(3, 10);
        cv.push(&[1.0, 0.0, 0.0]);
        cv.push(&[0.0, 1.0, 0.0]);

        let query = vec![1.0, 0.0, 0.0];

        let dp0 = cv.dot_product(0, &query).unwrap();
        assert!((dp0 - 1.0).abs() < EPSILON);

        let dp1 = cv.dot_product(1, &query).unwrap();
        assert!((dp1 - 0.0).abs() < EPSILON);
    }

    #[test]
    fn test_contiguous_vectors_batch_dot_products() {
        let mut cv = ContiguousVectors::new(64, 100);

        // Add normalized vectors
        for i in 0..50 {
            let mut v: Vec<f32> = (0..64).map(|j| ((i + j) % 10) as f32).collect();
            let norm: f32 = v.iter().map(|x| x * x).sum::<f32>().sqrt();
            if norm > 0.0 {
                for x in &mut v {
                    *x /= norm;
                }
            }
            cv.push(&v);
        }

        let query: Vec<f32> = (0..64).map(|i| i as f32 / 64.0).collect();
        let indices: Vec<usize> = (0..50).collect();

        let results = cv.batch_dot_products(&indices, &query);
        assert_eq!(results.len(), 50);
    }

    // =========================================================================
    // Batch Distance Tests
    // =========================================================================

    #[test]
    fn test_batch_dot_products_simd() {
        let v1 = vec![1.0, 0.0, 0.0];
        let v2 = vec![0.0, 1.0, 0.0];
        let v3 = vec![0.5, 0.5, 0.0];
        let query = vec![1.0, 0.0, 0.0];

        let vectors: Vec<&[f32]> = vec![&v1, &v2, &v3];
        let results = batch_dot_products_simd(&vectors, &query);

        assert_eq!(results.len(), 3);
        assert!((results[0] - 1.0).abs() < EPSILON);
        assert!((results[1] - 0.0).abs() < EPSILON);
        assert!((results[2] - 0.5).abs() < EPSILON);
    }

    #[test]
    fn test_batch_cosine_similarities() {
        let v1 = vec![1.0, 0.0, 0.0];
        let v2 = vec![0.0, 1.0, 0.0];
        let query = vec![1.0, 0.0, 0.0];

        let vectors: Vec<&[f32]> = vec![&v1, &v2];
        let results = batch_cosine_similarities(&vectors, &query);

        assert_eq!(results.len(), 2);
        assert!((results[0] - 1.0).abs() < EPSILON); // Same direction
        assert!((results[1] - 0.0).abs() < EPSILON); // Orthogonal
    }

    // =========================================================================
    // Performance-Critical Tests
    // =========================================================================

    #[test]
    fn test_contiguous_large_dimension() {
        // Test with BERT-like dimensions (768D)
        let mut cv = ContiguousVectors::new(768, 1000);

        for i in 0..100 {
            let v: Vec<f32> = (0..768).map(|j| ((i + j) % 100) as f32 / 100.0).collect();
            cv.push(&v);
        }

        assert_eq!(cv.len(), 100);

        // Verify random access works
        let v50 = cv.get(50).unwrap();
        assert_eq!(v50.len(), 768);
    }

    #[test]
    fn test_contiguous_gpt4_dimension() {
        // Test with OpenAI-style embedding dimensions (1536D, e.g.
        // text-embedding-ada-002)
        let mut cv = ContiguousVectors::new(1536, 100);

        for i in 0..20 {
            let v: Vec<f32> = (0..1536).map(|j| ((i + j) % 100) as f32 / 100.0).collect();
            cv.push(&v);
        }

        assert_eq!(cv.len(), 20);
        assert_eq!(cv.dimension(), 1536);
    }

    // =========================================================================
    // Safety: get_unchecked bounds check tests (TDD)
    // =========================================================================

    #[test]
    fn test_get_unchecked_valid_index() {
        // Arrange
        let mut cv = ContiguousVectors::new(3, 10);
        cv.push(&[1.0, 2.0, 3.0]);
        cv.push(&[4.0, 5.0, 6.0]);

        // Act - Valid indices should work
        let v0 = unsafe { cv.get_unchecked(0) };
        let v1 = unsafe { cv.get_unchecked(1) };

        // Assert
        assert_eq!(v0, &[1.0, 2.0, 3.0]);
        assert_eq!(v1, &[4.0, 5.0, 6.0]);
    }

    #[test]
    #[cfg(debug_assertions)]
    #[should_panic(expected = "index out of bounds")]
    fn test_get_unchecked_panics_on_invalid_index_in_debug() {
        // Arrange
        let mut cv = ContiguousVectors::new(3, 10);
        cv.push(&[1.0, 2.0, 3.0]);

        // Act - Out of bounds index should panic in debug mode
        let _ = unsafe { cv.get_unchecked(5) };
    }

    #[test]
    #[cfg(debug_assertions)]
    #[should_panic(expected = "index out of bounds")]
    fn test_get_unchecked_panics_on_boundary_index_in_debug() {
        // Arrange
        let mut cv = ContiguousVectors::new(3, 10);
        cv.push(&[1.0, 2.0, 3.0]);
        cv.push(&[4.0, 5.0, 6.0]);

        // Act - Index == count should panic (off by one)
        let _ = unsafe { cv.get_unchecked(2) };
    }

    // =========================================================================
    // P2 Audit: Resize panic-safety tests
    // =========================================================================

    #[test]
    fn test_resize_preserves_data_integrity() {
        // Arrange
        let mut cv = ContiguousVectors::new(64, 16);
        let vectors: Vec<Vec<f32>> = (0..10)
            .map(|i| (0..64).map(|j| (i * 64 + j) as f32).collect())
            .collect();

        for v in &vectors {
            cv.push(v);
        }

        // Act - Force resize by adding more vectors
        for i in 10..100 {
            let v: Vec<f32> = (0..64).map(|j| (i * 64 + j) as f32).collect();
            cv.push(&v);
        }

        // Assert - Original vectors should be intact
        for (i, expected) in vectors.iter().enumerate() {
            let actual = cv.get(i).expect("Vector should exist");
            assert_eq!(
                actual,
                expected.as_slice(),
                "Vector {i} corrupted after resize"
            );
        }
    }

    #[test]
    fn test_resize_multiple_times() {
        // Arrange - Start with minimal capacity
        let mut cv = ContiguousVectors::new(128, 16);

        // Act - Trigger multiple resizes
        for i in 0..500 {
            let v: Vec<f32> = (0..128).map(|j| (i * 128 + j) as f32).collect();
            cv.push(&v);
        }

        // Assert
        assert_eq!(cv.len(), 500);
        assert!(cv.capacity() >= 500);

        // Verify first and last vectors
        let first = cv.get(0).unwrap();
        assert!((first[0] - 0.0).abs() < f32::EPSILON);

        let last = cv.get(499).unwrap();
        #[allow(clippy::cast_precision_loss)]
        let expected = (499 * 128) as f32;
        assert!((last[0] - expected).abs() < f32::EPSILON);
    }

    #[test]
    fn test_drop_after_resize_no_leak() {
        // Arrange - Create and resize multiple times
        for _ in 0..10 {
            let mut cv = ContiguousVectors::new(256, 8);

            // Trigger multiple resizes
            for i in 0..100 {
                let v: Vec<f32> = (0..256).map(|j| (i + j) as f32).collect();
                cv.push(&v);
            }

            // cv is dropped here - should not leak memory
        }

        // If we get here without memory issues, the test passes
        // Note: In a real scenario, use tools like Valgrind or Miri to verify
    }

    #[test]
    fn test_ensure_capacity_idempotent() {
        // Arrange
        let mut cv = ContiguousVectors::new(64, 100);
        cv.push(&[1.0; 64]);

        let initial_capacity = cv.capacity();

        // Act - Call ensure_capacity multiple times with same value
        cv.ensure_capacity(50);
        cv.ensure_capacity(50);
        cv.ensure_capacity(50);

        // Assert - Capacity should not change
        assert_eq!(cv.capacity(), initial_capacity);
        assert_eq!(cv.len(), 1);
    }
}