scirs2_metrics/optimization/advanced_memory_optimization.rs

//! Advanced memory optimization for GPU acceleration
//!
//! This module provides sophisticated memory management techniques for GPU-accelerated
//! metrics computation, including memory pooling, prefetching, and adaptive allocation.
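//!
//! # Example
//!
//! A minimal usage sketch of the pool API defined below. The module path in
//! the `use` line is assumed from this file's location, so the example is
//! marked `ignore` rather than compiled as a doctest.
//!
//! ```ignore
//! use scirs2_metrics::optimization::advanced_memory_optimization::{
//!     AdvancedMemoryPool, BlockType, MemoryPoolConfig,
//! };
//!
//! let pool = AdvancedMemoryPool::new(MemoryPoolConfig::default());
//!
//! // Allocate a 4 KiB input buffer, then return it to the pool.
//! let block = pool.allocate(4096, BlockType::InputData).unwrap();
//! pool.deallocate(block).unwrap();
//!
//! // Inspect pool health after the round trip.
//! let stats = pool.get_stats();
//! assert_eq!(stats.allocation_count, stats.deallocation_count);
//! ```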

#![allow(clippy::too_many_arguments)]
#![allow(dead_code)]

use crate::error::{MetricsError, Result};
use std::collections::{HashMap, VecDeque};
use std::sync::{Arc, Mutex, RwLock};
use std::time::{Duration, Instant};

/// Advanced GPU memory pool with intelligent allocation strategies
#[derive(Debug)]
pub struct AdvancedMemoryPool {
    /// Free memory blocks categorized by size
    free_blocks: Arc<Mutex<HashMap<usize, VecDeque<MemoryBlock>>>>,
    /// Allocated blocks for tracking
    allocated_blocks: Arc<RwLock<HashMap<usize, AllocatedBlock>>>,
    /// Memory usage statistics
    stats: Arc<Mutex<MemoryStats>>,
    /// Pool configuration
    config: MemoryPoolConfig,
    /// Allocation strategy
    strategy: AllocationStrategy,
    /// Memory prefetcher for predictive allocation
    prefetcher: MemoryPrefetcher,
}

/// Memory block representation
#[derive(Debug, Clone)]
pub struct MemoryBlock {
    /// Block identifier
    pub id: usize,
    /// Size in bytes
    pub size: usize,
    /// GPU device pointer (simulated as usize)
    pub device_ptr: usize,
    /// Last access time for LRU
    pub last_accessed: Instant,
    /// Block type and purpose
    pub blocktype: BlockType,
    /// Reference count for shared usage
    pub ref_count: usize,
}

/// Allocated block tracking
#[derive(Debug, Clone)]
pub struct AllocatedBlock {
    /// Original block
    pub block: MemoryBlock,
    /// Allocation timestamp
    pub allocated_at: Instant,
    /// Expected lifetime
    pub expected_lifetime: Option<Duration>,
    /// Usage pattern
    pub usage_pattern: UsagePattern,
}

/// Memory usage statistics
#[derive(Debug, Default, Clone)]
pub struct MemoryStats {
    /// Total allocated memory
    pub total_allocated: usize,
    /// Peak memory usage
    pub peak_usage: usize,
    /// Number of allocations
    pub allocation_count: u64,
    /// Number of deallocations
    pub deallocation_count: u64,
    /// Cache hit rate
    pub cache_hit_rate: f64,
    /// Fragmentation ratio
    pub fragmentation_ratio: f64,
    /// Average allocation size
    pub avg_allocation_size: f64,
    /// Memory efficiency score
    pub efficiency_score: f64,
}

/// Memory pool configuration
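///
/// Individual fields can be overridden with struct-update syntax, as the
/// alignment test at the bottom of this file does; for example:
///
/// ```ignore
/// let config = MemoryPoolConfig {
///     max_pool_size: 256 * 1024 * 1024, // cap the pool at 256 MB
///     alignment: 512,                   // align blocks to 512 bytes
///     ..Default::default()
/// };
/// ```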
#[derive(Debug, Clone)]
pub struct MemoryPoolConfig {
    /// Maximum pool size in bytes
    pub max_pool_size: usize,
    /// Minimum block size
    pub min_block_size: usize,
    /// Block size alignment
    pub alignment: usize,
    /// Enable memory coalescing
    pub enable_coalescing: bool,
    /// Garbage collection threshold
    pub gc_threshold: f64,
    /// Prefetch lookahead window
    pub prefetch_window: usize,
    /// Enable zero-copy optimizations
    pub enable_zero_copy: bool,
}

/// Block type categorization
#[derive(Debug, Clone, PartialEq)]
pub enum BlockType {
    /// Input data arrays
    InputData,
    /// Output result arrays
    OutputData,
    /// Intermediate computation buffers
    IntermediateBuffer,
    /// Kernel parameters
    KernelParams,
    /// Shared memory blocks
    SharedMemory,
    /// Texture memory for cached reads
    TextureMemory,
}

/// Memory allocation strategy
#[derive(Debug, Clone)]
pub enum AllocationStrategy {
    /// First-fit allocation
    FirstFit,
    /// Best-fit allocation (minimize fragmentation)
    BestFit,
    /// Worst-fit allocation (keep large blocks)
    WorstFit,
    /// Buddy system allocation
    BuddySystem,
    /// Adaptive strategy based on usage patterns
    Adaptive(AdaptiveStrategy),
}

/// Adaptive allocation strategy configuration
#[derive(Debug, Clone)]
pub struct AdaptiveStrategy {
    /// Strategy switching threshold
    pub switch_threshold: f64,
    /// Historical window size for analysis
    pub history_window: usize,
    /// Performance weight factors
    pub weights: StrategyWeights,
}

/// Strategy performance weights
#[derive(Debug, Clone)]
pub struct StrategyWeights {
    /// Weight for allocation speed
    pub speed_weight: f64,
    /// Weight for memory efficiency
    pub efficiency_weight: f64,
    /// Weight for fragmentation avoidance
    pub fragmentation_weight: f64,
}

/// Usage pattern analysis
#[derive(Debug, Clone)]
pub enum UsagePattern {
    /// Sequential access pattern
    Sequential,
    /// Random access pattern
    Random,
    /// Streaming pattern (write-once, read-many)
    Streaming,
    /// Temporary computation buffer
    Temporary,
    /// Long-lived persistent data
    Persistent,
}

/// Memory prefetcher for predictive allocation
#[derive(Debug)]
pub struct MemoryPrefetcher {
    /// Allocation history for pattern analysis
    allocation_history: VecDeque<AllocationRecord>,
    /// Predicted future allocations
    predictions: Vec<PredictedAllocation>,
    /// Pattern recognition engine
    pattern_engine: PatternEngine,
    /// Prefetch configuration
    config: PrefetchConfig,
}

/// Allocation record for pattern analysis
#[derive(Debug, Clone)]
pub struct AllocationRecord {
    /// Allocation size
    pub size: usize,
    /// Block type
    pub blocktype: BlockType,
    /// Timestamp
    pub timestamp: Instant,
    /// Duration until deallocation
    pub lifetime: Option<Duration>,
}

/// Predicted allocation
#[derive(Debug, Clone)]
pub struct PredictedAllocation {
    /// Predicted size
    pub size: usize,
    /// Predicted type
    pub blocktype: BlockType,
    /// Confidence score (0.0 to 1.0)
    pub confidence: f64,
    /// Expected time until allocation
    pub time_until: Duration,
}

/// Pattern recognition engine
#[derive(Debug)]
pub struct PatternEngine {
    /// Learned patterns
    patterns: Vec<AllocationPattern>,
    /// Model accuracy metrics
    accuracy: f64,
    /// Training data size
    training_samples: usize,
}

/// Allocation pattern
#[derive(Debug, Clone)]
pub struct AllocationPattern {
    /// Pattern signature
    pub signature: Vec<usize>,
    /// Frequency of occurrence
    pub frequency: u32,
    /// Prediction accuracy
    pub accuracy: f64,
    /// Associated block types
    pub block_types: Vec<BlockType>,
}

/// Prefetch configuration
#[derive(Debug, Clone)]
pub struct PrefetchConfig {
    /// Enable predictive prefetching
    pub enable_prediction: bool,
    /// Minimum confidence threshold for prefetch
    pub confidence_threshold: f64,
    /// Maximum prefetch lookahead
    pub max_lookahead: Duration,
    /// Prefetch buffer size limit
    pub buffer_size_limit: usize,
}

impl Default for MemoryPoolConfig {
    fn default() -> Self {
        Self {
            max_pool_size: 1024 * 1024 * 1024, // 1GB default
            min_block_size: 1024,              // 1KB minimum
            alignment: 256,                    // 256-byte alignment for GPU
            enable_coalescing: true,
            gc_threshold: 0.8, // Trigger GC at 80% usage
            prefetch_window: 10,
            enable_zero_copy: true,
        }
    }
}

impl Default for AdaptiveStrategy {
    fn default() -> Self {
        Self {
            switch_threshold: 0.1,
            history_window: 1000,
            weights: StrategyWeights {
                speed_weight: 0.4,
                efficiency_weight: 0.4,
                fragmentation_weight: 0.2,
            },
        }
    }
}

impl Default for PrefetchConfig {
    fn default() -> Self {
        Self {
            enable_prediction: true,
            confidence_threshold: 0.75,
            max_lookahead: Duration::from_millis(100),
            buffer_size_limit: 64 * 1024 * 1024, // 64MB prefetch buffer
        }
    }
}

impl AdvancedMemoryPool {
    /// Create new advanced memory pool
    pub fn new(config: MemoryPoolConfig) -> Self {
        Self {
            free_blocks: Arc::new(Mutex::new(HashMap::new())),
            allocated_blocks: Arc::new(RwLock::new(HashMap::new())),
            stats: Arc::new(Mutex::new(MemoryStats::default())),
            strategy: AllocationStrategy::Adaptive(AdaptiveStrategy::default()),
            prefetcher: MemoryPrefetcher::new(PrefetchConfig::default()),
            config,
        }
    }

    /// Allocate memory block with intelligent sizing
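    ///
    /// Requests beyond the configured `max_pool_size` fail instead of
    /// overcommitting the simulated device. A hedged sketch of the error
    /// path, under the same assumed module path as the crate-level example:
    ///
    /// ```ignore
    /// let pool = AdvancedMemoryPool::new(MemoryPoolConfig {
    ///     max_pool_size: 8 * 1024, // deliberately tiny pool
    ///     ..Default::default()
    /// });
    ///
    /// // A request larger than the pool capacity is rejected.
    /// assert!(pool.allocate(64 * 1024, BlockType::InputData).is_err());
    /// ```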
    pub fn allocate(&self, size: usize, blocktype: BlockType) -> Result<MemoryBlock> {
        let aligned_size = self.align_size(size);

        // Check if prefetcher has a suitable block ready
        if let Some(block) = self
            .prefetcher
            .get_predicted_block(aligned_size, &blocktype)?
        {
            self.record_allocation(&block)?;
            return Ok(block);
        }

        // Perform allocation based on strategy
        let block = match &self.strategy {
            AllocationStrategy::FirstFit => self.allocate_first_fit(aligned_size, blocktype)?,
            AllocationStrategy::BestFit => self.allocate_best_fit(aligned_size, blocktype)?,
            AllocationStrategy::WorstFit => self.allocate_worst_fit(aligned_size, blocktype)?,
            AllocationStrategy::BuddySystem => {
                self.allocate_buddy_system(aligned_size, blocktype)?
            }
            AllocationStrategy::Adaptive(strategy) => {
                self.allocate_adaptive(aligned_size, blocktype, strategy)?
            }
        };

        self.record_allocation(&block)?;
        self.update_prefetcher(&block);

        Ok(block)
    }

    /// Deallocate memory block
    pub fn deallocate(&self, block: MemoryBlock) -> Result<()> {
        // Record deallocation for statistics
        {
            let mut stats = self.stats.lock().unwrap();
            stats.deallocation_count += 1;
            stats.total_allocated = stats.total_allocated.saturating_sub(block.size);
        }

        // Remove from allocated blocks
        {
            let mut allocated = self.allocated_blocks.write().unwrap();
            allocated.remove(&block.id);
        }

        // Return to free pool or coalesce with adjacent blocks
        if self.config.enable_coalescing {
            self.coalesce_and_return(block)?;
        } else {
            self.return_to_pool(block)?;
        }

        // Trigger garbage collection if needed
        if self.should_run_gc()? {
            self.run_garbage_collection()?;
        }

        Ok(())
    }

    /// Get current memory statistics
    pub fn get_stats(&self) -> MemoryStats {
        let stats = self.stats.lock().unwrap();
        stats.clone()
    }

    /// Optimize memory layout for better performance
    pub fn optimize_layout(&self) -> Result<()> {
        // Analyze current allocation patterns
        let patterns = self.analyze_allocation_patterns()?;

        // Suggest layout optimizations
        let optimizations = self.suggest_optimizations(&patterns)?;

        // Apply optimizations if beneficial
        for optimization in optimizations {
            self.apply_optimization(optimization)?;
        }

        Ok(())
    }

    /// Benchmark different allocation strategies
    pub fn benchmark_strategies(
        &self,
        workload: &[AllocationRequest],
    ) -> Result<StrategyBenchmark> {
        let mut results = HashMap::new();

        for strategy in &[
            AllocationStrategy::FirstFit,
            AllocationStrategy::BestFit,
            AllocationStrategy::WorstFit,
            AllocationStrategy::BuddySystem,
        ] {
            let metrics = self.benchmark_strategy(strategy, workload)?;
            results.insert(format!("{:?}", strategy), metrics);
        }

        Ok(StrategyBenchmark { results })
    }

    // Private implementation methods

    fn align_size(&self, size: usize) -> usize {
        // Round up to the next multiple of the configured alignment,
        // e.g. 100 bytes with 256-byte alignment becomes 256.
        ((size + self.config.alignment - 1) / self.config.alignment) * self.config.alignment
    }

    fn allocate_first_fit(&self, size: usize, blocktype: BlockType) -> Result<MemoryBlock> {
        let mut free_blocks = self.free_blocks.lock().unwrap();

        // Find the first size class large enough to satisfy the request.
        // (HashMap iteration order is arbitrary, so "first fit" here means
        // the first suitable bucket encountered.)
        let candidate = free_blocks
            .iter()
            .find(|(block_size, blocks)| **block_size >= size && !blocks.is_empty())
            .map(|(block_size, _)| *block_size);

        if let Some(block_size) = candidate {
            let mut block = free_blocks
                .get_mut(&block_size)
                .and_then(|blocks| blocks.pop_front())
                .expect("candidate bucket is non-empty");
            block.blocktype = blocktype;
            block.last_accessed = Instant::now();

            // Split the block if it is significantly larger than requested,
            // filing the remainder under the bucket that matches its new size
            // (re-inserting it under the original size would corrupt the
            // size-indexed free list).
            if block_size > size * 2 {
                let remaining_size = block_size - size;
                let remaining = MemoryBlock {
                    id: self.generate_block_id(),
                    size: remaining_size,
                    device_ptr: block.device_ptr + size,
                    last_accessed: Instant::now(),
                    blocktype: BlockType::IntermediateBuffer,
                    ref_count: 0,
                };

                free_blocks
                    .entry(remaining_size)
                    .or_default()
                    .push_back(remaining);
            }

            block.size = size;
            return Ok(block);
        }

        // No suitable block found, allocate new
        self.allocate_new_block(size, blocktype)
    }

    fn allocate_best_fit(&self, size: usize, blocktype: BlockType) -> Result<MemoryBlock> {
        let mut free_blocks = self.free_blocks.lock().unwrap();

        // Find the size class with minimum waste. All blocks within a bucket
        // share the same size, so tracking the bucket key is sufficient.
        let mut best_fit: Option<usize> = None;
        let mut best_waste = usize::MAX;

        for (block_size, blocks) in free_blocks.iter() {
            if *block_size >= size && !blocks.is_empty() {
                let waste = *block_size - size;
                if waste < best_waste {
                    best_waste = waste;
                    best_fit = Some(*block_size);
                }
            }
        }

        if let Some(block_size) = best_fit {
            if let Some(mut block) = free_blocks
                .get_mut(&block_size)
                .and_then(|blocks| blocks.pop_front())
            {
                block.blocktype = blocktype;
                block.last_accessed = Instant::now();

                // Handle block splitting for best fit
                if block_size > size {
                    let remaining_size = block_size - size;
                    if remaining_size >= self.config.min_block_size {
                        let remaining = MemoryBlock {
                            id: self.generate_block_id(),
                            size: remaining_size,
                            device_ptr: block.device_ptr + size,
                            last_accessed: Instant::now(),
                            blocktype: BlockType::IntermediateBuffer,
                            ref_count: 0,
                        };

                        free_blocks
                            .entry(remaining_size)
                            .or_default()
                            .push_back(remaining);
                    }
                }

                block.size = size;
                return Ok(block);
            }
        }

        self.allocate_new_block(size, blocktype)
    }

    fn allocate_worst_fit(&self, size: usize, blocktype: BlockType) -> Result<MemoryBlock> {
        // Simplified implementation: rather than tracking the largest
        // available block, fall back to a fresh allocation.
        self.allocate_new_block(size, blocktype)
    }

    fn allocate_buddy_system(&self, size: usize, blocktype: BlockType) -> Result<MemoryBlock> {
        // Buddy systems allocate in power-of-two sizes; round up accordingly.
        let buddy_size = size.next_power_of_two();
        self.allocate_new_block(buddy_size, blocktype)
    }

    fn allocate_adaptive(
        &self,
        size: usize,
        blocktype: BlockType,
        _strategy: &AdaptiveStrategy,
    ) -> Result<MemoryBlock> {
        // Snapshot current performance metrics, releasing the stats lock
        // before delegating to an allocator that may need it again.
        let stats = self.stats.lock().unwrap();
        let fragmentation = stats.fragmentation_ratio;
        let efficiency = stats.efficiency_score;
        drop(stats);

        // Choose a strategy based on current conditions
        let chosen_strategy = if fragmentation > 0.3 {
            AllocationStrategy::BestFit
        } else if efficiency < 0.7 {
            AllocationStrategy::FirstFit
        } else {
            AllocationStrategy::BuddySystem
        };

        match chosen_strategy {
            AllocationStrategy::FirstFit => self.allocate_first_fit(size, blocktype),
            AllocationStrategy::BestFit => self.allocate_best_fit(size, blocktype),
            AllocationStrategy::BuddySystem => self.allocate_buddy_system(size, blocktype),
            _ => self.allocate_new_block(size, blocktype),
        }
    }

    fn allocate_new_block(&self, size: usize, blocktype: BlockType) -> Result<MemoryBlock> {
        // Simulate GPU memory allocation
        let device_ptr = self.simulate_gpu_malloc(size)?;

        let block = MemoryBlock {
            id: self.generate_block_id(),
            size,
            device_ptr,
            last_accessed: Instant::now(),
            blocktype,
            ref_count: 1,
        };

        Ok(block)
    }

    fn simulate_gpu_malloc(&self, size: usize) -> Result<usize> {
        // Simulated GPU allocation; a real implementation would call CUDA/OpenCL.
        // An atomic cursor replaces the original `static mut`, which is unsound
        // under concurrent access (and rejected outright in the 2024 edition).
        use std::sync::atomic::{AtomicUsize, Ordering};
        static NEXT_PTR: AtomicUsize = AtomicUsize::new(0x1000_0000); // Simulated GPU address base

        // Enforce this pool's capacity against its own outstanding-bytes
        // counter rather than the process-wide pointer cursor, so multiple
        // pools (and parallel tests) do not exhaust each other.
        {
            let stats = self.stats.lock().unwrap();
            if stats.total_allocated + size > self.config.max_pool_size {
                return Err(MetricsError::ComputationError(
                    "GPU memory exhausted".to_string(),
                ));
            }
        }

        Ok(NEXT_PTR.fetch_add(size, Ordering::Relaxed))
    }

    fn generate_block_id(&self) -> usize {
        use std::sync::atomic::{AtomicUsize, Ordering};
        static NEXT_ID: AtomicUsize = AtomicUsize::new(1);
        NEXT_ID.fetch_add(1, Ordering::Relaxed)
    }

    fn record_allocation(&self, block: &MemoryBlock) -> Result<()> {
        // Record in allocated blocks
        {
            let mut allocated = self.allocated_blocks.write().unwrap();
            allocated.insert(
                block.id,
                AllocatedBlock {
                    block: block.clone(),
                    allocated_at: Instant::now(),
                    expected_lifetime: None,
                    usage_pattern: UsagePattern::Sequential, // Could be analyzed
                },
            );
        }

        // Update statistics
        {
            let mut stats = self.stats.lock().unwrap();
            stats.allocation_count += 1;
            stats.total_allocated += block.size;
            if stats.total_allocated > stats.peak_usage {
                stats.peak_usage = stats.total_allocated;
            }

            // Update the average allocation size as an incremental mean;
            // dividing outstanding bytes by the allocation count would
            // underestimate once deallocations shrink `total_allocated`.
            let n = stats.allocation_count as f64;
            stats.avg_allocation_size += (block.size as f64 - stats.avg_allocation_size) / n;
        }

        Ok(())
    }

    fn update_prefetcher(&self, _block: &MemoryBlock) {
        // Update prefetcher with allocation information for pattern learning.
        // Implementation would analyze patterns and update predictions.
    }

    fn coalesce_and_return(&self, block: MemoryBlock) -> Result<()> {
        // Try to coalesce with adjacent free blocks.
        // Simplified implementation; in practice this would need more
        // sophisticated buddy tracking.
        self.return_to_pool(block)
    }

    fn return_to_pool(&self, block: MemoryBlock) -> Result<()> {
        let mut free_blocks = self.free_blocks.lock().unwrap();
        free_blocks.entry(block.size).or_default().push_back(block);
        Ok(())
    }

    fn should_run_gc(&self) -> Result<bool> {
        let stats = self.stats.lock().unwrap();
        let usage_ratio = stats.total_allocated as f64 / self.config.max_pool_size as f64;
        Ok(usage_ratio > self.config.gc_threshold)
    }

    fn run_garbage_collection(&self) -> Result<()> {
        // Garbage collection would remove unused blocks, coalesce adjacent
        // free blocks, and refresh fragmentation statistics.

        // Compute derived metrics *before* taking the stats lock:
        // `calculate_efficiency` locks `stats` itself, and std's Mutex is not
        // reentrant, so locking first would deadlock.
        let fragmentation = self.calculate_fragmentation()?;
        let efficiency = self.calculate_efficiency()?;

        let mut stats = self.stats.lock().unwrap();
        stats.fragmentation_ratio = fragmentation;
        stats.efficiency_score = efficiency;

        Ok(())
    }

    fn calculate_fragmentation(&self) -> Result<f64> {
        // Placeholder fragmentation ratio; a real implementation would compare
        // free-list geometry against total free bytes.
        Ok(0.1) // 10% fragmentation as example
    }

    fn calculate_efficiency(&self) -> Result<f64> {
        // Memory utilization efficiency: outstanding bytes relative to the peak.
        let stats = self.stats.lock().unwrap();
        if stats.peak_usage == 0 {
            Ok(1.0)
        } else {
            Ok(stats.total_allocated as f64 / stats.peak_usage as f64)
        }
    }

    fn analyze_allocation_patterns(&self) -> Result<Vec<AllocationPattern>> {
        // Analyze historical allocation patterns for optimization
        Ok(vec![]) // Placeholder
    }

    fn suggest_optimizations(
        &self,
        _patterns: &[AllocationPattern],
    ) -> Result<Vec<OptimizationType>> {
        // Suggest memory layout optimizations based on observed patterns
        Ok(vec![]) // Placeholder
    }

    fn apply_optimization(&self, _optimization: OptimizationType) -> Result<()> {
        // Apply a specific optimization
        Ok(())
    }

    fn benchmark_strategy(
        &self,
        _strategy: &AllocationStrategy,
        _workload: &[AllocationRequest],
    ) -> Result<StrategyMetrics> {
        // Benchmark a specific allocation strategy
        Ok(StrategyMetrics::default())
    }
}

impl MemoryPrefetcher {
    fn new(config: PrefetchConfig) -> Self {
        Self {
            allocation_history: VecDeque::new(),
            predictions: Vec::new(),
            pattern_engine: PatternEngine {
                patterns: Vec::new(),
                accuracy: 0.0,
                training_samples: 0,
            },
            config,
        }
    }

    fn get_predicted_block(
        &self,
        _size: usize,
        _blocktype: &BlockType,
    ) -> Result<Option<MemoryBlock>> {
        // Check whether a predicted block is ready. A full implementation
        // would consult `self.predictions` for a suitable match.
        Ok(None)
    }
}

/// Optimization type enum
#[derive(Debug, Clone)]
pub enum OptimizationType {
    MemoryCoalescing,
    BlockReordering,
    PrefetchOptimization,
    AllocationStrategyChange,
}

/// Allocation request for benchmarking
#[derive(Debug, Clone)]
pub struct AllocationRequest {
    pub size: usize,
    pub blocktype: BlockType,
    pub lifetime: Duration,
}

/// Strategy benchmark results
#[derive(Debug)]
pub struct StrategyBenchmark {
    pub results: HashMap<String, StrategyMetrics>,
}

/// Strategy performance metrics
#[derive(Debug, Default)]
pub struct StrategyMetrics {
    pub allocation_speed: f64,
    pub fragmentation_ratio: f64,
    pub memory_efficiency: f64,
    pub cache_hit_rate: f64,
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_memory_pool_creation() {
        let config = MemoryPoolConfig::default();
        let pool = AdvancedMemoryPool::new(config);

        let stats = pool.get_stats();
        assert_eq!(stats.total_allocated, 0);
        assert_eq!(stats.allocation_count, 0);
    }

    #[test]
    fn test_basic_allocation() {
        let pool = AdvancedMemoryPool::new(MemoryPoolConfig::default());

        let block = pool.allocate(1024, BlockType::InputData).unwrap();
        assert_eq!(block.size, 1024);
        assert_eq!(block.blocktype, BlockType::InputData);

        let stats = pool.get_stats();
        assert_eq!(stats.allocation_count, 1);
        assert!(stats.total_allocated >= 1024);
    }

    #[test]
    fn test_allocation_deallocation_cycle() {
        let pool = AdvancedMemoryPool::new(MemoryPoolConfig::default());

        let block = pool.allocate(2048, BlockType::OutputData).unwrap();
        let _block_id = block.id;

        pool.deallocate(block).unwrap();

        let stats = pool.get_stats();
        assert_eq!(stats.deallocation_count, 1);
    }
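
    #[test]
    fn test_block_reuse_after_deallocation() {
        // A hedged sketch of pool recycling, assuming the simulated allocator
        // defined above: a freed block should be handed back (same simulated
        // device pointer) when a same-sized request follows a deallocation.
        let pool = AdvancedMemoryPool::new(MemoryPoolConfig::default());

        let first = pool.allocate(4096, BlockType::IntermediateBuffer).unwrap();
        let first_ptr = first.device_ptr;
        pool.deallocate(first).unwrap();

        // The freed block sits in the 4096-byte free bucket, so the next
        // request of the same size is served from the pool, not fresh memory.
        let second = pool.allocate(4096, BlockType::IntermediateBuffer).unwrap();
        assert_eq!(second.device_ptr, first_ptr);
        assert_eq!(second.size, 4096);
    }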

    #[test]
    fn test_memory_alignment() {
        let config = MemoryPoolConfig {
            alignment: 512,
            ..Default::default()
        };
        let pool = AdvancedMemoryPool::new(config);

        // Test that allocations are properly aligned
        let block = pool.allocate(100, BlockType::IntermediateBuffer).unwrap();
        assert_eq!(block.size % 512, 0);
    }

    #[test]
    #[ignore = "timeout"]
    fn test_strategy_benchmarking() {
        let pool = AdvancedMemoryPool::new(MemoryPoolConfig::default());

        let workload = vec![
            AllocationRequest {
                size: 1024,
                blocktype: BlockType::InputData,
                lifetime: Duration::from_millis(100),
            },
            AllocationRequest {
                size: 2048,
                blocktype: BlockType::OutputData,
                lifetime: Duration::from_millis(200),
            },
        ];

        let benchmark = pool.benchmark_strategies(&workload).unwrap();
        assert!(!benchmark.results.is_empty());
    }
}
823}