//! Advanced-optimized memory pool system for spatial algorithms
//!
//! This module provides advanced memory management strategies specifically
//! designed for spatial computing algorithms that perform frequent allocations.
//! The system includes object pools, arena allocators, and cache-aware
//! memory layouts to maximize performance.
//!
//! # Features
//!
//! - **Object pools**: Reusable pools for frequently allocated types
//! - **Arena allocators**: Block-based allocation for temporary objects
//! - **Cache-aware layouts**: Memory alignment for optimal cache performance
//! - **NUMA-aware allocation**: Memory placement for multi-socket systems
//! - **Zero-copy operations**: Minimize data movement and copying
//!
//! # Examples
//!
//! ```
//! use scirs2_spatial::memory_pool::{DistancePool, ClusteringArena};
//!
//! // Create a distance computation pool
//! let pool = DistancePool::new(1000);
//!
//! // Get a reusable distance buffer
//! let buffer = pool.get_distance_buffer(256);
//!
//! // Use buffer for computations...
//!
//! // Buffer is automatically returned to pool when dropped (RAII)
//! ```

use scirs2_core::ndarray::{s, Array2, ArrayViewMut1, ArrayViewMut2};
use std::alloc::{GlobalAlloc, Layout, System};
use std::collections::VecDeque;
use std::ptr::NonNull;
use std::sync::Mutex;

// Platform-specific NUMA imports
#[cfg(any(target_os = "linux", target_os = "android"))]
use libc;
#[cfg(target_os = "linux")]
use std::fs;

// Thread affinity for NUMA binding
use std::sync::atomic::Ordering;

// Add num_cpus for cross-platform CPU detection
// The num_cpus crate is available in dev-dependencies
#[cfg(test)]
use num_cpus;

// Fallback implementation for non-test builds
#[cfg(not(test))]
mod num_cpus {
    pub fn get() -> usize {
        std::thread::available_parallelism()
            .map(|n| n.get())
            .unwrap_or(4)
    }
}

/// Configuration for memory pool system
#[derive(Debug, Clone)]
pub struct MemoryPoolConfig {
    /// Maximum number of objects to keep in each pool
    pub max_pool_size: usize,
    /// Cache line size for alignment (typically 64 bytes)
    pub cache_line_size: usize,
    /// Enable NUMA-aware allocation strategies
    pub numa_aware: bool,
    /// Prefetch distance for memory access patterns
    pub prefetch_distance: usize,
    /// Block size for arena allocators
    pub arena_block_size: usize,
    /// NUMA node hint for allocation (-1 for automatic detection)
    pub numa_node_hint: i32,
    /// Enable automatic NUMA topology discovery
    pub auto_numa_discovery: bool,
    /// Enable thread-to-NUMA-node affinity binding
    pub enable_thread_affinity: bool,
    /// Enable memory warming (pre-touch pages)
    pub enable_memory_warming: bool,
    /// Size threshold for large object handling
    pub large_object_threshold: usize,
    /// Maximum memory usage before forced cleanup (in bytes)
    pub max_memory_usage: usize,
}

impl Default for MemoryPoolConfig {
    fn default() -> Self {
        Self {
            max_pool_size: 1000,
            cache_line_size: 64,
            numa_aware: true,
            prefetch_distance: 8,
            arena_block_size: 1024 * 1024, // 1MB blocks
            numa_node_hint: -1,            // Auto-detect
            auto_numa_discovery: true,
            enable_thread_affinity: true,
            enable_memory_warming: true,
            large_object_threshold: 64 * 1024,    // 64KB
            max_memory_usage: 1024 * 1024 * 1024, // 1GB default limit
        }
    }
}

/// Advanced-optimized distance computation memory pool
pub struct DistancePool {
    config: MemoryPoolConfig,
    distance_buffers: Mutex<VecDeque<Box<[f64]>>>,
    index_buffers: Mutex<VecDeque<Box<[usize]>>>,
    matrix_buffers: Mutex<VecDeque<Array2<f64>>>,
    large_buffers: Mutex<VecDeque<Box<[f64]>>>, // For large objects
    stats: PoolStatistics,
    memory_usage: std::sync::atomic::AtomicUsize, // Track total memory usage
    numa_node: std::sync::atomic::AtomicI32,      // Current NUMA node
}

impl DistancePool {
    /// Create a new distance computation pool
    pub fn new(capacity: usize) -> Self {
        Self::with_config(capacity, MemoryPoolConfig::default())
    }

    /// Create a pool with custom configuration
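    ///
    /// A minimal usage sketch (the field values below are illustrative, not
    /// recommendations):
    ///
    /// ```
    /// use scirs2_spatial::memory_pool::{DistancePool, MemoryPoolConfig};
    ///
    /// let config = MemoryPoolConfig {
    ///     max_pool_size: 128,
    ///     numa_aware: false,
    ///     ..MemoryPoolConfig::default()
    /// };
    /// let pool = DistancePool::with_config(64, config);
    /// let buffer = pool.get_distance_buffer(32);
    /// assert_eq!(buffer.len(), 32);
    /// ```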
    pub fn with_config(capacity: usize, config: MemoryPoolConfig) -> Self {
        let numa_node = if config.numa_aware && config.numa_node_hint >= 0 {
            config.numa_node_hint
        } else {
            Self::detect_numa_node()
        };

        Self {
            config,
            distance_buffers: Mutex::new(VecDeque::with_capacity(capacity)),
            index_buffers: Mutex::new(VecDeque::with_capacity(capacity)),
            matrix_buffers: Mutex::new(VecDeque::with_capacity(capacity / 4)), // Matrices are larger
            large_buffers: Mutex::new(VecDeque::with_capacity(capacity / 10)), // Large objects are rarer
            stats: PoolStatistics::new(),
            memory_usage: std::sync::atomic::AtomicUsize::new(0),
            numa_node: std::sync::atomic::AtomicI32::new(numa_node),
        }
    }

    /// Get a cache-aligned distance buffer
    pub fn get_distance_buffer(&self, size: usize) -> DistanceBuffer {
        // Check if this is a large object
        let buffer_size_bytes = size * std::mem::size_of::<f64>();
        let is_large = buffer_size_bytes > self.config.large_object_threshold;

        // Check memory usage limit
        let current_usage = self.memory_usage.load(std::sync::atomic::Ordering::Relaxed);
        if current_usage + buffer_size_bytes > self.config.max_memory_usage {
            self.cleanup_excess_memory();
        }

        let buffer = if is_large {
            self.get_large_buffer(size)
        } else {
            let mut buffers = self.distance_buffers.lock().unwrap();

            // Try to reuse an existing buffer of appropriate size
            for i in 0..buffers.len() {
                if buffers[i].len() >= size && buffers[i].len() <= size * 2 {
                    let buffer = buffers.remove(i).unwrap();
                    self.stats.record_hit();
                    // Count the reused buffer so the usage counter stays balanced
                    // with the subtraction performed when the buffer is returned.
                    self.memory_usage.fetch_add(
                        buffer.len() * std::mem::size_of::<f64>(),
                        std::sync::atomic::Ordering::Relaxed,
                    );
                    return DistanceBuffer::new(buffer, self);
                }
            }

            // Create new aligned buffer
            self.stats.record_miss();
            self.create_aligned_buffer(size)
        };

        // Track memory usage
        self.memory_usage
            .fetch_add(buffer_size_bytes, std::sync::atomic::Ordering::Relaxed);

        DistanceBuffer::new(buffer, self)
    }

    /// Get a buffer for large objects with special handling
    fn get_large_buffer(&self, size: usize) -> Box<[f64]> {
        let mut buffers = self.large_buffers.lock().unwrap();

        // For large buffers, be more strict about size matching
        for i in 0..buffers.len() {
            if buffers[i].len() == size {
                let buffer = buffers.remove(i).unwrap();
                self.stats.record_hit();
                return buffer;
            }
        }

        // Create new large buffer with NUMA awareness
        self.stats.record_miss();
        if self.config.numa_aware {
            self.create_numa_aligned_buffer(size)
        } else {
            self.create_aligned_buffer(size)
        }
    }

    /// Get an index buffer for storing indices
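    ///
    /// A small usage sketch (sizes are illustrative):
    ///
    /// ```
    /// use scirs2_spatial::memory_pool::DistancePool;
    ///
    /// let pool = DistancePool::new(16);
    /// let mut indices = pool.get_index_buffer(8);
    /// indices.as_mut_slice()[0] = 3;
    /// assert_eq!(indices.as_slice()[0], 3);
    /// ```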
    pub fn get_index_buffer(&self, size: usize) -> IndexBuffer {
        let mut buffers = self.index_buffers.lock().unwrap();

        // Try to reuse existing buffer
        for i in 0..buffers.len() {
            if buffers[i].len() >= size && buffers[i].len() <= size * 2 {
                let buffer = buffers.remove(i).unwrap();
                self.stats.record_hit();
                return IndexBuffer::new(buffer, self);
            }
        }

        // Create new buffer
        self.stats.record_miss();
        let new_buffer = vec![0usize; size].into_boxed_slice();
        IndexBuffer::new(new_buffer, self)
    }

    /// Get a distance matrix buffer
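    ///
    /// A short sketch (dimensions are illustrative):
    ///
    /// ```
    /// use scirs2_spatial::memory_pool::DistancePool;
    ///
    /// let pool = DistancePool::new(16);
    /// let mut matrix = pool.get_matrix_buffer(4, 4);
    /// matrix.fill(0.0);
    /// assert_eq!(matrix.dim(), (4, 4));
    /// ```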
    pub fn get_matrix_buffer(&self, rows: usize, cols: usize) -> MatrixBuffer {
        let mut buffers = self.matrix_buffers.lock().unwrap();

        // Try to reuse existing matrix
        for i in 0..buffers.len() {
            let (r, c) = buffers[i].dim();
            if r >= rows && c >= cols && r <= rows * 2 && c <= cols * 2 {
                let mut matrix = buffers.remove(i).unwrap();
                // Resize to exact dimensions needed
                matrix = matrix.slice_mut(s![..rows, ..cols]).to_owned();
                self.stats.record_hit();
                return MatrixBuffer::new(matrix, self);
            }
        }

        // Create new matrix
        self.stats.record_miss();
        let matrix = Array2::zeros((rows, cols));
        MatrixBuffer::new(matrix, self)
    }
    /// Create cache-aligned buffer for optimal SIMD performance
    fn create_aligned_buffer(&self, size: usize) -> Box<[f64]> {
        // Zero-sized layouts must not be passed to the system allocator
        if size == 0 {
            return Vec::new().into_boxed_slice();
        }

        let layout = Layout::from_size_align(
            size * std::mem::size_of::<f64>(),
            self.config.cache_line_size,
        )
        .unwrap();

        unsafe {
            let ptr = System.alloc(layout) as *mut f64;
            if ptr.is_null() {
                panic!("Failed to allocate aligned memory");
            }

            // Initialize to zero (optional memory warming)
            if self.config.enable_memory_warming {
                std::ptr::write_bytes(ptr, 0, size);
            }

            // Convert to boxed slice
            Box::from_raw(std::slice::from_raw_parts_mut(ptr, size))
        }
    }

    /// Create NUMA-aware aligned buffer with proper node binding
    fn create_numa_aligned_buffer(&self, size: usize) -> Box<[f64]> {
        let numa_node = self.numa_node.load(Ordering::Relaxed);

        #[cfg(target_os = "linux")]
        {
            if self.config.numa_aware && numa_node >= 0 {
                match Self::allocate_on_numa_node_linux(size, numa_node as u32) {
                    Ok(buffer) => {
                        if self.config.enable_memory_warming {
                            Self::warm_memory(&buffer);
                        }
                        return buffer;
                    }
                    Err(_) => {
                        // Fallback to regular allocation
                    }
                }
            }
        }

        #[cfg(target_os = "windows")]
        {
            if self.config.numa_aware && numa_node >= 0 {
                match Self::allocate_on_numa_node_windows(size, numa_node as u32) {
                    Ok(buffer) => {
                        if self.config.enable_memory_warming {
                            Self::warm_memory(&buffer);
                        }
                        return buffer;
                    }
                    Err(_) => {
                        // Fallback to regular allocation
                    }
                }
            }
        }

        // Fallback to regular aligned allocation
        let buffer = self.create_aligned_buffer(size);

        // Warm memory to encourage allocation on current NUMA node
        if self.config.enable_memory_warming {
            Self::warm_memory(&buffer);
        }

        buffer
    }

    /// Linux-specific NUMA-aware allocation (fallback without actual NUMA binding)
    #[cfg(target_os = "linux")]
    fn allocate_on_numa_node_linux(
        size: usize,
        _node: u32,
    ) -> Result<Box<[f64]>, Box<dyn std::error::Error>> {
        let total_size = size * std::mem::size_of::<f64>();
        let layout = Layout::from_size_align(total_size, 64)?;

        unsafe {
            // Allocate memory (NUMA binding disabled due to libc limitations)
            let ptr = System.alloc(layout) as *mut f64;
            if ptr.is_null() {
                return Err("Failed to allocate memory".into());
            }

            // Initialize memory
            std::ptr::write_bytes(ptr, 0, size);

            Ok(Box::from_raw(std::slice::from_raw_parts_mut(ptr, size)))
        }
    }

    /// Windows-specific NUMA-aware allocation
    #[cfg(target_os = "windows")]
    fn allocate_on_numa_node_windows(
        _size: usize,
        _node: u32,
    ) -> Result<Box<[f64]>, Box<dyn std::error::Error>> {
        // Windows NUMA allocation using VirtualAllocExNuma would go here
        // For now, fallback to regular allocation
        Err("Windows NUMA allocation not implemented".into())
    }

    /// Bind current thread to specific NUMA node for better locality
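    ///
    /// A minimal sketch, marked `no_run` because it changes the calling
    /// thread's CPU affinity on Linux (node 0 is an arbitrary choice):
    ///
    /// ```no_run
    /// use scirs2_spatial::memory_pool::DistancePool;
    ///
    /// DistancePool::bind_thread_to_numa_node(0).expect("failed to bind thread");
    /// ```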
    pub fn bind_thread_to_numa_node(node: u32) -> Result<(), Box<dyn std::error::Error>> {
        #[cfg(target_os = "linux")]
        {
            Self::bind_thread_to_numa_node_linux(node)
        }
        #[cfg(target_os = "windows")]
        {
            Self::bind_thread_to_numa_node_windows(node)
        }
        #[cfg(not(any(target_os = "linux", target_os = "windows")))]
        {
            Ok(()) // No-op for unsupported platforms
        }
    }

    #[cfg(target_os = "linux")]
    fn bind_thread_to_numa_node_linux(node: u32) -> Result<(), Box<dyn std::error::Error>> {
        // NUMA memory policy binding disabled due to libc limitations
        // Still attempt CPU affinity for performance

        // Try to set CPU affinity to CPUs on this NUMA node
        if let Some(_cpu_count) = Self::get_node_cpu_count(node) {
            let mut cpu_set: libc::cpu_set_t = unsafe { std::mem::zeroed() };

            // Read the CPU list for this NUMA node
            let cpulist_path = format!("/sys/devices/system/node/node{}/cpulist", node);
            if let Ok(cpulist) = fs::read_to_string(&cpulist_path) {
                for range in cpulist.trim().split(',') {
                    if let Some((start, end)) = range.split_once('-') {
                        if let (Ok(s), Ok(e)) = (start.parse::<u32>(), end.parse::<u32>()) {
                            for cpu in s..=e {
                                unsafe { libc::CPU_SET(cpu as usize, &mut cpu_set) };
                            }
                        }
                    } else if let Ok(cpu) = range.parse::<u32>() {
                        unsafe { libc::CPU_SET(cpu as usize, &mut cpu_set) };
                    }
                }

                // Set thread affinity
                unsafe {
                    libc::sched_setaffinity(
                        0, // current thread
                        std::mem::size_of::<libc::cpu_set_t>(),
                        &cpu_set,
                    );
                }
            }
        }

        Ok(())
    }

    #[cfg(target_os = "windows")]
    fn bind_thread_to_numa_node_windows(_node: u32) -> Result<(), Box<dyn std::error::Error>> {
        // Windows thread affinity using SetThreadGroupAffinity would go here
        Ok(())
    }

    /// Warm memory to ensure pages are allocated and potentially improve locality
    fn warm_memory(buffer: &[f64]) {
        if buffer.is_empty() {
            return;
        }

        // Touch every page to ensure allocation
        let page_size = 4096; // Typical page size
        let elements_per_page = page_size / std::mem::size_of::<f64>();

        for i in (0..buffer.len()).step_by(elements_per_page) {
            // Volatile read to prevent optimization
            unsafe {
                std::ptr::read_volatile(&buffer[i]);
            }
        }
    }

    /// Detect current NUMA node using platform-specific APIs
    fn detect_numa_node() -> i32 {
        #[cfg(target_os = "linux")]
        {
            Self::detect_numa_node_linux().unwrap_or(0)
        }
        #[cfg(target_os = "windows")]
        {
            Self::detect_numa_node_windows().unwrap_or(0)
        }
        #[cfg(not(any(target_os = "linux", target_os = "windows")))]
        {
            0 // Default for unsupported platforms
        }
    }

    /// Linux-specific NUMA node detection
    #[cfg(target_os = "linux")]
    fn detect_numa_node_linux() -> Option<i32> {
        // Try to get current thread's NUMA node
        let _tid = unsafe { libc::gettid() };

        // Read from /proc/self/task/{tid}/numa_maps or use getcpu syscall
        match Self::get_current_numa_node_linux() {
            Ok(node) => Some(node),
            Err(_) => {
                // Fallback: try to detect from CPU
                Self::detect_numa_from_cpu_linux()
            }
        }
    }

    #[cfg(target_os = "linux")]
    fn get_current_numa_node_linux() -> Result<i32, Box<dyn std::error::Error>> {
        // Use getcpu syscall to get current CPU and NUMA node
        let mut cpu: u32 = 0;
        let mut node: u32 = 0;

        let result = unsafe {
            libc::syscall(
                libc::SYS_getcpu,
                &mut cpu as *mut u32,
                &mut node as *mut u32,
                std::ptr::null_mut::<libc::c_void>(),
            )
        };

        if result == 0 {
            Ok(node as i32)
        } else {
            Err("getcpu syscall failed".into())
        }
    }

    #[cfg(target_os = "linux")]
    fn detect_numa_from_cpu_linux() -> Option<i32> {
        // Try to read NUMA topology from /sys/devices/system/node/
        if let Ok(entries) = fs::read_dir("/sys/devices/system/node") {
            for entry in entries.flatten() {
                let name = entry.file_name();
                if let Some(name_str) = name.to_str() {
                    if let Some(stripped) = name_str.strip_prefix("node") {
                        if let Ok(node_num) = stripped.parse::<i32>() {
                            // Simple heuristic: use first available node
                            return Some(node_num);
                        }
                    }
                }
            }
        }
        None
    }

    /// Windows-specific NUMA node detection
    #[cfg(target_os = "windows")]
    fn detect_numa_node_windows() -> Option<i32> {
        // In a real implementation, this would use Windows NUMA APIs
        // such as GetNumaProcessorNode, GetCurrentProcessorNumber, etc.
        // For now, return 0 as fallback
        Some(0)
    }

    /// Get NUMA topology information
    pub fn get_numa_topology() -> NumaTopology {
        #[cfg(target_os = "linux")]
        {
            Self::get_numa_topology_linux()
        }
        #[cfg(target_os = "windows")]
        {
            Self::get_numa_topology_windows()
        }
        #[cfg(not(any(target_os = "linux", target_os = "windows")))]
        {
            NumaTopology::default()
        }
    }

    #[cfg(target_os = "linux")]
    fn get_numa_topology_linux() -> NumaTopology {
        let mut topology = NumaTopology::default();

        // Try to read NUMA information from /sys/devices/system/node/
        if let Ok(entries) = fs::read_dir("/sys/devices/system/node") {
            for entry in entries.flatten() {
                let name = entry.file_name();
                if let Some(name_str) = name.to_str() {
                    if let Some(stripped) = name_str.strip_prefix("node") {
                        if let Ok(_nodeid) = stripped.parse::<u32>() {
                            // Read memory info for this node
                            let meminfo_path =
                                format!("/sys/devices/system/node/{name_str}/meminfo");
                            if let Ok(meminfo) = fs::read_to_string(&meminfo_path) {
                                if let Some(total_kb) = Self::parse_meminfo_total(&meminfo) {
                                    topology.nodes.push(NumaNode {
                                        id: _nodeid,
                                        total_memory_bytes: total_kb * 1024,
                                        available_memory_bytes: total_kb * 1024, // Approximation
                                        cpu_count: Self::get_node_cpu_count(_nodeid).unwrap_or(1),
                                    });
                                }
                            }
                        }
                    }
                }
            }
        }

        // If no nodes found, create a default single node
        if topology.nodes.is_empty() {
            topology.nodes.push(NumaNode {
                id: 0,
                total_memory_bytes: Self::get_total_system_memory()
                    .unwrap_or(8 * 1024 * 1024 * 1024), // 8GB default
                available_memory_bytes: Self::get_available_system_memory()
                    .unwrap_or(4 * 1024 * 1024 * 1024), // 4GB default
                cpu_count: num_cpus::get() as u32,
            });
        }

        topology
    }

    #[cfg(target_os = "linux")]
    fn parse_meminfo_total(meminfo: &str) -> Option<u64> {
        for line in meminfo.lines() {
            if line.starts_with("Node") && line.contains("MemTotal:") {
                let parts: Vec<&str> = line.split_whitespace().collect();
                if parts.len() >= 3 {
                    return parts[2].parse().ok();
                }
            }
        }
        None
    }

    #[cfg(target_os = "linux")]
    fn get_node_cpu_count(_nodeid: u32) -> Option<u32> {
        let cpulist_path = format!("/sys/devices/system/node/node{}/cpulist", _nodeid);
        if let Ok(cpulist) = fs::read_to_string(&cpulist_path) {
            // Parse CPU list (e.g., "0-3,8-11" -> 8 CPUs)
            let mut count = 0;
            for range in cpulist.trim().split(',') {
                if let Some((start, end)) = range.split_once('-') {
                    if let (Ok(s), Ok(e)) = (start.parse::<u32>(), end.parse::<u32>()) {
                        count += e - s + 1;
                    }
                } else if range.parse::<u32>().is_ok() {
                    count += 1;
                }
            }
            Some(count)
        } else {
            None
        }
    }

    #[cfg(target_os = "linux")]
    fn get_total_system_memory() -> Option<u64> {
        if let Ok(meminfo) = fs::read_to_string("/proc/meminfo") {
            for line in meminfo.lines() {
                if line.starts_with("MemTotal:") {
                    let parts: Vec<&str> = line.split_whitespace().collect();
                    if parts.len() >= 2 {
                        return parts[1].parse::<u64>().ok().map(|kb| kb * 1024);
                    }
                }
            }
        }
        None
    }

    #[cfg(target_os = "linux")]
    fn get_available_system_memory() -> Option<u64> {
        if let Ok(meminfo) = fs::read_to_string("/proc/meminfo") {
            for line in meminfo.lines() {
                if line.starts_with("MemAvailable:") {
                    let parts: Vec<&str> = line.split_whitespace().collect();
                    if parts.len() >= 2 {
                        return parts[1].parse::<u64>().ok().map(|kb| kb * 1024);
                    }
                }
            }
        }
        None
    }

    #[cfg(target_os = "windows")]
    fn get_numa_topology_windows() -> NumaTopology {
        // Windows NUMA topology detection would go here
        // Using GetLogicalProcessorInformation and related APIs
        NumaTopology::default()
    }
    /// Clean up excess memory when approaching limits
    fn cleanup_excess_memory(&self) {
        // Drop a fraction of the pooled buffers to release memory. Pooled
        // buffers are not counted in `memory_usage` (it is incremented when a
        // buffer is handed out and decremented when it is returned), so no
        // counter adjustment is needed here.
        let cleanup_ratio = 0.25; // Clean up 25% of buffers

        {
            let mut buffers = self.distance_buffers.lock().unwrap();
            let cleanup_count = (buffers.len() as f64 * cleanup_ratio) as usize;
            for _ in 0..cleanup_count {
                buffers.pop_back();
            }
        }

        {
            let mut buffers = self.large_buffers.lock().unwrap();
            let cleanup_count = (buffers.len() as f64 * cleanup_ratio) as usize;
            for _ in 0..cleanup_count {
                buffers.pop_back();
            }
        }
    }

    /// Return a distance buffer to the pool
    fn return_distance_buffer(&self, buffer: Box<[f64]>) {
        let buffer_size_bytes = buffer.len() * std::mem::size_of::<f64>();
        let is_large = buffer_size_bytes > self.config.large_object_threshold;

        // Update memory usage when buffer is returned
        self.memory_usage
            .fetch_sub(buffer_size_bytes, std::sync::atomic::Ordering::Relaxed);

        if is_large {
            let mut buffers = self.large_buffers.lock().unwrap();
            if buffers.len() < self.config.max_pool_size / 10 {
                buffers.push_back(buffer);
            }
            // Otherwise let it drop and deallocate
        } else {
            let mut buffers = self.distance_buffers.lock().unwrap();
            if buffers.len() < self.config.max_pool_size {
                buffers.push_back(buffer);
            }
            // Otherwise let it drop and deallocate
        }
    }

    /// Return an index buffer to the pool
    fn return_index_buffer(&self, buffer: Box<[usize]>) {
        let mut buffers = self.index_buffers.lock().unwrap();
        if buffers.len() < self.config.max_pool_size {
            buffers.push_back(buffer);
        }
    }

    /// Return a matrix buffer to the pool
    fn return_matrix_buffer(&self, matrix: Array2<f64>) {
        let mut buffers = self.matrix_buffers.lock().unwrap();
        if buffers.len() < self.config.max_pool_size / 4 {
            // Keep fewer matrices
            buffers.push_back(matrix);
        }
    }

    /// Get pool statistics for performance monitoring
    pub fn statistics(&self) -> PoolStatistics {
        self.stats.clone()
    }

    /// Get current memory usage in bytes
    pub fn memory_usage(&self) -> usize {
        self.memory_usage.load(std::sync::atomic::Ordering::Relaxed)
    }

    /// Get current NUMA node
    pub fn current_numa_node(&self) -> i32 {
        self.numa_node.load(std::sync::atomic::Ordering::Relaxed)
    }

    /// Get detailed pool information
    pub fn pool_info(&self) -> PoolInfo {
        let distance_count = self.distance_buffers.lock().unwrap().len();
        let index_count = self.index_buffers.lock().unwrap().len();
        let matrix_count = self.matrix_buffers.lock().unwrap().len();
        let large_count = self.large_buffers.lock().unwrap().len();

        PoolInfo {
            distance_buffer_count: distance_count,
            index_buffer_count: index_count,
            matrix_buffer_count: matrix_count,
            large_buffer_count: large_count,
            total_memory_usage: self.memory_usage(),
            numa_node: self.current_numa_node(),
            hit_rate: self.stats.hit_rate(),
        }
    }

    /// Clear all pools and free memory
    pub fn clear(&self) {
        self.distance_buffers.lock().unwrap().clear();
        self.index_buffers.lock().unwrap().clear();
        self.matrix_buffers.lock().unwrap().clear();
        self.large_buffers.lock().unwrap().clear();
        self.memory_usage
            .store(0, std::sync::atomic::Ordering::Relaxed);
        self.stats.reset();
    }
}

/// RAII wrapper for distance buffers with automatic return to pool
pub struct DistanceBuffer<'a> {
    buffer: Option<Box<[f64]>>,
    pool: &'a DistancePool,
}

impl<'a> DistanceBuffer<'a> {
    fn new(buffer: Box<[f64]>, pool: &'a DistancePool) -> Self {
        Self {
            buffer: Some(buffer),
            pool,
        }
    }

    /// Get a mutable slice of the buffer
    pub fn as_mut_slice(&mut self) -> &mut [f64] {
        self.buffer.as_mut().unwrap().as_mut()
    }

    /// Get an immutable slice of the buffer
    pub fn as_slice(&self) -> &[f64] {
        self.buffer.as_ref().unwrap().as_ref()
    }

    /// Get the length of the buffer
    pub fn len(&self) -> usize {
        self.buffer.as_ref().unwrap().len()
    }

    /// Check if buffer is empty
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Get a view as ndarray Array1
    pub fn as_array_mut(&mut self) -> ArrayViewMut1<f64> {
        ArrayViewMut1::from(self.as_mut_slice())
    }
}

impl Drop for DistanceBuffer<'_> {
    fn drop(&mut self) {
        if let Some(buffer) = self.buffer.take() {
            self.pool.return_distance_buffer(buffer);
        }
    }
}

/// RAII wrapper for index buffers
pub struct IndexBuffer<'a> {
    buffer: Option<Box<[usize]>>,
    pool: &'a DistancePool,
}

impl<'a> IndexBuffer<'a> {
    fn new(buffer: Box<[usize]>, pool: &'a DistancePool) -> Self {
        Self {
            buffer: Some(buffer),
            pool,
        }
    }

    /// Get a mutable slice of the buffer
    pub fn as_mut_slice(&mut self) -> &mut [usize] {
        self.buffer.as_mut().unwrap().as_mut()
    }

    /// Get an immutable slice of the buffer
    pub fn as_slice(&self) -> &[usize] {
        self.buffer.as_ref().unwrap().as_ref()
    }

    /// Get the length of the buffer
    pub fn len(&self) -> usize {
        self.buffer.as_ref().unwrap().len()
    }

    /// Check if buffer is empty
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}

impl Drop for IndexBuffer<'_> {
    fn drop(&mut self) {
        if let Some(buffer) = self.buffer.take() {
            self.pool.return_index_buffer(buffer);
        }
    }
}

/// RAII wrapper for matrix buffers
pub struct MatrixBuffer<'a> {
    matrix: Option<Array2<f64>>,
    pool: &'a DistancePool,
}

impl<'a> MatrixBuffer<'a> {
    fn new(matrix: Array2<f64>, pool: &'a DistancePool) -> Self {
        Self {
            matrix: Some(matrix),
            pool,
        }
    }

    /// Get a mutable view of the matrix
    pub fn as_mut(&mut self) -> ArrayViewMut2<f64> {
        self.matrix.as_mut().unwrap().view_mut()
    }

    /// Get the dimensions of the matrix
    pub fn dim(&self) -> (usize, usize) {
        self.matrix.as_ref().unwrap().dim()
    }

    /// Fill the matrix with a value
    pub fn fill(&mut self, value: f64) {
        self.matrix.as_mut().unwrap().fill(value);
    }
}

impl Drop for MatrixBuffer<'_> {
    fn drop(&mut self) {
        if let Some(matrix) = self.matrix.take() {
            self.pool.return_matrix_buffer(matrix);
        }
    }
}

/// Arena allocator for temporary objects in clustering algorithms
pub struct ClusteringArena {
    config: MemoryPoolConfig,
    current_block: Mutex<Option<ArenaBlock>>,
    full_blocks: Mutex<Vec<ArenaBlock>>,
    stats: ArenaStatistics,
}

impl ClusteringArena {
    /// Create a new clustering arena
    pub fn new() -> Self {
        Self::with_config(MemoryPoolConfig::default())
    }

    /// Create arena with custom configuration
    pub fn with_config(config: MemoryPoolConfig) -> Self {
        Self {
            config,
            current_block: Mutex::new(None),
            full_blocks: Mutex::new(Vec::new()),
            stats: ArenaStatistics::new(),
        }
    }

    /// Allocate a temporary vector in the arena
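    ///
    /// A short usage sketch (element type and size are illustrative):
    ///
    /// ```
    /// use scirs2_spatial::memory_pool::ClusteringArena;
    ///
    /// let arena = ClusteringArena::new();
    /// let mut scratch = arena.alloc_temp_vec::<f64>(16);
    /// scratch.as_mut_slice()[5] = 2.5;
    /// assert_eq!(scratch.as_slice()[5], 2.5);
    /// ```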
    pub fn alloc_temp_vec<T: Default + Clone>(&self, size: usize) -> ArenaVec<T> {
        let layout = Layout::array::<T>(size).unwrap();
        let ptr = self.allocate_raw(layout);

        unsafe {
            // Initialize elements
            for i in 0..size {
                std::ptr::write(ptr.as_ptr().add(i) as *mut T, T::default());
            }

            ArenaVec::new(ptr.as_ptr() as *mut T, size)
        }
    }

    /// Allocate raw memory with proper alignment
    fn allocate_raw(&self, layout: Layout) -> NonNull<u8> {
        let mut current = self.current_block.lock().unwrap();

        if current.is_none() || !current.as_ref().unwrap().can_allocate(layout) {
            // Need a new block
            if let Some(old_block) = current.take() {
                self.full_blocks.lock().unwrap().push(old_block);
            }
            *current = Some(ArenaBlock::new(self.config.arena_block_size));
        }

        current.as_mut().unwrap().allocate(layout)
    }

    /// Reset the arena, keeping allocated blocks for reuse
    pub fn reset(&self) {
        let mut current = self.current_block.lock().unwrap();
        let mut full_blocks = self.full_blocks.lock().unwrap();

        if let Some(block) = current.take() {
            full_blocks.push(block);
        }

        // Reset all blocks
        for block in full_blocks.iter_mut() {
            block.reset();
        }

        // Move one block back to current
        if let Some(block) = full_blocks.pop() {
            *current = Some(block);
        }

        self.stats.reset();
    }

    /// Get arena statistics
    pub fn statistics(&self) -> ArenaStatistics {
        self.stats.clone()
    }
}

impl Default for ClusteringArena {
    fn default() -> Self {
        Self::new()
    }
}

/// A block of memory within the arena
struct ArenaBlock {
    memory: NonNull<u8>,
    size: usize,
    offset: usize,
}

// SAFETY: ArenaBlock manages its own memory and ensures thread-safe access
unsafe impl Send for ArenaBlock {}
unsafe impl Sync for ArenaBlock {}

impl ArenaBlock {
    fn new(size: usize) -> Self {
        let layout = Layout::from_size_align(size, 64).unwrap(); // 64-byte aligned
        let memory =
            unsafe { NonNull::new(System.alloc(layout)).expect("Failed to allocate arena block") };

        Self {
            memory,
            size,
            offset: 0,
        }
    }

    fn can_allocate(&self, layout: Layout) -> bool {
        let aligned_offset = (self.offset + layout.align() - 1) & !(layout.align() - 1);
        aligned_offset + layout.size() <= self.size
    }

    fn allocate(&mut self, layout: Layout) -> NonNull<u8> {
        assert!(self.can_allocate(layout));

        // Align the offset
        self.offset = (self.offset + layout.align() - 1) & !(layout.align() - 1);

        let ptr = unsafe { NonNull::new_unchecked(self.memory.as_ptr().add(self.offset)) };
        self.offset += layout.size();

        ptr
    }

    fn reset(&mut self) {
        self.offset = 0;
    }
}

impl Drop for ArenaBlock {
    fn drop(&mut self) {
        let layout = Layout::from_size_align(self.size, 64).unwrap();
        unsafe {
            System.dealloc(self.memory.as_ptr(), layout);
        }
    }
}

/// RAII wrapper for arena-allocated vectors
pub struct ArenaVec<T> {
    ptr: *mut T,
    len: usize,
    phantom: std::marker::PhantomData<T>,
}

impl<T> ArenaVec<T> {
    fn new(ptr: *mut T, len: usize) -> Self {
        Self {
            ptr,
            len,
            phantom: std::marker::PhantomData,
        }
    }

    /// Get a mutable slice of the vector
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        unsafe { std::slice::from_raw_parts_mut(self.ptr, self.len) }
    }

    /// Get an immutable slice of the vector
    pub fn as_slice(&self) -> &[T] {
        unsafe { std::slice::from_raw_parts(self.ptr, self.len) }
    }

    /// Get the length of the vector
    pub fn len(&self) -> usize {
        self.len
    }

    /// Check if vector is empty
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }
}

// Note: ArenaVec doesn't implement Drop because the arena manages the memory

/// Detailed pool information
#[derive(Debug, Clone)]
pub struct PoolInfo {
    /// Number of distance buffers in pool
    pub distance_buffer_count: usize,
    /// Number of index buffers in pool
    pub index_buffer_count: usize,
    /// Number of matrix buffers in pool
    pub matrix_buffer_count: usize,
    /// Number of large buffers in pool
    pub large_buffer_count: usize,
    /// Total memory usage in bytes
    pub total_memory_usage: usize,
    /// Current NUMA node
    pub numa_node: i32,
    /// Hit rate percentage
    pub hit_rate: f64,
}

/// Pool performance statistics
#[derive(Debug)]
pub struct PoolStatistics {
    hits: std::sync::atomic::AtomicUsize,
    misses: std::sync::atomic::AtomicUsize,
    total_allocations: std::sync::atomic::AtomicUsize,
}

impl PoolStatistics {
    fn new() -> Self {
        Self {
            hits: std::sync::atomic::AtomicUsize::new(0),
            misses: std::sync::atomic::AtomicUsize::new(0),
            total_allocations: std::sync::atomic::AtomicUsize::new(0),
        }
    }

    fn record_hit(&self) {
        self.hits.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
    }

    fn record_miss(&self) {
        self.misses
            .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
        self.total_allocations
            .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
    }

    fn reset(&self) {
        self.hits.store(0, std::sync::atomic::Ordering::Relaxed);
        self.misses.store(0, std::sync::atomic::Ordering::Relaxed);
        self.total_allocations
            .store(0, std::sync::atomic::Ordering::Relaxed);
    }

    /// Get hit rate as a percentage
    pub fn hit_rate(&self) -> f64 {
        let hits = self.hits.load(std::sync::atomic::Ordering::Relaxed);
        let total = hits + self.misses.load(std::sync::atomic::Ordering::Relaxed);
        if total == 0 {
            0.0
        } else {
            hits as f64 / total as f64 * 100.0
        }
    }

    /// Get total requests
    pub fn total_requests(&self) -> usize {
        self.hits.load(std::sync::atomic::Ordering::Relaxed)
            + self.misses.load(std::sync::atomic::Ordering::Relaxed)
    }

    /// Get total new allocations
    pub fn total_allocations(&self) -> usize {
        self.total_allocations
            .load(std::sync::atomic::Ordering::Relaxed)
    }
}

impl Clone for PoolStatistics {
    fn clone(&self) -> Self {
        Self {
            hits: std::sync::atomic::AtomicUsize::new(
                self.hits.load(std::sync::atomic::Ordering::Relaxed),
            ),
            misses: std::sync::atomic::AtomicUsize::new(
                self.misses.load(std::sync::atomic::Ordering::Relaxed),
            ),
            total_allocations: std::sync::atomic::AtomicUsize::new(
                self.total_allocations
                    .load(std::sync::atomic::Ordering::Relaxed),
            ),
        }
    }
}

/// Arena performance statistics
#[derive(Debug)]
pub struct ArenaStatistics {
    blocks_allocated: std::sync::atomic::AtomicUsize,
    total_memory: std::sync::atomic::AtomicUsize,
    active_objects: std::sync::atomic::AtomicUsize,
}

impl ArenaStatistics {
    fn new() -> Self {
        Self {
            blocks_allocated: std::sync::atomic::AtomicUsize::new(0),
            total_memory: std::sync::atomic::AtomicUsize::new(0),
            active_objects: std::sync::atomic::AtomicUsize::new(0),
        }
    }

    fn reset(&self) {
        self.blocks_allocated
            .store(0, std::sync::atomic::Ordering::Relaxed);
        self.total_memory
            .store(0, std::sync::atomic::Ordering::Relaxed);
        self.active_objects
            .store(0, std::sync::atomic::Ordering::Relaxed);
    }

    /// Get number of allocated blocks
    pub fn blocks_allocated(&self) -> usize {
        self.blocks_allocated
            .load(std::sync::atomic::Ordering::Relaxed)
    }

    /// Get total memory usage in bytes
    pub fn total_memory(&self) -> usize {
        self.total_memory.load(std::sync::atomic::Ordering::Relaxed)
    }

    /// Get number of active objects
    pub fn active_objects(&self) -> usize {
        self.active_objects
            .load(std::sync::atomic::Ordering::Relaxed)
    }
}

impl Clone for ArenaStatistics {
    fn clone(&self) -> Self {
        Self {
            blocks_allocated: std::sync::atomic::AtomicUsize::new(
                self.blocks_allocated
                    .load(std::sync::atomic::Ordering::Relaxed),
            ),
            total_memory: std::sync::atomic::AtomicUsize::new(
                self.total_memory.load(std::sync::atomic::Ordering::Relaxed),
            ),
            active_objects: std::sync::atomic::AtomicUsize::new(
                self.active_objects
                    .load(std::sync::atomic::Ordering::Relaxed),
            ),
        }
    }
}

/// NUMA topology information for memory allocation optimization
#[derive(Debug, Clone)]
pub struct NumaTopology {
    /// Available NUMA nodes
    pub nodes: Vec<NumaNode>,
}

/// Individual NUMA node information
#[derive(Debug, Clone)]
pub struct NumaNode {
    /// NUMA node ID
    pub id: u32,
    /// Total memory on this node in bytes
    pub total_memory_bytes: u64,
    /// Available memory on this node in bytes
    pub available_memory_bytes: u64,
    /// Number of CPU cores on this node
    pub cpu_count: u32,
}

impl Default for NumaTopology {
    fn default() -> Self {
        Self {
            nodes: vec![NumaNode {
                id: 0,
                total_memory_bytes: 8 * 1024 * 1024 * 1024, // 8GB default
                available_memory_bytes: 4 * 1024 * 1024 * 1024, // 4GB default
                cpu_count: 4,                               // Default 4 cores
            }],
        }
    }
}

impl NumaTopology {
    /// Get the best NUMA node for allocation based on current thread affinity
    pub fn get_optimal_node(&self) -> u32 {
        // In a real implementation, this would check current thread affinity
        // and return the node that the thread is running on
        if !self.nodes.is_empty() {
            self.nodes[0].id
        } else {
            0
        }
    }

    /// Get node with most available memory
    pub fn get_node_with_most_memory(&self) -> Option<u32> {
        self.nodes
            .iter()
            .max_by_key(|node| node.available_memory_bytes)
            .map(|node| node.id)
    }

    /// Get total system memory across all nodes
    pub fn total_system_memory(&self) -> u64 {
        self.nodes.iter().map(|node| node.total_memory_bytes).sum()
    }

    /// Get total available memory across all nodes
    pub fn total_available_memory(&self) -> u64 {
        self.nodes
            .iter()
            .map(|node| node.available_memory_bytes)
            .sum()
    }

    /// Check if a specific NUMA node exists
    pub fn has_node(&self, _nodeid: u32) -> bool {
        self.nodes.iter().any(|node| node.id == _nodeid)
    }

    /// Get memory information for a specific node
    pub fn get_node_info(&self, _nodeid: u32) -> Option<&NumaNode> {
        self.nodes.iter().find(|node| node.id == _nodeid)
    }
}

/// Global memory pool instance for convenience
static GLOBAL_DISTANCE_POOL: std::sync::OnceLock<DistancePool> = std::sync::OnceLock::new();
static GLOBAL_CLUSTERING_ARENA: std::sync::OnceLock<ClusteringArena> = std::sync::OnceLock::new();

/// Get the global distance pool instance
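///
/// A minimal sketch of using the shared pool:
///
/// ```
/// use scirs2_spatial::memory_pool::global_distance_pool;
///
/// let buffer = global_distance_pool().get_distance_buffer(8);
/// assert_eq!(buffer.len(), 8);
/// ```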
#[allow(dead_code)]
pub fn global_distance_pool() -> &'static DistancePool {
    GLOBAL_DISTANCE_POOL.get_or_init(|| DistancePool::new(1000))
}

/// Get the global clustering arena instance
#[allow(dead_code)]
pub fn global_clustering_arena() -> &'static ClusteringArena {
    GLOBAL_CLUSTERING_ARENA.get_or_init(ClusteringArena::new)
}

/// Create a NUMA-optimized distance pool for the current thread
#[allow(dead_code)]
pub fn create_numa_optimized_pool(capacity: usize) -> DistancePool {
    let config = MemoryPoolConfig {
        numa_aware: true,
        auto_numa_discovery: true,
        enable_thread_affinity: true,
        ..Default::default()
    };

    DistancePool::with_config(capacity, config)
}

/// Get NUMA topology information
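///
/// A small sketch; the reported values depend on the host system:
///
/// ```
/// use scirs2_spatial::memory_pool::get_numa_topology;
///
/// let topology = get_numa_topology();
/// assert!(!topology.nodes.is_empty());
/// ```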
#[allow(dead_code)]
pub fn get_numa_topology() -> NumaTopology {
    DistancePool::get_numa_topology()
}

/// Test NUMA capabilities and return detailed information
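///
/// A short sketch; the detected values are platform dependent:
///
/// ```
/// use scirs2_spatial::memory_pool::test_numa_capabilities;
///
/// let caps = test_numa_capabilities();
/// println!("strategy: {}", caps.recommended_memory_strategy());
/// ```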
#[allow(dead_code)]
pub fn test_numa_capabilities() -> NumaCapabilities {
    NumaCapabilities::detect()
}

/// NUMA system capabilities
#[derive(Debug, Clone)]
pub struct NumaCapabilities {
    /// Whether NUMA is available on this system
    pub numa_available: bool,
    /// Number of NUMA nodes detected
    pub num_nodes: u32,
    /// Whether NUMA memory binding is supported
    pub memory_binding_supported: bool,
    /// Whether thread affinity is supported
    pub thread_affinity_supported: bool,
    /// Platform-specific details
    pub platform_details: String,
}

impl NumaCapabilities {
    /// Detect NUMA capabilities of the current system
    pub fn detect() -> Self {
        #[cfg(target_os = "linux")]
        {
            Self::detect_linux()
        }
        #[cfg(target_os = "windows")]
        {
            Self::detect_windows()
        }
        #[cfg(not(any(target_os = "linux", target_os = "windows")))]
        {
            Self {
                numa_available: false,
                num_nodes: 1,
                memory_binding_supported: false,
                thread_affinity_supported: false,
                platform_details: "Unsupported platform".to_string(),
            }
        }
    }

    #[cfg(target_os = "linux")]
    fn detect_linux() -> Self {
        let numa_available = std::path::Path::new("/sys/devices/system/node").exists();
        let num_nodes = if numa_available {
            DistancePool::get_numa_topology().nodes.len() as u32
        } else {
            1
        };

        Self {
            numa_available,
            num_nodes,
            memory_binding_supported: numa_available,
            thread_affinity_supported: true, // Generally available on Linux
            platform_details: format!("Linux with {num_nodes} NUMA nodes"),
        }
    }

    #[cfg(target_os = "windows")]
    fn detect_windows() -> Self {
        Self {
            numa_available: true, // Windows typically has NUMA support
            num_nodes: 1,         // Would be detected using Windows APIs
            memory_binding_supported: true,
            thread_affinity_supported: true,
            platform_details: "Windows NUMA support".to_string(),
        }
    }

    /// Check if NUMA optimizations should be enabled
    pub fn should_enable_numa(&self) -> bool {
        self.numa_available && self.num_nodes > 1
    }

    /// Get recommended memory allocation strategy
    pub fn recommended_memory_strategy(&self) -> &'static str {
        if self.should_enable_numa() {
            "NUMA-aware"
        } else {
            "Standard"
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_distance_pool() {
        let pool = DistancePool::new(10);

        // Get a buffer
        let mut buffer1 = pool.get_distance_buffer(100);
        assert_eq!(buffer1.len(), 100);

        // Use the buffer
        buffer1.as_mut_slice()[0] = 42.0;
        assert_eq!(buffer1.as_slice()[0], 42.0);

        // Get another buffer while first is in use
        let buffer2 = pool.get_distance_buffer(50);
        assert_eq!(buffer2.len(), 50);

        // Drop first buffer (should return to pool)
        drop(buffer1);

        // Get buffer again (should reuse)
        let buffer3 = pool.get_distance_buffer(100);
        assert_eq!(buffer3.len(), 100);
        // Note: a reused buffer may retain previous contents; only newly
        // created buffers are zeroed
    }

    #[test]
    fn test_arena_allocator() {
        let arena = ClusteringArena::new();

        // Allocate some temporary vectors
        let mut vec1 = arena.alloc_temp_vec::<f64>(100);
        let mut vec2 = arena.alloc_temp_vec::<usize>(50);

        // Use the vectors
        vec1.as_mut_slice()[0] = std::f64::consts::PI;
        vec2.as_mut_slice()[0] = 42;

        assert_eq!(vec1.as_slice()[0], std::f64::consts::PI);
        assert_eq!(vec2.as_slice()[0], 42);

        // Reset arena
        arena.reset();

        // Allocate again (should reuse memory)
        let mut vec3 = arena.alloc_temp_vec::<f64>(200);
        vec3.as_mut_slice()[0] = std::f64::consts::E;
        assert_eq!(vec3.as_slice()[0], std::f64::consts::E);
    }

    #[test]
    fn test_pool_statistics() {
        let pool = DistancePool::new(2);

        // Initial stats should be zero
        let stats = pool.statistics();
        assert_eq!(stats.total_requests(), 0);
        assert_eq!(stats.total_allocations(), 0);

        // First request should be a miss
        let _buffer1 = pool.get_distance_buffer(100);
        let stats = pool.statistics();
        assert_eq!(stats.total_requests(), 1);
        assert_eq!(stats.total_allocations(), 1);
        assert!(stats.hit_rate() < 1.0);

        // Drop and get again should be a hit
        drop(_buffer1);
        let _buffer2 = pool.get_distance_buffer(100);
        let stats = pool.statistics();
        assert_eq!(stats.total_requests(), 2);
        assert_eq!(stats.total_allocations(), 1); // No new allocation
        assert!(stats.hit_rate() > 0.0);
    }

    #[test]
    fn test_matrix_buffer() {
        let pool = DistancePool::new(5);

        let mut matrix = pool.get_matrix_buffer(10, 10);
        assert_eq!(matrix.dim(), (10, 10));

        matrix.fill(42.0);
        assert_eq!(matrix.as_mut()[[0, 0]], 42.0);

        drop(matrix);

        // Get another matrix (should potentially reuse)
        let matrix2 = pool.get_matrix_buffer(8, 8);
        assert_eq!(matrix2.dim(), (8, 8));
    }

    #[test]
    fn test_global_pools() {
        // Test that global pools can be accessed
        let pool = global_distance_pool();
        let arena = global_clustering_arena();

        let _buffer = pool.get_distance_buffer(10);
        let _vec = arena.alloc_temp_vec::<f64>(10);

        // Should not panic
    }
}