use scirs2_core::ndarray::{s, Array2, ArrayViewMut1, ArrayViewMut2};
use std::alloc::{GlobalAlloc, Layout, System};
use std::collections::VecDeque;
use std::ptr::NonNull;
use std::sync::Mutex;

#[cfg(any(target_os = "linux", target_os = "android"))]
use libc;
#[cfg(target_os = "linux")]
use std::fs;

use std::sync::atomic::Ordering;

#[cfg(test)]
use num_cpus;

// Outside of test builds, a small shim stands in for the `num_cpus` crate and
// queries the available parallelism directly from the standard library.
#[cfg(not(test))]
mod num_cpus {
    pub fn get() -> usize {
        std::thread::available_parallelism()
            .map(|n| n.get())
            .unwrap_or(4)
    }
}

/// Configuration shared by the distance pool and the clustering arena.
#[derive(Debug, Clone)]
pub struct MemoryPoolConfig {
    /// Maximum number of buffers retained per pool.
    pub max_pool_size: usize,
    /// Cache line size in bytes used for buffer alignment.
    pub cache_line_size: usize,
    /// Enable NUMA-aware allocation and placement.
    pub numa_aware: bool,
    /// Prefetch distance hint, in elements.
    pub prefetch_distance: usize,
    /// Size in bytes of each arena block.
    pub arena_block_size: usize,
    /// Preferred NUMA node, or -1 to auto-detect.
    pub numa_node_hint: i32,
    /// Automatically discover the NUMA topology.
    pub auto_numa_discovery: bool,
    /// Allow binding worker threads to the CPUs of the chosen NUMA node.
    pub enable_thread_affinity: bool,
    /// Touch newly allocated memory so pages are faulted in up front.
    pub enable_memory_warming: bool,
    /// Allocations above this size in bytes use the large-object path.
    pub large_object_threshold: usize,
    /// Upper bound in bytes on the memory accounted to the pool.
    pub max_memory_usage: usize,
}

impl Default for MemoryPoolConfig {
    fn default() -> Self {
        Self {
            max_pool_size: 1000,
            cache_line_size: 64,
            numa_aware: true,
            prefetch_distance: 8,
            arena_block_size: 1024 * 1024,
            numa_node_hint: -1,
            auto_numa_discovery: true,
            enable_thread_affinity: true,
            enable_memory_warming: true,
            large_object_threshold: 64 * 1024,
            max_memory_usage: 1024 * 1024 * 1024,
        }
    }
}

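// Illustrative sketch (not part of the original API surface): building a pool with a
// customised `MemoryPoolConfig`. The field values and the capacity of 64 below are
// assumptions chosen purely for demonstration.
#[allow(dead_code)]
fn example_custom_pool() -> DistancePool {
    let config = MemoryPoolConfig {
        max_pool_size: 256,
        numa_aware: false,
        large_object_threshold: 128 * 1024,
        ..MemoryPoolConfig::default()
    };
    DistancePool::with_config(64, config)
}
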
/// NUMA-aware memory pool that recycles distance, index, and matrix buffers
/// used by the clustering routines.
pub struct DistancePool {
    config: MemoryPoolConfig,
    distance_buffers: Mutex<VecDeque<Box<[f64]>>>,
    index_buffers: Mutex<VecDeque<Box<[usize]>>>,
    matrix_buffers: Mutex<VecDeque<Array2<f64>>>,
    large_buffers: Mutex<VecDeque<Box<[f64]>>>,
    stats: PoolStatistics,
    memory_usage: std::sync::atomic::AtomicUsize,
    numa_node: std::sync::atomic::AtomicI32,
}

impl DistancePool {
    pub fn new(capacity: usize) -> Self {
        Self::with_config(capacity, MemoryPoolConfig::default())
    }

    pub fn with_config(capacity: usize, config: MemoryPoolConfig) -> Self {
        let numa_node = if config.numa_aware && config.numa_node_hint >= 0 {
            config.numa_node_hint
        } else {
            Self::detect_numa_node()
        };

        Self {
            config,
            distance_buffers: Mutex::new(VecDeque::with_capacity(capacity)),
            index_buffers: Mutex::new(VecDeque::with_capacity(capacity)),
            matrix_buffers: Mutex::new(VecDeque::with_capacity(capacity / 4)),
            large_buffers: Mutex::new(VecDeque::with_capacity(capacity / 10)),
            stats: PoolStatistics::new(),
            memory_usage: std::sync::atomic::AtomicUsize::new(0),
            numa_node: std::sync::atomic::AtomicI32::new(numa_node),
        }
    }

    pub fn get_distance_buffer(&self, size: usize) -> DistanceBuffer {
        let buffer_size_bytes = size * std::mem::size_of::<f64>();
        let is_large = buffer_size_bytes > self.config.large_object_threshold;

        // If adding this buffer would push the pool past its memory budget, evict
        // some pooled buffers first.
        let current_usage = self.memory_usage.load(std::sync::atomic::Ordering::Relaxed);
        if current_usage + buffer_size_bytes > self.config.max_memory_usage {
            self.cleanup_excess_memory();
        }

        let buffer = if is_large {
            self.get_large_buffer(size)
        } else {
            let mut buffers = self.distance_buffers.lock().unwrap();

            // Reuse a pooled buffer whose length is close to the request (at most 2x).
            for i in 0..buffers.len() {
                if buffers[i].len() >= size && buffers[i].len() <= size * 2 {
                    let buffer = buffers.remove(i).unwrap();
                    self.stats.record_hit();
                    return DistanceBuffer::new(buffer, self);
                }
            }

            self.stats.record_miss();
            // Only freshly allocated memory is added to the accounted footprint;
            // reused buffers are already counted.
            self.memory_usage
                .fetch_add(buffer_size_bytes, std::sync::atomic::Ordering::Relaxed);
            self.create_aligned_buffer(size)
        };

        DistanceBuffer::new(buffer, self)
    }

    fn get_large_buffer(&self, size: usize) -> Box<[f64]> {
        let mut buffers = self.large_buffers.lock().unwrap();

        // Large buffers are only reused on an exact size match.
        for i in 0..buffers.len() {
            if buffers[i].len() == size {
                let buffer = buffers.remove(i).unwrap();
                self.stats.record_hit();
                return buffer;
            }
        }

        self.stats.record_miss();
        self.memory_usage.fetch_add(
            size * std::mem::size_of::<f64>(),
            std::sync::atomic::Ordering::Relaxed,
        );
        if self.config.numa_aware {
            self.create_numa_aligned_buffer(size)
        } else {
            self.create_aligned_buffer(size)
        }
    }

    pub fn get_index_buffer(&self, size: usize) -> IndexBuffer {
        let mut buffers = self.index_buffers.lock().unwrap();

        for i in 0..buffers.len() {
            if buffers[i].len() >= size && buffers[i].len() <= size * 2 {
                let buffer = buffers.remove(i).unwrap();
                self.stats.record_hit();
                return IndexBuffer::new(buffer, self);
            }
        }

        self.stats.record_miss();
        let new_buffer = vec![0usize; size].into_boxed_slice();
        IndexBuffer::new(new_buffer, self)
    }

    pub fn get_matrix_buffer(&self, rows: usize, cols: usize) -> MatrixBuffer {
        let mut buffers = self.matrix_buffers.lock().unwrap();

        for i in 0..buffers.len() {
            let (r, c) = buffers[i].dim();
            if r >= rows && c >= cols && r <= rows * 2 && c <= cols * 2 {
                let mut matrix = buffers.remove(i).unwrap();
                matrix = matrix.slice_mut(s![..rows, ..cols]).to_owned();
                self.stats.record_hit();
                return MatrixBuffer::new(matrix, self);
            }
        }

        self.stats.record_miss();
        let matrix = Array2::zeros((rows, cols));
        MatrixBuffer::new(matrix, self)
    }

    fn create_aligned_buffer(&self, size: usize) -> Box<[f64]> {
        let layout = Layout::from_size_align(
            size * std::mem::size_of::<f64>(),
            self.config.cache_line_size,
        )
        .unwrap();

        unsafe {
            let ptr = System.alloc(layout) as *mut f64;
            if ptr.is_null() {
                panic!("Failed to allocate aligned memory");
            }

            if self.config.enable_memory_warming {
                std::ptr::write_bytes(ptr, 0, size);
            }

            Box::from_raw(std::slice::from_raw_parts_mut(ptr, size))
        }
    }

    fn create_numa_aligned_buffer(&self, size: usize) -> Box<[f64]> {
        let numa_node = self.numa_node.load(Ordering::Relaxed);

        #[cfg(target_os = "linux")]
        {
            if self.config.numa_aware && numa_node >= 0 {
                match Self::allocate_on_numa_node_linux(size, numa_node as u32) {
                    Ok(buffer) => {
                        if self.config.enable_memory_warming {
                            Self::warm_memory(&buffer);
                        }
                        return buffer;
                    }
                    Err(_) => {
                        // Fall through to the standard aligned allocation below.
                    }
                }
            }
        }

        #[cfg(target_os = "windows")]
        {
            if self.config.numa_aware && numa_node >= 0 {
                match Self::allocate_on_numa_node_windows(size, numa_node as u32) {
                    Ok(buffer) => {
                        if self.config.enable_memory_warming {
                            Self::warm_memory(&buffer);
                        }
                        return buffer;
                    }
                    Err(_) => {
                        // Fall through to the standard aligned allocation below.
                    }
                }
            }
        }

        let buffer = self.create_aligned_buffer(size);

        if self.config.enable_memory_warming {
            Self::warm_memory(&buffer);
        }

        buffer
    }

    /// Allocate a zero-initialised buffer intended to live on the given NUMA node.
    ///
    /// No explicit binding syscall is issued here; zeroing the pages from the calling
    /// thread relies on Linux's default local (first-touch) placement.
    #[cfg(target_os = "linux")]
    fn allocate_on_numa_node_linux(
        size: usize,
        _node: u32,
    ) -> Result<Box<[f64]>, Box<dyn std::error::Error>> {
        let total_size = size * std::mem::size_of::<f64>();
        let layout = Layout::from_size_align(total_size, 64)?;

        unsafe {
            let ptr = System.alloc(layout) as *mut f64;
            if ptr.is_null() {
                return Err("Failed to allocate memory".into());
            }

            std::ptr::write_bytes(ptr, 0, size);

            Ok(Box::from_raw(std::slice::from_raw_parts_mut(ptr, size)))
        }
    }

    #[cfg(target_os = "windows")]
    fn allocate_on_numa_node_windows(
        _size: usize,
        _node: u32,
    ) -> Result<Box<[f64]>, Box<dyn std::error::Error>> {
        Err("Windows NUMA allocation not implemented".into())
    }

    pub fn bind_thread_to_numa_node(node: u32) -> Result<(), Box<dyn std::error::Error>> {
        #[cfg(target_os = "linux")]
        {
            Self::bind_thread_to_numa_node_linux(node)
        }
        #[cfg(target_os = "windows")]
        {
            Self::bind_thread_to_numa_node_windows(node)
        }
        #[cfg(not(any(target_os = "linux", target_os = "windows")))]
        {
            Ok(())
        }
    }

    #[cfg(target_os = "linux")]
    fn bind_thread_to_numa_node_linux(node: u32) -> Result<(), Box<dyn std::error::Error>> {
        if let Some(_cpu_count) = Self::get_node_cpu_count(node) {
            let mut cpu_set: libc::cpu_set_t = unsafe { std::mem::zeroed() };

            let cpulist_path = format!("/sys/devices/system/node/node{}/cpulist", node);
            if let Ok(cpulist) = fs::read_to_string(&cpulist_path) {
                for range in cpulist.trim().split(',') {
                    if let Some((start, end)) = range.split_once('-') {
                        if let (Ok(s), Ok(e)) = (start.parse::<u32>(), end.parse::<u32>()) {
                            for cpu in s..=e {
                                unsafe { libc::CPU_SET(cpu as usize, &mut cpu_set) };
                            }
                        }
                    } else if let Ok(cpu) = range.parse::<u32>() {
                        unsafe { libc::CPU_SET(cpu as usize, &mut cpu_set) };
                    }
                }

                unsafe {
                    libc::sched_setaffinity(
                        0, // pid 0 binds the calling thread
                        std::mem::size_of::<libc::cpu_set_t>(),
                        &cpu_set,
                    );
                }
            }
        }

        Ok(())
    }

    #[cfg(target_os = "windows")]
    fn bind_thread_to_numa_node_windows(_node: u32) -> Result<(), Box<dyn std::error::Error>> {
        Ok(())
    }

    fn warm_memory(buffer: &[f64]) {
        if buffer.is_empty() {
            return;
        }

        // Touch one element per (assumed 4 KiB) page so the pages are faulted in.
        let page_size = 4096;
        let elements_per_page = page_size / std::mem::size_of::<f64>();

        for i in (0..buffer.len()).step_by(elements_per_page) {
            unsafe {
                std::ptr::read_volatile(&buffer[i]);
            }
        }
    }

    fn detect_numa_node() -> i32 {
        #[cfg(target_os = "linux")]
        {
            Self::detect_numa_node_linux().unwrap_or(0)
        }
        #[cfg(target_os = "windows")]
        {
            Self::detect_numa_node_windows().unwrap_or(0)
        }
        #[cfg(not(any(target_os = "linux", target_os = "windows")))]
        {
            0
        }
    }

    #[cfg(target_os = "linux")]
    fn detect_numa_node_linux() -> Option<i32> {
        let _tid = unsafe { libc::gettid() };

        match Self::get_current_numa_node_linux() {
            Ok(node) => Some(node),
            Err(_) => Self::detect_numa_from_cpu_linux(),
        }
    }

    #[cfg(target_os = "linux")]
    fn get_current_numa_node_linux() -> Result<i32, Box<dyn std::error::Error>> {
        let mut cpu: u32 = 0;
        let mut node: u32 = 0;

        let result = unsafe {
            libc::syscall(
                libc::SYS_getcpu,
                &mut cpu as *mut u32,
                &mut node as *mut u32,
                std::ptr::null_mut::<libc::c_void>(),
            )
        };

        if result == 0 {
            Ok(node as i32)
        } else {
            Err("getcpu syscall failed".into())
        }
    }

    #[cfg(target_os = "linux")]
    fn detect_numa_from_cpu_linux() -> Option<i32> {
        if let Ok(entries) = fs::read_dir("/sys/devices/system/node") {
            for entry in entries.flatten() {
                let name = entry.file_name();
                if let Some(name_str) = name.to_str() {
                    if let Some(stripped) = name_str.strip_prefix("node") {
                        if let Ok(node_num) = stripped.parse::<i32>() {
                            return Some(node_num);
                        }
                    }
                }
            }
        }
        None
    }

    #[cfg(target_os = "windows")]
    fn detect_numa_node_windows() -> Option<i32> {
        Some(0)
    }

    pub fn get_numa_topology() -> NumaTopology {
        #[cfg(target_os = "linux")]
        {
            Self::get_numa_topology_linux()
        }
        #[cfg(target_os = "windows")]
        {
            Self::get_numa_topology_windows()
        }
        #[cfg(not(any(target_os = "linux", target_os = "windows")))]
        {
            NumaTopology::default()
        }
    }

    #[cfg(target_os = "linux")]
    fn get_numa_topology_linux() -> NumaTopology {
        // Start from an empty node list; the default single-node topology is only
        // used as a fallback when sysfs discovery finds nothing.
        let mut topology = NumaTopology { nodes: Vec::new() };

        if let Ok(entries) = fs::read_dir("/sys/devices/system/node") {
            for entry in entries.flatten() {
                let name = entry.file_name();
                if let Some(name_str) = name.to_str() {
                    if let Some(stripped) = name_str.strip_prefix("node") {
                        if let Ok(node_id) = stripped.parse::<u32>() {
                            let meminfo_path =
                                format!("/sys/devices/system/node/{name_str}/meminfo");
                            if let Ok(meminfo) = fs::read_to_string(&meminfo_path) {
                                if let Some(total_kb) = Self::parse_meminfo_total(&meminfo) {
                                    topology.nodes.push(NumaNode {
                                        id: node_id,
                                        total_memory_bytes: total_kb * 1024,
                                        available_memory_bytes: total_kb * 1024,
                                        cpu_count: Self::get_node_cpu_count(node_id).unwrap_or(1),
                                    });
                                }
                            }
                        }
                    }
                }
            }
        }

        if topology.nodes.is_empty() {
            topology.nodes.push(NumaNode {
                id: 0,
                total_memory_bytes: Self::get_total_system_memory()
                    .unwrap_or(8 * 1024 * 1024 * 1024),
                available_memory_bytes: Self::get_available_system_memory()
                    .unwrap_or(4 * 1024 * 1024 * 1024),
                cpu_count: num_cpus::get() as u32,
            });
        }

        topology
    }

    #[cfg(target_os = "linux")]
    fn parse_meminfo_total(meminfo: &str) -> Option<u64> {
        // Per-node meminfo lines look like: "Node 0 MemTotal:  131925948 kB".
        for line in meminfo.lines() {
            if line.starts_with("Node") && line.contains("MemTotal:") {
                let parts: Vec<&str> = line.split_whitespace().collect();
                if parts.len() >= 4 {
                    return parts[3].parse().ok();
                }
            }
        }
        None
    }

    #[cfg(target_os = "linux")]
    fn get_node_cpu_count(node_id: u32) -> Option<u32> {
        let cpulist_path = format!("/sys/devices/system/node/node{}/cpulist", node_id);
        if let Ok(cpulist) = fs::read_to_string(&cpulist_path) {
            let mut count = 0;
            for range in cpulist.trim().split(',') {
                if let Some((start, end)) = range.split_once('-') {
                    if let (Ok(s), Ok(e)) = (start.parse::<u32>(), end.parse::<u32>()) {
                        count += e - s + 1;
                    }
                } else if range.parse::<u32>().is_ok() {
                    count += 1;
                }
            }
            Some(count)
        } else {
            None
        }
    }

    #[cfg(target_os = "linux")]
    fn get_total_system_memory() -> Option<u64> {
        if let Ok(meminfo) = fs::read_to_string("/proc/meminfo") {
            for line in meminfo.lines() {
                if line.starts_with("MemTotal:") {
                    let parts: Vec<&str> = line.split_whitespace().collect();
                    if parts.len() >= 2 {
                        return parts[1].parse::<u64>().ok().map(|kb| kb * 1024);
                    }
                }
            }
        }
        None
    }

    #[cfg(target_os = "linux")]
    fn get_available_system_memory() -> Option<u64> {
        if let Ok(meminfo) = fs::read_to_string("/proc/meminfo") {
            for line in meminfo.lines() {
                if line.starts_with("MemAvailable:") {
                    let parts: Vec<&str> = line.split_whitespace().collect();
                    if parts.len() >= 2 {
                        return parts[1].parse::<u64>().ok().map(|kb| kb * 1024);
                    }
                }
            }
        }
        None
    }

    #[cfg(target_os = "windows")]
    fn get_numa_topology_windows() -> NumaTopology {
        NumaTopology::default()
    }

    fn cleanup_excess_memory(&self) {
        let cleanup_ratio = 0.25;

        {
            let mut buffers = self.distance_buffers.lock().unwrap();
            let cleanup_count = (buffers.len() as f64 * cleanup_ratio) as usize;
            for _ in 0..cleanup_count {
                if let Some(buffer) = buffers.pop_back() {
                    let freed_bytes = buffer.len() * std::mem::size_of::<f64>();
                    self.memory_usage
                        .fetch_sub(freed_bytes, std::sync::atomic::Ordering::Relaxed);
                }
            }
        }

        {
            let mut buffers = self.large_buffers.lock().unwrap();
            let cleanup_count = (buffers.len() as f64 * cleanup_ratio) as usize;
            for _ in 0..cleanup_count {
                if let Some(buffer) = buffers.pop_back() {
                    let freed_bytes = buffer.len() * std::mem::size_of::<f64>();
                    self.memory_usage
                        .fetch_sub(freed_bytes, std::sync::atomic::Ordering::Relaxed);
                }
            }
        }
    }

    fn return_distance_buffer(&self, buffer: Box<[f64]>) {
        let buffer_size_bytes = buffer.len() * std::mem::size_of::<f64>();
        let is_large = buffer_size_bytes > self.config.large_object_threshold;

        if is_large {
            let mut buffers = self.large_buffers.lock().unwrap();
            if buffers.len() < self.config.max_pool_size / 10 {
                buffers.push_back(buffer);
                return;
            }
        } else {
            let mut buffers = self.distance_buffers.lock().unwrap();
            if buffers.len() < self.config.max_pool_size {
                buffers.push_back(buffer);
                return;
            }
        }

        // The pool is full, so the buffer is dropped here and its bytes leave the
        // accounted footprint. Pooled buffers remain counted until they are evicted.
        self.memory_usage
            .fetch_sub(buffer_size_bytes, std::sync::atomic::Ordering::Relaxed);
    }

    fn return_index_buffer(&self, buffer: Box<[usize]>) {
        let mut buffers = self.index_buffers.lock().unwrap();
        if buffers.len() < self.config.max_pool_size {
            buffers.push_back(buffer);
        }
    }

    fn return_matrix_buffer(&self, matrix: Array2<f64>) {
        let mut buffers = self.matrix_buffers.lock().unwrap();
        if buffers.len() < self.config.max_pool_size / 4 {
            buffers.push_back(matrix);
        }
    }

    pub fn statistics(&self) -> PoolStatistics {
        self.stats.clone()
    }

    pub fn memory_usage(&self) -> usize {
        self.memory_usage.load(std::sync::atomic::Ordering::Relaxed)
    }

    pub fn current_numa_node(&self) -> i32 {
        self.numa_node.load(std::sync::atomic::Ordering::Relaxed)
    }

    pub fn pool_info(&self) -> PoolInfo {
        let distance_count = self.distance_buffers.lock().unwrap().len();
        let index_count = self.index_buffers.lock().unwrap().len();
        let matrix_count = self.matrix_buffers.lock().unwrap().len();
        let large_count = self.large_buffers.lock().unwrap().len();

        PoolInfo {
            distance_buffer_count: distance_count,
            index_buffer_count: index_count,
            matrix_buffer_count: matrix_count,
            large_buffer_count: large_count,
            total_memory_usage: self.memory_usage(),
            numa_node: self.current_numa_node(),
            hit_rate: self.stats.hit_rate(),
        }
    }

    pub fn clear(&self) {
        self.distance_buffers.lock().unwrap().clear();
        self.index_buffers.lock().unwrap().clear();
        self.matrix_buffers.lock().unwrap().clear();
        self.large_buffers.lock().unwrap().clear();
        self.memory_usage
            .store(0, std::sync::atomic::Ordering::Relaxed);
        self.stats.reset();
    }
}

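// Illustrative sketch: checking a distance buffer out of the pool, filling it, and
// letting RAII hand it back on drop. The buffer size and the `sum` computation are
// assumptions used only for demonstration.
#[allow(dead_code)]
fn example_pool_round_trip(pool: &DistancePool) -> f64 {
    let mut buffer = pool.get_distance_buffer(1024);
    for (i, slot) in buffer.as_mut_slice().iter_mut().enumerate() {
        *slot = i as f64;
    }
    let sum: f64 = buffer.as_slice().iter().sum();
    // `buffer` is dropped here, which returns the allocation to `pool` for reuse.
    sum
}
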
/// RAII view over a pooled `f64` buffer; the buffer is handed back to its
/// [`DistancePool`] when dropped.
pub struct DistanceBuffer<'a> {
    buffer: Option<Box<[f64]>>,
    pool: &'a DistancePool,
}

impl<'a> DistanceBuffer<'a> {
    fn new(buffer: Box<[f64]>, pool: &'a DistancePool) -> Self {
        Self {
            buffer: Some(buffer),
            pool,
        }
    }

    pub fn as_mut_slice(&mut self) -> &mut [f64] {
        self.buffer.as_mut().unwrap().as_mut()
    }

    pub fn as_slice(&self) -> &[f64] {
        self.buffer.as_ref().unwrap().as_ref()
    }

    pub fn len(&self) -> usize {
        self.buffer.as_ref().unwrap().len()
    }

    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    pub fn as_array_mut(&mut self) -> ArrayViewMut1<f64> {
        ArrayViewMut1::from(self.as_mut_slice())
    }
}

impl Drop for DistanceBuffer<'_> {
    fn drop(&mut self) {
        if let Some(buffer) = self.buffer.take() {
            self.pool.return_distance_buffer(buffer);
        }
    }
}

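// Illustrative sketch: viewing a pooled buffer as an ndarray `ArrayViewMut1` so it can
// participate in scirs2_core::ndarray operations. The size and fill value are assumptions.
#[allow(dead_code)]
fn example_buffer_as_ndarray(pool: &DistancePool) {
    let mut buffer = pool.get_distance_buffer(16);
    let mut view = buffer.as_array_mut();
    view.fill(0.0);
}
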
/// RAII view over a pooled `usize` buffer; the buffer is returned to its pool on drop.
pub struct IndexBuffer<'a> {
    buffer: Option<Box<[usize]>>,
    pool: &'a DistancePool,
}

impl<'a> IndexBuffer<'a> {
    fn new(buffer: Box<[usize]>, pool: &'a DistancePool) -> Self {
        Self {
            buffer: Some(buffer),
            pool,
        }
    }

    pub fn as_mut_slice(&mut self) -> &mut [usize] {
        self.buffer.as_mut().unwrap().as_mut()
    }

    pub fn as_slice(&self) -> &[usize] {
        self.buffer.as_ref().unwrap().as_ref()
    }

    pub fn len(&self) -> usize {
        self.buffer.as_ref().unwrap().len()
    }

    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}

impl Drop for IndexBuffer<'_> {
    fn drop(&mut self) {
        if let Some(buffer) = self.buffer.take() {
            self.pool.return_index_buffer(buffer);
        }
    }
}

/// RAII view over a pooled `Array2<f64>`; the matrix is returned to its pool on drop.
pub struct MatrixBuffer<'a> {
    matrix: Option<Array2<f64>>,
    pool: &'a DistancePool,
}

impl<'a> MatrixBuffer<'a> {
    fn new(matrix: Array2<f64>, pool: &'a DistancePool) -> Self {
        Self {
            matrix: Some(matrix),
            pool,
        }
    }

    pub fn as_mut(&mut self) -> ArrayViewMut2<f64> {
        self.matrix.as_mut().unwrap().view_mut()
    }

    pub fn dim(&self) -> (usize, usize) {
        self.matrix.as_ref().unwrap().dim()
    }

    pub fn fill(&mut self, value: f64) {
        self.matrix.as_mut().unwrap().fill(value);
    }
}

impl Drop for MatrixBuffer<'_> {
    fn drop(&mut self) {
        if let Some(matrix) = self.matrix.take() {
            self.pool.return_matrix_buffer(matrix);
        }
    }
}

/// Bump-allocation arena for short-lived scratch data used during clustering.
/// Allocations are released in bulk via [`ClusteringArena::reset`].
pub struct ClusteringArena {
    config: MemoryPoolConfig,
    current_block: Mutex<Option<ArenaBlock>>,
    full_blocks: Mutex<Vec<ArenaBlock>>,
    stats: ArenaStatistics,
}

impl ClusteringArena {
    pub fn new() -> Self {
        Self::with_config(MemoryPoolConfig::default())
    }

    pub fn with_config(config: MemoryPoolConfig) -> Self {
        Self {
            config,
            current_block: Mutex::new(None),
            full_blocks: Mutex::new(Vec::new()),
            stats: ArenaStatistics::new(),
        }
    }

    pub fn alloc_temp_vec<T: Default + Clone>(&self, size: usize) -> ArenaVec<T> {
        let layout = Layout::array::<T>(size).unwrap();
        let ptr = self.allocate_raw(layout);

        unsafe {
            let typed_ptr = ptr.as_ptr() as *mut T;
            // Initialise every element; the offset is applied to the typed pointer
            // so the elements are written at the correct stride.
            for i in 0..size {
                std::ptr::write(typed_ptr.add(i), T::default());
            }

            ArenaVec::new(typed_ptr, size)
        }
    }

    fn allocate_raw(&self, layout: Layout) -> NonNull<u8> {
        let mut current = self.current_block.lock().unwrap();

        if current.is_none() || !current.as_ref().unwrap().can_allocate(layout) {
            if let Some(old_block) = current.take() {
                self.full_blocks.lock().unwrap().push(old_block);
            }
            *current = Some(ArenaBlock::new(self.config.arena_block_size));
        }

        current.as_mut().unwrap().allocate(layout)
    }

    pub fn reset(&self) {
        let mut current = self.current_block.lock().unwrap();
        let mut full_blocks = self.full_blocks.lock().unwrap();

        if let Some(block) = current.take() {
            full_blocks.push(block);
        }

        for block in full_blocks.iter_mut() {
            block.reset();
        }

        if let Some(block) = full_blocks.pop() {
            *current = Some(block);
        }

        self.stats.reset();
    }

    pub fn statistics(&self) -> ArenaStatistics {
        self.stats.clone()
    }
}

impl Default for ClusteringArena {
    fn default() -> Self {
        Self::new()
    }
}

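// Illustrative sketch: the intended per-iteration usage pattern for `ClusteringArena` —
// allocate scratch space, do the work, then reset so the arena blocks are reused on the
// next iteration. The iteration count and scratch size are assumptions.
#[allow(dead_code)]
fn example_arena_iterations(arena: &ClusteringArena) {
    for _ in 0..3 {
        {
            let mut scratch = arena.alloc_temp_vec::<f64>(256);
            scratch.as_mut_slice().fill(0.0);
            // ... per-iteration work using `scratch` would go here ...
        }
        // Release every allocation from this iteration in one step.
        arena.reset();
    }
}
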
struct ArenaBlock {
    memory: NonNull<u8>,
    size: usize,
    offset: usize,
}

unsafe impl Send for ArenaBlock {}
unsafe impl Sync for ArenaBlock {}

impl ArenaBlock {
    fn new(size: usize) -> Self {
        let layout = Layout::from_size_align(size, 64).unwrap();
        let memory =
            unsafe { NonNull::new(System.alloc(layout)).expect("Failed to allocate arena block") };

        Self {
            memory,
            size,
            offset: 0,
        }
    }

    fn can_allocate(&self, layout: Layout) -> bool {
        let aligned_offset = (self.offset + layout.align() - 1) & !(layout.align() - 1);
        aligned_offset + layout.size() <= self.size
    }

    fn allocate(&mut self, layout: Layout) -> NonNull<u8> {
        assert!(self.can_allocate(layout));

        self.offset = (self.offset + layout.align() - 1) & !(layout.align() - 1);

        let ptr = unsafe { NonNull::new_unchecked(self.memory.as_ptr().add(self.offset)) };
        self.offset += layout.size();

        ptr
    }

    fn reset(&mut self) {
        self.offset = 0;
    }
}

impl Drop for ArenaBlock {
    fn drop(&mut self) {
        let layout = Layout::from_size_align(self.size, 64).unwrap();
        unsafe {
            System.dealloc(self.memory.as_ptr(), layout);
        }
    }
}

pub struct ArenaVec<T> {
    ptr: *mut T,
    len: usize,
    phantom: std::marker::PhantomData<T>,
}

impl<T> ArenaVec<T> {
    fn new(ptr: *mut T, len: usize) -> Self {
        Self {
            ptr,
            len,
            phantom: std::marker::PhantomData,
        }
    }

    pub fn as_mut_slice(&mut self) -> &mut [T] {
        unsafe { std::slice::from_raw_parts_mut(self.ptr, self.len) }
    }

    pub fn as_slice(&self) -> &[T] {
        unsafe { std::slice::from_raw_parts(self.ptr, self.len) }
    }

    pub fn len(&self) -> usize {
        self.len
    }

    pub fn is_empty(&self) -> bool {
        self.len == 0
    }
}

#[derive(Debug, Clone)]
pub struct PoolInfo {
    pub distance_buffer_count: usize,
    pub index_buffer_count: usize,
    pub matrix_buffer_count: usize,
    pub large_buffer_count: usize,
    pub total_memory_usage: usize,
    pub numa_node: i32,
    pub hit_rate: f64,
}

#[derive(Debug)]
pub struct PoolStatistics {
    hits: std::sync::atomic::AtomicUsize,
    misses: std::sync::atomic::AtomicUsize,
    total_allocations: std::sync::atomic::AtomicUsize,
}

impl PoolStatistics {
    fn new() -> Self {
        Self {
            hits: std::sync::atomic::AtomicUsize::new(0),
            misses: std::sync::atomic::AtomicUsize::new(0),
            total_allocations: std::sync::atomic::AtomicUsize::new(0),
        }
    }

    fn record_hit(&self) {
        self.hits.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
    }

    fn record_miss(&self) {
        self.misses
            .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
        self.total_allocations
            .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
    }

    fn reset(&self) {
        self.hits.store(0, std::sync::atomic::Ordering::Relaxed);
        self.misses.store(0, std::sync::atomic::Ordering::Relaxed);
        self.total_allocations
            .store(0, std::sync::atomic::Ordering::Relaxed);
    }

    /// Pool hit rate as a percentage (0.0 to 100.0) of all buffer requests.
    pub fn hit_rate(&self) -> f64 {
        let hits = self.hits.load(std::sync::atomic::Ordering::Relaxed);
        let total = hits + self.misses.load(std::sync::atomic::Ordering::Relaxed);
        if total == 0 {
            0.0
        } else {
            hits as f64 / total as f64 * 100.0
        }
    }

    pub fn total_requests(&self) -> usize {
        self.hits.load(std::sync::atomic::Ordering::Relaxed)
            + self.misses.load(std::sync::atomic::Ordering::Relaxed)
    }

    pub fn total_allocations(&self) -> usize {
        self.total_allocations
            .load(std::sync::atomic::Ordering::Relaxed)
    }
}

impl Clone for PoolStatistics {
    fn clone(&self) -> Self {
        Self {
            hits: std::sync::atomic::AtomicUsize::new(
                self.hits.load(std::sync::atomic::Ordering::Relaxed),
            ),
            misses: std::sync::atomic::AtomicUsize::new(
                self.misses.load(std::sync::atomic::Ordering::Relaxed),
            ),
            total_allocations: std::sync::atomic::AtomicUsize::new(
                self.total_allocations
                    .load(std::sync::atomic::Ordering::Relaxed),
            ),
        }
    }
}

#[derive(Debug)]
pub struct ArenaStatistics {
    blocks_allocated: std::sync::atomic::AtomicUsize,
    total_memory: std::sync::atomic::AtomicUsize,
    active_objects: std::sync::atomic::AtomicUsize,
}

impl ArenaStatistics {
    fn new() -> Self {
        Self {
            blocks_allocated: std::sync::atomic::AtomicUsize::new(0),
            total_memory: std::sync::atomic::AtomicUsize::new(0),
            active_objects: std::sync::atomic::AtomicUsize::new(0),
        }
    }

    fn reset(&self) {
        self.blocks_allocated
            .store(0, std::sync::atomic::Ordering::Relaxed);
        self.total_memory
            .store(0, std::sync::atomic::Ordering::Relaxed);
        self.active_objects
            .store(0, std::sync::atomic::Ordering::Relaxed);
    }

    pub fn blocks_allocated(&self) -> usize {
        self.blocks_allocated
            .load(std::sync::atomic::Ordering::Relaxed)
    }

    pub fn total_memory(&self) -> usize {
        self.total_memory.load(std::sync::atomic::Ordering::Relaxed)
    }

    pub fn active_objects(&self) -> usize {
        self.active_objects
            .load(std::sync::atomic::Ordering::Relaxed)
    }
}

impl Clone for ArenaStatistics {
    fn clone(&self) -> Self {
        Self {
            blocks_allocated: std::sync::atomic::AtomicUsize::new(
                self.blocks_allocated
                    .load(std::sync::atomic::Ordering::Relaxed),
            ),
            total_memory: std::sync::atomic::AtomicUsize::new(
                self.total_memory.load(std::sync::atomic::Ordering::Relaxed),
            ),
            active_objects: std::sync::atomic::AtomicUsize::new(
                self.active_objects
                    .load(std::sync::atomic::Ordering::Relaxed),
            ),
        }
    }
}

#[derive(Debug, Clone)]
pub struct NumaTopology {
    pub nodes: Vec<NumaNode>,
}

#[derive(Debug, Clone)]
pub struct NumaNode {
    pub id: u32,
    pub total_memory_bytes: u64,
    pub available_memory_bytes: u64,
    pub cpu_count: u32,
}

impl Default for NumaTopology {
    fn default() -> Self {
        Self {
            // Conservative single-node fallback: 8 GiB total, 4 GiB available, 4 CPUs.
            nodes: vec![NumaNode {
                id: 0,
                total_memory_bytes: 8 * 1024 * 1024 * 1024,
                available_memory_bytes: 4 * 1024 * 1024 * 1024,
                cpu_count: 4,
            }],
        }
    }
}

impl NumaTopology {
    pub fn get_optimal_node(&self) -> u32 {
        if !self.nodes.is_empty() {
            self.nodes[0].id
        } else {
            0
        }
    }

    pub fn get_node_with_most_memory(&self) -> Option<u32> {
        self.nodes
            .iter()
            .max_by_key(|node| node.available_memory_bytes)
            .map(|node| node.id)
    }

    pub fn total_system_memory(&self) -> u64 {
        self.nodes.iter().map(|node| node.total_memory_bytes).sum()
    }

    pub fn total_available_memory(&self) -> u64 {
        self.nodes
            .iter()
            .map(|node| node.available_memory_bytes)
            .sum()
    }

    pub fn has_node(&self, node_id: u32) -> bool {
        self.nodes.iter().any(|node| node.id == node_id)
    }

    pub fn get_node_info(&self, node_id: u32) -> Option<&NumaNode> {
        self.nodes.iter().find(|node| node.id == node_id)
    }
}

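// Illustrative sketch: querying the detected NUMA topology and picking the node with
// the most available memory as an allocation target, falling back to the first node.
#[allow(dead_code)]
fn example_numa_placement() -> u32 {
    let topology = DistancePool::get_numa_topology();
    topology
        .get_node_with_most_memory()
        .unwrap_or_else(|| topology.get_optimal_node())
}
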
static GLOBAL_DISTANCE_POOL: std::sync::OnceLock<DistancePool> = std::sync::OnceLock::new();
static GLOBAL_CLUSTERING_ARENA: std::sync::OnceLock<ClusteringArena> = std::sync::OnceLock::new();

#[allow(dead_code)]
pub fn global_distance_pool() -> &'static DistancePool {
    GLOBAL_DISTANCE_POOL.get_or_init(|| DistancePool::new(1000))
}

#[allow(dead_code)]
pub fn global_clustering_arena() -> &'static ClusteringArena {
    GLOBAL_CLUSTERING_ARENA.get_or_init(ClusteringArena::new)
}

#[allow(dead_code)]
pub fn create_numa_optimized_pool(capacity: usize) -> DistancePool {
    let config = MemoryPoolConfig {
        numa_aware: true,
        auto_numa_discovery: true,
        enable_thread_affinity: true,
        ..Default::default()
    };

    DistancePool::with_config(capacity, config)
}

#[allow(dead_code)]
pub fn get_numa_topology() -> NumaTopology {
    DistancePool::get_numa_topology()
}

#[allow(dead_code)]
pub fn test_numa_capabilities() -> NumaCapabilities {
    NumaCapabilities::detect()
}

#[derive(Debug, Clone)]
pub struct NumaCapabilities {
    pub numa_available: bool,
    pub num_nodes: u32,
    pub memory_binding_supported: bool,
    pub thread_affinity_supported: bool,
    pub platform_details: String,
}

impl NumaCapabilities {
    pub fn detect() -> Self {
        #[cfg(target_os = "linux")]
        {
            Self::detect_linux()
        }
        #[cfg(target_os = "windows")]
        {
            Self::detect_windows()
        }
        #[cfg(not(any(target_os = "linux", target_os = "windows")))]
        {
            Self {
                numa_available: false,
                num_nodes: 1,
                memory_binding_supported: false,
                thread_affinity_supported: false,
                platform_details: "Unsupported platform".to_string(),
            }
        }
    }

    #[cfg(target_os = "linux")]
    fn detect_linux() -> Self {
        let numa_available = std::path::Path::new("/sys/devices/system/node").exists();
        let num_nodes = if numa_available {
            DistancePool::get_numa_topology().nodes.len() as u32
        } else {
            1
        };

        Self {
            numa_available,
            num_nodes,
            memory_binding_supported: numa_available,
            thread_affinity_supported: true,
            platform_details: format!("Linux with {num_nodes} NUMA nodes"),
        }
    }

    #[cfg(target_os = "windows")]
    fn detect_windows() -> Self {
        Self {
            numa_available: true,
            num_nodes: 1,
            memory_binding_supported: true,
            thread_affinity_supported: true,
            platform_details: "Windows NUMA support".to_string(),
        }
    }

    pub fn should_enable_numa(&self) -> bool {
        self.numa_available && self.num_nodes > 1
    }

    pub fn recommended_memory_strategy(&self) -> &'static str {
        if self.should_enable_numa() {
            "NUMA-aware"
        } else {
            "Standard"
        }
    }
}

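// Illustrative sketch: using the detected NUMA capabilities to decide between a
// NUMA-optimised pool and a plain one, optionally binding the current thread first.
// The capacity of 512 and the choice of node 0 are assumptions for the example.
#[allow(dead_code)]
fn example_capability_driven_setup() -> DistancePool {
    let caps = NumaCapabilities::detect();
    if caps.should_enable_numa() {
        // Best effort: ignore binding errors and fall back to unbound execution.
        let _ = DistancePool::bind_thread_to_numa_node(0);
        create_numa_optimized_pool(512)
    } else {
        DistancePool::new(512)
    }
}
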
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_distance_pool() {
        let pool = DistancePool::new(10);

        let mut buffer1 = pool.get_distance_buffer(100);
        assert_eq!(buffer1.len(), 100);

        buffer1.as_mut_slice()[0] = 42.0;
        assert_eq!(buffer1.as_slice()[0], 42.0);

        let buffer2 = pool.get_distance_buffer(50);
        assert_eq!(buffer2.len(), 50);

        drop(buffer1);

        let buffer3 = pool.get_distance_buffer(100);
        assert_eq!(buffer3.len(), 100);
    }

    #[test]
    fn test_arena_allocator() {
        let arena = ClusteringArena::new();

        let mut vec1 = arena.alloc_temp_vec::<f64>(100);
        let mut vec2 = arena.alloc_temp_vec::<usize>(50);

        vec1.as_mut_slice()[0] = std::f64::consts::PI;
        vec2.as_mut_slice()[0] = 42;

        assert_eq!(vec1.as_slice()[0], std::f64::consts::PI);
        assert_eq!(vec2.as_slice()[0], 42);

        arena.reset();

        let mut vec3 = arena.alloc_temp_vec::<f64>(200);
        vec3.as_mut_slice()[0] = 2.71;
        assert_eq!(vec3.as_slice()[0], 2.71);
    }

    #[test]
    fn test_pool_statistics() {
        let pool = DistancePool::new(2);

        let stats = pool.statistics();
        assert_eq!(stats.total_requests(), 0);
        assert_eq!(stats.total_allocations(), 0);

        let _buffer1 = pool.get_distance_buffer(100);
        let stats = pool.statistics();
        assert_eq!(stats.total_requests(), 1);
        assert_eq!(stats.total_allocations(), 1);
        assert!(stats.hit_rate() < 1.0);

        drop(_buffer1);
        let _buffer2 = pool.get_distance_buffer(100);
        let stats = pool.statistics();
        assert_eq!(stats.total_requests(), 2);
        assert_eq!(stats.total_allocations(), 1);
        assert!(stats.hit_rate() > 0.0);
    }

    #[test]
    fn test_matrix_buffer() {
        let pool = DistancePool::new(5);

        let mut matrix = pool.get_matrix_buffer(10, 10);
        assert_eq!(matrix.dim(), (10, 10));

        matrix.fill(42.0);
        drop(matrix);

        let matrix2 = pool.get_matrix_buffer(8, 8);
        assert_eq!(matrix2.dim(), (8, 8));
    }

    #[test]
    fn test_global_pools() {
        let pool = global_distance_pool();
        let arena = global_clustering_arena();

        let _buffer = pool.get_distance_buffer(10);
        let _vec = arena.alloc_temp_vec::<f64>(10);
    }
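
    // Illustrative addition, not part of the original test suite: exercises the
    // pool_info() summary and the memory accounting for a small allocation.
    #[test]
    fn test_pool_info_reporting() {
        let pool = DistancePool::new(4);
        let info = pool.pool_info();
        assert_eq!(info.distance_buffer_count, 0);
        assert_eq!(info.total_memory_usage, 0);

        let buffer = pool.get_distance_buffer(32);
        assert!(pool.memory_usage() >= 32 * std::mem::size_of::<f64>());
        drop(buffer);

        // Once the buffer is dropped it is pooled for reuse rather than freed.
        let info = pool.pool_info();
        assert_eq!(info.distance_buffer_count, 1);
    }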
}