use scirs2_core::ndarray::{ArrayBase, Data, Ix1};
use std::marker::PhantomData;

use super::parallel::ParallelConfig;
use crate::error::{MetricsError, Result};

/// A metric that can be computed incrementally over batches of data.
pub trait StreamingMetric<T> {
    /// Intermediate state accumulated across batches.
    type State;

    /// Creates the initial (empty) state.
    fn init_state(&self) -> Self::State;

    /// Folds a batch of true and predicted values into the state.
    fn update_state(
        &self,
        state: &mut Self::State,
        batch_true: &[T],
        batch_pred: &[T],
    ) -> Result<()>;

    /// Computes the final metric value from the accumulated state.
    fn finalize(&self, state: &Self::State) -> Result<f64>;
}
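
// Illustrative sketch (not part of the original API): a streaming accuracy metric for
// integer class labels, showing one way to implement `StreamingMetric`. The name
// `StreamingAccuracy` and its `(correct, total)` state are assumptions for this example.
#[allow(dead_code)]
struct StreamingAccuracy;

impl StreamingMetric<i64> for StreamingAccuracy {
    // (number of correct predictions, number of samples seen)
    type State = (usize, usize);

    fn init_state(&self) -> Self::State {
        (0, 0)
    }

    fn update_state(
        &self,
        state: &mut Self::State,
        batch_true: &[i64],
        batch_pred: &[i64],
    ) -> Result<()> {
        for (t, p) in batch_true.iter().zip(batch_pred.iter()) {
            if t == p {
                state.0 += 1;
            }
            state.1 += 1;
        }
        Ok(())
    }

    fn finalize(&self, state: &Self::State) -> Result<f64> {
        if state.1 == 0 {
            return Err(MetricsError::DivisionByZero);
        }
        Ok(state.0 as f64 / state.1 as f64)
    }
}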

/// Configuration for computing metrics over fixed-size chunks of data.
#[derive(Debug, Clone)]
pub struct ChunkedMetrics {
    /// Number of elements processed per chunk.
    pub chunk_size: usize,
    /// Parallelism settings used when processing chunks.
    pub parallel_config: ParallelConfig,
}

impl Default for ChunkedMetrics {
    fn default() -> Self {
        ChunkedMetrics {
            chunk_size: 10000,
            parallel_config: ParallelConfig::default(),
        }
    }
}

impl ChunkedMetrics {
    /// Creates a configuration with the default chunk size of 10,000 elements.
    pub fn new() -> Self {
        Default::default()
    }

    /// Sets the number of elements processed per chunk.
    pub fn with_chunk_size(mut self, size: usize) -> Self {
        self.chunk_size = size;
        self
    }

    /// Sets the parallelism configuration used when processing chunks.
    pub fn with_parallel_config(mut self, config: ParallelConfig) -> Self {
        self.parallel_config = config;
        self
    }

    /// Computes a [`StreamingMetric`] over the inputs, one chunk at a time.
    pub fn compute_streaming<T, S1, S2, M>(
        &self,
        y_true: &ArrayBase<S1, Ix1>,
        y_pred: &ArrayBase<S2, Ix1>,
        metric: &M,
    ) -> Result<f64>
    where
        T: Clone,
        S1: Data<Elem = T>,
        S2: Data<Elem = T>,
        M: StreamingMetric<T>,
    {
        if y_true.len() != y_pred.len() {
            return Err(MetricsError::DimensionMismatch(format!(
                "y_true and y_pred must have the same length, got {} and {}",
                y_true.len(),
                y_pred.len()
            )));
        }

        let y_true_vec: Vec<T> = y_true.iter().cloned().collect();
        let y_pred_vec: Vec<T> = y_pred.iter().cloned().collect();

        let mut state = metric.init_state();

        for chunk_idx in 0..y_true_vec.len().div_ceil(self.chunk_size) {
            let start = chunk_idx * self.chunk_size;
            let end = (start + self.chunk_size).min(y_true_vec.len());

            metric.update_state(&mut state, &y_true_vec[start..end], &y_pred_vec[start..end])?;
        }

        metric.finalize(&state)
    }

    /// Applies `row_op` to each chunk of `data` and merges the per-chunk results with
    /// `combine`. Inputs no larger than one chunk are processed directly.
    pub fn compute_rowwise<T, R>(
        &self,
        data: &[T],
        row_op: impl Fn(&[T]) -> Result<R>,
        combine: impl Fn(&[R]) -> Result<R>,
    ) -> Result<R>
    where
        T: Clone,
        R: Clone,
    {
        if data.len() <= self.chunk_size {
            return row_op(data);
        }

        let mut results = Vec::new();

        for chunk_idx in 0..data.len().div_ceil(self.chunk_size) {
            let start = chunk_idx * self.chunk_size;
            let end = (start + self.chunk_size).min(data.len());

            let result = row_op(&data[start..end])?;
            results.push(result);
        }

        combine(&results)
    }
}
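
// Illustrative sketch (not part of the original API): computing a chunked sum of squares
// with `ChunkedMetrics::compute_rowwise`. The chunk size of 4096 is an arbitrary choice
// for the example.
#[allow(dead_code)]
fn example_chunked_sum_of_squares(data: &[f64]) -> Result<f64> {
    let chunked = ChunkedMetrics::new().with_chunk_size(4096);
    chunked.compute_rowwise(
        data,
        // Per-chunk operation: sum of squared values within the chunk.
        |chunk: &[f64]| -> Result<f64> { Ok(chunk.iter().map(|x| x * x).sum()) },
        // Combine step: add the per-chunk partial sums.
        |partials: &[f64]| -> Result<f64> { Ok(partials.iter().sum()) },
    )
}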

/// Accumulator for metrics that are updated online, one sample or batch at a time.
#[derive(Debug, Clone)]
pub struct IncrementalMetrics<T, S> {
    /// Accumulated metric state.
    state: S,
    /// Number of samples folded into the state so far.
    count: usize,
    _marker: PhantomData<T>,
}

impl<T, S> Default for IncrementalMetrics<T, S>
where
    S: Default,
{
    fn default() -> Self {
        Self::new()
    }
}

impl<T, S> IncrementalMetrics<T, S>
where
    S: Default,
{
    /// Creates a new accumulator with `S::default()` as the initial state.
    pub fn new() -> Self {
        IncrementalMetrics {
            state: S::default(),
            count: 0,
            _marker: PhantomData,
        }
    }

    /// Creates a new accumulator starting from an existing state.
    pub fn with_state(state: S) -> Self {
        IncrementalMetrics {
            state,
            count: 0,
            _marker: PhantomData,
        }
    }

    /// Returns a reference to the current state.
    pub fn state(&self) -> &S {
        &self.state
    }

    /// Returns the number of samples processed so far.
    pub fn count(&self) -> usize {
        self.count
    }

    /// Updates the state with a single observation using `update_fn`.
    pub fn update<F>(&mut self, y_true: T, y_pred: T, update_fn: F) -> Result<()>
    where
        F: FnOnce(&mut S, T, T) -> Result<()>,
    {
        update_fn(&mut self.state, y_true, y_pred)?;
        self.count += 1;
        Ok(())
    }

    /// Updates the state with a batch of observations using `update_fn`.
    pub fn update_batch<F>(&mut self, y_true: &[T], y_pred: &[T], update_fn: F) -> Result<()>
    where
        F: Fn(&mut S, &[T], &[T]) -> Result<()>,
    {
        if y_true.len() != y_pred.len() {
            return Err(MetricsError::DimensionMismatch(
                "y_true and y_pred must have the same length".to_string(),
            ));
        }

        update_fn(&mut self.state, y_true, y_pred)?;
        self.count += y_true.len();
        Ok(())
    }

    /// Produces the final metric value from the state and sample count.
    pub fn finalize<F, R>(&self, finalize_fn: F) -> Result<R>
    where
        F: FnOnce(&S, usize) -> Result<R>,
    {
        finalize_fn(&self.state, self.count)
    }
}
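
// Illustrative sketch (not part of the original API): tracking a running mean absolute
// error with `IncrementalMetrics`, one observation at a time.
#[allow(dead_code)]
fn example_incremental_mae(pairs: &[(f64, f64)]) -> Result<f64> {
    let mut acc = IncrementalMetrics::<f64, f64>::new();
    for &(y_true, y_pred) in pairs {
        // Accumulate the absolute error for each observation.
        acc.update(y_true, y_pred, |state, t, p| {
            *state += (t - p).abs();
            Ok(())
        })?;
    }
    // Divide the accumulated error by the number of observations.
    acc.finalize(|state, count| {
        if count == 0 {
            return Err(MetricsError::DivisionByZero);
        }
        Ok(*state / count as f64)
    })
}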

/// A metric that can be computed over memory-mapped data, one chunk at a time.
pub trait MemoryMappedMetric<T> {
    /// Intermediate state accumulated across chunks.
    type State;

    /// Creates the initial (empty) state.
    fn init_state(&self) -> Self::State;

    /// Folds the chunk with index `chunk_idx` into the state.
    fn process_chunk(&self, state: &mut Self::State, chunk_idx: usize, chunk: &[T]) -> Result<()>;

    /// Computes the final metric value from the accumulated state.
    fn finalize(&self, state: &Self::State) -> Result<f64>;
}
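
// Illustrative sketch (not part of the original API): a `MemoryMappedMetric` that sums
// the values of every chunk it sees. The type name `ChunkSum` is an assumption for this
// example; the chunk index is ignored.
#[allow(dead_code)]
struct ChunkSum;

impl MemoryMappedMetric<f64> for ChunkSum {
    type State = f64;

    fn init_state(&self) -> Self::State {
        0.0
    }

    fn process_chunk(&self, state: &mut Self::State, _chunk_idx: usize, chunk: &[f64]) -> Result<()> {
        *state += chunk.iter().sum::<f64>();
        Ok(())
    }

    fn finalize(&self, state: &Self::State) -> Result<f64> {
        Ok(*state)
    }
}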

use crossbeam_utils::CachePadded;
use scirs2_core::ndarray::{Array1, Array2, ArrayView1, ArrayView2, ArrayViewMut1, ArrayViewMut2};
use scirs2_core::numeric::Float;
use std::alloc::{alloc, dealloc, GlobalAlloc, Layout, System};
use std::cell::UnsafeCell;
use std::collections::HashMap;
use std::mem::{align_of, size_of, MaybeUninit};
use std::ptr::{addr_of_mut, NonNull};
use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use std::sync::{Arc, Mutex, RwLock};

/// Central manager for zero-copy buffers, pooled, arena, and memory-mapped allocations.
#[derive(Debug)]
pub struct ZeroCopyMemoryManager {
    memory_pools: HashMap<usize, MemoryPool>,
    simd_allocator: SimdAlignedAllocator,
    arena_allocator: ArenaAllocator,
    mmap_manager: MemoryMappingManager,
    recycler: LockFreeRecycler,
    stats: MemoryStats,
}

/// Pool of fixed-size blocks that recycles freed blocks instead of returning them to the system.
#[derive(Debug)]
pub struct MemoryPool {
    block_size: usize,
    alignment: usize,
    free_blocks: Arc<Mutex<Vec<NonNull<u8>>>>,
    capacity: AtomicUsize,
    allocated_count: AtomicUsize,
    pool_stats: PoolStatistics,
}

/// Allocator that hands out SIMD-aligned memory blocks.
#[derive(Debug)]
pub struct SimdAlignedAllocator {
    alignment_cache: HashMap<usize, Vec<NonNull<u8>>>,
    simd_stats: SimdStats,
}

/// Bump allocator that carves allocations out of large arenas.
#[derive(Debug)]
pub struct ArenaAllocator {
    current_arena: Arc<Mutex<Arena>>,
    arenas: Vec<Arc<Mutex<Arena>>>,
    _default_arenasize: usize,
    arena_stats: ArenaStats,
}

/// A single contiguous memory region used for bump allocation.
#[derive(Debug)]
pub struct Arena {
    memory: NonNull<u8>,
    size: usize,
    offset: usize,
    alignment: usize,
}

/// Tracks active file-backed memory mappings.
#[derive(Debug)]
pub struct MemoryMappingManager {
    mappings: HashMap<String, MemoryMapping>,
    mapping_stats: MappingStats,
}

/// A single file-backed memory mapping.
#[derive(Debug)]
pub struct MemoryMapping {
    file_handle: i32,
    memory_region: NonNull<u8>,
    size: usize,
    access_mode: AccessMode,
    ref_count: AtomicUsize,
}

/// Access permissions for a memory mapping.
#[derive(Debug, Clone, Copy)]
pub enum AccessMode {
    ReadOnly,
    ReadWrite,
    WriteOnly,
    Execute,
}

/// Lock-free recycler that reclaims retired memory blocks.
#[derive(Debug)]
pub struct LockFreeRecycler {
    free_lists: Vec<AtomicPtr<RecyclerNode>>,
    hazard_pointers: Vec<AtomicPtr<RecyclerNode>>,
    retired_nodes: CachePadded<Mutex<Vec<*mut RecyclerNode>>>,
    recycler_stats: RecyclerStats,
}

/// Cache-line-aligned node in the recycler's free lists.
#[repr(align(64))]
#[derive(Debug)]
pub struct RecyclerNode {
    next: AtomicPtr<RecyclerNode>,
    size: usize,
    data: NonNull<u8>,
    timestamp: AtomicUsize,
}

/// Immutable, zero-copy view over a contiguous region of memory.
#[derive(Debug)]
pub struct ZeroCopyArrayView<'a, T> {
    data: NonNull<T>,
    len: usize,
    _lifetime: std::marker::PhantomData<&'a T>,
    memory_manager: &'a ZeroCopyMemoryManager,
}

/// Mutable, zero-copy view over a contiguous region of memory.
#[derive(Debug)]
pub struct ZeroCopyArrayViewMut<'a, T> {
    data: NonNull<T>,
    len: usize,
    _lifetime: std::marker::PhantomData<&'a mut T>,
    memory_manager: &'a ZeroCopyMemoryManager,
}

/// Growable buffer backed by a [`CustomAllocator`]; elements are written in place.
pub struct ZeroCopyBuffer<T> {
    data: NonNull<T>,
    capacity: usize,
    length: usize,
    layout: Layout,
    allocator: Arc<dyn CustomAllocator>,
}

/// Allocation backend that [`ZeroCopyBuffer`] and the memory manager delegate to.
pub trait CustomAllocator: Send + Sync + std::fmt::Debug {
    /// Allocates `size` bytes with the requested alignment.
    fn allocate(&self, size: usize, alignment: usize) -> Result<NonNull<u8>>;

    /// Releases a previously allocated block.
    fn deallocate(&self, ptr: NonNull<u8>, size: usize, alignment: usize);

    /// Resizes a previously allocated block, copying its contents if it has to move.
    fn reallocate(
        &self,
        ptr: NonNull<u8>,
        old_size: usize,
        new_size: usize,
        alignment: usize,
    ) -> Result<NonNull<u8>>;

    /// Returns allocation statistics for this allocator.
    fn get_stats(&self) -> AllocatorStats;

    /// Resets any internal state (for allocators that support it).
    fn reset(&self);
}

pub struct ThreadLocalAllocator {
    local_pools: std::thread::LocalKey<UnsafeCell<HashMap<usize, Vec<NonNull<u8>>>>>,
    global_fallback: Arc<dyn CustomAllocator>,
    local_stats: std::thread::LocalKey<UnsafeCell<AllocatorStats>>,
}

#[derive(Debug)]
pub struct SlabAllocator {
    slab_size: usize,
    object_size: usize,
    objects_per_slab: usize,
    slabs: Vec<Slab>,
    free_objects: Vec<NonNull<u8>>,
    slab_stats: SlabStats,
}

#[derive(Debug)]
pub struct Slab {
    memory: NonNull<u8>,
    free_mask: Vec<u64>,
    free_count: usize,
    id: usize,
}

#[derive(Debug)]
pub struct BuddyAllocator {
    memory_block: NonNull<u8>,
    block_size: usize,
    free_lists: Vec<Vec<NonNull<u8>>>,
    allocation_bitmap: Vec<u64>,
    buddy_stats: BuddyStats,
}

#[derive(Debug)]
pub struct MemoryStats {
    pub total_allocated: AtomicUsize,
    pub total_deallocated: AtomicUsize,
    pub peak_usage: AtomicUsize,
    pub current_usage: AtomicUsize,
    pub allocation_count: AtomicUsize,
    pub deallocation_count: AtomicUsize,
    pub fragmentation_ratio: AtomicUsize,
}

#[derive(Debug)]
pub struct PoolStatistics {
    pub hits: AtomicUsize,
    pub misses: AtomicUsize,
    pub utilization: AtomicUsize,
    pub avg_allocation_time: AtomicUsize,
}

#[derive(Debug)]
pub struct SimdStats {
    pub allocations_by_alignment: HashMap<usize, AtomicUsize>,
    pub simd_memory_usage: AtomicUsize,
    pub vectorization_efficiency: AtomicUsize,
}

#[derive(Debug)]
pub struct ArenaStats {
    pub arenas_created: AtomicUsize,
    pub total_arena_memory: AtomicUsize,
    pub arena_utilization: AtomicUsize,
    pub fragmentation_waste: AtomicUsize,
}

#[derive(Debug)]
pub struct MappingStats {
    pub active_mappings: AtomicUsize,
    pub total_mapped_memory: AtomicUsize,
    pub cache_hits: AtomicUsize,
    pub cache_misses: AtomicUsize,
}

#[derive(Debug)]
pub struct RecyclerStats {
    pub successful_recycles: AtomicUsize,
    pub failed_recycles: AtomicUsize,
    pub hazard_contentions: AtomicUsize,
    pub memory_reclaimed: AtomicUsize,
}

#[derive(Debug)]
pub struct AllocatorStats {
    pub allocation_requests: AtomicUsize,
    pub deallocation_requests: AtomicUsize,
    pub bytes_allocated: AtomicUsize,
    pub bytes_deallocated: AtomicUsize,
    pub allocation_failures: AtomicUsize,
}

#[derive(Debug)]
pub struct SlabStats {
    pub slabs_allocated: AtomicUsize,
    pub objects_allocated: AtomicUsize,
    pub slab_utilization: AtomicUsize,
    pub internal_fragmentation: AtomicUsize,
}

#[derive(Debug)]
pub struct BuddyStats {
    pub allocations_by_order: Vec<AtomicUsize>,
    pub coalescing_operations: AtomicUsize,
    pub splitting_operations: AtomicUsize,
    pub external_fragmentation: AtomicUsize,
}

impl ZeroCopyMemoryManager {
    /// Creates a manager with a 1 MiB default arena and empty pools.
    pub fn new() -> Result<Self> {
        Ok(Self {
            memory_pools: HashMap::new(),
            simd_allocator: SimdAlignedAllocator::new(),
            arena_allocator: ArenaAllocator::new(1024 * 1024)?,
            mmap_manager: MemoryMappingManager::new(),
            recycler: LockFreeRecycler::new(),
            stats: MemoryStats::new(),
        })
    }

    /// Allocates an uninitialized buffer with room for `capacity` elements of `T`.
    pub fn allocate_buffer<T>(&self, capacity: usize) -> Result<ZeroCopyBuffer<T>> {
        let layout = Layout::array::<T>(capacity)
            .map_err(|_| MetricsError::MemoryError("Invalid layout".to_string()))?;

        let allocator = self.get_optimal_allocator(layout.size(), layout.align());
        let ptr = allocator.allocate(layout.size(), layout.align())?;

        self.stats
            .total_allocated
            .fetch_add(layout.size(), Ordering::Relaxed);
        self.stats.allocation_count.fetch_add(1, Ordering::Relaxed);

        Ok(ZeroCopyBuffer {
            data: ptr.cast::<T>(),
            capacity,
            length: 0,
            layout,
            allocator,
        })
    }

    /// Wraps an existing slice in a zero-copy view without copying its contents.
    pub fn create_view<'a, T>(&'a self, data: &'a [T]) -> ZeroCopyArrayView<'a, T> {
        ZeroCopyArrayView {
            data: NonNull::new(data.as_ptr() as *mut T).unwrap(),
            len: data.len(),
            _lifetime: std::marker::PhantomData,
            memory_manager: self,
        }
    }

    /// Wraps an existing mutable slice in a mutable zero-copy view.
    pub fn create_view_mut<'a, T>(&'a self, data: &'a mut [T]) -> ZeroCopyArrayViewMut<'a, T> {
        ZeroCopyArrayViewMut {
            data: NonNull::new(data.as_mut_ptr()).unwrap(),
            len: data.len(),
            _lifetime: std::marker::PhantomData,
            memory_manager: self,
        }
    }

    /// Allocates a SIMD-aligned buffer for `count` elements of a floating-point type.
    pub fn allocate_simd_aligned<T: Float>(
        &mut self,
        count: usize,
        alignment: usize,
    ) -> Result<ZeroCopyBuffer<T>> {
        let size = count * size_of::<T>();
        let ptr = self.simd_allocator.allocate_aligned(size, alignment)?;

        Ok(ZeroCopyBuffer {
            data: ptr.cast::<T>(),
            capacity: count,
            length: 0,
            layout: Layout::from_size_align(size, alignment).unwrap(),
            allocator: Arc::new(SystemAllocator),
        })
    }

    /// Maps a file into memory and exposes it as a typed, zero-copy view.
    pub fn map_file<T>(
        &self,
        file_path: &str,
        access_mode: AccessMode,
    ) -> Result<ZeroCopyArrayView<T>> {
        let mapping = self.mmap_manager.map_file(file_path, access_mode)?;
        let len = mapping.size / size_of::<T>();

        Ok(ZeroCopyArrayView {
            data: mapping.memory_region.cast::<T>(),
            len,
            _lifetime: std::marker::PhantomData,
            memory_manager: self,
        })
    }

    /// Picks an allocator based on the request's size and alignment.
    fn get_optimal_allocator(&self, size: usize, alignment: usize) -> Arc<dyn CustomAllocator> {
        if size <= 4096 && alignment <= 64 {
            Arc::new(PoolAllocator::new(size))
        } else if alignment > 64 {
            Arc::new(SimdAllocatorWrapper::new())
        } else if size >= 1024 * 1024 {
            Arc::new(ArenaAllocatorWrapper::new())
        } else {
            Arc::new(SystemAllocator)
        }
    }

    /// Returns the manager-wide allocation statistics.
    pub fn get_stats(&self) -> &MemoryStats {
        &self.stats
    }

    /// Reclaims recycled and pooled memory, returning the number of bytes reclaimed.
    pub fn garbage_collect(&self) -> Result<usize> {
        let mut reclaimed = 0;

        reclaimed += self.recycler.reclaim_memory()?;

        for pool in self.memory_pools.values() {
            reclaimed += pool.compact()?;
        }

        reclaimed += self.arena_allocator.compact()?;

        Ok(reclaimed)
    }
}
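
// Illustrative usage sketch (not part of the original API): allocating a buffer through
// the manager, filling it, and reading it back as a slice. The capacity of 8 is an
// arbitrary choice for the example.
#[allow(dead_code)]
fn example_zero_copy_buffer() -> Result<()> {
    let manager = ZeroCopyMemoryManager::new()?;

    // Reserve space for 8 f64 values; the buffer starts out empty.
    let mut buffer = manager.allocate_buffer::<f64>(8)?;
    for i in 0..8 {
        buffer.push(i as f64)?;
    }

    // The written elements are visible as an ordinary slice, with no extra copy.
    debug_assert_eq!(buffer.as_slice().len(), 8);
    Ok(())
}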

impl MemoryPool {
    /// Creates a pool of `block_size`-byte blocks with the given alignment and initial capacity.
    pub fn new(block_size: usize, alignment: usize, initial_capacity: usize) -> Self {
        Self {
            block_size,
            alignment,
            free_blocks: Arc::new(Mutex::new(Vec::with_capacity(initial_capacity))),
            capacity: AtomicUsize::new(0),
            allocated_count: AtomicUsize::new(0),
            pool_stats: PoolStatistics::new(),
        }
    }

    /// Returns a free block from the pool, allocating a new block on a miss.
    pub fn allocate(&self) -> Result<NonNull<u8>> {
        let start_time = std::time::Instant::now();

        let mut free_blocks = self.free_blocks.lock().unwrap();
        if let Some(ptr) = free_blocks.pop() {
            self.pool_stats.hits.fetch_add(1, Ordering::Relaxed);
            drop(free_blocks);

            self.allocated_count.fetch_add(1, Ordering::Relaxed);
            let allocation_time = start_time.elapsed().as_nanos() as usize;
            self.update_avg_allocation_time(allocation_time);

            Ok(ptr)
        } else {
            self.pool_stats.misses.fetch_add(1, Ordering::Relaxed);
            drop(free_blocks);

            let layout = Layout::from_size_align(self.block_size, self.alignment)
                .map_err(|_| MetricsError::MemoryError("Invalid layout".to_string()))?;

            let ptr = unsafe { alloc(layout) };
            if ptr.is_null() {
                return Err(MetricsError::MemoryError("Allocation failed".to_string()));
            }

            self.capacity.fetch_add(1, Ordering::Relaxed);
            self.allocated_count.fetch_add(1, Ordering::Relaxed);

            Ok(NonNull::new(ptr).unwrap())
        }
    }

    /// Returns a block to the pool's free list for reuse.
    pub fn deallocate(&self, ptr: NonNull<u8>) {
        self.free_blocks.lock().unwrap().push(ptr);
        self.allocated_count.fetch_sub(1, Ordering::Relaxed);
    }

    /// Releases roughly half of the pooled free blocks and returns the bytes reclaimed.
    pub fn compact(&self) -> Result<usize> {
        let mut free_blocks = self.free_blocks.lock().unwrap();
        let mut reclaimed = 0;

        let keep_count = free_blocks.len() / 2;
        let to_deallocate = free_blocks.split_off(keep_count);

        for ptr in to_deallocate {
            unsafe {
                let layout = Layout::from_size_align(self.block_size, self.alignment).unwrap();
                dealloc(ptr.as_ptr(), layout);
            }
            reclaimed += self.block_size;
        }

        self.capacity
            .fetch_sub(reclaimed / self.block_size, Ordering::Relaxed);
        Ok(reclaimed)
    }

    /// Updates the running average allocation time (in nanoseconds) with an exponential
    /// moving average (7/8 old value, 1/8 new sample).
    fn update_avg_allocation_time(&self, new_time: usize) {
        let current_avg = self.pool_stats.avg_allocation_time.load(Ordering::Relaxed);
        let new_avg = if current_avg == 0 {
            new_time
        } else {
            (current_avg * 7 + new_time) / 8
        };
        self.pool_stats
            .avg_allocation_time
            .store(new_avg, Ordering::Relaxed);
    }
}
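
// Illustrative usage sketch (not part of the original API): taking a block from a
// `MemoryPool` and handing it back. The block size (64 bytes), alignment (8), and
// initial capacity (4) are arbitrary choices for the example.
#[allow(dead_code)]
fn example_memory_pool() -> Result<()> {
    let pool = MemoryPool::new(64, 8, 4);

    // First allocation is a miss (the free list is empty), so a fresh block is created.
    let block = pool.allocate()?;

    // Returning the block puts it on the free list; `compact` then releases it back to
    // the system allocator.
    pool.deallocate(block);
    pool.compact()?;
    Ok(())
}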

impl SimdAlignedAllocator {
    pub fn new() -> Self {
        Self {
            alignment_cache: HashMap::new(),
            simd_stats: SimdStats::new(),
        }
    }

    pub fn allocate_aligned(&mut self, size: usize, alignment: usize) -> Result<NonNull<u8>> {
        let alignment = alignment.max(align_of::<usize>()).next_power_of_two();

        let layout = Layout::from_size_align(size, alignment)
            .map_err(|_| MetricsError::MemoryError("Invalid SIMD layout".to_string()))?;

        let ptr = unsafe { alloc(layout) };
        if ptr.is_null() {
            return Err(MetricsError::MemoryError(
                "SIMD allocation failed".to_string(),
            ));
        }

        self.simd_stats
            .simd_memory_usage
            .fetch_add(size, Ordering::Relaxed);
        self.simd_stats
            .allocations_by_alignment
            .entry(alignment)
            .or_insert_with(|| AtomicUsize::new(0))
            .fetch_add(1, Ordering::Relaxed);

        Ok(NonNull::new(ptr).unwrap())
    }
}

impl ArenaAllocator {
    /// Creates an arena allocator whose arenas default to `_default_arenasize` bytes.
    pub fn new(_default_arenasize: usize) -> Result<Self> {
        let initial_arena = Arc::new(Mutex::new(Arena::new(_default_arenasize)?));

        Ok(Self {
            current_arena: initial_arena.clone(),
            arenas: vec![initial_arena],
            _default_arenasize,
            arena_stats: ArenaStats::new(),
        })
    }

    /// Allocates from the current arena, creating a new arena when it is exhausted.
    pub fn allocate(&mut self, size: usize, alignment: usize) -> Result<NonNull<u8>> {
        let mut arena = self.current_arena.lock().unwrap();

        if let Ok(ptr) = arena.allocate(size, alignment) {
            Ok(ptr)
        } else {
            drop(arena);

            let new_arena_size = self._default_arenasize.max(size * 2);
            let new_arena = Arc::new(Mutex::new(Arena::new(new_arena_size)?));
            self.arenas.push(new_arena.clone());

            let mut arena = new_arena.lock().unwrap();
            arena.allocate(size, alignment)
        }
    }

    /// Resets every arena, discarding all previous allocations.
    pub fn reset(&self) {
        for arena in &self.arenas {
            arena.lock().unwrap().reset();
        }
    }

    /// Compaction placeholder; resets the fragmentation counter and reclaims no memory.
    pub fn compact(&self) -> Result<usize> {
        self.arena_stats
            .fragmentation_waste
            .store(0, Ordering::Relaxed);
        Ok(0)
    }
}

impl Arena {
    /// Allocates a new `size`-byte arena aligned to 64 bytes.
    pub fn new(size: usize) -> Result<Self> {
        let layout = Layout::from_size_align(size, 64)
            .map_err(|_| MetricsError::MemoryError("Invalid arena layout".to_string()))?;

        let ptr = unsafe { alloc(layout) };
        if ptr.is_null() {
            return Err(MetricsError::MemoryError(
                "Arena allocation failed".to_string(),
            ));
        }

        Ok(Self {
            memory: NonNull::new(ptr).unwrap(),
            size,
            offset: 0,
            alignment: 64,
        })
    }

    /// Bump-allocates `size` bytes at the requested alignment.
    pub fn allocate(&mut self, size: usize, alignment: usize) -> Result<NonNull<u8>> {
        // Round the current offset up to the next multiple of `alignment`.
        let aligned_offset = (self.offset + alignment - 1) & !(alignment - 1);

        if aligned_offset + size > self.size {
            return Err(MetricsError::MemoryError("Arena exhausted".to_string()));
        }

        let ptr = unsafe { self.memory.as_ptr().add(aligned_offset) };
        self.offset = aligned_offset + size;

        Ok(NonNull::new(ptr).unwrap())
    }

    /// Resets the bump pointer, making the whole arena available again.
    pub fn reset(&mut self) {
        self.offset = 0;
    }
}
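
// Illustrative usage sketch (not part of the original API): bump allocation from an
// `Arena`. The 1 KiB arena and the 64/32-byte requests are arbitrary choices; note how
// `reset` rewinds the bump pointer instead of freeing individual allocations.
#[allow(dead_code)]
fn example_arena_bump_allocation() -> Result<()> {
    let mut arena = Arena::new(1024)?;

    // Two back-to-back allocations; the second starts at the next 16-byte-aligned offset.
    let _a = arena.allocate(64, 16)?;
    let _b = arena.allocate(32, 16)?;

    // Rewind the arena; the pointers handed out above must not be used after this point.
    arena.reset();
    Ok(())
}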

impl MemoryMappingManager {
    pub fn new() -> Self {
        Self {
            mappings: HashMap::new(),
            mapping_stats: MappingStats::new(),
        }
    }

    /// Maps a file into memory. Not yet implemented; always returns an error.
    pub fn map_file(&self, _file_path: &str, _access_mode: AccessMode) -> Result<&MemoryMapping> {
        Err(MetricsError::MemoryError(
            "Memory mapping not implemented".to_string(),
        ))
    }
}

impl LockFreeRecycler {
    pub fn new() -> Self {
        const NUM_SIZE_CLASSES: usize = 64;

        Self {
            free_lists: (0..NUM_SIZE_CLASSES)
                .map(|_| AtomicPtr::new(std::ptr::null_mut()))
                .collect(),
            hazard_pointers: (0..NUM_SIZE_CLASSES)
                .map(|_| AtomicPtr::new(std::ptr::null_mut()))
                .collect(),
            retired_nodes: CachePadded::new(Mutex::new(Vec::new())),
            recycler_stats: RecyclerStats::new(),
        }
    }

    pub fn reclaim_memory(&self) -> Result<usize> {
        let mut reclaimed = 0;
        let mut retired = self.retired_nodes.lock().unwrap();

        for node_ptr in retired.drain(..) {
            unsafe {
                let node = Box::from_raw(node_ptr);
                reclaimed += node.size;
            }
        }

        self.recycler_stats
            .memory_reclaimed
            .fetch_add(reclaimed, Ordering::Relaxed);
        Ok(reclaimed)
    }
}

/// Allocator that forwards directly to the global system allocator.
#[derive(Debug)]
pub struct SystemAllocator;

impl CustomAllocator for SystemAllocator {
    fn allocate(&self, size: usize, alignment: usize) -> Result<NonNull<u8>> {
        let layout = Layout::from_size_align(size, alignment)
            .map_err(|_| MetricsError::MemoryError("Invalid layout".to_string()))?;

        let ptr = unsafe { alloc(layout) };
        if ptr.is_null() {
            return Err(MetricsError::MemoryError(
                "System allocation failed".to_string(),
            ));
        }

        Ok(NonNull::new(ptr).unwrap())
    }

    fn deallocate(&self, ptr: NonNull<u8>, size: usize, alignment: usize) {
        let layout = Layout::from_size_align(size, alignment).unwrap();
        unsafe { dealloc(ptr.as_ptr(), layout) };
    }

    fn reallocate(
        &self,
        ptr: NonNull<u8>,
        old_size: usize,
        new_size: usize,
        alignment: usize,
    ) -> Result<NonNull<u8>> {
        let new_ptr = self.allocate(new_size, alignment)?;
        unsafe {
            std::ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), old_size.min(new_size));
        }
        self.deallocate(ptr, old_size, alignment);
        Ok(new_ptr)
    }

    fn get_stats(&self) -> AllocatorStats {
        AllocatorStats::new()
    }

    fn reset(&self) {}
}

/// Simple allocator that always hands out blocks of a fixed size.
#[derive(Debug)]
pub struct PoolAllocator {
    block_size: usize,
}

impl PoolAllocator {
    pub fn new(block_size: usize) -> Self {
        Self { block_size }
    }
}

impl CustomAllocator for PoolAllocator {
    fn allocate(&self, size: usize, alignment: usize) -> Result<NonNull<u8>> {
        if size > self.block_size {
            return Err(MetricsError::MemoryError(
                "Size exceeds pool block size".to_string(),
            ));
        }

        let layout = Layout::from_size_align(self.block_size, alignment)
            .map_err(|_| MetricsError::MemoryError("Invalid pool layout".to_string()))?;

        let ptr = unsafe { alloc(layout) };
        if ptr.is_null() {
            return Err(MetricsError::MemoryError(
                "Pool allocation failed".to_string(),
            ));
        }

        Ok(NonNull::new(ptr).unwrap())
    }

    fn deallocate(&self, ptr: NonNull<u8>, _size: usize, alignment: usize) {
        let layout = Layout::from_size_align(self.block_size, alignment).unwrap();
        unsafe { dealloc(ptr.as_ptr(), layout) };
    }

    fn reallocate(
        &self,
        ptr: NonNull<u8>,
        old_size: usize,
        new_size: usize,
        alignment: usize,
    ) -> Result<NonNull<u8>> {
        if new_size <= self.block_size {
            // The existing block is already large enough; keep it.
            Ok(ptr)
        } else {
            let new_ptr = self.allocate(new_size, alignment)?;
            unsafe {
                std::ptr::copy_nonoverlapping(
                    ptr.as_ptr(),
                    new_ptr.as_ptr(),
                    old_size.min(new_size),
                );
            }
            self.deallocate(ptr, old_size, alignment);
            Ok(new_ptr)
        }
    }

    fn get_stats(&self) -> AllocatorStats {
        AllocatorStats::new()
    }

    fn reset(&self) {}
}

/// Allocator wrapper that enforces at least 32-byte (SIMD-friendly) alignment.
#[derive(Debug)]
pub struct SimdAllocatorWrapper;

impl SimdAllocatorWrapper {
    pub fn new() -> Self {
        Self
    }
}

impl CustomAllocator for SimdAllocatorWrapper {
    fn allocate(&self, size: usize, alignment: usize) -> Result<NonNull<u8>> {
        let simd_alignment = alignment.max(32).next_power_of_two();
        let layout = Layout::from_size_align(size, simd_alignment)
            .map_err(|_| MetricsError::MemoryError("Invalid SIMD layout".to_string()))?;

        let ptr = unsafe { alloc(layout) };
        if ptr.is_null() {
            return Err(MetricsError::MemoryError(
                "SIMD allocation failed".to_string(),
            ));
        }

        Ok(NonNull::new(ptr).unwrap())
    }

    fn deallocate(&self, ptr: NonNull<u8>, size: usize, alignment: usize) {
        let simd_alignment = alignment.max(32).next_power_of_two();
        let layout = Layout::from_size_align(size, simd_alignment).unwrap();
        unsafe { dealloc(ptr.as_ptr(), layout) };
    }

    fn reallocate(
        &self,
        ptr: NonNull<u8>,
        old_size: usize,
        new_size: usize,
        alignment: usize,
    ) -> Result<NonNull<u8>> {
        let new_ptr = self.allocate(new_size, alignment)?;
        unsafe {
            std::ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), old_size.min(new_size));
        }
        self.deallocate(ptr, old_size, alignment);
        Ok(new_ptr)
    }

    fn get_stats(&self) -> AllocatorStats {
        AllocatorStats::new()
    }

    fn reset(&self) {}
}

/// Allocator wrapper that, despite its name, currently allocates directly from the
/// global allocator rather than an arena.
#[derive(Debug)]
pub struct ArenaAllocatorWrapper;

impl ArenaAllocatorWrapper {
    pub fn new() -> Self {
        Self
    }
}

impl CustomAllocator for ArenaAllocatorWrapper {
    fn allocate(&self, size: usize, alignment: usize) -> Result<NonNull<u8>> {
        let layout = Layout::from_size_align(size, alignment)
            .map_err(|_| MetricsError::MemoryError("Invalid arena layout".to_string()))?;

        let ptr = unsafe { alloc(layout) };
        if ptr.is_null() {
            return Err(MetricsError::MemoryError(
                "Arena allocation failed".to_string(),
            ));
        }

        Ok(NonNull::new(ptr).unwrap())
    }

    fn deallocate(&self, ptr: NonNull<u8>, size: usize, alignment: usize) {
        let layout = Layout::from_size_align(size, alignment).unwrap();
        unsafe { dealloc(ptr.as_ptr(), layout) };
    }

    fn reallocate(
        &self,
        ptr: NonNull<u8>,
        old_size: usize,
        new_size: usize,
        alignment: usize,
    ) -> Result<NonNull<u8>> {
        let new_ptr = self.allocate(new_size, alignment)?;
        unsafe {
            std::ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), old_size.min(new_size));
        }
        self.deallocate(ptr, old_size, alignment);
        Ok(new_ptr)
    }

    fn get_stats(&self) -> AllocatorStats {
        AllocatorStats::new()
    }

    fn reset(&self) {}
}

impl MemoryStats {
    pub fn new() -> Self {
        Self {
            total_allocated: AtomicUsize::new(0),
            total_deallocated: AtomicUsize::new(0),
            peak_usage: AtomicUsize::new(0),
            current_usage: AtomicUsize::new(0),
            allocation_count: AtomicUsize::new(0),
            deallocation_count: AtomicUsize::new(0),
            fragmentation_ratio: AtomicUsize::new(0),
        }
    }
}

impl PoolStatistics {
    pub fn new() -> Self {
        Self {
            hits: AtomicUsize::new(0),
            misses: AtomicUsize::new(0),
            utilization: AtomicUsize::new(0),
            avg_allocation_time: AtomicUsize::new(0),
        }
    }
}

impl SimdStats {
    pub fn new() -> Self {
        Self {
            allocations_by_alignment: HashMap::new(),
            simd_memory_usage: AtomicUsize::new(0),
            vectorization_efficiency: AtomicUsize::new(0),
        }
    }
}

impl ArenaStats {
    pub fn new() -> Self {
        Self {
            arenas_created: AtomicUsize::new(0),
            total_arena_memory: AtomicUsize::new(0),
            arena_utilization: AtomicUsize::new(0),
            fragmentation_waste: AtomicUsize::new(0),
        }
    }
}

impl MappingStats {
    pub fn new() -> Self {
        Self {
            active_mappings: AtomicUsize::new(0),
            total_mapped_memory: AtomicUsize::new(0),
            cache_hits: AtomicUsize::new(0),
            cache_misses: AtomicUsize::new(0),
        }
    }
}

impl RecyclerStats {
    pub fn new() -> Self {
        Self {
            successful_recycles: AtomicUsize::new(0),
            failed_recycles: AtomicUsize::new(0),
            hazard_contentions: AtomicUsize::new(0),
            memory_reclaimed: AtomicUsize::new(0),
        }
    }
}

impl AllocatorStats {
    pub fn new() -> Self {
        Self {
            allocation_requests: AtomicUsize::new(0),
            deallocation_requests: AtomicUsize::new(0),
            bytes_allocated: AtomicUsize::new(0),
            bytes_deallocated: AtomicUsize::new(0),
            allocation_failures: AtomicUsize::new(0),
        }
    }
}

impl SlabStats {
    pub fn new() -> Self {
        Self {
            slabs_allocated: AtomicUsize::new(0),
            objects_allocated: AtomicUsize::new(0),
            slab_utilization: AtomicUsize::new(0),
            internal_fragmentation: AtomicUsize::new(0),
        }
    }
}

impl BuddyStats {
    pub fn new() -> Self {
        Self {
            allocations_by_order: (0..32).map(|_| AtomicUsize::new(0)).collect(),
            coalescing_operations: AtomicUsize::new(0),
            splitting_operations: AtomicUsize::new(0),
            external_fragmentation: AtomicUsize::new(0),
        }
    }
}

impl<T> ZeroCopyBuffer<T> {
    /// Returns the number of initialized elements.
    pub fn len(&self) -> usize {
        self.length
    }

    /// Returns `true` if no elements have been written yet.
    pub fn is_empty(&self) -> bool {
        self.length == 0
    }

    /// Returns the number of elements the buffer can hold without reallocating.
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// Appends a value, failing if the buffer is already at capacity.
    pub fn push(&mut self, value: T) -> Result<()> {
        if self.length >= self.capacity {
            return Err(MetricsError::MemoryError(
                "Buffer capacity exceeded".to_string(),
            ));
        }

        unsafe {
            std::ptr::write(self.data.as_ptr().add(self.length), value);
        }
        self.length += 1;
        Ok(())
    }

    /// Returns the initialized elements as a slice.
    pub fn as_slice(&self) -> &[T] {
        unsafe { std::slice::from_raw_parts(self.data.as_ptr(), self.length) }
    }

    /// Returns the initialized elements as a mutable slice.
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        unsafe { std::slice::from_raw_parts_mut(self.data.as_ptr(), self.length) }
    }

    /// Resizes the buffer, reallocating through the backing allocator if it must grow.
    /// Elements beyond the previously written length are not initialized.
    pub fn resize(&mut self, new_size: usize) -> Result<()> {
        if new_size <= self.capacity {
            self.length = new_size;
            Ok(())
        } else {
            let new_ptr = self.allocator.reallocate(
                self.data.cast::<u8>(),
                self.layout.size(),
                new_size * size_of::<T>(),
                self.layout.align(),
            )?;

            // Keep the stored layout in sync with the new allocation so that `Drop`
            // releases the correct number of bytes.
            self.layout = Layout::from_size_align(new_size * size_of::<T>(), self.layout.align())
                .map_err(|_| MetricsError::MemoryError("Invalid layout".to_string()))?;

            self.data = new_ptr.cast::<T>();
            self.capacity = new_size;
            self.length = new_size;
            Ok(())
        }
    }
}

impl<T> Drop for ZeroCopyBuffer<T> {
    fn drop(&mut self) {
        // Drop the initialized elements before releasing the backing allocation.
        for i in 0..self.length {
            unsafe {
                std::ptr::drop_in_place(self.data.as_ptr().add(i));
            }
        }

        self.allocator.deallocate(
            self.data.cast::<u8>(),
            self.layout.size(),
            self.layout.align(),
        );
    }
}

impl<'a, T> ZeroCopyArrayView<'a, T> {
    /// Returns the number of elements in the view.
    pub fn len(&self) -> usize {
        self.len
    }

    /// Returns `true` if the view is empty.
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Returns a reference to the element at `index`, if it is in bounds.
    pub fn get(&self, index: usize) -> Option<&T> {
        if index < self.len {
            unsafe { Some(&*self.data.as_ptr().add(index)) }
        } else {
            None
        }
    }

    /// Returns the whole view as a slice.
    pub fn as_slice(&self) -> &[T] {
        unsafe { std::slice::from_raw_parts(self.data.as_ptr(), self.len) }
    }

    /// Returns a view over `len` elements starting at `start`, without copying.
    pub fn subview(&self, start: usize, len: usize) -> Result<ZeroCopyArrayView<'a, T>> {
        if start + len > self.len {
            return Err(MetricsError::IndexError(
                "Subview bounds exceed array".to_string(),
            ));
        }

        Ok(ZeroCopyArrayView {
            data: unsafe { NonNull::new_unchecked(self.data.as_ptr().add(start)) },
            len,
            _lifetime: std::marker::PhantomData,
            memory_manager: self.memory_manager,
        })
    }
}
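
// Illustrative usage sketch (not part of the original API): wrapping a slice in a
// zero-copy view and taking a window of it with `subview`. The window offsets are
// arbitrary choices for the example.
#[allow(dead_code)]
fn example_zero_copy_subview(manager: &ZeroCopyMemoryManager, data: &[f64]) -> Result<f64> {
    let view = manager.create_view(data);

    // A window of up to four elements starting at index 2; no data is copied.
    let window = view.subview(2, data.len().saturating_sub(2).min(4))?;
    Ok(window.as_slice().iter().sum())
}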

impl<'a, T> ZeroCopyArrayViewMut<'a, T> {
    /// Returns the number of elements in the view.
    pub fn len(&self) -> usize {
        self.len
    }

    /// Returns `true` if the view is empty.
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Returns a reference to the element at `index`, if it is in bounds.
    pub fn get(&self, index: usize) -> Option<&T> {
        if index < self.len {
            unsafe { Some(&*self.data.as_ptr().add(index)) }
        } else {
            None
        }
    }

    /// Returns a mutable reference to the element at `index`, if it is in bounds.
    pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
        if index < self.len {
            unsafe { Some(&mut *self.data.as_ptr().add(index)) }
        } else {
            None
        }
    }

    /// Returns the whole view as a slice.
    pub fn as_slice(&self) -> &[T] {
        unsafe { std::slice::from_raw_parts(self.data.as_ptr(), self.len) }
    }

    /// Returns the whole view as a mutable slice.
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        unsafe { std::slice::from_raw_parts_mut(self.data.as_ptr(), self.len) }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::Array1;

    struct StreamingMAE;

    impl StreamingMetric<f64> for StreamingMAE {
        // (sum of absolute errors, number of samples)
        type State = (f64, usize);

        fn init_state(&self) -> Self::State {
            (0.0, 0)
        }

        fn update_state(
            &self,
            state: &mut Self::State,
            batch_true: &[f64],
            batch_pred: &[f64],
        ) -> Result<()> {
            for (y_t, y_p) in batch_true.iter().zip(batch_pred.iter()) {
                state.0 += (y_t - y_p).abs();
                state.1 += 1;
            }
            Ok(())
        }

        fn finalize(&self, state: &Self::State) -> Result<f64> {
            if state.1 == 0 {
                return Err(MetricsError::DivisionByZero);
            }
            Ok(state.0 / state.1 as f64)
        }
    }

    #[test]
    fn test_chunked_streaming_metric() {
        let y_true = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0]);
        let y_pred = Array1::from_vec(vec![1.2, 2.3, 2.9, 4.1, 5.2]);

        let chunked = ChunkedMetrics::new().with_chunk_size(2);
        let mae = chunked
            .compute_streaming(&y_true, &y_pred, &StreamingMAE)
            .unwrap();

        let expected_mae = y_true
            .iter()
            .zip(y_pred.iter())
            .map(|(t, p)| (t - p).abs())
            .sum::<f64>()
            / y_true.len() as f64;

        assert!((mae - expected_mae).abs() < 1e-10);
    }

    #[test]
    fn test_compute_rowwise() {
        let data: Vec<f64> = (0..100).map(|x| x as f64).collect();

        let row_op = |chunk: &[f64]| -> Result<f64> { Ok(chunk.iter().map(|x| x * x).sum()) };

        let combine = |results: &[f64]| -> Result<f64> { Ok(results.iter().sum()) };

        let chunked = ChunkedMetrics::new().with_chunk_size(10);
        let result = chunked.compute_rowwise(&data, row_op, combine).unwrap();

        let expected: f64 = data.iter().map(|x| x * x).sum();

        assert!((result - expected).abs() < 1e-10);
    }

    #[test]
    fn test_incremental_metrics() {
        let data = vec![(1.0, 1.2), (2.0, 1.8), (3.0, 3.1), (4.0, 4.2), (5.0, 4.9)];

        let mse_update = |state: &mut f64, y_true: f64, y_pred: f64| -> Result<()> {
            *state += (y_true - y_pred).powi(2);
            Ok(())
        };

        let mse_finalize = |state: &f64, count: usize| -> Result<f64> {
            if count == 0 {
                return Err(MetricsError::DivisionByZero);
            }
            Ok(*state / count as f64)
        };

        let expected_mse =
            data.iter().map(|&(t, p)| (t - p) * (t - p)).sum::<f64>() / data.len() as f64;

        let mut incremental = IncrementalMetrics::<f64, f64>::new();

        for &(y_true, y_pred) in &data {
            incremental.update(y_true, y_pred, mse_update).unwrap();
        }

        let mse = incremental.finalize(mse_finalize).unwrap();
        assert!((mse - expected_mse).abs() < 1e-10);

        let (y_true, y_pred): (Vec<_>, Vec<_>) = data.iter().cloned().unzip();

        let batch_update = |state: &mut f64, y_true: &[f64], y_pred: &[f64]| -> Result<()> {
            for (t, p) in y_true.iter().zip(y_pred.iter()) {
                *state += (t - p).powi(2);
            }
            Ok(())
        };

        let mut incremental_batch = IncrementalMetrics::<f64, f64>::new();
        incremental_batch
            .update_batch(&y_true, &y_pred, batch_update)
            .unwrap();

        let mse_batch = incremental_batch.finalize(mse_finalize).unwrap();
        assert!((mse_batch - expected_mse).abs() < 1e-10);
    }
}