use scirs2_core::ndarray::{s, Array2, ArrayView2};
use sklears_core::error::{Result as SklResult, SklearsError};
use std::alloc::{self, Layout};
use std::collections::{BTreeMap, HashMap, VecDeque};
use std::mem;
use std::sync::{Arc, Mutex, RwLock};
use std::thread::{self, JoinHandle};
use std::time::{Duration, SystemTime};

/// Snapshot of memory usage tracked by [`MemoryMonitor`].
#[derive(Debug, Clone)]
pub struct MemoryUsage {
    /// Currently allocated bytes.
    pub allocated: u64,
    /// Peak allocated bytes observed so far.
    pub peak: u64,
    /// Total number of allocations recorded.
    pub allocations: u64,
    /// Total number of deallocations recorded.
    pub deallocations: u64,
    /// Estimated fragmentation (share of allocations not yet freed).
    pub fragmentation: f64,
    /// Time of the last update.
    pub updated_at: SystemTime,
}

impl Default for MemoryUsage {
    fn default() -> Self {
        Self {
            allocated: 0,
            peak: 0,
            allocations: 0,
            deallocations: 0,
            fragmentation: 0.0,
            updated_at: SystemTime::now(),
        }
    }
}

impl MemoryUsage {
    /// Record a new usage sample and refresh the derived statistics.
    pub fn update(&mut self, allocated: u64, allocations: u64, deallocations: u64) {
        self.allocated = allocated;
        self.allocations = allocations;
        self.deallocations = deallocations;

        if allocated > self.peak {
            self.peak = allocated;
        }

        if allocations > 0 {
            // Fraction of allocations that have not yet been freed.
            self.fragmentation =
                allocations.saturating_sub(deallocations) as f64 / allocations as f64;
        }

        self.updated_at = SystemTime::now();
    }

    /// Fraction of `total_available` currently allocated (0.0 when the total is unknown).
    #[must_use]
    pub fn utilization(&self, total_available: u64) -> f64 {
        if total_available == 0 {
            0.0
        } else {
            self.allocated as f64 / total_available as f64
        }
    }

    /// Returns `true` when utilization of `total_available` exceeds `threshold`.
    #[must_use]
    pub fn is_critical(&self, threshold: f64, total_available: u64) -> bool {
        self.utilization(total_available) > threshold
    }
}

/// Background memory monitor that periodically samples usage, keeps a bounded
/// history, and notifies registered callbacks.
pub struct MemoryMonitor {
    /// Most recent usage snapshot.
    usage: Arc<RwLock<MemoryUsage>>,
    /// Monitoring configuration.
    config: MemoryMonitorConfig,
    /// Bounded history of usage snapshots.
    history: Arc<RwLock<VecDeque<MemoryUsage>>>,
    /// Handle of the background sampling thread, if running.
    monitor_thread: Option<JoinHandle<()>>,
    /// Flag used to signal the sampling thread to stop.
    is_running: Arc<Mutex<bool>>,
    /// Callbacks invoked with each new usage sample.
    callbacks: Arc<RwLock<Vec<Box<dyn Fn(&MemoryUsage) + Send + Sync>>>>,
}

impl std::fmt::Debug for MemoryMonitor {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("MemoryMonitor")
            .field("usage", &self.usage)
            .field("config", &self.config)
            .field("history", &self.history)
            .field("monitor_thread", &self.monitor_thread.is_some())
            .field("is_running", &self.is_running)
            .field(
                "callbacks",
                &format!("{} callbacks", self.callbacks.read().unwrap().len()),
            )
            .finish()
    }
}

/// Configuration for [`MemoryMonitor`].
#[derive(Debug, Clone)]
pub struct MemoryMonitorConfig {
    /// Sampling interval.
    pub interval: Duration,
    /// Utilization above which memory pressure is considered a warning.
    pub warning_threshold: f64,
    /// Utilization above which memory pressure is considered critical.
    pub critical_threshold: f64,
    /// Maximum number of history samples to retain.
    pub max_history: usize,
    /// Whether to trigger cleanup automatically under memory pressure.
    pub auto_gc: bool,
    /// Utilization above which automatic cleanup is triggered.
    pub gc_threshold: f64,
}

impl Default for MemoryMonitorConfig {
    fn default() -> Self {
        Self {
            interval: Duration::from_secs(1),
            warning_threshold: 0.7,
            critical_threshold: 0.9,
            // One hour of history at the default 1-second interval.
            max_history: 3600,
            auto_gc: true,
            gc_threshold: 0.8,
        }
    }
}

impl MemoryMonitor {
    /// Create a monitor with the given configuration. Sampling does not begin
    /// until [`MemoryMonitor::start`] is called.
    #[must_use]
    pub fn new(config: MemoryMonitorConfig) -> Self {
        Self {
            usage: Arc::new(RwLock::new(MemoryUsage::default())),
            config,
            history: Arc::new(RwLock::new(VecDeque::new())),
            monitor_thread: None,
            is_running: Arc::new(Mutex::new(false)),
            callbacks: Arc::new(RwLock::new(Vec::new())),
        }
    }

    /// Start the background sampling thread. Calling `start` on a running
    /// monitor is a no-op.
    pub fn start(&mut self) -> SklResult<()> {
        {
            let mut running = self.is_running.lock().unwrap();
            if *running {
                return Ok(());
            }
            *running = true;
        }

        let usage = Arc::clone(&self.usage);
        let history = Arc::clone(&self.history);
        let callbacks = Arc::clone(&self.callbacks);
        let is_running = Arc::clone(&self.is_running);
        let config = self.config.clone();

        let handle = thread::spawn(move || {
            Self::monitor_loop(usage, history, callbacks, is_running, config);
        });

        self.monitor_thread = Some(handle);
        Ok(())
    }

    /// Stop the background sampling thread and wait for it to finish.
    pub fn stop(&mut self) -> SklResult<()> {
        {
            let mut running = self.is_running.lock().unwrap();
            *running = false;
        }

        if let Some(handle) = self.monitor_thread.take() {
            handle.join().map_err(|_| SklearsError::InvalidData {
                reason: "Failed to join monitor thread".to_string(),
            })?;
        }

        Ok(())
    }

    fn monitor_loop(
        usage: Arc<RwLock<MemoryUsage>>,
        history: Arc<RwLock<VecDeque<MemoryUsage>>>,
        callbacks: Arc<RwLock<Vec<Box<dyn Fn(&MemoryUsage) + Send + Sync>>>>,
        is_running: Arc<Mutex<bool>>,
        config: MemoryMonitorConfig,
    ) {
        while *is_running.lock().unwrap() {
            let (allocated, allocations, deallocations) = Self::get_system_memory_info();

            {
                let mut current_usage = usage.write().unwrap();
                current_usage.update(allocated, allocations, deallocations);

                {
                    let mut hist = history.write().unwrap();
                    hist.push_back(current_usage.clone());

                    // Keep the history bounded.
                    while hist.len() > config.max_history {
                        hist.pop_front();
                    }
                }

                let total_memory = Self::get_total_system_memory();
                let utilization = current_usage.utilization(total_memory);

                if config.auto_gc && utilization > config.gc_threshold {
                    Self::trigger_garbage_collection();
                }

                let cb_list = callbacks.read().unwrap();
                for callback in cb_list.iter() {
                    callback(&current_usage);
                }
            }

            thread::sleep(config.interval);
        }
    }

    fn get_system_memory_info() -> (u64, u64, u64) {
        // Placeholder values (allocated bytes, allocations, deallocations); a real
        // implementation would query the allocator or the operating system.
        (1024 * 1024 * 100, 1000, 900)
    }

    fn get_total_system_memory() -> u64 {
        // Placeholder: assume 8 GiB of total system memory.
        1024 * 1024 * 1024 * 8
    }

    fn trigger_garbage_collection() {
        // No-op hook: Rust has no runtime garbage collector, so this is a placeholder
        // for releasing caches or pooled buffers under memory pressure.
    }

    /// Return a copy of the most recent usage snapshot.
    #[must_use]
    pub fn current_usage(&self) -> MemoryUsage {
        let usage = self.usage.read().unwrap();
        usage.clone()
    }

    /// Return the recorded usage history, oldest sample first.
    #[must_use]
    pub fn usage_history(&self) -> Vec<MemoryUsage> {
        let history = self.history.read().unwrap();
        history.iter().cloned().collect()
    }

    /// Register a callback that is invoked with every new usage sample.
    pub fn add_callback(&self, callback: Box<dyn Fn(&MemoryUsage) + Send + Sync>) {
        let mut callbacks = self.callbacks.write().unwrap();
        callbacks.push(callback);
    }

    /// Check whether current utilization exceeds `threshold`.
    #[must_use]
    pub fn is_above_threshold(&self, threshold: f64) -> bool {
        let usage = self.usage.read().unwrap();
        let total = Self::get_total_system_memory();
        usage.utilization(total) > threshold
    }

    /// Aggregate statistics over the recorded history.
    #[must_use]
    pub fn get_statistics(&self) -> MemoryStatistics {
        let usage = self.usage.read().unwrap();
        let history = self.history.read().unwrap();

        let avg_allocated = if history.is_empty() {
            usage.allocated
        } else {
            history.iter().map(|u| u.allocated).sum::<u64>() / history.len() as u64
        };

        let max_allocated = history
            .iter()
            .map(|u| u.allocated)
            .max()
            .unwrap_or(usage.allocated);
        let min_allocated = history
            .iter()
            .map(|u| u.allocated)
            .min()
            .unwrap_or(usage.allocated);

        MemoryStatistics {
            current: usage.clone(),
            average_allocated: avg_allocated,
            max_allocated,
            min_allocated,
            total_system_memory: Self::get_total_system_memory(),
            samples_count: history.len(),
        }
    }
}

/// Aggregated statistics reported by [`MemoryMonitor::get_statistics`].
#[derive(Debug, Clone)]
pub struct MemoryStatistics {
    /// Most recent usage snapshot.
    pub current: MemoryUsage,
    /// Mean allocated bytes over the recorded history.
    pub average_allocated: u64,
    /// Maximum allocated bytes over the recorded history.
    pub max_allocated: u64,
    /// Minimum allocated bytes over the recorded history.
    pub min_allocated: u64,
    /// Total system memory reported by the monitor.
    pub total_system_memory: u64,
    /// Number of history samples used.
    pub samples_count: usize,
}

/// Size-class based memory pool with optional background monitoring.
#[derive(Debug)]
pub struct MemoryPool {
    config: MemoryPoolConfig,
    /// Free blocks, keyed by size class.
    available_blocks: Arc<RwLock<BTreeMap<usize, Vec<MemoryBlock>>>>,
    /// Blocks currently handed out, keyed by pointer.
    allocated_blocks: Arc<RwLock<HashMap<*mut u8, MemoryBlock>>>,
    statistics: Arc<RwLock<PoolStatistics>>,
    monitor: Option<MemoryMonitor>,
}

/// Configuration for [`MemoryPool`].
#[derive(Debug, Clone)]
pub struct MemoryPoolConfig {
    /// Initial number of bytes to pre-allocate across all size classes.
    pub initial_size: usize,
    /// Maximum pool size in bytes.
    pub max_size: usize,
    /// Block sizes (in bytes) managed by the pool.
    pub size_classes: Vec<usize>,
    /// Whether to allocate new blocks when a size class is exhausted.
    pub auto_expand: bool,
    /// Growth factor intended for pool expansion.
    pub expansion_factor: f64,
    /// Whether compaction is enabled.
    pub compaction_enabled: bool,
    /// Fragmentation ratio above which compaction is triggered.
    pub compaction_threshold: f64,
}

impl Default for MemoryPoolConfig {
    fn default() -> Self {
        Self {
            initial_size: 1024 * 1024 * 10, // 10 MB
            max_size: 1024 * 1024 * 100,    // 100 MB
            size_classes: vec![16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192],
            auto_expand: true,
            expansion_factor: 1.5,
            compaction_enabled: true,
            compaction_threshold: 0.7,
        }
    }
}

/// A single block of memory managed by [`MemoryPool`].
#[derive(Debug, Clone)]
pub struct MemoryBlock {
    /// Pointer to the start of the block.
    pub ptr: *mut u8,
    /// Block size in bytes.
    pub size: usize,
    /// When the block was last handed out.
    pub allocated_at: SystemTime,
    /// When the block was last accessed.
    pub last_accessed: SystemTime,
    /// Number of outstanding references to the block.
    pub ref_count: usize,
}

/// Runtime statistics for [`MemoryPool`].
#[derive(Debug, Clone)]
pub struct PoolStatistics {
    /// Bytes currently handed out to callers.
    pub total_allocated: usize,
    /// Bytes currently held in the free lists.
    pub total_available: usize,
    /// Total number of allocations served.
    pub allocations: u64,
    /// Total number of deallocations.
    pub deallocations: u64,
    /// Estimated fraction of allocations served from pooled blocks.
    pub hit_rate: f64,
    /// Estimated fragmentation of the free lists.
    pub fragmentation: f64,
    /// Fraction of the pool currently in use.
    pub utilization: f64,
}

impl Default for PoolStatistics {
    fn default() -> Self {
        Self {
            total_allocated: 0,
            total_available: 0,
            allocations: 0,
            deallocations: 0,
            hit_rate: 0.0,
            fragmentation: 0.0,
            utilization: 0.0,
        }
    }
}

impl MemoryPool {
    /// Create a new pool and pre-allocate blocks for each configured size class.
    pub fn new(config: MemoryPoolConfig) -> SklResult<Self> {
        let mut pool = Self {
            config,
            available_blocks: Arc::new(RwLock::new(BTreeMap::new())),
            allocated_blocks: Arc::new(RwLock::new(HashMap::new())),
            statistics: Arc::new(RwLock::new(PoolStatistics::default())),
            monitor: None,
        };

        pool.initialize_pool()?;

        Ok(pool)
    }

    /// Pre-allocate blocks for each size class, splitting `initial_size` evenly.
    fn initialize_pool(&mut self) -> SklResult<()> {
        let mut available = self.available_blocks.write().unwrap();

        for &size_class in &self.config.size_classes {
            let blocks_per_class =
                self.config.initial_size / (size_class * self.config.size_classes.len());
            let mut blocks = Vec::with_capacity(blocks_per_class);

            for _ in 0..blocks_per_class {
                let layout = Layout::from_size_align(size_class, std::mem::align_of::<u8>())
                    .map_err(|_| SklearsError::InvalidData {
                        reason: "Invalid memory layout".to_string(),
                    })?;

                unsafe {
                    let ptr = alloc::alloc(layout);
                    if ptr.is_null() {
                        return Err(SklearsError::InvalidData {
                            reason: "Memory allocation failed".to_string(),
                        });
                    }

                    blocks.push(MemoryBlock {
                        ptr,
                        size: size_class,
                        allocated_at: SystemTime::now(),
                        last_accessed: SystemTime::now(),
                        ref_count: 0,
                    });
                }
            }

            available.insert(size_class, blocks);
        }

        Ok(())
    }

    /// Allocate `size` bytes from the pool, rounding up to the nearest size class.
    pub fn allocate(&self, size: usize) -> SklResult<*mut u8> {
        let size_class = self.find_size_class(size);
        let mut available = self.available_blocks.write().unwrap();
        let mut allocated = self.allocated_blocks.write().unwrap();
        let mut stats = self.statistics.write().unwrap();

        // Fast path: reuse a free block of the matching size class.
        if let Some(blocks) = available.get_mut(&size_class) {
            if let Some(mut block) = blocks.pop() {
                block.allocated_at = SystemTime::now();
                block.last_accessed = SystemTime::now();
                block.ref_count = 1;

                let ptr = block.ptr;
                allocated.insert(ptr, block);

                stats.allocations += 1;
                stats.total_allocated += size_class;
                stats.hit_rate = stats.allocations as f64 / (stats.allocations + 1) as f64;

                return Ok(ptr);
            }
        }

        // Slow path: grow the pool if allowed, otherwise report exhaustion.
        if self.config.auto_expand {
            let layout =
                Layout::from_size_align(size_class, std::mem::align_of::<u8>()).map_err(|_| {
                    SklearsError::InvalidData {
                        reason: "Invalid memory layout".to_string(),
                    }
                })?;

            unsafe {
                let ptr = alloc::alloc(layout);
                if ptr.is_null() {
                    return Err(SklearsError::InvalidData {
                        reason: "Memory allocation failed".to_string(),
                    });
                }

                let block = MemoryBlock {
                    ptr,
                    size: size_class,
                    allocated_at: SystemTime::now(),
                    last_accessed: SystemTime::now(),
                    ref_count: 1,
                };

                allocated.insert(ptr, block);
                stats.allocations += 1;
                stats.total_allocated += size_class;

                Ok(ptr)
            }
        } else {
            Err(SklearsError::InvalidData {
                reason: "Memory pool exhausted".to_string(),
            })
        }
    }

    /// Return a previously allocated block to the pool's free lists.
    pub fn deallocate(&self, ptr: *mut u8) -> SklResult<()> {
        let mut available = self.available_blocks.write().unwrap();
        let mut allocated = self.allocated_blocks.write().unwrap();
        let mut stats = self.statistics.write().unwrap();

        if let Some(mut block) = allocated.remove(&ptr) {
            block.ref_count = 0;
            block.last_accessed = SystemTime::now();

            let size_class = block.size;
            available.entry(size_class).or_default().push(block);

            stats.deallocations += 1;
            stats.total_allocated = stats.total_allocated.saturating_sub(size_class);

            Ok(())
        } else {
            Err(SklearsError::InvalidData {
                reason: "Invalid pointer for deallocation".to_string(),
            })
        }
    }

    /// Map a requested size to the smallest configured size class that fits it,
    /// falling back to the next power of two for oversized requests.
    fn find_size_class(&self, size: usize) -> usize {
        self.config
            .size_classes
            .iter()
            .find(|&&class_size| class_size >= size)
            .copied()
            .unwrap_or_else(|| {
                let mut class_size = 1;
                while class_size < size {
                    class_size <<= 1;
                }
                class_size
            })
    }

    /// Estimate fragmentation of the free lists and record it when it exceeds the
    /// configured compaction threshold.
    pub fn compact(&self) -> SklResult<()> {
        let available = self.available_blocks.write().unwrap();
        let mut stats = self.statistics.write().unwrap();

        let total_blocks: usize = available.values().map(std::vec::Vec::len).sum();
        let fragmentation = if total_blocks > 0 {
            1.0 - (available.len() as f64 / total_blocks as f64)
        } else {
            0.0
        };

        if fragmentation > self.config.compaction_threshold {
            // Only the metric is recorded; block coalescing is not performed here.
            stats.fragmentation = fragmentation;
        }

        Ok(())
    }

    /// Return a copy of the pool statistics.
    #[must_use]
    pub fn statistics(&self) -> PoolStatistics {
        let stats = self.statistics.read().unwrap();
        stats.clone()
    }

    /// Attach and start a [`MemoryMonitor`] for this pool.
    pub fn enable_monitoring(&mut self, config: MemoryMonitorConfig) -> SklResult<()> {
        let mut monitor = MemoryMonitor::new(config);
        monitor.start()?;
        self.monitor = Some(monitor);
        Ok(())
    }

    /// Evict free blocks that have been idle for more than five minutes and return
    /// their backing memory to the system allocator. Returns the number of freed blocks.
    pub fn garbage_collect(&self) -> SklResult<usize> {
        let mut available = self.available_blocks.write().unwrap();
        let mut freed_blocks = 0;

        for (_, blocks) in available.iter_mut() {
            let old_len = blocks.len();

            let cutoff = SystemTime::now() - Duration::from_secs(300); // 5 minutes
            blocks.retain(|block| {
                if block.last_accessed > cutoff {
                    true
                } else {
                    // Release the stale block's backing memory before dropping it.
                    unsafe {
                        alloc::dealloc(
                            block.ptr,
                            Layout::from_size_align_unchecked(block.size, 1),
                        );
                    }
                    false
                }
            });

            freed_blocks += old_len - blocks.len();
        }

        Ok(freed_blocks)
    }
}

/// Fixed-capacity ring buffer for streaming data; pushing into a full buffer
/// overwrites (and returns) the oldest element.
#[derive(Debug)]
pub struct StreamingBuffer<T> {
    /// Backing storage; `None` marks empty slots.
    buffer: Vec<Option<T>>,
    /// Maximum number of elements the buffer can hold.
    capacity: usize,
    /// Next slot to write to.
    write_pos: usize,
    /// Next slot to read from.
    read_pos: usize,
    /// Number of elements currently stored.
    count: usize,
    /// Optional shared memory pool associated with this buffer (not used by the buffer logic itself).
    memory_pool: Option<Arc<MemoryPool>>,
}

impl<T> StreamingBuffer<T> {
    /// Create an empty buffer with the given capacity.
    pub fn new(capacity: usize) -> Self {
        let mut buffer = Vec::with_capacity(capacity);
        for _ in 0..capacity {
            buffer.push(None);
        }
        Self {
            buffer,
            capacity,
            write_pos: 0,
            read_pos: 0,
            count: 0,
            memory_pool: None,
        }
    }

    /// Create a buffer associated with a shared [`MemoryPool`].
    #[must_use]
    pub fn with_memory_pool(capacity: usize, memory_pool: Arc<MemoryPool>) -> Self {
        let mut buffer = Self::new(capacity);
        buffer.memory_pool = Some(memory_pool);
        buffer
    }

    /// Append an item, overwriting and returning the oldest element when the buffer is full.
    pub fn push(&mut self, item: T) -> Option<T> {
        let old_item = self.buffer[self.write_pos].take();
        self.buffer[self.write_pos] = Some(item);

        self.write_pos = (self.write_pos + 1) % self.capacity;

        if self.count < self.capacity {
            self.count += 1;
        } else {
            // The oldest element was overwritten, so advance the read position.
            self.read_pos = (self.read_pos + 1) % self.capacity;
        }

        old_item
    }

    /// Remove and return the oldest element, or `None` if the buffer is empty.
    pub fn pop(&mut self) -> Option<T> {
        if self.count == 0 {
            return None;
        }

        let item = self.buffer[self.read_pos].take();
        self.read_pos = (self.read_pos + 1) % self.capacity;
        self.count -= 1;

        item
    }

    /// Number of elements currently stored.
    #[must_use]
    pub fn len(&self) -> usize {
        self.count
    }

    /// Returns `true` if the buffer holds no elements.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.count == 0
    }

    /// Returns `true` if the buffer is at capacity.
    #[must_use]
    pub fn is_full(&self) -> bool {
        self.count == self.capacity
    }

    /// Remove all elements and reset the read/write positions.
    pub fn clear(&mut self) {
        for slot in &mut self.buffer {
            *slot = None;
        }
        self.write_pos = 0;
        self.read_pos = 0;
        self.count = 0;
    }

    /// Approximate memory footprint of the backing storage in bytes.
    #[must_use]
    pub fn memory_usage(&self) -> usize {
        self.capacity * mem::size_of::<Option<T>>()
    }
}

/// Memory-efficient array operations: in-place transforms, batched row processing,
/// and blocked matrix multiplication.
pub struct MemoryEfficientOps;

impl MemoryEfficientOps {
    /// Apply `transform_fn` to every element in place, avoiding a temporary array.
    pub fn transform_inplace<F>(array: &mut Array2<f64>, transform_fn: F)
    where
        F: Fn(f64) -> f64,
    {
        array.mapv_inplace(transform_fn);
    }

    /// Process the rows of `data` in batches of `batch_size`, collecting one result per batch.
    pub fn batch_process<F, R>(
        data: &Array2<f64>,
        batch_size: usize,
        process_fn: F,
    ) -> SklResult<Vec<R>>
    where
        F: Fn(ArrayView2<f64>) -> SklResult<R>,
    {
        let mut results = Vec::new();
        let n_rows = data.nrows();

        for chunk_start in (0..n_rows).step_by(batch_size) {
            let chunk_end = std::cmp::min(chunk_start + batch_size, n_rows);
            let batch = data.slice(s![chunk_start..chunk_end, ..]);

            let result = process_fn(batch)?;
            results.push(result);
        }

        Ok(results)
    }

    /// Blocked matrix multiplication that works on `chunk_size`-sized tiles to keep
    /// the working set small.
    pub fn chunked_matmul(
        a: &Array2<f64>,
        b: &Array2<f64>,
        chunk_size: usize,
    ) -> SklResult<Array2<f64>> {
        if a.ncols() != b.nrows() {
            return Err(SklearsError::InvalidData {
                reason: "Matrix dimensions don't match for multiplication".to_string(),
            });
        }

        let mut result = Array2::zeros((a.nrows(), b.ncols()));

        for i_chunk in (0..a.nrows()).step_by(chunk_size) {
            let i_end = std::cmp::min(i_chunk + chunk_size, a.nrows());

            for j_chunk in (0..b.ncols()).step_by(chunk_size) {
                let j_end = std::cmp::min(j_chunk + chunk_size, b.ncols());

                for k_chunk in (0..a.ncols()).step_by(chunk_size) {
                    let k_end = std::cmp::min(k_chunk + chunk_size, a.ncols());

                    let a_chunk = a.slice(s![i_chunk..i_end, k_chunk..k_end]);
                    let b_chunk = b.slice(s![k_chunk..k_end, j_chunk..j_end]);

                    let mut result_chunk = result.slice_mut(s![i_chunk..i_end, j_chunk..j_end]);

                    // Accumulate the partial products for this tile.
                    for (i, a_row) in a_chunk.rows().into_iter().enumerate() {
                        for (j, b_col) in b_chunk.columns().into_iter().enumerate() {
                            result_chunk[[i, j]] += a_row.dot(&b_col);
                        }
                    }
                }
            }
        }

        Ok(result)
    }

    /// Downcast to `f32`, zeroing values whose magnitude is below `tolerance`.
    #[must_use]
    pub fn optimize_precision(array: &Array2<f64>, tolerance: f64) -> Array2<f32> {
        array.mapv(|x| {
            if x.abs() < tolerance {
                0.0f32
            } else {
                x as f32
            }
        })
    }
}

#[allow(non_snake_case)]
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_memory_usage() {
        let mut usage = MemoryUsage::default();
        usage.update(1024, 10, 5);

        assert_eq!(usage.allocated, 1024);
        assert_eq!(usage.allocations, 10);
        assert_eq!(usage.deallocations, 5);
        assert_eq!(usage.peak, 1024);
    }
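
    // Illustrative sketch (added check): utilization is the ratio of `allocated` to a
    // caller-supplied total, and fragmentation is the share of allocations not yet freed.
    #[test]
    fn test_memory_usage_utilization_and_fragmentation() {
        let mut usage = MemoryUsage::default();
        usage.update(512, 8, 4);

        assert!((usage.utilization(1024) - 0.5).abs() < f64::EPSILON);
        assert_eq!(usage.utilization(0), 0.0);
        assert!(usage.is_critical(0.4, 1024));
        assert!(!usage.is_critical(0.6, 1024));
        assert!((usage.fragmentation - 0.5).abs() < f64::EPSILON);
    }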

    #[test]
    fn test_memory_monitor_creation() {
        let config = MemoryMonitorConfig::default();
        let monitor = MemoryMonitor::new(config);

        let usage = monitor.current_usage();
        assert_eq!(usage.allocated, 0);
    }
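
    // Illustrative sketch (added check): a monitor that was never started has no history
    // samples, so get_statistics() falls back to the current (default) snapshot.
    #[test]
    fn test_memory_monitor_statistics_without_start() {
        let monitor = MemoryMonitor::new(MemoryMonitorConfig::default());

        let stats = monitor.get_statistics();
        assert_eq!(stats.samples_count, 0);
        assert_eq!(stats.average_allocated, 0);
        assert_eq!(stats.max_allocated, 0);
        assert_eq!(stats.min_allocated, 0);
        assert!(stats.total_system_memory > 0);
    }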

    #[test]
    fn test_memory_pool_creation() {
        let config = MemoryPoolConfig::default();
        let pool = MemoryPool::new(config).unwrap();

        let stats = pool.statistics();
        assert_eq!(stats.allocations, 0);
        assert_eq!(stats.deallocations, 0);
    }
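
    // Illustrative sketch (added check) of the allocate/deallocate round trip: requests are
    // rounded up to a size class, freed blocks return to the free lists, and pointers the
    // pool never handed out are rejected.
    #[test]
    fn test_memory_pool_allocate_deallocate() {
        let pool = MemoryPool::new(MemoryPoolConfig::default()).unwrap();

        let ptr = pool.allocate(100).unwrap();
        assert!(!ptr.is_null());
        assert_eq!(pool.statistics().allocations, 1);

        pool.deallocate(ptr).unwrap();
        let stats = pool.statistics();
        assert_eq!(stats.deallocations, 1);
        assert_eq!(stats.total_allocated, 0);

        // An unknown pointer is rejected.
        assert!(pool.deallocate(std::ptr::null_mut()).is_err());
    }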

    #[test]
    fn test_streaming_buffer() {
        let mut buffer = StreamingBuffer::new(3);

        assert!(buffer.is_empty());
        assert_eq!(buffer.len(), 0);

        buffer.push(1);
        buffer.push(2);
        buffer.push(3);

        assert!(buffer.is_full());
        assert_eq!(buffer.len(), 3);

        let old_item = buffer.push(4); // Wraps around, evicting the oldest element
        assert_eq!(old_item, Some(1));

        let popped = buffer.pop();
        assert_eq!(popped, Some(2));
    }
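
    // Illustrative sketch (added check): clear() resets the ring buffer to its empty state
    // while the capacity-based footprint reported by memory_usage() stays the same.
    #[test]
    fn test_streaming_buffer_clear() {
        let mut buffer = StreamingBuffer::new(2);
        buffer.push(10);
        buffer.push(20);
        assert!(buffer.is_full());

        buffer.clear();
        assert!(buffer.is_empty());
        assert_eq!(buffer.pop(), None);
        assert_eq!(buffer.memory_usage(), 2 * std::mem::size_of::<Option<i32>>());
    }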

    #[test]
    fn test_memory_efficient_ops() {
        let mut array = Array2::from_shape_vec((2, 2), vec![1.0, 2.0, 3.0, 4.0]).unwrap();

        MemoryEfficientOps::transform_inplace(&mut array, |x| x * 2.0);
        assert_eq!(array[[0, 0]], 2.0);
        assert_eq!(array[[1, 1]], 8.0);

        let array_f64 = Array2::from_shape_vec((2, 2), vec![1.0, 0.000001, 3.0, 0.000002]).unwrap();
        let array_f32 = MemoryEfficientOps::optimize_precision(&array_f64, 0.00001);
        assert_eq!(array_f32[[0, 1]], 0.0f32); // Below tolerance, zeroed
        assert_eq!(array_f32[[1, 0]], 3.0f32); // Above tolerance, preserved
    }

    #[test]
    fn test_batch_processing() {
        let data =
            Array2::from_shape_vec((4, 2), vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]).unwrap();

        let results = MemoryEfficientOps::batch_process(&data, 2, |batch| Ok(batch.sum())).unwrap();

        assert_eq!(results.len(), 2);
        assert_eq!(results[0], 10.0); // 1 + 2 + 3 + 4
        assert_eq!(results[1], 26.0); // 5 + 6 + 7 + 8
    }
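
    // Illustrative sketch (added check): when batch_size does not divide the row count
    // evenly, the final batch simply carries the remaining rows.
    #[test]
    fn test_batch_processing_uneven_batches() {
        let data = Array2::from_shape_vec((5, 1), vec![1.0, 2.0, 3.0, 4.0, 5.0]).unwrap();

        let results =
            MemoryEfficientOps::batch_process(&data, 2, |batch| Ok(batch.nrows())).unwrap();

        assert_eq!(results, vec![2usize, 2, 1]);
    }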

    #[test]
    fn test_chunked_matmul() {
        let a = Array2::from_shape_vec((2, 3), vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).unwrap();
        let b = Array2::from_shape_vec((3, 2), vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).unwrap();

        let result = MemoryEfficientOps::chunked_matmul(&a, &b, 2).unwrap();

        assert_eq!(result.shape(), &[2, 2]);
        assert_eq!(result[[0, 0]], 22.0);
        assert_eq!(result[[0, 1]], 28.0);
        assert_eq!(result[[1, 0]], 49.0);
        assert_eq!(result[[1, 1]], 64.0);
    }
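
    // Illustrative sketch (added check): mismatched inner dimensions are reported as an
    // error instead of panicking.
    #[test]
    fn test_chunked_matmul_dimension_mismatch() {
        let a = Array2::<f64>::zeros((2, 3));
        let b = Array2::<f64>::zeros((4, 2));

        assert!(MemoryEfficientOps::chunked_matmul(&a, &b, 2).is_err());
    }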
}