// scirs2_neural/wasm/memory.rs

1//! WebAssembly memory management and configuration
2//!
3//! This module provides comprehensive memory management functionality for WebAssembly neural networks including:
4//! - Memory configuration and growth strategies
5//! - Memory alignment and optimization
6//! - Shared memory support for multi-threading
7//! - Progressive loading and streaming for large models
8//! - Caching strategies and storage management
9
/// WebAssembly memory configuration
///
/// Page counts follow the WebAssembly spec: one linear-memory page is
/// 64 KiB (65 536 bytes).
#[derive(Debug, Clone)]
pub struct WasmMemoryConfig {
    /// Initial memory pages (64KB each)
    pub initial_pages: u32,
    /// Maximum memory pages
    pub maximum_pages: Option<u32>,
    /// Shared memory (for threading)
    pub shared: bool,
    /// Memory growth strategy
    pub growth_strategy: MemoryGrowthStrategy,
    /// Memory alignment
    pub alignment: MemoryAlignment,
}
24
/// Memory growth strategy
///
/// Controls whether and how the WebAssembly linear memory may grow past
/// its initial page count.
#[derive(Debug, Clone, PartialEq)]
pub enum MemoryGrowthStrategy {
    /// Fixed size - no growth allowed
    Fixed,
    /// On-demand growth
    OnDemand,
    /// Pre-allocated growth
    PreAllocated,
    /// Streaming growth for large models
    Streaming,
}
37
/// Memory alignment configuration
///
/// All alignments are expressed in bytes; see [`MemoryAlignment::default`],
/// `high_performance` and `compact` for the presets used elsewhere.
#[derive(Debug, Clone)]
pub struct MemoryAlignment {
    /// Data alignment (bytes)
    pub data_alignment: u32,
    /// Function alignment (bytes)
    pub function_alignment: u32,
    /// SIMD alignment (bytes)
    pub simd_alignment: u32,
}
48
/// Progressive loading configuration
///
/// Governs how model data is brought into memory: all at once, lazily,
/// in fixed-size chunks, or as a continuous stream.
#[derive(Debug, Clone)]
pub struct ProgressiveLoadingConfig {
    /// Enable progressive loading
    pub enable: bool,
    /// Loading strategy
    pub strategy: LoadingStrategy,
    /// Chunk size in bytes
    pub chunk_size: usize,
    /// Preloading configuration
    pub preloading: PreloadingConfig,
    /// Enable streaming
    pub streaming: bool,
}
63
/// Loading strategy for progressive loading
#[derive(Debug, Clone, PartialEq)]
pub enum LoadingStrategy {
    /// Load all at once
    Eager,
    /// Load on demand
    Lazy,
    /// Load in chunks
    Chunked,
    /// Stream continuously
    Streaming,
}
76
/// Preloading configuration
///
/// Decides how much of a model (as a fraction) is fetched ahead of time
/// and which browser events trigger that fetch.
#[derive(Debug, Clone)]
pub struct PreloadingConfig {
    /// Enable preloading
    pub enable: bool,
    /// Preload percentage (0.0 to 1.0)
    pub percentage: f64,
    /// Preload on idle
    pub on_idle: bool,
    /// Preload based on user interaction
    pub on_interaction: bool,
}
89
/// Caching configuration
///
/// Combines eviction strategy, storage backend, expiry and versioning for
/// cached model artifacts.
#[derive(Debug, Clone)]
pub struct CachingConfig {
    /// Enable caching
    pub enable: bool,
    /// Cache strategy
    pub strategy: CacheStrategy,
    /// Storage backend
    pub storage: CacheStorage,
    /// Time to live in seconds
    pub ttl_seconds: Option<u64>,
    /// Versioning strategy
    pub versioning: VersioningStrategy,
}
104
/// Cache strategy
///
/// Eviction/expiry policy used by the cache.
#[derive(Debug, Clone, PartialEq)]
pub enum CacheStrategy {
    /// Least Recently Used
    LRU,
    /// Least Frequently Used
    LFU,
    /// First In, First Out
    FIFO,
    /// Time-based expiration
    TTL,
    /// Custom strategy
    Custom,
}
119
/// Cache storage backend
///
/// The first three variants persist across page reloads; see
/// `CachingConfig::uses_persistent_storage`.
#[derive(Debug, Clone, PartialEq)]
pub enum CacheStorage {
    /// Browser Cache API
    CacheAPI,
    /// IndexedDB
    IndexedDB,
    /// Local Storage
    LocalStorage,
    /// Session Storage
    SessionStorage,
    /// In-memory only
    Memory,
}
134
/// Versioning strategy for cache
///
/// Determines how cached entries are keyed/invalidated when the model changes.
#[derive(Debug, Clone, PartialEq)]
pub enum VersioningStrategy {
    /// Use content hash
    Hash,
    /// Use timestamp
    Timestamp,
    /// Use semantic version
    Semantic,
    /// Custom versioning
    Custom(String),
}
147
/// Parallel execution configuration
///
/// Web Worker / shared-memory settings for multi-threaded inference.
#[derive(Debug, Clone)]
pub struct ParallelConfig {
    /// Enable Web Workers
    pub web_workers: bool,
    /// Maximum number of workers
    pub max_workers: Option<usize>,
    /// Shared memory support
    pub shared_memory: bool,
    /// Work stealing algorithm
    pub work_stealing: bool,
}
160
/// Memory export specification
///
/// Describes a linear memory exported from a WebAssembly module under `name`.
#[derive(Debug, Clone)]
pub struct WasmMemoryExport {
    /// Export name
    pub name: String,
    /// Memory configuration
    pub config: WasmMemoryConfig,
}
169
/// Memory import specification
///
/// Describes a linear memory imported by a WebAssembly module as
/// `module`.`name`.
#[derive(Debug, Clone)]
pub struct WasmMemoryImport {
    /// Module name
    pub module: String,
    /// Memory name
    pub name: String,
    /// Memory configuration
    pub config: WasmMemoryConfig,
}
180
181impl Default for WasmMemoryConfig {
182    fn default() -> Self {
183        Self {
184            initial_pages: 256,        // 16MB initial
185            maximum_pages: Some(1024), // 64MB maximum
186            shared: false,
187            growth_strategy: MemoryGrowthStrategy::OnDemand,
188            alignment: MemoryAlignment::default(),
189        }
190    }
191}
192
193impl Default for MemoryAlignment {
194    fn default() -> Self {
195        Self {
196            data_alignment: 8,      // 8-byte alignment for f64
197            function_alignment: 16, // 16-byte alignment for functions
198            simd_alignment: 16,     // 16-byte alignment for SIMD
199        }
200    }
201}
202
203impl Default for ProgressiveLoadingConfig {
204    fn default() -> Self {
205        Self {
206            enable: true,
207            strategy: LoadingStrategy::Lazy,
208            chunk_size: 1024 * 1024, // 1MB chunks
209            preloading: PreloadingConfig::default(),
210            streaming: true,
211        }
212    }
213}
214
215impl Default for PreloadingConfig {
216    fn default() -> Self {
217        Self {
218            enable: true,
219            percentage: 0.1, // Preload 10%
220            on_idle: true,
221            on_interaction: false,
222        }
223    }
224}
225
226impl Default for CachingConfig {
227    fn default() -> Self {
228        Self {
229            enable: true,
230            strategy: CacheStrategy::LRU,
231            storage: CacheStorage::CacheAPI,
232            ttl_seconds: Some(3600), // 1 hour
233            versioning: VersioningStrategy::Hash,
234        }
235    }
236}
237
238impl Default for ParallelConfig {
239    fn default() -> Self {
240        Self {
241            web_workers: true,
242            max_workers: Some(4),
243            shared_memory: false,
244            work_stealing: false,
245        }
246    }
247}
248
249impl WasmMemoryConfig {
250    /// Create a new memory configuration
251    pub fn new(initial_pages: u32, maximum_pages: Option<u32>) -> Self {
252        Self {
253            initial_pages,
254            maximum_pages,
255            shared: false,
256            growth_strategy: MemoryGrowthStrategy::OnDemand,
257            alignment: MemoryAlignment::default(),
258        }
259    }
260
261    /// Create a configuration for small models
262    pub fn small() -> Self {
263        Self {
264            initial_pages: 64,        // 4MB initial
265            maximum_pages: Some(256), // 16MB maximum
266            shared: false,
267            growth_strategy: MemoryGrowthStrategy::Fixed,
268            alignment: MemoryAlignment::default(),
269        }
270    }
271
272    /// Create a configuration for large models
273    pub fn large() -> Self {
274        Self {
275            initial_pages: 512,        // 32MB initial
276            maximum_pages: Some(4096), // 256MB maximum
277            shared: false,
278            growth_strategy: MemoryGrowthStrategy::Streaming,
279            alignment: MemoryAlignment::high_performance(),
280        }
281    }
282
283    /// Create a configuration for multi-threaded execution
284    pub fn multithreaded() -> Self {
285        Self {
286            initial_pages: 256,        // 16MB initial
287            maximum_pages: Some(2048), // 128MB maximum
288            shared: true,
289            growth_strategy: MemoryGrowthStrategy::PreAllocated,
290            alignment: MemoryAlignment::default(),
291        }
292    }
293
294    /// Get total initial memory size in bytes
295    pub fn initial_size_bytes(&self) -> usize {
296        self.initial_pages as usize * 65536 // 64KB per page
297    }
298
299    /// Get maximum memory size in bytes
300    pub fn max_size_bytes(&self) -> Option<usize> {
301        self.maximum_pages.map(|pages| pages as usize * 65536)
302    }
303
304    /// Check if configuration supports growth
305    pub fn supports_growth(&self) -> bool {
306        self.growth_strategy != MemoryGrowthStrategy::Fixed
307    }
308
309    /// Check if configuration is suitable for large models
310    pub fn is_large_model_config(&self) -> bool {
311        self.initial_size_bytes() >= 32 * 1024 * 1024 // 32MB or more
312    }
313}
314
315impl MemoryAlignment {
316    /// Create alignment configuration optimized for performance
317    pub fn high_performance() -> Self {
318        Self {
319            data_alignment: 32,     // Cache line alignment
320            function_alignment: 32, // Optimal function alignment
321            simd_alignment: 32,     // AVX alignment
322        }
323    }
324
325    /// Create alignment configuration optimized for size
326    pub fn compact() -> Self {
327        Self {
328            data_alignment: 4,     // Minimal alignment
329            function_alignment: 8, // Minimal function alignment
330            simd_alignment: 16,    // Standard SIMD alignment
331        }
332    }
333
334    /// Check if alignment is compatible with SIMD operations
335    pub fn is_simd_compatible(&self) -> bool {
336        self.simd_alignment >= 16
337    }
338
339    /// Check if alignment is optimized for cache performance
340    pub fn is_cache_optimized(&self) -> bool {
341        self.data_alignment >= 32 && self.function_alignment >= 32
342    }
343}
344
345impl ProgressiveLoadingConfig {
346    /// Create configuration for fast initial loading
347    pub fn fast_start() -> Self {
348        Self {
349            enable: true,
350            strategy: LoadingStrategy::Lazy,
351            chunk_size: 512 * 1024, // 512KB chunks
352            preloading: PreloadingConfig {
353                enable: true,
354                percentage: 0.05, // Preload 5%
355                on_idle: true,
356                on_interaction: true,
357            },
358            streaming: true,
359        }
360    }
361
362    /// Create configuration for bandwidth-constrained environments
363    pub fn low_bandwidth() -> Self {
364        Self {
365            enable: true,
366            strategy: LoadingStrategy::Chunked,
367            chunk_size: 128 * 1024, // 128KB chunks
368            preloading: PreloadingConfig {
369                enable: false,
370                percentage: 0.0,
371                on_idle: false,
372                on_interaction: false,
373            },
374            streaming: true,
375        }
376    }
377
378    /// Check if preloading is enabled
379    pub fn has_preloading(&self) -> bool {
380        self.preloading.enable
381    }
382
383    /// Get estimated memory overhead for preloading
384    pub fn preload_memory_overhead(&self, total_size: usize) -> usize {
385        if self.has_preloading() {
386            (total_size as f64 * self.preloading.percentage) as usize
387        } else {
388            0
389        }
390    }
391}
392
393impl CachingConfig {
394    /// Create configuration for aggressive caching
395    pub fn aggressive() -> Self {
396        Self {
397            enable: true,
398            strategy: CacheStrategy::LRU,
399            storage: CacheStorage::IndexedDB,
400            ttl_seconds: Some(7 * 24 * 3600), // 1 week
401            versioning: VersioningStrategy::Hash,
402        }
403    }
404
405    /// Create configuration for minimal caching
406    pub fn minimal() -> Self {
407        Self {
408            enable: true,
409            strategy: CacheStrategy::TTL,
410            storage: CacheStorage::Memory,
411            ttl_seconds: Some(300), // 5 minutes
412            versioning: VersioningStrategy::Timestamp,
413        }
414    }
415
416    /// Check if persistent storage is used
417    pub fn uses_persistent_storage(&self) -> bool {
418        matches!(
419            self.storage,
420            CacheStorage::CacheAPI | CacheStorage::IndexedDB | CacheStorage::LocalStorage
421        )
422    }
423
424    /// Get estimated cache lifetime in seconds
425    pub fn cache_lifetime(&self) -> Option<u64> {
426        self.ttl_seconds
427    }
428}
429
430impl ParallelConfig {
431    /// Create configuration for maximum parallelism
432    pub fn max_parallel() -> Self {
433        Self {
434            web_workers: true,
435            max_workers: Some(navigator_hardware_concurrency().unwrap_or(8)),
436            shared_memory: true,
437            work_stealing: true,
438        }
439    }
440
441    /// Create configuration for single-threaded execution
442    pub fn single_threaded() -> Self {
443        Self {
444            web_workers: false,
445            max_workers: Some(1),
446            shared_memory: false,
447            work_stealing: false,
448        }
449    }
450
451    /// Get effective number of workers
452    pub fn effective_workers(&self) -> usize {
453        if self.web_workers {
454            self.max_workers.unwrap_or(1)
455        } else {
456            1
457        }
458    }
459
460    /// Check if configuration supports multi-threading
461    pub fn supports_multithreading(&self) -> bool {
462        self.web_workers && self.effective_workers() > 1
463    }
464}
465
/// Utility function to get navigator hardware concurrency (mock for server-side)
fn navigator_hardware_concurrency() -> Option<usize> {
    // A real implementation would read `navigator.hardwareConcurrency`;
    // until then we report a conservative default of four cores.
    let mocked_cores = 4;
    Some(mocked_cores)
}
472
/// Memory manager for WebAssembly models
///
/// Bundles the four configuration aspects (core memory, progressive
/// loading, caching, parallelism) and derives memory requirements and
/// chunk-size recommendations from them.
pub struct MemoryManager {
    // Core WebAssembly memory settings (pages, growth, alignment).
    config: WasmMemoryConfig,
    // How model data is loaded/streamed in chunks.
    progressive_config: ProgressiveLoadingConfig,
    // Cache strategy/storage/TTL settings.
    cache_config: CachingConfig,
    // Web Worker / shared-memory settings.
    parallel_config: ParallelConfig,
}
480
481impl MemoryManager {
482    /// Create a new memory manager
483    pub fn new(
484        config: WasmMemoryConfig,
485        progressive_config: ProgressiveLoadingConfig,
486        cache_config: CachingConfig,
487        parallel_config: ParallelConfig,
488    ) -> Self {
489        Self {
490            config,
491            progressive_config,
492            cache_config,
493            parallel_config,
494        }
495    }
496
497    /// Create a memory manager optimized for performance
498    pub fn performance_optimized() -> Self {
499        Self {
500            config: WasmMemoryConfig::large(),
501            progressive_config: ProgressiveLoadingConfig::fast_start(),
502            cache_config: CachingConfig::aggressive(),
503            parallel_config: ParallelConfig::max_parallel(),
504        }
505    }
506
507    /// Create a memory manager optimized for low resource usage
508    pub fn resource_constrained() -> Self {
509        Self {
510            config: WasmMemoryConfig::small(),
511            progressive_config: ProgressiveLoadingConfig::low_bandwidth(),
512            cache_config: CachingConfig::minimal(),
513            parallel_config: ParallelConfig::single_threaded(),
514        }
515    }
516
517    /// Get memory configuration
518    pub fn memory_config(&self) -> &WasmMemoryConfig {
519        &self.config
520    }
521
522    /// Get progressive loading configuration
523    pub fn progressive_config(&self) -> &ProgressiveLoadingConfig {
524        &self.progressive_config
525    }
526
527    /// Get caching configuration
528    pub fn cache_config(&self) -> &CachingConfig {
529        &self.cache_config
530    }
531
532    /// Get parallel configuration
533    pub fn parallel_config(&self) -> &ParallelConfig {
534        &self.parallel_config
535    }
536
537    /// Calculate total memory requirements
538    pub fn calculate_memory_requirements(&self, model_size: usize) -> MemoryRequirements {
539        let base_memory = self.config.initial_size_bytes();
540        let model_memory = model_size;
541        let cache_overhead = if self.cache_config.enable {
542            model_size / 10 // 10% overhead for caching
543        } else {
544            0
545        };
546        let preload_overhead = self.progressive_config.preload_memory_overhead(model_size);
547        let worker_overhead = if self.parallel_config.supports_multithreading() {
548            model_size * self.parallel_config.effective_workers() / 4 // 25% per worker
549        } else {
550            0
551        };
552
553        MemoryRequirements {
554            base_memory,
555            model_memory,
556            cache_overhead,
557            preload_overhead,
558            worker_overhead,
559            total: base_memory + model_memory + cache_overhead + preload_overhead + worker_overhead,
560        }
561    }
562
563    /// Check if configuration is suitable for given model size
564    pub fn is_suitable_for_model(&self, model_size: usize) -> bool {
565        let requirements = self.calculate_memory_requirements(model_size);
566
567        if let Some(max_size) = self.config.max_size_bytes() {
568            requirements.total <= max_size
569        } else {
570            true // No limit
571        }
572    }
573
574    /// Get recommended chunk size for streaming
575    pub fn recommended_chunk_size(&self, model_size: usize) -> usize {
576        let base_chunk = self.progressive_config.chunk_size;
577
578        // Adjust chunk size based on model size
579        if model_size < 1024 * 1024 {
580            // Small models: use smaller chunks
581            base_chunk / 4
582        } else if model_size > 100 * 1024 * 1024 {
583            // Large models: use larger chunks
584            base_chunk * 2
585        } else {
586            base_chunk
587        }
588    }
589}
590
/// Memory requirements calculation result
///
/// All fields are byte counts; `total` is the sum of the other five.
#[derive(Debug, Clone)]
pub struct MemoryRequirements {
    /// Base WebAssembly memory
    pub base_memory: usize,
    /// Memory for model data
    pub model_memory: usize,
    /// Cache overhead
    pub cache_overhead: usize,
    /// Preloading overhead
    pub preload_overhead: usize,
    /// Worker overhead
    pub worker_overhead: usize,
    /// Total memory requirement
    pub total: usize,
}
607
608impl MemoryRequirements {
609    /// Get memory usage breakdown as percentages
610    pub fn breakdown_percentages(&self) -> MemoryBreakdown {
611        let total_f = self.total as f64;
612
613        MemoryBreakdown {
614            base_percent: (self.base_memory as f64 / total_f) * 100.0,
615            model_percent: (self.model_memory as f64 / total_f) * 100.0,
616            cache_percent: (self.cache_overhead as f64 / total_f) * 100.0,
617            preload_percent: (self.preload_overhead as f64 / total_f) * 100.0,
618            worker_percent: (self.worker_overhead as f64 / total_f) * 100.0,
619        }
620    }
621
622    /// Format memory size as human-readable string
623    pub fn format_size(bytes: usize) -> String {
624        const UNITS: &[&str] = &["B", "KB", "MB", "GB", "TB"];
625
626        if bytes == 0 {
627            return "0 B".to_string();
628        }
629
630        let mut size = bytes as f64;
631        let mut unit_index = 0;
632
633        while size >= 1024.0 && unit_index < UNITS.len() - 1 {
634            size /= 1024.0;
635            unit_index += 1;
636        }
637
638        if unit_index == 0 {
639            format!("{} {}", bytes, UNITS[unit_index])
640        } else {
641            format!("{:.1} {}", size, UNITS[unit_index])
642        }
643    }
644
645    /// Get formatted total memory requirement
646    pub fn total_formatted(&self) -> String {
647        Self::format_size(self.total)
648    }
649}
650
/// Memory usage breakdown in percentages
///
/// Produced by `MemoryRequirements::breakdown_percentages`; for a
/// non-zero total the five fields sum to approximately 100.
#[derive(Debug, Clone)]
pub struct MemoryBreakdown {
    /// Percentage of memory for base operations
    pub base_percent: f64,
    /// Percentage of memory for model storage
    pub model_percent: f64,
    /// Percentage of memory for caching
    pub cache_percent: f64,
    /// Percentage of memory for preloading
    pub preload_percent: f64,
    /// Percentage of memory for worker threads
    pub worker_percent: f64,
}
665
#[cfg(test)]
mod tests {
    // Unit tests for the configuration types, the memory manager and the
    // requirement/formatting helpers in this module.
    use super::*;

    // Default config: 256 pages (16MB) initial, 1024 pages (64MB) max,
    // non-shared, on-demand growth.
    #[test]
    fn test_wasm_memory_config_default() {
        let config = WasmMemoryConfig::default();
        assert_eq!(config.initial_pages, 256);
        assert_eq!(config.maximum_pages, Some(1024));
        assert!(!config.shared);
        assert_eq!(config.growth_strategy, MemoryGrowthStrategy::OnDemand);
    }

    // Page-to-byte conversion uses the 64KB (65536-byte) wasm page size.
    #[test]
    fn test_memory_config_sizes() {
        let config = WasmMemoryConfig::new(128, Some(512));
        assert_eq!(config.initial_size_bytes(), 128 * 65536);
        assert_eq!(config.max_size_bytes(), Some(512 * 65536));
        assert!(config.supports_growth());
    }

    #[test]
    fn test_memory_config_presets() {
        let small = WasmMemoryConfig::small();
        assert_eq!(small.initial_pages, 64);
        assert_eq!(small.growth_strategy, MemoryGrowthStrategy::Fixed);
        assert!(!small.supports_growth());

        let large = WasmMemoryConfig::large();
        assert_eq!(large.initial_pages, 512);
        assert_eq!(large.growth_strategy, MemoryGrowthStrategy::Streaming);
        assert!(large.is_large_model_config());

        let mt = WasmMemoryConfig::multithreaded();
        assert!(mt.shared);
        assert_eq!(mt.growth_strategy, MemoryGrowthStrategy::PreAllocated);
    }

    #[test]
    fn test_memory_alignment() {
        let default_align = MemoryAlignment::default();
        assert!(default_align.is_simd_compatible());
        assert!(!default_align.is_cache_optimized());

        let perf_align = MemoryAlignment::high_performance();
        assert!(perf_align.is_simd_compatible());
        assert!(perf_align.is_cache_optimized());

        let compact_align = MemoryAlignment::compact();
        assert!(compact_align.is_simd_compatible());
        assert!(!compact_align.is_cache_optimized());
    }

    #[test]
    fn test_progressive_loading_config() {
        let config = ProgressiveLoadingConfig::default();
        assert!(config.enable);
        assert!(config.has_preloading());

        let fast = ProgressiveLoadingConfig::fast_start();
        assert_eq!(fast.chunk_size, 512 * 1024);
        assert!(fast.preloading.on_interaction);

        let low_bw = ProgressiveLoadingConfig::low_bandwidth();
        assert_eq!(low_bw.chunk_size, 128 * 1024);
        assert!(!low_bw.has_preloading());
    }

    #[test]
    fn test_caching_config() {
        let default_cache = CachingConfig::default();
        assert!(default_cache.enable);
        assert!(default_cache.uses_persistent_storage());
        assert_eq!(default_cache.cache_lifetime(), Some(3600));

        let aggressive = CachingConfig::aggressive();
        assert_eq!(aggressive.strategy, CacheStrategy::LRU);
        assert_eq!(aggressive.cache_lifetime(), Some(7 * 24 * 3600));

        let minimal = CachingConfig::minimal();
        assert_eq!(minimal.strategy, CacheStrategy::TTL);
        assert!(!minimal.uses_persistent_storage());
    }

    #[test]
    fn test_parallel_config() {
        let default_parallel = ParallelConfig::default();
        assert!(default_parallel.web_workers);
        assert!(default_parallel.supports_multithreading());

        let max_parallel = ParallelConfig::max_parallel();
        assert!(max_parallel.work_stealing);
        assert!(max_parallel.shared_memory);

        let single = ParallelConfig::single_threaded();
        assert!(!single.web_workers);
        assert!(!single.supports_multithreading());
        assert_eq!(single.effective_workers(), 1);
    }

    #[test]
    fn test_memory_manager() {
        let manager = MemoryManager::performance_optimized();
        assert!(manager.memory_config().is_large_model_config());
        assert!(manager.parallel_config().supports_multithreading());

        let constrained = MemoryManager::resource_constrained();
        assert!(!constrained.memory_config().is_large_model_config());
        assert!(!constrained.parallel_config().supports_multithreading());
    }

    // Total must exceed the model size (overheads are additive) and the
    // percentage breakdown must sum to ~100.
    #[test]
    fn test_memory_requirements_calculation() {
        let manager = MemoryManager::performance_optimized();
        let model_size = 10 * 1024 * 1024; // 10MB model

        let requirements = manager.calculate_memory_requirements(model_size);
        assert!(requirements.total > model_size);
        assert!(requirements.model_memory == model_size);

        let breakdown = requirements.breakdown_percentages();
        assert!(
            (breakdown.base_percent
                + breakdown.model_percent
                + breakdown.cache_percent
                + breakdown.preload_percent
                + breakdown.worker_percent
                - 100.0)
                .abs()
                < 0.1
        );
    }

    #[test]
    fn test_memory_requirements_formatting() {
        assert_eq!(MemoryRequirements::format_size(0), "0 B");
        assert_eq!(MemoryRequirements::format_size(1024), "1.0 KB");
        assert_eq!(MemoryRequirements::format_size(1024 * 1024), "1.0 MB");
        assert_eq!(MemoryRequirements::format_size(1536), "1.5 KB");
    }

    #[test]
    fn test_recommended_chunk_size() {
        let manager = MemoryManager::performance_optimized();

        // Small model should get smaller chunks
        let small_chunk = manager.recommended_chunk_size(512 * 1024);
        assert!(small_chunk < manager.progressive_config().chunk_size);

        // Large model should get larger chunks
        let large_chunk = manager.recommended_chunk_size(200 * 1024 * 1024);
        assert!(large_chunk > manager.progressive_config().chunk_size);

        // Medium model should get default chunks
        let medium_chunk = manager.recommended_chunk_size(10 * 1024 * 1024);
        assert_eq!(medium_chunk, manager.progressive_config().chunk_size);
    }
}