cubecl_runtime/memory_management/
memory_manage.rs

1use super::{
2    MemoryConfiguration, MemoryDeviceProperties, MemoryPoolOptions, MemoryUsage, PoolType,
3    memory_pool::{ExclusiveMemoryPool, MemoryPool, SlicedPool},
4};
5use crate::storage::{ComputeStorage, StorageHandle, StorageId};
6#[cfg(not(exclusive_memory_only))]
7use alloc::vec;
8use alloc::vec::Vec;
9use hashbrown::HashSet;
10
11pub use super::memory_pool::{SliceBinding, handle::*};
12
/// Runtime dispatch between the two concrete pool implementations; each
/// configured pool is one of these variants (see `MemoryPoolOptions`).
// These are 288 bytes vs 64 bytes. Adding boxing isn't really worth
// saving the 200 bytes.
#[allow(clippy::large_enum_variant)]
enum DynamicPool {
    Sliced(SlicedPool),
    Exclusive(ExclusiveMemoryPool),
}
20
// Pure delegation: every trait method simply forwards to the wrapped pool.
impl MemoryPool for DynamicPool {
    fn get(&self, binding: &SliceBinding) -> Option<&StorageHandle> {
        match self {
            DynamicPool::Sliced(m) => m.get(binding),
            DynamicPool::Exclusive(m) => m.get(binding),
        }
    }

    fn try_reserve(&mut self, size: u64, exclude: Option<&StorageExclude>) -> Option<SliceHandle> {
        match self {
            DynamicPool::Sliced(m) => m.try_reserve(size, exclude),
            DynamicPool::Exclusive(m) => m.try_reserve(size, exclude),
        }
    }

    fn alloc<Storage: ComputeStorage>(&mut self, storage: &mut Storage, size: u64) -> SliceHandle {
        match self {
            DynamicPool::Sliced(m) => m.alloc(storage, size),
            DynamicPool::Exclusive(m) => m.alloc(storage, size),
        }
    }

    fn get_memory_usage(&self) -> MemoryUsage {
        match self {
            DynamicPool::Sliced(m) => m.get_memory_usage(),
            DynamicPool::Exclusive(m) => m.get_memory_usage(),
        }
    }

    fn max_alloc_size(&self) -> u64 {
        match self {
            DynamicPool::Sliced(m) => m.max_alloc_size(),
            DynamicPool::Exclusive(m) => m.max_alloc_size(),
        }
    }

    fn cleanup<Storage: ComputeStorage>(
        &mut self,
        storage: &mut Storage,
        alloc_nr: u64,
        explicit: bool,
    ) {
        match self {
            DynamicPool::Sliced(m) => m.cleanup(storage, alloc_nr, explicit),
            DynamicPool::Exclusive(m) => m.cleanup(storage, alloc_nr, explicit),
        }
    }
}
69
/// Reserves and keeps track of chunks of memory in the storage, and slices upon these chunks.
pub struct MemoryManagement<Storage> {
    // The configured pools; `reserve` scans them in order and uses the first
    // whose `max_alloc_size` fits the request.
    pools: Vec<DynamicPool>,
    // Backing storage that pools allocate their pages from.
    storage: Storage,
    // Monotonic count of `reserve` calls, passed to `MemoryPool::cleanup`
    // as `alloc_nr` so pools can schedule periodic deallocation.
    alloc_reserve_count: u64,
}
76
/// Exclude certain storage buffers from being selected when reserving memory.
#[derive(Debug, Clone, Default)]
pub struct StorageExclude {
    // Set of storage ids that `try_reserve` must not hand out.
    ids: HashSet<StorageId>,
}
82
impl StorageExclude {
    /// Add a storage buffer to the exclusion list.
    pub fn exclude_storage(&mut self, storage: StorageId) {
        self.ids.insert(storage);
    }

    /// Check if a storage buffer is excluded.
    pub fn is_excluded(&self, storage: StorageId) -> bool {
        self.ids.contains(&storage)
    }

    /// Clear the exclusion list.
    pub fn clear(&mut self) {
        self.ids.clear();
    }

    /// Number of currently excluded storage buffers.
    pub fn count(&self) -> usize {
        self.ids.len()
    }
}
104
/// Generate up to `max_buckets` allocation-bucket sizes growing exponentially
/// from `start_size` to `end_size`, each rounded up to a multiple of
/// `alignment`.
///
/// Rounding can make adjacent buckets collide; duplicates are removed, so the
/// result may contain fewer than `max_buckets` entries. The returned sizes are
/// strictly increasing.
fn generate_bucket_sizes(
    start_size: u64,
    end_size: u64,
    max_buckets: usize,
    alignment: u64,
) -> Vec<u64> {
    let mut buckets = Vec::with_capacity(max_buckets);

    // Degenerate bucket counts. With exactly one bucket, the interpolation
    // below would divide by zero (`max_buckets - 1`), yielding NaN and a
    // bogus size of 0; cover the whole range with the aligned maximum instead.
    if max_buckets == 0 {
        return buckets;
    }
    if max_buckets == 1 {
        buckets.push(end_size.next_multiple_of(alignment));
        return buckets;
    }

    // Interpolate linearly in log-space so sizes grow exponentially between
    // the two endpoints.
    let log_min = (start_size as f64).ln();
    let log_max = (end_size as f64).ln();
    let log_range = log_max - log_min;

    for i in 0..max_buckets {
        // Fraction of the way through the log-range for this bucket.
        let p = i as f64 / (max_buckets - 1) as f64;
        let log_size = log_min + log_range * p;
        let size = log_size.exp() as u64;
        // Pools need aligned allocation sizes, so round up.
        buckets.push(size.next_multiple_of(alignment));
    }

    // Sizes are non-decreasing, so consecutive dedup removes every duplicate
    // introduced by the alignment rounding.
    buckets.dedup();
    buckets
}
129
// Scale factor used to stretch the deallocation period for pools with larger
// allocations. NOTE(review): despite the `_MB` suffix this is 1024^3 bytes,
// i.e. 1 GiB, not 1 MiB — confirm the intended scale.
const DEALLOC_SCALE_MB: u64 = 1024 * 1024 * 1024;
// Base deallocation period for exclusive pools. Presumably measured in
// `reserve` calls (it is combined with `alloc_reserve_count` in `cleanup`);
// the pool implementation is not visible here — TODO confirm.
const BASE_DEALLOC_PERIOD: u64 = 5000;
132
impl<Storage: ComputeStorage> MemoryManagement<Storage> {
    /// Creates the options from device limits.
    ///
    /// Builds the list of memory pools according to `config`:
    /// - `SubSlices`: a tiny exclusive pool plus a series of sliced pools with
    ///   page sizes derived from `properties.max_page_size`.
    /// - `ExclusivePages`: exclusive pools with exponentially spaced maximum
    ///   allocation sizes (see [`generate_bucket_sizes`]).
    /// - `Custom`: uses the caller-provided pool options as-is.
    pub fn from_configuration(
        storage: Storage,
        properties: &MemoryDeviceProperties,
        config: MemoryConfiguration,
    ) -> Self {
        let pool_options = match config {
            #[cfg(not(exclusive_memory_only))]
            MemoryConfiguration::SubSlices => {
                // Round chunk size to be aligned.
                let memory_alignment = properties.alignment;
                let max_page = properties.max_page_size;
                let mut pools = Vec::new();

                const MB: u64 = 1024 * 1024;

                // Add in a pool for allocations that are smaller than the min alignment,
                // as they can't use offsets at all (on wgpu at least).
                pools.push(MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages { max_alloc_size: 0 },
                    dealloc_period: None,
                });

                let mut current = max_page;
                let mut max_sizes = vec![];
                let mut page_sizes = vec![];
                // Starts at 1 (the exclusive pool above); each successive pool
                // caps its slices at a smaller fraction of its page size.
                let mut base = pools.len() as u32;

                // Generate page sizes from the largest down, dividing by 4
                // each step, until pages would drop below 32 MiB.
                while current >= 32 * MB {
                    current /= 4;

                    // Make sure every pool has an aligned size.
                    current = current.next_multiple_of(memory_alignment);

                    max_sizes.push(current / 2u64.pow(base));
                    page_sizes.push(current);
                    base += 1;
                }

                // Sizes were generated from big to small; reverse so the pools
                // are registered from small to big (reserve scans in order).
                max_sizes.reverse();
                page_sizes.reverse();

                for i in 0..max_sizes.len() {
                    let max = max_sizes[i];
                    let page_size = page_sizes[i];

                    pools.push(MemoryPoolOptions {
                        // Creating max slices lower than the chunk size reduces fragmentation.
                        pool_type: PoolType::SlicedPages {
                            page_size,
                            max_slice_size: max,
                        },
                        dealloc_period: None,
                    });
                }

                // Final, largest pool: the maximum page size rounded *down* to
                // the alignment, with slices as large as the page itself.
                pools.push(MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size: max_page / memory_alignment * memory_alignment,
                        max_slice_size: max_page / memory_alignment * memory_alignment,
                    },
                    dealloc_period: None,
                });
                pools
            }
            MemoryConfiguration::ExclusivePages => {
                // Add all bin sizes. Nb: because of alignment some buckets
                // end up as the same size; `generate_bucket_sizes` drops the
                // duplicates while keeping the sizes sorted.
                const MIN_BUCKET_SIZE: u64 = 1024 * 32;
                const NUM_POOLS: usize = 24;

                let sizes = generate_bucket_sizes(
                    MIN_BUCKET_SIZE,
                    properties.max_page_size,
                    NUM_POOLS,
                    properties.alignment,
                );

                sizes
                    .iter()
                    .map(|&size| {
                        // Bigger pools get a longer deallocation period.
                        // NOTE(review): `.round()` applies only to the scale
                        // factor, not the whole product, so every pool smaller
                        // than half of DEALLOC_SCALE_MB gets exactly the base
                        // period — confirm this parenthesization is intended.
                        let dealloc_period = (BASE_DEALLOC_PERIOD as f64
                            * (1.0 + size as f64 / (DEALLOC_SCALE_MB as f64)).round())
                            as u64;

                        MemoryPoolOptions {
                            pool_type: PoolType::ExclusivePages {
                                max_alloc_size: size,
                            },
                            dealloc_period: Some(dealloc_period),
                        }
                    })
                    .collect()
            }
            MemoryConfiguration::Custom { pool_options } => pool_options,
        };

        for pool in pool_options.iter() {
            log::trace!("Using memory pool: \n {pool:?}");
        }

        // Instantiate the concrete pool for each configured option.
        let pools: Vec<_> = pool_options
            .iter()
            .map(|options| match options.pool_type {
                PoolType::SlicedPages {
                    page_size,
                    max_slice_size,
                } => DynamicPool::Sliced(SlicedPool::new(
                    page_size,
                    max_slice_size,
                    properties.alignment,
                )),
                PoolType::ExclusivePages { max_alloc_size } => {
                    DynamicPool::Exclusive(ExclusiveMemoryPool::new(
                        max_alloc_size,
                        properties.alignment,
                        // No configured period: effectively never deallocate
                        // on a timer (u64::MAX allocations).
                        options.dealloc_period.unwrap_or(u64::MAX),
                    ))
                }
            })
            .collect();

        Self {
            pools,
            storage,
            alloc_reserve_count: 0,
        }
    }

    /// Cleanup allocations in pools that are deemed unnecessary.
    ///
    /// When `explicit` is true, pools are asked to clean up regardless of
    /// their deallocation schedule.
    pub fn cleanup(&mut self, explicit: bool) {
        for pool in self.pools.iter_mut() {
            pool.cleanup(&mut self.storage, self.alloc_reserve_count, explicit);
        }
    }

    /// Returns the storage from the specified binding
    pub fn get(&mut self, binding: SliceBinding) -> Option<StorageHandle> {
        // The binding belongs to exactly one pool; the first match wins.
        self.pools.iter().find_map(|p| p.get(&binding)).cloned()
    }

    /// Returns the resource from the storage at the specified handle
    ///
    /// `offset_start`/`offset_end` optionally narrow the handle's range
    /// before resolving it against the storage.
    pub fn get_resource(
        &mut self,
        binding: SliceBinding,
        offset_start: Option<u64>,
        offset_end: Option<u64>,
    ) -> Option<Storage::Resource> {
        let handle = self.get(binding);

        handle.map(|handle| {
            let handle = match offset_start {
                Some(offset) => handle.offset_start(offset),
                None => handle,
            };
            let handle = match offset_end {
                Some(offset) => handle.offset_end(offset),
                None => handle,
            };
            self.storage().get(&handle)
        })
    }

    /// Finds a spot in memory for a resource with the given size in bytes, and returns a handle to it
    pub fn reserve(&mut self, size: u64, exclude: Option<&StorageExclude>) -> SliceHandle {
        // If this happens every nanosecond, counts overflows after 585 years, so not worth thinking too
        // hard about overflow here.
        self.alloc_reserve_count += 1;

        // Find first pool that fits this allocation
        let pool = self
            .pools
            .iter_mut()
            .find(|p| p.max_alloc_size() >= size)
            .unwrap_or_else(|| panic!("No pool handles allocation of size {size}"));

        // Prefer reusing existing memory; fall back to a fresh allocation.
        if let Some(slice) = pool.try_reserve(size, exclude) {
            return slice;
        }

        pool.alloc(&mut self.storage, size)
    }

    /// Fetch the storage used by the memory manager.
    ///
    /// # Notes
    ///
    /// The storage should probably not be used for allocations since the handles won't be
    /// compatible with the ones provided by the current trait. Prefer using the
    /// [alloc](ComputeStorage::alloc) and [dealloc](ComputeStorage::dealloc) functions.
    ///
    /// This is useful if you need to time the deallocations based on async computation, or to
    /// change the mode of storage for different reasons.
    pub fn storage(&mut self) -> &mut Storage {
        &mut self.storage
    }

    /// Get the current memory usage.
    ///
    /// Aggregates the usage reported by every pool.
    pub fn memory_usage(&self) -> MemoryUsage {
        self.pools.iter().map(|x| x.get_memory_usage()).fold(
            MemoryUsage {
                number_allocs: 0,
                bytes_in_use: 0,
                bytes_padding: 0,
                bytes_reserved: 0,
            },
            |m1, m2| m1.combine(m2),
        )
    }

    /// Print out a report of the current memory usage.
    pub fn print_memory_usage(&self) {
        #[cfg(feature = "std")]
        log::info!("{}", self.memory_usage());
    }
}
352
353impl<Storage> core::fmt::Debug for MemoryManagement<Storage> {
354    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
355        f.write_str(
356            alloc::format!(
357                "DynamicMemoryManagement {:?}",
358                core::any::type_name::<Storage>(),
359            )
360            .as_str(),
361        )
362    }
363}
364
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{memory_management::MemoryManagement, storage::BytesStorage};

    // Shared device properties: 128 MiB max page, 32-byte alignment.
    const DUMMY_MEM_PROPS: MemoryDeviceProperties = MemoryDeviceProperties {
        max_page_size: 128 * 1024 * 1024,
        alignment: 32,
    };

    // Test pools with slices.
    #[test]
    #[cfg(not(exclusive_memory_only))]
    fn test_handle_mutability() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::SubSlices,
        );
        let handle = memory_management.reserve(10, None);
        let other_ref = handle.clone();
        assert!(!handle.can_mut(), "Handle can't be mut when multiple ref.");
        drop(other_ref);
        assert!(handle.can_mut(), "Handle should be mut when only one ref.");
    }

    // Test memory-usage reporting with a single exclusive pool.
    #[test]
    #[cfg(not(exclusive_memory_only))]
    fn test_memory_usage() {
        let max_page_size = 512;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages {
                        max_alloc_size: max_page_size,
                    },
                    dealloc_period: None,
                }],
            },
        );
        let handle = memory_management.reserve(100, None);
        let usage = memory_management.memory_usage();

        assert_eq!(usage.bytes_in_use, 100);
        assert!(usage.bytes_reserved >= 100 && usage.bytes_reserved <= max_page_size);

        // Drop and re-alloc: usage must be unchanged since memory is reused.
        drop(handle);
        let _handle = memory_management.reserve(100, None);
        let usage_new = memory_management.memory_usage();
        assert_eq!(usage, usage_new);
    }

    #[test]
    fn alloc_two_chunks_on_one_page() {
        let page_size = 2048;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
        );

        // Two 512-byte slices fit on a single 2048-byte page.
        let alloc_size = 512;
        let _handle = memory_management.reserve(alloc_size, None);
        let _new_handle = memory_management.reserve(alloc_size, None);

        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 2);
        assert_eq!(usage.bytes_in_use, alloc_size * 2);
        assert_eq!(usage.bytes_reserved, page_size);
    }

    #[test]
    fn alloc_reuses_storage() {
        // If no storage is re-used, this will allocate two pages.
        let page_size = 512;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
        );

        let alloc_size = 512;
        let _handle = memory_management.reserve(alloc_size, None);
        drop(_handle);
        let _new_handle = memory_management.reserve(alloc_size, None);

        // Only one page should ever have been reserved.
        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 1);
        assert_eq!(usage.bytes_in_use, alloc_size);
        assert_eq!(usage.bytes_reserved, page_size);
    }

    #[test]
    fn alloc_allocs_new_storage() {
        let page_size = 1024;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
        );

        // Two 768-byte slices cannot share a 1024-byte page, so a second
        // page must be allocated.
        let alloc_size = 768;
        let _handle = memory_management.reserve(alloc_size, None);
        let _new_handle = memory_management.reserve(alloc_size, None);

        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 2);
        assert_eq!(usage.bytes_in_use, alloc_size * 2);
        assert_eq!(usage.bytes_reserved, page_size * 2);
    }

    #[test]
    fn alloc_respects_alignment_size() {
        let page_size = 500;
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: page_size,
                alignment: 50,
            },
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
        );
        let alloc_size = 40;
        let _handle = memory_management.reserve(alloc_size, None);
        let _new_handle = memory_management.reserve(alloc_size, None);
        let usage = memory_management.memory_usage();
        // Each 40-byte slice is rounded up to the 50-byte alignment,
        // so 10 padding bytes per slice.
        assert_eq!(usage.bytes_padding, 10 * 2);
    }

    #[test]
    fn allocs_on_correct_page() {
        let sizes = [100, 200, 300, 400];

        let pools = sizes
            .iter()
            .map(|size| MemoryPoolOptions {
                pool_type: PoolType::SlicedPages {
                    page_size: *size,
                    max_slice_size: *size,
                },
                dealloc_period: None,
            })
            .collect();
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: 128 * 1024 * 1024,
                alignment: 10,
            },
            MemoryConfiguration::Custom {
                pool_options: pools,
            },
        );
        // Allocate one thing on each page.
        let alloc_sizes = [50, 150, 250, 350];
        let _handles = alloc_sizes.map(|s| memory_management.reserve(s, None));

        let usage = memory_management.memory_usage();

        // All allocations are in use, and the reserved memory covers at
        // least one page per pool.
        assert_eq!(usage.bytes_in_use, alloc_sizes.iter().sum::<u64>());
        assert!(usage.bytes_reserved >= sizes.iter().sum::<u64>());
    }

    #[test]
    #[cfg(not(exclusive_memory_only))]
    fn allocate_deallocate_reallocate() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: 128 * 1024 * 1024,
                alignment: 32,
            },
            MemoryConfiguration::SubSlices,
        );
        // Allocate a bunch
        let handles: Vec<_> = (0..5)
            .map(|i| memory_management.reserve(1000 * (i + 1), None))
            .collect();
        let usage_before = memory_management.memory_usage();
        // Deallocate
        drop(handles);
        // Reallocate
        let _new_handles: Vec<_> = (0..5)
            .map(|i| memory_management.reserve(1000 * (i + 1), None))
            .collect();
        let usage_after = memory_management.memory_usage();
        assert_eq!(usage_before.number_allocs, usage_after.number_allocs);
        assert_eq!(usage_before.bytes_in_use, usage_after.bytes_in_use);
        // Usage after can actually be _less_ because of defragging.
        assert!(usage_before.bytes_reserved >= usage_after.bytes_reserved);
    }

    #[test]
    #[cfg(not(exclusive_memory_only))]
    fn test_fragmentation_resistance() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: 128 * 1024 * 1024,
                alignment: 32,
            },
            MemoryConfiguration::SubSlices,
        );
        // Allocate a mix of small and large chunks
        let sizes = [50, 1000, 100, 5000, 200, 10000, 300];
        let handles: Vec<_> = sizes
            .iter()
            .map(|&size| memory_management.reserve(size, None))
            .collect();
        let usage_before = memory_management.memory_usage();
        // Deallocate every other allocation
        for i in (0..handles.len()).step_by(2) {
            drop(handles[i].clone());
        }
        // Reallocate similar sizes
        for &size in &sizes[0..sizes.len() / 2] {
            memory_management.reserve(size, None);
        }
        let usage_after = memory_management.memory_usage();
        // Check that we haven't increased our memory usage significantly
        assert!(usage_after.bytes_reserved <= (usage_before.bytes_reserved as f64 * 1.1) as u64);
    }

    // Test pools without slices. More or less same as tests above.
    #[test]
    fn noslice_test_handle_mutability() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &(MemoryDeviceProperties {
                max_page_size: 128 * 1024 * 1024,
                alignment: 32,
            }),
            MemoryConfiguration::ExclusivePages,
        );
        let handle = memory_management.reserve(10, None);
        let other_ref = handle.clone();
        assert!(!handle.can_mut(), "Handle can't be mut when multiple ref.");
        drop(other_ref);
        assert!(handle.can_mut(), "Handle should be mut when only one ref.");
    }

    #[test]
    fn noslice_alloc_two_chunk() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages {
                        max_alloc_size: 1024,
                    },
                    dealloc_period: None,
                }],
            },
        );

        // Exclusive pools use one backing allocation per slice, so two
        // reservations mean two allocations.
        let alloc_size = 512;
        let _handle = memory_management.reserve(alloc_size, None);
        let _new_handle = memory_management.reserve(alloc_size, None);

        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 2);
        assert_eq!(usage.bytes_in_use, alloc_size * 2);
        assert!(usage.bytes_reserved >= alloc_size * 2);
    }

    #[test]
    fn noslice_alloc_reuses_storage() {
        // If no storage is re-used, this will allocate two pages.
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages {
                        max_alloc_size: 1024,
                    },
                    dealloc_period: None,
                }],
            },
        );

        let alloc_size = 512;
        let _handle = memory_management.reserve(alloc_size, None);
        drop(_handle);
        let _new_handle = memory_management.reserve(alloc_size, None);

        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 1);
        assert_eq!(usage.bytes_in_use, alloc_size);
        assert!(usage.bytes_reserved >= alloc_size);
    }

    #[test]
    fn noslice_alloc_allocs_new_storage() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages {
                        max_alloc_size: 1024,
                    },
                    dealloc_period: None,
                }],
            },
        );

        let alloc_size = 768;
        let _handle = memory_management.reserve(alloc_size, None);
        let _new_handle = memory_management.reserve(alloc_size, None);
        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 2);
        assert_eq!(usage.bytes_in_use, alloc_size * 2);
        assert!(usage.bytes_reserved >= alloc_size * 2);
    }

    #[test]
    fn noslice_alloc_respects_alignment_size() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: DUMMY_MEM_PROPS.max_page_size,
                alignment: 50,
            },
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages {
                        max_alloc_size: 50 * 20,
                    },
                    dealloc_period: None,
                }],
            },
        );
        let alloc_size = 40;
        let _handle = memory_management.reserve(alloc_size, None);
        let _new_handle = memory_management.reserve(alloc_size, None);
        let usage = memory_management.memory_usage();
        // Each 40-byte slice is rounded up to the 50-byte alignment,
        // so 10 padding bytes per slice.
        assert_eq!(usage.bytes_padding, 10 * 2);
    }

    #[test]
    fn noslice_allocs_on_correct_page() {
        // NOTE(review): despite the `noslice_` prefix this test configures
        // `SlicedPages` pools — confirm whether `ExclusivePages` was intended.
        let pools = [100, 200, 300, 400]
            .iter()
            .map(|&size| MemoryPoolOptions {
                pool_type: PoolType::SlicedPages {
                    page_size: size,
                    max_slice_size: size,
                },
                dealloc_period: None,
            })
            .collect();
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: DUMMY_MEM_PROPS.max_page_size,
                alignment: 10,
            },
            MemoryConfiguration::Custom {
                pool_options: pools,
            },
        );
        // Allocate one thing on each page.
        let alloc_sizes = [50, 150, 250, 350];
        let _handles = alloc_sizes.map(|s| memory_management.reserve(s, None));
        let usage = memory_management.memory_usage();
        // Every allocated byte is accounted as in use.
        assert_eq!(usage.bytes_in_use, alloc_sizes.iter().sum::<u64>());
    }

    #[test]
    fn noslice_allocate_deallocate_reallocate() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: 128 * 1024 * 1024,
                alignment: 32,
            },
            MemoryConfiguration::ExclusivePages,
        );
        // Allocate a bunch
        let handles: Vec<_> = (0..5)
            .map(|i| memory_management.reserve(1000 * (i + 1), None))
            .collect();
        let usage_before = memory_management.memory_usage();
        // Deallocate
        drop(handles);
        // Reallocate
        let _new_handles: Vec<_> = (0..5)
            .map(|i| memory_management.reserve(1000 * (i + 1), None))
            .collect();
        let usage_after = memory_management.memory_usage();
        assert_eq!(usage_before.number_allocs, usage_after.number_allocs);
        assert_eq!(usage_before.bytes_in_use, usage_after.bytes_in_use);
        assert_eq!(usage_before.bytes_reserved, usage_after.bytes_reserved);
    }
}
805}