cubecl_runtime/memory_management/
memory_manage.rs

1use super::{
2    MemoryConfiguration, MemoryDeviceProperties, MemoryPoolOptions, MemoryUsage, PoolType,
3    memory_pool::{ExclusiveMemoryPool, MemoryPool, SlicedPool},
4};
5use crate::storage::{ComputeStorage, StorageHandle, StorageId};
6#[cfg(not(feature = "std"))]
7use alloc::vec;
8use alloc::vec::Vec;
9use hashbrown::HashSet;
10
11pub use super::memory_pool::{SliceBinding, SliceHandle};
12
/// A memory pool that is either sliced (sub-allocates slices out of larger
/// pages) or exclusive (one allocation per backing buffer).
enum DynamicPool {
    Sliced(SlicedPool),
    Exclusive(ExclusiveMemoryPool),
}
17
// `DynamicPool` is a plain static dispatcher: every trait method forwards
// unchanged to the wrapped `SlicedPool` or `ExclusiveMemoryPool`.
impl MemoryPool for DynamicPool {
    // Look up the storage handle backing `binding`, if this pool owns it.
    fn get(&self, binding: &SliceBinding) -> Option<&StorageHandle> {
        match self {
            DynamicPool::Sliced(m) => m.get(binding),
            DynamicPool::Exclusive(m) => m.get(binding),
        }
    }

    // Try to reserve `size` bytes from memory the pool already holds,
    // skipping any storage listed in `exclude`; `None` if nothing fits.
    fn try_reserve(&mut self, size: u64, exclude: Option<&StorageExclude>) -> Option<SliceHandle> {
        match self {
            DynamicPool::Sliced(m) => m.try_reserve(size, exclude),
            DynamicPool::Exclusive(m) => m.try_reserve(size, exclude),
        }
    }

    // Allocate fresh backing storage for `size` bytes.
    fn alloc<Storage: ComputeStorage>(&mut self, storage: &mut Storage, size: u64) -> SliceHandle {
        match self {
            DynamicPool::Sliced(m) => m.alloc(storage, size),
            DynamicPool::Exclusive(m) => m.alloc(storage, size),
        }
    }

    fn get_memory_usage(&self) -> MemoryUsage {
        match self {
            DynamicPool::Sliced(m) => m.get_memory_usage(),
            DynamicPool::Exclusive(m) => m.get_memory_usage(),
        }
    }

    // Largest single allocation this pool accepts (used by `reserve` to pick a pool).
    fn max_alloc_size(&self) -> u64 {
        match self {
            DynamicPool::Sliced(m) => m.max_alloc_size(),
            DynamicPool::Exclusive(m) => m.max_alloc_size(),
        }
    }

    // Release memory the pool deems unnecessary; `alloc_nr` is the global
    // reservation counter, `explicit` marks a user-requested cleanup.
    fn cleanup<Storage: ComputeStorage>(
        &mut self,
        storage: &mut Storage,
        alloc_nr: u64,
        explicit: bool,
    ) {
        match self {
            DynamicPool::Sliced(m) => m.cleanup(storage, alloc_nr, explicit),
            DynamicPool::Exclusive(m) => m.cleanup(storage, alloc_nr, explicit),
        }
    }
}
66
/// Reserves and keeps track of chunks of memory in the storage, and slices upon these chunks.
pub struct MemoryManagement<Storage> {
    // Pool order matters: `reserve` picks the first pool whose
    // `max_alloc_size` fits the request.
    pools: Vec<DynamicPool>,
    // Backing compute storage all pools allocate from.
    storage: Storage,
    // Monotonic counter of `reserve` calls, used to time pool cleanups.
    alloc_reserve_count: u64,
}
73
/// Exclude certain storage buffers from being selected when reserving memory.
#[derive(Debug, Clone, Default)]
pub struct StorageExclude {
    // Set of storage ids that `try_reserve` must not hand out.
    ids: HashSet<StorageId>,
}
79
80impl StorageExclude {
81    /// Add a storage buffer to the exclusion list.
82    pub fn exclude_storage(&mut self, storage: StorageId) {
83        self.ids.insert(storage);
84    }
85
86    /// Check if a storage buffer is excluded.
87    pub fn is_excluded(&self, storage: StorageId) -> bool {
88        self.ids.contains(&storage)
89    }
90
91    /// Clear the exclusion list.
92    pub fn clear(&mut self) {
93        self.ids.clear();
94    }
95
96    /// Number of currently excluded storage buffers.
97    pub fn count(&self) -> usize {
98        self.ids.len()
99    }
100}
101
/// Generate up to `max_buckets` bucket sizes spaced uniformly in log-space
/// (i.e. exponential growth) from `start_size` to `end_size`, each rounded up
/// to a multiple of `alignment`.
///
/// Alignment can collapse neighbouring buckets into the same value; since the
/// list is non-decreasing, `dedup` removes all duplicates, so fewer than
/// `max_buckets` sizes may be returned.
fn generate_bucket_sizes(
    start_size: u64,
    end_size: u64,
    max_buckets: usize,
    alignment: u64,
) -> Vec<u64> {
    // Degenerate cases: without these guards, `max_buckets == 1` makes the
    // interpolation below compute 0.0 / 0.0 = NaN, which casts to 0 and
    // yields a bogus zero-sized bucket.
    if max_buckets == 0 {
        return Vec::new();
    }
    if max_buckets == 1 {
        return vec![start_size.next_multiple_of(alignment)];
    }

    let mut buckets = Vec::with_capacity(max_buckets);
    let log_min = (start_size as f64).ln();
    let log_max = (end_size as f64).ln();
    let log_range = log_max - log_min;

    for i in 0..max_buckets {
        // Interpolation parameter in [0, 1]; endpoints hit start/end exactly
        // (up to float rounding, which the alignment round-up absorbs).
        let p = i as f64 / (max_buckets - 1) as f64;
        let log_size = log_min + log_range * p;
        let size = log_size.exp() as u64;
        // Every bucket must be a multiple of the device alignment.
        let aligned_size = size.next_multiple_of(alignment);
        buckets.push(aligned_size);
    }

    // Consecutive-duplicate removal is sufficient: sizes are non-decreasing.
    buckets.dedup();
    buckets
}
126
// Size scale used to stretch dealloc periods for bigger pools.
// NOTE(review): despite the `_MB` suffix this is 1024^3 bytes, i.e. 1 GiB —
// the name looks like a misnomer; kept as-is since it is referenced below.
const DEALLOC_SCALE_MB: u64 = 1024 * 1024 * 1024;
// Baseline number of reservations between periodic deallocation checks.
const BASE_DEALLOC_PERIOD: u64 = 5000;
129
impl<Storage: ComputeStorage> MemoryManagement<Storage> {
    /// Creates the options from device limits.
    ///
    /// Builds the pool layout described by `config` (sub-sliced pages,
    /// exclusive per-size pools, or a custom list), then instantiates the
    /// concrete pools over the given `storage`.
    pub fn from_configuration(
        storage: Storage,
        properties: &MemoryDeviceProperties,
        config: MemoryConfiguration,
    ) -> Self {
        // Step 1: describe the pools as plain options; instantiated below.
        let pool_options = match config {
            #[cfg(not(exclusive_memory_only))]
            MemoryConfiguration::SubSlices => {
                // Round chunk size to be aligned.
                let memory_alignment = properties.alignment;
                let max_page = properties.max_page_size;
                let mut pools = Vec::new();

                const MB: u64 = 1024 * 1024;

                // Add in a pool for allocations that are smaller than the min alignment,
                // as they can't use offsets at all (on wgpu at least).
                pools.push(MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages { max_alloc_size: 0 },
                    dealloc_period: None,
                });

                // Walk down from the max page size in /4 steps, collecting a
                // (page size, max slice size) pair per step.
                let mut current = max_page;
                let mut max_sizes = vec![];
                let mut page_sizes = vec![];
                let mut base = pools.len() as u32;

                while current >= 32 * MB {
                    current /= 4;

                    // Make sure every pool has an aligned size.
                    current = current.next_multiple_of(memory_alignment);

                    // Slice cap shrinks by an extra power of two per step
                    // (`base` increments each iteration).
                    max_sizes.push(current / 2u64.pow(base));
                    page_sizes.push(current);
                    base += 1;
                }

                // The loop produced sizes big -> small; register pools
                // small -> big so `reserve` finds the tightest fit first.
                max_sizes.reverse();
                page_sizes.reverse();

                for i in 0..max_sizes.len() {
                    let max = max_sizes[i];
                    let page_size = page_sizes[i];

                    pools.push(MemoryPoolOptions {
                        // Creating max slices lower than the chunk size reduces fragmentation.
                        pool_type: PoolType::SlicedPages {
                            page_size,
                            max_slice_size: max,
                        },
                        dealloc_period: None,
                    });
                }

                // Add pools from big to small.
                // Final catch-all pool: a whole page, rounded *down* to an
                // alignment multiple, with slices as large as the page.
                pools.push(MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size: max_page / memory_alignment * memory_alignment,
                        max_slice_size: max_page / memory_alignment * memory_alignment,
                    },
                    dealloc_period: None,
                });
                pools
            }
            MemoryConfiguration::ExclusivePages => {
                // Add all bin sizes. Nb: because of alignment some buckets
                // end up as the same size, so only want unique ones,
                // but also keep the order, so a BTree will do.
                const MIN_BUCKET_SIZE: u64 = 1024 * 32;
                const NUM_POOLS: usize = 24;

                let sizes = generate_bucket_sizes(
                    MIN_BUCKET_SIZE,
                    properties.max_page_size,
                    NUM_POOLS,
                    properties.alignment,
                );

                sizes
                    .iter()
                    .map(|&size| {
                        // Bigger buckets get a longer dealloc period.
                        // NOTE(review): `.round()` binds to `(1.0 + size/scale)`
                        // only, so for sizes below half of DEALLOC_SCALE_MB the
                        // factor rounds to exactly 1.0 and the period is a
                        // constant BASE_DEALLOC_PERIOD — possibly the rounding
                        // was meant to wrap the whole product; confirm intent.
                        let dealloc_period = (BASE_DEALLOC_PERIOD as f64
                            * (1.0 + size as f64 / (DEALLOC_SCALE_MB as f64)).round())
                            as u64;

                        MemoryPoolOptions {
                            pool_type: PoolType::ExclusivePages {
                                max_alloc_size: size,
                            },
                            dealloc_period: Some(dealloc_period),
                        }
                    })
                    .collect()
            }
            MemoryConfiguration::Custom { pool_options } => pool_options,
        };

        for pool in pool_options.iter() {
            log::trace!("Using memory pool: \n {pool:?}");
        }

        // Step 2: instantiate the concrete pools from their options.
        let pools: Vec<_> = pool_options
            .iter()
            .map(|options| match options.pool_type {
                PoolType::SlicedPages {
                    page_size,
                    max_slice_size,
                } => DynamicPool::Sliced(SlicedPool::new(
                    page_size,
                    max_slice_size,
                    properties.alignment,
                )),
                PoolType::ExclusivePages { max_alloc_size } => {
                    DynamicPool::Exclusive(ExclusiveMemoryPool::new(
                        max_alloc_size,
                        properties.alignment,
                        // No configured period: effectively never deallocate.
                        options.dealloc_period.unwrap_or(u64::MAX),
                    ))
                }
            })
            .collect();

        Self {
            pools,
            storage,
            alloc_reserve_count: 0,
        }
    }

    /// Cleanup allocations in pools that are deemed unnecessary.
    pub fn cleanup(&mut self, explicit: bool) {
        for pool in self.pools.iter_mut() {
            pool.cleanup(&mut self.storage, self.alloc_reserve_count, explicit);
        }
    }

    /// Returns the storage from the specified binding
    pub fn get(&mut self, binding: SliceBinding) -> Option<StorageHandle> {
        // Each binding belongs to exactly one pool; scan until one claims it.
        self.pools.iter().find_map(|p| p.get(&binding)).cloned()
    }

    /// Returns the resource from the storage at the specified handle
    pub fn get_resource(
        &mut self,
        binding: SliceBinding,
        offset_start: Option<u64>,
        offset_end: Option<u64>,
    ) -> Option<Storage::Resource> {
        let handle = self.get(binding);

        handle.map(|handle| {
            // Narrow the handle by the optional offsets before resolving it.
            let handle = match offset_start {
                Some(offset) => handle.offset_start(offset),
                None => handle,
            };
            let handle = match offset_end {
                Some(offset) => handle.offset_end(offset),
                None => handle,
            };
            self.storage().get(&handle)
        })
    }

    /// Finds a spot in memory for a resource with the given size in bytes, and returns a handle to it
    pub fn reserve(&mut self, size: u64, exclude: Option<&StorageExclude>) -> SliceHandle {
        // If this happens every nanosecond, counts overflows after 585 years, so not worth thinking too
        // hard about overflow here.
        self.alloc_reserve_count += 1;

        // Find first pool that fits this allocation
        let pool = self
            .pools
            .iter_mut()
            .find(|p| p.max_alloc_size() >= size)
            .unwrap_or_else(|| panic!("No pool handles allocation of size {size}"));

        // Prefer reusing memory the pool already holds; only allocate fresh
        // storage when nothing free fits.
        if let Some(slice) = pool.try_reserve(size, exclude) {
            return slice;
        }

        pool.alloc(&mut self.storage, size)
    }

    /// Fetch the storage used by the memory manager.
    ///
    /// # Notes
    ///
    /// The storage should probably not be used for allocations since the handles won't be
    /// compatible with the ones provided by the current trait. Prefer using the
    /// [alloc](ComputeStorage::alloc) and [dealloc](ComputeStorage::dealloc) functions.
    ///
    /// This is useful if you need to time the deallocations based on async computation, or to
    /// change the mode of storage for different reasons.
    pub fn storage(&mut self) -> &mut Storage {
        &mut self.storage
    }

    /// Get the current memory usage.
    pub fn memory_usage(&self) -> MemoryUsage {
        // Sum usage across all pools, starting from an all-zero accumulator.
        self.pools.iter().map(|x| x.get_memory_usage()).fold(
            MemoryUsage {
                number_allocs: 0,
                bytes_in_use: 0,
                bytes_padding: 0,
                bytes_reserved: 0,
            },
            |m1, m2| m1.combine(m2),
        )
    }

    /// Print out a report of the current memory usage.
    pub fn print_memory_usage(&self) {
        // Logging is only available with the `std` feature; no-op otherwise.
        #[cfg(feature = "std")]
        log::info!("{}", self.memory_usage());
    }
}
349
350impl<Storage> core::fmt::Debug for MemoryManagement<Storage> {
351    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
352        f.write_str(
353            alloc::format!(
354                "DynamicMemoryManagement {:?}",
355                core::any::type_name::<Storage>(),
356            )
357            .as_str(),
358        )
359    }
360}
361
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{memory_management::MemoryManagement, storage::BytesStorage};

    // Shared device properties: 128 MiB max page size, 32-byte alignment.
    const DUMMY_MEM_PROPS: MemoryDeviceProperties = MemoryDeviceProperties {
        max_page_size: 128 * 1024 * 1024,
        alignment: 32,
    };

    // Test pools with slices.
    #[test]
    #[cfg(not(exclusive_memory_only))]
    fn test_handle_mutability() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::SubSlices,
        );
        let handle = memory_management.reserve(10, None);
        let other_ref = handle.clone();
        assert!(!handle.can_mut(), "Handle can't be mut when multiple ref.");
        drop(other_ref);
        assert!(handle.can_mut(), "Handle should be mut when only one ref.");
    }

    // Test pools with slices.
    #[test]
    #[cfg(not(exclusive_memory_only))]
    fn test_memory_usage() {
        // Used as the pool's max allocation size below, not as a page size.
        let max_page_size = 512;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages {
                        max_alloc_size: max_page_size,
                    },
                    dealloc_period: None,
                }],
            },
        );
        let handle = memory_management.reserve(100, None);
        let usage = memory_management.memory_usage();

        assert_eq!(usage.bytes_in_use, 100);
        assert!(usage.bytes_reserved >= 100 && usage.bytes_reserved <= max_page_size);

        // Drop and re-alloc: usage must be identical, proving re-use.
        drop(handle);
        let _handle = memory_management.reserve(100, None);
        let usage_new = memory_management.memory_usage();
        assert_eq!(usage, usage_new);
    }

    #[test]
    fn alloc_two_chunks_on_one_page() {
        let page_size = 2048;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
        );

        // Two 512-byte slices fit within a single 2048-byte page.
        let alloc_size = 512;
        let _handle = memory_management.reserve(alloc_size, None);
        let _new_handle = memory_management.reserve(alloc_size, None);

        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 2);
        assert_eq!(usage.bytes_in_use, alloc_size * 2);
        assert_eq!(usage.bytes_reserved, page_size);
    }

    #[test]
    fn alloc_reuses_storage() {
        // If no storage is re-used, this will allocate two pages.
        let page_size = 512;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
        );

        let alloc_size = 512;
        let _handle = memory_management.reserve(alloc_size, None);
        drop(_handle);
        let _new_handle = memory_management.reserve(alloc_size, None);

        // Only one page should ever have been reserved.
        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 1);
        assert_eq!(usage.bytes_in_use, alloc_size);
        assert_eq!(usage.bytes_reserved, page_size);
    }

    #[test]
    fn alloc_allocs_new_storage() {
        let page_size = 1024;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
        );

        // Two 768-byte slices cannot share a 1024-byte page, forcing a second page.
        let alloc_size = 768;
        let _handle = memory_management.reserve(alloc_size, None);
        let _new_handle = memory_management.reserve(alloc_size, None);

        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 2);
        assert_eq!(usage.bytes_in_use, alloc_size * 2);
        assert_eq!(usage.bytes_reserved, page_size * 2);
    }

    #[test]
    fn alloc_respects_alignment_size() {
        let page_size = 500;
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: page_size,
                alignment: 50,
            },
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
        );
        let alloc_size = 40;
        let _handle = memory_management.reserve(alloc_size, None);
        let _new_handle = memory_management.reserve(alloc_size, None);
        let usage = memory_management.memory_usage();
        // Each 40-byte slice is padded up to the 50-byte alignment:
        // 10 padding bytes per slice, 20 in total.
        assert_eq!(usage.bytes_padding, 10 * 2);
    }

    #[test]
    fn allocs_on_correct_page() {
        let sizes = [100, 200, 300, 400];

        // One sliced pool per page size.
        let pools = sizes
            .iter()
            .map(|size| MemoryPoolOptions {
                pool_type: PoolType::SlicedPages {
                    page_size: *size,
                    max_slice_size: *size,
                },
                dealloc_period: None,
            })
            .collect();
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: 128 * 1024 * 1024,
                alignment: 10,
            },
            MemoryConfiguration::Custom {
                pool_options: pools,
            },
        );
        // Allocate one thing on each page.
        let alloc_sizes = [50, 150, 250, 350];
        let _handles = alloc_sizes.map(|s| memory_management.reserve(s, None));

        let usage = memory_management.memory_usage();

        // Every allocation is accounted for, and the reserved memory covers
        // at least one page per configured pool.
        assert_eq!(usage.bytes_in_use, alloc_sizes.iter().sum::<u64>());
        assert!(usage.bytes_reserved >= sizes.iter().sum::<u64>());
    }

    #[test]
    #[cfg(not(exclusive_memory_only))]
    fn allocate_deallocate_reallocate() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: 128 * 1024 * 1024,
                alignment: 32,
            },
            MemoryConfiguration::SubSlices,
        );
        // Allocate a bunch
        let handles: Vec<_> = (0..5)
            .map(|i| memory_management.reserve(1000 * (i + 1), None))
            .collect();
        let usage_before = memory_management.memory_usage();
        // Deallocate
        drop(handles);
        // Reallocate
        let _new_handles: Vec<_> = (0..5)
            .map(|i| memory_management.reserve(1000 * (i + 1), None))
            .collect();
        let usage_after = memory_management.memory_usage();
        assert_eq!(usage_before.number_allocs, usage_after.number_allocs);
        assert_eq!(usage_before.bytes_in_use, usage_after.bytes_in_use);
        // Usage after can actually be _less_ because of defragging.
        assert!(usage_before.bytes_reserved >= usage_after.bytes_reserved);
    }

    #[test]
    #[cfg(not(exclusive_memory_only))]
    fn test_fragmentation_resistance() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: 128 * 1024 * 1024,
                alignment: 32,
            },
            MemoryConfiguration::SubSlices,
        );
        // Allocate a mix of small and large chunks
        let sizes = [50, 1000, 100, 5000, 200, 10000, 300];
        let handles: Vec<_> = sizes
            .iter()
            .map(|&size| memory_management.reserve(size, None))
            .collect();
        let usage_before = memory_management.memory_usage();
        // Deallocate every other allocation
        // NOTE(review): dropping a *clone* does not release the original
        // handle still stored in `handles`, so these slices stay alive —
        // presumably the intent was to drop the handles themselves; the
        // assertion below still holds, but exercises less than the comment
        // claims. Verify.
        for i in (0..handles.len()).step_by(2) {
            drop(handles[i].clone());
        }
        // Reallocate similar sizes
        for &size in &sizes[0..sizes.len() / 2] {
            memory_management.reserve(size, None);
        }
        let usage_after = memory_management.memory_usage();
        // Check that we haven't increased our memory usage significantly
        assert!(usage_after.bytes_reserved <= (usage_before.bytes_reserved as f64 * 1.1) as u64);
    }

    // Test pools without slices. More or less same as tests above.
    #[test]
    fn noslice_test_handle_mutability() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &(MemoryDeviceProperties {
                max_page_size: 128 * 1024 * 1024,
                alignment: 32,
            }),
            MemoryConfiguration::ExclusivePages,
        );
        let handle = memory_management.reserve(10, None);
        let other_ref = handle.clone();
        assert!(!handle.can_mut(), "Handle can't be mut when multiple ref.");
        drop(other_ref);
        assert!(handle.can_mut(), "Handle should be mut when only one ref.");
    }

    #[test]
    fn noslice_alloc_two_chunk() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages {
                        max_alloc_size: 1024,
                    },
                    dealloc_period: None,
                }],
            },
        );

        // Exclusive pages never share a buffer: two allocs, two reservations.
        let alloc_size = 512;
        let _handle = memory_management.reserve(alloc_size, None);
        let _new_handle = memory_management.reserve(alloc_size, None);

        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 2);
        assert_eq!(usage.bytes_in_use, alloc_size * 2);
        assert!(usage.bytes_reserved >= alloc_size * 2);
    }

    #[test]
    fn noslice_alloc_reuses_storage() {
        // If no storage is re-used, this will allocate two pages.
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages {
                        max_alloc_size: 1024,
                    },
                    dealloc_period: None,
                }],
            },
        );

        let alloc_size = 512;
        let _handle = memory_management.reserve(alloc_size, None);
        drop(_handle);
        let _new_handle = memory_management.reserve(alloc_size, None);

        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 1);
        assert_eq!(usage.bytes_in_use, alloc_size);
        assert!(usage.bytes_reserved >= alloc_size);
    }

    #[test]
    fn noslice_alloc_allocs_new_storage() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages {
                        max_alloc_size: 1024,
                    },
                    dealloc_period: None,
                }],
            },
        );

        // Both handles are live, so a second exclusive buffer must be created.
        let alloc_size = 768;
        let _handle = memory_management.reserve(alloc_size, None);
        let _new_handle = memory_management.reserve(alloc_size, None);
        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 2);
        assert_eq!(usage.bytes_in_use, alloc_size * 2);
        assert!(usage.bytes_reserved >= alloc_size * 2);
    }

    #[test]
    fn noslice_alloc_respects_alignment_size() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: DUMMY_MEM_PROPS.max_page_size,
                alignment: 50,
            },
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages {
                        max_alloc_size: 50 * 20,
                    },
                    dealloc_period: None,
                }],
            },
        );
        let alloc_size = 40;
        let _handle = memory_management.reserve(alloc_size, None);
        let _new_handle = memory_management.reserve(alloc_size, None);
        let usage = memory_management.memory_usage();
        // Each 40-byte allocation is padded up to the 50-byte alignment:
        // 10 padding bytes per allocation, 20 in total.
        assert_eq!(usage.bytes_padding, 10 * 2);
    }

    #[test]
    fn noslice_allocs_on_correct_page() {
        // NOTE(review): despite the `noslice` prefix this test configures
        // SlicedPages pools — possibly ExclusivePages was intended to mirror
        // `allocs_on_correct_page`; verify.
        let pools = [100, 200, 300, 400]
            .iter()
            .map(|&size| MemoryPoolOptions {
                pool_type: PoolType::SlicedPages {
                    page_size: size,
                    max_slice_size: size,
                },
                dealloc_period: None,
            })
            .collect();
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: DUMMY_MEM_PROPS.max_page_size,
                alignment: 10,
            },
            MemoryConfiguration::Custom {
                pool_options: pools,
            },
        );
        // Allocate one thing on each page.
        let alloc_sizes = [50, 150, 250, 350];
        let _handles = alloc_sizes.map(|s| memory_management.reserve(s, None));
        let usage = memory_management.memory_usage();
        // Every allocation is accounted for.
        assert_eq!(usage.bytes_in_use, alloc_sizes.iter().sum::<u64>());
    }

    #[test]
    fn noslice_allocate_deallocate_reallocate() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: 128 * 1024 * 1024,
                alignment: 32,
            },
            MemoryConfiguration::ExclusivePages,
        );
        // Allocate a bunch
        let handles: Vec<_> = (0..5)
            .map(|i| memory_management.reserve(1000 * (i + 1), None))
            .collect();
        let usage_before = memory_management.memory_usage();
        // Deallocate
        drop(handles);
        // Reallocate
        let _new_handles: Vec<_> = (0..5)
            .map(|i| memory_management.reserve(1000 * (i + 1), None))
            .collect();
        let usage_after = memory_management.memory_usage();
        assert_eq!(usage_before.number_allocs, usage_after.number_allocs);
        assert_eq!(usage_before.bytes_in_use, usage_after.bytes_in_use);
        assert_eq!(usage_before.bytes_reserved, usage_after.bytes_reserved);
    }
}