cubecl_runtime/memory_management/memory_manage.rs

1use alloc::collections::BTreeSet;
2
3use super::{
4    memory_pool::{ExclusiveMemoryPool, MemoryPool, SliceBinding, SliceHandle, SlicedPool},
5    MemoryConfiguration, MemoryDeviceProperties, MemoryLock, MemoryPoolOptions, MemoryUsage,
6    PoolType,
7};
8use crate::storage::{ComputeStorage, StorageHandle};
9use alloc::vec::Vec;
10
/// Runtime-dispatched memory pool: either a [SlicedPool] (many slices share a
/// page) or an [ExclusiveMemoryPool] (one allocation per page).
enum DynamicPool {
    Sliced(SlicedPool),
    Exclusive(ExclusiveMemoryPool),
}
15
// Bin sizes as per https://github.com/sebbbi/OffsetAllocator/blob/main/README.md
// (a float-like scheme: power-of-two "exponents" with 8 linear "mantissa" steps
// in between, starting at 128 bytes).
// This guarantees that _for bins in use_, the wasted space is at most 12.5%. So as long
// as bins have a high use rate this should be fairly efficient. That said, currently slices in
// bins don't deallocate, so there is a chance more memory than needed is used.
const EXP_BIN_SIZES: [u64; 200] = [
    128, 144, 160, 176, 192, 208, 224, 240, 256, 288, 320, 352, 384, 416, 448, 480, 512, 576, 640,
    704, 768, 832, 896, 960, 1024, 1152, 1280, 1408, 1536, 1664, 1792, 1920, 2048, 2304, 2560,
    2816, 3072, 3328, 3584, 3840, 4096, 4608, 5120, 5632, 6144, 6656, 7168, 7680, 8192, 9216,
    10240, 11264, 12288, 13312, 14336, 15360, 16384, 18432, 20480, 22528, 24576, 26624, 28672,
    30720, 32768, 36864, 40960, 45056, 49152, 53248, 57344, 61440, 65536, 73728, 81920, 90112,
    98304, 106496, 114688, 122880, 131072, 147456, 163840, 180224, 196608, 212992, 229376, 245760,
    262144, 294912, 327680, 360448, 393216, 425984, 458752, 491520, 524288, 589824, 655360, 720896,
    786432, 851968, 917504, 983040, 1048576, 1179648, 1310720, 1441792, 1572864, 1703936, 1835008,
    1966080, 2097152, 2359296, 2621440, 2883584, 3145728, 3407872, 3670016, 3932160, 4194304,
    4718592, 5242880, 5767168, 6291456, 6815744, 7340032, 7864320, 8388608, 9437184, 10485760,
    11534336, 12582912, 13631488, 14680064, 15728640, 16777216, 18874368, 20971520, 23068672,
    25165824, 27262976, 29360128, 31457280, 33554432, 37748736, 41943040, 46137344, 50331648,
    54525952, 58720256, 62914560, 67108864, 75497472, 83886080, 92274688, 100663296, 109051904,
    117440512, 125829120, 134217728, 150994944, 167772160, 184549376, 201326592, 218103808,
    234881024, 251658240, 268435456, 301989888, 335544320, 369098752, 402653184, 436207616,
    469762048, 503316480, 536870912, 603979776, 671088640, 738197504, 805306368, 872415232,
    939524096, 1006632960, 1073741824, 1207959552, 1342177280, 1476395008, 1610612736, 1744830464,
    1879048192, 2013265920, 2147483648, 2415919104, 2684354560, 2952790016, 3221225472, 3489660928,
    3758096384, 4026531840,
];
41
// One megabyte in bytes; used below to scale pool deallocation periods.
const MB: usize = 1024 * 1024;
43
44impl MemoryPool for DynamicPool {
45    fn get(&self, binding: &SliceBinding) -> Option<&StorageHandle> {
46        match self {
47            DynamicPool::Sliced(m) => m.get(binding),
48            DynamicPool::Exclusive(m) => m.get(binding),
49        }
50    }
51
52    fn reserve<Storage: ComputeStorage>(
53        &mut self,
54        storage: &mut Storage,
55        size: u64,
56        locked: Option<&MemoryLock>,
57    ) -> SliceHandle {
58        match self {
59            DynamicPool::Sliced(m) => m.reserve(storage, size, locked),
60            DynamicPool::Exclusive(m) => m.reserve(storage, size, locked),
61        }
62    }
63
64    fn alloc<Storage: ComputeStorage>(&mut self, storage: &mut Storage, size: u64) -> SliceHandle {
65        match self {
66            DynamicPool::Sliced(m) => m.alloc(storage, size),
67            DynamicPool::Exclusive(m) => m.alloc(storage, size),
68        }
69    }
70
71    fn get_memory_usage(&self) -> MemoryUsage {
72        match self {
73            DynamicPool::Sliced(m) => m.get_memory_usage(),
74            DynamicPool::Exclusive(m) => m.get_memory_usage(),
75        }
76    }
77
78    fn max_alloc_size(&self) -> u64 {
79        match self {
80            DynamicPool::Sliced(m) => m.max_alloc_size(),
81            DynamicPool::Exclusive(m) => m.max_alloc_size(),
82        }
83    }
84    fn cleanup<Storage: ComputeStorage>(&mut self, storage: &mut Storage, alloc_nr: u64) {
85        match self {
86            DynamicPool::Sliced(m) => m.cleanup(storage, alloc_nr),
87            DynamicPool::Exclusive(m) => m.cleanup(storage, alloc_nr),
88        }
89    }
90}
91
/// Reserves and keeps track of chunks of memory in the storage, and slices upon these chunks.
pub struct MemoryManagement<Storage> {
    /// The pools, kept sorted by ascending `max_alloc_size` (see [Self::new])
    /// so that `reserve`/`alloc` can binary search for the smallest fitting pool.
    pools: Vec<DynamicPool>,
    /// The backing storage where pages are actually allocated.
    storage: Storage,
    /// Monotonic counter of `reserve` calls; passed to pools in `cleanup` as a
    /// logical clock for their deallocation periods.
    alloc_reserve_count: u64,
}
98
99impl<Storage: ComputeStorage> MemoryManagement<Storage> {
    /// Builds the pool options implied by the device limits and the chosen
    /// [MemoryConfiguration], then constructs the manager from them.
    pub fn from_configuration(
        storage: Storage,
        properties: MemoryDeviceProperties,
        config: MemoryConfiguration,
    ) -> Self {
        let pools = match config {
            #[cfg(not(exclusive_memory_only))]
            MemoryConfiguration::SubSlices => {
                // Round chunk size to be aligned.
                let memory_alignment = properties.alignment;
                let max_page = properties.max_page_size;

                let mut pools = Vec::new();
                pools.push(MemoryPoolOptions {
                    page_size: max_page / memory_alignment * memory_alignment, // align the size to max_page.
                    chunk_num_prealloc: 0,
                    pool_type: PoolType::SlicedPages {
                        max_slice_size: max_page,
                    },
                    dealloc_period: None,
                });

                const MB: u64 = 1024 * 1024;

                // Add successively smaller sliced pools (each 4x smaller), stopping
                // once pages would shrink below 32MB.
                let mut current = max_page;
                while current >= 32 * MB {
                    current /= 4;
                    // Make sure every pool has an aligned size.
                    current = current.next_multiple_of(memory_alignment);

                    pools.push(MemoryPoolOptions {
                        page_size: current,
                        chunk_num_prealloc: 0,
                        // Creating max slices lower than the chunk size reduces fragmentation.
                        pool_type: PoolType::SlicedPages {
                            max_slice_size: current / 2u64.pow(pools.len() as u32),
                        },
                        dealloc_period: None,
                    });
                }
                // Add in a pool for allocations that are smaller than the min alignment,
                // as they can't use offsets at all (on wgpu at least).
                pools.push(MemoryPoolOptions {
                    page_size: memory_alignment,
                    chunk_num_prealloc: 0,
                    pool_type: PoolType::ExclusivePages,
                    dealloc_period: None,
                });
                pools
            }
            MemoryConfiguration::ExclusivePages => {
                // Round chunk size to be aligned.
                let memory_alignment = properties.alignment;

                // Add all bin sizes. Nb: because of alignment some buckets
                // end up as the same size, so only want unique ones,
                // but also keep the order, so a BTree will do.
                let sizes: BTreeSet<_> = EXP_BIN_SIZES
                    .iter()
                    .copied()
                    .map(|size| size.next_multiple_of(memory_alignment))
                    .take_while(|&size| size < properties.max_page_size)
                    .collect();

                // NOTE(review): a previous comment here said "Add in one pool for
                // all massive allocations", but no such pool is created — sizes at
                // or above `max_page_size` are excluded by the `take_while` above,
                // so a `reserve` larger than the biggest bin will panic. Confirm
                // whether a catch-all pool was intended.
                sizes
                    .iter()
                    .map(|&s| {
                        // Bigger buckets will logically have less slices, and are a bigger win
                        // to deallocate, so make the deallocation period roughly proportional to
                        // alloc size.
                        //
                        // This also +- follows zipfs law https://en.wikipedia.org/wiki/Zipf%27s_law
                        // which is an ok assumption for the distribution of allocations.
                        //
                        // This ranges from:
                        //   128 bytes, 8389608 allocations (aka almost never)
                        //   10kb, 105857 allocations
                        //   1MB, 2024 allocations
                        //   100MB+, 1000-1011 allocations
                        let base_period = 1000;
                        let dealloc_period = base_period + 1024 * MB as u64 / s;

                        MemoryPoolOptions {
                            page_size: s,
                            chunk_num_prealloc: 0,
                            pool_type: PoolType::ExclusivePages,
                            dealloc_period: Some(dealloc_period),
                        }
                    })
                    .collect()
            }
            MemoryConfiguration::Custom(pool_settings) => pool_settings,
        };

        for pool in pools.iter() {
            log::trace!("Using memory pool: \n {pool:?}");
        }

        Self::new(storage, pools, properties.alignment)
    }
202
203    /// Creates a new instance using the given storage, merging_strategy strategy and slice strategy.
204    pub fn new(mut storage: Storage, pools: Vec<MemoryPoolOptions>, memory_alignment: u64) -> Self {
205        let mut pools: Vec<_> = pools
206            .iter()
207            .map(|options| {
208                let mut pool = match options.pool_type {
209                    PoolType::SlicedPages {
210                        max_slice_size: max_slice,
211                    } => DynamicPool::Sliced(SlicedPool::new(
212                        options.page_size,
213                        max_slice,
214                        memory_alignment,
215                    )),
216                    PoolType::ExclusivePages => DynamicPool::Exclusive(ExclusiveMemoryPool::new(
217                        options.page_size,
218                        memory_alignment,
219                        options.dealloc_period.unwrap_or(u64::MAX),
220                    )),
221                };
222
223                for _ in 0..options.chunk_num_prealloc {
224                    pool.alloc(&mut storage, options.page_size);
225                }
226
227                pool
228            })
229            .collect();
230
231        pools.sort_by(|pool1, pool2| u64::cmp(&pool1.max_alloc_size(), &pool2.max_alloc_size()));
232
233        Self {
234            pools,
235            storage,
236            alloc_reserve_count: 0,
237        }
238    }
239
240    /// Cleanup allocations in pools that are deemed unnecessary.
241    pub fn cleanup(&mut self) {
242        for pool in self.pools.iter_mut() {
243            pool.cleanup(&mut self.storage, self.alloc_reserve_count);
244        }
245    }
246
247    /// Returns the storage from the specified binding
248    pub fn get(&mut self, binding: SliceBinding) -> StorageHandle {
249        self.pools
250            .iter()
251            .find_map(|p| p.get(&binding))
252            .expect("No handle found in memory pools")
253            .clone()
254    }
255
256    /// Returns the resource from the storage at the specified handle
257    pub fn get_resource(
258        &mut self,
259        binding: SliceBinding,
260        offset_start: Option<u64>,
261        offset_end: Option<u64>,
262    ) -> Storage::Resource {
263        let handle = self.get(binding);
264        let handle = match offset_start {
265            Some(offset) => handle.offset_start(offset),
266            None => handle,
267        };
268        let handle = match offset_end {
269            Some(offset) => handle.offset_end(offset),
270            None => handle,
271        };
272        self.storage().get(&handle)
273    }
274
275    /// Finds a spot in memory for a resource with the given size in bytes, and returns a handle to it
276    pub fn reserve(&mut self, size: u64, exclude: Option<&MemoryLock>) -> SliceHandle {
277        // If this happens every nanosecond, counts overflows after 585 years, so not worth thinking too
278        // hard about overflow here.
279        self.alloc_reserve_count += 1;
280
281        // Find first pool where size <= p.max_alloc with a binary search.
282        let pool_ind = self.pools.partition_point(|p| size > p.max_alloc_size());
283
284        // Ensure the pool index is in bounds, otherwise there isn't any pool that can fit the
285        // requested allocation
286        if pool_ind >= self.pools.len() {
287            panic!("Unable to find valid pool partition point: No memory pool big enough to reserve {size} bytes.");
288        }
289
290        let pool = &mut self.pools[pool_ind];
291        if pool.max_alloc_size() < size {
292            panic!("No memory pool big enough to reserve {size} bytes.");
293        }
294        pool.reserve(&mut self.storage, size, exclude)
295    }
296
297    /// Bypass the memory allocation algorithm to allocate data directly.
298    ///
299    /// # Notes
300    ///
301    /// Can be useful for servers that want specific control over memory.
302    pub fn alloc(&mut self, size: u64) -> SliceHandle {
303        // Find first pool where size <= p.max_alloc with a binary search.
304        let pool_ind = self.pools.partition_point(|p| size > p.max_alloc_size());
305        let pool = &mut self.pools[pool_ind];
306        if pool.max_alloc_size() < size {
307            panic!("No memory pool big enough to alloc {size} bytes.");
308        }
309        pool.alloc(&mut self.storage, size)
310    }
311
    /// Bypass the memory allocation algorithm to deallocate data directly.
    ///
    /// # Notes
    ///
    /// Can be useful for servers that want specific control over memory.
    pub fn dealloc(&mut self, _binding: SliceBinding) {
        // Can't dealloc slices. Intentionally a no-op: slices appear to be
        // released when their handles are dropped (see the reuse tests below),
        // with page-level reclamation handled by `cleanup`.
    }
320
    /// Fetch the storage used by the memory manager, as a mutable reference.
    ///
    /// # Notes
    ///
    /// The storage should probably not be used for allocations since the handles won't be
    /// compatible with the ones provided by the current trait. Prefer using the
    /// [alloc](MemoryManagement::alloc) and [dealloc](MemoryManagement::dealloc) functions.
    ///
    /// This is useful if you need to time the deallocations based on async computation, or to
    /// change the mode of storage for different reasons.
    pub fn storage(&mut self) -> &mut Storage {
        &mut self.storage
    }
334
335    /// Get the current memory usage.
336    pub fn memory_usage(&self) -> MemoryUsage {
337        self.pools.iter().map(|x| x.get_memory_usage()).fold(
338            MemoryUsage {
339                number_allocs: 0,
340                bytes_in_use: 0,
341                bytes_padding: 0,
342                bytes_reserved: 0,
343            },
344            |m1, m2| m1.combine(m2),
345        )
346    }
347
    /// Print out a report of the current memory usage.
    ///
    /// Only logs when the `std` feature is enabled; a no-op otherwise.
    pub fn print_memory_usage(&self) {
        #[cfg(feature = "std")]
        log::info!("{}", self.memory_usage());
    }
353}
354
355impl<Storage> core::fmt::Debug for MemoryManagement<Storage> {
356    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
357        f.write_str(
358            alloc::format!(
359                "DynamicMemoryManagement {:?}",
360                core::any::type_name::<Storage>(),
361            )
362            .as_str(),
363        )
364    }
365}
366
367#[cfg(test)]
368mod tests {
369    use super::*;
370    use crate::{memory_management::MemoryManagement, storage::BytesStorage};
371
372    // Test pools with slices.
373    #[test]
374    #[cfg(not(exclusive_memory_only))]
375    fn test_handle_mutability() {
376        let mut memory_management = MemoryManagement::from_configuration(
377            BytesStorage::default(),
378            MemoryDeviceProperties {
379                max_page_size: 128 * 1024 * 1024,
380                alignment: 32,
381            },
382            MemoryConfiguration::SubSlices,
383        );
384        let handle = memory_management.reserve(10, None);
385        let other_ref = handle.clone();
386        assert!(!handle.can_mut(), "Handle can't be mut when multiple ref.");
387        drop(other_ref);
388        assert!(handle.can_mut(), "Handle should be mut when only one ref.");
389    }
390
391    #[test]
392    fn alloc_two_chunks_on_one_page() {
393        let page_size = 2048;
394
395        let mut memory_management = MemoryManagement::new(
396            BytesStorage::default(),
397            vec![MemoryPoolOptions {
398                page_size,
399                chunk_num_prealloc: 0,
400                pool_type: PoolType::SlicedPages {
401                    max_slice_size: page_size,
402                },
403                dealloc_period: None,
404            }],
405            32,
406        );
407
408        let alloc_size = 512;
409        let _handle = memory_management.reserve(alloc_size, None);
410        let _new_handle = memory_management.reserve(alloc_size, None);
411
412        let usage = memory_management.memory_usage();
413        assert_eq!(usage.number_allocs, 2);
414        assert_eq!(usage.bytes_in_use, alloc_size * 2);
415        assert_eq!(usage.bytes_reserved, page_size);
416    }
417
418    #[test]
419    fn alloc_reuses_storage() {
420        // If no storage is re-used, this will allocate two pages.
421        let page_size = 512;
422
423        let mut memory_management = MemoryManagement::new(
424            BytesStorage::default(),
425            vec![MemoryPoolOptions {
426                page_size,
427                chunk_num_prealloc: 0,
428                pool_type: PoolType::SlicedPages {
429                    max_slice_size: page_size,
430                },
431                dealloc_period: None,
432            }],
433            32,
434        );
435
436        let alloc_size = 512;
437        let _handle = memory_management.reserve(alloc_size, None);
438        drop(_handle);
439        let _new_handle = memory_management.reserve(alloc_size, None);
440
441        let usage = memory_management.memory_usage();
442        assert_eq!(usage.number_allocs, 1);
443        assert_eq!(usage.bytes_in_use, alloc_size);
444        assert_eq!(usage.bytes_reserved, page_size);
445    }
446
447    #[test]
448    fn alloc_allocs_new_storage() {
449        let page_size = 1024;
450
451        let mut memory_management = MemoryManagement::new(
452            BytesStorage::default(),
453            vec![MemoryPoolOptions {
454                page_size,
455                chunk_num_prealloc: 0,
456                pool_type: PoolType::SlicedPages {
457                    max_slice_size: page_size,
458                },
459                dealloc_period: None,
460            }],
461            32,
462        );
463
464        let alloc_size = 768;
465        let _handle = memory_management.reserve(alloc_size, None);
466        let _new_handle = memory_management.reserve(alloc_size, None);
467
468        let usage = memory_management.memory_usage();
469        assert_eq!(usage.number_allocs, 2);
470        assert_eq!(usage.bytes_in_use, alloc_size * 2);
471        assert_eq!(usage.bytes_reserved, page_size * 2);
472    }
473
    #[test]
    fn alloc_respects_alignment_size() {
        let page_size = 500;
        let mut memory_management = MemoryManagement::new(
            BytesStorage::default(),
            vec![MemoryPoolOptions {
                page_size,
                chunk_num_prealloc: 0,
                pool_type: PoolType::SlicedPages {
                    max_slice_size: page_size,
                },
                dealloc_period: None,
            }],
            50,
        );
        let alloc_size = 40;
        let _handle = memory_management.reserve(alloc_size, None);
        let _new_handle = memory_management.reserve(alloc_size, None);
        let usage = memory_management.memory_usage();
        // Each 40-byte slice is padded up to the 50-byte alignment, i.e. 10
        // padding bytes per slice, 20 in total.
        assert_eq!(usage.bytes_padding, 10 * 2);
    }
496
497    #[test]
498    fn allocs_on_correct_page() {
499        let sizes = [100, 200, 300, 400];
500
501        let pools = sizes
502            .iter()
503            .map(|&size| MemoryPoolOptions {
504                page_size: size,
505                chunk_num_prealloc: 0,
506                pool_type: PoolType::SlicedPages {
507                    max_slice_size: size,
508                },
509                dealloc_period: None,
510            })
511            .collect();
512        let mut memory_management = MemoryManagement::new(BytesStorage::default(), pools, 10);
513        // Allocate one thing on each page.
514        let alloc_sizes = [50, 150, 250, 350];
515        let _handles = alloc_sizes.map(|s| memory_management.reserve(s, None));
516
517        let usage = memory_management.memory_usage();
518
519        // Total memory should be size of all pages, and no more.
520        assert_eq!(usage.bytes_in_use, alloc_sizes.iter().sum::<u64>());
521        assert_eq!(usage.bytes_reserved, sizes.iter().sum::<u64>());
522    }
523
524    #[test]
525    #[cfg(not(exclusive_memory_only))]
526    fn allocate_deallocate_reallocate() {
527        let mut memory_management = MemoryManagement::from_configuration(
528            BytesStorage::default(),
529            MemoryDeviceProperties {
530                max_page_size: 128 * 1024 * 1024,
531                alignment: 32,
532            },
533            MemoryConfiguration::SubSlices,
534        );
535        // Allocate a bunch
536        let handles: Vec<_> = (0..5)
537            .map(|i| memory_management.reserve(1000 * (i + 1), None))
538            .collect();
539        let usage_before = memory_management.memory_usage();
540        // Deallocate
541        drop(handles);
542        // Reallocate
543        let _new_handles: Vec<_> = (0..5)
544            .map(|i| memory_management.reserve(1000 * (i + 1), None))
545            .collect();
546        let usage_after = memory_management.memory_usage();
547        assert_eq!(usage_before.number_allocs, usage_after.number_allocs);
548        assert_eq!(usage_before.bytes_in_use, usage_after.bytes_in_use);
549        // Usage after can actually be _less_ because of defragging.
550        assert!(usage_before.bytes_reserved >= usage_after.bytes_reserved);
551    }
552
553    #[test]
554    #[cfg(not(exclusive_memory_only))]
555    fn test_fragmentation_resistance() {
556        let mut memory_management = MemoryManagement::from_configuration(
557            BytesStorage::default(),
558            MemoryDeviceProperties {
559                max_page_size: 128 * 1024 * 1024,
560                alignment: 32,
561            },
562            MemoryConfiguration::SubSlices,
563        );
564        // Allocate a mix of small and large chunks
565        let sizes = [50, 1000, 100, 5000, 200, 10000, 300];
566        let handles: Vec<_> = sizes
567            .iter()
568            .map(|&size| memory_management.reserve(size, None))
569            .collect();
570        let usage_before = memory_management.memory_usage();
571        // Deallocate every other allocation
572        for i in (0..handles.len()).step_by(2) {
573            drop(handles[i].clone());
574        }
575        // Reallocate similar sizes
576        for &size in &sizes[0..sizes.len() / 2] {
577            memory_management.reserve(size, None);
578        }
579        let usage_after = memory_management.memory_usage();
580        // Check that we haven't increased our memory usage significantly
581        assert!(usage_after.bytes_reserved <= (usage_before.bytes_reserved as f64 * 1.1) as u64);
582    }
583
584    // Test pools without slices. More or less same as tests above.
585    #[test]
586    fn noslice_test_handle_mutability() {
587        let mem_props = MemoryDeviceProperties {
588            max_page_size: 128 * 1024 * 1024,
589            alignment: 32,
590        };
591        let mut memory_management = MemoryManagement::from_configuration(
592            BytesStorage::default(),
593            mem_props,
594            MemoryConfiguration::ExclusivePages,
595        );
596        let handle = memory_management.reserve(10, None);
597        let other_ref = handle.clone();
598        assert!(!handle.can_mut(), "Handle can't be mut when multiple ref.");
599        drop(other_ref);
600        assert!(handle.can_mut(), "Handle should be mut when only one ref.");
601    }
602
603    #[test]
604    fn noslice_alloc_two_chunk() {
605        let page_size = 2048;
606
607        let mut memory_management = MemoryManagement::new(
608            BytesStorage::default(),
609            vec![MemoryPoolOptions {
610                page_size,
611                chunk_num_prealloc: 0,
612                pool_type: PoolType::ExclusivePages,
613                dealloc_period: None,
614            }],
615            32,
616        );
617
618        let alloc_size = 512;
619        let _handle = memory_management.reserve(alloc_size, None);
620        let _new_handle = memory_management.reserve(alloc_size, None);
621
622        let usage = memory_management.memory_usage();
623        assert_eq!(usage.number_allocs, 2);
624        assert_eq!(usage.bytes_in_use, alloc_size * 2);
625        assert_eq!(usage.bytes_reserved, page_size * 2);
626    }
627
628    #[test]
629    fn noslice_alloc_reuses_storage() {
630        // If no storage is re-used, this will allocate two pages.
631        let mut memory_management = MemoryManagement::new(
632            BytesStorage::default(),
633            vec![MemoryPoolOptions {
634                page_size: 512,
635                chunk_num_prealloc: 0,
636                pool_type: PoolType::ExclusivePages,
637                dealloc_period: None,
638            }],
639            32,
640        );
641
642        let alloc_size = 512;
643        let _handle = memory_management.reserve(alloc_size, None);
644        drop(_handle);
645        let _new_handle = memory_management.reserve(alloc_size, None);
646
647        let usage = memory_management.memory_usage();
648        assert_eq!(usage.number_allocs, 1);
649        assert_eq!(usage.bytes_in_use, alloc_size);
650        assert_eq!(usage.bytes_reserved, alloc_size);
651    }
652
653    #[test]
654    fn noslice_alloc_allocs_new_storage() {
655        let page_size = 1024;
656        let mut memory_management = MemoryManagement::new(
657            BytesStorage::default(),
658            vec![MemoryPoolOptions {
659                page_size,
660                chunk_num_prealloc: 0,
661                pool_type: PoolType::ExclusivePages,
662                dealloc_period: None,
663            }],
664            32,
665        );
666
667        let alloc_size = 768;
668        let _handle = memory_management.reserve(alloc_size, None);
669        let _new_handle = memory_management.reserve(alloc_size, None);
670        let usage = memory_management.memory_usage();
671        assert_eq!(usage.number_allocs, 2);
672        assert_eq!(usage.bytes_in_use, alloc_size * 2);
673        assert_eq!(usage.bytes_reserved, page_size * 2);
674    }
675
    #[test]
    fn noslice_alloc_respects_alignment_size() {
        let page_size = 500;
        let mut memory_management = MemoryManagement::new(
            BytesStorage::default(),
            vec![MemoryPoolOptions {
                page_size,
                chunk_num_prealloc: 0,
                pool_type: PoolType::ExclusivePages,
                dealloc_period: None,
            }],
            50,
        );
        let alloc_size = 40;
        let _handle = memory_management.reserve(alloc_size, None);
        let _new_handle = memory_management.reserve(alloc_size, None);
        let usage = memory_management.memory_usage();
        // Each 40-byte allocation is padded up to the 50-byte alignment, i.e.
        // 10 padding bytes per allocation, 20 in total.
        assert_eq!(usage.bytes_padding, 10 * 2);
    }
696
    #[test]
    fn noslice_allocs_on_correct_page() {
        // NOTE(review): despite the `noslice_` prefix, this test configures
        // `PoolType::SlicedPages`, unlike the other tests in this section which
        // use `PoolType::ExclusivePages` — confirm whether that is intended.
        let pools = [100, 200, 300, 400]
            .iter()
            .map(|&size| MemoryPoolOptions {
                page_size: size,
                chunk_num_prealloc: 0,
                pool_type: PoolType::SlicedPages {
                    max_slice_size: size,
                },
                dealloc_period: None,
            })
            .collect();
        let mut memory_management = MemoryManagement::new(BytesStorage::default(), pools, 10);
        // Allocate one thing on each page.
        let alloc_sizes = [50, 150, 250, 350];
        let _handles = alloc_sizes.map(|s| memory_management.reserve(s, None));
        let usage = memory_management.memory_usage();
        // Total memory should be size of all pages, and no more.
        assert_eq!(usage.bytes_in_use, alloc_sizes.iter().sum::<u64>());
    }
718
719    #[test]
720    fn noslice_allocate_deallocate_reallocate() {
721        let mut memory_management = MemoryManagement::from_configuration(
722            BytesStorage::default(),
723            MemoryDeviceProperties {
724                max_page_size: 128 * 1024 * 1024,
725                alignment: 32,
726            },
727            MemoryConfiguration::ExclusivePages,
728        );
729        // Allocate a bunch
730        let handles: Vec<_> = (0..5)
731            .map(|i| memory_management.reserve(1000 * (i + 1), None))
732            .collect();
733        let usage_before = memory_management.memory_usage();
734        // Deallocate
735        drop(handles);
736        // Reallocate
737        let _new_handles: Vec<_> = (0..5)
738            .map(|i| memory_management.reserve(1000 * (i + 1), None))
739            .collect();
740        let usage_after = memory_management.memory_usage();
741        assert_eq!(usage_before.number_allocs, usage_after.number_allocs);
742        assert_eq!(usage_before.bytes_in_use, usage_after.bytes_in_use);
743        assert_eq!(usage_before.bytes_reserved, usage_after.bytes_reserved);
744    }
745}