//! cubecl_runtime/memory_management/memory_manage.rs
//!
//! Reserves and keeps track of chunks of memory in the storage, and slices
//! upon these chunks.

1use super::{
2    MemoryConfiguration, MemoryPoolOptions, MemoryUsage, PoolType,
3    memory_pool::{ExclusiveMemoryPool, MemoryPool, PersistentPool, SlicedPool},
4};
5use crate::{
6    config::{
7        GlobalConfig,
8        memory::{MemoryLogLevel, PersistentMemory},
9    },
10    logging::ServerLogger,
11    memory_management::BytesFormat,
12    server::IoError,
13    storage::{ComputeStorage, StorageHandle},
14};
15
16use alloc::format;
17use alloc::string::{String, ToString};
18#[cfg(not(exclusive_memory_only))]
19use alloc::vec;
20use alloc::vec::Vec;
21use cubecl_common::{backtrace::BackTrace, stub::Arc};
22use cubecl_ir::MemoryDeviceProperties;
23
24pub use super::memory_pool::{SliceBinding, handle::*};
25
// These are 288 bytes vs 64 bytes. Adding boxing isn't really worth
// saving the 200 bytes.
#[allow(clippy::large_enum_variant)]
/// Runtime-selected pool implementation: either a sliced pool (sub-allocates
/// pages into multiple slices) or an exclusive pool (one allocation per page).
enum DynamicPool {
    Sliced(SlicedPool),
    Exclusive(ExclusiveMemoryPool),
}
33
34impl MemoryPool for DynamicPool {
35    fn accept(&self, size: u64) -> bool {
36        match self {
37            DynamicPool::Sliced(pool) => pool.accept(size),
38            DynamicPool::Exclusive(pool) => pool.accept(size),
39        }
40    }
41
42    fn get(&self, binding: &SliceBinding) -> Option<&StorageHandle> {
43        match self {
44            DynamicPool::Sliced(m) => m.get(binding),
45            DynamicPool::Exclusive(m) => m.get(binding),
46        }
47    }
48
49    #[cfg_attr(feature = "tracing", tracing::instrument(level = "trace", skip(self)))]
50    fn try_reserve(&mut self, size: u64) -> Option<SliceHandle> {
51        match self {
52            DynamicPool::Sliced(m) => m.try_reserve(size),
53            DynamicPool::Exclusive(m) => m.try_reserve(size),
54        }
55    }
56
57    #[cfg_attr(
58        feature = "tracing",
59        tracing::instrument(level = "trace", skip(self, storage))
60    )]
61    fn alloc<Storage: ComputeStorage>(
62        &mut self,
63        storage: &mut Storage,
64        size: u64,
65    ) -> Result<SliceHandle, IoError> {
66        match self {
67            DynamicPool::Sliced(m) => m.alloc(storage, size),
68            DynamicPool::Exclusive(m) => m.alloc(storage, size),
69        }
70    }
71
72    fn get_memory_usage(&self) -> MemoryUsage {
73        match self {
74            DynamicPool::Sliced(m) => m.get_memory_usage(),
75            DynamicPool::Exclusive(m) => m.get_memory_usage(),
76        }
77    }
78
79    fn cleanup<Storage: ComputeStorage>(
80        &mut self,
81        storage: &mut Storage,
82        alloc_nr: u64,
83        explicit: bool,
84    ) {
85        match self {
86            DynamicPool::Sliced(m) => m.cleanup(storage, alloc_nr, explicit),
87            DynamicPool::Exclusive(m) => m.cleanup(storage, alloc_nr, explicit),
88        }
89    }
90}
91
#[derive(Default, Clone, Copy, Debug)]
/// The mode of allocation used.
///
/// See [`MemoryManagement::reserve`] for how the mode steers allocations
/// between the persistent pool and the dynamic pools.
pub enum MemoryAllocationMode {
    /// Use the automatic memory management strategy for allocation.
    #[default]
    Auto,
    /// Use a persistent memory management strategy, meaning that all allocations are for data that is
    /// likely never going to be freed.
    Persistent,
}
102
/// Reserves and keeps track of chunks of memory in the storage, and slices upon these chunks.
pub struct MemoryManagement<Storage> {
    /// Name used to prefix memory log messages.
    name: String,
    /// Pool for long-lived allocations (see [`MemoryAllocationMode::Persistent`]).
    persistent: PersistentPool,
    /// Dynamic pools, queried first-fit in order by `reserve`.
    pools: Vec<DynamicPool>,
    /// Backing storage from which pages are allocated.
    storage: Storage,
    /// Monotonic count of `reserve` calls; passed to pools as `alloc_nr` on cleanup.
    alloc_reserve_count: u64,
    /// Current allocation mode.
    mode: MemoryAllocationMode,
    /// Persistent-memory policy from the global config; may pin `mode` (see `Self::mode`).
    config: PersistentMemory,
    /// Logger for memory log messages.
    logger: Arc<ServerLogger>,
}
114
/// Generates up to `max_buckets` pool sizes distributed exponentially between
/// `start_size` and `end_size`, each rounded up to `alignment`.
///
/// The returned list is strictly increasing (alignment can collapse adjacent
/// buckets, which are deduplicated). Assumes `start_size >= 1`,
/// `end_size >= start_size` and `alignment >= 1`.
fn generate_bucket_sizes(
    start_size: u64,
    end_size: u64,
    max_buckets: usize,
    alignment: u64,
) -> Vec<u64> {
    if max_buckets == 0 {
        return Vec::new();
    }
    if max_buckets == 1 {
        // A single bucket must cover the largest allocations. Previously this
        // case divided by zero below (0.0 / 0.0 = NaN), and `NaN as u64` is 0,
        // producing a bogus 0-sized bucket.
        let mut single = Vec::with_capacity(1);
        single.push(end_size.next_multiple_of(alignment));
        return single;
    }

    let mut buckets = Vec::with_capacity(max_buckets);
    let log_min = (start_size as f64).ln();
    let log_max = (end_size as f64).ln();
    let log_range = log_max - log_min;

    // Sizes are interpolated linearly in log-space, i.e. distributed
    // exponentially between start_size and end_size.
    for i in 0..max_buckets {
        let p = i as f64 / (max_buckets - 1) as f64;
        let log_size = log_min + log_range * p;
        let size = log_size.exp() as u64;
        let aligned_size = size.next_multiple_of(alignment);
        buckets.push(aligned_size);
    }

    // The list is monotonically non-decreasing, so one dedup pass removes all
    // buckets that alignment collapsed to the same size.
    buckets.dedup();
    buckets
}
139
/// Scale (in bytes) that stretches the dealloc period for bigger pools.
/// NOTE(review): despite the `_MB` suffix this is 1024^3 = 1 GiB — confirm intended.
const DEALLOC_SCALE_MB: u64 = 1024 * 1024 * 1024;
/// Base deallocation period handed to exclusive pools (units defined by
/// `ExclusiveMemoryPool`; presumably counted in allocations — TODO confirm).
const BASE_DEALLOC_PERIOD: u64 = 5000;
142
/// The options for creating a new [`MemoryManagement`] instance.
#[derive(Debug)]
pub struct MemoryManagementOptions {
    /// The name of the memory management.
    name: String,
    /// The [`MemoryAllocationOption`] used by this instance
    /// (either fixed, or resolved from the global config).
    memory: MemoryAllocationOption,
}
151
152impl MemoryManagementOptions {
153    /// Creates a new [`MemoryManagementOptions`].
154    pub fn new<S: Into<String>>(name: S) -> Self {
155        Self {
156            name: name.into(),
157            memory: MemoryAllocationOption::FromConfig,
158        }
159    }
160
161    /// Forces the [`MemoryAllocationMode`] during execution to always be the provided one.
162    pub fn mode(mut self, mode: MemoryAllocationMode) -> Self {
163        self.memory = MemoryAllocationOption::Provided(mode);
164        self
165    }
166}
167
#[derive(Default, Debug)]
/// Determines which [`MemoryAllocationMode`] is used during allocations.
enum MemoryAllocationOption {
    #[default]
    /// Uses the [`GlobalConfig`] to determine the mode of allocation.
    FromConfig,
    /// Use the provided [`MemoryAllocationMode`].
    Provided(MemoryAllocationMode),
}
177
178impl<Storage: ComputeStorage> MemoryManagement<Storage> {
    /// Creates a new [`MemoryManagement`] from the device properties and the
    /// requested [`MemoryConfiguration`], building the corresponding pools.
    pub fn from_configuration(
        storage: Storage,
        properties: &MemoryDeviceProperties,
        config: MemoryConfiguration,
        logger: Arc<ServerLogger>,
        options: MemoryManagementOptions,
    ) -> Self {
        let pool_options = match config {
            #[cfg(not(exclusive_memory_only))]
            MemoryConfiguration::SubSlices => {
                // Round chunk size to be aligned.
                let memory_alignment = properties.alignment;
                let max_page = properties.max_page_size;
                let mut pools = Vec::new();

                const MB: u64 = 1024 * 1024;

                // Add in a pool for allocations that are smaller than the min alignment,
                // as they can't use offsets at all (on wgpu at least).
                pools.push(MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages { max_alloc_size: 0 },
                    dealloc_period: None,
                });

                let mut current = max_page;
                let mut max_sizes = vec![];
                let mut page_sizes = vec![];
                let mut base = pools.len() as u32;

                // Walk page sizes down from max_page in steps of /4 until 32 MB,
                // shrinking the max slice size faster (via the growing `base`
                // exponent) than the page size.
                while current >= 32 * MB {
                    current /= 4;

                    // Make sure every pool has an aligned size.
                    current = current.next_multiple_of(memory_alignment);

                    max_sizes.push(current / 2u64.pow(base));
                    page_sizes.push(current);
                    base += 1;
                }

                // The loop produced sizes big-to-small; reverse so the pools are
                // registered small-to-big (first-fit order used by `reserve`).
                max_sizes.reverse();
                page_sizes.reverse();

                for i in 0..max_sizes.len() {
                    let max = max_sizes[i];
                    let page_size = page_sizes[i];

                    pools.push(MemoryPoolOptions {
                        // Creating max slices lower than the chunk size reduces fragmentation.
                        pool_type: PoolType::SlicedPages {
                            page_size,
                            max_slice_size: max,
                        },
                        dealloc_period: None,
                    });
                }

                // Add pools from big to small.
                // Final catch-all pool: the largest page size rounded *down* to
                // the alignment, with slices as big as the whole page.
                pools.push(MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size: max_page / memory_alignment * memory_alignment,
                        max_slice_size: max_page / memory_alignment * memory_alignment,
                    },
                    dealloc_period: None,
                });
                pools
            }
            MemoryConfiguration::ExclusivePages => {
                // Add all bin sizes. Nb: because of alignment some buckets
                // end up as the same size, so only want unique ones,
                // but also keep the order, so a BTree will do.
                const MIN_BUCKET_SIZE: u64 = 1024 * 32;
                const NUM_POOLS: usize = 24;

                let sizes = generate_bucket_sizes(
                    MIN_BUCKET_SIZE,
                    properties.max_page_size,
                    NUM_POOLS,
                    properties.alignment,
                );

                sizes
                    .iter()
                    .map(|&size| {
                        // NOTE(review): `.round()` applies to the scale factor
                        // `(1.0 + size/scale)` only, not to the product, so the
                        // period steps in whole multiples of BASE_DEALLOC_PERIOD.
                        // Confirm this stepping (vs. smooth scaling) is intended.
                        let dealloc_period = (BASE_DEALLOC_PERIOD as f64
                            * (1.0 + size as f64 / (DEALLOC_SCALE_MB as f64)).round())
                            as u64;

                        MemoryPoolOptions {
                            pool_type: PoolType::ExclusivePages {
                                max_alloc_size: size,
                            },
                            dealloc_period: Some(dealloc_period),
                        }
                    })
                    .collect()
            }
            MemoryConfiguration::Custom { pool_options } => pool_options,
        };

        // Log the chosen pool layout unless memory logging is disabled.
        logger.log_memory(
            |level| !matches!(level, MemoryLogLevel::Disabled),
            || {
                let mut msg = String::new();
                for pool in pool_options.iter() {
                    msg += &format!("[{}] Using memory pool: \n {pool:?}", options.name);
                }
                msg
            },
        );

        // Instantiate the concrete pools from their options.
        let pools: Vec<_> = pool_options
            .iter()
            .map(|options| match options.pool_type {
                PoolType::SlicedPages {
                    page_size,
                    max_slice_size,
                } => DynamicPool::Sliced(SlicedPool::new(
                    page_size,
                    max_slice_size,
                    properties.alignment,
                )),
                PoolType::ExclusivePages { max_alloc_size } => {
                    DynamicPool::Exclusive(ExclusiveMemoryPool::new(
                        max_alloc_size,
                        properties.alignment,
                        // No dealloc period: effectively never deallocate on its own.
                        options.dealloc_period.unwrap_or(u64::MAX),
                    ))
                }
            })
            .collect();

        let config = GlobalConfig::get().memory.persistent_memory.clone();

        // The global config may override the requested allocation mode.
        let mode = match options.memory {
            MemoryAllocationOption::Provided(mode) => mode,
            MemoryAllocationOption::FromConfig => match config {
                PersistentMemory::Enabled => MemoryAllocationMode::Auto,
                PersistentMemory::Disabled => MemoryAllocationMode::Auto,
                PersistentMemory::Enforced => MemoryAllocationMode::Persistent,
            },
        };

        Self {
            name: options.name,
            persistent: PersistentPool::new(properties.max_page_size, properties.alignment),
            pools,
            storage,
            alloc_reserve_count: 0,
            mode,
            config,
            logger,
        }
    }
334
335    /// Change the mode of allocation.
336    pub fn mode(&mut self, mode: MemoryAllocationMode) {
337        // We override the mode based on the cubecl config.
338        let mode = match self.config {
339            PersistentMemory::Enabled => mode,
340            PersistentMemory::Disabled | PersistentMemory::Enforced => return,
341        };
342
343        self.logger.log_memory(
344            |level| !matches!(level, MemoryLogLevel::Disabled),
345            || {
346                format!(
347                    "[{}] Setting memory allocation mode: from {:?} => {mode:?}",
348                    self.name, self.mode
349                )
350            },
351        );
352        self.mode = mode;
353    }
354
355    /// Cleanup allocations in pools that are deemed unnecessary.
356    pub fn cleanup(&mut self, explicit: bool) {
357        self.logger.log_memory(
358            |level| !matches!(level, MemoryLogLevel::Disabled) && explicit,
359            || "Manual memory cleanup ...".to_string(),
360        );
361
362        self.persistent
363            .cleanup(&mut self.storage, self.alloc_reserve_count, explicit);
364
365        for pool in self.pools.iter_mut() {
366            pool.cleanup(&mut self.storage, self.alloc_reserve_count, explicit);
367        }
368    }
369
370    /// Returns the storage from the specified binding
371    pub fn get(&mut self, binding: SliceBinding) -> Option<StorageHandle> {
372        if let Some(val) = self.persistent.get(&binding) {
373            return Some(val.clone());
374        }
375
376        self.pools.iter().find_map(|p| p.get(&binding)).cloned()
377    }
378
379    /// Returns the resource from the storage at the specified handle
380    pub fn get_resource(
381        &mut self,
382        binding: SliceBinding,
383        offset_start: Option<u64>,
384        offset_end: Option<u64>,
385    ) -> Option<Storage::Resource> {
386        let handle = self.get(binding);
387
388        handle.map(|handle| {
389            let handle = match offset_start {
390                Some(offset) => handle.offset_start(offset),
391                None => handle,
392            };
393            let handle = match offset_end {
394                Some(offset) => handle.offset_end(offset),
395                None => handle,
396            };
397            self.storage().get(&handle)
398        })
399    }
400
    /// Finds a spot in memory for a resource with the given size in bytes, and returns a handle to it
    #[cfg_attr(feature = "tracing", tracing::instrument(level = "trace", skip(self)))]
    pub fn reserve(&mut self, size: u64) -> Result<SliceHandle, IoError> {
        // If this happens every nanosecond, counts overflows after 585 years, so not worth thinking too
        // hard about overflow here.
        self.alloc_reserve_count += 1;

        // 1) Try to reuse an existing persistent allocation.
        if let Some(val) = self.persistent.try_reserve(size) {
            self.logger.log_memory(
                |level| matches!(level, MemoryLogLevel::Full),
                || {
                    format!(
                        "[{}] Reserved memory {size} using persistent memory",
                        self.name
                    )
                },
            );
            return Ok(val);
        }

        // 2) Allocate persistently when the mode demands it, or when the
        //    persistent pool already tracks this size.
        if matches!(self.mode, MemoryAllocationMode::Persistent) || self.persistent.has_size(size) {
            let allocated = self.persistent.alloc(&mut self.storage, size);

            self.logger.log_memory(
                |level| !matches!(level, MemoryLogLevel::Disabled),
                || {
                    format!(
                        "[{}] Allocated a new memory page using persistent memory, \n{}",
                        self.name, self,
                    )
                },
            );
            return allocated;
        }

        // 3) Fall back to the dynamic pools.
        // NOTE(review): this logs "Reserved" before the reservation is actually
        // attempted (and even when a fresh alloc happens instead) — confirm
        // the log placement is intended.
        self.logger.log_memory(
            |level| matches!(level, MemoryLogLevel::Full),
            || {
                format!(
                    "[{}] Reserved memory {} using dynamic pool",
                    self.name,
                    BytesFormat::new(size)
                )
            },
        );

        // Find first pool that fits this allocation
        let pool = self
            .pools
            .iter_mut()
            .find(|p| p.accept(size))
            .ok_or(IoError::BufferTooBig {
                size,
                backtrace: BackTrace::capture(),
            })?;

        // Reuse already-allocated memory from the pool when possible.
        if let Some(slice) = pool.try_reserve(size) {
            return Ok(slice);
        }

        // Otherwise allocate a fresh page from the storage.
        let allocated = pool.alloc(&mut self.storage, size);

        self.logger.log_memory(
            |level| matches!(level, MemoryLogLevel::Full),
            || {
                format!(
                    "[{}], Allocated a new memory page, current usage: \n{}",
                    self.name, self
                )
            },
        );

        allocated
    }
475
    /// Fetch the storage used by the memory manager.
    ///
    /// # Notes
    ///
    /// The storage should probably not be used for allocations since the handles won't be
    /// compatible with the ones provided by the current trait. Prefer using the
    /// [alloc](ComputeStorage::alloc) and [dealloc](ComputeStorage::dealloc) functions.
    ///
    /// This is useful if you need to time the deallocations based on async computation, or to
    /// change the mode of storage for different reasons.
    pub fn storage(&mut self) -> &mut Storage {
        // Direct mutable access; callers are trusted per the notes above.
        &mut self.storage
    }
489
490    /// Get the current memory usage.
491    pub fn memory_usage(&self) -> MemoryUsage {
492        let memory_usage = self.pools.iter().map(|x| x.get_memory_usage()).fold(
493            MemoryUsage {
494                number_allocs: 0,
495                bytes_in_use: 0,
496                bytes_padding: 0,
497                bytes_reserved: 0,
498            },
499            |m1, m2| m1.combine(m2),
500        );
501        memory_usage.combine(self.persistent.get_memory_usage())
502    }
503
    /// Print out a report of the current memory usage.
    ///
    /// No-op when the `std` feature is disabled.
    pub fn print_memory_usage(&self) {
        #[cfg(feature = "std")]
        log::info!("{}", self.memory_usage());
    }
509}
510impl<Storage: ComputeStorage> core::fmt::Display for MemoryManagement<Storage> {
511    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
512        f.write_str("\n# MemoryManagement\n\n")?;
513        f.write_fmt(format_args!(" - name: {:?}\n", self.name))?;
514        f.write_fmt(format_args!("\n## Persistent\n\n{}", self.persistent))?;
515        f.write_str("\n## Dynamic\n\n")?;
516
517        for pool in self.pools.iter() {
518            match pool {
519                DynamicPool::Sliced(pool) => f.write_fmt(format_args!("{pool}\n"))?,
520                DynamicPool::Exclusive(pool) => f.write_fmt(format_args!("{pool}\n"))?,
521            }
522        }
523        let memory_usage = self.memory_usage();
524        f.write_fmt(format_args!("\n## Summary\n\n{memory_usage}"))?;
525
526        Ok(())
527    }
528}
529
530impl<Storage> core::fmt::Debug for MemoryManagement<Storage> {
531    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
532        f.write_str(
533            alloc::format!(
534                "DynamicMemoryManagement {:?}",
535                core::any::type_name::<Storage>(),
536            )
537            .as_str(),
538        )
539    }
540}
541
542#[cfg(test)]
543mod tests {
544    use super::*;
545    use crate::{memory_management::MemoryManagement, storage::BytesStorage};
546    use alloc::vec;
547
    // Properties shared by most tests: a large max page with a small alignment.
    const DUMMY_MEM_PROPS: MemoryDeviceProperties = MemoryDeviceProperties {
        max_page_size: 128 * 1024 * 1024,
        alignment: 32,
    };

    // Default test options: allocation mode resolved from the global config.
    fn options() -> MemoryManagementOptions {
        MemoryManagementOptions {
            name: "test".into(),
            memory: MemoryAllocationOption::FromConfig,
        }
    }

    // Test pools with slices.
    #[test_log::test]
    #[cfg(not(exclusive_memory_only))]
    fn test_handle_mutability() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::SubSlices,
            Arc::new(ServerLogger::default()),
            options(),
        );
        let handle = memory_management.reserve(10).unwrap();
        let other_ref = handle.clone();
        // A handle is only mutable while it is the sole reference to its slice.
        assert!(!handle.can_mut(), "Handle can't be mut when multiple ref.");
        drop(other_ref);
        assert!(handle.can_mut(), "Handle should be mut when only one ref.");
    }

    // Test pools with slices.
    #[test_log::test]
    #[cfg(not(exclusive_memory_only))]
    fn test_memory_usage() {
        let max_page_size = 512;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages {
                        max_alloc_size: max_page_size,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );
        let handle = memory_management.reserve(100);
        let usage = memory_management.memory_usage();

        assert_eq!(usage.bytes_in_use, 100);
        assert!(usage.bytes_reserved >= 100 && usage.bytes_reserved <= max_page_size);

        // Drop and re-alloc.
        drop(handle);
        let _handle = memory_management.reserve(100);
        let usage_new = memory_management.memory_usage();
        // Reallocating the same size must reuse the page: usage unchanged.
        assert_eq!(usage, usage_new);
    }
610
    #[test_log::test]
    fn alloc_two_chunks_on_one_page() {
        let page_size = 2048;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );

        let alloc_size = 512;
        let _handle = memory_management.reserve(alloc_size);
        let _new_handle = memory_management.reserve(alloc_size);

        // Both 512-byte slices fit on the single 2048-byte page.
        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 2);
        assert_eq!(usage.bytes_in_use, alloc_size * 2);
        assert_eq!(usage.bytes_reserved, page_size);
    }

    #[test_log::test]
    fn alloc_reuses_storage() {
        // If no storage is re-used, this will allocate two pages.
        let page_size = 512;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );

        let alloc_size = 512;
        let _handle = memory_management.reserve(alloc_size);
        drop(_handle);
        let _new_handle = memory_management.reserve(alloc_size);

        // The dropped slice's page is reused, so only one page is allocated.
        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 1);
        assert_eq!(usage.bytes_in_use, alloc_size);
        assert_eq!(usage.bytes_reserved, page_size);
    }

    #[test_log::test]
    fn alloc_allocs_new_storage() {
        let page_size = 1024;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );

        let alloc_size = 768;
        let _handle = memory_management.reserve(alloc_size);
        let _new_handle = memory_management.reserve(alloc_size);

        // 768 + 768 does not fit on one 1024-byte page, so a second page is required.
        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 2);
        assert_eq!(usage.bytes_in_use, alloc_size * 2);
        assert_eq!(usage.bytes_reserved, page_size * 2);
    }
702
    #[test_log::test]
    fn alloc_respects_alignment_size() {
        let page_size = 500;
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: page_size,
                alignment: 50,
            },
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );
        let alloc_size = 40;
        let _handle = memory_management.reserve(alloc_size);
        let _new_handle = memory_management.reserve(alloc_size);
        let usage = memory_management.memory_usage();
        // Each 40-byte slice is padded up to the 50-byte alignment:
        // 10 padding bytes per slice, 20 bytes total.
        assert_eq!(usage.bytes_padding, 10 * 2);
    }

    #[test_log::test]
    fn allocs_on_correct_page() {
        let sizes = [100, 200, 300, 400];

        // One sliced pool per page size, each holding exactly one page.
        let pools = sizes
            .iter()
            .map(|size| MemoryPoolOptions {
                pool_type: PoolType::SlicedPages {
                    page_size: *size,
                    max_slice_size: *size,
                },
                dealloc_period: None,
            })
            .collect();
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: 128 * 1024 * 1024,
                alignment: 10,
            },
            MemoryConfiguration::Custom {
                pool_options: pools,
            },
            Arc::new(ServerLogger::default()),
            options(),
        );
        // Allocate one thing on each page.
        let alloc_sizes = [50, 150, 250, 350];
        let _handles = alloc_sizes.map(|s| memory_management.reserve(s));

        let usage = memory_management.memory_usage();

        // Every allocation is fully in use; the reserved bytes cover at least
        // one page per pool.
        assert_eq!(usage.bytes_in_use, alloc_sizes.iter().sum::<u64>());
        assert!(usage.bytes_reserved >= sizes.iter().sum::<u64>());
    }
768
769    #[test_log::test]
770    #[cfg(not(exclusive_memory_only))]
771    fn allocate_deallocate_reallocate() {
772        let mut memory_management = MemoryManagement::from_configuration(
773            BytesStorage::default(),
774            &MemoryDeviceProperties {
775                max_page_size: 128 * 1024 * 1024,
776                alignment: 32,
777            },
778            MemoryConfiguration::SubSlices,
779            Arc::new(ServerLogger::default()),
780            options(),
781        );
782        // Allocate a bunch
783        let handles: Vec<_> = (0..5)
784            .map(|i| memory_management.reserve(1000 * (i + 1)))
785            .collect();
786        let usage_before = memory_management.memory_usage();
787        // Deallocate
788        drop(handles);
789        // Reallocate
790        let _new_handles: Vec<_> = (0..5)
791            .map(|i| memory_management.reserve(1000 * (i + 1)))
792            .collect();
793        let usage_after = memory_management.memory_usage();
794        assert_eq!(usage_before.number_allocs, usage_after.number_allocs);
795        assert_eq!(usage_before.bytes_in_use, usage_after.bytes_in_use);
796        // Usage after can actually be _less_ because of defragging.
797        assert!(usage_before.bytes_reserved >= usage_after.bytes_reserved);
798    }
799
800    #[test_log::test]
801    #[cfg(not(exclusive_memory_only))]
802    fn test_fragmentation_resistance() {
803        let mut memory_management = MemoryManagement::from_configuration(
804            BytesStorage::default(),
805            &MemoryDeviceProperties {
806                max_page_size: 128 * 1024 * 1024,
807                alignment: 32,
808            },
809            MemoryConfiguration::SubSlices,
810            Arc::new(ServerLogger::default()),
811            options(),
812        );
813        // Allocate a mix of small and large chunks
814        let sizes = [50, 1000, 100, 5000, 200, 10000, 300];
815        let handles: Vec<_> = sizes
816            .iter()
817            .map(|&size| memory_management.reserve(size).unwrap())
818            .collect();
819        let usage_before = memory_management.memory_usage();
820        // Deallocate every other allocation
821        for i in (0..handles.len()).step_by(2) {
822            drop(handles[i].clone());
823        }
824        // Reallocate similar sizes
825        for &size in &sizes[0..sizes.len() / 2] {
826            memory_management.reserve(size).unwrap();
827        }
828        let usage_after = memory_management.memory_usage();
829        // Check that we haven't increased our memory usage significantly
830        assert!(usage_after.bytes_reserved <= (usage_before.bytes_reserved as f64 * 1.1) as u64);
831    }
832
    // Test pools without slices. More or less same as tests above.
    #[test_log::test]
    fn noslice_test_handle_mutability() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &(MemoryDeviceProperties {
                max_page_size: 128 * 1024 * 1024,
                alignment: 32,
            }),
            MemoryConfiguration::ExclusivePages,
            Arc::new(ServerLogger::default()),
            options(),
        );
        let handle = memory_management.reserve(10).unwrap();
        let other_ref = handle.clone();
        // A handle is only mutable while it is the sole reference to its slice.
        assert!(!handle.can_mut(), "Handle can't be mut when multiple ref.");
        drop(other_ref);
        assert!(handle.can_mut(), "Handle should be mut when only one ref.");
    }

    #[test_log::test]
    fn noslice_alloc_two_chunk() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages {
                        max_alloc_size: 1024,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );

        let alloc_size = 512;
        let _handle = memory_management.reserve(alloc_size);
        let _new_handle = memory_management.reserve(alloc_size);

        // Exclusive pages: each allocation gets its own page.
        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 2);
        assert_eq!(usage.bytes_in_use, alloc_size * 2);
        assert!(usage.bytes_reserved >= alloc_size * 2);
    }
879
880    #[test_log::test]
881    fn noslice_alloc_reuses_storage() {
882        // If no storage is re-used, this will allocate two pages.
883        let mut memory_management = MemoryManagement::from_configuration(
884            BytesStorage::default(),
885            &DUMMY_MEM_PROPS,
886            MemoryConfiguration::Custom {
887                pool_options: vec![MemoryPoolOptions {
888                    pool_type: PoolType::ExclusivePages {
889                        max_alloc_size: 1024,
890                    },
891                    dealloc_period: None,
892                }],
893            },
894            Arc::new(ServerLogger::default()),
895            options(),
896        );
897
898        let alloc_size = 512;
899        let _handle = memory_management.reserve(alloc_size);
900        drop(_handle);
901        let _new_handle = memory_management.reserve(alloc_size);
902
903        let usage = memory_management.memory_usage();
904        assert_eq!(usage.number_allocs, 1);
905        assert_eq!(usage.bytes_in_use, alloc_size);
906        assert!(usage.bytes_reserved >= alloc_size);
907    }
908
909    #[test_log::test]
910    fn noslice_alloc_allocs_new_storage() {
911        let mut memory_management = MemoryManagement::from_configuration(
912            BytesStorage::default(),
913            &DUMMY_MEM_PROPS,
914            MemoryConfiguration::Custom {
915                pool_options: vec![MemoryPoolOptions {
916                    pool_type: PoolType::ExclusivePages {
917                        max_alloc_size: 1024,
918                    },
919                    dealloc_period: None,
920                }],
921            },
922            Arc::new(ServerLogger::default()),
923            options(),
924        );
925
926        let alloc_size = 768;
927        let _handle = memory_management.reserve(alloc_size);
928        let _new_handle = memory_management.reserve(alloc_size);
929        let usage = memory_management.memory_usage();
930        assert_eq!(usage.number_allocs, 2);
931        assert_eq!(usage.bytes_in_use, alloc_size * 2);
932        assert!(usage.bytes_reserved >= alloc_size * 2);
933    }
934
935    #[test_log::test]
936    fn noslice_alloc_respects_alignment_size() {
937        let mut memory_management = MemoryManagement::from_configuration(
938            BytesStorage::default(),
939            &MemoryDeviceProperties {
940                max_page_size: DUMMY_MEM_PROPS.max_page_size,
941                alignment: 50,
942            },
943            MemoryConfiguration::Custom {
944                pool_options: vec![MemoryPoolOptions {
945                    pool_type: PoolType::ExclusivePages {
946                        max_alloc_size: 50 * 20,
947                    },
948                    dealloc_period: None,
949                }],
950            },
951            Arc::new(ServerLogger::default()),
952            options(),
953        );
954        let alloc_size = 40;
955        let _handle = memory_management.reserve(alloc_size);
956        let _new_handle = memory_management.reserve(alloc_size);
957        let usage = memory_management.memory_usage();
958        // Each slice should be aligned to 60 bytes, so 20 padding bytes.
959        assert_eq!(usage.bytes_padding, 10 * 2);
960    }
961
962    #[test_log::test]
963    fn noslice_allocs_on_correct_page() {
964        let pools = [100, 200, 300, 400]
965            .iter()
966            .map(|&size| MemoryPoolOptions {
967                pool_type: PoolType::SlicedPages {
968                    page_size: size,
969                    max_slice_size: size,
970                },
971                dealloc_period: None,
972            })
973            .collect();
974        let mut memory_management = MemoryManagement::from_configuration(
975            BytesStorage::default(),
976            &MemoryDeviceProperties {
977                max_page_size: DUMMY_MEM_PROPS.max_page_size,
978                alignment: 10,
979            },
980            MemoryConfiguration::Custom {
981                pool_options: pools,
982            },
983            Arc::new(ServerLogger::default()),
984            options(),
985        );
986        // Allocate one thing on each page.
987        let alloc_sizes = [50, 150, 250, 350];
988        let _handles = alloc_sizes.map(|s| memory_management.reserve(s));
989        let usage = memory_management.memory_usage();
990        // Total memory should be size of all pages, and no more.
991        assert_eq!(usage.bytes_in_use, alloc_sizes.iter().sum::<u64>());
992    }
993
994    #[test_log::test]
995    fn noslice_allocate_deallocate_reallocate() {
996        let mut memory_management = MemoryManagement::from_configuration(
997            BytesStorage::default(),
998            &MemoryDeviceProperties {
999                max_page_size: 128 * 1024 * 1024,
1000                alignment: 32,
1001            },
1002            MemoryConfiguration::ExclusivePages,
1003            Arc::new(ServerLogger::default()),
1004            options(),
1005        );
1006        // Allocate a bunch
1007        let handles: Vec<_> = (0..5)
1008            .map(|i| memory_management.reserve(1000 * (i + 1)))
1009            .collect();
1010        let usage_before = memory_management.memory_usage();
1011        // Deallocate
1012        drop(handles);
1013        // Reallocate
1014        let _new_handles: Vec<_> = (0..5)
1015            .map(|i| memory_management.reserve(1000 * (i + 1)))
1016            .collect();
1017        let usage_after = memory_management.memory_usage();
1018        assert_eq!(usage_before.number_allocs, usage_after.number_allocs);
1019        assert_eq!(usage_before.bytes_in_use, usage_after.bytes_in_use);
1020        assert_eq!(usage_before.bytes_reserved, usage_after.bytes_reserved);
1021    }
1022}