cubecl_runtime/memory_management/
memory_manage.rs

1use super::{
2    MemoryConfiguration, MemoryDeviceProperties, MemoryPoolOptions, MemoryUsage, PoolType,
3    memory_pool::{ExclusiveMemoryPool, MemoryPool, PersistentPool, SlicedPool},
4};
5use crate::{
6    config::{
7        GlobalConfig,
8        memory::{MemoryLogLevel, PersistentMemory},
9    },
10    logging::ServerLogger,
11    memory_management::BytesFormat,
12    server::IoError,
13    storage::{ComputeStorage, StorageHandle},
14};
15
16use alloc::format;
17use alloc::string::{String, ToString};
18#[cfg(not(exclusive_memory_only))]
19use alloc::vec;
20use alloc::vec::Vec;
21use cubecl_common::{backtrace::BackTrace, stub::Arc};
22
23pub use super::memory_pool::{SliceBinding, handle::*};
24
// These are 288 bytes vs 64 bytes. Adding boxing isn't really worth
// saving the 200 bytes.
#[allow(clippy::large_enum_variant)]
/// Runtime-selected pool implementation, dispatching to either a sliced
/// pool (multiple slices per page) or an exclusive pool (one allocation per page).
enum DynamicPool {
    /// Pool whose pages are subdivided into slices (see [SlicedPool]).
    Sliced(SlicedPool),
    /// Pool with one allocation per page (see [ExclusiveMemoryPool]).
    Exclusive(ExclusiveMemoryPool),
}
32
33impl MemoryPool for DynamicPool {
34    fn accept(&self, size: u64) -> bool {
35        match self {
36            DynamicPool::Sliced(pool) => pool.accept(size),
37            DynamicPool::Exclusive(pool) => pool.accept(size),
38        }
39    }
40
41    fn get(&self, binding: &SliceBinding) -> Option<&StorageHandle> {
42        match self {
43            DynamicPool::Sliced(m) => m.get(binding),
44            DynamicPool::Exclusive(m) => m.get(binding),
45        }
46    }
47
48    fn try_reserve(&mut self, size: u64) -> Option<SliceHandle> {
49        match self {
50            DynamicPool::Sliced(m) => m.try_reserve(size),
51            DynamicPool::Exclusive(m) => m.try_reserve(size),
52        }
53    }
54
55    fn alloc<Storage: ComputeStorage>(
56        &mut self,
57        storage: &mut Storage,
58        size: u64,
59    ) -> Result<SliceHandle, IoError> {
60        match self {
61            DynamicPool::Sliced(m) => m.alloc(storage, size),
62            DynamicPool::Exclusive(m) => m.alloc(storage, size),
63        }
64    }
65
66    fn get_memory_usage(&self) -> MemoryUsage {
67        match self {
68            DynamicPool::Sliced(m) => m.get_memory_usage(),
69            DynamicPool::Exclusive(m) => m.get_memory_usage(),
70        }
71    }
72
73    fn cleanup<Storage: ComputeStorage>(
74        &mut self,
75        storage: &mut Storage,
76        alloc_nr: u64,
77        explicit: bool,
78    ) {
79        match self {
80            DynamicPool::Sliced(m) => m.cleanup(storage, alloc_nr, explicit),
81            DynamicPool::Exclusive(m) => m.cleanup(storage, alloc_nr, explicit),
82        }
83    }
84}
85
#[derive(Default, Clone, Copy, Debug)]
/// The mode of allocation used.
pub enum MemoryAllocationMode {
    /// Use the automatic memory management strategy for allocation,
    /// picking a dynamic pool based on the requested size.
    #[default]
    Auto,
    /// Use a persistent memory management strategy, meaning that all allocations are for data that is
    /// likely never going to be freed.
    Persistent,
}
96
/// Reserves and keeps track of chunks of memory in the storage, and slices upon these chunks.
pub struct MemoryManagement<Storage> {
    // Name used to tag every log message emitted by this instance.
    name: String,
    // Pool for long-lived allocations, always checked first in `reserve`.
    persistent: PersistentPool,
    // Dynamic pools; `reserve` picks the first pool that accepts a size.
    pools: Vec<DynamicPool>,
    // Backing storage performing the actual allocations.
    storage: Storage,
    // Monotonic count of `reserve` calls, passed to pools for cleanup timing.
    alloc_reserve_count: u64,
    // Current allocation mode (auto vs persistent).
    mode: MemoryAllocationMode,
    // Persistent-memory policy resolved from the global config at creation.
    config: PersistentMemory,
    logger: Arc<ServerLogger>,
}
108
/// Generates up to `max_buckets` bucket sizes spaced exponentially (uniform in
/// log space) between `start_size` and `end_size`, each rounded up to
/// `alignment`.
///
/// Rounding to the alignment can make neighbouring buckets collide, so
/// consecutive duplicates are removed; the result may therefore contain fewer
/// than `max_buckets` entries. The returned sizes are in ascending order.
fn generate_bucket_sizes(
    start_size: u64,
    end_size: u64,
    max_buckets: usize,
    alignment: u64,
) -> Vec<u64> {
    let mut buckets = Vec::with_capacity(max_buckets);

    if max_buckets == 0 {
        return buckets;
    }
    // The interpolation below divides by `max_buckets - 1`, which for a
    // single bucket is 0/0 == NaN and used to yield a bogus 0-sized bucket.
    if max_buckets == 1 {
        buckets.push(start_size.next_multiple_of(alignment));
        return buckets;
    }

    let log_min = (start_size as f64).ln();
    let log_max = (end_size as f64).ln();
    let log_range = log_max - log_min;

    // Pure exponential spacing: interpolate linearly in log space.
    for i in 0..max_buckets {
        let p = i as f64 / (max_buckets - 1) as f64;
        let log_size = log_min + log_range * p;
        let size = log_size.exp() as u64;
        buckets.push(size.next_multiple_of(alignment));
    }

    // Sizes are monotonically non-decreasing, so `dedup` removes every
    // alignment-induced duplicate, not just some of them.
    buckets.dedup();
    buckets
}
133
// NOTE(review): despite the `_MB` suffix this is 1024^3 bytes, i.e. 1 GiB —
// consider renaming; left as-is because it is referenced below.
const DEALLOC_SCALE_MB: u64 = 1024 * 1024 * 1024;
// Base deallocation period for exclusive pools — presumably measured in
// allocation counts (see `cleanup`'s `alloc_nr`); confirm against pool impl.
const BASE_DEALLOC_PERIOD: u64 = 5000;
136
/// The options for creating a new [MemoryManagement] instance.
#[derive(Debug)]
pub struct MemoryManagementOptions {
    /// The name of the memory management, used to tag log messages.
    name: String,
    /// The [MemoryAllocationOption] used by this instance.
    memory: MemoryAllocationOption,
}
145
impl MemoryManagementOptions {
    /// Creates a new [MemoryManagementOptions] with the given name; the
    /// allocation mode defaults to being resolved from the global config.
    pub fn new<S: Into<String>>(name: S) -> Self {
        Self {
            name: name.into(),
            memory: MemoryAllocationOption::FromConfig,
        }
    }

    /// Forces the [MemoryAllocationMode] during execution to always be the provided one,
    /// instead of resolving it from the global config.
    pub fn mode(mut self, mode: MemoryAllocationMode) -> Self {
        self.memory = MemoryAllocationOption::Provided(mode);
        self
    }
}
161
#[derive(Default, Debug)]
/// Determines which [MemoryAllocationMode] is used during allocations.
enum MemoryAllocationOption {
    #[default]
    /// Uses the [GlobalConfig] to determine the mode of allocation.
    FromConfig,
    /// Use the provided [MemoryAllocationMode], ignoring the [GlobalConfig].
    Provided(MemoryAllocationMode),
}
171
172impl<Storage: ComputeStorage> MemoryManagement<Storage> {
    /// Creates a new [MemoryManagement] from the given storage, device memory
    /// properties, and memory configuration.
    pub fn from_configuration(
        storage: Storage,
        properties: &MemoryDeviceProperties,
        config: MemoryConfiguration,
        logger: Arc<ServerLogger>,
        options: MemoryManagementOptions,
    ) -> Self {
        let pool_options = match config {
            #[cfg(not(exclusive_memory_only))]
            MemoryConfiguration::SubSlices => {
                // Round chunk size to be aligned.
                let memory_alignment = properties.alignment;
                let max_page = properties.max_page_size;
                let mut pools = Vec::new();

                const MB: u64 = 1024 * 1024;

                // Add in a pool for allocations that are smaller than the min alignment,
                // as they can't use offsets at all (on wgpu at least).
                pools.push(MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages { max_alloc_size: 0 },
                    dealloc_period: None,
                });

                let mut current = max_page;
                let mut max_sizes = vec![];
                let mut page_sizes = vec![];
                let mut base = pools.len() as u32;

                // Build progressively smaller page sizes (divide by 4 each
                // step) down to 32 MB; the max slice size shrinks faster
                // (extra division by 2^base) than the page size.
                while current >= 32 * MB {
                    current /= 4;

                    // Make sure every pool has an aligned size.
                    current = current.next_multiple_of(memory_alignment);

                    max_sizes.push(current / 2u64.pow(base));
                    page_sizes.push(current);
                    base += 1;
                }

                // The sizes above were generated from big to small; register
                // the pools from small to big.
                max_sizes.reverse();
                page_sizes.reverse();

                for i in 0..max_sizes.len() {
                    let max = max_sizes[i];
                    let page_size = page_sizes[i];

                    pools.push(MemoryPoolOptions {
                        // Creating max slices lower than the chunk size reduces fragmentation.
                        pool_type: PoolType::SlicedPages {
                            page_size,
                            max_slice_size: max,
                        },
                        dealloc_period: None,
                    });
                }

                // Finally, a catch-all pool with the biggest page size,
                // rounded *down* to the alignment.
                pools.push(MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size: max_page / memory_alignment * memory_alignment,
                        max_slice_size: max_page / memory_alignment * memory_alignment,
                    },
                    dealloc_period: None,
                });
                pools
            }
            MemoryConfiguration::ExclusivePages => {
                // Add all bin sizes. Nb: because of alignment some buckets
                // can end up as the same size; `generate_bucket_sizes`
                // deduplicates them while keeping ascending order.
                const MIN_BUCKET_SIZE: u64 = 1024 * 32;
                const NUM_POOLS: usize = 24;

                let sizes = generate_bucket_sizes(
                    MIN_BUCKET_SIZE,
                    properties.max_page_size,
                    NUM_POOLS,
                    properties.alignment,
                );

                sizes
                    .iter()
                    .map(|&size| {
                        // NOTE(review): `.round()` only applies to the scale
                        // factor, which stays 1.0 until `size` reaches half of
                        // `DEALLOC_SCALE_MB` (1 GiB) — confirm this is the
                        // intended scaling.
                        let dealloc_period = (BASE_DEALLOC_PERIOD as f64
                            * (1.0 + size as f64 / (DEALLOC_SCALE_MB as f64)).round())
                            as u64;

                        MemoryPoolOptions {
                            pool_type: PoolType::ExclusivePages {
                                max_alloc_size: size,
                            },
                            dealloc_period: Some(dealloc_period),
                        }
                    })
                    .collect()
            }
            MemoryConfiguration::Custom { pool_options } => pool_options,
        };

        // Log the selected pool layout unless memory logging is disabled.
        // NOTE(review): entries are concatenated without a separating
        // newline — confirm intended.
        logger.log_memory(
            |level| !matches!(level, MemoryLogLevel::Disabled),
            || {
                let mut msg = String::new();
                for pool in pool_options.iter() {
                    msg += &format!("[{}] Using memory pool: \n {pool:?}", options.name);
                }
                msg
            },
        );

        // Instantiate the concrete pools from their options.
        let pools: Vec<_> = pool_options
            .iter()
            .map(|options| match options.pool_type {
                PoolType::SlicedPages {
                    page_size,
                    max_slice_size,
                } => DynamicPool::Sliced(SlicedPool::new(
                    page_size,
                    max_slice_size,
                    properties.alignment,
                )),
                PoolType::ExclusivePages { max_alloc_size } => {
                    DynamicPool::Exclusive(ExclusiveMemoryPool::new(
                        max_alloc_size,
                        properties.alignment,
                        // No period configured means "effectively never".
                        options.dealloc_period.unwrap_or(u64::MAX),
                    ))
                }
            })
            .collect();

        let config = GlobalConfig::get().memory.persistent_memory.clone();

        // An explicitly provided mode wins; otherwise derive it from the
        // global persistent-memory policy.
        let mode = match options.memory {
            MemoryAllocationOption::Provided(mode) => mode,
            MemoryAllocationOption::FromConfig => match config {
                PersistentMemory::Enabled => MemoryAllocationMode::Auto,
                PersistentMemory::Disabled => MemoryAllocationMode::Auto,
                PersistentMemory::Enforced => MemoryAllocationMode::Persistent,
            },
        };

        Self {
            name: options.name,
            persistent: PersistentPool::new(properties.max_page_size, properties.alignment),
            pools,
            storage,
            alloc_reserve_count: 0,
            mode,
            config,
            logger,
        }
    }
328
329    /// Change the mode of allocation.
330    pub fn mode(&mut self, mode: MemoryAllocationMode) {
331        // We override the mode based on the cubecl config.
332        let mode = match self.config {
333            PersistentMemory::Enabled => mode,
334            PersistentMemory::Disabled | PersistentMemory::Enforced => return,
335        };
336
337        self.logger.log_memory(
338            |level| !matches!(level, MemoryLogLevel::Disabled),
339            || {
340                format!(
341                    "[{}] Setting memory allocation mode: from {:?} => {mode:?}",
342                    self.name, self.mode
343                )
344            },
345        );
346        self.mode = mode;
347    }
348
349    /// Cleanup allocations in pools that are deemed unnecessary.
350    pub fn cleanup(&mut self, explicit: bool) {
351        self.logger.log_memory(
352            |level| !matches!(level, MemoryLogLevel::Disabled) && explicit,
353            || "Manual memory cleanup ...".to_string(),
354        );
355
356        self.persistent
357            .cleanup(&mut self.storage, self.alloc_reserve_count, explicit);
358
359        for pool in self.pools.iter_mut() {
360            pool.cleanup(&mut self.storage, self.alloc_reserve_count, explicit);
361        }
362    }
363
364    /// Returns the storage from the specified binding
365    pub fn get(&mut self, binding: SliceBinding) -> Option<StorageHandle> {
366        if let Some(val) = self.persistent.get(&binding) {
367            return Some(val.clone());
368        }
369
370        self.pools.iter().find_map(|p| p.get(&binding)).cloned()
371    }
372
373    /// Returns the resource from the storage at the specified handle
374    pub fn get_resource(
375        &mut self,
376        binding: SliceBinding,
377        offset_start: Option<u64>,
378        offset_end: Option<u64>,
379    ) -> Option<Storage::Resource> {
380        let handle = self.get(binding);
381
382        handle.map(|handle| {
383            let handle = match offset_start {
384                Some(offset) => handle.offset_start(offset),
385                None => handle,
386            };
387            let handle = match offset_end {
388                Some(offset) => handle.offset_end(offset),
389                None => handle,
390            };
391            self.storage().get(&handle)
392        })
393    }
394
    /// Finds a spot in memory for a resource with the given size in bytes, and returns a handle to it
    pub fn reserve(&mut self, size: u64) -> Result<SliceHandle, IoError> {
        // If this happens every nanosecond, counts overflows after 585 years, so not worth thinking too
        // hard about overflow here.
        self.alloc_reserve_count += 1;

        // Fast path: the persistent pool may already hold a free slot for
        // this size.
        if let Some(val) = self.persistent.try_reserve(size) {
            self.logger.log_memory(
                |level| matches!(level, MemoryLogLevel::Full),
                || {
                    format!(
                        "[{}] Reserved memory {size} using persistent memory",
                        self.name
                    )
                },
            );
            return Ok(val);
        }

        // Allocate persistently when the mode demands it, or when this size
        // is already tracked by the persistent pool.
        if matches!(self.mode, MemoryAllocationMode::Persistent) || self.persistent.has_size(size) {
            let allocated = self.persistent.alloc(&mut self.storage, size);

            self.logger.log_memory(
                |level| !matches!(level, MemoryLogLevel::Disabled),
                || {
                    format!(
                        "[{}] Allocated a new memory page using persistent memory, \n{}",
                        self.name, self,
                    )
                },
            );
            return allocated;
        }

        // NOTE(review): this logs before the dynamic reservation actually
        // succeeds, and formats with BytesFormat unlike the persistent log
        // above — confirm both are intended.
        self.logger.log_memory(
            |level| matches!(level, MemoryLogLevel::Full),
            || {
                format!(
                    "[{}] Reserved memory {} using dynamic pool",
                    self.name,
                    BytesFormat::new(size)
                )
            },
        );

        // Find first pool that fits this allocation
        let pool = self
            .pools
            .iter_mut()
            .find(|p| p.accept(size))
            .ok_or(IoError::BufferTooBig {
                size,
                backtrace: BackTrace::capture(),
            })?;

        // Reuse an existing free slice when possible; otherwise grow the pool.
        if let Some(slice) = pool.try_reserve(size) {
            return Ok(slice);
        }

        let allocated = pool.alloc(&mut self.storage, size);

        self.logger.log_memory(
            |level| matches!(level, MemoryLogLevel::Full),
            || {
                format!(
                    "[{}], Allocated a new memory page, current usage: \n{}",
                    self.name, self
                )
            },
        );

        allocated
    }
468
    /// Fetch the storage used by the memory manager.
    ///
    /// Returns a mutable reference so the caller can drive the storage directly.
    ///
    /// # Notes
    ///
    /// The storage should probably not be used for allocations since the handles won't be
    /// compatible with the ones provided by the current trait. Prefer using the
    /// [alloc](ComputeStorage::alloc) and [dealloc](ComputeStorage::dealloc) functions.
    ///
    /// This is useful if you need to time the deallocations based on async computation, or to
    /// change the mode of storage for different reasons.
    pub fn storage(&mut self) -> &mut Storage {
        &mut self.storage
    }
482
483    /// Get the current memory usage.
484    pub fn memory_usage(&self) -> MemoryUsage {
485        let memory_usage = self.pools.iter().map(|x| x.get_memory_usage()).fold(
486            MemoryUsage {
487                number_allocs: 0,
488                bytes_in_use: 0,
489                bytes_padding: 0,
490                bytes_reserved: 0,
491            },
492            |m1, m2| m1.combine(m2),
493        );
494        memory_usage.combine(self.persistent.get_memory_usage())
495    }
496
    /// Print out a report of the current memory usage.
    ///
    /// Compiles to a no-op when the `std` feature is disabled.
    pub fn print_memory_usage(&self) {
        #[cfg(feature = "std")]
        log::info!("{}", self.memory_usage());
    }
502}
503impl<Storage: ComputeStorage> core::fmt::Display for MemoryManagement<Storage> {
504    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
505        f.write_str("\n# MemoryManagement\n\n")?;
506        f.write_fmt(format_args!(" - name: {:?}\n", self.name))?;
507        f.write_fmt(format_args!("\n## Persistent\n\n{}", self.persistent))?;
508        f.write_str("\n## Dynamic\n\n")?;
509
510        for pool in self.pools.iter() {
511            match pool {
512                DynamicPool::Sliced(pool) => f.write_fmt(format_args!("{pool}\n"))?,
513                DynamicPool::Exclusive(pool) => f.write_fmt(format_args!("{pool}\n"))?,
514            }
515        }
516        let memory_usage = self.memory_usage();
517        f.write_fmt(format_args!("\n## Summary\n\n{memory_usage}"))?;
518
519        Ok(())
520    }
521}
522
523impl<Storage> core::fmt::Debug for MemoryManagement<Storage> {
524    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
525        f.write_str(
526            alloc::format!(
527                "DynamicMemoryManagement {:?}",
528                core::any::type_name::<Storage>(),
529            )
530            .as_str(),
531        )
532    }
533}
534
535#[cfg(test)]
536mod tests {
537    use super::*;
538    use crate::{memory_management::MemoryManagement, storage::BytesStorage};
539
    // Shared device properties for the tests: 128 MiB max page size and a
    // 32-byte alignment.
    const DUMMY_MEM_PROPS: MemoryDeviceProperties = MemoryDeviceProperties {
        max_page_size: 128 * 1024 * 1024,
        alignment: 32,
    };
544
545    fn options() -> MemoryManagementOptions {
546        MemoryManagementOptions {
547            name: "test".into(),
548            memory: MemoryAllocationOption::FromConfig,
549        }
550    }
551
    // Test pools with slices.
    #[test]
    #[cfg(not(exclusive_memory_only))]
    fn test_handle_mutability() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::SubSlices,
            Arc::new(ServerLogger::default()),
            options(),
        );
        let handle = memory_management.reserve(10).unwrap();
        let other_ref = handle.clone();
        // A handle is only mutable while it is the sole reference.
        assert!(!handle.can_mut(), "Handle can't be mut when multiple ref.");
        drop(other_ref);
        assert!(handle.can_mut(), "Handle should be mut when only one ref.");
    }
569
    // Test pools with slices.
    #[test]
    #[cfg(not(exclusive_memory_only))]
    fn test_memory_usage() {
        // Max allocation size of the single exclusive pool under test.
        let max_page_size = 512;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages {
                        max_alloc_size: max_page_size,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );
        let handle = memory_management.reserve(100);
        let usage = memory_management.memory_usage();

        assert_eq!(usage.bytes_in_use, 100);
        assert!(usage.bytes_reserved >= 100 && usage.bytes_reserved <= max_page_size);

        // Drop and re-alloc.
        drop(handle);
        let _handle = memory_management.reserve(100);
        let usage_new = memory_management.memory_usage();
        // Reusing the freed slot must not change the reported usage.
        assert_eq!(usage, usage_new);
    }
602
    #[test]
    fn alloc_two_chunks_on_one_page() {
        let page_size = 2048;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );

        // Two 512-byte slices fit on one 2048-byte page.
        let alloc_size = 512;
        let _handle = memory_management.reserve(alloc_size);
        let _new_handle = memory_management.reserve(alloc_size);

        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 2);
        assert_eq!(usage.bytes_in_use, alloc_size * 2);
        // Only a single page should have been reserved.
        assert_eq!(usage.bytes_reserved, page_size);
    }
632
    #[test]
    fn alloc_reuses_storage() {
        // If no storage is re-used, this will allocate two pages.
        let page_size = 512;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );

        let alloc_size = 512;
        // Dropping the first handle frees its slice for re-use.
        let _handle = memory_management.reserve(alloc_size);
        drop(_handle);
        let _new_handle = memory_management.reserve(alloc_size);

        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 1);
        assert_eq!(usage.bytes_in_use, alloc_size);
        // Still only one page reserved: the second reserve reused the first slot.
        assert_eq!(usage.bytes_reserved, page_size);
    }
664
    #[test]
    fn alloc_allocs_new_storage() {
        let page_size = 1024;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );

        // Two 768-byte slices cannot share a 1024-byte page, forcing a
        // second page allocation.
        let alloc_size = 768;
        let _handle = memory_management.reserve(alloc_size);
        let _new_handle = memory_management.reserve(alloc_size);

        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 2);
        assert_eq!(usage.bytes_in_use, alloc_size * 2);
        assert_eq!(usage.bytes_reserved, page_size * 2);
    }
694
    #[test]
    fn alloc_respects_alignment_size() {
        let page_size = 500;
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: page_size,
                alignment: 50,
            },
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );
        let alloc_size = 40;
        let _handle = memory_management.reserve(alloc_size);
        let _new_handle = memory_management.reserve(alloc_size);
        let usage = memory_management.memory_usage();
        // Each 40-byte slice is padded up to the 50-byte alignment, i.e.
        // 10 padding bytes per slice, for both slices.
        assert_eq!(usage.bytes_padding, 10 * 2);
    }
723
    #[test]
    fn allocs_on_correct_page() {
        let sizes = [100, 200, 300, 400];

        // One sliced pool per page size.
        let pools = sizes
            .iter()
            .map(|size| MemoryPoolOptions {
                pool_type: PoolType::SlicedPages {
                    page_size: *size,
                    max_slice_size: *size,
                },
                dealloc_period: None,
            })
            .collect();
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: 128 * 1024 * 1024,
                alignment: 10,
            },
            MemoryConfiguration::Custom {
                pool_options: pools,
            },
            Arc::new(ServerLogger::default()),
            options(),
        );
        // Allocate one thing on each page.
        let alloc_sizes = [50, 150, 250, 350];
        let _handles = alloc_sizes.map(|s| memory_management.reserve(s));

        let usage = memory_management.memory_usage();

        // In-use bytes match the allocations exactly; reserved bytes must
        // cover at least one page of each size.
        assert_eq!(usage.bytes_in_use, alloc_sizes.iter().sum::<u64>());
        assert!(usage.bytes_reserved >= sizes.iter().sum::<u64>());
    }
760
    #[test]
    #[cfg(not(exclusive_memory_only))]
    fn allocate_deallocate_reallocate() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: 128 * 1024 * 1024,
                alignment: 32,
            },
            MemoryConfiguration::SubSlices,
            Arc::new(ServerLogger::default()),
            options(),
        );
        // Allocate a bunch
        let handles: Vec<_> = (0..5)
            .map(|i| memory_management.reserve(1000 * (i + 1)))
            .collect();
        let usage_before = memory_management.memory_usage();
        // Deallocate
        drop(handles);
        // Reallocate the same sizes: usage should settle back to the same state.
        let _new_handles: Vec<_> = (0..5)
            .map(|i| memory_management.reserve(1000 * (i + 1)))
            .collect();
        let usage_after = memory_management.memory_usage();
        assert_eq!(usage_before.number_allocs, usage_after.number_allocs);
        assert_eq!(usage_before.bytes_in_use, usage_after.bytes_in_use);
        // Usage after can actually be _less_ because of defragging.
        assert!(usage_before.bytes_reserved >= usage_after.bytes_reserved);
    }
791
792    #[test]
793    #[cfg(not(exclusive_memory_only))]
794    fn test_fragmentation_resistance() {
795        let mut memory_management = MemoryManagement::from_configuration(
796            BytesStorage::default(),
797            &MemoryDeviceProperties {
798                max_page_size: 128 * 1024 * 1024,
799                alignment: 32,
800            },
801            MemoryConfiguration::SubSlices,
802            Arc::new(ServerLogger::default()),
803            options(),
804        );
805        // Allocate a mix of small and large chunks
806        let sizes = [50, 1000, 100, 5000, 200, 10000, 300];
807        let handles: Vec<_> = sizes
808            .iter()
809            .map(|&size| memory_management.reserve(size).unwrap())
810            .collect();
811        let usage_before = memory_management.memory_usage();
812        // Deallocate every other allocation
813        for i in (0..handles.len()).step_by(2) {
814            drop(handles[i].clone());
815        }
816        // Reallocate similar sizes
817        for &size in &sizes[0..sizes.len() / 2] {
818            memory_management.reserve(size).unwrap();
819        }
820        let usage_after = memory_management.memory_usage();
821        // Check that we haven't increased our memory usage significantly
822        assert!(usage_after.bytes_reserved <= (usage_before.bytes_reserved as f64 * 1.1) as u64);
823    }
824
    // Test pools without slices. More or less same as tests above.
    #[test]
    fn noslice_test_handle_mutability() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &(MemoryDeviceProperties {
                max_page_size: 128 * 1024 * 1024,
                alignment: 32,
            }),
            MemoryConfiguration::ExclusivePages,
            Arc::new(ServerLogger::default()),
            options(),
        );
        let handle = memory_management.reserve(10).unwrap();
        let other_ref = handle.clone();
        // A handle is only mutable while it is the sole reference.
        assert!(!handle.can_mut(), "Handle can't be mut when multiple ref.");
        drop(other_ref);
        assert!(handle.can_mut(), "Handle should be mut when only one ref.");
    }
844
    #[test]
    fn noslice_alloc_two_chunk() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages {
                        max_alloc_size: 1024,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );

        // Exclusive pages: each allocation gets its own page.
        let alloc_size = 512;
        let _handle = memory_management.reserve(alloc_size);
        let _new_handle = memory_management.reserve(alloc_size);

        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 2);
        assert_eq!(usage.bytes_in_use, alloc_size * 2);
        assert!(usage.bytes_reserved >= alloc_size * 2);
    }
871
#[test]
    fn noslice_alloc_reuses_storage() {
        // If no storage is re-used, this will allocate two pages.
        let config = MemoryConfiguration::Custom {
            pool_options: vec![MemoryPoolOptions {
                pool_type: PoolType::ExclusivePages {
                    max_alloc_size: 1024,
                },
                dealloc_period: None,
            }],
        };
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            config,
            Arc::new(ServerLogger::default()),
            options(),
        );

        let alloc_size = 512;
        // Reserve then immediately release, so the backing storage becomes
        // available for recycling.
        let first = memory_management.reserve(alloc_size);
        drop(first);
        let _second = memory_management.reserve(alloc_size);

        // Exactly one physical allocation should exist if storage was reused.
        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 1);
        assert_eq!(usage.bytes_in_use, alloc_size);
        assert!(usage.bytes_reserved >= alloc_size);
    }
900
#[test]
    fn noslice_alloc_allocs_new_storage() {
        // Exclusive pool capped at 1024 bytes per allocation.
        let config = MemoryConfiguration::Custom {
            pool_options: vec![MemoryPoolOptions {
                pool_type: PoolType::ExclusivePages {
                    max_alloc_size: 1024,
                },
                dealloc_period: None,
            }],
        };
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            config,
            Arc::new(ServerLogger::default()),
            options(),
        );

        // Both handles stay alive, so the second reserve cannot recycle the
        // first allocation and has to grab fresh storage.
        let alloc_size = 768;
        let _first = memory_management.reserve(alloc_size);
        let _second = memory_management.reserve(alloc_size);

        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 2);
        assert_eq!(usage.bytes_in_use, alloc_size * 2);
        assert!(usage.bytes_reserved >= alloc_size * 2);
    }
926
#[test]
    fn noslice_alloc_respects_alignment_size() {
        // Device alignment of 50 bytes forces every allocation to be rounded
        // up to a multiple of 50.
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: DUMMY_MEM_PROPS.max_page_size,
                alignment: 50,
            },
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages {
                        max_alloc_size: 50 * 20,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );
        let alloc_size = 40;
        let _handle = memory_management.reserve(alloc_size);
        let _new_handle = memory_management.reserve(alloc_size);
        let usage = memory_management.memory_usage();
        // Each 40-byte allocation is rounded up to the 50-byte alignment,
        // so 10 padding bytes per allocation, times 2 allocations.
        assert_eq!(usage.bytes_padding, 10 * 2);
    }
953
#[test]
    fn noslice_allocs_on_correct_page() {
        // One sliced pool per size: pages of 100, 200, 300 and 400 bytes,
        // each accepting slices only up to its own page size.
        let pools = [100, 200, 300, 400]
            .iter()
            .map(|&size| MemoryPoolOptions {
                pool_type: PoolType::SlicedPages {
                    page_size: size,
                    max_slice_size: size,
                },
                dealloc_period: None,
            })
            .collect();
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: DUMMY_MEM_PROPS.max_page_size,
                alignment: 10,
            },
            MemoryConfiguration::Custom {
                pool_options: pools,
            },
            Arc::new(ServerLogger::default()),
            options(),
        );
        // Allocate one thing on each page.
        let alloc_sizes = [50, 150, 250, 350];
        let _handles = alloc_sizes.map(|s| memory_management.reserve(s));
        let usage = memory_management.memory_usage();
        // Each requested size is already a multiple of the 10-byte alignment,
        // so bytes in use must equal the exact sum of the requested
        // allocations — no padding, and nothing doubled up on a wrong page.
        assert_eq!(usage.bytes_in_use, alloc_sizes.iter().sum::<u64>());
    }
985
#[test]
    fn noslice_allocate_deallocate_reallocate() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: 128 * 1024 * 1024,
                alignment: 32,
            },
            MemoryConfiguration::ExclusivePages,
            Arc::new(ServerLogger::default()),
            options(),
        );
        // Allocate a batch of growing buffers: 1000, 2000, ..., 5000 bytes.
        let handles: Vec<_> = (1..=5)
            .map(|i| memory_management.reserve(1000 * i))
            .collect();
        let usage_before = memory_management.memory_usage();
        // Release the whole batch at once.
        drop(handles);
        // Re-requesting the same sizes should land in the recycled storage.
        let _new_handles: Vec<_> = (1..=5)
            .map(|i| memory_management.reserve(1000 * i))
            .collect();
        let usage_after = memory_management.memory_usage();
        // Usage must match exactly: no extra allocations, no growth in
        // reserved bytes.
        assert_eq!(usage_before.number_allocs, usage_after.number_allocs);
        assert_eq!(usage_before.bytes_in_use, usage_after.bytes_in_use);
        assert_eq!(usage_before.bytes_reserved, usage_after.bytes_reserved);
    }
1014}