cubecl_runtime/memory_management/
memory_manage.rs

1use super::{
2    MemoryConfiguration, MemoryDeviceProperties, MemoryPoolOptions, MemoryUsage, PoolType,
3    memory_pool::{ExclusiveMemoryPool, MemoryPool, PersistentPool, SlicedPool},
4};
5use crate::{
6    config::{
7        GlobalConfig,
8        memory::{MemoryLogLevel, PersistentMemory},
9    },
10    logging::ServerLogger,
11    memory_management::BytesFormat,
12    server::IoError,
13    storage::{ComputeStorage, StorageHandle},
14};
15
16use alloc::format;
17use alloc::string::{String, ToString};
18#[cfg(not(exclusive_memory_only))]
19use alloc::vec;
20use alloc::vec::Vec;
21use cubecl_common::stub::Arc;
22
23pub use super::memory_pool::{SliceBinding, handle::*};
24
// These are 288 bytes vs 64 bytes. Adding boxing isn't really worth
// saving the 200 bytes.
#[allow(clippy::large_enum_variant)]
/// Runtime-selected pool strategy: either sliced pages (sub-allocates slices
/// out of shared pages) or exclusive pages (one allocation per page).
enum DynamicPool {
    /// Backed by a [SlicedPool].
    Sliced(SlicedPool),
    /// Backed by an [ExclusiveMemoryPool].
    Exclusive(ExclusiveMemoryPool),
}
32
33impl MemoryPool for DynamicPool {
34    fn accept(&self, size: u64) -> bool {
35        match self {
36            DynamicPool::Sliced(pool) => pool.accept(size),
37            DynamicPool::Exclusive(pool) => pool.accept(size),
38        }
39    }
40
41    fn get(&self, binding: &SliceBinding) -> Option<&StorageHandle> {
42        match self {
43            DynamicPool::Sliced(m) => m.get(binding),
44            DynamicPool::Exclusive(m) => m.get(binding),
45        }
46    }
47
48    fn try_reserve(&mut self, size: u64) -> Option<SliceHandle> {
49        match self {
50            DynamicPool::Sliced(m) => m.try_reserve(size),
51            DynamicPool::Exclusive(m) => m.try_reserve(size),
52        }
53    }
54
55    fn alloc<Storage: ComputeStorage>(
56        &mut self,
57        storage: &mut Storage,
58        size: u64,
59    ) -> Result<SliceHandle, IoError> {
60        match self {
61            DynamicPool::Sliced(m) => m.alloc(storage, size),
62            DynamicPool::Exclusive(m) => m.alloc(storage, size),
63        }
64    }
65
66    fn get_memory_usage(&self) -> MemoryUsage {
67        match self {
68            DynamicPool::Sliced(m) => m.get_memory_usage(),
69            DynamicPool::Exclusive(m) => m.get_memory_usage(),
70        }
71    }
72
73    fn cleanup<Storage: ComputeStorage>(
74        &mut self,
75        storage: &mut Storage,
76        alloc_nr: u64,
77        explicit: bool,
78    ) {
79        match self {
80            DynamicPool::Sliced(m) => m.cleanup(storage, alloc_nr, explicit),
81            DynamicPool::Exclusive(m) => m.cleanup(storage, alloc_nr, explicit),
82        }
83    }
84}
85
#[derive(Default, Clone, Copy, Debug)]
/// The mode of allocation used.
///
/// Can be changed at runtime via [MemoryManagement::mode], subject to the
/// global persistent-memory configuration.
pub enum MemoryAllocationMode {
    /// Use the automatic memory management strategy for allocation.
    #[default]
    Auto,
    /// Use a persistent memory management strategy, meaning that all allocations are for data that is
    /// likely never going to be freed.
    Persistent,
}
96
/// Reserves and keeps track of chunks of memory in the storage, and slices upon these chunks.
pub struct MemoryManagement<Storage> {
    // Name used to tag log messages emitted by this instance.
    name: String,
    // Pool for long-lived allocations (see [MemoryAllocationMode::Persistent]);
    // always consulted before the dynamic pools.
    persistent: PersistentPool,
    // Dynamic pools, ordered so `reserve` picks the first pool accepting a size.
    pools: Vec<DynamicPool>,
    // The underlying compute storage allocations are made in.
    storage: Storage,
    // Monotonic count of `reserve` calls; forwarded to pools as `alloc_nr`
    // to pace their cleanup.
    alloc_reserve_count: u64,
    // Current allocation mode; updates may be overridden by `config` (see `mode`).
    mode: MemoryAllocationMode,
    // Persistent-memory setting captured from the global config at construction.
    config: PersistentMemory,
    logger: Arc<ServerLogger>,
}
108
/// Generates exponentially spaced bucket sizes spanning `[start_size, end_size]`.
///
/// Sizes are interpolated linearly in log-space over `max_buckets` steps, then
/// rounded up to `alignment`. Rounding can collapse neighbouring buckets into
/// the same size, so consecutive duplicates are removed; the result may hold
/// fewer than `max_buckets` entries. Returns an empty vector for `max_buckets == 0`.
fn generate_bucket_sizes(
    start_size: u64,
    end_size: u64,
    max_buckets: usize,
    alignment: u64,
) -> Vec<u64> {
    let mut buckets = Vec::with_capacity(max_buckets);
    if max_buckets == 0 {
        return buckets;
    }

    let log_min = (start_size as f64).ln();
    let log_max = (end_size as f64).ln();
    let log_range = log_max - log_min;

    // With a single bucket, `max_buckets - 1 == 0` would make `p = 0/0 = NaN`,
    // and `NaN as u64 == 0`, yielding a bogus zero-sized bucket. Clamp the
    // denominator so a single bucket emits the (aligned) start size instead.
    let steps = (max_buckets - 1).max(1) as f64;

    for i in 0..max_buckets {
        // Fraction of the way through the log-space range, in [0, 1].
        let p = i as f64 / steps;
        let size = (log_min + log_range * p).exp() as u64;
        buckets.push(size.next_multiple_of(alignment));
    }

    // Sizes are non-decreasing, so deduping consecutive entries removes all
    // duplicates introduced by alignment rounding.
    buckets.dedup();
    buckets
}
133
// Size scale over which the deallocation period of exclusive pools grows.
// NOTE(review): despite the `_MB` suffix this is 1 GiB (1024^3 bytes) — confirm
// the intended unit.
const DEALLOC_SCALE_MB: u64 = 1024 * 1024 * 1024;
// Baseline deallocation period passed to exclusive pools; presumably measured
// in `reserve` calls (see `alloc_reserve_count`) — TODO confirm.
const BASE_DEALLOC_PERIOD: u64 = 5000;
136
/// The options for creating a new [MemoryManagement] instance.
#[derive(Debug)]
pub struct MemoryManagementOptions {
    /// The name of the memory management instance, used to tag log messages.
    name: String,
    /// The [MemoryAllocationOption] used by this instance.
    memory: MemoryAllocationOption,
}
145
146impl MemoryManagementOptions {
147    /// Creates a new [MemoryManagementOptions].
148    pub fn new<S: Into<String>>(name: S) -> Self {
149        Self {
150            name: name.into(),
151            memory: MemoryAllocationOption::FromConfig,
152        }
153    }
154
155    /// Forces the [MemoryAllocationMode] during execution to always be the provided one.
156    pub fn mode(mut self, mode: MemoryAllocationMode) -> Self {
157        self.memory = MemoryAllocationOption::Provided(mode);
158        self
159    }
160}
161
#[derive(Default, Debug)]
/// Determines which [MemoryAllocationMode] is used during allocations.
enum MemoryAllocationOption {
    #[default]
    /// Uses the [GlobalConfig] to determine the mode of allocation.
    FromConfig,
    /// Use the provided [MemoryAllocationMode].
    Provided(MemoryAllocationMode),
}
171
impl<Storage: ComputeStorage> MemoryManagement<Storage> {
    /// Creates the options from device limits.
    ///
    /// The pool layout depends on `config`:
    /// - `SubSlices` (absent with `exclusive_memory_only`): a zero-size exclusive
    ///   pool for sub-alignment allocations, then sliced pools ordered from
    ///   small to large pages, ending in a full-max-page pool.
    /// - `ExclusivePages`: a ladder of exclusive pools with exponentially spaced
    ///   max sizes (see [generate_bucket_sizes]).
    /// - `Custom`: the caller-provided pool options, used verbatim.
    pub fn from_configuration(
        storage: Storage,
        properties: &MemoryDeviceProperties,
        config: MemoryConfiguration,
        logger: Arc<ServerLogger>,
        options: MemoryManagementOptions,
    ) -> Self {
        let pool_options = match config {
            #[cfg(not(exclusive_memory_only))]
            MemoryConfiguration::SubSlices => {
                // Round chunk size to be aligned.
                let memory_alignment = properties.alignment;
                let max_page = properties.max_page_size;
                let mut pools = Vec::new();

                const MB: u64 = 1024 * 1024;

                // Add in a pool for allocations that are smaller than the min alignment,
                // as they can't use offsets at all (on wgpu at least).
                pools.push(MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages { max_alloc_size: 0 },
                    dealloc_period: None,
                });

                let mut current = max_page;
                let mut max_sizes = vec![];
                let mut page_sizes = vec![];
                let mut base = pools.len() as u32;

                // Generate page sizes by repeatedly dividing the max page by 4;
                // the max slice size per level shrinks by an extra power of two.
                while current >= 32 * MB {
                    current /= 4;

                    // Make sure every pool has an aligned size.
                    current = current.next_multiple_of(memory_alignment);

                    max_sizes.push(current / 2u64.pow(base));
                    page_sizes.push(current);
                    base += 1;
                }

                // Sizes were generated big-to-small; flip so pools are added
                // small-to-big, matching `reserve`, which uses the first pool
                // that accepts a size.
                max_sizes.reverse();
                page_sizes.reverse();

                for i in 0..max_sizes.len() {
                    let max = max_sizes[i];
                    let page_size = page_sizes[i];

                    pools.push(MemoryPoolOptions {
                        // Creating max slices lower than the chunk size reduces fragmentation.
                        pool_type: PoolType::SlicedPages {
                            page_size,
                            max_slice_size: max,
                        },
                        dealloc_period: None,
                    });
                }

                // Lastly, add the catch-all pool: a full max-size page (truncated
                // down to the alignment) that accepts slices up to the whole page.
                pools.push(MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size: max_page / memory_alignment * memory_alignment,
                        max_slice_size: max_page / memory_alignment * memory_alignment,
                    },
                    dealloc_period: None,
                });
                pools
            }
            MemoryConfiguration::ExclusivePages => {
                // Add all bin sizes. Nb: because of alignment some buckets
                // end up as the same size, so only want unique ones,
                // but also keep the order, so a BTree will do.
                const MIN_BUCKET_SIZE: u64 = 1024 * 32;
                const NUM_POOLS: usize = 24;

                let sizes = generate_bucket_sizes(
                    MIN_BUCKET_SIZE,
                    properties.max_page_size,
                    NUM_POOLS,
                    properties.alignment,
                );

                sizes
                    .iter()
                    .map(|&size| {
                        // Bigger buckets get a longer deallocation period.
                        // NOTE(review): `.round()` binds to the scale factor only,
                        // so any size below DEALLOC_SCALE_MB / 2 yields exactly
                        // BASE_DEALLOC_PERIOD — confirm whether the whole product
                        // was meant to be rounded instead.
                        let dealloc_period = (BASE_DEALLOC_PERIOD as f64
                            * (1.0 + size as f64 / (DEALLOC_SCALE_MB as f64)).round())
                            as u64;

                        MemoryPoolOptions {
                            pool_type: PoolType::ExclusivePages {
                                max_alloc_size: size,
                            },
                            dealloc_period: Some(dealloc_period),
                        }
                    })
                    .collect()
            }
            MemoryConfiguration::Custom { pool_options } => pool_options,
        };

        // Log the chosen pool layout unless memory logging is disabled.
        logger.log_memory(
            |level| !matches!(level, MemoryLogLevel::Disabled),
            || {
                let mut msg = String::new();
                for pool in pool_options.iter() {
                    msg += &format!("[{}] Using memory pool: \n {pool:?}", options.name);
                }
                msg
            },
        );

        // Instantiate the concrete pools from their options.
        let pools: Vec<_> = pool_options
            .iter()
            .map(|options| match options.pool_type {
                PoolType::SlicedPages {
                    page_size,
                    max_slice_size,
                } => DynamicPool::Sliced(SlicedPool::new(
                    page_size,
                    max_slice_size,
                    properties.alignment,
                )),
                PoolType::ExclusivePages { max_alloc_size } => {
                    DynamicPool::Exclusive(ExclusiveMemoryPool::new(
                        max_alloc_size,
                        properties.alignment,
                        // u64::MAX means the pool effectively never deallocates.
                        options.dealloc_period.unwrap_or(u64::MAX),
                    ))
                }
            })
            .collect();

        let config = GlobalConfig::get().memory.persistent_memory.clone();

        // An explicitly provided mode wins; otherwise derive it from the config.
        let mode = match options.memory {
            MemoryAllocationOption::Provided(mode) => mode,
            MemoryAllocationOption::FromConfig => match config {
                PersistentMemory::Enabled => MemoryAllocationMode::Auto,
                PersistentMemory::Disabled => MemoryAllocationMode::Auto,
                PersistentMemory::Enforced => MemoryAllocationMode::Persistent,
            },
        };

        Self {
            name: options.name,
            persistent: PersistentPool::new(properties.max_page_size, properties.alignment),
            pools,
            storage,
            alloc_reserve_count: 0,
            mode,
            config,
            logger,
        }
    }

    /// Change the mode of allocation.
    ///
    /// Has no effect when the global config pins persistent memory to
    /// `Disabled` or `Enforced`.
    pub fn mode(&mut self, mode: MemoryAllocationMode) {
        // We override the mode based on the cubecl config.
        let mode = match self.config {
            PersistentMemory::Enabled => mode,
            PersistentMemory::Disabled | PersistentMemory::Enforced => return,
        };

        self.logger.log_memory(
            |level| !matches!(level, MemoryLogLevel::Disabled),
            || {
                format!(
                    "[{}] Setting memory allocation mode: from {:?} => {mode:?}",
                    self.name, self.mode
                )
            },
        );
        self.mode = mode;
    }

    /// Cleanup allocations in pools that are deemed unnecessary.
    ///
    /// `explicit` marks a user-requested cleanup; it is forwarded to each pool
    /// and enables the log message below.
    pub fn cleanup(&mut self, explicit: bool) {
        self.logger.log_memory(
            |level| !matches!(level, MemoryLogLevel::Disabled) && explicit,
            || "Manual memory cleanup ...".to_string(),
        );

        self.persistent
            .cleanup(&mut self.storage, self.alloc_reserve_count, explicit);

        for pool in self.pools.iter_mut() {
            pool.cleanup(&mut self.storage, self.alloc_reserve_count, explicit);
        }
    }

    /// Returns the storage from the specified binding
    ///
    /// The persistent pool is consulted first, then each dynamic pool in order.
    pub fn get(&mut self, binding: SliceBinding) -> Option<StorageHandle> {
        if let Some(val) = self.persistent.get(&binding) {
            return Some(val.clone());
        }

        self.pools.iter().find_map(|p| p.get(&binding)).cloned()
    }

    /// Returns the resource from the storage at the specified handle
    ///
    /// Optional start/end offsets narrow the handle before it is resolved.
    pub fn get_resource(
        &mut self,
        binding: SliceBinding,
        offset_start: Option<u64>,
        offset_end: Option<u64>,
    ) -> Option<Storage::Resource> {
        let handle = self.get(binding);

        handle.map(|handle| {
            let handle = match offset_start {
                Some(offset) => handle.offset_start(offset),
                None => handle,
            };
            let handle = match offset_end {
                Some(offset) => handle.offset_end(offset),
                None => handle,
            };
            self.storage().get(&handle)
        })
    }

    /// Finds a spot in memory for a resource with the given size in bytes, and returns a handle to it
    ///
    /// Order of attempts: reuse persistent memory, allocate persistent memory
    /// (in persistent mode, or when the persistent pool knows this size),
    /// then fall back to the first dynamic pool that accepts `size`.
    pub fn reserve(&mut self, size: u64) -> Result<SliceHandle, IoError> {
        // If this happens every nanosecond, counts overflows after 585 years, so not worth thinking too
        // hard about overflow here.
        self.alloc_reserve_count += 1;

        if let Some(val) = self.persistent.try_reserve(size) {
            self.logger.log_memory(
                |level| matches!(level, MemoryLogLevel::Full),
                || {
                    format!(
                        "[{}] Reserved memory {size} using persistent memory",
                        self.name
                    )
                },
            );
            return Ok(val);
        }

        if matches!(self.mode, MemoryAllocationMode::Persistent) || self.persistent.has_size(size) {
            let allocated = self.persistent.alloc(&mut self.storage, size);

            self.logger.log_memory(
                |level| !matches!(level, MemoryLogLevel::Disabled),
                || {
                    format!(
                        "[{}] Allocated a new memory page using persistent memory, \n{}",
                        self.name, self,
                    )
                },
            );
            return allocated;
        }

        // NOTE(review): this is logged before the dynamic reservation is known
        // to succeed (a pool might still be missing or fail to allocate).
        self.logger.log_memory(
            |level| matches!(level, MemoryLogLevel::Full),
            || {
                format!(
                    "[{}] Reserved memory {} using dynamic pool",
                    self.name,
                    BytesFormat::new(size)
                )
            },
        );

        // Find first pool that fits this allocation
        let pool = self
            .pools
            .iter_mut()
            .find(|p| p.accept(size))
            .ok_or(IoError::BufferTooBig(size as usize))?;

        if let Some(slice) = pool.try_reserve(size) {
            return Ok(slice);
        }

        // No reusable slice available: grow the pool with a fresh allocation.
        let allocated = pool.alloc(&mut self.storage, size);

        self.logger.log_memory(
            |level| matches!(level, MemoryLogLevel::Full),
            || {
                format!(
                    "[{}], Allocated a new memory page, current usage: \n{}",
                    self.name, self
                )
            },
        );

        allocated
    }

    /// Fetch the storage used by the memory manager.
    ///
    /// # Notes
    ///
    /// The storage should probably not be used for allocations since the handles won't be
    /// compatible with the ones provided by the current trait. Prefer using the
    /// [alloc](ComputeStorage::alloc) and [dealloc](ComputeStorage::dealloc) functions.
    ///
    /// This is useful if you need to time the deallocations based on async computation, or to
    /// change the mode of storage for different reasons.
    pub fn storage(&mut self) -> &mut Storage {
        &mut self.storage
    }

    /// Get the current memory usage.
    ///
    /// Sums the usage of every dynamic pool plus the persistent pool.
    pub fn memory_usage(&self) -> MemoryUsage {
        let memory_usage = self.pools.iter().map(|x| x.get_memory_usage()).fold(
            MemoryUsage {
                number_allocs: 0,
                bytes_in_use: 0,
                bytes_padding: 0,
                bytes_reserved: 0,
            },
            |m1, m2| m1.combine(m2),
        );
        memory_usage.combine(self.persistent.get_memory_usage())
    }

    /// Print out a report of the current memory usage.
    ///
    /// No-op unless the `std` feature is enabled.
    pub fn print_memory_usage(&self) {
        #[cfg(feature = "std")]
        log::info!("{}", self.memory_usage());
    }
}
500impl<Storage: ComputeStorage> core::fmt::Display for MemoryManagement<Storage> {
501    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
502        f.write_str("\n# MemoryManagement\n\n")?;
503        f.write_fmt(format_args!(" - name: {:?}\n", self.name))?;
504        f.write_fmt(format_args!("\n## Persistent\n\n{}", self.persistent))?;
505        f.write_str("\n## Dynamic\n\n")?;
506
507        for pool in self.pools.iter() {
508            match pool {
509                DynamicPool::Sliced(pool) => f.write_fmt(format_args!("{pool}\n"))?,
510                DynamicPool::Exclusive(pool) => f.write_fmt(format_args!("{pool}\n"))?,
511            }
512        }
513        let memory_usage = self.memory_usage();
514        f.write_fmt(format_args!("\n## Summary\n\n{}", memory_usage))?;
515
516        Ok(())
517    }
518}
519
520impl<Storage> core::fmt::Debug for MemoryManagement<Storage> {
521    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
522        f.write_str(
523            alloc::format!(
524                "DynamicMemoryManagement {:?}",
525                core::any::type_name::<Storage>(),
526            )
527            .as_str(),
528        )
529    }
530}
531
532#[cfg(test)]
533mod tests {
534    use super::*;
535    use crate::{memory_management::MemoryManagement, storage::BytesStorage};
536
    // Shared device properties for the tests: 128 MiB max pages, 32-byte alignment.
    const DUMMY_MEM_PROPS: MemoryDeviceProperties = MemoryDeviceProperties {
        max_page_size: 128 * 1024 * 1024,
        alignment: 32,
    };
541
542    fn options() -> MemoryManagementOptions {
543        MemoryManagementOptions {
544            name: "test".into(),
545            memory: MemoryAllocationOption::FromConfig,
546        }
547    }
548
    // Test pools with slices.
    #[test]
    #[cfg(not(exclusive_memory_only))]
    fn test_handle_mutability() {
        // A handle is only mutable while it is the sole reference to its slice.
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::SubSlices,
            Arc::new(ServerLogger::default()),
            options(),
        );
        let handle = memory_management.reserve(10).unwrap();
        let other_ref = handle.clone();
        assert!(!handle.can_mut(), "Handle can't be mut when multiple ref.");
        drop(other_ref);
        assert!(handle.can_mut(), "Handle should be mut when only one ref.");
    }
566
    // Test memory usage reporting with a single exclusive-pages pool.
    #[test]
    #[cfg(not(exclusive_memory_only))]
    fn test_memory_usage() {
        let max_page_size = 512;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages {
                        max_alloc_size: max_page_size,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );
        let handle = memory_management.reserve(100);
        let usage = memory_management.memory_usage();

        assert_eq!(usage.bytes_in_use, 100);
        assert!(usage.bytes_reserved >= 100 && usage.bytes_reserved <= max_page_size);

        // Drop and re-alloc: usage must not change since the page is reused.
        drop(handle);
        let _handle = memory_management.reserve(100);
        let usage_new = memory_management.memory_usage();
        assert_eq!(usage, usage_new);
    }
599
    #[test]
    fn alloc_two_chunks_on_one_page() {
        // Two half-page slices must share a single 2048-byte page.
        let page_size = 2048;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );

        let alloc_size = 512;
        let _handle = memory_management.reserve(alloc_size);
        let _new_handle = memory_management.reserve(alloc_size);

        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 2);
        assert_eq!(usage.bytes_in_use, alloc_size * 2);
        // Only one page was reserved for both slices.
        assert_eq!(usage.bytes_reserved, page_size);
    }
629
    #[test]
    fn alloc_reuses_storage() {
        // If no storage is re-used, this will allocate two pages.
        let page_size = 512;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );

        let alloc_size = 512;
        let _handle = memory_management.reserve(alloc_size);
        // Dropping the handle frees the slice for reuse by the next reserve.
        drop(_handle);
        let _new_handle = memory_management.reserve(alloc_size);

        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 1);
        assert_eq!(usage.bytes_in_use, alloc_size);
        assert_eq!(usage.bytes_reserved, page_size);
    }
661
    #[test]
    fn alloc_allocs_new_storage() {
        // Two 768-byte slices cannot share one 1024-byte page, so a second
        // page must be allocated.
        let page_size = 1024;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );

        let alloc_size = 768;
        let _handle = memory_management.reserve(alloc_size);
        let _new_handle = memory_management.reserve(alloc_size);

        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 2);
        assert_eq!(usage.bytes_in_use, alloc_size * 2);
        assert_eq!(usage.bytes_reserved, page_size * 2);
    }
691
    #[test]
    fn alloc_respects_alignment_size() {
        let page_size = 500;
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: page_size,
                alignment: 50,
            },
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );
        let alloc_size = 40;
        let _handle = memory_management.reserve(alloc_size);
        let _new_handle = memory_management.reserve(alloc_size);
        let usage = memory_management.memory_usage();
        // Each 40-byte slice is padded up to the 50-byte alignment,
        // i.e. 10 padding bytes per slice.
        assert_eq!(usage.bytes_padding, 10 * 2);
    }
720
    #[test]
    fn allocs_on_correct_page() {
        // One sliced pool per page size; each allocation should land on the
        // smallest page that fits it.
        let sizes = [100, 200, 300, 400];

        let pools = sizes
            .iter()
            .map(|size| MemoryPoolOptions {
                pool_type: PoolType::SlicedPages {
                    page_size: *size,
                    max_slice_size: *size,
                },
                dealloc_period: None,
            })
            .collect();
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: 128 * 1024 * 1024,
                alignment: 10,
            },
            MemoryConfiguration::Custom {
                pool_options: pools,
            },
            Arc::new(ServerLogger::default()),
            options(),
        );
        // Allocate one thing on each page.
        let alloc_sizes = [50, 150, 250, 350];
        let _handles = alloc_sizes.map(|s| memory_management.reserve(s));

        let usage = memory_management.memory_usage();

        // Every allocation is in use, and at least one page per pool is reserved.
        assert_eq!(usage.bytes_in_use, alloc_sizes.iter().sum::<u64>());
        assert!(usage.bytes_reserved >= sizes.iter().sum::<u64>());
    }
757
    #[test]
    #[cfg(not(exclusive_memory_only))]
    fn allocate_deallocate_reallocate() {
        // Reallocating the same sizes after a full drop must not grow memory.
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: 128 * 1024 * 1024,
                alignment: 32,
            },
            MemoryConfiguration::SubSlices,
            Arc::new(ServerLogger::default()),
            options(),
        );
        // Allocate a bunch
        let handles: Vec<_> = (0..5)
            .map(|i| memory_management.reserve(1000 * (i + 1)))
            .collect();
        let usage_before = memory_management.memory_usage();
        // Deallocate
        drop(handles);
        // Reallocate
        let _new_handles: Vec<_> = (0..5)
            .map(|i| memory_management.reserve(1000 * (i + 1)))
            .collect();
        let usage_after = memory_management.memory_usage();
        assert_eq!(usage_before.number_allocs, usage_after.number_allocs);
        assert_eq!(usage_before.bytes_in_use, usage_after.bytes_in_use);
        // Usage after can actually be _less_ because of defragging.
        assert!(usage_before.bytes_reserved >= usage_after.bytes_reserved);
    }
788
    #[test]
    #[cfg(not(exclusive_memory_only))]
    fn test_fragmentation_resistance() {
        // Mixed alloc/free patterns should not blow up reserved memory.
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: 128 * 1024 * 1024,
                alignment: 32,
            },
            MemoryConfiguration::SubSlices,
            Arc::new(ServerLogger::default()),
            options(),
        );
        // Allocate a mix of small and large chunks
        let sizes = [50, 1000, 100, 5000, 200, 10000, 300];
        let handles: Vec<_> = sizes
            .iter()
            .map(|&size| memory_management.reserve(size).unwrap())
            .collect();
        let usage_before = memory_management.memory_usage();
        // Deallocate every other allocation
        for i in (0..handles.len()).step_by(2) {
            drop(handles[i].clone());
        }
        // Reallocate similar sizes
        for &size in &sizes[0..sizes.len() / 2] {
            memory_management.reserve(size).unwrap();
        }
        let usage_after = memory_management.memory_usage();
        // Check that we haven't increased our memory usage significantly
        assert!(usage_after.bytes_reserved <= (usage_before.bytes_reserved as f64 * 1.1) as u64);
    }
821
    // Test pools without slices. More or less same as tests above.
    #[test]
    fn noslice_test_handle_mutability() {
        // A handle is only mutable while it is the sole reference to its slice.
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &(MemoryDeviceProperties {
                max_page_size: 128 * 1024 * 1024,
                alignment: 32,
            }),
            MemoryConfiguration::ExclusivePages,
            Arc::new(ServerLogger::default()),
            options(),
        );
        let handle = memory_management.reserve(10).unwrap();
        let other_ref = handle.clone();
        assert!(!handle.can_mut(), "Handle can't be mut when multiple ref.");
        drop(other_ref);
        assert!(handle.can_mut(), "Handle should be mut when only one ref.");
    }
841
    #[test]
    fn noslice_alloc_two_chunk() {
        // Exclusive pools give each allocation its own page.
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages {
                        max_alloc_size: 1024,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );

        let alloc_size = 512;
        let _handle = memory_management.reserve(alloc_size);
        let _new_handle = memory_management.reserve(alloc_size);

        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 2);
        assert_eq!(usage.bytes_in_use, alloc_size * 2);
        assert!(usage.bytes_reserved >= alloc_size * 2);
    }
868
869    #[test]
870    fn noslice_alloc_reuses_storage() {
871        // If no storage is re-used, this will allocate two pages.
872        let mut memory_management = MemoryManagement::from_configuration(
873            BytesStorage::default(),
874            &DUMMY_MEM_PROPS,
875            MemoryConfiguration::Custom {
876                pool_options: vec![MemoryPoolOptions {
877                    pool_type: PoolType::ExclusivePages {
878                        max_alloc_size: 1024,
879                    },
880                    dealloc_period: None,
881                }],
882            },
883            Arc::new(ServerLogger::default()),
884            options(),
885        );
886
887        let alloc_size = 512;
888        let _handle = memory_management.reserve(alloc_size);
889        drop(_handle);
890        let _new_handle = memory_management.reserve(alloc_size);
891
892        let usage = memory_management.memory_usage();
893        assert_eq!(usage.number_allocs, 1);
894        assert_eq!(usage.bytes_in_use, alloc_size);
895        assert!(usage.bytes_reserved >= alloc_size);
896    }
897
898    #[test]
899    fn noslice_alloc_allocs_new_storage() {
900        let mut memory_management = MemoryManagement::from_configuration(
901            BytesStorage::default(),
902            &DUMMY_MEM_PROPS,
903            MemoryConfiguration::Custom {
904                pool_options: vec![MemoryPoolOptions {
905                    pool_type: PoolType::ExclusivePages {
906                        max_alloc_size: 1024,
907                    },
908                    dealloc_period: None,
909                }],
910            },
911            Arc::new(ServerLogger::default()),
912            options(),
913        );
914
915        let alloc_size = 768;
916        let _handle = memory_management.reserve(alloc_size);
917        let _new_handle = memory_management.reserve(alloc_size);
918        let usage = memory_management.memory_usage();
919        assert_eq!(usage.number_allocs, 2);
920        assert_eq!(usage.bytes_in_use, alloc_size * 2);
921        assert!(usage.bytes_reserved >= alloc_size * 2);
922    }
923
924    #[test]
925    fn noslice_alloc_respects_alignment_size() {
926        let mut memory_management = MemoryManagement::from_configuration(
927            BytesStorage::default(),
928            &MemoryDeviceProperties {
929                max_page_size: DUMMY_MEM_PROPS.max_page_size,
930                alignment: 50,
931            },
932            MemoryConfiguration::Custom {
933                pool_options: vec![MemoryPoolOptions {
934                    pool_type: PoolType::ExclusivePages {
935                        max_alloc_size: 50 * 20,
936                    },
937                    dealloc_period: None,
938                }],
939            },
940            Arc::new(ServerLogger::default()),
941            options(),
942        );
943        let alloc_size = 40;
944        let _handle = memory_management.reserve(alloc_size);
945        let _new_handle = memory_management.reserve(alloc_size);
946        let usage = memory_management.memory_usage();
947        // Each slice should be aligned to 60 bytes, so 20 padding bytes.
948        assert_eq!(usage.bytes_padding, 10 * 2);
949    }
950
951    #[test]
952    fn noslice_allocs_on_correct_page() {
953        let pools = [100, 200, 300, 400]
954            .iter()
955            .map(|&size| MemoryPoolOptions {
956                pool_type: PoolType::SlicedPages {
957                    page_size: size,
958                    max_slice_size: size,
959                },
960                dealloc_period: None,
961            })
962            .collect();
963        let mut memory_management = MemoryManagement::from_configuration(
964            BytesStorage::default(),
965            &MemoryDeviceProperties {
966                max_page_size: DUMMY_MEM_PROPS.max_page_size,
967                alignment: 10,
968            },
969            MemoryConfiguration::Custom {
970                pool_options: pools,
971            },
972            Arc::new(ServerLogger::default()),
973            options(),
974        );
975        // Allocate one thing on each page.
976        let alloc_sizes = [50, 150, 250, 350];
977        let _handles = alloc_sizes.map(|s| memory_management.reserve(s));
978        let usage = memory_management.memory_usage();
979        // Total memory should be size of all pages, and no more.
980        assert_eq!(usage.bytes_in_use, alloc_sizes.iter().sum::<u64>());
981    }
982
983    #[test]
984    fn noslice_allocate_deallocate_reallocate() {
985        let mut memory_management = MemoryManagement::from_configuration(
986            BytesStorage::default(),
987            &MemoryDeviceProperties {
988                max_page_size: 128 * 1024 * 1024,
989                alignment: 32,
990            },
991            MemoryConfiguration::ExclusivePages,
992            Arc::new(ServerLogger::default()),
993            options(),
994        );
995        // Allocate a bunch
996        let handles: Vec<_> = (0..5)
997            .map(|i| memory_management.reserve(1000 * (i + 1)))
998            .collect();
999        let usage_before = memory_management.memory_usage();
1000        // Deallocate
1001        drop(handles);
1002        // Reallocate
1003        let _new_handles: Vec<_> = (0..5)
1004            .map(|i| memory_management.reserve(1000 * (i + 1)))
1005            .collect();
1006        let usage_after = memory_management.memory_usage();
1007        assert_eq!(usage_before.number_allocs, usage_after.number_allocs);
1008        assert_eq!(usage_before.bytes_in_use, usage_after.bytes_in_use);
1009        assert_eq!(usage_before.bytes_reserved, usage_after.bytes_reserved);
1010    }
1011}