// cubecl_runtime/memory_management/memory_manage.rs

1use super::{
2    MemoryConfiguration, MemoryPoolOptions, MemoryUsage, PoolType,
3    memory_pool::{ExclusiveMemoryPool, MemoryPool, PersistentPool, SlicedPool},
4};
5use crate::{
6    config::{
7        GlobalConfig,
8        memory::{MemoryLogLevel, PersistentMemory},
9    },
10    logging::ServerLogger,
11    memory_management::{BytesFormat, memory_pool::Slice},
12    server::IoError,
13    storage::{ComputeStorage, StorageHandle},
14};
15
16use alloc::format;
17use alloc::string::{String, ToString};
18#[cfg(not(exclusive_memory_only))]
19use alloc::vec;
20use alloc::vec::Vec;
21use cubecl_common::{backtrace::BackTrace, stub::Arc};
22use cubecl_ir::MemoryDeviceProperties;
23
24pub use super::memory_pool::{ManagedMemoryBinding, handle::*};
25
// These are 288 bytes vs 64 bytes. Adding boxing isn't really worth
// saving the 200 bytes.
#[allow(clippy::large_enum_variant)]
/// A dynamic pool that dispatches to one of two concrete implementations,
/// chosen per-pool at configuration time (see `from_configuration`).
enum DynamicPool {
    /// Pages subdivided into multiple slices ("reduces fragmentation" per the
    /// pool-construction comments below).
    Sliced(SlicedPool),
    /// Pages for exclusive allocations — presumably one allocation per page;
    /// see `ExclusiveMemoryPool` for the actual contract.
    Exclusive(ExclusiveMemoryPool),
}
33
/// Forwards every [`MemoryPool`] operation to the concrete pool variant.
impl MemoryPool for DynamicPool {
    /// Whether this pool accepts allocations of `size` bytes.
    fn accept(&self, size: u64) -> bool {
        match self {
            DynamicPool::Sliced(pool) => pool.accept(size),
            DynamicPool::Exclusive(pool) => pool.accept(size),
        }
    }

    /// Looks up the slice tracked for `binding`.
    fn find(&self, binding: &ManagedMemoryBinding) -> Result<&Slice, IoError> {
        match self {
            DynamicPool::Sliced(m) => m.find(binding),
            DynamicPool::Exclusive(m) => m.find(binding),
        }
    }

    /// Tries to reuse memory the pool already owns; `None` means the caller
    /// must fall back to a fresh [`MemoryPool::alloc`] (see `reserve` below).
    #[cfg_attr(feature = "tracing", tracing::instrument(level = "trace", skip(self)))]
    fn try_reserve(&mut self, size: u64) -> Option<ManagedMemoryHandle> {
        match self {
            DynamicPool::Sliced(m) => m.try_reserve(size),
            DynamicPool::Exclusive(m) => m.try_reserve(size),
        }
    }

    /// Allocates a fresh region of `size` bytes from `storage`.
    #[cfg_attr(
        feature = "tracing",
        tracing::instrument(level = "trace", skip(self, storage))
    )]
    fn alloc<Storage: ComputeStorage>(
        &mut self,
        storage: &mut Storage,
        size: u64,
    ) -> Result<ManagedMemoryHandle, IoError> {
        match self {
            DynamicPool::Sliced(m) => m.alloc(storage, size),
            DynamicPool::Exclusive(m) => m.alloc(storage, size),
        }
    }

    /// Reports this pool's current usage statistics.
    fn get_memory_usage(&self) -> MemoryUsage {
        match self {
            DynamicPool::Sliced(m) => m.get_memory_usage(),
            DynamicPool::Exclusive(m) => m.get_memory_usage(),
        }
    }

    /// Delegates cleanup to the variant, then flushes the storage.
    fn cleanup<Storage: ComputeStorage>(
        &mut self,
        storage: &mut Storage,
        alloc_nr: u64,
        explicit: bool,
    ) {
        match self {
            DynamicPool::Sliced(m) => m.cleanup(storage, alloc_nr, explicit),
            DynamicPool::Exclusive(m) => m.cleanup(storage, alloc_nr, explicit),
        };
        // NOTE(review): flush is called unconditionally, even when the pool
        // freed nothing — confirm `ComputeStorage::flush` is cheap in that case.
        storage.flush();
    }

    /// Binds `reserved` to `assigned` at `cursor` in the underlying pool.
    fn bind(
        &mut self,
        reserved: ManagedMemoryHandle,
        assigned: ManagedMemoryHandle,
        cursor: u64,
    ) -> Result<(), IoError> {
        match self {
            DynamicPool::Sliced(m) => m.bind(reserved, assigned, cursor),
            DynamicPool::Exclusive(m) => m.bind(reserved, assigned, cursor),
        }
    }
}
104
#[derive(Default, Clone, Copy, Debug)]
/// The mode of allocation used.
///
/// See [`MemoryManagement::mode`] for how the global configuration can
/// override or pin this value.
pub enum MemoryAllocationMode {
    /// Use the automatic memory management strategy for allocation.
    #[default]
    Auto,
    /// Use a persistent memory management strategy, meaning that all allocations are for data that is
    /// likely never going to be freed.
    Persistent,
}
115
/// Reserves and keeps track of chunks of memory in the storage, and slices upon these chunks.
pub struct MemoryManagement<Storage> {
    // Human-readable name used to tag log messages.
    name: String,
    // Pool for long-lived allocations; addressed by pool indices past `pools`
    // (see `find` / `bind`).
    persistent: PersistentPool,
    // Dynamic pools; `reserve` picks the first pool that accepts a size.
    pools: Vec<DynamicPool>,
    // Backing storage where the actual memory is allocated.
    storage: Storage,
    // Monotonic counter of `reserve` calls, passed to pool cleanups.
    alloc_reserve_count: u64,
    // Current allocation mode (changes may be vetoed by `config`).
    mode: MemoryAllocationMode,
    // Persistent-memory policy snapshot taken from the global config.
    config: PersistentMemory,
    // Logger for memory diagnostics.
    logger: Arc<ServerLogger>,
}
127
/// Generates up to `max_buckets` bucket sizes spaced exponentially between
/// `start_size` and `end_size`, each rounded up to a multiple of `alignment`.
///
/// Alignment can collapse adjacent buckets to the same value, so the result
/// may contain fewer than `max_buckets` entries (consecutive duplicates are
/// removed; the sequence is monotonically non-decreasing, so that removes all
/// duplicates).
///
/// Degenerate counts are handled explicitly: `max_buckets == 0` yields an
/// empty vector, and `max_buckets == 1` yields a single bucket of `end_size`
/// (aligned) so it can hold the largest allocation. The previous
/// interpolation divided by `max_buckets - 1` and produced a bogus `0` bucket
/// for a single-bucket request.
fn generate_bucket_sizes(
    start_size: u64,
    end_size: u64,
    max_buckets: usize,
    alignment: u64,
) -> Vec<u64> {
    if max_buckets == 0 {
        return Vec::new();
    }
    if max_buckets == 1 {
        // A lone bucket must accept everything up to `end_size`.
        let mut buckets = Vec::with_capacity(1);
        buckets.push(end_size.next_multiple_of(alignment));
        return buckets;
    }

    let mut buckets = Vec::with_capacity(max_buckets);
    // Interpolate in log-space so buckets are spaced exponentially.
    let log_min = (start_size as f64).ln();
    let log_max = (end_size as f64).ln();
    let log_range = log_max - log_min;

    // Pure exponential performed best, but let's try slightly denser in lower-mid range
    for i in 0..max_buckets {
        // `p` walks [0, 1]; division is safe since max_buckets >= 2 here.
        let p = i as f64 / (max_buckets - 1) as f64;
        // Slight bias toward lower-mid range with less aggressive curve than sigmoid
        let log_size = log_min + log_range * p;
        let size = log_size.exp() as u64;
        let aligned_size = size.next_multiple_of(alignment);
        buckets.push(aligned_size);
    }

    buckets.dedup();
    buckets
}
152
// Scale that grows a pool's dealloc period with its bucket size.
// NOTE(review): named `_MB` but the value is 1 GiB (1024^3) — confirm intent.
const DEALLOC_SCALE_MB: u64 = 1024 * 1024 * 1024;
// Baseline dealloc period passed to exclusive pools (units are whatever
// `ExclusiveMemoryPool::new` expects — presumably allocation counts; verify).
const BASE_DEALLOC_PERIOD: u64 = 5000;
155
/// The options for creating a new [`MemoryManagement`] instance.
#[derive(Debug)]
pub struct MemoryManagementOptions {
    /// The name of the memory management.
    name: String,
    /// The [`MemoryAllocationOption`] used by this instance.
    memory: MemoryAllocationOption,
}
164
165impl MemoryManagementOptions {
166    /// Creates a new [`MemoryManagementOptions`].
167    pub fn new<S: Into<String>>(name: S) -> Self {
168        Self {
169            name: name.into(),
170            memory: MemoryAllocationOption::FromConfig,
171        }
172    }
173
174    /// Forces the [`MemoryAllocationMode`] during execution to always be the provided one.
175    pub fn mode(mut self, mode: MemoryAllocationMode) -> Self {
176        self.memory = MemoryAllocationOption::Provided(mode);
177        self
178    }
179}
180
#[derive(Default, Debug)]
/// Determines which [`MemoryAllocationMode`] is used during allocations.
///
/// Resolved once in [`MemoryManagement::from_configuration`].
enum MemoryAllocationOption {
    #[default]
    /// Uses the [`GlobalConfig`] to determine the mode of allocation.
    FromConfig,
    /// Use the provided [`MemoryAllocationMode`].
    Provided(MemoryAllocationMode),
}
190
191impl<Storage: ComputeStorage> MemoryManagement<Storage> {
    /// Creates the options from device limits.
    ///
    /// Builds the list of dynamic pool configurations according to `config`,
    /// instantiates the pools, and resolves the initial
    /// [`MemoryAllocationMode`] from either `options` or the global config.
    pub fn from_configuration(
        storage: Storage,
        properties: &MemoryDeviceProperties,
        config: MemoryConfiguration,
        logger: Arc<ServerLogger>,
        options: MemoryManagementOptions,
    ) -> Self {
        let pool_options = match config {
            #[cfg(not(exclusive_memory_only))]
            MemoryConfiguration::SubSlices => {
                // Round chunk size to be aligned.
                let memory_alignment = properties.alignment;
                let max_page = properties.max_page_size;
                let mut pools = Vec::new();

                const MB: u64 = 1024 * 1024;

                // Add in a pool for allocations that are smaller than the min alignment,
                // as they can't use offsets at all (on wgpu at least).
                pools.push(MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages { max_alloc_size: 0 },
                    dealloc_period: None,
                });

                // Build a ladder of page sizes by repeatedly dividing the max
                // page size by 4 until dropping below 32 MB.
                let mut current = max_page;
                let mut max_sizes = vec![];
                let mut page_sizes = vec![];
                let mut base = pools.len() as u32;

                while current >= 32 * MB {
                    current /= 4;

                    // Make sure every pool has an aligned size.
                    current = current.next_multiple_of(memory_alignment);

                    // Max slice size shrinks by an extra power of two per level.
                    max_sizes.push(current / 2u64.pow(base));
                    page_sizes.push(current);
                    base += 1;
                }

                // The loop produced sizes from large to small; reverse so the
                // list is ordered small to large (`reserve` takes the first
                // pool that accepts a size).
                max_sizes.reverse();
                page_sizes.reverse();

                for i in 0..max_sizes.len() {
                    let max = max_sizes[i];
                    let page_size = page_sizes[i];

                    pools.push(MemoryPoolOptions {
                        // Creating max slices lower than the chunk size reduces fragmentation.
                        pool_type: PoolType::SlicedPages {
                            page_size,
                            max_slice_size: max,
                        },
                        dealloc_period: None,
                    });
                }

                // Add pools from big to small.
                // NOTE(review): `max_page / alignment * alignment` rounds
                // *down*, unlike the `next_multiple_of` (round up) used in the
                // loop above — confirm the asymmetry is intentional.
                pools.push(MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size: max_page / memory_alignment * memory_alignment,
                        max_slice_size: max_page / memory_alignment * memory_alignment,
                    },
                    dealloc_period: None,
                });
                pools
            }
            MemoryConfiguration::ExclusivePages => {
                // Add all bin sizes. Nb: because of alignment some buckets
                // end up as the same size, so only want unique ones,
                // but also keep the order, so a BTree will do.
                const MIN_BUCKET_SIZE: u64 = 1024 * 32;
                const NUM_POOLS: usize = 24;

                let sizes = generate_bucket_sizes(
                    MIN_BUCKET_SIZE,
                    properties.max_page_size,
                    NUM_POOLS,
                    properties.alignment,
                );

                sizes
                    .iter()
                    .map(|&size| {
                        // Larger buckets get a longer dealloc period.
                        // NOTE(review): `.round()` applies to the scale factor
                        // only, not the product — for any size below half of
                        // `DEALLOC_SCALE_MB` this evaluates to exactly
                        // `BASE_DEALLOC_PERIOD`; confirm the parenthesization.
                        let dealloc_period = (BASE_DEALLOC_PERIOD as f64
                            * (1.0 + size as f64 / (DEALLOC_SCALE_MB as f64)).round())
                            as u64;

                        MemoryPoolOptions {
                            pool_type: PoolType::ExclusivePages {
                                max_alloc_size: size,
                            },
                            dealloc_period: Some(dealloc_period),
                        }
                    })
                    .collect()
            }
            MemoryConfiguration::Custom { pool_options } => pool_options,
        };

        // Log the chosen pool layout unless memory logging is disabled.
        logger.log_memory(
            |level| !matches!(level, MemoryLogLevel::Disabled),
            || {
                let mut msg = String::new();
                for pool in pool_options.iter() {
                    msg += &format!("[{}] Using memory pool: \n {pool:?}\n", options.name);
                }
                msg
            },
        );

        // Instantiate the pools; each receives its index (`pool_pos`) so
        // bindings can later be routed back to the owning pool.
        let pools: Vec<_> = pool_options
            .iter()
            .enumerate()
            .map(|(pool_pos, options)| {
                let pool_pos = pool_pos as u8;

                match options.pool_type {
                    PoolType::SlicedPages {
                        page_size,
                        max_slice_size,
                    } => DynamicPool::Sliced(SlicedPool::new(
                        page_size,
                        max_slice_size,
                        properties.alignment,
                        pool_pos,
                    )),
                    PoolType::ExclusivePages { max_alloc_size } => {
                        DynamicPool::Exclusive(ExclusiveMemoryPool::new(
                            max_alloc_size,
                            properties.alignment,
                            // No configured period effectively disables
                            // periodic deallocation for this pool.
                            options.dealloc_period.unwrap_or(u64::MAX),
                            pool_pos,
                        ))
                    }
                }
            })
            .collect();

        let config = GlobalConfig::get().memory.persistent_memory.clone();

        // A caller-provided mode wins; otherwise the global config decides,
        // with `Enforced` forcing persistent allocation.
        let mode = match options.memory {
            MemoryAllocationOption::Provided(mode) => mode,
            MemoryAllocationOption::FromConfig => match config {
                PersistentMemory::Enabled => MemoryAllocationMode::Auto,
                PersistentMemory::Disabled => MemoryAllocationMode::Auto,
                PersistentMemory::Enforced => MemoryAllocationMode::Persistent,
            },
        };

        Self {
            name: options.name,
            // The persistent pool takes the next index after the dynamic
            // pools, which is how `find`/`bind` route to it.
            persistent: PersistentPool::new(
                properties.max_page_size,
                properties.alignment,
                pools.len() as u8,
            ),
            pools,
            storage,
            alloc_reserve_count: 0,
            mode,
            config,
            logger,
        }
    }
358
359    /// Change the mode of allocation.
360    pub fn mode(&mut self, mode: MemoryAllocationMode) {
361        // We override the mode based on the cubecl config.
362        let mode = match self.config {
363            PersistentMemory::Enabled => mode,
364            PersistentMemory::Disabled | PersistentMemory::Enforced => return,
365        };
366
367        self.logger.log_memory(
368            |level| !matches!(level, MemoryLogLevel::Disabled),
369            || {
370                format!(
371                    "[{}] Setting memory allocation mode: from {:?} => {mode:?}",
372                    self.name, self.mode
373                )
374            },
375        );
376        self.mode = mode;
377    }
378
379    /// Cleanup allocations in pools that are deemed unnecessary.
380    pub fn cleanup(&mut self, explicit: bool) {
381        self.logger.log_memory(
382            |level| !matches!(level, MemoryLogLevel::Disabled) && explicit,
383            || "Manual memory cleanup ...".to_string(),
384        );
385
386        self.persistent
387            .cleanup(&mut self.storage, self.alloc_reserve_count, explicit);
388
389        for pool in self.pools.iter_mut() {
390            pool.cleanup(&mut self.storage, self.alloc_reserve_count, explicit);
391        }
392    }
393
394    /// Returns the storage from the specified binding
395    pub fn get_cursor(&self, binding: ManagedMemoryBinding) -> Result<u64, IoError> {
396        let slice = self.find(binding)?;
397        Ok(slice.cursor)
398    }
399
    /// Looks up the slice tracked for the specified binding, routing to the
    /// persistent pool when the binding's pool index is past the dynamic pools.
    fn find(&self, binding: ManagedMemoryBinding) -> Result<&Slice, IoError> {
        let id = binding.descriptor();

        // The persistent pool is created with index `pools.len()`, so any
        // out-of-range index refers to it.
        if id.location().pool >= self.pools.len() as u8 {
            return self.persistent.find(&binding);
        }

        // After the bounds check above `get` cannot fail; the error path is
        // kept for defensiveness.
        let pool =
            self.pools
                .get(id.location().pool as usize)
                .ok_or_else(|| IoError::NotFound {
                    backtrace: BackTrace::capture(),
                    reason: format!("Pool {} doesn't exist", id.location().pool).into(),
                })?;

        let slice = pool.find(&binding)?;

        // Sanity check: the pool returned the slice for the right binding.
        assert_eq!(slice.handle.descriptor(), binding.descriptor());

        Ok(slice)
    }
422
423    /// Returns the storage from the specified binding
424    pub fn get_storage(&mut self, binding: ManagedMemoryBinding) -> Result<StorageHandle, IoError> {
425        let slice = self.find(binding)?;
426        Ok(slice.storage.clone())
427    }
428
429    /// Returns the resource from the storage at the specified handle
430    pub fn get_resource(
431        &mut self,
432        binding: ManagedMemoryBinding,
433        offset_start: Option<u64>,
434        offset_end: Option<u64>,
435    ) -> Result<Storage::Resource, IoError> {
436        let handle = self.get_storage(binding)?;
437
438        let handle = match offset_start {
439            Some(offset) => handle.offset_start(offset),
440            None => handle,
441        };
442        let handle = match offset_end {
443            Some(offset) => handle.offset_end(offset),
444            None => handle,
445        };
446        Ok(self.storage().get(&handle))
447    }
448
    /// Finds a spot in memory for a resource with the given size in bytes, and returns a handle to it
    ///
    /// Resolution order: reuse persistent memory, allocate persistently (when
    /// in persistent mode or the persistent pool can serve this size), then
    /// fall back to the first dynamic pool that accepts the size.
    #[cfg_attr(feature = "tracing", tracing::instrument(level = "trace", skip(self)))]
    pub fn reserve(&mut self, size: u64) -> Result<ManagedMemoryHandle, IoError> {
        // If this happens every nanosecond, counts overflows after 585 years, so not worth thinking too
        // hard about overflow here.
        self.alloc_reserve_count += 1;

        // 1) Reuse memory the persistent pool already owns.
        if let Some(val) = self.persistent.try_reserve(size) {
            self.logger.log_memory(
                |level| matches!(level, MemoryLogLevel::Full),
                || {
                    format!(
                        "[{}] Reserved memory {size} using persistent memory",
                        self.name
                    )
                },
            );
            return Ok(val);
        }

        // 2) Allocate a fresh persistent page when forced by the mode, or when
        //    the persistent pool reports it handles this size (`has_size` —
        //    exact semantics defined by `PersistentPool`).
        if matches!(self.mode, MemoryAllocationMode::Persistent) || self.persistent.has_size(size) {
            let allocated = self.persistent.alloc(&mut self.storage, size);

            self.logger.log_memory(
                |level| !matches!(level, MemoryLogLevel::Disabled),
                || {
                    format!(
                        "[{}] Allocated a new memory page using persistent memory, \n{}",
                        self.name, self,
                    )
                },
            );
            return allocated;
        }

        self.logger.log_memory(
            |level| matches!(level, MemoryLogLevel::Full),
            || {
                format!(
                    "[{}] Reserved memory {} using dynamic pool",
                    self.name,
                    BytesFormat::new(size)
                )
            },
        );

        // Find first pool that fits this allocation
        let pool = self
            .pools
            .iter_mut()
            .find(|p| p.accept(size))
            .ok_or(IoError::BufferTooBig {
                size,
                backtrace: BackTrace::capture(),
            })?;

        // 3) Reuse a free slot in that pool, or allocate a fresh page from it.
        if let Some(slice) = pool.try_reserve(size) {
            return Ok(slice);
        }

        let allocated = pool.alloc(&mut self.storage, size);

        self.logger.log_memory(
            |level| matches!(level, MemoryLogLevel::Full),
            || {
                format!(
                    "[{}], Allocated a new memory page, current usage: \n{}",
                    self.name, self
                )
            },
        );

        allocated
    }
523
    /// Fetch the storage used by the memory manager.
    ///
    /// # Notes
    ///
    /// The storage should probably not be used for allocations since the handles won't be
    /// compatible with the ones provided by the current trait. Prefer using the
    /// [alloc](ComputeStorage::alloc) and [dealloc](ComputeStorage::dealloc) functions.
    ///
    /// This is useful if you need to time the deallocations based on async computation, or to
    /// change the mode of storage for different reasons.
    pub fn storage(&mut self) -> &mut Storage {
        &mut self.storage
    }
537
538    /// Get the current memory usage.
539    pub fn memory_usage(&self) -> MemoryUsage {
540        let memory_usage = self.pools.iter().map(|x| x.get_memory_usage()).fold(
541            MemoryUsage {
542                number_allocs: 0,
543                bytes_in_use: 0,
544                bytes_padding: 0,
545                bytes_reserved: 0,
546            },
547            |m1, m2| m1.combine(m2),
548        );
549        memory_usage.combine(self.persistent.get_memory_usage())
550    }
551
    /// Print out a report of the current memory usage.
    ///
    /// Only logs when the `std` feature is enabled; a no-op otherwise.
    pub fn print_memory_usage(&self) {
        #[cfg(feature = "std")]
        log::info!("{}", self.memory_usage());
    }
557
    /// Binds the given [handle](HandleId) to a [`MemorySlot`].
    ///
    /// Routes to the persistent pool when the handle's pool index is past the
    /// dynamic pools, mirroring the routing in `find`.
    pub fn bind(
        &mut self,
        reserved: ManagedMemoryHandle,
        assigned: ManagedMemoryHandle,
        cursor: u64,
    ) -> Result<(), IoError> {
        let descriptor = reserved.descriptor();

        // A zero `init` flag marks a reservation that was never initialized
        // (per the error message below).
        if descriptor.location().init == 0 {
            return Err(IoError::NotFound {
                backtrace: BackTrace::capture(),
                reason: "Reserved memory isn't initialized".into(),
            });
        }

        let pool_index = descriptor.location().pool as usize;
        if pool_index >= self.pools.len() {
            return self.persistent.bind(reserved, assigned, cursor);
        }

        // After the bounds check above `get_mut` cannot fail; the error path
        // is kept for defensiveness.
        self.pools
            .get_mut(pool_index)
            .map(|p| p.bind(reserved, assigned, cursor))
            .ok_or_else(|| IoError::NotFound {
                backtrace: BackTrace::capture(),
                reason: format!("Memory pool {} doesn't exist", pool_index).into(),
            })?
    }
587}
588
589impl<Storage: ComputeStorage> core::fmt::Display for MemoryManagement<Storage> {
590    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
591        f.write_str("\n# MemoryManagement\n\n")?;
592        f.write_fmt(format_args!(" - name: {:?}\n", self.name))?;
593        f.write_fmt(format_args!("\n## Persistent\n\n{}", self.persistent))?;
594        f.write_str("\n## Dynamic\n\n")?;
595
596        for pool in self.pools.iter() {
597            match pool {
598                DynamicPool::Sliced(pool) => f.write_fmt(format_args!("{pool}\n"))?,
599                DynamicPool::Exclusive(pool) => f.write_fmt(format_args!("{pool}\n"))?,
600            }
601        }
602        let memory_usage = self.memory_usage();
603        f.write_fmt(format_args!("\n## Summary\n\n{memory_usage}"))?;
604
605        Ok(())
606    }
607}
608
609impl<Storage> core::fmt::Debug for MemoryManagement<Storage> {
610    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
611        f.write_str(
612            alloc::format!(
613                "DynamicMemoryManagement {:?}",
614                core::any::type_name::<Storage>(),
615            )
616            .as_str(),
617        )
618    }
619}
620
621#[cfg(test)]
622mod tests {
623    use super::*;
624    use crate::{memory_management::MemoryManagement, storage::BytesStorage};
625    use alloc::vec;
626
    // Shared device properties for tests: 128 MiB max page, 32-byte alignment.
    const DUMMY_MEM_PROPS: MemoryDeviceProperties = MemoryDeviceProperties {
        max_page_size: 128 * 1024 * 1024,
        alignment: 32,
    };
631
632    fn options() -> MemoryManagementOptions {
633        MemoryManagementOptions {
634            name: "test".into(),
635            memory: MemoryAllocationOption::FromConfig,
636        }
637    }
638
    // Test pools with slices.
    // A handle is only mutable while it is the sole reference to its memory.
    #[test_log::test]
    #[cfg(not(exclusive_memory_only))]
    fn test_handle_mutability() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::SubSlices,
            Arc::new(ServerLogger::default()),
            options(),
        );
        let handle = memory_management.reserve(10).unwrap();
        let other_ref = handle.clone();
        assert!(!handle.can_mut(), "Handle can't be mut when multiple ref.");
        drop(other_ref);
        assert!(handle.can_mut(), "Handle should be mut when only one ref.");
    }
656
    // Test pools with slices.
    // NOTE(review): despite the comment above, this test configures an
    // exclusive-pages pool, not sliced pages.
    #[test_log::test]
    #[cfg(not(exclusive_memory_only))]
    fn test_memory_usage() {
        let max_page_size = 512;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::ExclusivePages {
                        max_alloc_size: max_page_size,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );
        let handle = memory_management.reserve(100);
        let usage = memory_management.memory_usage();

        // In-use counts the requested bytes; reserved may round up to the
        // page size.
        assert_eq!(usage.bytes_in_use, 100);
        assert!(usage.bytes_reserved >= 100 && usage.bytes_reserved <= max_page_size);

        // Drop and re-alloc.
        // Reserving the same size again must reuse the freed page, leaving
        // the usage numbers unchanged.
        drop(handle);
        let _handle = memory_management.reserve(100);
        let usage_new = memory_management.memory_usage();
        assert_eq!(usage, usage_new);
    }
689
    // Two 512-byte slices should share a single 2048-byte page.
    #[test_log::test]
    fn alloc_two_chunks_on_one_page() {
        let page_size = 2048;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );

        let alloc_size = 512;
        let _handle = memory_management.reserve(alloc_size);
        let _new_handle = memory_management.reserve(alloc_size);

        // One page reserved, two live slices on it.
        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 2);
        assert_eq!(usage.bytes_in_use, alloc_size * 2);
        assert_eq!(usage.bytes_reserved, page_size);
    }
719
    // Dropping a slice and reserving the same size again must reuse the
    // existing page instead of allocating a second one.
    #[test_log::test]
    fn alloc_reuses_storage() {
        // If no storage is re-used, this will allocate two pages.
        let page_size = 512;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );

        let alloc_size = 512;
        let _handle = memory_management.reserve(alloc_size);
        drop(_handle);
        let _new_handle = memory_management.reserve(alloc_size);

        // Only one page was ever reserved, holding one live slice.
        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 1);
        assert_eq!(usage.bytes_in_use, alloc_size);
        assert_eq!(usage.bytes_reserved, page_size);
    }
751
    // Two 768-byte slices cannot share a 1024-byte page, so a second page
    // must be allocated.
    #[test_log::test]
    fn alloc_allocs_new_storage() {
        let page_size = 1024;

        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &DUMMY_MEM_PROPS,
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );

        let alloc_size = 768;
        let _handle = memory_management.reserve(alloc_size);
        let _new_handle = memory_management.reserve(alloc_size);

        let usage = memory_management.memory_usage();
        assert_eq!(usage.number_allocs, 2);
        assert_eq!(usage.bytes_in_use, alloc_size * 2);
        assert_eq!(usage.bytes_reserved, page_size * 2);
    }
781
    // Slices are padded up to the storage alignment.
    #[test_log::test]
    fn alloc_respects_alignment_size() {
        let page_size = 500;
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: page_size,
                alignment: 50,
            },
            MemoryConfiguration::Custom {
                pool_options: vec![MemoryPoolOptions {
                    pool_type: PoolType::SlicedPages {
                        page_size,
                        max_slice_size: page_size,
                    },
                    dealloc_period: None,
                }],
            },
            Arc::new(ServerLogger::default()),
            options(),
        );
        let alloc_size = 40;
        let _handle = memory_management.reserve(alloc_size);
        let _new_handle = memory_management.reserve(alloc_size);
        let usage = memory_management.memory_usage();
        // Each 40-byte slice is padded up to the 50-byte alignment:
        // 10 bytes of padding per slice, 20 in total.
        assert_eq!(usage.bytes_padding, 10 * 2);
    }
810
    // Each allocation should be routed to the smallest pool that fits it.
    #[test_log::test]
    fn allocs_on_correct_page() {
        let sizes = [100, 200, 300, 400];

        let pools = sizes
            .iter()
            .map(|size| MemoryPoolOptions {
                pool_type: PoolType::SlicedPages {
                    page_size: *size,
                    max_slice_size: *size,
                },
                dealloc_period: None,
            })
            .collect();
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: 128 * 1024 * 1024,
                alignment: 10,
            },
            MemoryConfiguration::Custom {
                pool_options: pools,
            },
            Arc::new(ServerLogger::default()),
            options(),
        );
        // Allocate one thing on each page.
        let alloc_sizes = [50, 150, 250, 350];
        let _handles = alloc_sizes.map(|s| memory_management.reserve(s));

        let usage = memory_management.memory_usage();

        // In-use memory equals the sum of the allocations; reserved memory
        // covers at least one page per pool.
        assert_eq!(usage.bytes_in_use, alloc_sizes.iter().sum::<u64>());
        assert!(usage.bytes_reserved >= sizes.iter().sum::<u64>());
    }
847
    // Dropping a batch of handles and reallocating the same sizes must not
    // grow the reserved memory.
    #[test_log::test]
    #[cfg(not(exclusive_memory_only))]
    fn allocate_deallocate_reallocate() {
        let mut memory_management = MemoryManagement::from_configuration(
            BytesStorage::default(),
            &MemoryDeviceProperties {
                max_page_size: 128 * 1024 * 1024,
                alignment: 32,
            },
            MemoryConfiguration::SubSlices,
            Arc::new(ServerLogger::default()),
            options(),
        );
        // Allocate a bunch
        let handles: Vec<_> = (0..5)
            .map(|i| memory_management.reserve(1000 * (i + 1)))
            .collect();
        let usage_before = memory_management.memory_usage();
        // Deallocate
        drop(handles);
        // Reallocate
        let _new_handles: Vec<_> = (0..5)
            .map(|i| memory_management.reserve(1000 * (i + 1)))
            .collect();
        let usage_after = memory_management.memory_usage();
        assert_eq!(usage_before.number_allocs, usage_after.number_allocs);
        assert_eq!(usage_before.bytes_in_use, usage_after.bytes_in_use);
        // Usage after can actually be _less_ because of defragging.
        assert!(usage_before.bytes_reserved >= usage_after.bytes_reserved);
    }
878
879    #[test_log::test]
880    #[cfg(not(exclusive_memory_only))]
881    fn test_fragmentation_resistance() {
882        let mut memory_management = MemoryManagement::from_configuration(
883            BytesStorage::default(),
884            &MemoryDeviceProperties {
885                max_page_size: 128 * 1024 * 1024,
886                alignment: 32,
887            },
888            MemoryConfiguration::SubSlices,
889            Arc::new(ServerLogger::default()),
890            options(),
891        );
892        // Allocate a mix of small and large chunks
893        let sizes = [50, 1000, 100, 5000, 200, 10000, 300];
894        let handles: Vec<_> = sizes
895            .iter()
896            .map(|&size| memory_management.reserve(size).unwrap())
897            .collect();
898        let usage_before = memory_management.memory_usage();
899        // Deallocate every other allocation
900        for i in (0..handles.len()).step_by(2) {
901            drop(handles[i].clone());
902        }
903        // Reallocate similar sizes
904        for &size in &sizes[0..sizes.len() / 2] {
905            memory_management.reserve(size).unwrap();
906        }
907        let usage_after = memory_management.memory_usage();
908        // Check that we haven't increased our memory usage significantly
909        assert!(usage_after.bytes_reserved <= (usage_before.bytes_reserved as f64 * 1.1) as u64);
910    }
911
912    // Test pools without slices. More or less same as tests above.
913    #[test_log::test]
914    fn noslice_test_handle_mutability() {
915        let mut memory_management = MemoryManagement::from_configuration(
916            BytesStorage::default(),
917            &(MemoryDeviceProperties {
918                max_page_size: 128 * 1024 * 1024,
919                alignment: 32,
920            }),
921            MemoryConfiguration::ExclusivePages,
922            Arc::new(ServerLogger::default()),
923            options(),
924        );
925        let handle = memory_management.reserve(10).unwrap();
926        let other_ref = handle.clone();
927        assert!(!handle.can_mut(), "Handle can't be mut when multiple ref.");
928        drop(other_ref);
929        assert!(handle.can_mut(), "Handle should be mut when only one ref.");
930    }
931
932    #[test_log::test]
933    fn noslice_alloc_two_chunk() {
934        let mut memory_management = MemoryManagement::from_configuration(
935            BytesStorage::default(),
936            &DUMMY_MEM_PROPS,
937            MemoryConfiguration::Custom {
938                pool_options: vec![MemoryPoolOptions {
939                    pool_type: PoolType::ExclusivePages {
940                        max_alloc_size: 1024,
941                    },
942                    dealloc_period: None,
943                }],
944            },
945            Arc::new(ServerLogger::default()),
946            options(),
947        );
948
949        let alloc_size = 512;
950        let _handle = memory_management.reserve(alloc_size);
951        let _new_handle = memory_management.reserve(alloc_size);
952
953        let usage = memory_management.memory_usage();
954        assert_eq!(usage.number_allocs, 2);
955        assert_eq!(usage.bytes_in_use, alloc_size * 2);
956        assert!(usage.bytes_reserved >= alloc_size * 2);
957    }
958
959    #[test_log::test]
960    fn noslice_alloc_reuses_storage() {
961        // If no storage is re-used, this will allocate two pages.
962        let mut memory_management = MemoryManagement::from_configuration(
963            BytesStorage::default(),
964            &DUMMY_MEM_PROPS,
965            MemoryConfiguration::Custom {
966                pool_options: vec![MemoryPoolOptions {
967                    pool_type: PoolType::ExclusivePages {
968                        max_alloc_size: 1024,
969                    },
970                    dealloc_period: None,
971                }],
972            },
973            Arc::new(ServerLogger::default()),
974            options(),
975        );
976
977        let alloc_size = 512;
978        let _handle = memory_management.reserve(alloc_size);
979        drop(_handle);
980        let _new_handle = memory_management.reserve(alloc_size);
981
982        let usage = memory_management.memory_usage();
983        assert_eq!(usage.number_allocs, 1);
984        assert_eq!(usage.bytes_in_use, alloc_size);
985        assert!(usage.bytes_reserved >= alloc_size);
986    }
987
988    #[test_log::test]
989    fn noslice_alloc_allocs_new_storage() {
990        let mut memory_management = MemoryManagement::from_configuration(
991            BytesStorage::default(),
992            &DUMMY_MEM_PROPS,
993            MemoryConfiguration::Custom {
994                pool_options: vec![MemoryPoolOptions {
995                    pool_type: PoolType::ExclusivePages {
996                        max_alloc_size: 1024,
997                    },
998                    dealloc_period: None,
999                }],
1000            },
1001            Arc::new(ServerLogger::default()),
1002            options(),
1003        );
1004
1005        let alloc_size = 768;
1006        let _handle = memory_management.reserve(alloc_size);
1007        let _new_handle = memory_management.reserve(alloc_size);
1008        let usage = memory_management.memory_usage();
1009        assert_eq!(usage.number_allocs, 2);
1010        assert_eq!(usage.bytes_in_use, alloc_size * 2);
1011        assert!(usage.bytes_reserved >= alloc_size * 2);
1012    }
1013
1014    #[test_log::test]
1015    fn noslice_alloc_respects_alignment_size() {
1016        let mut memory_management = MemoryManagement::from_configuration(
1017            BytesStorage::default(),
1018            &MemoryDeviceProperties {
1019                max_page_size: DUMMY_MEM_PROPS.max_page_size,
1020                alignment: 50,
1021            },
1022            MemoryConfiguration::Custom {
1023                pool_options: vec![MemoryPoolOptions {
1024                    pool_type: PoolType::ExclusivePages {
1025                        max_alloc_size: 50 * 20,
1026                    },
1027                    dealloc_period: None,
1028                }],
1029            },
1030            Arc::new(ServerLogger::default()),
1031            options(),
1032        );
1033        let alloc_size = 40;
1034        let _handle = memory_management.reserve(alloc_size);
1035        let _new_handle = memory_management.reserve(alloc_size);
1036        let usage = memory_management.memory_usage();
1037        // Each slice should be aligned to 60 bytes, so 20 padding bytes.
1038        assert_eq!(usage.bytes_padding, 10 * 2);
1039    }
1040
1041    #[test_log::test]
1042    fn noslice_allocs_on_correct_page() {
1043        let pools = [100, 200, 300, 400]
1044            .iter()
1045            .map(|&size| MemoryPoolOptions {
1046                pool_type: PoolType::SlicedPages {
1047                    page_size: size,
1048                    max_slice_size: size,
1049                },
1050                dealloc_period: None,
1051            })
1052            .collect();
1053        let mut memory_management = MemoryManagement::from_configuration(
1054            BytesStorage::default(),
1055            &MemoryDeviceProperties {
1056                max_page_size: DUMMY_MEM_PROPS.max_page_size,
1057                alignment: 10,
1058            },
1059            MemoryConfiguration::Custom {
1060                pool_options: pools,
1061            },
1062            Arc::new(ServerLogger::default()),
1063            options(),
1064        );
1065        // Allocate one thing on each page.
1066        let alloc_sizes = [50, 150, 250, 350];
1067        let _handles = alloc_sizes.map(|s| memory_management.reserve(s));
1068        let usage = memory_management.memory_usage();
1069        // Total memory should be size of all pages, and no more.
1070        assert_eq!(usage.bytes_in_use, alloc_sizes.iter().sum::<u64>());
1071    }
1072
1073    #[test_log::test]
1074    fn noslice_allocate_deallocate_reallocate() {
1075        let mut memory_management = MemoryManagement::from_configuration(
1076            BytesStorage::default(),
1077            &MemoryDeviceProperties {
1078                max_page_size: 128 * 1024 * 1024,
1079                alignment: 32,
1080            },
1081            MemoryConfiguration::ExclusivePages,
1082            Arc::new(ServerLogger::default()),
1083            options(),
1084        );
1085        // Allocate a bunch
1086        let handles: Vec<_> = (0..5)
1087            .map(|i| memory_management.reserve(1000 * (i + 1)))
1088            .collect();
1089        let usage_before = memory_management.memory_usage();
1090        // Deallocate
1091        drop(handles);
1092        // Reallocate
1093        let _new_handles: Vec<_> = (0..5)
1094            .map(|i| memory_management.reserve(1000 * (i + 1)))
1095            .collect();
1096        let usage_after = memory_management.memory_usage();
1097        assert_eq!(usage_before.number_allocs, usage_after.number_allocs);
1098        assert_eq!(usage_before.bytes_in_use, usage_after.bytes_in_use);
1099        assert_eq!(usage_before.bytes_reserved, usage_after.bytes_reserved);
1100    }
1101}