// wasm_dbms_memory/memory_manager.rs
1// Rust guideline compliant 2026-04-27
2// X-WHERE-CLAUSE, M-CANONICAL-DOCS
3
4//! Memory manager for page-level memory operations.
5
6use wasm_dbms_api::prelude::{
7    DataSize, Encode, MSize, MemoryError, MemoryResult, Page, PageOffset,
8};
9
10use crate::memory_access::MemoryAccess;
11use crate::provider::MemoryProvider;
12
/// Schema page (reserved page 0).
pub const SCHEMA_PAGE: Page = 0;
/// ACL page (reserved page 1).
pub const ACL_PAGE: Page = 1;
/// Unclaimed-pages ledger page (reserved page 2).
pub const UNCLAIMED_PAGES_PAGE: Page = 2;
/// Number of reserved pages allocated at initialization.
///
/// Must stay in sync with the reserved page constants above.
pub const RESERVED_PAGES: u64 = 3;
21
/// The memory manager handles page-level memory operations on top of a
/// [`MemoryProvider`].
pub struct MemoryManager<P>
where
    P: MemoryProvider,
{
    // Backing provider that owns the raw memory pages.
    provider: P,
}
30
31impl<P> MemoryManager<P>
32where
33    P: MemoryProvider,
34{
35    /// Initializes the memory manager and allocates the header and reserved
36    /// pages.
37    ///
38    /// # Panics
39    ///
40    /// Panics if the memory provider fails to initialize.
41    pub fn init(provider: P) -> Self {
42        let mut manager = MemoryManager { provider };
43
44        // Check whether the reserved pages are already allocated.
45        let current_pages = manager.provider.pages();
46        if current_pages >= RESERVED_PAGES {
47            return manager;
48        }
49
50        // Request the missing reserved pages (schema, ACL, unclaimed).
51        let missing = RESERVED_PAGES - current_pages;
52        if let Err(err) = manager.provider.grow(missing) {
53            panic!("Failed to grow memory during initialization: {err}");
54        }
55
56        manager
57    }
58
59    /// Returns the ACL page number.
60    pub const fn acl_page(&self) -> Page {
61        ACL_PAGE
62    }
63
64    /// Returns the schema page.
65    pub const fn schema_page(&self) -> Page {
66        SCHEMA_PAGE
67    }
68
69    /// Returns the unclaimed-pages ledger page.
70    pub const fn unclaimed_pages_page(&self) -> Page {
71        UNCLAIMED_PAGES_PAGE
72    }
73
74    /// Consumes the manager and returns the underlying provider.
75    ///
76    /// Test-only helper that enables reload simulations without going
77    /// through the full DBMS context.
78    #[cfg(test)]
79    pub(crate) fn into_provider(self) -> P {
80        self.provider
81    }
82
83    /// Gets the last allocated page number.
84    pub fn last_page(&self) -> Option<Page> {
85        match self.provider.pages() {
86            0 => None,
87            n => Some(n as Page - 1),
88        }
89    }
90
91    /// Returns the total number of pages currently backed by the provider.
92    pub fn pages_count(&self) -> u64 {
93        self.provider.pages()
94    }
95
96    /// Calculates the absolute offset in memory given a page number and an
97    /// offset within that page.
98    fn absolute_offset(&self, page: Page, offset: PageOffset) -> u64 {
99        (page as u64)
100            .checked_mul(P::PAGE_SIZE)
101            .and_then(|page_offset| page_offset.checked_add(offset as u64))
102            .expect("Overflow when calculating absolute offset")
103    }
104
105    /// Checks if the specified page is allocated.
106    fn check_unallocated_page(
107        &self,
108        page: Page,
109        offset: PageOffset,
110        data_size: MSize,
111    ) -> MemoryResult<()> {
112        if self.last_page().is_none_or(|last_page| page > last_page) {
113            return Err(MemoryError::SegmentationFault {
114                page,
115                offset,
116                data_size: data_size as u64,
117                page_size: P::PAGE_SIZE,
118            });
119        }
120        Ok(())
121    }
122
123    /// Checks if the given offset is aligned according to the alignment
124    /// requirement of type `E`.
125    fn check_alignment<E>(&self, offset: PageOffset) -> MemoryResult<()>
126    where
127        E: Encode,
128    {
129        let alignment = E::ALIGNMENT as PageOffset;
130        if alignment != 0 && !offset.is_multiple_of(alignment) {
131            return Err(MemoryError::OffsetNotAligned { offset, alignment });
132        }
133        Ok(())
134    }
135}
136
137impl<P> MemoryAccess for MemoryManager<P>
138where
139    P: MemoryProvider,
140{
141    fn page_size(&self) -> u64 {
142        P::PAGE_SIZE
143    }
144
145    fn grow_one_page(&mut self) -> MemoryResult<Page> {
146        self.provider.grow(1)?;
147
148        // Zero the newly allocated page.
149        self.provider.write(
150            self.absolute_offset(self.last_page().unwrap_or(0), 0),
151            &vec![0u8; P::PAGE_SIZE as usize],
152        )?;
153
154        match self.last_page() {
155            Some(page) => Ok(page),
156            None => Err(MemoryError::FailedToAllocatePage),
157        }
158    }
159
160    fn zero_page(&mut self, page: Page) -> MemoryResult<()> {
161        if self.last_page().is_none_or(|last_page| page > last_page) {
162            return Err(MemoryError::SegmentationFault {
163                page,
164                offset: 0,
165                data_size: P::PAGE_SIZE,
166                page_size: P::PAGE_SIZE,
167            });
168        }
169
170        let absolute_offset = self.absolute_offset(page, 0);
171        let buffer = vec![0u8; P::PAGE_SIZE as usize];
172        self.provider.write(absolute_offset, &buffer)
173    }
174
175    fn read_at<D>(&mut self, page: Page, offset: PageOffset) -> MemoryResult<D>
176    where
177        D: Encode,
178    {
179        self.check_alignment::<D>(offset)?;
180
181        let mut buf = vec![
182            0u8;
183            match D::SIZE {
184                DataSize::Fixed(size) => size as usize,
185                DataSize::Dynamic => (P::PAGE_SIZE as usize).saturating_sub(offset as usize),
186            }
187        ];
188
189        self.read_at_raw(page, offset, &mut buf)?;
190
191        D::decode(std::borrow::Cow::Owned(buf))
192    }
193
194    fn write_at<E>(&mut self, page: Page, offset: PageOffset, data: &E) -> MemoryResult<()>
195    where
196        E: Encode,
197    {
198        self.check_unallocated_page(page, offset, data.size())?;
199        self.check_alignment::<E>(offset)?;
200
201        let encoded = data.encode();
202
203        if offset as u64 + encoded.len() as u64 > P::PAGE_SIZE {
204            return Err(MemoryError::SegmentationFault {
205                page,
206                offset,
207                data_size: encoded.len() as u64,
208                page_size: P::PAGE_SIZE,
209            });
210        }
211
212        let absolute_offset = self.absolute_offset(page, offset);
213        self.provider.write(absolute_offset, encoded.as_ref())?;
214
215        // Zero padding bytes if any.
216        let padding = align_up::<E>(encoded.len()) - encoded.len();
217        if padding > 0 {
218            let padding_offset = absolute_offset + encoded.len() as u64;
219            let padding_buffer = vec![0u8; padding];
220            self.provider
221                .write(padding_offset, padding_buffer.as_ref())?;
222        }
223
224        Ok(())
225    }
226
227    fn write_at_raw(&mut self, page: Page, offset: PageOffset, buf: &[u8]) -> MemoryResult<()> {
228        self.check_unallocated_page(page, offset, buf.len() as MSize)?;
229
230        if offset as u64 + buf.len() as u64 > P::PAGE_SIZE {
231            return Err(MemoryError::SegmentationFault {
232                page,
233                offset,
234                data_size: buf.len() as u64,
235                page_size: P::PAGE_SIZE,
236            });
237        }
238
239        let absolute_offset = self.absolute_offset(page, offset);
240        self.provider.write(absolute_offset, buf)
241    }
242
243    fn zero<E>(&mut self, page: Page, offset: PageOffset, data: &E) -> MemoryResult<()>
244    where
245        E: Encode,
246    {
247        self.check_unallocated_page(page, offset, data.size())?;
248        self.check_alignment::<E>(offset)?;
249
250        let length = align_up::<E>(data.size() as usize);
251
252        if offset as u64 + (length as u64) > P::PAGE_SIZE {
253            return Err(MemoryError::SegmentationFault {
254                page,
255                offset,
256                data_size: data.size() as u64,
257                page_size: P::PAGE_SIZE,
258            });
259        }
260
261        let absolute_offset = self.absolute_offset(page, offset);
262        let buffer = vec![0u8; length];
263        self.provider.write(absolute_offset, buffer.as_ref())
264    }
265
266    fn zero_raw(&mut self, page: Page, offset: PageOffset, len: PageOffset) -> MemoryResult<()> {
267        if self.last_page().is_none_or(|last_page| page > last_page) {
268            return Err(MemoryError::SegmentationFault {
269                page,
270                offset,
271                data_size: len as u64,
272                page_size: P::PAGE_SIZE,
273            });
274        }
275
276        if offset as u64 + len as u64 > P::PAGE_SIZE {
277            return Err(MemoryError::SegmentationFault {
278                page,
279                offset,
280                data_size: len as u64,
281                page_size: P::PAGE_SIZE,
282            });
283        }
284
285        let absolute_offset = self.absolute_offset(page, offset);
286        let buffer = vec![0u8; len as usize];
287        self.provider.write(absolute_offset, buffer.as_ref())
288    }
289
290    fn read_at_raw(
291        &mut self,
292        page: Page,
293        offset: PageOffset,
294        buf: &mut [u8],
295    ) -> MemoryResult<usize> {
296        if self.last_page().is_none_or(|last_page| page > last_page) {
297            return Err(MemoryError::SegmentationFault {
298                page,
299                offset,
300                data_size: buf.len() as u64,
301                page_size: P::PAGE_SIZE,
302            });
303        }
304
305        let read_len = ((P::PAGE_SIZE - offset as u64) as usize).min(buf.len());
306
307        let absolute_offset = self.absolute_offset(page, offset);
308        self.provider
309            .read(absolute_offset, buf[..read_len].as_mut())?;
310
311        Ok(read_len)
312    }
313}
314
315/// Gets the padding at the given offset to the next multiple of
316/// [`E::ALIGNMENT`].
317#[inline]
318pub const fn align_up<E>(offset: usize) -> usize
319where
320    E: Encode,
321{
322    let alignment = E::ALIGNMENT as usize;
323    offset.div_ceil(alignment) * alignment
324}
325
#[cfg(test)]
mod tests {
    //! Unit tests exercising the memory manager against the in-memory
    //! [`HeapMemoryProvider`].

    use std::borrow::Cow;

    use wasm_dbms_api::prelude::{
        DEFAULT_ALIGNMENT, DataSize, MSize, MemoryError, MemoryResult, PageOffset, Text,
    };

    use super::*;
    use crate::provider::HeapMemoryProvider;

    /// Builds a manager backed by a fresh heap provider with the three
    /// reserved pages allocated.
    fn make_mm() -> MemoryManager<HeapMemoryProvider> {
        MemoryManager::init(HeapMemoryProvider::default())
    }

    #[test]
    fn test_should_init_memory_manager() {
        let mm = make_mm();
        assert_eq!(mm.last_page(), Some(2));
    }

    #[test]
    fn test_should_get_last_page() {
        let mm = make_mm();
        let last_page = mm.last_page();
        assert_eq!(last_page, Some(2)); // schema, ACL, and unclaimed-pages pages
    }

    #[test]
    fn test_should_get_memory_page_size() {
        let mm = make_mm();
        let page_size = mm.page_size();
        assert_eq!(page_size, HeapMemoryProvider::PAGE_SIZE);
    }

    #[test]
    fn test_should_write_and_read_fixed_data_size() {
        let mut mm = make_mm();
        let data_to_write = FixedSizeData { a: 42, b: 1337 };
        mm.write_at(ACL_PAGE, 0, &data_to_write)
            .expect("Failed to write data to ACL page");

        let out: FixedSizeData = mm
            .read_at(ACL_PAGE, 0)
            .expect("Failed to read data from ACL page");

        assert_eq!(out, data_to_write);
    }

    #[test]
    fn test_write_should_zero_padding() {
        let mut mm = make_mm();
        let data_to_write = Text("very_long_string".to_string());
        mm.write_at(ACL_PAGE, 0, &data_to_write)
            .expect("Failed to write data to ACL page");

        let mut buffer = vec![0; 32];
        mm.read_at_raw(ACL_PAGE, 0, &mut buffer)
            .expect("Failed to read data from ACL page");

        // NOTE(review): the `- 1` presumably accounts for one zero byte
        // inside `Text`'s encoding — confirm against `Text::encode`.
        let non_zero_count = buffer.iter().filter(|&&b| b != 0).count();
        assert_eq!(non_zero_count, data_to_write.size() as usize - 1);

        // Overwrite with a shorter value; the longer string's leftover
        // bytes must be cleared by the padding logic.
        let data_to_write_short = Text("short".to_string());
        mm.write_at(ACL_PAGE, 0, &data_to_write_short)
            .expect("Failed to write data to ACL page");

        let mut buffer = vec![0; 32];
        mm.read_at_raw(ACL_PAGE, 0, &mut buffer)
            .expect("Failed to read data from ACL page");

        let non_zero_count = buffer.iter().filter(|&&b| b != 0).count();
        assert_eq!(non_zero_count, data_to_write_short.size() as usize - 1);
    }

    #[test]
    fn test_should_zero_data() {
        let mut mm = make_mm();
        let data_to_write = FixedSizeData { a: 100, b: 200 };
        mm.write_at(ACL_PAGE, 48, &data_to_write)
            .expect("Failed to write data to ACL page");

        mm.zero(ACL_PAGE, 48, &data_to_write)
            .expect("Failed to zero data on ACL page");

        let mut buffer = vec![0; 50];
        mm.read_at_raw(ACL_PAGE, 48, &mut buffer)
            .expect("Failed to read data from ACL page");

        assert!(buffer.iter().all(|&b| b == 0));
    }

    #[test]
    fn test_should_zero_with_alignment() {
        let mut mm = make_mm();
        // Two 6-byte values back to back at offsets 0 and 6.
        let data_to_write = FixedSizeData { a: 100, b: 200 };
        mm.write_at(ACL_PAGE, 0, &data_to_write)
            .expect("Failed to write data to ACL page");
        let data_to_write = FixedSizeData { a: 100, b: 200 };
        mm.write_at(ACL_PAGE, 6, &data_to_write)
            .expect("Failed to write data to ACL page");

        // Zeroing a 32-byte-aligned type must clear the whole aligned span,
        // covering both values above.
        let data_with_alignment = DataWithAlignment { a: 100, b: 200 };
        mm.zero(ACL_PAGE, 0, &data_with_alignment)
            .expect("Failed to zero data on ACL page");

        let mut buffer = vec![0; 32];
        mm.read_at_raw(ACL_PAGE, 0, &mut buffer)
            .expect("Failed to read data from ACL page");
        assert!(
            buffer.iter().all(|&b| b == 0),
            "First 32 bytes are not zeroed"
        );
    }

    #[test]
    fn test_should_check_whether_write_is_aligned() {
        let mut mm = make_mm();
        let data_to_write = FixedSizeData { a: 100, b: 200 };
        // Offset 2 is not a multiple of FixedSizeData::ALIGNMENT (6).
        let res = mm.write_at(ACL_PAGE, 2, &data_to_write);
        assert!(matches!(res, Err(MemoryError::OffsetNotAligned { .. })));
    }

    #[test]
    fn test_should_check_whether_read_is_aligned() {
        let mut mm = make_mm();
        let result: MemoryResult<FixedSizeData> = mm.read_at(ACL_PAGE, 3);
        assert!(matches!(result, Err(MemoryError::OffsetNotAligned { .. })));
    }

    #[test]
    fn test_should_check_whether_zero_is_aligned() {
        let mut mm = make_mm();
        let data_to_zero = FixedSizeData { a: 1, b: 2 };
        let result = mm.zero(ACL_PAGE, 5, &data_to_zero);
        assert!(matches!(result, Err(MemoryError::OffsetNotAligned { .. })));
    }

    #[test]
    fn test_should_not_zero_unallocated_page() {
        let mut mm = make_mm();
        let data_to_zero = FixedSizeData { a: 1, b: 2 };
        let result = mm.zero(10, 0, &data_to_zero);
        assert!(matches!(result, Err(MemoryError::SegmentationFault { .. })));
    }

    #[test]
    fn test_should_not_zero_out_of_bounds() {
        let mut mm = make_mm();
        let data_to_zero = FixedSizeData { a: 1, b: 2 };
        // 6 bytes starting 4 bytes before the page end must be rejected.
        let result = mm.zero(
            ACL_PAGE,
            (HeapMemoryProvider::PAGE_SIZE - 4) as PageOffset,
            &data_to_zero,
        );
        assert!(matches!(result, Err(MemoryError::SegmentationFault { .. })));
    }

    #[test]
    fn test_should_read_raw() {
        let mut mm = make_mm();
        let data_to_write = vec![1u8, 2, 3, 4, 5];
        mm.write_at_raw(ACL_PAGE, 20, &data_to_write)
            .expect("Failed to write raw data to ACL page");

        let mut buf = vec![0u8; 5];
        mm.read_at_raw(ACL_PAGE, 20, &mut buf)
            .expect("Failed to read raw data from ACL page");

        assert_eq!(buf, data_to_write);
    }

    #[test]
    fn test_should_fail_out_of_bounds_access() {
        let mut mm = make_mm();
        let data_to_write = FixedSizeData { a: 1, b: 2 };
        // Write crossing the page boundary.
        let result = mm.write_at(
            ACL_PAGE,
            (HeapMemoryProvider::PAGE_SIZE - 4) as PageOffset,
            &data_to_write,
        );
        assert!(matches!(result, Err(MemoryError::SegmentationFault { .. })));

        // Read/write on an unallocated page.
        let result: MemoryResult<FixedSizeData> = mm.read_at(10, 0);
        assert!(matches!(result, Err(MemoryError::SegmentationFault { .. })));
        let result = mm.write_at(10, 0, &data_to_write);
        assert!(matches!(result, Err(MemoryError::SegmentationFault { .. })));
    }

    // NOTE(review): `claim_page`/`unclaim_page` are not defined in this
    // file — presumably default methods on `MemoryAccess`; verify there.
    #[test]
    fn test_should_claim_new_page_by_growing() {
        let mut mm = make_mm();
        let initial_last_page = mm.last_page().unwrap();
        let new_page = mm.claim_page().expect("Failed to claim new page");
        assert_eq!(new_page, initial_last_page + 1);
        let updated_last_page = mm.last_page().unwrap();
        assert_eq!(updated_last_page, new_page);
    }

    #[test]
    fn test_unclaim_then_claim_returns_same_page() {
        let mut mm = make_mm();
        let first = mm.claim_page().expect("claim");
        // Write a non-zero pattern so we can assert it's zeroed on unclaim.
        mm.write_at_raw(first, 0, &[0xAB, 0xCD, 0xEF, 0x42])
            .expect("write");

        mm.unclaim_page(first).expect("unclaim");

        // The reserved page should hand the same page back.
        let reused = mm.claim_page().expect("claim again");
        assert_eq!(reused, first);

        // Page contents must be zeroed by unclaim.
        let mut buf = [0u8; 4];
        mm.read_at_raw(reused, 0, &mut buf).expect("read");
        assert_eq!(buf, [0u8; 4]);
    }

    #[test]
    fn test_claim_after_exhausting_unclaimed_grows_memory() {
        let mut mm = make_mm();
        let page_a = mm.claim_page().expect("claim a");
        let page_b = mm.claim_page().expect("claim b");

        mm.unclaim_page(page_a).expect("unclaim a");
        mm.unclaim_page(page_b).expect("unclaim b");

        // Pop both unclaimed pages. The third claim must grow.
        let _ = mm.claim_page().expect("reuse 1");
        let _ = mm.claim_page().expect("reuse 2");
        let high_water_before = mm.last_page().unwrap();
        let grown = mm.claim_page().expect("grow");
        assert_eq!(grown, high_water_before + 1);
    }

    #[test]
    fn test_unclaim_persists_across_reload() {
        let mut provider = HeapMemoryProvider::default();
        {
            // First "session": claim a page, then return it to the ledger.
            let mut mm = MemoryManager::init(provider);
            let first = mm.claim_page().expect("claim");
            mm.unclaim_page(first).expect("unclaim");
            provider = mm.into_provider();
        }
        // Second "session" over the same provider memory: the unclaimed
        // page (the first non-reserved page) must be handed back.
        let mut mm = MemoryManager::init(provider);
        let reused = mm.claim_page().expect("claim after reload");
        assert_eq!(reused, RESERVED_PAGES as Page);
    }

    #[test]
    fn test_zero_page_zeros_full_page_contents() {
        let mut mm = make_mm();
        let page = mm.claim_page().expect("claim");
        // Write to several offsets across the page.
        mm.write_at_raw(page, 0, &[1, 2, 3, 4]).expect("write 0");
        mm.write_at_raw(page, 30_000, &[9, 9]).expect("write 30k");

        mm.zero_page(page).expect("zero_page");

        let mut buf = [0u8; 4];
        mm.read_at_raw(page, 0, &mut buf).expect("read");
        assert_eq!(buf, [0u8; 4]);
        let mut buf = [0u8; 2];
        mm.read_at_raw(page, 30_000, &mut buf).expect("read");
        assert_eq!(buf, [0u8; 2]);
    }

    #[test]
    fn test_zero_page_rejects_unallocated_page() {
        let mut mm = make_mm();
        let result = mm.zero_page(99);
        assert!(matches!(result, Err(MemoryError::SegmentationFault { .. })));
    }

    #[test]
    fn test_should_check_unallocated_page() {
        let mm = make_mm();
        let result = mm.check_unallocated_page(100, 0, 10);
        assert!(matches!(result, Err(MemoryError::SegmentationFault { .. })));

        let last_page = mm.last_page().unwrap();
        let result = mm.check_unallocated_page(last_page, 0, 10);
        assert!(result.is_ok());
    }

    #[test]
    fn test_should_compute_padding() {
        // DataWithAlignment uses DEFAULT_ALIGNMENT; these cases assume it
        // is 32 — adjust if the constant ever changes.
        assert_eq!(align_up::<DataWithAlignment>(0), 0);
        assert_eq!(align_up::<DataWithAlignment>(1), 32);
        assert_eq!(align_up::<DataWithAlignment>(2), 32);
        assert_eq!(align_up::<DataWithAlignment>(3), 32);
        assert_eq!(align_up::<DataWithAlignment>(31), 32);
        assert_eq!(align_up::<DataWithAlignment>(32), 32);
        assert_eq!(align_up::<DataWithAlignment>(48), 64);
        assert_eq!(align_up::<DataWithAlignment>(147), 160);
    }

    /// Fixed-size test fixture: 6 encoded bytes (u16 LE + u32 LE),
    /// alignment 6.
    #[derive(Debug, Clone, PartialEq)]
    struct FixedSizeData {
        a: u16,
        b: u32,
    }

    impl Encode for FixedSizeData {
        const SIZE: DataSize = DataSize::Fixed(6);
        const ALIGNMENT: PageOffset = 6;

        fn encode(&'_ self) -> Cow<'_, [u8]> {
            let mut buf = vec![0u8; self.size() as usize];
            buf[0..2].copy_from_slice(&self.a.to_le_bytes());
            buf[2..6].copy_from_slice(&self.b.to_le_bytes());
            Cow::Owned(buf)
        }

        fn decode(data: Cow<[u8]>) -> MemoryResult<Self>
        where
            Self: Sized,
        {
            let a = u16::from_le_bytes([data[0], data[1]]);
            let b = u32::from_le_bytes([data[2], data[3], data[4], data[5]]);
            Ok(FixedSizeData { a, b })
        }

        fn size(&self) -> MSize {
            6
        }
    }

    /// Dynamic-size test fixture: same 6-byte payload but aligned to
    /// DEFAULT_ALIGNMENT, for exercising padding/zeroing behavior.
    #[derive(Debug, Clone, PartialEq)]
    struct DataWithAlignment {
        a: u16,
        b: u32,
    }

    impl Encode for DataWithAlignment {
        const SIZE: DataSize = DataSize::Dynamic;
        const ALIGNMENT: PageOffset = DEFAULT_ALIGNMENT;

        fn encode(&'_ self) -> Cow<'_, [u8]> {
            let mut buf = vec![0u8; self.size() as usize];
            buf[0..2].copy_from_slice(&self.a.to_le_bytes());
            buf[2..6].copy_from_slice(&self.b.to_le_bytes());
            Cow::Owned(buf)
        }

        fn decode(data: Cow<[u8]>) -> MemoryResult<Self>
        where
            Self: Sized,
        {
            let a = u16::from_le_bytes([data[0], data[1]]);
            let b = u32::from_le_bytes([data[2], data[3], data[4], data[5]]);
            Ok(DataWithAlignment { a, b })
        }

        fn size(&self) -> MSize {
            6
        }
    }
}