intuicio_data/managed_box.rs
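//! Reference-counted boxes backed by a thread-local, paged allocator.
//!
//! Objects live in pages owned by a thread-local `ManagedStorage`. Small
//! allocations share `Chunked` pages, where a `u128` bitmask tracks the
//! occupancy of 128-byte chunks and free space is found by buddy-style
//! halving of the search range. Allocations too large for a chunked page get
//! their own `Exclusive` page. Every object is preceded in memory by a
//! `ManagedObjectHeader` holding the type hash, layout, finalizer and
//! instance count that back the `ManagedBox` and `DynamicManagedBox` handles.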

use crate::{
    Finalize,
    lifetime::{
        Lifetime, LifetimeLazy, LifetimeRef, LifetimeRefMut, ValueReadAccess, ValueWriteAccess,
    },
    managed::{
        DynamicManagedLazy, DynamicManagedRef, DynamicManagedRefMut, ManagedLazy, ManagedRef,
        ManagedRefMut,
    },
    non_zero_alloc, non_zero_dealloc, pointer_alignment_padding,
    type_hash::TypeHash,
};
use std::{alloc::Layout, cell::RefCell, collections::HashMap, ops::Range};

const MEMORY_CHUNK_SIZE: usize = 128;
const MEMORY_PAGE_SIZE: usize = MEMORY_CHUNK_SIZE * u128::BITS as usize;

thread_local! {
    static STORAGE: RefCell<ManagedStorage> = Default::default();
}

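/// Returns allocation statistics for the calling thread's managed storage.
/// Since storage is thread-local, pages owned by other threads are not
/// counted.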
pub fn managed_storage_stats() -> ManagedStorageStats {
    STORAGE.with_borrow(|storage| storage.stats())
}

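/// Metadata written at the start of every allocation. `Occupied` headers
/// carry everything needed to type-check, borrow, reference-count and
/// finalize the object that follows them in memory.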
enum ManagedObjectHeader {
    Occupied {
        id: usize,
        type_hash: TypeHash,
        lifetime: Lifetime,
        layout: Layout,
        finalizer: unsafe fn(*mut ()),
        instances_count: usize,
        padding: u8,
    },
    Free,
}

impl std::fmt::Debug for ManagedObjectHeader {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Occupied {
                id,
                type_hash,
                layout,
                finalizer,
                instances_count,
                ..
            } => f
                .debug_struct("Occupied")
                .field("id", id)
                .field("type_hash", type_hash)
                .field("layout", layout)
                .field("finalizer", finalizer)
                .field("instances_count", instances_count)
                .finish_non_exhaustive(),
            Self::Free => write!(f, "Free"),
        }
    }
}

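/// Tracks which chunks of a chunked page are in use. Free space is located by
/// recursively halving the search range, buddy-allocator style.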
#[derive(Debug, Default, Clone, Copy)]
struct OccupancyMap {
    // Each bit represents the occupancy of a single memory chunk.
    mask: u128,
}

impl OccupancyMap {
    fn occupy(&mut self, range: OccupancyRange) {
        self.mask |= range.mask;
    }

    fn free(&mut self, range: OccupancyRange) {
        self.mask &= !range.mask;
    }

    fn is_free(&self, range: OccupancyRange) -> bool {
        self.mask & range.mask == 0
    }

    fn find_free_space(
        &self,
        object_with_header_size: usize,
        range: OccupancyRange,
    ) -> Option<OccupancyRange> {
        if object_with_header_size > range.byte_size() {
            return None;
        }
        if self.is_free(range) {
            return range.cut(object_with_header_size);
        }
        // Buddy-style search: split the range in half, recurse into both
        // halves and keep the tighter of the two candidate fits.
        let (left, right) = range.split()?;
        let left = self.find_free_space(object_with_header_size, left);
        let right = self.find_free_space(object_with_header_size, right);
        match (left, right) {
            (None, None) => None,
            (None, Some(right)) => Some(right),
            (Some(left), None) => Some(left),
            (Some(left), Some(right)) => {
                if right.byte_size() < left.byte_size() {
                    Some(right)
                } else {
                    Some(left)
                }
            }
        }
    }
}

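/// A half-open range of chunk indices within a page, along with its
/// precomputed bitmask (one bit set per chunk covered by the range).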
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct OccupancyRange {
    bits_start_inclusive: usize,
    bits_end_exclusive: usize,
    mask: u128,
}

impl Default for OccupancyRange {
    fn default() -> Self {
        Self {
            bits_start_inclusive: 0,
            bits_end_exclusive: u128::BITS as _,
            mask: u128::MAX,
        }
    }
}

impl OccupancyRange {
    fn range(&self) -> Range<usize> {
        self.bits_start_inclusive..self.bits_end_exclusive
    }

    fn byte_offset(&self) -> usize {
        self.bits_start_inclusive * MEMORY_CHUNK_SIZE
    }

    fn byte_size(&self) -> usize {
        (self.bits_end_exclusive - self.bits_start_inclusive) * MEMORY_CHUNK_SIZE
    }

    fn update_mask(mut self) -> Self {
        let size = self.bits_end_exclusive - self.bits_start_inclusive;
        self.mask = if size == u128::BITS as _ {
            u128::MAX
        } else {
            (!u128::MAX.wrapping_shl(size as _)).wrapping_shl(self.bits_start_inclusive as _)
        };
        self
    }

    fn cut(&self, object_with_header_size: usize) -> Option<Self> {
        // Round the size up to the next power of two and express it in whole
        // chunks, clamped to at least one chunk so tiny objects still occupy
        // space. Compare chunk counts against chunk counts.
        let size = (object_with_header_size.next_power_of_two() / MEMORY_CHUNK_SIZE).max(1);
        if size <= self.range().len() {
            Some(
                Self {
                    bits_start_inclusive: self.bits_start_inclusive,
                    bits_end_exclusive: self.bits_start_inclusive + size,
                    mask: 0,
                }
                .update_mask(),
            )
        } else {
            None
        }
    }

    fn split(&self) -> Option<(Self, Self)> {
        let half_size = (self.bits_end_exclusive - self.bits_start_inclusive) / 2;
        if half_size == 0 {
            return None;
        }
        let start = self.bits_start_inclusive;
        let mid = self.bits_start_inclusive + half_size;
        let end = self.bits_end_exclusive;
        Some((
            Self {
                bits_start_inclusive: start,
                bits_end_exclusive: mid,
                mask: 0,
            }
            .update_mask(),
            Self {
                bits_start_inclusive: mid,
                bits_end_exclusive: end,
                mask: 0,
            }
            .update_mask(),
        ))
    }

    fn from_pointer_size(memory: *const u8, pointer: *const u8, size: usize) -> Self {
        let offset = pointer as usize - memory as usize;
        let from = offset / MEMORY_CHUNK_SIZE;
        // Must mirror the rounding in `cut`, so that freeing covers exactly
        // the chunks that were occupied.
        let to = from + (size.next_power_of_two() / MEMORY_CHUNK_SIZE).max(1);
        Self {
            bits_start_inclusive: from,
            bits_end_exclusive: to,
            mask: 0,
        }
        .update_mask()
    }
}

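/// A single raw allocation that either backs many small objects (`Chunked`)
/// or exactly one oversized object (`Exclusive`). `padding` aligns the first
/// object header within the raw allocation.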
enum ManagedMemoryPage {
    Chunked {
        memory: *mut u8,
        layout: Layout,
        occupancy: OccupancyMap,
        padding: u8,
    },
    Exclusive {
        memory: *mut u8,
        layout: Layout,
        padding: u8,
    },
}

impl Drop for ManagedMemoryPage {
    fn drop(&mut self) {
        // TODO: if it somehow happens that some objects are still alive when
        // the page gets destroyed (highly unlikely), consume their headers
        // and finalize the objects here.
        unsafe {
            match self {
                ManagedMemoryPage::Chunked { memory, layout, .. }
                | ManagedMemoryPage::Exclusive { memory, layout, .. } => {
                    if memory.is_null() {
                        return;
                    }
                    non_zero_dealloc(*memory, *layout);
                }
            }
        }
    }
}

impl ManagedMemoryPage {
    fn new_chunked() -> Option<Self> {
        let header_layout = Layout::new::<ManagedObjectHeader>().pad_to_align();
        // Over-allocate by the header alignment so the first header can be
        // aligned within the page regardless of where the allocation lands.
        let layout = Layout::from_size_align(MEMORY_PAGE_SIZE + header_layout.align(), 1).unwrap();
        unsafe {
            let memory = non_zero_alloc(layout);
            if memory.is_null() {
                None
            } else {
                let padding = pointer_alignment_padding(memory, header_layout.align());
                for offset in (0..MEMORY_PAGE_SIZE).step_by(MEMORY_CHUNK_SIZE) {
                    memory
                        .add(padding + offset)
                        .cast::<ManagedObjectHeader>()
                        .write(ManagedObjectHeader::Free);
                }
                Some(Self::Chunked {
                    memory,
                    layout,
                    occupancy: Default::default(),
                    padding: padding as u8,
                })
            }
        }
    }

    fn new_exclusive(size: usize, alignment: usize) -> Option<Self> {
        unsafe {
            let header_layout = Layout::new::<ManagedObjectHeader>().pad_to_align();
            let layout =
                Layout::from_size_align_unchecked(header_layout.size() + size + alignment, 1);
            let memory = non_zero_alloc(layout);
            if memory.is_null() {
                None
            } else {
                let padding = pointer_alignment_padding(memory, header_layout.align());
                memory
                    .add(padding)
                    .cast::<ManagedObjectHeader>()
                    .write(ManagedObjectHeader::Free);
                Some(Self::Exclusive {
                    layout,
                    memory,
                    padding: padding as u8,
                })
            }
        }
    }

    fn alloc_uninitialized(
        &mut self,
        id: usize,
        page: usize,
        type_hash: TypeHash,
        layout: Layout,
        finalizer: unsafe fn(*mut ()),
    ) -> Option<DynamicManagedBox> {
        let header_layout = Layout::new::<ManagedObjectHeader>().pad_to_align();
        match self {
            ManagedMemoryPage::Chunked {
                memory,
                occupancy,
                padding,
                ..
            } => unsafe {
                let range = occupancy.find_free_space(
                    header_layout.size() + layout.size(),
                    OccupancyRange::default(),
                )?;
                let memory = memory.add(*padding as usize + range.byte_offset());
                let padding = pointer_alignment_padding(memory, layout.align());
                // The object is placed `padding` bytes after its header, so
                // header + padding + object must all fit within the range.
                if header_layout.size() + layout.size() + padding > range.byte_size() {
                    return None;
                }
                occupancy.occupy(range);
                *memory.cast::<ManagedObjectHeader>().as_mut().unwrap() =
                    ManagedObjectHeader::Occupied {
                        id,
                        type_hash,
                        lifetime: Default::default(),
                        layout,
                        finalizer,
                        instances_count: 1,
                        padding: padding as u8,
                    };
                Some(DynamicManagedBox {
                    memory,
                    id,
                    page,
                    drop: true,
                })
            },
            ManagedMemoryPage::Exclusive {
                memory, padding, ..
            } => unsafe {
                let memory = memory.add(*padding as usize);
                let padding = pointer_alignment_padding(memory, layout.align());
                *memory.cast::<ManagedObjectHeader>().as_mut().unwrap() =
                    ManagedObjectHeader::Occupied {
                        id,
                        type_hash,
                        lifetime: Default::default(),
                        layout,
                        finalizer,
                        instances_count: 1,
                        padding: padding as u8,
                    };
                Some(DynamicManagedBox {
                    memory,
                    id,
                    page,
                    drop: true,
                })
            },
        }
    }

    fn owns_pointer(&self, pointer: *const u8) -> bool {
        let (from, to) = unsafe {
            match self {
                ManagedMemoryPage::Chunked { memory, layout, .. }
                | ManagedMemoryPage::Exclusive { memory, layout, .. } => {
                    (*memory, memory.add(layout.size()))
                }
            }
        };
        pointer >= from && pointer < to
    }

    fn total_size(&self) -> usize {
        match self {
            ManagedMemoryPage::Chunked { layout, .. }
            | ManagedMemoryPage::Exclusive { layout, .. } => layout.size(),
        }
    }

    fn occupied_size(&self) -> usize {
        match self {
            ManagedMemoryPage::Chunked { occupancy, .. } => {
                occupancy.mask.count_ones() as usize * MEMORY_CHUNK_SIZE
            }
            ManagedMemoryPage::Exclusive { layout, .. } => layout.size(),
        }
    }

    fn free_size(&self) -> usize {
        match self {
            ManagedMemoryPage::Chunked { occupancy, .. } => {
                occupancy.mask.count_zeros() as usize * MEMORY_CHUNK_SIZE
            }
            ManagedMemoryPage::Exclusive { .. } => 0,
        }
    }
}

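/// A snapshot of page counts and byte usage for one thread's managed storage,
/// as returned by [`managed_storage_stats`].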
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct ManagedStorageStats {
    pub pages_count: usize,
    pub chunked_pages_count: usize,
    pub exclusive_pages_count: usize,
    pub total_size: usize,
    pub occupied_size: usize,
    pub free_size: usize,
}

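/// Thread-local allocator state: monotonically growing object/page id
/// counters plus the live pages, keyed by page id.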
#[derive(Default)]
struct ManagedStorage {
    object_id_generator: usize,
    page_id_generator: usize,
    pages: HashMap<usize, ManagedMemoryPage>,
}

impl ManagedStorage {
    fn stats(&self) -> ManagedStorageStats {
        ManagedStorageStats {
            pages_count: self.pages.len(),
            chunked_pages_count: self
                .pages
                .values()
                .filter(|page| matches!(page, ManagedMemoryPage::Chunked { .. }))
                .count(),
            exclusive_pages_count: self
                .pages
                .values()
                .filter(|page| matches!(page, ManagedMemoryPage::Exclusive { .. }))
                .count(),
            total_size: self.pages.values().map(|page| page.total_size()).sum(),
            occupied_size: self.pages.values().map(|page| page.occupied_size()).sum(),
            free_size: self.pages.values().map(|page| page.free_size()).sum(),
        }
    }

    fn generate_object_id(&mut self) -> usize {
        let result = self.object_id_generator;
        self.object_id_generator = self.object_id_generator.wrapping_add(1);
        result
    }

    fn generate_page_id(&mut self) -> usize {
        let result = self.page_id_generator;
        self.page_id_generator = self.page_id_generator.wrapping_add(1);
        result
    }

    fn alloc_uninitialized(
        &mut self,
        type_hash: TypeHash,
        layout: Layout,
        finalizer: unsafe fn(*mut ()),
    ) -> DynamicManagedBox {
        let id = self.generate_object_id();
        let size = layout.size() + Layout::new::<ManagedObjectHeader>().size();
        if size > MEMORY_PAGE_SIZE {
            // Too big to share a chunked page, so give the object its own
            // exclusive page.
            let page_id = self.generate_page_id();
            let mut page = ManagedMemoryPage::new_exclusive(size, layout.align()).unwrap();
            let object = page
                .alloc_uninitialized(id, page_id, type_hash, layout, finalizer)
                .unwrap();
            self.pages.insert(page_id, page);
            object
        } else {
            // Try existing chunked pages first; allocate a fresh one on miss.
            for (page_id, page) in &mut self.pages {
                if matches!(page, ManagedMemoryPage::Chunked { .. }) {
                    if let Some(object) =
                        page.alloc_uninitialized(id, *page_id, type_hash, layout, finalizer)
                    {
                        return object;
                    }
                }
            }
            let page_id = self.generate_page_id();
            let mut page = ManagedMemoryPage::new_chunked().unwrap();
            let object = page
                .alloc_uninitialized(id, page_id, type_hash, layout, finalizer)
                .unwrap();
            self.pages.insert(page_id, page);
            object
        }
    }

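    /// Bumps the instance count of the object identified by `object_id`,
    /// after checking that `pointer` still belongs to page `page_id`.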
    fn increment(&mut self, object_id: usize, page_id: usize, pointer: *mut u8) {
        if let Some(page) = self.pages.get(&page_id) {
            if page.owns_pointer(pointer) {
                unsafe {
                    let header = pointer.cast::<ManagedObjectHeader>().as_mut().unwrap();
                    if let ManagedObjectHeader::Occupied {
                        id,
                        instances_count,
                        ..
                    } = header
                    {
                        if object_id == *id {
                            *instances_count += 1;
                        }
                    }
                }
            }
        }
    }

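    /// Drops one instance of the object. When the count reaches zero, the
    /// object is finalized, its chunks (or its exclusive page) are released,
    /// and a fully freed page is removed from storage.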
    fn decrement(&mut self, object_id: usize, page_id: usize, pointer: *mut u8) {
        if let Some(page) = self.pages.get_mut(&page_id) {
            if page.owns_pointer(pointer) {
                let header_size = Layout::new::<ManagedObjectHeader>().pad_to_align().size();
                unsafe {
                    let header = pointer.cast::<ManagedObjectHeader>().as_mut().unwrap();
                    if let ManagedObjectHeader::Occupied {
                        id,
                        layout,
                        finalizer,
                        instances_count,
                        padding,
                        ..
                    } = header
                    {
                        if object_id == *id && *instances_count > 0 {
                            *instances_count -= 1;
                            if *instances_count == 0 {
                                (finalizer)(
                                    pointer.add(header_size + *padding as usize).cast::<()>(),
                                );
                                match page {
                                    ManagedMemoryPage::Chunked {
                                        memory,
                                        occupancy,
                                        padding,
                                        ..
                                    } => {
                                        let range = OccupancyRange::from_pointer_size(
                                            memory.add(*padding as usize),
                                            pointer,
                                            header_size + layout.size(),
                                        );
                                        occupancy.free(range);
                                        // Reset the header of every chunk the
                                        // object spanned.
                                        *header = ManagedObjectHeader::Free;
                                        for index in range.range().skip(1) {
                                            memory
                                                .add(*padding as usize + index * MEMORY_CHUNK_SIZE)
                                                .cast::<ManagedObjectHeader>()
                                                .write(ManagedObjectHeader::Free);
                                        }
                                        if occupancy.is_free(OccupancyRange::default()) {
                                            self.pages.remove(&page_id);
                                        }
                                    }
                                    ManagedMemoryPage::Exclusive { .. } => {
                                        *header = ManagedObjectHeader::Free;
                                        self.pages.remove(&page_id);
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }

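    /// Resolves the object's payload pointer, lifetime and type hash,
    /// optionally verifying that `T` matches the stored type. Returns `None`
    /// for stale handles (mismatched id, freed object or foreign pointer).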
    fn access_object_lifetime_type<T>(
        &self,
        pointer: *mut u8,
        object_id: usize,
        page_id: usize,
        type_check: bool,
    ) -> Option<(*mut T, *mut Lifetime, TypeHash)> {
        if let Some(page) = self.pages.get(&page_id) {
            if page.owns_pointer(pointer) {
                let header_size = Layout::new::<ManagedObjectHeader>().pad_to_align().size();
                let header = unsafe { pointer.cast::<ManagedObjectHeader>().as_mut().unwrap() };
                if let ManagedObjectHeader::Occupied {
                    id,
                    type_hash,
                    lifetime,
                    instances_count,
                    padding,
                    ..
                } = header
                {
                    if object_id == *id
                        && *instances_count > 0
                        && (!type_check || *type_hash == TypeHash::of::<T>())
                    {
                        return Some((
                            unsafe { pointer.add(header_size + *padding as usize).cast::<T>() },
                            lifetime,
                            *type_hash,
                        ));
                    }
                }
            }
        }
        None
    }

    fn object_type_hash(
        &self,
        pointer: *mut u8,
        object_id: usize,
        page_id: usize,
    ) -> Option<TypeHash> {
        if let Some(page) = self.pages.get(&page_id) {
            if page.owns_pointer(pointer) {
                let header = unsafe { pointer.cast::<ManagedObjectHeader>().as_mut().unwrap() };
                if let ManagedObjectHeader::Occupied {
                    id,
                    type_hash,
                    instances_count,
                    ..
                } = header
                {
                    if object_id == *id && *instances_count > 0 {
                        return Some(*type_hash);
                    }
                }
            }
        }
        None
    }

    fn object_layout_with_offset(
        &self,
        pointer: *mut u8,
        object_id: usize,
        page_id: usize,
    ) -> Option<(Layout, usize)> {
        if let Some(page) = self.pages.get(&page_id) {
            if page.owns_pointer(pointer) {
                let header_size = Layout::new::<ManagedObjectHeader>().pad_to_align().size();
                let header = unsafe { pointer.cast::<ManagedObjectHeader>().as_mut().unwrap() };
                if let ManagedObjectHeader::Occupied {
                    id,
                    layout,
                    instances_count,
                    padding,
                    ..
                } = header
                {
                    if object_id == *id && *instances_count > 0 {
                        return Some((*layout, header_size + *padding as usize));
                    }
                }
            }
        }
        None
    }

    fn object_instances_count(&self, pointer: *mut u8, object_id: usize, page_id: usize) -> usize {
        if let Some(page) = self.pages.get(&page_id) {
            if page.owns_pointer(pointer) {
                let header = unsafe { pointer.cast::<ManagedObjectHeader>().as_mut().unwrap() };
                if let ManagedObjectHeader::Occupied {
                    id,
                    instances_count,
                    ..
                } = header
                {
                    if object_id == *id {
                        return *instances_count;
                    }
                }
            }
        }
        0
    }
}

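/// A typed, reference-counted handle to a value in managed storage. Clones
/// share the same allocation; the value is finalized and its memory released
/// when the last handle (typed or dynamic) is dropped.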
pub struct ManagedBox<T> {
    memory: *mut T,
    id: usize,
    page: usize,
    drop: bool,
}

impl<T: Default> Default for ManagedBox<T> {
    fn default() -> Self {
        Self::new(T::default())
    }
}

impl<T> Drop for ManagedBox<T> {
    fn drop(&mut self) {
        if self.drop {
            STORAGE.with_borrow_mut(|storage| {
                storage.decrement(self.id, self.page, self.memory.cast());
            })
        }
    }
}

impl<T> ManagedBox<T> {
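    /// Allocates `value` in the calling thread's managed storage.
    ///
    /// A minimal usage sketch, assuming the crate exposes this module as
    /// `intuicio_data::managed_box` (marked `ignore` since that path is not
    /// confirmed here):
    ///
    /// ```ignore
    /// use intuicio_data::managed_box::ManagedBox;
    ///
    /// let a = ManagedBox::new(42usize);
    /// let b = a.clone(); // `b` shares `a`'s allocation.
    /// assert_eq!(*b.read().unwrap(), 42);
    /// assert_eq!(a.instances_count(), 2);
    /// ```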
    pub fn new(value: T) -> Self
    where
        T: Finalize,
    {
        let mut result = DynamicManagedBox::new(value);
        result.drop = false;
        Self {
            memory: result.memory.cast(),
            id: result.id,
            page: result.page,
            drop: true,
        }
    }

    pub fn into_dynamic(mut self) -> DynamicManagedBox {
        self.drop = false;
        DynamicManagedBox {
            memory: self.memory.cast(),
            id: self.id,
            page: self.page,
            drop: true,
        }
    }

    pub fn instances_count(&self) -> usize {
        STORAGE.with_borrow(|storage| {
            storage.object_instances_count(self.memory.cast(), self.id, self.page)
        })
    }

    pub fn does_share_reference(&self, other: &Self) -> bool {
        self.id == other.id && self.page == other.page && self.memory == other.memory
    }

    pub fn type_hash(&self) -> Option<TypeHash> {
        STORAGE
            .with_borrow(|storage| storage.object_type_hash(self.memory.cast(), self.id, self.page))
    }

    pub fn lifetime_borrow(&self) -> Option<LifetimeRef> {
        STORAGE.with_borrow(|storage| {
            let (_, lifetime, _) = storage.access_object_lifetime_type::<u8>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            unsafe { lifetime.as_ref()?.borrow() }
        })
    }

    pub fn lifetime_borrow_mut(&self) -> Option<LifetimeRefMut> {
        STORAGE.with_borrow(|storage| {
            let (_, lifetime, _) = storage.access_object_lifetime_type::<u8>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            unsafe { lifetime.as_ref()?.borrow_mut() }
        })
    }

    pub fn lifetime_lazy(&self) -> Option<LifetimeLazy> {
        STORAGE.with_borrow(|storage| {
            let (_, lifetime, _) = storage.access_object_lifetime_type::<u8>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            unsafe { Some(lifetime.as_ref()?.lazy()) }
        })
    }

    pub fn read(&self) -> Option<ValueReadAccess<T>> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                true,
            )?;
            unsafe { lifetime.as_ref()?.read_ptr(pointer) }
        })
    }

    pub fn write(&mut self) -> Option<ValueWriteAccess<T>> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                true,
            )?;
            unsafe { lifetime.as_mut()?.write_ptr(pointer) }
        })
    }

    pub fn borrow(&self) -> Option<ManagedRef<T>> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                true,
            )?;
            unsafe { ManagedRef::new_raw(pointer, lifetime.as_ref()?.borrow()?) }
        })
    }

    pub fn borrow_mut(&mut self) -> Option<ManagedRefMut<T>> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                true,
            )?;
            unsafe { ManagedRefMut::new_raw(pointer, lifetime.as_mut()?.borrow_mut()?) }
        })
    }

    pub fn lazy(&self) -> Option<ManagedLazy<T>> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                true,
            )?;
            unsafe { ManagedLazy::new_raw(pointer, lifetime.as_mut().unwrap().lazy()) }
        })
    }

    /// # Safety
    /// The returned pointer bypasses lifetime tracking: it must not be used
    /// after this box (and all of its clones) is dropped, nor while the value
    /// is mutably borrowed.
    pub unsafe fn as_ptr(&self) -> Option<*const T> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                true,
            )?;
            Some(pointer.cast_const())
        })
    }

    /// # Safety
    /// The returned pointer bypasses lifetime tracking: it must not be used
    /// after this box (and all of its clones) is dropped, nor while any other
    /// borrow of the value exists.
    pub unsafe fn as_ptr_mut(&mut self) -> Option<*mut T> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                true,
            )?;
            Some(pointer)
        })
    }

    /// # Safety
    /// As for [`Self::as_ptr`], but the pointer is untyped, so the caller
    /// must also respect the object's actual layout.
    pub unsafe fn as_ptr_raw(&self) -> Option<*const u8> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            Some(pointer.cast_const().cast())
        })
    }

    /// # Safety
    /// As for [`Self::as_ptr_mut`], but the pointer is untyped, so the caller
    /// must also respect the object's actual layout.
    pub unsafe fn as_mut_ptr_raw(&mut self) -> Option<*mut u8> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            Some(pointer.cast())
        })
    }
}

impl<T> Clone for ManagedBox<T> {
    fn clone(&self) -> Self {
        STORAGE.with_borrow_mut(|storage| {
            storage.increment(self.id, self.page, self.memory.cast());
            Self {
                memory: self.memory,
                id: self.id,
                page: self.page,
                drop: true,
            }
        })
    }
}

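/// The type-erased counterpart of [`ManagedBox`]: the stored type travels as
/// a `TypeHash` in the object header and is checked on typed access, so one
/// handle type can hold any finalizable value.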
pub struct DynamicManagedBox {
    memory: *mut u8,
    id: usize,
    page: usize,
    drop: bool,
}

impl Drop for DynamicManagedBox {
    fn drop(&mut self) {
        if self.drop {
            STORAGE.with_borrow_mut(|storage| {
                storage.decrement(self.id, self.page, self.memory);
            })
        }
    }
}

impl DynamicManagedBox {
    pub fn new<T: Finalize>(value: T) -> Self {
        unsafe {
            let mut result =
                Self::new_uninitialized(TypeHash::of::<T>(), Layout::new::<T>(), T::finalize_raw);
            result.as_ptr_mut::<T>().unwrap().write(value);
            result
        }
    }

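    /// Allocates storage for an object with the given layout without writing
    /// a value. The caller must fully initialize the payload (e.g. through
    /// `as_ptr_mut`) before it is read or the box is dropped, because dropping
    /// the last instance runs `finalizer` on the payload.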
    pub fn new_uninitialized(
        type_hash: TypeHash,
        layout: Layout,
        finalizer: unsafe fn(*mut ()),
    ) -> Self {
        STORAGE.with_borrow_mut(|storage| {
            storage.alloc_uninitialized(type_hash, layout.pad_to_align(), finalizer)
        })
    }

    pub fn into_typed<T>(mut self) -> Result<ManagedBox<T>, Self> {
        if self.is::<T>() {
            self.drop = false;
            Ok(ManagedBox {
                memory: self.memory.cast(),
                id: self.id,
                page: self.page,
                drop: true,
            })
        } else {
            Err(self)
        }
    }

    pub fn instances_count(&self) -> usize {
        STORAGE
            .with_borrow(|storage| storage.object_instances_count(self.memory, self.id, self.page))
    }

    pub fn does_share_reference(&self, other: &Self) -> bool {
        self.id == other.id && self.page == other.page && self.memory == other.memory
    }

    pub fn type_hash(&self) -> Option<TypeHash> {
        STORAGE.with_borrow(|storage| storage.object_type_hash(self.memory, self.id, self.page))
    }

    pub fn lifetime_borrow(&self) -> Option<LifetimeRef> {
        STORAGE.with_borrow(|storage| {
            let (_, lifetime, _) = storage.access_object_lifetime_type::<u8>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            unsafe { lifetime.as_ref()?.borrow() }
        })
    }

    pub fn lifetime_borrow_mut(&self) -> Option<LifetimeRefMut> {
        STORAGE.with_borrow(|storage| {
            let (_, lifetime, _) = storage.access_object_lifetime_type::<u8>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            unsafe { lifetime.as_ref()?.borrow_mut() }
        })
    }

    pub fn lifetime_lazy(&self) -> Option<LifetimeLazy> {
        STORAGE.with_borrow(|storage| {
            let (_, lifetime, _) = storage.access_object_lifetime_type::<u8>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            unsafe { Some(lifetime.as_ref()?.lazy()) }
        })
    }

    pub fn is<T>(&self) -> bool {
        STORAGE.with_borrow(|storage| {
            storage
                .access_object_lifetime_type::<T>(self.memory, self.id, self.page, true)
                .is_some()
        })
    }

    pub fn borrow(&self) -> Option<DynamicManagedRef> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, type_hash) = storage.access_object_lifetime_type::<u8>(
                self.memory,
                self.id,
                self.page,
                false,
            )?;
            unsafe { DynamicManagedRef::new_raw(type_hash, lifetime.as_ref()?.borrow()?, pointer) }
        })
    }

    pub fn borrow_mut(&mut self) -> Option<DynamicManagedRefMut> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, type_hash) = storage.access_object_lifetime_type::<u8>(
                self.memory,
                self.id,
                self.page,
                false,
            )?;
            unsafe {
                DynamicManagedRefMut::new_raw(type_hash, lifetime.as_mut()?.borrow_mut()?, pointer)
            }
        })
    }

    pub fn lazy(&self) -> Option<DynamicManagedLazy> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, type_hash) = storage.access_object_lifetime_type::<u8>(
                self.memory,
                self.id,
                self.page,
                false,
            )?;
            unsafe {
                DynamicManagedLazy::new_raw(type_hash, lifetime.as_mut().unwrap().lazy(), pointer)
            }
        })
    }

    pub fn read<T>(&self) -> Option<ValueReadAccess<T>> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, _) =
                storage.access_object_lifetime_type::<T>(self.memory, self.id, self.page, true)?;
            unsafe { lifetime.as_ref()?.read_ptr(pointer) }
        })
    }

    pub fn write<T>(&mut self) -> Option<ValueWriteAccess<T>> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, _) =
                storage.access_object_lifetime_type::<T>(self.memory, self.id, self.page, true)?;
            unsafe { lifetime.as_mut()?.write_ptr(pointer) }
        })
    }

    /// # Safety
    /// The returned slice views the object's raw bytes and bypasses lifetime
    /// tracking; it must not be used after this box (and all of its clones)
    /// is dropped.
    pub unsafe fn memory(&self) -> Option<&[u8]> {
        STORAGE.with_borrow(|storage| {
            storage
                .object_layout_with_offset(self.memory, self.id, self.page)
                .map(|(layout, offset)| unsafe {
                    std::slice::from_raw_parts(self.memory.add(offset), layout.size())
                })
        })
    }

    /// # Safety
    /// The returned slice views the object's raw bytes and bypasses lifetime
    /// tracking; it must not be used after this box (and all of its clones)
    /// is dropped, and no other access to the object may overlap with it.
    pub unsafe fn memory_mut(&mut self) -> Option<&mut [u8]> {
        STORAGE.with_borrow(|storage| {
            storage
                .object_layout_with_offset(self.memory, self.id, self.page)
                .map(|(layout, offset)| unsafe {
                    std::slice::from_raw_parts_mut(self.memory.add(offset), layout.size())
                })
        })
    }

    /// # Safety
    /// The returned pointer bypasses lifetime tracking: it must not be used
    /// after this box (and all of its clones) is dropped, nor while the value
    /// is mutably borrowed.
    pub unsafe fn as_ptr<T>(&self) -> Option<*const T> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) =
                storage.access_object_lifetime_type::<T>(self.memory, self.id, self.page, true)?;
            Some(pointer.cast_const().cast())
        })
    }

    /// # Safety
    /// The returned pointer bypasses lifetime tracking: it must not be used
    /// after this box (and all of its clones) is dropped, nor while any other
    /// borrow of the value exists.
    pub unsafe fn as_ptr_mut<T>(&mut self) -> Option<*mut T> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) =
                storage.access_object_lifetime_type::<T>(self.memory, self.id, self.page, true)?;
            Some(pointer.cast())
        })
    }

    /// # Safety
    /// As for [`Self::as_ptr`], but the pointer is untyped and the type is
    /// not checked, so the caller must also respect the object's layout.
    pub unsafe fn as_ptr_raw(&self) -> Option<*const u8> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) = storage.access_object_lifetime_type::<u8>(
                self.memory,
                self.id,
                self.page,
                false,
            )?;
            Some(pointer.cast_const())
        })
    }

    /// # Safety
    /// As for [`Self::as_ptr_mut`], but the pointer is untyped and the type
    /// is not checked, so the caller must also respect the object's layout.
    pub unsafe fn as_mut_ptr_raw(&mut self) -> Option<*mut u8> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) = storage.access_object_lifetime_type::<u8>(
                self.memory,
                self.id,
                self.page,
                false,
            )?;
            Some(pointer)
        })
    }
}

impl Clone for DynamicManagedBox {
    fn clone(&self) -> Self {
        STORAGE.with_borrow_mut(|storage| {
            storage.increment(self.id, self.page, self.memory);
            Self {
                memory: self.memory,
                id: self.id,
                page: self.page,
                drop: true,
            }
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_occupancy_range() {
        let v = OccupancyRange {
            bits_start_inclusive: 0,
            bits_end_exclusive: 128,
            ..Default::default()
        }
        .update_mask();
        assert_eq!(v.mask, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 0..128);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE);

        let v = OccupancyRange::default();
        assert_eq!(v.mask, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 0..128);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE);

        let v = v.split().unwrap().0;
        assert_eq!(v.mask, 0x0000000000000000FFFFFFFFFFFFFFFF);
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 0..64);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE / 2);

        let v = v.split().unwrap().1;
        assert_eq!(v.mask, 0x0000000000000000FFFFFFFF00000000);
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 32..64);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE / 4);

        let v = v.split().unwrap().0;
        assert_eq!(v.mask, 0x00000000000000000000FFFF00000000);
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 32..48);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE / 8);

        let v = v.split().unwrap().1;
        assert_eq!(v.mask, 0x00000000000000000000FF0000000000);
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 40..48);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE / 16);

        let v = v.split().unwrap().0;
        assert_eq!(v.mask, 0x000000000000000000000F0000000000);
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 40..44);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE / 32);

        let v = v.split().unwrap().1;
        assert_eq!(
            v.mask,
            0b0000000000000000000011000000000000000000000000000000000000000000
        );
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 42..44);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE / 64);

        let v = v.split().unwrap().0;
        assert_eq!(
            v.mask,
            0b0000000000000000000001000000000000000000000000000000000000000000
        );
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 42..43);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE / 128);

        assert!(v.split().is_none());
    }

    #[test]
    fn test_occupancy_map() {
        let header_size = Layout::new::<ManagedObjectHeader>().pad_to_align().size();
        let mut map = OccupancyMap::default();

        let range = map
            .find_free_space(
                std::mem::size_of::<f32>() + header_size,
                OccupancyRange::default(),
            )
            .unwrap();
        map.occupy(range);
        assert_eq!(range.bits_start_inclusive..range.bits_end_exclusive, 0..1);

        let range = map
            .find_free_space(
                std::mem::size_of::<u8>() + header_size,
                OccupancyRange::default(),
            )
            .unwrap();
        map.occupy(range);
        assert_eq!(range.bits_start_inclusive..range.bits_end_exclusive, 1..2);
    }

    #[test]
    fn test_managed_box() {
        assert_eq!(managed_storage_stats(), ManagedStorageStats::default());
        let a = ManagedBox::new(42usize);
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 128,
                free_size: 16256,
                ..Default::default()
            }
        );
        assert_eq!(*a.read().unwrap(), 42);
        assert_eq!(a.instances_count(), 1);
        let mut b = a.clone();
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 128,
                free_size: 16256,
                ..Default::default()
            }
        );
        assert_eq!(a.instances_count(), 2);
        assert_eq!(b.instances_count(), 2);
        assert!(a.does_share_reference(&b));
        assert_eq!(*b.read().unwrap(), 42);
        *b.write().unwrap() = 10;
        assert_eq!(*a.read().unwrap(), 10);
        assert_eq!(*b.read().unwrap(), 10);
        drop(a);
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 128,
                free_size: 16256,
                ..Default::default()
            }
        );
        assert_eq!(b.instances_count(), 1);
        drop(b);
        assert_eq!(managed_storage_stats(), ManagedStorageStats::default());
    }

    #[test]
    fn test_dynamic_managed_box() {
        assert_eq!(managed_storage_stats(), ManagedStorageStats::default());
        let a = DynamicManagedBox::new(42usize);
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 128,
                free_size: 16256,
                ..Default::default()
            }
        );
        assert!(a.is::<usize>());
        assert_eq!(*a.read::<usize>().unwrap(), 42);
        assert_eq!(a.instances_count(), 1);
        let mut b = a.clone();
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 128,
                free_size: 16256,
                ..Default::default()
            }
        );
        assert!(b.is::<usize>());
        assert_eq!(a.instances_count(), 2);
        assert_eq!(b.instances_count(), 2);
        assert!(a.does_share_reference(&b));
        assert_eq!(*b.read::<usize>().unwrap(), 42);
        *b.write::<usize>().unwrap() = 10;
        assert_eq!(*a.read::<usize>().unwrap(), 10);
        assert_eq!(*b.read::<usize>().unwrap(), 10);
        drop(a);
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 128,
                free_size: 16256,
                ..Default::default()
            }
        );
        assert_eq!(b.instances_count(), 1);
        drop(b);
        assert_eq!(managed_storage_stats(), ManagedStorageStats::default());
    }

    #[test]
    fn test_growing_allocations() {
        assert_eq!(managed_storage_stats(), ManagedStorageStats::default());
        let a = ManagedBox::<[u64; 10]>::new(std::array::from_fn(|index| index as _));
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 256,
                free_size: 16128,
                ..Default::default()
            }
        );
        let b = ManagedBox::<[u64; 100]>::new(std::array::from_fn(|index| index as _));
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 1280,
                free_size: 15104,
                ..Default::default()
            }
        );
        let c = ManagedBox::<[u64; 1000]>::new(std::array::from_fn(|index| index as _));
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 9472,
                free_size: 6912,
                ..Default::default()
            }
        );
        let d = ManagedBox::<[u64; 10000]>::new(std::array::from_fn(|index| index as _));
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 2,
                chunked_pages_count: 1,
                exclusive_pages_count: 1,
                total_size: 96528,
                occupied_size: 89608,
                free_size: 6912
            }
        );
        drop(a);
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 2,
                chunked_pages_count: 1,
                exclusive_pages_count: 1,
                total_size: 96528,
                occupied_size: 89352,
                free_size: 7168
            }
        );
        drop(b);
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 2,
                chunked_pages_count: 1,
                exclusive_pages_count: 1,
                total_size: 96528,
                occupied_size: 88328,
                free_size: 8192
            }
        );
        drop(c);
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 0,
                exclusive_pages_count: 1,
                total_size: 80136,
                occupied_size: 80136,
                free_size: 0
            }
        );
        drop(d);
        assert_eq!(managed_storage_stats(), ManagedStorageStats::default());
    }

    #[test]
    fn test_managed_box_borrows() {
        let v = ManagedBox::new(42usize);
        let r = v.borrow().unwrap();
        drop(v);
        assert!(r.read().is_none());
    }

    #[test]
    fn test_fuzz_managed_box() {
        let builders = [
            || DynamicManagedBox::new(1u8),
            || DynamicManagedBox::new(2u16),
            || DynamicManagedBox::new(3u32),
            || DynamicManagedBox::new(4u64),
            || DynamicManagedBox::new(5u128),
            || DynamicManagedBox::new([42u8; 1000]),
            || DynamicManagedBox::new([42u8; 10000]),
            || DynamicManagedBox::new([42u8; 100000]),
        ];
        let mut boxes = std::array::from_fn::<_, 50, _>(|_| None);
        for index in 0..100 {
            let source = index % builders.len();
            let target = index % boxes.len();
            boxes[target] = Some((builders[source])());
        }
    }
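
    // Not part of the original suite: a small round-trip sketch showing that
    // converting between typed and dynamic boxes preserves the stored value
    // and the shared allocation (assumes `u32` is finalizable like the other
    // primitives used above).
    #[test]
    fn test_typed_dynamic_roundtrip() {
        let a = ManagedBox::new(7u32);
        let dynamic = a.into_dynamic();
        assert!(dynamic.is::<u32>());
        assert_eq!(dynamic.instances_count(), 1);
        let b = dynamic.into_typed::<u32>().ok().unwrap();
        assert_eq!(*b.read().unwrap(), 7);
    }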
}