use crate::{
    Finalize,
    lifetime::{
        Lifetime, LifetimeLazy, LifetimeRef, LifetimeRefMut, ValueReadAccess, ValueWriteAccess,
    },
    managed::{
        DynamicManagedLazy, DynamicManagedRef, DynamicManagedRefMut, ManagedLazy, ManagedRef,
        ManagedRefMut,
    },
    non_zero_alloc, non_zero_dealloc, pointer_alignment_padding,
    type_hash::TypeHash,
};
use std::{
    alloc::Layout, cell::RefCell, collections::HashMap, future::poll_fn, ops::Range, task::Poll,
};

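// One chunked page is divided into `u128::BITS` (128) chunks of 128 bytes
// each, so MEMORY_PAGE_SIZE == 128 * 128 == 16384 bytes; a single `u128`
// occupancy mask tracks one bit per chunk.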
const MEMORY_CHUNK_SIZE: usize = 128;
const MEMORY_PAGE_SIZE: usize = MEMORY_CHUNK_SIZE * u128::BITS as usize;

thread_local! {
    static STORAGE: RefCell<ManagedStorage> = Default::default();
}

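/// Reports allocator statistics for the current thread's managed storage.
///
/// A minimal usage sketch (illustrative only; mirrors the assertions in the
/// tests at the bottom of this file):
///
/// ```ignore
/// let before = managed_storage_stats();
/// let boxed = ManagedBox::new(42usize);
/// let after = managed_storage_stats();
/// assert!(after.occupied_size > before.occupied_size);
/// drop(boxed);
/// ```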
pub fn managed_storage_stats() -> ManagedStorageStats {
    STORAGE.with_borrow(|storage| storage.stats())
}

enum ManagedObjectHeader {
    Occupied {
        id: usize,
        type_hash: TypeHash,
        lifetime: Lifetime,
        layout: Layout,
        finalizer: unsafe fn(*mut ()),
        instances_count: usize,
        padding: u8,
    },
    Free,
}

impl std::fmt::Debug for ManagedObjectHeader {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Occupied {
                id,
                type_hash,
                layout,
                finalizer,
                instances_count,
                ..
            } => f
                .debug_struct("Occupied")
                .field("id", id)
                .field("type_hash", type_hash)
                .field("layout", layout)
                .field("finalizer", finalizer)
                .field("instances_count", instances_count)
                .finish_non_exhaustive(),
            Self::Free => write!(f, "Free"),
        }
    }
}

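/// Buddy-style occupancy bitmap for one chunked page: each bit marks one
/// 128-byte chunk as used. `find_free_space` recursively halves the search
/// range and, when both halves fit, prefers the smaller candidate, which
/// keeps allocations clustered and large ranges free.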
#[derive(Debug, Default, Clone, Copy)]
struct OccupancyMap {
    mask: u128,
}

impl OccupancyMap {
    fn occupy(&mut self, range: OccupancyRange) {
        self.mask |= range.mask;
    }

    fn free(&mut self, range: OccupancyRange) {
        self.mask &= !range.mask;
    }

    fn is_free(&self, range: OccupancyRange) -> bool {
        self.mask & range.mask == 0
    }

    fn find_free_space(
        &self,
        object_with_header_size: usize,
        range: OccupancyRange,
    ) -> Option<OccupancyRange> {
        if object_with_header_size > range.byte_size() {
            return None;
        }
        if self.is_free(range) {
            return range.cut(object_with_header_size);
        }
        let (left, right) = range.split()?;
        let left = self.find_free_space(object_with_header_size, left);
        let right = self.find_free_space(object_with_header_size, right);
        match (left, right) {
            (None, None) => None,
            (None, Some(right)) => Some(right),
            (Some(left), None) => Some(left),
            (Some(left), Some(right)) => {
                if right.byte_size() < left.byte_size() {
                    Some(right)
                } else {
                    Some(left)
                }
            }
        }
    }
}

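/// Half-open range of occupancy bits together with its precomputed mask;
/// e.g. the chunk range `2..4` yields `mask == 0b1100`.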
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct OccupancyRange {
    bits_start_inclusive: usize,
    bits_end_exclusive: usize,
    mask: u128,
}

impl Default for OccupancyRange {
    fn default() -> Self {
        Self {
            bits_start_inclusive: 0,
            bits_end_exclusive: u128::BITS as _,
            mask: u128::MAX,
        }
    }
}

impl OccupancyRange {
    fn range(&self) -> Range<usize> {
        self.bits_start_inclusive..self.bits_end_exclusive
    }

    fn byte_offset(&self) -> usize {
        self.bits_start_inclusive * MEMORY_CHUNK_SIZE
    }

    fn byte_size(&self) -> usize {
        (self.bits_end_exclusive - self.bits_start_inclusive) * MEMORY_CHUNK_SIZE
    }

    fn update_mask(mut self) -> Self {
        let size = self.bits_end_exclusive - self.bits_start_inclusive;
        self.mask = if size == u128::BITS as _ {
            u128::MAX
        } else {
            (!u128::MAX.wrapping_shl(size as _)).wrapping_shl(self.bits_start_inclusive as _)
        };
        self
    }

    fn cut(&self, object_with_header_size: usize) -> Option<Self> {
        // Round the requested size up to a whole number of chunks (at least one)
        // and compare chunk counts, not chunks against bytes.
        let size = (object_with_header_size.next_power_of_two() / MEMORY_CHUNK_SIZE).max(1);
        if size <= self.bits_end_exclusive - self.bits_start_inclusive {
            Some(
                Self {
                    bits_start_inclusive: self.bits_start_inclusive,
                    bits_end_exclusive: self.bits_start_inclusive + size,
                    mask: 0,
                }
                .update_mask(),
            )
        } else {
            None
        }
    }

    fn split(&self) -> Option<(Self, Self)> {
        let half_size = (self.bits_end_exclusive - self.bits_start_inclusive) / 2;
        if half_size == 0 {
            return None;
        }
        let start = self.bits_start_inclusive;
        let mid = self.bits_start_inclusive + half_size;
        let end = self.bits_end_exclusive;
        Some((
            Self {
                bits_start_inclusive: start,
                bits_end_exclusive: mid,
                mask: 0,
            }
            .update_mask(),
            Self {
                bits_start_inclusive: mid,
                bits_end_exclusive: end,
                mask: 0,
            }
            .update_mask(),
        ))
    }

    fn from_pointer_size(memory: *const u8, pointer: *const u8, size: usize) -> Self {
        let offset = pointer as usize - memory as usize;
        let from = offset / MEMORY_CHUNK_SIZE;
        // Mirror `cut`: round up and occupy at least one chunk.
        let to = from + (size.next_power_of_two() / MEMORY_CHUNK_SIZE).max(1);
        Self {
            bits_start_inclusive: from,
            bits_end_exclusive: to,
            mask: 0,
        }
        .update_mask()
    }
}

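/// Backing memory for managed objects. `Chunked` pages hold many small
/// objects tracked by an `OccupancyMap`, while `Exclusive` pages hold a
/// single object too large to fit into one chunked page.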
enum ManagedMemoryPage {
    Chunked {
        memory: *mut u8,
        layout: Layout,
        occupancy: OccupancyMap,
        padding: u8,
    },
    Exclusive {
        memory: *mut u8,
        layout: Layout,
        padding: u8,
    },
}

impl Drop for ManagedMemoryPage {
    fn drop(&mut self) {
        unsafe {
            match self {
                ManagedMemoryPage::Chunked { memory, layout, .. }
                | ManagedMemoryPage::Exclusive { memory, layout, .. } => {
                    if memory.is_null() {
                        return;
                    }
                    non_zero_dealloc(*memory, *layout);
                }
            }
        }
    }
}

impl ManagedMemoryPage {
    fn new_chunked() -> Option<Self> {
        let header_layout = Layout::new::<ManagedObjectHeader>().pad_to_align();
        let layout = Layout::from_size_align(MEMORY_PAGE_SIZE + header_layout.align(), 1).unwrap();
        unsafe {
            let memory = non_zero_alloc(layout);
            if memory.is_null() {
                None
            } else {
                let padding = pointer_alignment_padding(memory, header_layout.align());
                for offset in (0..MEMORY_PAGE_SIZE).step_by(MEMORY_CHUNK_SIZE) {
                    memory
                        .add(padding + offset)
                        .cast::<ManagedObjectHeader>()
                        .write(ManagedObjectHeader::Free);
                }
                Some(Self::Chunked {
                    memory,
                    layout,
                    occupancy: Default::default(),
                    padding: padding as u8,
                })
            }
        }
    }

    fn new_exclusive(size: usize, alignment: usize) -> Option<Self> {
        unsafe {
            let header_layout = Layout::new::<ManagedObjectHeader>().pad_to_align();
            let layout =
                Layout::from_size_align_unchecked(header_layout.size() + size + alignment, 1);
            let memory = non_zero_alloc(layout);
            if memory.is_null() {
                None
            } else {
                let padding = pointer_alignment_padding(memory, header_layout.align());
                memory
                    .add(padding)
                    .cast::<ManagedObjectHeader>()
                    .write(ManagedObjectHeader::Free);
                Some(Self::Exclusive {
                    layout,
                    memory,
                    padding: padding as u8,
                })
            }
        }
    }

    fn alloc_uninitialized(
        &mut self,
        id: usize,
        page: usize,
        type_hash: TypeHash,
        layout: Layout,
        finalizer: unsafe fn(*mut ()),
    ) -> Option<DynamicManagedBox> {
        let header_layout = Layout::new::<ManagedObjectHeader>().pad_to_align();
        match self {
            ManagedMemoryPage::Chunked {
                memory,
                occupancy,
                padding,
                ..
            } => unsafe {
                let range = occupancy.find_free_space(
                    header_layout.size() + layout.size(),
                    OccupancyRange::default(),
                )?;
                let memory = memory.add(*padding as usize + range.byte_offset());
                let padding = pointer_alignment_padding(memory, layout.align());
                // The object needs header + alignment padding + payload bytes.
                if header_layout.size() + layout.size() + padding > range.byte_size() {
                    return None;
                }
                occupancy.occupy(range);
                *memory.cast::<ManagedObjectHeader>().as_mut().unwrap() =
                    ManagedObjectHeader::Occupied {
                        id,
                        type_hash,
                        lifetime: Default::default(),
                        layout,
                        finalizer,
                        instances_count: 1,
                        padding: padding as u8,
                    };
                Some(DynamicManagedBox {
                    memory,
                    id,
                    page,
                    drop: true,
                })
            },
            ManagedMemoryPage::Exclusive {
                memory, padding, ..
            } => unsafe {
                let memory = memory.add(*padding as usize);
                let padding = pointer_alignment_padding(memory, layout.align());
                *memory.cast::<ManagedObjectHeader>().as_mut().unwrap() =
                    ManagedObjectHeader::Occupied {
                        id,
                        type_hash,
                        lifetime: Default::default(),
                        layout,
                        finalizer,
                        instances_count: 1,
                        padding: padding as u8,
                    };
                Some(DynamicManagedBox {
                    memory,
                    id,
                    page,
                    drop: true,
                })
            },
        }
    }

    fn owns_pointer(&self, pointer: *const u8) -> bool {
        let (from, to) = unsafe {
            match self {
                ManagedMemoryPage::Chunked { memory, layout, .. }
                | ManagedMemoryPage::Exclusive { memory, layout, .. } => {
                    (*memory, memory.add(layout.size()))
                }
            }
        };
        pointer >= from && pointer < to
    }

    fn total_size(&self) -> usize {
        match self {
            ManagedMemoryPage::Chunked { layout, .. }
            | ManagedMemoryPage::Exclusive { layout, .. } => layout.size(),
        }
    }

    fn occupied_size(&self) -> usize {
        match self {
            ManagedMemoryPage::Chunked { occupancy, .. } => {
                occupancy.mask.count_ones() as usize * MEMORY_CHUNK_SIZE
            }
            ManagedMemoryPage::Exclusive { layout, .. } => layout.size(),
        }
    }

    fn free_size(&self) -> usize {
        match self {
            ManagedMemoryPage::Chunked { occupancy, .. } => {
                occupancy.mask.count_zeros() as usize * MEMORY_CHUNK_SIZE
            }
            ManagedMemoryPage::Exclusive { .. } => 0,
        }
    }
}

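/// Aggregated counters over all live pages. Note that `total_size` counts
/// each page's full allocation (including alignment slack), so it may
/// slightly exceed `occupied_size + free_size`.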
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct ManagedStorageStats {
    pub pages_count: usize,
    pub chunked_pages_count: usize,
    pub exclusive_pages_count: usize,
    pub total_size: usize,
    pub occupied_size: usize,
    pub free_size: usize,
}

#[derive(Default)]
struct ManagedStorage {
    object_id_generator: usize,
    page_id_generator: usize,
    pages: HashMap<usize, ManagedMemoryPage>,
}

impl ManagedStorage {
    fn stats(&self) -> ManagedStorageStats {
        ManagedStorageStats {
            pages_count: self.pages.len(),
            chunked_pages_count: self
                .pages
                .values()
                .filter(|page| matches!(page, ManagedMemoryPage::Chunked { .. }))
                .count(),
            exclusive_pages_count: self
                .pages
                .values()
                .filter(|page| matches!(page, ManagedMemoryPage::Exclusive { .. }))
                .count(),
            total_size: self.pages.values().map(|page| page.total_size()).sum(),
            occupied_size: self.pages.values().map(|page| page.occupied_size()).sum(),
            free_size: self.pages.values().map(|page| page.free_size()).sum(),
        }
    }

    fn generate_object_id(&mut self) -> usize {
        let result = self.object_id_generator;
        self.object_id_generator = self.object_id_generator.wrapping_add(1);
        result
    }

    fn generate_page_id(&mut self) -> usize {
        let result = self.page_id_generator;
        self.page_id_generator = self.page_id_generator.wrapping_add(1);
        result
    }

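    // Allocation strategy: objects whose header + payload exceed one page get
    // a dedicated exclusive page; everything else is first-fit placed into an
    // existing chunked page, falling back to a fresh chunked page.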
    fn alloc_uninitialized(
        &mut self,
        type_hash: TypeHash,
        layout: Layout,
        finalizer: unsafe fn(*mut ()),
    ) -> DynamicManagedBox {
        let id = self.generate_object_id();
        let size = layout.size() + Layout::new::<ManagedObjectHeader>().size();
        if size > MEMORY_PAGE_SIZE {
            let page_id = self.generate_page_id();
            let mut page = ManagedMemoryPage::new_exclusive(size, layout.align()).unwrap();
            let object = page
                .alloc_uninitialized(id, page_id, type_hash, layout, finalizer)
                .unwrap();
            self.pages.insert(page_id, page);
            object
        } else {
            for (page_id, page) in &mut self.pages {
                if matches!(page, ManagedMemoryPage::Chunked { .. }) {
                    if let Some(object) =
                        page.alloc_uninitialized(id, *page_id, type_hash, layout, finalizer)
                    {
                        return object;
                    }
                }
            }
            let page_id = self.generate_page_id();
            let mut page = ManagedMemoryPage::new_chunked().unwrap();
            let object = page
                .alloc_uninitialized(id, page_id, type_hash, layout, finalizer)
                .unwrap();
            self.pages.insert(page_id, page);
            object
        }
    }

    fn increment(&mut self, object_id: usize, page_id: usize, pointer: *mut u8) {
        if let Some(page) = self.pages.get(&page_id) {
            if page.owns_pointer(pointer) {
                unsafe {
                    let header = pointer.cast::<ManagedObjectHeader>().as_mut().unwrap();
                    if let ManagedObjectHeader::Occupied {
                        id,
                        instances_count,
                        ..
                    } = header
                    {
                        if object_id == *id {
                            *instances_count += 1;
                        }
                    }
                }
            }
        }
    }

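    // Dropping the last instance runs the finalizer, releases the occupancy
    // range (resetting the per-chunk headers to `Free`), and removes a fully
    // empty chunked page; exclusive pages are removed immediately.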
    fn decrement(&mut self, object_id: usize, page_id: usize, pointer: *mut u8) {
        if let Some(page) = self.pages.get_mut(&page_id) {
            if page.owns_pointer(pointer) {
                let header_size = Layout::new::<ManagedObjectHeader>().pad_to_align().size();
                unsafe {
                    let header = pointer.cast::<ManagedObjectHeader>().as_mut().unwrap();
                    if let ManagedObjectHeader::Occupied {
                        id,
                        layout,
                        finalizer,
                        instances_count,
                        padding,
                        ..
                    } = header
                    {
                        if object_id == *id && *instances_count > 0 {
                            *instances_count -= 1;
                            if *instances_count == 0 {
                                (finalizer)(
                                    pointer.add(header_size + *padding as usize).cast::<()>(),
                                );
                                match page {
                                    ManagedMemoryPage::Chunked {
                                        memory,
                                        occupancy,
                                        padding,
                                        ..
                                    } => {
                                        let range = OccupancyRange::from_pointer_size(
                                            memory.add(*padding as usize),
                                            pointer,
                                            header_size + layout.size(),
                                        );
                                        occupancy.free(range);
                                        *header = ManagedObjectHeader::Free;
                                        for index in range.range().skip(1) {
                                            memory
                                                .add(*padding as usize + index * MEMORY_CHUNK_SIZE)
                                                .cast::<ManagedObjectHeader>()
                                                .write(ManagedObjectHeader::Free);
                                        }
                                        if occupancy.is_free(OccupancyRange::default()) {
                                            self.pages.remove(&page_id);
                                        }
                                    }
                                    ManagedMemoryPage::Exclusive { .. } => {
                                        *header = ManagedObjectHeader::Free;
                                        self.pages.remove(&page_id);
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }

    fn access_object_lifetime_type<T>(
        &self,
        pointer: *mut u8,
        object_id: usize,
        page_id: usize,
        type_check: bool,
    ) -> Option<(*mut T, *mut Lifetime, TypeHash)> {
        if let Some(page) = self.pages.get(&page_id) {
            if page.owns_pointer(pointer) {
                let header_size = Layout::new::<ManagedObjectHeader>().pad_to_align().size();
                let header = unsafe { pointer.cast::<ManagedObjectHeader>().as_mut().unwrap() };
                if let ManagedObjectHeader::Occupied {
                    id,
                    type_hash,
                    lifetime,
                    instances_count,
                    padding,
                    ..
                } = header
                {
                    if object_id == *id
                        && *instances_count > 0
                        && (!type_check || *type_hash == TypeHash::of::<T>())
                    {
                        return Some((
                            unsafe { pointer.add(header_size + *padding as usize).cast::<T>() },
                            lifetime,
                            *type_hash,
                        ));
                    }
                }
            }
        }
        None
    }

    fn object_type_hash(
        &self,
        pointer: *mut u8,
        object_id: usize,
        page_id: usize,
    ) -> Option<TypeHash> {
        if let Some(page) = self.pages.get(&page_id) {
            if page.owns_pointer(pointer) {
                let header = unsafe { pointer.cast::<ManagedObjectHeader>().as_mut().unwrap() };
                if let ManagedObjectHeader::Occupied {
                    id,
                    type_hash,
                    instances_count,
                    ..
                } = header
                {
                    if object_id == *id && *instances_count > 0 {
                        return Some(*type_hash);
                    }
                }
            }
        }
        None
    }

    fn object_layout_with_offset(
        &self,
        pointer: *mut u8,
        object_id: usize,
        page_id: usize,
    ) -> Option<(Layout, usize)> {
        if let Some(page) = self.pages.get(&page_id) {
            if page.owns_pointer(pointer) {
                let header_size = Layout::new::<ManagedObjectHeader>().pad_to_align().size();
                let header = unsafe { pointer.cast::<ManagedObjectHeader>().as_mut().unwrap() };
                if let ManagedObjectHeader::Occupied {
                    id,
                    layout,
                    instances_count,
                    padding,
                    ..
                } = header
                {
                    if object_id == *id && *instances_count > 0 {
                        return Some((*layout, header_size + *padding as usize));
                    }
                }
            }
        }
        None
    }

    fn object_instances_count(&self, pointer: *mut u8, object_id: usize, page_id: usize) -> usize {
        if let Some(page) = self.pages.get(&page_id) {
            if page.owns_pointer(pointer) {
                let header = unsafe { pointer.cast::<ManagedObjectHeader>().as_mut().unwrap() };
                if let ManagedObjectHeader::Occupied {
                    id,
                    instances_count,
                    ..
                } = header
                {
                    if object_id == *id {
                        return *instances_count;
                    }
                }
            }
        }
        0
    }
}

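/// Reference-counted, typed handle into the thread-local managed storage.
/// Cloning does not copy the value; it bumps the shared instances counter,
/// and the value is finalized when the last handle drops.
///
/// A minimal usage sketch (mirrors `test_managed_box` below):
///
/// ```ignore
/// let a = ManagedBox::new(42usize);
/// let mut b = a.clone();
/// assert_eq!(a.instances_count(), 2);
/// *b.write().unwrap() = 10;
/// assert_eq!(*a.read().unwrap(), 10);
/// ```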
pub struct ManagedBox<T> {
    memory: *mut T,
    id: usize,
    page: usize,
    drop: bool,
}

impl<T: Default> Default for ManagedBox<T> {
    fn default() -> Self {
        Self::new(T::default())
    }
}

impl<T> Drop for ManagedBox<T> {
    fn drop(&mut self) {
        if self.drop {
            STORAGE.with_borrow_mut(|storage| {
                storage.decrement(self.id, self.page, self.memory.cast());
            })
        }
    }
}

impl<T> ManagedBox<T> {
    pub fn new(value: T) -> Self
    where
        T: Finalize,
    {
        let mut result = DynamicManagedBox::new(value);
        result.drop = false;
        Self {
            memory: result.memory.cast(),
            id: result.id,
            page: result.page,
            drop: true,
        }
    }

    pub fn into_dynamic(mut self) -> DynamicManagedBox {
        self.drop = false;
        DynamicManagedBox {
            memory: self.memory.cast(),
            id: self.id,
            page: self.page,
            drop: true,
        }
    }

    pub fn instances_count(&self) -> usize {
        STORAGE.with_borrow(|storage| {
            storage.object_instances_count(self.memory.cast(), self.id, self.page)
        })
    }

    pub fn does_share_reference(&self, other: &Self) -> bool {
        self.id == other.id && self.page == other.page && self.memory == other.memory
    }

    pub fn type_hash(&self) -> Option<TypeHash> {
        STORAGE
            .with_borrow(|storage| storage.object_type_hash(self.memory.cast(), self.id, self.page))
    }

    pub fn lifetime_borrow(&self) -> Option<LifetimeRef> {
        STORAGE.with_borrow(|storage| {
            let (_, lifetime, _) = storage.access_object_lifetime_type::<u8>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            unsafe { lifetime.as_ref()?.borrow() }
        })
    }

    pub fn lifetime_borrow_mut(&self) -> Option<LifetimeRefMut> {
        STORAGE.with_borrow(|storage| {
            let (_, lifetime, _) = storage.access_object_lifetime_type::<u8>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            unsafe { lifetime.as_ref()?.borrow_mut() }
        })
    }

    pub fn lifetime_lazy(&self) -> Option<LifetimeLazy> {
        STORAGE.with_borrow(|storage| {
            let (_, lifetime, _) = storage.access_object_lifetime_type::<u8>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            unsafe { Some(lifetime.as_ref()?.lazy()) }
        })
    }

    pub fn read(&self) -> Option<ValueReadAccess<T>> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                true,
            )?;
            unsafe { lifetime.as_ref()?.read_ptr(pointer) }
        })
    }

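    // The `*_async` accessors below busy-poll: each failed borrow wakes its
    // own waker and returns `Pending`, so the future is retried on the next
    // executor poll until access becomes available.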
    pub async fn read_async(&self) -> ValueReadAccess<T> {
        loop {
            if let Some(access) = self.read() {
                return access;
            }
            poll_fn(|cx| {
                cx.waker().wake_by_ref();
                Poll::<ValueReadAccess<T>>::Pending
            })
            .await;
        }
    }

    pub fn write(&mut self) -> Option<ValueWriteAccess<T>> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                true,
            )?;
            unsafe { lifetime.as_mut()?.write_ptr(pointer) }
        })
    }

    pub async fn write_async(&mut self) -> ValueWriteAccess<T> {
        loop {
            let result = STORAGE.with_borrow(|storage| {
                let (pointer, lifetime, _) = storage.access_object_lifetime_type::<T>(
                    self.memory.cast(),
                    self.id,
                    self.page,
                    true,
                )?;
                unsafe { lifetime.as_mut()?.write_ptr(pointer) }
            });
            if let Some(access) = result {
                return access;
            }
            poll_fn(|cx| {
                cx.waker().wake_by_ref();
                Poll::<ValueWriteAccess<T>>::Pending
            })
            .await;
        }
    }

    pub fn borrow(&self) -> Option<ManagedRef<T>> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                true,
            )?;
            unsafe { ManagedRef::new_raw(pointer, lifetime.as_ref()?.borrow()?) }
        })
    }

    pub async fn borrow_async(&self) -> ManagedRef<T> {
        loop {
            if let Some(access) = self.borrow() {
                return access;
            }
            poll_fn(|cx| {
                cx.waker().wake_by_ref();
                Poll::<ManagedRef<T>>::Pending
            })
            .await;
        }
    }

    pub fn borrow_mut(&mut self) -> Option<ManagedRefMut<T>> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                true,
            )?;
            unsafe { ManagedRefMut::new_raw(pointer, lifetime.as_mut()?.borrow_mut()?) }
        })
    }

    pub async fn borrow_mut_async(&mut self) -> ManagedRefMut<T> {
        loop {
            if let Some(access) = self.borrow_mut() {
                return access;
            }
            poll_fn(|cx| {
                cx.waker().wake_by_ref();
                Poll::<ManagedRefMut<T>>::Pending
            })
            .await;
        }
    }

    pub fn lazy(&self) -> Option<ManagedLazy<T>> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                true,
            )?;
            unsafe { ManagedLazy::new_raw(pointer, lifetime.as_mut().unwrap().lazy()) }
        })
    }

    pub unsafe fn as_ptr(&self) -> Option<*const T> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                true,
            )?;
            Some(pointer.cast_const())
        })
    }

    pub unsafe fn as_ptr_mut(&mut self) -> Option<*mut T> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                true,
            )?;
            Some(pointer)
        })
    }

    pub unsafe fn as_ptr_raw(&self) -> Option<*const u8> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            Some(pointer.cast_const().cast())
        })
    }

    pub unsafe fn as_mut_ptr_raw(&mut self) -> Option<*mut u8> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            Some(pointer.cast())
        })
    }
}

impl<T> Clone for ManagedBox<T> {
    fn clone(&self) -> Self {
        STORAGE.with_borrow_mut(|storage| {
            storage.increment(self.id, self.page, self.memory.cast());
            Self {
                memory: self.memory,
                id: self.id,
                page: self.page,
                drop: true,
            }
        })
    }
}

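/// Type-erased counterpart of [`ManagedBox`]: the stored `TypeHash` is
/// checked on every typed access.
///
/// A minimal usage sketch (mirrors `test_dynamic_managed_box` below):
///
/// ```ignore
/// let mut a = DynamicManagedBox::new(42usize);
/// assert!(a.is::<usize>());
/// assert!(a.read::<u32>().is_none()); // type mismatch is rejected
/// *a.write::<usize>().unwrap() = 10;
/// ```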
pub struct DynamicManagedBox {
    memory: *mut u8,
    id: usize,
    page: usize,
    drop: bool,
}

impl Drop for DynamicManagedBox {
    fn drop(&mut self) {
        if self.drop {
            STORAGE.with_borrow_mut(|storage| {
                storage.decrement(self.id, self.page, self.memory);
            })
        }
    }
}

impl DynamicManagedBox {
    pub fn new<T: Finalize>(value: T) -> Self {
        unsafe {
            let mut result =
                Self::new_uninitialized(TypeHash::of::<T>(), Layout::new::<T>(), T::finalize_raw);
            result.as_ptr_mut::<T>().unwrap().write(value);
            result
        }
    }

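    /// Allocates storage for `layout` without writing a value; the caller
    /// must fully initialize the memory (as `new` does through `as_ptr_mut`)
    /// before the object is read or finalized.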
    pub fn new_uninitialized(
        type_hash: TypeHash,
        layout: Layout,
        finalizer: unsafe fn(*mut ()),
    ) -> Self {
        STORAGE.with_borrow_mut(|storage| {
            storage.alloc_uninitialized(type_hash, layout.pad_to_align(), finalizer)
        })
    }

    pub fn into_typed<T>(mut self) -> Result<ManagedBox<T>, Self> {
        if self.is::<T>() {
            self.drop = false;
            Ok(ManagedBox {
                memory: self.memory.cast(),
                id: self.id,
                page: self.page,
                drop: true,
            })
        } else {
            Err(self)
        }
    }

    pub fn instances_count(&self) -> usize {
        STORAGE
            .with_borrow(|storage| storage.object_instances_count(self.memory, self.id, self.page))
    }

    pub fn does_share_reference(&self, other: &Self) -> bool {
        self.id == other.id && self.page == other.page && self.memory == other.memory
    }

    pub fn type_hash(&self) -> Option<TypeHash> {
        STORAGE.with_borrow(|storage| storage.object_type_hash(self.memory, self.id, self.page))
    }

    pub fn lifetime_borrow(&self) -> Option<LifetimeRef> {
        STORAGE.with_borrow(|storage| {
            let (_, lifetime, _) = storage.access_object_lifetime_type::<u8>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            unsafe { lifetime.as_ref()?.borrow() }
        })
    }

    pub fn lifetime_borrow_mut(&self) -> Option<LifetimeRefMut> {
        STORAGE.with_borrow(|storage| {
            let (_, lifetime, _) = storage.access_object_lifetime_type::<u8>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            unsafe { lifetime.as_ref()?.borrow_mut() }
        })
    }

    pub fn lifetime_lazy(&self) -> Option<LifetimeLazy> {
        STORAGE.with_borrow(|storage| {
            let (_, lifetime, _) = storage.access_object_lifetime_type::<u8>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            unsafe { Some(lifetime.as_ref()?.lazy()) }
        })
    }

    pub fn is<T>(&self) -> bool {
        STORAGE.with_borrow(|storage| {
            storage
                .access_object_lifetime_type::<T>(self.memory, self.id, self.page, true)
                .is_some()
        })
    }

    pub fn borrow(&self) -> Option<DynamicManagedRef> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, type_hash) = storage.access_object_lifetime_type::<u8>(
                self.memory,
                self.id,
                self.page,
                false,
            )?;
            unsafe { DynamicManagedRef::new_raw(type_hash, lifetime.as_ref()?.borrow()?, pointer) }
        })
    }

    pub async fn borrow_async(&self) -> DynamicManagedRef {
        loop {
            if let Some(access) = self.borrow() {
                return access;
            }
            poll_fn(|cx| {
                cx.waker().wake_by_ref();
                Poll::<DynamicManagedRef>::Pending
            })
            .await;
        }
    }

    pub fn borrow_mut(&mut self) -> Option<DynamicManagedRefMut> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, type_hash) = storage.access_object_lifetime_type::<u8>(
                self.memory,
                self.id,
                self.page,
                false,
            )?;
            unsafe {
                DynamicManagedRefMut::new_raw(type_hash, lifetime.as_mut()?.borrow_mut()?, pointer)
            }
        })
    }

    pub async fn borrow_mut_async(&mut self) -> DynamicManagedRefMut {
        loop {
            if let Some(access) = self.borrow_mut() {
                return access;
            }
            poll_fn(|cx| {
                cx.waker().wake_by_ref();
                Poll::<DynamicManagedRefMut>::Pending
            })
            .await;
        }
    }

    pub fn lazy(&self) -> Option<DynamicManagedLazy> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, type_hash) = storage.access_object_lifetime_type::<u8>(
                self.memory,
                self.id,
                self.page,
                false,
            )?;
            unsafe {
                DynamicManagedLazy::new_raw(type_hash, lifetime.as_mut().unwrap().lazy(), pointer)
            }
        })
    }

    pub fn read<T>(&self) -> Option<ValueReadAccess<T>> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, _) =
                storage.access_object_lifetime_type::<T>(self.memory, self.id, self.page, true)?;
            unsafe { lifetime.as_ref()?.read_ptr(pointer) }
        })
    }

    pub async fn read_async<'a, T: 'a>(&'a self) -> ValueReadAccess<'a, T> {
        loop {
            if let Some(access) = self.read() {
                return access;
            }
            poll_fn(|cx| {
                cx.waker().wake_by_ref();
                Poll::<ValueReadAccess<T>>::Pending
            })
            .await;
        }
    }

    pub fn write<T>(&mut self) -> Option<ValueWriteAccess<T>> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, _) =
                storage.access_object_lifetime_type::<T>(self.memory, self.id, self.page, true)?;
            unsafe { lifetime.as_mut()?.write_ptr(pointer) }
        })
    }

    pub async fn write_async<'a, T: 'a>(&'a mut self) -> ValueWriteAccess<'a, T> {
        loop {
            let result = STORAGE.with_borrow(|storage| {
                let (pointer, lifetime, _) = storage.access_object_lifetime_type::<T>(
                    self.memory,
                    self.id,
                    self.page,
                    true,
                )?;
                unsafe { lifetime.as_mut()?.write_ptr(pointer) }
            });
            if let Some(access) = result {
                return access;
            }
            poll_fn(|cx| {
                cx.waker().wake_by_ref();
                Poll::<ValueWriteAccess<T>>::Pending
            })
            .await;
        }
    }

    pub unsafe fn memory(&self) -> Option<&[u8]> {
        STORAGE.with_borrow(|storage| {
            storage
                .object_layout_with_offset(self.memory, self.id, self.page)
                .map(|(layout, offset)| unsafe {
                    std::slice::from_raw_parts(self.memory.add(offset), layout.size())
                })
        })
    }

    pub unsafe fn memory_mut(&mut self) -> Option<&mut [u8]> {
        STORAGE.with_borrow(|storage| {
            storage
                .object_layout_with_offset(self.memory, self.id, self.page)
                .map(|(layout, offset)| unsafe {
                    std::slice::from_raw_parts_mut(self.memory.add(offset), layout.size())
                })
        })
    }

    pub unsafe fn as_ptr<T>(&self) -> Option<*const T> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) =
                storage.access_object_lifetime_type::<T>(self.memory, self.id, self.page, true)?;
            Some(pointer.cast_const().cast())
        })
    }

    pub unsafe fn as_ptr_mut<T>(&mut self) -> Option<*mut T> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) =
                storage.access_object_lifetime_type::<T>(self.memory, self.id, self.page, true)?;
            Some(pointer.cast())
        })
    }

    pub unsafe fn as_ptr_raw(&self) -> Option<*const u8> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) = storage.access_object_lifetime_type::<u8>(
                self.memory,
                self.id,
                self.page,
                false,
            )?;
            Some(pointer.cast_const())
        })
    }

    pub unsafe fn as_mut_ptr_raw(&mut self) -> Option<*mut u8> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) = storage.access_object_lifetime_type::<u8>(
                self.memory,
                self.id,
                self.page,
                false,
            )?;
            Some(pointer)
        })
    }
}

impl Clone for DynamicManagedBox {
    fn clone(&self) -> Self {
        STORAGE.with_borrow_mut(|storage| {
            storage.increment(self.id, self.page, self.memory);
            Self {
                memory: self.memory,
                id: self.id,
                page: self.page,
                drop: true,
            }
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_occupancy_range() {
        let v = OccupancyRange {
            bits_start_inclusive: 0,
            bits_end_exclusive: 128,
            ..Default::default()
        }
        .update_mask();
        assert_eq!(v.mask, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 0..128);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE);

        let v = OccupancyRange::default();
        assert_eq!(v.mask, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 0..128);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE);

        let v = v.split().unwrap().0;
        assert_eq!(v.mask, 0x0000000000000000FFFFFFFFFFFFFFFF);
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 0..64);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE / 2);

        let v = v.split().unwrap().1;
        assert_eq!(v.mask, 0x0000000000000000FFFFFFFF00000000);
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 32..64);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE / 4);

        let v = v.split().unwrap().0;
        assert_eq!(v.mask, 0x00000000000000000000FFFF00000000);
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 32..48);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE / 8);

        let v = v.split().unwrap().1;
        assert_eq!(v.mask, 0x00000000000000000000FF0000000000);
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 40..48);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE / 16);

        let v = v.split().unwrap().0;
        assert_eq!(v.mask, 0x000000000000000000000F0000000000);
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 40..44);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE / 32);

        let v = v.split().unwrap().1;
        assert_eq!(
            v.mask,
            0b0000000000000000000011000000000000000000000000000000000000000000
        );
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 42..44);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE / 64);

        let v = v.split().unwrap().0;
        assert_eq!(
            v.mask,
            0b0000000000000000000001000000000000000000000000000000000000000000
        );
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 42..43);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE / 128);

        assert!(v.split().is_none());
    }

    #[test]
    fn test_occupancy_map() {
        let header_size = Layout::new::<ManagedObjectHeader>().pad_to_align().size();
        let mut map = OccupancyMap::default();

        let range = map
            .find_free_space(
                std::mem::size_of::<f32>() + header_size,
                OccupancyRange::default(),
            )
            .unwrap();
        map.occupy(range);
        assert_eq!(range.bits_start_inclusive..range.bits_end_exclusive, 0..1);

        let range = map
            .find_free_space(
                std::mem::size_of::<u8>() + header_size,
                OccupancyRange::default(),
            )
            .unwrap();
        map.occupy(range);
        assert_eq!(range.bits_start_inclusive..range.bits_end_exclusive, 1..2);
    }

    #[test]
    fn test_managed_box() {
        assert_eq!(managed_storage_stats(), ManagedStorageStats::default());
        let a = ManagedBox::new(42usize);
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 128,
                free_size: 16256,
                ..Default::default()
            }
        );
        assert_eq!(*a.read().unwrap(), 42);
        assert_eq!(a.instances_count(), 1);
        let mut b = a.clone();
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 128,
                free_size: 16256,
                ..Default::default()
            }
        );
        assert_eq!(a.instances_count(), 2);
        assert_eq!(b.instances_count(), 2);
        assert!(a.does_share_reference(&b));
        assert_eq!(*b.read().unwrap(), 42);
        *b.write().unwrap() = 10;
        assert_eq!(*a.read().unwrap(), 10);
        assert_eq!(*b.read().unwrap(), 10);
        drop(a);
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 128,
                free_size: 16256,
                ..Default::default()
            }
        );
        assert_eq!(b.instances_count(), 1);
        drop(b);
        assert_eq!(managed_storage_stats(), ManagedStorageStats::default());
    }

    #[test]
    fn test_dynamic_managed_box() {
        assert_eq!(managed_storage_stats(), ManagedStorageStats::default());
        let a = DynamicManagedBox::new(42usize);
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 128,
                free_size: 16256,
                ..Default::default()
            }
        );
        assert!(a.is::<usize>());
        assert_eq!(*a.read::<usize>().unwrap(), 42);
        assert_eq!(a.instances_count(), 1);
        let mut b = a.clone();
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 128,
                free_size: 16256,
                ..Default::default()
            }
        );
        assert!(b.is::<usize>());
        assert_eq!(a.instances_count(), 2);
        assert_eq!(b.instances_count(), 2);
        assert!(a.does_share_reference(&b));
        assert_eq!(*b.read::<usize>().unwrap(), 42);
        *b.write::<usize>().unwrap() = 10;
        assert_eq!(*a.read::<usize>().unwrap(), 10);
        assert_eq!(*b.read::<usize>().unwrap(), 10);
        drop(a);
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 128,
                free_size: 16256,
                ..Default::default()
            }
        );
        assert_eq!(b.instances_count(), 1);
        drop(b);
        assert_eq!(managed_storage_stats(), ManagedStorageStats::default());
    }

    #[test]
    fn test_growing_allocations() {
        assert_eq!(managed_storage_stats(), ManagedStorageStats::default());
        let a = ManagedBox::<[u64; 10]>::new(std::array::from_fn(|index| index as _));
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 256,
                free_size: 16128,
                ..Default::default()
            }
        );
        let b = ManagedBox::<[u64; 100]>::new(std::array::from_fn(|index| index as _));
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 1280,
                free_size: 15104,
                ..Default::default()
            }
        );
        let c = ManagedBox::<[u64; 1000]>::new(std::array::from_fn(|index| index as _));
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 9472,
                free_size: 6912,
                ..Default::default()
            }
        );
        let d = ManagedBox::<[u64; 10000]>::new(std::array::from_fn(|index| index as _));
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 2,
                chunked_pages_count: 1,
                exclusive_pages_count: 1,
                total_size: 96528,
                occupied_size: 89608,
                free_size: 6912
            }
        );
        drop(a);
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 2,
                chunked_pages_count: 1,
                exclusive_pages_count: 1,
                total_size: 96528,
                occupied_size: 89352,
                free_size: 7168
            }
        );
        drop(b);
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 2,
                chunked_pages_count: 1,
                exclusive_pages_count: 1,
                total_size: 96528,
                occupied_size: 88328,
                free_size: 8192
            }
        );
        drop(c);
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 0,
                exclusive_pages_count: 1,
                total_size: 80136,
                occupied_size: 80136,
                free_size: 0
            }
        );
        drop(d);
        assert_eq!(managed_storage_stats(), ManagedStorageStats::default());
    }

    #[test]
    fn test_managed_box_borrows() {
        let v = ManagedBox::new(42usize);
        let r = v.borrow().unwrap();
        drop(v);
        assert!(r.read().is_none());
    }

    #[test]
    fn test_fuzz_managed_box() {
        let builders = [
            || DynamicManagedBox::new(1u8),
            || DynamicManagedBox::new(2u16),
            || DynamicManagedBox::new(3u32),
            || DynamicManagedBox::new(4u64),
            || DynamicManagedBox::new(5u128),
            || DynamicManagedBox::new([42u8; 1000]),
            || DynamicManagedBox::new([42u8; 10000]),
            || DynamicManagedBox::new([42u8; 100000]),
        ];
        let mut boxes = std::array::from_fn::<_, 50, _>(|_| None);
        for index in 0..100 {
            let source = index % builders.len();
            let target = index % boxes.len();
            boxes[target] = Some((builders[source])());
        }
    }
}
1576}