bytesbuf 0.4.2

Types for creating and manipulating byte sequences.
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use std::any::Any;
use std::fmt::Debug;
use std::marker::PhantomData;
use std::ptr::NonNull;

/// Metadata describing a memory block, as provided by the memory provider.
///
/// This is a marker trait that combines [`Any`], [`Send`], [`Sync`], and [`Debug`] to ensure
/// that block metadata is thread-safe and can be downcast to a concrete type.
///
/// Trait objects of this type support type inspection via `is()` and `downcast_ref()`,
/// mirroring the equivalent methods on [`Any`].
///
/// Implement this trait on your metadata type to make it usable as block metadata:
///
/// ```
/// use bytesbuf::mem::BlockMeta;
///
/// #[derive(Debug)]
/// struct MyMetadata {
///     page_aligned: bool,
/// }
///
/// impl BlockMeta for MyMetadata {}
/// ```
pub trait BlockMeta: Any + Send + Sync + Debug {}

// This block exists for ergonomics, so you can just `foo.is()` instead of `<T as Any>::is(foo)`.
impl dyn BlockMeta {
    /// Returns `true` if the metadata is of type `T`.
    #[must_use]
    pub fn is<T: Any>(&self) -> bool {
        // Upcast to `dyn Any` first, then defer to its type inspection logic.
        let as_any: &dyn Any = self;
        as_any.is::<T>()
    }

    /// Attempts to downcast the metadata to a concrete type.
    ///
    /// Returns `None` if the metadata is not of type `T`.
    #[must_use]
    pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
        // Upcast to `dyn Any` first, then defer to its downcasting logic.
        let as_any: &dyn Any = self;
        as_any.downcast_ref::<T>()
    }
}

/// References a block of memory capacity rented from a memory provider.
///
/// While a memory provider only leases each block to one caller at a time, this caller may further
/// share and subdivide the block between multiple co-owners. These co-owners will coordinate the
/// read/write permissions over different slices of the block via their own logic, with the
/// `BlockRef` only used to represent the block as a whole, each co-owner having a cloned
/// `BlockRef` to the same block.
///
/// # Implementation design
///
/// Each memory provider implements its own accounting logic for tracking the memory blocks it
/// provides. This takes the form of a "manual" dynamic dispatch implementation via a function
/// table and data pointer passed to [`new()`][1].
///
/// You can think of `BlockRef` as an `Arc<RealBlock>`, except we are intentionally obscuring the
/// `RealBlock` from the API surface to allow all code upstream of `BlockRef` to be ignorant of the
/// real type of the block.
///
/// The assumption is that an efficient memory provider will allocate its data objects in a pool,
/// so if the `BlockRef` itself is held on the stack, there are no heap allocations necessary to
/// operate on memory blocks. This would be infeasible to achieve with trait objects, which are
/// unsized and have significant limitations on how they can be used. This is why we use the
/// manual dynamic dispatch mechanism instead of using Rust's trait system.
///
/// [1]: Self::new
#[derive(Debug)]
pub struct BlockRef {
    // Note that this entire object is simply a fat pointer - there is no real state.
    // At rent-time, some state is presented to the `SpanBuilder` that takes ownership
    // of the memory block but this is not preserved in the block reference itself.

    // Type-erased pointer to the memory provider's per-reference state.
    // Only the functions in `vtable` know how to interpret it.
    state: OpaqueStatePtr,
    // Function table supplied by the memory provider at construction time;
    // dispatches clone/drop/meta calls back to the provider's implementation.
    vtable: &'static BlockRefVTableInner,
}

impl BlockRef {
    /// Creates a new block reference using the provided dynamic implementation
    /// state and matching function table.
    ///
    /// # Safety
    ///
    /// `state` must remain valid for reads and writes until `BlockRefDynamic::drop`
    /// is called via `vtable`.
    #[must_use]
    pub const unsafe fn new<T: BlockRefDynamic>(state: NonNull<T::State>, vtable: &'static BlockRefVTable<T>) -> Self {
        // Erase the concrete state type; the vtable functions restore it on dispatch.
        Self {
            state: state.cast(),
            vtable: &vtable.inner,
        }
    }

    /// Memory provider specific metadata describing the block.
    ///
    /// Returns `None` if the memory provider does not publish metadata.
    #[must_use]
    pub fn meta(&self) -> Option<&dyn BlockMeta> {
        // Providers without metadata simply leave this entry unset.
        let meta_fn = self.vtable.meta?;

        // SAFETY: We are required to pass the original `state` here. We do.
        let meta_ptr = unsafe { meta_fn(self.state) };

        // SAFETY: The implementation is required to return a pointer that is valid for
        // reads for the lifetime of the `BlockRef`, so all is well here
        // because the returned reference borrows the `BlockRef`.
        Some(unsafe { meta_ptr.as_ref() })
    }
}

impl Clone for BlockRef {
    fn clone(&self) -> Self {
        let clone_fn = self.vtable.clone;

        // SAFETY: We are required to pass the original `state` here. We do.
        // The provider hands back the state pointer the clone must use from now on.
        let cloned_state = unsafe { clone_fn(self.state) };

        // Both references share the same provider, so the vtable is reused as-is.
        Self {
            state: cloned_state,
            vtable: self.vtable,
        }
    }
}

impl Drop for BlockRef {
    fn drop(&mut self) {
        let drop_fn = self.vtable.drop;

        // SAFETY: We are required to pass the original `state` here. We do,
        // and `state` is never accessed again after this call.
        unsafe { drop_fn(self.state) };
    }
}

// Type-erased form of the provider's `NonNull<T::State>`; restored via `.cast()` on dispatch.
type OpaqueStatePtr = NonNull<()>;

// # Safety
//
// These functions must always be called with the original `OpaqueStatePtr` supplied by the memory
// provider when creating the BlockRef (with clones using the clone's `OpaqueStatePtr`, respectively).
type CloneFn = unsafe fn(state: OpaqueStatePtr) -> OpaqueStatePtr;
type DropFn = unsafe fn(state: OpaqueStatePtr);
type MetaFn = unsafe fn(state: OpaqueStatePtr) -> NonNull<dyn BlockMeta>;

// `NonNull` is neither `Send` nor `Sync`, so these impls must be written out manually.
//
// SAFETY: The safety requirements of the dynamic implementation traits require thread-safety.
// The type itself consists entirely of data fields treated as read-only, so the
// thread-safety guarantee only relies on the implementation behind the trait being thread-safe.
unsafe impl Send for BlockRef {}

// SAFETY: The safety requirements of the dynamic implementation traits require thread-safety.
// The type itself consists entirely of data fields treated as read-only, so the
// thread-safety guarantee only relies on the implementation behind the trait being thread-safe.
unsafe impl Sync for BlockRef {}

// Type-erased function table shared by every `BlockRef` handed out by one provider.
#[derive(Debug)]
struct BlockRefVTableInner {
    // Produces the state pointer for a new clone of the `BlockRef`.
    clone: CloneFn,
    // Releases the per-reference state when a `BlockRef` is dropped.
    drop: DropFn,
    // Retrieves block metadata; `None` when the provider publishes no metadata.
    meta: Option<MetaFn>,
}

/// Function table that implements [`BlockRef`] for a specific memory provider.
///
/// Wraps a specific memory provider's [`BlockRefDynamic`] or [`BlockRefDynamicWithMeta`]
/// implementation into a form required to construct a [`BlockRef`].
///
/// # Examples
///
/// Create a vtable from a type implementing [`BlockRefDynamicWithMeta`]:
///
/// ```
/// # use std::ptr::NonNull;
/// # use std::sync::atomic::{AtomicUsize, Ordering};
/// # use bytesbuf::mem::{BlockMeta, BlockRefDynamic, BlockRefDynamicWithMeta};
/// use bytesbuf::mem::{BlockRef, BlockRefVTable};
/// #
/// # struct MyBlock {
/// #     ref_count: AtomicUsize,
/// #     meta: NonNull<dyn BlockMeta>,
/// # }
/// #
/// # // SAFETY: Implementation is thread-safe via atomic operations.
/// # unsafe impl BlockRefDynamic for MyBlock {
/// #     type State = Self;
/// #
/// #     fn clone(state: NonNull<Self::State>) -> NonNull<Self::State> {
/// #         unsafe { state.as_ref() }.ref_count.fetch_add(1, Ordering::Relaxed);
/// #         state
/// #     }
/// #
/// #     fn drop(state: NonNull<Self::State>) {
/// #         unsafe { state.as_ref() }.ref_count.fetch_sub(1, Ordering::Release);
/// #     }
/// # }
/// #
/// # // SAFETY: Implementation is thread-safe via atomic operations.
/// # unsafe impl BlockRefDynamicWithMeta for MyBlock {
/// #     fn meta(state: NonNull<Self::State>) -> NonNull<dyn BlockMeta> {
/// #         unsafe { state.as_ref() }.meta
/// #     }
/// # }
///
/// // Create a vtable at compile time from a BlockRefDynamicWithMeta implementor.
/// const MY_BLOCK_VTABLE: BlockRefVTable<MyBlock> = BlockRefVTable::from_trait_with_meta();
/// ```
///
/// For a complete implementation example, see `examples/bb_basic.rs` in the repository.
///
/// Use [`from_trait()`][Self::from_trait] instead if your implementation does not
/// expose block metadata.
#[derive(Debug)]
pub struct BlockRefVTable<T> {
    // The type-erased table actually stored inside each `BlockRef`.
    inner: BlockRefVTableInner,
    // Ties the vtable to the implementing type without storing a `T`.
    _t: PhantomData<T>,
}

impl<T: BlockRefDynamicWithMeta> BlockRefVTable<T> {
    /// Creates a function table from a [`BlockRefDynamicWithMeta`] implementation.
    ///
    /// The resulting table dispatches clone/drop calls to the [`BlockRefDynamic`]
    /// implementation of `T` and exposes block metadata via
    /// [`BlockRefDynamicWithMeta::meta`], so [`BlockRef::meta`] returns `Some(_)`.
    ///
    /// This is a `const fn`, so the vtable can be built in a `const`/`static` and
    /// borrowed for the `&'static` lifetime that [`BlockRef::new`] requires.
    ///
    /// Use [`from_trait()`][Self::from_trait] instead if the implementation does not
    /// publish block metadata.
    #[must_use]
    pub const fn from_trait_with_meta() -> Self {
        Self {
            inner: BlockRefVTableInner {
                clone: wrap_clone::<T>,
                drop: wrap_drop::<T>,
                meta: Some(wrap_meta::<T>),
            },
            _t: PhantomData,
        }
    }
}

impl<T: BlockRefDynamic> BlockRefVTable<T> {
    /// Creates a function table from a [`BlockRefDynamic`] implementation,
    /// without publishing block metadata.
    ///
    /// The resulting table dispatches clone/drop calls to the implementation of `T`
    /// and has no metadata accessor, so [`BlockRef::meta`] returns `None`.
    ///
    /// This is a `const fn`, so the vtable can be built in a `const`/`static` and
    /// borrowed for the `&'static` lifetime that [`BlockRef::new`] requires.
    ///
    /// Use [`from_trait_with_meta()`][Self::from_trait_with_meta] instead if the
    /// implementation exposes block metadata.
    #[must_use]
    pub const fn from_trait() -> Self {
        Self {
            inner: BlockRefVTableInner {
                clone: wrap_clone::<T>,
                drop: wrap_drop::<T>,
                meta: None,
            },
            _t: PhantomData,
        }
    }
}

// Type-erasure shim: recovers the concrete state type before dispatching to the
// trait implementation, then erases the returned pointer again for `BlockRef` storage.
#[cfg_attr(test, mutants::skip)] // Mutations can violate memory safety and cause UB.
fn wrap_clone<T: BlockRefDynamic>(state_ptr: OpaqueStatePtr) -> OpaqueStatePtr {
    T::clone(state_ptr.cast()).cast()
}

// Type-erasure shim: recovers the concrete state type before dispatching the drop
// notification to the trait implementation.
#[cfg_attr(test, mutants::skip)] // Mutations can violate memory safety and cause UB.
fn wrap_drop<T: BlockRefDynamic>(state_ptr: OpaqueStatePtr) {
    T::drop(state_ptr.cast());
}

// Type-erasure shim: recovers the concrete state type before dispatching to the
// metadata accessor of the trait implementation.
// NOTE(review): unlike `wrap_clone`/`wrap_drop`, this is not marked
// `#[cfg_attr(test, mutants::skip)]` - confirm whether mutations here are actually
// safe (the returned pointer is dereferenced by `BlockRef::meta`).
fn wrap_meta<T: BlockRefDynamicWithMeta>(state_ptr: OpaqueStatePtr) -> NonNull<dyn BlockMeta> {
    T::meta(state_ptr.cast())
}

/// Implements [`BlockRefVTable`] via a trait, without publishing block metadata.
///
/// This is the minimum required to implement a [`BlockRef`] for a memory provider.
///
/// A typical high-efficiency implementation for a pooling memory provider will resemble something
/// like an `Arc<...>`, with cloning and dropping adjusting the reference count and potentially
/// returning the block to the pool.
///
/// # Safety
///
/// A [`BlockRef`] may move between threads and be accessed from any thread, while different
/// clones of a [`BlockRef`] may be accessed concurrently from different threads.
///
/// The implementation must accordingly be thread-safe to the degree required to
/// correctly operate under these conditions.
pub unsafe trait BlockRefDynamic {
    /// The inner state passed from the [`BlockRef`] to the implementation
    /// of this trait with each function call.
    type State;

    /// Will be called when a [`BlockRef`] is cloned, which means ownership of the block is
    /// to be shared with another co-owner.
    ///
    /// The owners themselves coordinate who owns which part of the block and the [`BlockRef`]
    /// always represents the block as a whole.
    ///
    /// # Returns
    ///
    /// Returns a pointer to use for the dynamic implementation state of the new clone.
    /// The same state may be reused between clones, so the returned pointer may just be
    /// a pointer to the first function parameter received here.
    ///
    /// The pointer must be valid for reads for the lifetime of the clone and there must never
    /// exist any exclusive references to it, as the caller will create shared references on
    /// demand.
    fn clone(state_ptr: NonNull<Self::State>) -> NonNull<Self::State>;

    /// Will be called when a [`BlockRef`] is dropped.
    ///
    /// The caller will not access `state_ptr` after this call, so it is safe to deallocate the
    /// backing memory if the implementation itself no longer needs the state.
    fn drop(state_ptr: NonNull<Self::State>);
}

/// Implements [`BlockRefVTable`] via a trait.
///
/// This is an extension of [`BlockRefDynamic`] that adds the ability to
/// retrieve metadata about the memory block.
///
/// # Safety
///
/// A [`BlockRef`] may move between threads and be accessed from any thread, while different
/// clones of a [`BlockRef`] may be accessed concurrently from different threads.
///
/// The implementation must accordingly be thread-safe to the degree required to
/// correctly operate under these conditions.
pub unsafe trait BlockRefDynamicWithMeta: BlockRefDynamic {
    /// Will be called to retrieve the memory provider specific metadata of the memory block.
    ///
    /// Must return a pointer to an object whose lifetime is at least as long as all clones of
    /// the [`BlockRef`] and which is valid for reads.
    fn meta(state_ptr: NonNull<Self::State>) -> NonNull<dyn BlockMeta>;
}

#[cfg_attr(coverage_nightly, coverage(off))]
#[cfg(test)]
mod tests {
    use std::sync::atomic::{self, AtomicUsize};

    use super::*;

    // Minimal reference-counted block used to exercise the `BlockRef` machinery.
    // The tests own the backing allocation themselves so they can inspect the
    // reference count after all `BlockRef` clones have been dropped.
    struct TestBlock {
        ref_count: AtomicUsize,
        meta: Option<NonNull<dyn BlockMeta>>,
    }

    #[derive(Debug)]
    struct TestBlockMeta {
        label: String,
    }

    impl BlockMeta for TestBlockMeta {}

    // SAFETY: We must ensure thread-safety of the implementation. We do.
    unsafe impl BlockRefDynamic for TestBlock {
        type State = Self;

        fn clone(state_ptr: NonNull<Self::State>) -> NonNull<Self::State> {
            // SAFETY: The state pointer is always valid for reads.
            let state = unsafe { state_ptr.as_ref() };

            state.ref_count.fetch_add(1, atomic::Ordering::Relaxed);

            // All clones share the same state object, so return the same pointer.
            state_ptr
        }

        fn drop(state_ptr: NonNull<Self::State>) {
            // SAFETY: The state pointer is always valid for reads.
            let state = unsafe { state_ptr.as_ref() };

            state.ref_count.fetch_sub(1, atomic::Ordering::Release);

            // We do not actually deallocate anything - the test logic will do that because it
            // first needs to inspect the block structure to verify the reference count.
        }
    }

    // SAFETY: We must ensure thread-safety of the implementation. We do.
    unsafe impl BlockRefDynamicWithMeta for TestBlock {
        fn meta(state_ptr: NonNull<Self::State>) -> NonNull<dyn BlockMeta> {
            // SAFETY: The state pointer is always valid for reads.
            let state = unsafe { state_ptr.as_ref() };

            // Tests that use this vtable always populate `meta`, so unwrap is fine here.
            state.meta.unwrap()
        }
    }

    // Compile-time vtables, mirroring how a real memory provider would declare them.
    const TEST_BLOCK_REF_FNS: BlockRefVTable<TestBlock> = BlockRefVTable::from_trait_with_meta();

    const TEST_BLOCK_REF_FNS_WITHOUT_META: BlockRefVTable<TestBlock> = BlockRefVTable::from_trait();

    #[test]
    fn smoke_test() {
        let meta_ptr = NonNull::new(Box::into_raw(Box::new(TestBlockMeta {
            label: "Test Block".to_string(),
        })))
        .unwrap();

        let block_ptr = NonNull::new(Box::into_raw(Box::new(TestBlock {
            ref_count: AtomicUsize::new(1),
            meta: Some(meta_ptr),
        })))
        .unwrap();

        // SAFETY: block_ptr must remain valid for reads and writes until drop()
        // is called via the dynamic fns. Yep, it does - the dynamic impl type takes ownership.
        let block_ref = unsafe { BlockRef::new(block_ptr, &TEST_BLOCK_REF_FNS) };

        let meta = block_ref.meta().unwrap();

        assert_eq!(meta.downcast_ref::<TestBlockMeta>().unwrap().label, "Test Block");

        let block_ref_clone = block_ref.clone();

        let meta = block_ref_clone.meta().unwrap();

        assert_eq!(meta.downcast_ref::<TestBlockMeta>().unwrap().label, "Test Block");

        // SAFETY: That is our block and it is perfectly valid for reads.
        let ref_count = unsafe { block_ptr.as_ref() }.ref_count.load(atomic::Ordering::Relaxed);

        assert_eq!(2, ref_count);

        drop(block_ref_clone);
        drop(block_ref);

        // SAFETY: That is our block and it is perfectly valid for reads.
        let ref_count = unsafe { block_ptr.as_ref() }.ref_count.load(atomic::Ordering::Relaxed);

        assert_eq!(0, ref_count);

        // All done, clean up please.
        // SAFETY: Yep, that is our block.
        drop(unsafe { Box::from_raw(block_ptr.as_ptr()) });
        // SAFETY: Yep, that is our meta.
        drop(unsafe { Box::from_raw(meta_ptr.as_ptr()) });
    }

    #[test]
    fn without_meta_returns_none_meta() {
        let block_ptr = NonNull::new(Box::into_raw(Box::new(TestBlock {
            ref_count: AtomicUsize::new(1),
            meta: None,
        })))
        .unwrap();

        // SAFETY: block_ptr must remain valid for reads and writes until drop()
        // is called via the dynamic fns. Yep, it does - the dynamic impl type takes ownership.
        let block_ref = unsafe { BlockRef::new(block_ptr, &TEST_BLOCK_REF_FNS_WITHOUT_META) };

        assert!(block_ref.meta().is_none());

        let block_ref_clone = block_ref.clone();

        assert!(block_ref_clone.meta().is_none());

        drop(block_ref_clone);
        drop(block_ref);

        // All done, clean up please.
        // SAFETY: Yep, that is our block.
        drop(unsafe { Box::from_raw(block_ptr.as_ptr()) });
    }

    #[test]
    #[cfg_attr(miri, ignore)]
    fn from_trait_with_meta_creates_vtable_with_meta_fn() {
        // Create a vtable using from_trait_with_meta at runtime to ensure we measure test coverage.
        // We leak the Box to get a 'static reference, which is okay as long as Miri is not looking.
        let vtable: &'static BlockRefVTable<TestBlock> = Box::leak(Box::new(BlockRefVTable::from_trait_with_meta()));

        // Verify that the vtable has a meta function pointer set
        assert!(vtable.inner.meta.is_some());

        // Create a test block with metadata
        let meta_ptr = NonNull::new(Box::into_raw(Box::new(TestBlockMeta {
            label: "Test Metadata".to_string(),
        })))
        .unwrap();

        let block_ptr = NonNull::new(Box::into_raw(Box::new(TestBlock {
            ref_count: AtomicUsize::new(1),
            meta: Some(meta_ptr),
        })))
        .unwrap();

        // SAFETY: block_ptr must remain valid for reads and writes until drop()
        // is called via the dynamic fns. It is - we clean it up at the end.
        let block_ref = unsafe { BlockRef::new(block_ptr, vtable) };

        // Verify that meta() works correctly
        let meta = block_ref.meta().expect("Meta should be available");
        assert_eq!(meta.downcast_ref::<TestBlockMeta>().unwrap().label, "Test Metadata");

        drop(block_ref);

        // Clean up
        // SAFETY: Yep, that is our block.
        drop(unsafe { Box::from_raw(block_ptr.as_ptr()) });
        // SAFETY: Yep, that is our meta.
        drop(unsafe { Box::from_raw(meta_ptr.as_ptr()) });
    }

    #[test]
    #[cfg_attr(miri, ignore)]
    fn from_trait_creates_vtable_without_meta_fn() {
        // Create a vtable using from_trait (without meta) at runtime to ensure we measure test coverage.
        // We leak the Box to get a 'static reference, which is okay as long as Miri is not looking.
        let vtable: &'static BlockRefVTable<TestBlock> = Box::leak(Box::new(BlockRefVTable::from_trait()));

        // Verify that the vtable does NOT have a meta function pointer
        assert!(vtable.inner.meta.is_none());

        // Create a test block
        let block_ptr = NonNull::new(Box::into_raw(Box::new(TestBlock {
            ref_count: AtomicUsize::new(1),
            meta: None,
        })))
        .unwrap();

        // SAFETY: block_ptr must remain valid for reads and writes until drop()
        // is called via the dynamic fns. It is - we clean it up at the end.
        let block_ref = unsafe { BlockRef::new(block_ptr, vtable) };

        // Verify that meta() returns None
        assert!(block_ref.meta().is_none());

        drop(block_ref);

        // Clean up
        // SAFETY: Yep, that is our block.
        drop(unsafe { Box::from_raw(block_ptr.as_ptr()) });
    }

    #[test]
    #[cfg_attr(miri, ignore)]
    fn from_trait_with_meta_vtable_handles_clone_correctly() {
        // Create vtable at runtime to ensure we measure test coverage.
        // We leak the Box to get a 'static reference, which is okay as long as Miri is not looking.
        let vtable: &'static BlockRefVTable<TestBlock> = Box::leak(Box::new(BlockRefVTable::from_trait_with_meta()));

        let block_ptr = NonNull::new(Box::into_raw(Box::new(TestBlock {
            ref_count: AtomicUsize::new(1),
            meta: None,
        })))
        .unwrap();

        // SAFETY: block_ptr must remain valid for reads and writes until drop()
        // is called via the dynamic fns. It is - we clean it up at the end.
        let block_ref = unsafe { BlockRef::new(block_ptr, vtable) };

        // Clone the block ref
        let block_ref_clone = block_ref.clone();

        // Verify reference count increased
        // SAFETY: That is our block and it is perfectly valid for reads.
        let ref_count = unsafe { block_ptr.as_ref() }.ref_count.load(atomic::Ordering::Relaxed);
        assert_eq!(ref_count, 2);

        drop(block_ref_clone);
        drop(block_ref);

        // Verify reference count decreased to 0
        // SAFETY: That is our block and it is perfectly valid for reads.
        let ref_count = unsafe { block_ptr.as_ref() }.ref_count.load(atomic::Ordering::Relaxed);
        assert_eq!(ref_count, 0);

        // Clean up
        // SAFETY: Yep, that is our block.
        drop(unsafe { Box::from_raw(block_ptr.as_ptr()) });
    }

    #[test]
    #[cfg_attr(miri, ignore)]
    fn from_trait_vtable_handles_clone_correctly() {
        // Create vtable at runtime to ensure we measure test coverage.
        // We leak the Box to get a 'static reference, which is okay as long as Miri is not looking.
        let vtable: &'static BlockRefVTable<TestBlock> = Box::leak(Box::new(BlockRefVTable::from_trait()));

        let block_ptr = NonNull::new(Box::into_raw(Box::new(TestBlock {
            ref_count: AtomicUsize::new(1),
            meta: None,
        })))
        .unwrap();

        // SAFETY: block_ptr must remain valid for reads and writes until drop()
        // is called via the dynamic fns. It is - we clean it up at the end.
        let block_ref = unsafe { BlockRef::new(block_ptr, vtable) };

        // Clone the block ref
        let block_ref_clone = block_ref.clone();

        // Verify reference count increased
        // SAFETY: That is our block and it is perfectly valid for reads.
        let ref_count = unsafe { block_ptr.as_ref() }.ref_count.load(atomic::Ordering::Relaxed);
        assert_eq!(ref_count, 2);

        drop(block_ref_clone);
        drop(block_ref);

        // Verify reference count decreased to 0
        // SAFETY: That is our block and it is perfectly valid for reads.
        let ref_count = unsafe { block_ptr.as_ref() }.ref_count.load(atomic::Ordering::Relaxed);
        assert_eq!(ref_count, 0);

        // Clean up
        // SAFETY: Yep, that is our block.
        drop(unsafe { Box::from_raw(block_ptr.as_ptr()) });
    }
}