// azul_core/refany.rs
1//! Type-erased, reference-counted smart pointer with runtime borrow checking.
2//!
3//! # Safety
4//!
5//! This module provides `RefAny`, a type-erased container similar to `Arc<RefCell<dyn Any>>`,
6//! but designed for FFI compatibility and cross-language interoperability.
7//!
8//! ## Memory Safety Guarantees
9//!
10//! 1. **Proper Alignment**: Fixed in commit addressing Miri UB - memory is allocated with correct
11//! alignment for the stored type using `Layout::from_size_align()`.
12//!
13//! 2. **Atomic Reference Counting**: All reference counts use `AtomicUsize` with `SeqCst` ordering,
14//! ensuring thread-safe access and preventing use-after-free.
15//!
16//! 3. **Runtime Type Safety**: Type IDs are checked before downcasting, preventing invalid pointer
17//! casts that would cause undefined behavior.
18//!
19//! 4. **Runtime Borrow Checking**: Shared and mutable borrows are tracked at runtime, enforcing
20//! Rust's borrowing rules dynamically (similar to `RefCell`).
21//!
22//! ## Thread Safety
23//!
24//! - `RefAny` is `Send`: Can be transferred between threads (data is heap-allocated)
25//! - `RefAny` is `Sync`: Can be shared between threads (atomic operations + `&mut self` for
26//! borrows)
27//!
28//! The `SeqCst` (Sequentially Consistent) memory ordering provides the strongest guarantees:
29//! all atomic operations appear in a single global order visible to all threads, preventing
30//! race conditions where one thread doesn't see another's reference count updates.
31
32use alloc::boxed::Box;
33use alloc::string::String;
34use core::{
35 alloc::Layout,
36 ffi::c_void,
37 fmt,
38 sync::atomic::{AtomicUsize, Ordering as AtomicOrdering},
39};
40
41use azul_css::AzString;
42
43/// C-compatible destructor function type for RefAny.
44/// Called when the last reference to a RefAny is dropped.
45pub type RefAnyDestructorType = extern "C" fn(*mut c_void);
46
47// NOTE: JSON serialization/deserialization callback types are defined in azul_layout::json
48// The actual types are:
49// RefAnySerializeFnType = extern "C" fn(RefAny) -> Json
50// RefAnyDeserializeFnType = extern "C" fn(Json) -> ResultRefAnyString
51// In azul_core, we only store function pointers as usize (0 = not set).
52
53/// Internal reference counting metadata for `RefAny`.
54///
55/// This struct tracks:
56///
57/// - How many `RefAny` clones exist (`num_copies`)
58/// - How many shared borrows are active (`num_refs`)
59/// - How many mutable borrows are active (`num_mutable_refs`)
60/// - Memory layout information for correct deallocation
61/// - Type information for runtime type checking
62///
63/// # Thread Safety
64///
65/// All counters are `AtomicUsize` with `SeqCst` ordering, making them safe to access
66/// from multiple threads simultaneously. The strong ordering ensures no thread can
67/// observe inconsistent states (e.g., both seeing count=1 during final drop).
68#[derive(Debug)]
69#[repr(C)]
70pub struct RefCountInner {
71 /// Type-erased pointer to heap-allocated data.
72 ///
73 /// SAFETY: Must be properly aligned for the stored type (guaranteed by
74 /// `Layout::from_size_align` in `new_c`). Never null for non-ZST types.
75 ///
76 /// This pointer is shared by all RefAny clones, so replace_contents
77 /// updates are visible to all clones.
78 pub _internal_ptr: *const c_void,
79
80 /// Number of `RefAny` instances sharing the same data.
81 /// When this reaches 0, the data is deallocated.
82 pub num_copies: AtomicUsize,
83
84 /// Number of active shared borrows (`Ref<T>`).
85 /// While > 0, mutable borrows are forbidden.
86 pub num_refs: AtomicUsize,
87
88 /// Number of active mutable borrows (`RefMut<T>`).
89 /// While > 0, all other borrows are forbidden.
90 pub num_mutable_refs: AtomicUsize,
91
92 /// Size of the stored type in bytes (from `size_of::<T>()`).
93 pub _internal_len: usize,
94
95 /// Layout size for deallocation (from `Layout::size()`).
96 pub _internal_layout_size: usize,
97
98 /// Required alignment for the stored type (from `align_of::<T>()`).
99 /// CRITICAL: Must match the alignment used during allocation to prevent UB.
100 pub _internal_layout_align: usize,
101
102 /// Runtime type identifier computed from `TypeId::of::<T>()`.
103 /// Used to prevent invalid downcasts.
104 pub type_id: u64,
105
106 /// Human-readable type name (e.g., "MyStruct") for debugging.
107 pub type_name: AzString,
108
109 /// Function pointer to correctly drop the type-erased data.
110 /// SAFETY: Must be called with a pointer to data of the correct type.
111 pub custom_destructor: extern "C" fn(*mut c_void),
112
113 /// Function pointer to serialize RefAny to JSON (0 = not set).
114 /// Cast to RefAnySerializeFnType (defined in azul_layout::json) when called.
115 /// Type: extern "C" fn(RefAny) -> Json
116 pub serialize_fn: usize,
117
118 /// Function pointer to deserialize JSON to new RefAny (0 = not set).
119 /// Cast to RefAnyDeserializeFnType (defined in azul_layout::json) when called.
120 /// Type: extern "C" fn(Json) -> ResultRefAnyString
121 pub deserialize_fn: usize,
122}
123
/// Wrapper around a heap-allocated `RefCountInner`.
///
/// This is the shared metadata that all `RefAny` clones point to.
/// The `RefCount` is responsible for all memory management:
///
/// - `RefCount::clone()` increments `num_copies` in RefCountInner
/// - `RefCount::drop()` decrements `num_copies` and, if it reaches 0:
///   1. Frees the RefCountInner
///   2. Calls the custom destructor on the data
///   3. Deallocates the data memory
///
/// # Why `run_destructor: bool`
///
/// This flag tracks whether this `RefCount` instance should decrement
/// `num_copies` when dropped. Set to `true` for all clones (including
/// those created by `RefAny::clone()` and `AZ_REFLECT` macros).
/// Set to `false` after the decrement has been performed to prevent
/// double-decrement.
// NOTE: the derived Hash/Ord/Eq compare the raw pointer value (identity),
// i.e. two RefCounts are equal iff they share the same RefCountInner.
#[derive(Hash, PartialEq, PartialOrd, Ord, Eq)]
#[repr(C)]
pub struct RefCount {
    /// Raw pointer to the shared metadata, produced by `Box::into_raw` in
    /// `RefCount::new`. Shared by every clone of the same `RefAny`.
    pub ptr: *const RefCountInner,
    /// Whether `Drop` should decrement `num_copies` for this handle.
    /// Cleared inside `Drop` itself to guard against double-decrement.
    pub run_destructor: bool,
}
148
149impl fmt::Debug for RefCount {
150 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
151 self.downcast().fmt(f)
152 }
153}
154
155impl Clone for RefCount {
156 /// Clones the RefCount and increments the reference count.
157 ///
158 /// # Safety
159 ///
160 /// This is safe because:
161 /// - The ptr is valid (created from Box::into_raw)
162 /// - num_copies is atomically incremented with SeqCst ordering
163 /// - This ensures the RefCountInner is not freed while clones exist
164 fn clone(&self) -> Self {
165 // CRITICAL: Must increment num_copies so the RefCountInner is not freed
166 // while this clone exists. The C macros (AZ_REFLECT) use AzRefCount_clone
167 // to create Ref/RefMut guards, and those guards must keep the data alive.
168 if !self.ptr.is_null() {
169 unsafe {
170 (*self.ptr).num_copies.fetch_add(1, AtomicOrdering::SeqCst);
171 }
172 }
173 Self {
174 ptr: self.ptr,
175 run_destructor: true,
176 }
177 }
178}
179
impl Drop for RefCount {
    /// Decrements the reference count when a RefCount clone is dropped.
    ///
    /// If this was the last reference (num_copies reaches 0), this will also
    /// free the RefCountInner and call the custom destructor.
    fn drop(&mut self) {
        // Only decrement if run_destructor is true (meaning this is a clone)
        // and the pointer is valid
        if !self.run_destructor || self.ptr.is_null() {
            return;
        }
        self.run_destructor = false;

        // Atomically decrement and get the PREVIOUS value
        let current_copies = unsafe {
            (*self.ptr).num_copies.fetch_sub(1, AtomicOrdering::SeqCst)
        };

        // If previous value wasn't 1, other references still exist
        if current_copies != 1 {
            return;
        }

        // We're the last reference! Clean up.
        // SAFETY: ptr came from Box::into_raw, and we're the last reference
        let sharing_info = unsafe { Box::from_raw(self.ptr as *mut RefCountInner) };
        // NOTE(review): this moves the RefCountInner out of the Box, so the
        // metadata allocation (including `type_name`) is freed BEFORE the data
        // destructor below runs. The destructor must not reach back into the
        // metadata — confirm FFI destructors honor this.
        let sharing_info = *sharing_info; // Box deallocates RefCountInner here

        // Get the data pointer
        let data_ptr = sharing_info._internal_ptr;

        // Handle zero-sized types specially
        if sharing_info._internal_len == 0
            || sharing_info._internal_layout_size == 0
            || data_ptr.is_null()
        {
            let mut _dummy: [u8; 0] = [];
            // Call destructor even for ZSTs (may have side effects)
            // NOTE: the pointer passed here is the address of an empty stack
            // array; the destructor must treat it as opaque for ZSTs.
            (sharing_info.custom_destructor)(_dummy.as_ptr() as *mut c_void);
        } else {
            // Reconstruct the layout used during allocation
            // SAFETY: size/align were validated by Layout::from_size_align in new_c
            let layout = unsafe {
                Layout::from_size_align_unchecked(
                    sharing_info._internal_layout_size,
                    sharing_info._internal_layout_align,
                )
            };

            // Phase 1: Run the custom destructor
            (sharing_info.custom_destructor)(data_ptr as *mut c_void);

            // Phase 2: Deallocate the memory
            unsafe {
                alloc::alloc::dealloc(data_ptr as *mut u8, layout);
            }
        }
    }
}
238
/// Debug-friendly snapshot of `RefCountInner` with non-atomic values.
///
/// Produced by `RefCount::debug_get_refcount_copied`; each counter is a plain
/// `usize` copied out of the corresponding `AtomicUsize`.
#[derive(Debug, Clone)]
pub struct RefCountInnerDebug {
    /// Snapshot of `num_copies` (number of live `RefAny` clones).
    pub num_copies: usize,
    /// Snapshot of `num_refs` (active shared borrows).
    pub num_refs: usize,
    /// Snapshot of `num_mutable_refs` (active mutable borrows).
    pub num_mutable_refs: usize,
    /// Size of the stored type in bytes.
    pub _internal_len: usize,
    /// Layout size used for the data allocation.
    pub _internal_layout_size: usize,
    /// Layout alignment used for the data allocation.
    pub _internal_layout_align: usize,
    /// Runtime type identifier of the stored type.
    pub type_id: u64,
    /// Human-readable type name.
    pub type_name: AzString,
    /// Destructor function pointer, cast to `usize` for display.
    pub custom_destructor: usize,
    /// Serialization function pointer (0 = not set)
    pub serialize_fn: usize,
    /// Deserialization function pointer (0 = not set)
    pub deserialize_fn: usize,
}
256
257impl RefCount {
258 /// Creates a new `RefCount` by boxing the metadata on the heap.
259 ///
260 /// # Safety
261 ///
262 /// Safe because we're creating a new allocation with `Box::new`,
263 /// then immediately leaking it with `into_raw` to get a stable pointer.
264 fn new(ref_count: RefCountInner) -> Self {
265 RefCount {
266 ptr: Box::into_raw(Box::new(ref_count)),
267 run_destructor: true,
268 }
269 }
270
271 /// Dereferences the raw pointer to access the metadata.
272 ///
273 /// # Safety
274 ///
275 /// Safe because:
276 /// - The pointer is created from `Box::into_raw`, so it's valid and properly aligned
277 /// - The lifetime is tied to `&self`, ensuring the pointer is still alive
278 /// - Reference counting ensures the data isn't freed while references exist
279 fn downcast(&self) -> &RefCountInner {
280 if self.ptr.is_null() {
281 panic!("[RefCount::downcast] FATAL: self.ptr is null!");
282 }
283 unsafe { &*self.ptr }
284 }
285
286 /// Creates a debug snapshot of the current reference counts.
287 ///
288 /// Loads all atomic values with `SeqCst` ordering to get a consistent view.
289 pub fn debug_get_refcount_copied(&self) -> RefCountInnerDebug {
290 let dc = self.downcast();
291 RefCountInnerDebug {
292 num_copies: dc.num_copies.load(AtomicOrdering::SeqCst),
293 num_refs: dc.num_refs.load(AtomicOrdering::SeqCst),
294 num_mutable_refs: dc.num_mutable_refs.load(AtomicOrdering::SeqCst),
295 _internal_len: dc._internal_len,
296 _internal_layout_size: dc._internal_layout_size,
297 _internal_layout_align: dc._internal_layout_align,
298 type_id: dc.type_id,
299 type_name: dc.type_name.clone(),
300 custom_destructor: dc.custom_destructor as usize,
301 serialize_fn: dc.serialize_fn,
302 deserialize_fn: dc.deserialize_fn,
303 }
304 }
305
306 /// Runtime check: can we create a shared borrow?
307 ///
308 /// Returns `true` if there are no active mutable borrows.
309 /// Multiple shared borrows can coexist (like `&T` in Rust).
310 ///
311 /// # Memory Ordering
312 ///
313 /// Uses `SeqCst` to ensure we see the most recent state from all threads.
314 /// If another thread just released a mutable borrow, we'll see it.
315 pub fn can_be_shared(&self) -> bool {
316 self.downcast()
317 .num_mutable_refs
318 .load(AtomicOrdering::SeqCst)
319 == 0
320 }
321
322 /// Runtime check: can we create a mutable borrow?
323 ///
324 /// Returns `true` only if there are ZERO active borrows of any kind.
325 /// This enforces Rust's exclusive mutability rule (like `&mut T`).
326 ///
327 /// # Memory Ordering
328 ///
329 /// Uses `SeqCst` to ensure we see all recent borrows from all threads.
330 /// Both counters must be checked atomically to prevent races.
331 pub fn can_be_shared_mut(&self) -> bool {
332 let info = self.downcast();
333 info.num_mutable_refs.load(AtomicOrdering::SeqCst) == 0
334 && info.num_refs.load(AtomicOrdering::SeqCst) == 0
335 }
336
337 /// Increments the shared borrow counter.
338 ///
339 /// Called when a `Ref<T>` is created. The `Ref::drop` will decrement it.
340 ///
341 /// # Memory Ordering
342 ///
343 /// `SeqCst` ensures this increment is visible to all threads before they
344 /// try to acquire a mutable borrow (which checks this counter).
345 pub fn increase_ref(&self) {
346 self.downcast()
347 .num_refs
348 .fetch_add(1, AtomicOrdering::SeqCst);
349 }
350
351 /// Decrements the shared borrow counter.
352 ///
353 /// Called when a `Ref<T>` is dropped, indicating the borrow is released.
354 ///
355 /// # Memory Ordering
356 ///
357 /// `SeqCst` ensures this decrement is immediately visible to other threads
358 /// waiting to acquire a mutable borrow.
359 pub fn decrease_ref(&self) {
360 self.downcast()
361 .num_refs
362 .fetch_sub(1, AtomicOrdering::SeqCst);
363 }
364
365 /// Increments the mutable borrow counter.
366 ///
367 /// Called when a `RefMut<T>` is created. Should only succeed when this
368 /// counter and `num_refs` are both 0.
369 ///
370 /// # Memory Ordering
371 ///
372 /// `SeqCst` ensures this increment is visible to all other threads,
373 /// blocking them from acquiring any borrow (shared or mutable).
374 pub fn increase_refmut(&self) {
375 self.downcast()
376 .num_mutable_refs
377 .fetch_add(1, AtomicOrdering::SeqCst);
378 }
379
380 /// Decrements the mutable borrow counter.
381 ///
382 /// Called when a `RefMut<T>` is dropped, releasing exclusive access.
383 ///
384 /// # Memory Ordering
385 ///
386 /// `SeqCst` ensures this decrement is immediately visible, allowing
387 /// other threads to acquire borrows.
388 pub fn decrease_refmut(&self) {
389 self.downcast()
390 .num_mutable_refs
391 .fetch_sub(1, AtomicOrdering::SeqCst);
392 }
393}
394
/// RAII guard for a shared borrow of type `T` from a `RefAny`.
///
/// Similar to `std::cell::Ref`, this automatically decrements the borrow
/// counter when dropped, ensuring borrows are properly released.
///
/// # Deref
///
/// Implements `Deref<Target = T>` so you can use it like `&T`.
#[derive(Debug)]
#[repr(C)]
pub struct Ref<'a, T> {
    /// Shared reference into the heap allocation owned by the `RefAny`.
    ptr: &'a T,
    /// Clone of the shared metadata; keeps the allocation alive and is used
    /// to decrement `num_refs` when this guard drops.
    sharing_info: RefCount,
}
409
410impl<'a, T> Drop for Ref<'a, T> {
411 /// Automatically releases the shared borrow when the guard goes out of scope.
412 ///
413 /// # Safety
414 ///
415 /// Safe because `decrease_ref` uses atomic operations and is designed to be
416 /// called exactly once per `Ref` instance.
417 fn drop(&mut self) {
418 self.sharing_info.decrease_ref();
419 }
420}
421
422impl<'a, T> core::ops::Deref for Ref<'a, T> {
423 type Target = T;
424
425 fn deref(&self) -> &Self::Target {
426 self.ptr
427 }
428}
429
/// RAII guard for a mutable borrow of type `T` from a `RefAny`.
///
/// Similar to `std::cell::RefMut`, this automatically decrements the mutable
/// borrow counter when dropped, releasing exclusive access.
///
/// # Deref / DerefMut
///
/// Implements both `Deref` and `DerefMut` so you can use it like `&mut T`.
#[derive(Debug)]
#[repr(C)]
pub struct RefMut<'a, T> {
    /// Exclusive reference into the heap allocation owned by the `RefAny`.
    ptr: &'a mut T,
    /// Clone of the shared metadata; keeps the allocation alive and is used
    /// to decrement `num_mutable_refs` when this guard drops.
    sharing_info: RefCount,
}
444
445impl<'a, T> Drop for RefMut<'a, T> {
446 /// Automatically releases the mutable borrow when the guard goes out of scope.
447 ///
448 /// # Safety
449 ///
450 /// Safe because `decrease_refmut` uses atomic operations and is designed to be
451 /// called exactly once per `RefMut` instance.
452 fn drop(&mut self) {
453 self.sharing_info.decrease_refmut();
454 }
455}
456
457impl<'a, T> core::ops::Deref for RefMut<'a, T> {
458 type Target = T;
459
460 fn deref(&self) -> &Self::Target {
461 &*self.ptr
462 }
463}
464
465impl<'a, T> core::ops::DerefMut for RefMut<'a, T> {
466 fn deref_mut(&mut self) -> &mut Self::Target {
467 self.ptr
468 }
469}
470
/// Type-erased, reference-counted smart pointer with runtime borrow checking.
///
/// `RefAny` is similar to `Arc<RefCell<dyn Any>>`, providing:
/// - Type erasure (stores any `'static` type)
/// - Reference counting (clones share the same data)
/// - Runtime borrow checking (enforces Rust's borrowing rules at runtime)
/// - FFI compatibility (`#[repr(C)]` and C-compatible API)
///
/// # Thread Safety
///
/// - `Send`: Can be moved between threads (heap-allocated data, atomic counters)
/// - `Sync`: Can be shared between threads (`downcast_ref/mut` require `&mut self`)
///
/// # Memory Safety
///
/// Fixed critical UB bugs in alignment, copy count, and pointer provenance (see
/// REFANY_UB_FIXES.md). All operations are verified with Miri to ensure absence of
/// undefined behavior.
///
/// # Usage
///
/// ```rust
/// # use azul_core::refany::RefAny;
/// let data = RefAny::new(42i32);
/// let mut data_clone = data.clone(); // shares the same heap allocation
///
/// // Runtime-checked downcasting with type safety
/// if let Some(value_ref) = data_clone.downcast_ref::<i32>() {
///     assert_eq!(*value_ref, 42);
/// };
///
/// // Runtime-checked mutable borrowing
/// if let Some(mut value_mut) = data_clone.downcast_mut::<i32>() {
///     *value_mut = 100;
/// };
/// ```
#[derive(Debug, Hash, PartialEq, PartialOrd, Ord, Eq)]
#[repr(C)]
pub struct RefAny {
    /// Shared metadata: reference counts, type info, destructor, AND data pointer.
    ///
    /// All `RefAny` clones point to the same `RefCountInner` via this field.
    /// The data pointer is stored in RefCountInner so all clones see the same
    /// pointer, even after replace_contents() is called.
    ///
    /// The `run_destructor` flag on `RefCount` controls whether dropping this
    /// RefAny should decrement the reference count and potentially free memory.
    pub sharing_info: RefCount,

    /// Unique ID for this specific clone (root = 0, subsequent clones increment).
    ///
    /// Used to distinguish between the original and clones for debugging.
    /// `RefAny::new_c` always sets this to 0 for the root instance.
    pub instance_id: u64,
}
525
// Generates `OptionRefAny`, the FFI-safe stand-in for `Option<RefAny>`, via the
// project-wide `impl_option!` macro. `copy = false` because `RefAny` manages a
// reference count and therefore cannot be `Copy`.
impl_option!(
    RefAny,
    OptionRefAny,
    copy = false,
    [Debug, Hash, Clone, PartialEq, PartialOrd, Ord, Eq]
);
532
// SAFETY: RefAny is Send because:
// - The data pointer points to heap memory (can be sent between threads)
// - All shared state (RefCountInner) uses atomic operations
// - No thread-local storage is used
unsafe impl Send for RefAny {}

// SAFETY: RefAny is Sync because:
// - Methods that access the inner data (`downcast_ref/mut`) require `&mut self`, which
//   is checked by the compiler and prevents concurrent access through one handle
// - Methods on `&RefAny` (like `clone`, `get_type_id`) only use atomic operations or
//   read immutable data, which is inherently thread-safe
// - The runtime borrow checker (via `can_be_shared/shared_mut`) uses SeqCst atomics,
//   ensuring proper synchronization across threads
unsafe impl Sync for RefAny {}
547
548impl RefAny {
    /// Creates a new type-erased `RefAny` containing the given value.
    ///
    /// This is the primary way to construct a `RefAny` from Rust code.
    ///
    /// # Type Safety
    ///
    /// Stores the `TypeId` of `T` for runtime type checking during downcasts.
    ///
    /// # Memory Layout
    ///
    /// - Allocates memory on the heap with correct size (`size_of::<T>()`) and alignment
    ///   (`align_of::<T>()`)
    /// - Copies the value into the heap allocation
    /// - Forgets the original value to prevent double-drop
    ///
    /// # Custom Destructor
    ///
    /// Creates a type-specific destructor that:
    /// 1. Copies the data from heap back to stack
    /// 2. Calls `mem::drop` to run `T`'s destructor
    /// 3. The heap memory is freed separately in `RefAny::drop`
    ///
    /// This two-phase destruction ensures proper cleanup even for complex types.
    ///
    /// # Safety
    ///
    /// Safe because:
    /// - `mem::forget` prevents double-drop of the original value
    /// - Type `T` and destructor `<U>` are matched at compile time
    /// - `ptr::copy_nonoverlapping` with count=1 copies exactly one `T`
    ///
    /// # Example
    ///
    /// ```rust
    /// # use azul_core::refany::RefAny;
    /// let mut data = RefAny::new(42i32);
    /// let value = data.downcast_ref::<i32>().unwrap();
    /// assert_eq!(*value, 42);
    /// ```
    pub fn new<T: 'static>(value: T) -> Self {
        /// Type-specific destructor that properly drops the inner value.
        ///
        /// # Safety
        ///
        /// Safe to call ONLY with a pointer that was created by `RefAny::new<U>`.
        /// The type `U` must match the original type `T`.
        ///
        /// # Why Copy to Stack?
        ///
        /// Rust's drop glue expects a value, not a pointer. We copy the data
        /// to the stack so `mem::drop` can run the destructor properly.
        ///
        /// # Critical Fix
        ///
        /// The third argument to `copy_nonoverlapping` is the COUNT (1 element),
        /// not the SIZE in bytes. Using `size_of::<U>()` here would copy
        /// `size_of::<U>()` elements, causing buffer overflow.
        extern "C" fn default_custom_destructor<U: 'static>(ptr: *mut c_void) {
            use core::{mem, ptr};

            unsafe {
                // Allocate uninitialized stack space for one `U`
                let mut stack_mem = mem::MaybeUninit::<U>::uninit();

                // Copy 1 element of type U from heap to stack
                ptr::copy_nonoverlapping(
                    ptr as *const U,
                    stack_mem.as_mut_ptr(),
                    1, // CRITICAL: This is element count, not byte count!
                );

                // Take ownership and run the destructor
                let stack_mem = stack_mem.assume_init();
                mem::drop(stack_mem); // Runs U's Drop implementation
            }
        }

        let type_name = ::core::any::type_name::<T>();
        let type_id = Self::get_type_id_static::<T>();

        let st = AzString::from_const_str(type_name);
        // NOTE: for zero-sized `T`, `new_c` takes its `len == 0` path and
        // stores a null pointer; the address of `value` is never read then.
        let s = Self::new_c(
            (&value as *const T) as *const c_void,
            ::core::mem::size_of::<T>(),
            ::core::mem::align_of::<T>(), // CRITICAL: Pass alignment to prevent UB
            type_id,
            st,
            default_custom_destructor::<T>,
            0, // serialize_fn: not set for Rust types by default
            0, // deserialize_fn: not set for Rust types by default
        );
        ::core::mem::forget(value); // Prevent double-drop (new_c already copied the bytes)
        s
    }
643
    /// C-ABI compatible function to create a `RefAny` from raw components.
    ///
    /// This is the low-level constructor used by FFI bindings (C, Python, etc.).
    ///
    /// # Parameters
    ///
    /// - `ptr`: Pointer to the value to store (will be copied)
    /// - `len`: Size of the value in bytes (`size_of::<T>()`)
    /// - `align`: Required alignment in bytes (`align_of::<T>()`)
    /// - `type_id`: Unique identifier for the type (for downcast safety)
    /// - `type_name`: Human-readable type name (for debugging)
    /// - `custom_destructor`: Function to call when the last reference is dropped
    /// - `serialize_fn`: Function pointer for JSON serialization (0 = not set)
    /// - `deserialize_fn`: Function pointer for JSON deserialization (0 = not set)
    ///
    /// # Panics
    ///
    /// Panics if `ptr` is null for a non-zero `len`, or if `len`/`align` do
    /// not form a valid `Layout` (e.g. `align` not a power of two).
    ///
    /// # Safety
    ///
    /// Caller must ensure:
    /// - `ptr` points to valid data of size `len` with alignment `align`
    /// - `type_id` uniquely identifies the type
    /// - `custom_destructor` correctly drops the type at `ptr`
    /// - `len` and `align` match the actual type's layout
    /// - If `serialize_fn != 0`, it must be a valid function pointer of type
    ///   `extern "C" fn(RefAny) -> Json`
    /// - If `deserialize_fn != 0`, it must be a valid function pointer of type
    ///   `extern "C" fn(Json) -> ResultRefAnyString`
    ///
    /// # Zero-Sized Types
    ///
    /// Special case: ZSTs use a null pointer but still track the type info
    /// and call the destructor (which may have side effects even for ZSTs).
    pub fn new_c(
        // *const T
        ptr: *const c_void,
        // sizeof(T)
        len: usize,
        // alignof(T)
        align: usize,
        // unique ID of the type (used for type comparison when downcasting)
        type_id: u64,
        // name of the class such as "app::MyData", usually compiler- or macro-generated
        type_name: AzString,
        custom_destructor: extern "C" fn(*mut c_void),
        // function pointer for JSON serialization (0 = not set)
        serialize_fn: usize,
        // function pointer for JSON deserialization (0 = not set)
        deserialize_fn: usize,
    ) -> Self {
        use core::ptr;

        // CRITICAL: Validate input pointer for non-ZST types
        // A NULL pointer for a non-zero-sized type would cause UB when copying
        // and would lead to crashes when cloning (as documented in REPORT2.md)
        if len > 0 && ptr.is_null() {
            panic!(
                "RefAny::new_c: NULL pointer passed for non-ZST type (size={}). \
                 This would cause undefined behavior. Type: {:?}",
                len,
                type_name.as_str()
            );
        }

        // Special case: Zero-sized types
        //
        // Calling `alloc(Layout { size: 0, .. })` is UB, so we use a null pointer.
        // The destructor is still called (it may have side effects even for ZSTs).
        let (_internal_ptr, layout) = if len == 0 {
            let _dummy: [u8; 0] = [];
            (ptr::null_mut(), Layout::for_value(&_dummy))
        } else {
            // CRITICAL FIX: Use the caller-provided alignment, not alignment of [u8]
            //
            // Previous bug: `Layout::for_value(&[u8])` created align=1
            // This caused unaligned references when downcasting to types like i32 (align=4)
            //
            // Fixed: `Layout::from_size_align(len, align)` respects the type's alignment
            // NOTE: from_size_align rejects align values that are not powers of
            // two (or overflowing sizes), so the expect() below panics on bad input.
            let layout = Layout::from_size_align(len, align).expect("Failed to create layout");

            // Allocate heap memory with correct alignment
            let heap_struct_as_bytes = unsafe { alloc::alloc::alloc(layout) };

            // Handle allocation failure (aborts the program)
            if heap_struct_as_bytes.is_null() {
                alloc::alloc::handle_alloc_error(layout);
            }

            // Copy the data byte-by-byte to the heap
            // SAFETY: Both pointers are valid, non-overlapping, and properly aligned
            unsafe { ptr::copy_nonoverlapping(ptr as *const u8, heap_struct_as_bytes, len) };

            (heap_struct_as_bytes, layout)
        };

        let ref_count_inner = RefCountInner {
            _internal_ptr: _internal_ptr as *const c_void,
            num_copies: AtomicUsize::new(1), // This is the first instance
            num_refs: AtomicUsize::new(0),   // No borrows yet
            num_mutable_refs: AtomicUsize::new(0), // No mutable borrows yet
            _internal_len: len,
            _internal_layout_size: layout.size(),
            _internal_layout_align: layout.align(),
            type_id,
            type_name,
            custom_destructor,
            serialize_fn,
            deserialize_fn,
        };

        let sharing_info = RefCount::new(ref_count_inner);

        Self {
            sharing_info,
            instance_id: 0, // Root instance
        }
    }
759
760 /// Returns the raw data pointer for FFI downcasting.
761 ///
762 /// This is used by the AZ_REFLECT macros in C/C++ to access the
763 /// type-erased data pointer for downcasting operations.
764 ///
765 /// # Safety
766 ///
767 /// The returned pointer must only be dereferenced after verifying
768 /// the type ID matches the expected type. Callers are responsible
769 /// for proper type safety checks.
770 pub fn get_data_ptr(&self) -> *const c_void {
771 self.sharing_info.downcast()._internal_ptr
772 }
773
774 /// Checks if this is the only `RefAny` instance with no active borrows.
775 ///
776 /// Returns `true` only if:
777 /// - `num_copies == 1` (no clones exist)
778 /// - `num_refs == 0` (no shared borrows active)
779 /// - `num_mutable_refs == 0` (no mutable borrows active)
780 ///
781 /// Useful for checking if you have exclusive ownership.
782 ///
783 /// # Memory Ordering
784 ///
785 /// Uses `SeqCst` to ensure a consistent view across all three counters.
786 pub fn has_no_copies(&self) -> bool {
787 self.sharing_info
788 .downcast()
789 .num_copies
790 .load(AtomicOrdering::SeqCst)
791 == 1
792 && self
793 .sharing_info
794 .downcast()
795 .num_refs
796 .load(AtomicOrdering::SeqCst)
797 == 0
798 && self
799 .sharing_info
800 .downcast()
801 .num_mutable_refs
802 .load(AtomicOrdering::SeqCst)
803 == 0
804 }
805
    /// Attempts to downcast to a shared reference of type `U`.
    ///
    /// Returns `None` if:
    /// - The stored type doesn't match `U` (type safety)
    /// - A mutable borrow is already active (borrow checking)
    /// - The pointer is null (ZST or uninitialized)
    ///
    /// # Type Safety
    ///
    /// Compares `type_id` at runtime before casting. This prevents casting
    /// `*const c_void` to the wrong type, which would be immediate UB.
    ///
    /// # Borrow Checking
    ///
    /// Checks `can_be_shared()` to enforce Rust's borrowing rules:
    /// - Multiple shared borrows are allowed
    /// - Shared and mutable borrows cannot coexist
    ///
    /// NOTE(review): `can_be_shared()` and `increase_ref()` below are two
    /// separate atomic operations, not one check-and-set. Two *clones* used
    /// from two threads could interleave this check with a writer's
    /// `increase_refmut` — confirm whether cross-clone, cross-thread borrows
    /// are intended to be fully race-free, or consider a CAS loop.
    ///
    /// # Safety
    ///
    /// The `unsafe` cast is safe because:
    /// - Type ID check ensures `U` matches the stored type
    /// - Memory was allocated with correct alignment for `U`
    /// - Lifetime `'a` is tied to `&'a mut self`, preventing use-after-free
    /// - Reference count is incremented atomically before returning
    ///
    /// # Why `&mut self`?
    ///
    /// Requires `&mut self` to prevent multiple threads from calling this
    /// simultaneously on the same `RefAny`. The borrow checker enforces this.
    /// Clones of the `RefAny` can call this independently (they share data
    /// but have separate runtime borrow tracking).
    #[inline]
    pub fn downcast_ref<'a, U: 'static>(&'a mut self) -> Option<Ref<'a, U>> {
        // Runtime type check: prevent downcasting to wrong type
        let stored_type_id = self.get_type_id();
        let target_type_id = Self::get_type_id_static::<U>();
        let is_same_type = stored_type_id == target_type_id;

        if !is_same_type {
            return None;
        }

        // Runtime borrow check: ensure no mutable borrows exist
        let can_be_shared = self.sharing_info.can_be_shared();
        if !can_be_shared {
            return None;
        }

        // Get data pointer from shared RefCountInner
        let data_ptr = self.sharing_info.downcast()._internal_ptr;

        // Null check: ZSTs or uninitialized
        if data_ptr.is_null() {
            return None;
        }

        // Increment shared borrow count atomically
        self.sharing_info.increase_ref();

        Some(Ref {
            // SAFETY: Type check passed, pointer is non-null and properly aligned
            ptr: unsafe { &*(data_ptr as *const U) },
            sharing_info: self.sharing_info.clone(),
        })
    }
872
    /// Attempts to downcast to a mutable reference of type `U`.
    ///
    /// Returns `None` if:
    /// - The stored type doesn't match `U` (type safety)
    /// - Any borrow is already active (borrow checking)
    /// - The pointer is null (ZST or uninitialized)
    ///
    /// # Type Safety
    ///
    /// Compares `type_id` at runtime before casting, preventing UB.
    ///
    /// # Borrow Checking
    ///
    /// Checks `can_be_shared_mut()` to enforce exclusive mutability:
    /// - No other borrows (shared or mutable) can be active
    /// - This is Rust's `&mut T` rule, enforced at runtime
    ///
    /// NOTE(review): `can_be_shared_mut()` and `increase_refmut()` below are
    /// two separate atomic operations, not one check-and-set. Two *clones*
    /// used from two threads could both pass their checks before either
    /// increments — confirm whether cross-clone, cross-thread borrows are
    /// intended to be fully race-free, or consider a CAS loop.
    ///
    /// # Safety
    ///
    /// The `unsafe` cast is safe because:
    ///
    /// - Type ID check ensures `U` matches the stored type
    /// - Memory was allocated with correct alignment for `U`
    /// - Borrow check ensures no other references exist
    /// - Lifetime `'a` is tied to `&'a mut self`, preventing aliasing
    /// - Mutable reference count is incremented atomically
    ///
    /// # Memory Ordering
    ///
    /// The `increase_refmut()` uses `SeqCst`, ensuring other threads see
    /// this mutable borrow before they try to acquire any borrow.
    #[inline]
    pub fn downcast_mut<'a, U: 'static>(&'a mut self) -> Option<RefMut<'a, U>> {
        // Runtime type check
        let is_same_type = self.get_type_id() == Self::get_type_id_static::<U>();
        if !is_same_type {
            return None;
        }

        // Runtime exclusive borrow check
        let can_be_shared_mut = self.sharing_info.can_be_shared_mut();
        if !can_be_shared_mut {
            return None;
        }

        // Get data pointer from shared RefCountInner
        let data_ptr = self.sharing_info.downcast()._internal_ptr;

        // Null check
        if data_ptr.is_null() {
            return None;
        }

        // Increment mutable borrow count atomically
        self.sharing_info.increase_refmut();

        Some(RefMut {
            // SAFETY: Type and borrow checks passed, exclusive access guaranteed
            ptr: unsafe { &mut *(data_ptr as *mut U) },
            sharing_info: self.sharing_info.clone(),
        })
    }
935
936 /// Computes a runtime type ID from Rust's `TypeId`.
937 ///
938 /// Rust's `TypeId` is not `#[repr(C)]` and can't cross FFI boundaries.
939 /// This function converts it to a `u64` by treating it as a byte array.
940 ///
941 /// # Safety
942 ///
943 /// Safe because:
944 /// - `TypeId` is a valid type with a stable layout
945 /// - We only read from it, never write
946 /// - The slice lifetime is bounded by the function scope
947 ///
948 /// # Implementation
949 ///
950 /// Treats the `TypeId` as bytes and sums them with bit shifts to create
951 /// a unique (but not cryptographically secure) hash.
952 #[inline]
953 fn get_type_id_static<T: 'static>() -> u64 {
954 use core::{any::TypeId, mem};
955
956 let t_id = TypeId::of::<T>();
957
958 // SAFETY: TypeId is a valid type, we're only reading it
959 let struct_as_bytes = unsafe {
960 core::slice::from_raw_parts(
961 (&t_id as *const TypeId) as *const u8,
962 mem::size_of::<TypeId>(),
963 )
964 };
965
966 // Convert first 8 bytes to u64 using proper bit positions
967 struct_as_bytes
968 .into_iter()
969 .enumerate()
970 .take(8) // Only use first 8 bytes (64 bits fit in u64)
971 .map(|(s_pos, s)| (*s as u64) << (s_pos * 8))
972 .sum()
973 }
974
975 /// Checks if the stored type matches the given type ID.
976 pub fn is_type(&self, type_id: u64) -> bool {
977 self.sharing_info.downcast().type_id == type_id
978 }
979
980 /// Returns the stored type ID.
981 pub fn get_type_id(&self) -> u64 {
982 self.sharing_info.downcast().type_id
983 }
984
985 /// Returns the human-readable type name for debugging.
986 pub fn get_type_name(&self) -> AzString {
987 self.sharing_info.downcast().type_name.clone()
988 }
989
990 /// Returns the current reference count (number of `RefAny` clones sharing this data).
991 ///
992 /// This is useful for debugging and metadata purposes.
993 pub fn get_ref_count(&self) -> usize {
994 self.sharing_info
995 .downcast()
996 .num_copies
997 .load(AtomicOrdering::SeqCst)
998 }
999
1000 /// Returns the serialize function pointer (0 = not set).
1001 ///
1002 /// This is used for JSON serialization of RefAny contents.
1003 pub fn get_serialize_fn(&self) -> usize {
1004 self.sharing_info.downcast().serialize_fn
1005 }
1006
1007 /// Returns the deserialize function pointer (0 = not set).
1008 ///
1009 /// This is used for JSON deserialization to create a new RefAny.
1010 pub fn get_deserialize_fn(&self) -> usize {
1011 self.sharing_info.downcast().deserialize_fn
1012 }
1013
    /// Sets the serialize function pointer.
    ///
    /// # Safety
    ///
    /// The caller must ensure the function pointer is valid and has the correct
    /// signature: `extern "C" fn(RefAny) -> Json`
    ///
    /// NOTE(review): `&mut self` only guarantees exclusive access to THIS
    /// handle, not to the shared `RefCountInner` — other clones of this
    /// `RefAny` could read `serialize_fn` concurrently via
    /// `get_serialize_fn()`, and this write is a plain (non-atomic) store.
    /// Confirm that setters are only called before the RefAny is shared
    /// across threads, or consider making the field atomic.
    pub fn set_serialize_fn(&mut self, serialize_fn: usize) {
        // Safety: We have &mut self, so we have exclusive access
        let inner = self.sharing_info.ptr as *mut RefCountInner;
        unsafe {
            (*inner).serialize_fn = serialize_fn;
        }
    }
1027
    /// Sets the deserialize function pointer.
    ///
    /// # Safety
    ///
    /// The caller must ensure the function pointer is valid and has the correct
    /// signature: `extern "C" fn(Json) -> ResultRefAnyString`
    ///
    /// NOTE(review): as with `set_serialize_fn`, `&mut self` does not exclude
    /// other clones sharing the same `RefCountInner`; this non-atomic store
    /// could race with a concurrent `get_deserialize_fn()` on another clone.
    /// Verify the intended usage pattern (set-before-share) or make the field
    /// atomic.
    pub fn set_deserialize_fn(&mut self, deserialize_fn: usize) {
        // Safety: We have &mut self, so we have exclusive access
        let inner = self.sharing_info.ptr as *mut RefCountInner;
        unsafe {
            (*inner).deserialize_fn = deserialize_fn;
        }
    }
1041
1042 /// Returns true if this RefAny supports JSON serialization.
1043 pub fn can_serialize(&self) -> bool {
1044 self.get_serialize_fn() != 0
1045 }
1046
1047 /// Returns true if this RefAny type supports JSON deserialization.
1048 pub fn can_deserialize(&self) -> bool {
1049 self.get_deserialize_fn() != 0
1050 }
1051
    /// Replaces the contents of this RefAny with a new value from another RefAny.
    ///
    /// This method:
    /// 1. Atomically acquires a mutable "lock" via compare_exchange
    /// 2. Calls the destructor on the old value
    /// 3. Deallocates the old memory
    /// 4. Copies the new value's memory
    /// 5. Updates metadata (type_id, type_name, destructor, serialize/deserialize fns)
    /// 6. Updates the shared _internal_ptr so ALL clones see the new data
    /// 7. Releases the lock
    ///
    /// Since all clones of a RefAny share the same `RefCountInner`, this change
    /// will be visible to ALL clones of this RefAny.
    ///
    /// # Returns
    ///
    /// - `true` if the replacement was successful
    /// - `false` if there are active borrows (would cause UB)
    ///
    /// # Thread Safety
    ///
    /// Uses compare_exchange to atomically acquire exclusive access, preventing
    /// any race condition between checking for borrows and modifying the data.
    ///
    /// # Safety
    ///
    /// Safe because:
    /// - We atomically acquire exclusive access before modifying
    /// - The old destructor is called before deallocation
    /// - Memory is properly allocated with correct alignment
    /// - All metadata is updated while holding the lock
    ///
    /// NOTE(review): `new_value` is `mem::forget`-ten at the end. Skipping its
    /// payload destructor is required (the bytes were bitwise-copied out, so
    /// running it would double-drop) — but forgetting the whole `RefAny` also
    /// skips its `RefCount` drop, so `new_value`'s own heap buffer and its
    /// `RefCountInner` (including the original `type_name` string) appear to be
    /// leaked on every call. Confirm whether that leak is intentional.
    pub fn replace_contents(&mut self, new_value: RefAny) -> bool {
        use core::ptr;

        let inner = self.sharing_info.ptr as *mut RefCountInner;

        // Atomically acquire exclusive access by setting num_mutable_refs to 1.
        // This uses compare_exchange to ensure no race condition:
        // - If num_mutable_refs is 0, set it to 1 (success)
        // - If num_mutable_refs is not 0, someone else has it (fail)
        // We also need to check num_refs == 0 atomically.
        //
        // NOTE(review): `inner` (raw *mut) and `inner_ref` (shared &) alias the
        // same RefCountInner, and the raw-pointer writes below happen while the
        // shared reference may still be considered live — worth auditing under
        // Miri / Stacked Borrows.
        let inner_ref = self.sharing_info.downcast();

        // First, try to acquire the mutable lock
        let mutable_lock_result = inner_ref.num_mutable_refs.compare_exchange(
            0, // expected: no mutable refs
            1, // desired: we take the mutable ref
            AtomicOrdering::SeqCst,
            AtomicOrdering::SeqCst,
        );

        if mutable_lock_result.is_err() {
            // Someone else has a mutable reference
            return false;
        }

        // Now check that there are no shared references
        // Note: We hold the mutable lock, so no new shared refs can be acquired
        if inner_ref.num_refs.load(AtomicOrdering::SeqCst) != 0 {
            // Release the lock and fail
            inner_ref.num_mutable_refs.store(0, AtomicOrdering::SeqCst);
            return false;
        }

        // We now have exclusive access - perform the replacement
        unsafe {
            // Get old layout info before we overwrite it
            let old_ptr = (*inner)._internal_ptr;
            let old_len = (*inner)._internal_len;
            let old_layout_size = (*inner)._internal_layout_size;
            let old_layout_align = (*inner)._internal_layout_align;
            let old_destructor = (*inner).custom_destructor;

            // Step 1: Call destructor on old value (if non-ZST)
            if old_len > 0 && !old_ptr.is_null() {
                old_destructor(old_ptr as *mut c_void);
            }

            // Step 2: Deallocate old memory (if non-ZST)
            if old_layout_size > 0 && !old_ptr.is_null() {
                let old_layout = Layout::from_size_align_unchecked(old_layout_size, old_layout_align);
                alloc::alloc::dealloc(old_ptr as *mut u8, old_layout);
            }

            // Get new value's metadata
            let new_inner = new_value.sharing_info.downcast();
            let new_ptr = new_inner._internal_ptr;
            let new_len = new_inner._internal_len;
            let new_layout_size = new_inner._internal_layout_size;
            let new_layout_align = new_inner._internal_layout_align;

            // Step 3: Allocate new memory and copy data
            //
            // NOTE(review): the allocation below is sized by `new_len`, but the
            // metadata stored in Step 5 records `new_layout_size`. If those two
            // values can ever differ, the eventual deallocation (which uses
            // `_internal_layout_size`) would free with a different size than
            // was allocated — undefined behavior per the allocator contract.
            // Confirm the `len == layout_size` invariant, or allocate with
            // `new_layout_size` here.
            let allocated_ptr = if new_len == 0 {
                ptr::null_mut()
            } else {
                let new_layout = Layout::from_size_align(new_len, new_layout_align)
                    .expect("Failed to create layout");
                let heap_ptr = alloc::alloc::alloc(new_layout);
                if heap_ptr.is_null() {
                    alloc::alloc::handle_alloc_error(new_layout);
                }
                // Copy data from new_value
                ptr::copy_nonoverlapping(
                    new_ptr as *const u8,
                    heap_ptr,
                    new_len,
                );
                heap_ptr
            };

            // Step 4: Update the shared internal pointer in RefCountInner
            // All clones will see this new pointer!
            (*inner)._internal_ptr = allocated_ptr as *const c_void;

            // Step 5: Update metadata in RefCountInner
            (*inner)._internal_len = new_len;
            (*inner)._internal_layout_size = new_layout_size;
            (*inner)._internal_layout_align = new_layout_align;
            (*inner).type_id = new_inner.type_id;
            (*inner).type_name = new_inner.type_name.clone();
            (*inner).custom_destructor = new_inner.custom_destructor;
            (*inner).serialize_fn = new_inner.serialize_fn;
            (*inner).deserialize_fn = new_inner.deserialize_fn;
        }

        // Release the mutable lock
        self.sharing_info.downcast().num_mutable_refs.store(0, AtomicOrdering::SeqCst);

        // Prevent new_value from running its destructor (we copied the data)
        core::mem::forget(new_value);

        true
    }
1185}
1186
1187impl Clone for RefAny {
1188 /// Creates a new `RefAny` sharing the same heap-allocated data.
1189 ///
1190 /// This is cheap (just increments a counter) and is how multiple parts
1191 /// of the code can hold references to the same data.
1192 ///
1193 /// # Reference Counting
1194 ///
1195 /// Atomically increments `num_copies` with `SeqCst` ordering before
1196 /// creating the clone. This ensures all threads see the updated count
1197 /// before the clone can be used.
1198 ///
1199 /// # Instance ID
1200 ///
1201 /// Each clone gets a unique `instance_id` based on the current copy count.
1202 /// The original has `instance_id=0`, the first clone gets `1`, etc.
1203 ///
1204 /// # Memory Ordering
1205 ///
1206 /// The `fetch_add` followed by `load` both use `SeqCst`:
1207 /// - `fetch_add`: Ensures the increment is visible to all threads
1208 /// - `load`: Gets the updated value for the instance_id
1209 ///
1210 /// This prevents race conditions where two threads clone simultaneously
1211 /// and both see the same instance_id.
1212 ///
1213 /// # Safety
1214 ///
1215 /// Safe because:
1216 ///
1217 /// - Atomic operations prevent data races
1218 /// - The heap allocation remains valid (only freed when count reaches 0)
1219 /// - `run_destructor` is set to `true` for all clones
1220 fn clone(&self) -> Self {
1221 // Atomically increment the reference count
1222 let inner = self.sharing_info.downcast();
1223 inner.num_copies.fetch_add(1, AtomicOrdering::SeqCst);
1224
1225 let new_instance_id = inner.num_copies.load(AtomicOrdering::SeqCst) as u64;
1226
1227 Self {
1228 // Data pointer is now in RefCountInner, shared automatically
1229 sharing_info: RefCount {
1230 ptr: self.sharing_info.ptr, // Share the same metadata (and data pointer)
1231 run_destructor: true, // This clone should decrement num_copies on drop
1232 },
1233 // Give this clone a unique ID based on the updated count
1234 instance_id: new_instance_id,
1235 }
1236 }
1237}
1238
impl Drop for RefAny {
    /// Empty drop implementation - all cleanup is handled by `RefCount::drop`.
    ///
    /// When a `RefAny` is dropped, its `sharing_info: RefCount` field is automatically
    /// dropped by Rust. The `RefCount::drop` implementation handles all cleanup:
    ///
    /// 1. Atomically decrements `num_copies` with `fetch_sub`
    /// 2. If the previous value was 1 (we're the last reference):
    ///    - Reclaims the `RefCountInner` via `Box::from_raw`
    ///    - Calls the custom destructor to run `T::drop()`
    ///    - Deallocates the heap memory with the stored layout
    ///
    /// # Why No Code Here?
    ///
    /// Previously, `RefAny::drop` handled cleanup, but this caused issues with the
    /// C API where `Ref<T>` and `RefMut<T>` guards (which clone the `RefCount`) need
    /// to keep the data alive even after the original `RefAny` is dropped.
    ///
    /// By moving all cleanup to `RefCount::drop`, we ensure that:
    /// - `RefAny::clone()` creates a `RefCount` with `run_destructor = true`
    /// - `AZ_REFLECT` macros create `Ref`/`RefMut` guards that clone `RefCount`
    /// - Each `RefCount` drop decrements the counter
    /// - Only the LAST drop (when `num_copies` was 1) cleans up memory
    ///
    /// Keeping this explicit (empty) `Drop` impl is still meaningful: a type
    /// with a `Drop` impl cannot have its fields moved out, so `sharing_info`
    /// can never be separated from a live `RefAny`, preserving the invariant
    /// that `RefCount::drop` always runs.
    ///
    /// See `RefCount::drop` for the full algorithm and safety documentation.
    fn drop(&mut self) {
        // RefCount::drop handles everything automatically.
        // The sharing_info field is dropped by Rust, triggering RefCount::drop.
    }
}
1269
#[cfg(test)]
mod tests {
    use super::*;

    // Simple payload type used across most tests.
    #[derive(Debug, Clone, PartialEq)]
    struct TestStruct {
        value: i32,
        name: String,
    }

    // Payload with an owned heap-allocated interior, to exercise nested drops.
    #[derive(Debug, Clone, PartialEq)]
    struct NestedStruct {
        inner: TestStruct,
        data: Vec<u8>,
    }

    // Full create -> read -> mutate -> re-read cycle on a single handle.
    #[test]
    fn test_refany_basic_create_and_downcast() {
        let test_val = TestStruct {
            value: 42,
            name: "test".to_string(),
        };

        let mut refany = RefAny::new(test_val.clone());

        // Test downcast_ref
        let borrowed = refany
            .downcast_ref::<TestStruct>()
            .expect("Should downcast successfully");
        assert_eq!(borrowed.value, 42);
        assert_eq!(borrowed.name, "test");
        drop(borrowed);

        // Test downcast_mut
        {
            let mut borrowed_mut = refany
                .downcast_mut::<TestStruct>()
                .expect("Should downcast mutably");
            borrowed_mut.value = 100;
            borrowed_mut.name = "modified".to_string();
        }

        // Verify mutation
        let borrowed = refany
            .downcast_ref::<TestStruct>()
            .expect("Should downcast after mutation");
        assert_eq!(borrowed.value, 100);
        assert_eq!(borrowed.name, "modified");
    }

    // Verifies that clones share the same underlying data: a mutation made
    // through one clone is visible through all others.
    #[test]
    fn test_refany_clone_and_sharing() {
        let test_val = TestStruct {
            value: 42,
            name: "test".to_string(),
        };

        let mut refany1 = RefAny::new(test_val);
        let mut refany2 = refany1.clone();
        let mut refany3 = refany1.clone();

        // All three should point to the same data
        let borrowed1 = refany1
            .downcast_ref::<TestStruct>()
            .expect("Should downcast ref1");
        assert_eq!(borrowed1.value, 42);
        drop(borrowed1);

        let borrowed2 = refany2
            .downcast_ref::<TestStruct>()
            .expect("Should downcast ref2");
        assert_eq!(borrowed2.value, 42);
        drop(borrowed2);

        // Modify through refany3
        {
            let mut borrowed_mut = refany3
                .downcast_mut::<TestStruct>()
                .expect("Should downcast mut");
            borrowed_mut.value = 200;
        }

        // Verify all see the change
        let borrowed1 = refany1
            .downcast_ref::<TestStruct>()
            .expect("Should see mutation from ref1");
        assert_eq!(borrowed1.value, 200);
        drop(borrowed1);

        let borrowed2 = refany2
            .downcast_ref::<TestStruct>()
            .expect("Should see mutation from ref2");
        assert_eq!(borrowed2.value, 200);
    }

    // Sequential borrows (scoped, never overlapping) must all succeed; each
    // block drops its guard before the next borrow is taken.
    #[test]
    fn test_refany_borrow_checking() {
        let test_val = TestStruct {
            value: 42,
            name: "test".to_string(),
        };

        let mut refany = RefAny::new(test_val);

        // Test that we can get an immutable reference
        {
            let borrowed1 = refany
                .downcast_ref::<TestStruct>()
                .expect("First immutable borrow");
            assert_eq!(borrowed1.value, 42);
            assert_eq!(borrowed1.name, "test");
        }

        // Test that we can get a mutable reference and modify the value
        {
            let mut borrowed_mut = refany
                .downcast_mut::<TestStruct>()
                .expect("Mutable borrow should work");
            borrowed_mut.value = 100;
            borrowed_mut.name = "modified".to_string();
        }

        // Verify the modification persisted
        {
            let borrowed = refany
                .downcast_ref::<TestStruct>()
                .expect("Should be able to borrow again");
            assert_eq!(borrowed.value, 100);
            assert_eq!(borrowed.name, "modified");
        }
    }

    // Downcasting to a type other than the stored one must return None for
    // both the shared and the mutable variant.
    #[test]
    fn test_refany_type_safety() {
        let test_val = TestStruct {
            value: 42,
            name: "test".to_string(),
        };

        let mut refany = RefAny::new(test_val);

        // Try to downcast to wrong type
        assert!(
            refany.downcast_ref::<i32>().is_none(),
            "Should not allow downcasting to wrong type"
        );
        assert!(
            refany.downcast_mut::<String>().is_none(),
            "Should not allow mutable downcasting to wrong type"
        );

        // Correct type should still work
        let borrowed = refany
            .downcast_ref::<TestStruct>()
            .expect("Correct type should work");
        assert_eq!(borrowed.value, 42);
    }

    // ZSTs are stored with a null data pointer; creation and cloning must not
    // crash even though there is no heap allocation behind the handle.
    #[test]
    fn test_refany_zero_sized_type() {
        #[derive(Debug, Clone, PartialEq)]
        struct ZeroSized;

        let refany = RefAny::new(ZeroSized);

        // Zero-sized types are stored differently (null pointer)
        // Verify that the RefAny can be created and cloned without issues
        let _cloned = refany.clone();

        // Note: downcast operations on ZSTs may have limitations
        // This test primarily verifies that creation and cloning work
    }

    // Heap-owning payload (Vec): mutation through RefMut must grow the same
    // vector that later reads observe.
    #[test]
    fn test_refany_with_vec() {
        let test_val = vec![1, 2, 3, 4, 5];
        let mut refany = RefAny::new(test_val);

        {
            let mut borrowed_mut = refany
                .downcast_mut::<Vec<i32>>()
                .expect("Should downcast vec");
            borrowed_mut.push(6);
            borrowed_mut.push(7);
        }

        let borrowed = refany
            .downcast_ref::<Vec<i32>>()
            .expect("Should downcast vec");
        assert_eq!(&**borrowed, &[1, 2, 3, 4, 5, 6, 7]);
    }

    // Nested struct with both an inner struct and an owned Vec: field-level
    // mutation through the guard must persist.
    #[test]
    fn test_refany_nested_struct() {
        let nested = NestedStruct {
            inner: TestStruct {
                value: 42,
                name: "inner".to_string(),
            },
            data: vec![1, 2, 3],
        };

        let mut refany = RefAny::new(nested);

        {
            let mut borrowed_mut = refany
                .downcast_mut::<NestedStruct>()
                .expect("Should downcast nested");
            borrowed_mut.inner.value = 100;
            borrowed_mut.data.push(4);
        }

        let borrowed = refany
            .downcast_ref::<NestedStruct>()
            .expect("Should downcast nested");
        assert_eq!(borrowed.inner.value, 100);
        assert_eq!(&borrowed.data, &[1, 2, 3, 4]);
    }

    // The payload destructor must run exactly once, and only when the LAST
    // clone is dropped — verified with a drop-counting payload.
    #[test]
    fn test_refany_drop_order() {
        use std::sync::{Arc, Mutex};

        let drop_counter = Arc::new(Mutex::new(0));

        struct DropTracker {
            counter: Arc<Mutex<i32>>,
        }

        impl Drop for DropTracker {
            fn drop(&mut self) {
                *self.counter.lock().unwrap() += 1;
            }
        }

        {
            let tracker = DropTracker {
                counter: drop_counter.clone(),
            };
            let refany1 = RefAny::new(tracker);
            let refany2 = refany1.clone();
            let refany3 = refany1.clone();

            assert_eq!(*drop_counter.lock().unwrap(), 0, "Should not drop yet");

            drop(refany1);
            assert_eq!(
                *drop_counter.lock().unwrap(),
                0,
                "Should not drop after first clone dropped"
            );

            drop(refany2);
            assert_eq!(
                *drop_counter.lock().unwrap(),
                0,
                "Should not drop after second clone dropped"
            );

            drop(refany3);
            assert_eq!(
                *drop_counter.lock().unwrap(),
                1,
                "Should drop after last clone dropped"
            );
        }
    }

    // Mimics the IFrame-callback usage pattern: mutate state inside a scoped
    // "callback" borrow, then observe the updated state afterwards.
    #[test]
    fn test_refany_callback_simulation() {
        // Simulate the IFrame callback pattern
        #[derive(Clone)]
        struct CallbackData {
            counter: i32,
        }

        let data = CallbackData { counter: 0 };
        let mut refany = RefAny::new(data);

        // Simulate callback invocation
        {
            let mut borrowed = refany
                .downcast_mut::<CallbackData>()
                .expect("Should downcast in callback");
            borrowed.counter += 1;
        }

        let borrowed = refany
            .downcast_ref::<CallbackData>()
            .expect("Should read after callback");
        assert_eq!(borrowed.counter, 1);
    }
}