wasmer_types/extern_ref.rs
use std::any::Any;
use std::ptr;
use std::sync::atomic;

/// This type does not do reference counting automatically; reference counting must be
/// done manually with [`Self::ref_clone`] and [`Self::ref_drop`].
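///
/// A minimal usage sketch (illustrative only, not run as a doctest):
///
/// ```ignore
/// let mut r = VMExternRef::new(42u32);        // strong count is 1
/// let mut r2 = r.ref_clone();                 // strong count is 2
/// assert_eq!(r.downcast::<u32>(), Some(&42));
/// r2.ref_drop();                              // strong count back to 1
/// r.ref_drop();                               // count hits 0: the value is dropped
/// ```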
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(transparent)]
pub struct VMExternRef(*const VMExternRefInner);

impl VMExternRef {
    /// The maximum number of references allowed to this data.
    const MAX_REFCOUNT: usize = usize::MAX - 1;

    /// Checks if the given `ExternRef` is null.
    pub fn is_null(&self) -> bool {
        self.0.is_null()
    }

    /// Creates a new null extern ref.
    pub const fn null() -> Self {
        Self(ptr::null())
    }

    /// Gets a bit-level representation of an externref.
    /// For internal use when packing / unpacking it for function calls.
    pub(crate) fn to_binary(self) -> i128 {
        self.0 as i128
    }

    /// Creates an externref from a bit-level representation.
    /// For internal use when packing / unpacking it for function calls.
    ///
    /// # Safety
    /// The pointer is assumed valid or null. Passing arbitrary data to this function will
    /// result in undefined behavior. It is the caller's responsibility to verify that only
    /// valid externref bit patterns are passed to this function.
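    ///
    /// A round trip through the bit-level representation should yield the original
    /// reference (sketch, not run as a doctest):
    ///
    /// ```ignore
    /// let r = VMExternRef::new(7u8);
    /// let bits = r.to_binary();
    /// // Safety: `bits` was produced by `to_binary` on a live reference.
    /// let r2 = unsafe { VMExternRef::from_binary(bits) };
    /// assert_eq!(r2.downcast::<u8>(), Some(&7));
    /// ```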
    pub(crate) unsafe fn from_binary(bits: i128) -> Self {
        Self(bits as usize as *const _)
    }

    /// Makes a new extern reference.
    pub fn new<T>(value: T) -> Self
    where
        T: Any + Send + Sync + 'static + Sized,
    {
        Self(Box::into_raw(Box::new(VMExternRefInner::new::<T>(value))))
    }

    /// Tries to downcast to the given value.
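    ///
    /// Returns `None` for a null reference or a type mismatch (sketch, not run as a
    /// doctest):
    ///
    /// ```ignore
    /// let r = VMExternRef::new(String::from("hello"));
    /// assert!(r.downcast::<String>().is_some());
    /// assert!(r.downcast::<u32>().is_none());
    /// assert!(VMExternRef::null().downcast::<u32>().is_none());
    /// ```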
    pub fn downcast<T>(&self) -> Option<&T>
    where
        T: Any + Send + Sync + 'static + Sized,
    {
        if self.is_null() {
            return None;
        }
        unsafe {
            let inner = &*self.0;

            inner.data.downcast_ref::<T>()
        }
    }

    /// Panics if the ref count gets too high.
    #[track_caller]
    fn sanity_check_ref_count(old_size: usize, growth_amount: usize) {
        // If we exceed 18_446_744_073_709_551_614 references on a 64-bit system (or
        // 4_294_967_294 references on a 32-bit system), then we either live in a future
        // with magic technology or we have a bug in our ref-counting logic (i.e. a leak).
        // Either way, the best course of action is to terminate the program and update
        // some code on our side.
        //
        // Note to future readers: exceeding a `usize` ref count is trivially provable to
        // be a bug on systems that can address `usize`-sized memory blocks or smaller,
        // because each reference is itself at least `usize` in size and all virtual
        // memory would be taken up by references to the data, leaving no room for the
        // data itself.
        if old_size
            .checked_add(growth_amount)
            .map(|v| v > Self::MAX_REFCOUNT)
            .unwrap_or(true)
        {
            panic!("Too many references to `ExternRef`");
        }
    }

    /// A low-level function to increment the strong count a given number of times.
    ///
    /// This is used as an optimization when implementing some low-level VM primitives.
    /// If you're using this type directly for whatever reason, you probably want
    /// [`Self::ref_clone`] instead.
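    ///
    /// For example, when handing out `n` extra copies of the same reference at once, a
    /// single `ref_inc_by(n)` replaces `n` separate [`Self::ref_clone`] calls (sketch,
    /// not run as a doctest; every copy must still be balanced by a `ref_drop`):
    ///
    /// ```ignore
    /// let r = VMExternRef::new(0u64);
    /// r.ref_inc_by(3); // one atomic bump instead of three `ref_clone` calls
    /// ```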
    pub fn ref_inc_by(&self, val: usize) {
        if self.0.is_null() {
            return;
        }

        let old_size = unsafe {
            let ref_inner = &*self.0;
            ref_inner.increment_ref_count(val)
        };

        Self::sanity_check_ref_count(old_size, val);
    }

    /// Copies the reference and increments the strong count.
    pub fn ref_clone(&self) -> Self {
        if self.0.is_null() {
            return Self(self.0);
        }

        let old_size = unsafe {
            let ref_inner = &*self.0;
            ref_inner.increment_ref_count(1)
        };

        // See the comments in [`Self::sanity_check_ref_count`] for more information.
        if old_size > Self::MAX_REFCOUNT {
            panic!("Too many references to `ExternRef`");
        }

        Self(self.0)
    }

    /// Decrements the strong count, dropping the inner data and freeing the allocation
    /// when the count reaches zero.
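    ///
    /// Note that dropping happens in two steps: [`VMExternRefInner::decrement_and_drop`]
    /// only reports, via its return value, that the count hit zero; the `Box` holding the
    /// inner value is then reconstructed and freed here, after the shared borrow of
    /// `self.0` has ended.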
    pub fn ref_drop(&mut self) {
        if !self.0.is_null() {
            unsafe {
                let should_drop = {
                    let ref_inner: &VMExternRefInner = &*self.0;
                    ref_inner.decrement_and_drop()
                };
                if should_drop {
                    let _ = Box::from_raw(self.0 as *mut VMExternRefInner);
                }
            }
        }
    }

    /// Gets the number of strong references to this data.
    #[allow(dead_code)]
    fn strong_count(&self) -> usize {
        if self.0.is_null() {
            0
        } else {
            unsafe { (&*self.0).strong.load(atomic::Ordering::SeqCst) }
        }
    }
}

#[derive(Debug)]
#[repr(C)]
pub(crate) struct VMExternRefInner {
    strong: atomic::AtomicUsize,
    /// Boxing the data is the obviously correct thing to do to get started. This can
    /// "easily" be improved to an inline allocation later, as the logic is fully
    /// encapsulated.
    data: Box<dyn Any + Send + Sync + 'static>,
}

impl VMExternRefInner {
    fn new<T>(value: T) -> Self
    where
        T: Any + Send + Sync + Sized + 'static,
    {
        Self {
            strong: atomic::AtomicUsize::new(1),
            data: Box::new(value),
        }
    }

    /// Increments the reference count.
    /// Returns the old value.
    fn increment_ref_count(&self, val: usize) -> usize {
        // Using a relaxed ordering is alright here, as knowledge of
        // the original reference prevents other threads from
        // erroneously deleting the object.
        //
        // As explained in the [Boost documentation][1]:
        //
        // > Increasing the reference counter can always be done with
        // > `memory_order_relaxed`: New references to an object can
        // > only be formed from an existing reference, and passing an
        // > existing reference from one thread to another must already
        // > provide any required synchronization.
        //
        // [1]: https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
        self.strong.fetch_add(val, atomic::Ordering::Relaxed)
    }
    /// Decrements the count and drops the data if the count hits 0.
    /// Returns `true` if the containing allocation should be dropped.
    fn decrement_and_drop(&self) -> bool {
        // Because `fetch_sub` is already atomic, we do not need to
        // synchronize with other threads.
        if self.strong.fetch_sub(1, atomic::Ordering::Release) != 1 {
            return false;
        }

        // This fence is needed to prevent reordering of the use of the data and
        // the deletion of the data. Because the decrement is marked `Release`, it
        // synchronizes with this `Acquire` fence. This means that any use of the data
        // happens before decreasing the reference count, which happens before this
        // fence, which happens before the deletion of the data.
        //
        // As explained in the [Boost documentation][1]:
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // [1]: https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
        atomic::fence(atomic::Ordering::Acquire);

        true
    }
}

#[derive(Debug, PartialEq, Eq)]
#[repr(transparent)]
/// An opaque reference to some data. This reference can be passed through Wasm.
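///
/// A minimal usage sketch (illustrative only, not run as a doctest; `new`, `downcast`,
/// and `strong_count` are only available with the
/// `experimental-reference-types-extern-ref` feature):
///
/// ```ignore
/// let r = ExternRef::new(vec![1u8, 2, 3]);
/// assert_eq!(r.strong_count(), 1);
/// let r2 = r.clone(); // `Clone` increments the strong count
/// assert_eq!(r.strong_count(), 2);
/// assert_eq!(r2.downcast::<Vec<u8>>(), Some(&vec![1u8, 2, 3]));
/// drop(r2); // `Drop` decrements it again
/// assert_eq!(r.strong_count(), 1);
/// ```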
pub struct ExternRef {
    inner: VMExternRef,
}

impl Clone for ExternRef {
    fn clone(&self) -> Self {
        Self {
            inner: self.inner.ref_clone(),
        }
    }
}

impl Drop for ExternRef {
    fn drop(&mut self) {
        self.inner.ref_drop()
    }
}

impl ExternRef {
    /// Checks if the given `ExternRef` is null.
    pub fn is_null(&self) -> bool {
        self.inner.is_null()
    }

    /// Creates a new null extern ref.
    pub fn null() -> Self {
        Self {
            inner: VMExternRef::null(),
        }
    }

    /// Makes a new extern reference.
    #[cfg(feature = "experimental-reference-types-extern-ref")]
    pub fn new<T>(value: T) -> Self
    where
        T: Any + Send + Sync + 'static + Sized,
    {
        Self {
            inner: VMExternRef::new(value),
        }
    }

    /// Tries to downcast to the given value.
    #[cfg(feature = "experimental-reference-types-extern-ref")]
    pub fn downcast<T>(&self) -> Option<&T>
    where
        T: Any + Send + Sync + 'static + Sized,
    {
        self.inner.downcast::<T>()
    }

    /// Gets the number of strong references to this data.
    #[cfg(feature = "experimental-reference-types-extern-ref")]
    pub fn strong_count(&self) -> usize {
        self.inner.strong_count()
    }
}

impl From<VMExternRef> for ExternRef {
    fn from(other: VMExternRef) -> Self {
        Self { inner: other }
    }
}

impl From<ExternRef> for VMExternRef {
    fn from(other: ExternRef) -> Self {
        let out = other.inner;
        // We want to make this transformation without decrementing the count:
        // forgetting `other` keeps its `Drop` impl from running, so ownership of
        // the reference simply moves into the returned `VMExternRef`.
        std::mem::forget(other);
        out
    }
}