v8/array_buffer.rs

// Copyright 2019-2021 the Deno authors. All rights reserved. MIT license.

use std::cell::Cell;
use std::ffi::c_void;
use std::ops::Deref;
use std::ptr::NonNull;
use std::ptr::null;
use std::slice;

use crate::ArrayBuffer;
use crate::DataView;
use crate::Isolate;
use crate::Local;
use crate::Value;
use crate::isolate::RealIsolate;
use crate::scope::PinScope;
use crate::support::MaybeBool;
use crate::support::Opaque;
use crate::support::Shared;
use crate::support::SharedPtrBase;
use crate::support::SharedRef;
use crate::support::UniquePtr;
use crate::support::UniqueRef;
use crate::support::long;

unsafe extern "C" {
  fn v8__ArrayBuffer__Allocator__NewDefaultAllocator() -> *mut Allocator;
  fn v8__ArrayBuffer__Allocator__NewRustAllocator(
    handle: *const c_void,
    vtable: *const RustAllocatorVtable<c_void>,
  ) -> *mut Allocator;
  fn v8__ArrayBuffer__Allocator__DELETE(this: *mut Allocator);
  fn v8__ArrayBuffer__New__with_byte_length(
    isolate: *mut RealIsolate,
    byte_length: usize,
  ) -> *const ArrayBuffer;
  fn v8__ArrayBuffer__New__with_backing_store(
    isolate: *mut RealIsolate,
    backing_store: *const SharedRef<BackingStore>,
  ) -> *const ArrayBuffer;
  fn v8__ArrayBuffer__Detach(
    this: *const ArrayBuffer,
    key: *const Value,
  ) -> MaybeBool;
  fn v8__ArrayBuffer__SetDetachKey(this: *const ArrayBuffer, key: *const Value);
  fn v8__ArrayBuffer__Data(this: *const ArrayBuffer) -> *mut c_void;
  fn v8__ArrayBuffer__IsDetachable(this: *const ArrayBuffer) -> bool;
  fn v8__ArrayBuffer__WasDetached(this: *const ArrayBuffer) -> bool;
  fn v8__ArrayBuffer__ByteLength(this: *const ArrayBuffer) -> usize;
  fn v8__ArrayBuffer__GetBackingStore(
    this: *const ArrayBuffer,
  ) -> SharedRef<BackingStore>;
  fn v8__ArrayBuffer__NewBackingStore__with_byte_length(
    isolate: *mut RealIsolate,
    byte_length: usize,
  ) -> *mut BackingStore;
  fn v8__ArrayBuffer__NewBackingStore__with_data(
    data: *mut c_void,
    byte_length: usize,
    deleter: BackingStoreDeleterCallback,
    deleter_data: *mut c_void,
  ) -> *mut BackingStore;

  fn v8__BackingStore__Data(this: *const BackingStore) -> *mut c_void;
  fn v8__BackingStore__ByteLength(this: *const BackingStore) -> usize;
  fn v8__BackingStore__IsShared(this: *const BackingStore) -> bool;
  fn v8__BackingStore__IsResizableByUserJavaScript(
    this: *const BackingStore,
  ) -> bool;
  fn v8__BackingStore__DELETE(this: *mut BackingStore);

  fn v8__DataView__New(
    arraybuffer: *const ArrayBuffer,
    byte_offset: usize,
    length: usize,
  ) -> *const DataView;

  fn std__shared_ptr__v8__BackingStore__COPY(
    ptr: *const SharedPtrBase<BackingStore>,
  ) -> SharedPtrBase<BackingStore>;
  fn std__shared_ptr__v8__BackingStore__CONVERT__std__unique_ptr(
    unique_ptr: UniquePtr<BackingStore>,
  ) -> SharedPtrBase<BackingStore>;
  fn std__shared_ptr__v8__BackingStore__get(
    ptr: *const SharedPtrBase<BackingStore>,
  ) -> *mut BackingStore;
  fn std__shared_ptr__v8__BackingStore__reset(
    ptr: *mut SharedPtrBase<BackingStore>,
  );
  fn std__shared_ptr__v8__BackingStore__use_count(
    ptr: *const SharedPtrBase<BackingStore>,
  ) -> long;

  fn std__shared_ptr__v8__ArrayBuffer__Allocator__COPY(
    ptr: *const SharedPtrBase<Allocator>,
  ) -> SharedPtrBase<Allocator>;
  fn std__shared_ptr__v8__ArrayBuffer__Allocator__CONVERT__std__unique_ptr(
    unique_ptr: UniquePtr<Allocator>,
  ) -> SharedPtrBase<Allocator>;
  fn std__shared_ptr__v8__ArrayBuffer__Allocator__get(
    ptr: *const SharedPtrBase<Allocator>,
  ) -> *mut Allocator;
  fn std__shared_ptr__v8__ArrayBuffer__Allocator__reset(
    ptr: *mut SharedPtrBase<Allocator>,
  );
  fn std__shared_ptr__v8__ArrayBuffer__Allocator__use_count(
    ptr: *const SharedPtrBase<Allocator>,
  ) -> long;
}

/// A thread-safe allocator that V8 uses to allocate |ArrayBuffer|'s memory.
/// The allocator is a global V8 setting. It has to be set via
/// Isolate::CreateParams.
///
/// Memory allocated through this allocator by V8 is accounted for as external
/// memory by V8. Note that V8 keeps track of the memory for all internalized
/// |ArrayBuffer|s. Responsibility for tracking external memory (using
/// Isolate::AdjustAmountOfExternalAllocatedMemory) is handed over to the
/// embedder upon externalization and taken over upon internalization (creating
/// an internalized buffer from an existing buffer).
///
/// Note that it is unsafe to call back into V8 from any of the allocator
/// functions.
///
/// This is called v8::ArrayBuffer::Allocator in C++. Rather than use a Rust
/// module named array_buffer, which would contain only the Allocator, we let
/// it live at the top level: v8::Allocator.
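///
/// A minimal sketch of supplying an allocator when creating an isolate; the
/// `CreateParams::array_buffer_allocator` builder method is assumed here:
///
/// ```ignore
/// let params = v8::CreateParams::default()
///   .array_buffer_allocator(v8::new_default_allocator());
/// let isolate = &mut v8::Isolate::new(params);
/// ```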
#[repr(C)]
#[derive(Debug)]
pub struct Allocator(Opaque);

/// The table of Rust callbacks that backs an `Allocator` created with
/// `new_rust_allocator`.
#[repr(C)]
pub struct RustAllocatorVtable<T> {
  pub allocate: unsafe extern "C" fn(handle: &T, len: usize) -> *mut c_void,
  pub allocate_uninitialized:
    unsafe extern "C" fn(handle: &T, len: usize) -> *mut c_void,
  pub free: unsafe extern "C" fn(handle: &T, data: *mut c_void, len: usize),
  pub drop: unsafe extern "C" fn(handle: *const T),
}

impl Shared for Allocator {
  fn clone(ptr: &SharedPtrBase<Self>) -> SharedPtrBase<Self> {
    unsafe { std__shared_ptr__v8__ArrayBuffer__Allocator__COPY(ptr) }
  }
  fn from_unique_ptr(unique_ptr: UniquePtr<Self>) -> SharedPtrBase<Self> {
    unsafe {
      std__shared_ptr__v8__ArrayBuffer__Allocator__CONVERT__std__unique_ptr(
        unique_ptr,
      )
    }
  }
  fn get(ptr: &SharedPtrBase<Self>) -> *const Self {
    unsafe { std__shared_ptr__v8__ArrayBuffer__Allocator__get(ptr) }
  }
  fn reset(ptr: &mut SharedPtrBase<Self>) {
    unsafe { std__shared_ptr__v8__ArrayBuffer__Allocator__reset(ptr) }
  }
  fn use_count(ptr: &SharedPtrBase<Self>) -> long {
    unsafe { std__shared_ptr__v8__ArrayBuffer__Allocator__use_count(ptr) }
  }
}

/// malloc/free based convenience allocator.
#[inline(always)]
pub fn new_default_allocator() -> UniqueRef<Allocator> {
  unsafe {
    UniqueRef::from_raw(v8__ArrayBuffer__Allocator__NewDefaultAllocator())
  }
}

/// Creates an allocator managed by Rust code.
///
/// Marked `unsafe` because the caller must ensure that `handle` is valid and matches what `vtable` expects.
#[inline(always)]
pub unsafe fn new_rust_allocator<T: Sized + Send + Sync + 'static>(
  handle: *const T,
  vtable: &'static RustAllocatorVtable<T>,
) -> UniqueRef<Allocator> {
  unsafe {
    UniqueRef::from_raw(v8__ArrayBuffer__Allocator__NewRustAllocator(
      handle as *const c_void,
      vtable as *const RustAllocatorVtable<T>
        as *const RustAllocatorVtable<c_void>,
    ))
  }
}

#[test]
fn test_rust_allocator() {
  use std::sync::Arc;
  use std::sync::atomic::{AtomicUsize, Ordering};

  unsafe extern "C" fn allocate(_: &AtomicUsize, _: usize) -> *mut c_void {
    unimplemented!()
  }
  unsafe extern "C" fn allocate_uninitialized(
    _: &AtomicUsize,
    _: usize,
  ) -> *mut c_void {
    unimplemented!()
  }
  unsafe extern "C" fn free(_: &AtomicUsize, _: *mut c_void, _: usize) {
    unimplemented!()
  }
  unsafe extern "C" fn drop(x: *const AtomicUsize) {
    unsafe {
      let arc = Arc::from_raw(x);
      arc.store(42, Ordering::SeqCst);
    }
  }

  let retval = Arc::new(AtomicUsize::new(0));

  let vtable: &'static RustAllocatorVtable<AtomicUsize> =
    &RustAllocatorVtable {
      allocate,
      allocate_uninitialized,
      free,
      drop,
    };
  // Dropping the returned `UniqueRef<Allocator>` at the end of this statement
  // destroys the C++ allocator, which invokes the vtable's `drop` callback.
  unsafe { new_rust_allocator(Arc::into_raw(retval.clone()), vtable) };
  assert_eq!(retval.load(Ordering::SeqCst), 42);
  assert_eq!(Arc::strong_count(&retval), 1);
}

#[test]
fn test_default_allocator() {
  new_default_allocator();
}

impl Drop for Allocator {
  fn drop(&mut self) {
    unsafe { v8__ArrayBuffer__Allocator__DELETE(self) };
  }
}

pub type BackingStoreDeleterCallback = unsafe extern "C" fn(
  data: *mut c_void,
  byte_length: usize,
  deleter_data: *mut c_void,
);

pub(crate) mod sealed {
  /// Containers whose storage can be handed off to a `BackingStore`:
  /// `into_raw` yields an opaque ownership pointer (later passed back to the
  /// deleter as `deleter_data`) plus a thin pointer to the bytes, and
  /// `drop_raw` reclaims the allocation from those raw parts.
  pub trait Rawable {
    fn byte_len(&mut self) -> usize;
    fn into_raw(self) -> (*const (), *const u8);
    unsafe fn drop_raw(ptr: *const (), size: usize);
  }
}

macro_rules! rawable {
  ($ty:ty) => {
    impl sealed::Rawable for Box<[$ty]> {
      fn byte_len(&mut self) -> usize {
        self.as_mut().len() * std::mem::size_of::<$ty>()
      }

      fn into_raw(mut self) -> (*const (), *const u8) {
        // Thin the fat pointer
        let ptr = self.as_mut_ptr();
        std::mem::forget(self);
        (ptr as _, ptr as _)
      }

      unsafe fn drop_raw(ptr: *const (), len: usize) {
        // Fatten the thin pointer. `len` is a byte length, so convert it back
        // into an element count before rebuilding the boxed slice.
        _ = unsafe {
          Self::from_raw(std::ptr::slice_from_raw_parts_mut(
            ptr as _,
            len / std::mem::size_of::<$ty>(),
          ))
        };
      }
    }

    impl sealed::Rawable for Vec<$ty> {
      fn byte_len(&mut self) -> usize {
        Vec::<$ty>::len(self) * std::mem::size_of::<$ty>()
      }

      unsafe fn drop_raw(ptr: *const (), size: usize) {
        unsafe {
          <Box<[$ty]> as sealed::Rawable>::drop_raw(ptr, size);
        }
      }

      fn into_raw(self) -> (*const (), *const u8) {
        <Box<[$ty]> as sealed::Rawable>::into_raw(self.into_boxed_slice())
      }
    }
  };
}

rawable!(u8);
rawable!(u16);
rawable!(u32);
rawable!(u64);
rawable!(i8);
rawable!(i16);
rawable!(i32);
rawable!(i64);
rawable!(f32);
rawable!(f64);

impl<T: Sized> sealed::Rawable for Box<T>
where
  T: AsMut<[u8]>,
{
  fn byte_len(&mut self) -> usize {
    self.as_mut().as_mut().len()
  }

  fn into_raw(mut self) -> (*const (), *const u8) {
    let data = self.as_mut().as_mut().as_mut_ptr();
    // `Self::into_raw` resolves to the inherent `Box::into_raw`, not to this
    // trait method, so this does not recurse.
    let ptr = Self::into_raw(self);
    (ptr as _, data)
  }

  unsafe fn drop_raw(ptr: *const (), _len: usize) {
    unsafe {
      _ = Self::from_raw(ptr as _);
    }
  }
}

/// A wrapper around the backing store (i.e. the raw memory) of an array buffer.
/// See a document linked in http://crbug.com/v8/9908 for more information.
///
/// The allocation and destruction of backing stores is generally managed by
/// V8. Clients should always use standard C++ memory ownership types (i.e.
/// std::unique_ptr and std::shared_ptr) to manage lifetimes of backing stores
/// properly, since V8 internal objects may alias backing stores.
///
/// This object does not keep the underlying |ArrayBuffer::Allocator| alive by
/// default. Use Isolate::CreateParams::array_buffer_allocator_shared when
/// creating the Isolate to make it hold a reference to the allocator itself.
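///
/// A minimal sketch of creating a standalone backing store from a `Vec<u8>`
/// and inspecting it (no isolate is needed on this path):
///
/// ```
/// let store = v8::ArrayBuffer::new_backing_store_from_vec(vec![1, 2, 3]);
/// assert_eq!(store.byte_length(), 3);
/// assert!(!store.is_shared());
/// ```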
#[repr(C)]
#[derive(Debug)]
pub struct BackingStore([usize; 6]);

unsafe impl Send for BackingStore {}

impl BackingStore {
  /// Return a pointer to the beginning of the memory block for this backing
  /// store. The pointer is only valid as long as this backing store object
  /// lives.
  ///
  /// Might return `None` if the backing store has zero length.
  #[inline(always)]
  pub fn data(&self) -> Option<NonNull<c_void>> {
    let raw_ptr =
      unsafe { v8__BackingStore__Data(self as *const _ as *mut Self) };
    NonNull::new(raw_ptr)
  }

  /// The length (in bytes) of this backing store.
  #[inline(always)]
  pub fn byte_length(&self) -> usize {
    unsafe { v8__BackingStore__ByteLength(self) }
  }

  /// Indicates whether the backing store was created for an ArrayBuffer or
  /// a SharedArrayBuffer.
  #[inline(always)]
  pub fn is_shared(&self) -> bool {
    unsafe { v8__BackingStore__IsShared(self) }
  }

  /// Indicates whether the backing store was created for a resizable ArrayBuffer
  /// or a growable SharedArrayBuffer, and thus may be resized by user
  /// JavaScript code.
  #[inline(always)]
  pub fn is_resizable_by_user_javascript(&self) -> bool {
    unsafe { v8__BackingStore__IsResizableByUserJavaScript(self) }
  }
}

impl Deref for BackingStore {
  type Target = [Cell<u8>];

  /// Returns a `[Cell<u8>]` slice referencing the data in the backing store.
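  ///
  /// A minimal sketch of byte access through the `Cell` slice, assuming a
  /// backing store `store` with a length of at least one byte:
  ///
  /// ```ignore
  /// store[0].set(42);
  /// assert_eq!(store[0].get(), 42);
  /// ```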
  #[inline]
  fn deref(&self) -> &Self::Target {
    // We use a dangling pointer if `self.data()` returns None because it's UB
    // to create even an empty slice from a null pointer.
    let data = self
      .data()
      .unwrap_or_else(NonNull::dangling)
      .cast::<Cell<u8>>();
    let len = self.byte_length();
    unsafe { slice::from_raw_parts(data.as_ptr(), len) }
  }
}

impl Drop for BackingStore {
  #[inline]
  fn drop(&mut self) {
    unsafe { v8__BackingStore__DELETE(self) };
  }
}

impl Shared for BackingStore {
  #[inline]
  fn clone(ptr: &SharedPtrBase<Self>) -> SharedPtrBase<Self> {
    unsafe { std__shared_ptr__v8__BackingStore__COPY(ptr) }
  }
  #[inline]
  fn from_unique_ptr(unique_ptr: UniquePtr<Self>) -> SharedPtrBase<Self> {
    unsafe {
      std__shared_ptr__v8__BackingStore__CONVERT__std__unique_ptr(unique_ptr)
    }
  }
  #[inline]
  fn get(ptr: &SharedPtrBase<Self>) -> *const Self {
    unsafe { std__shared_ptr__v8__BackingStore__get(ptr) }
  }
  #[inline]
  fn reset(ptr: &mut SharedPtrBase<Self>) {
    unsafe { std__shared_ptr__v8__BackingStore__reset(ptr) }
  }
  #[inline]
  fn use_count(ptr: &SharedPtrBase<Self>) -> long {
    unsafe { std__shared_ptr__v8__BackingStore__use_count(ptr) }
  }
}

impl ArrayBuffer {
  /// Create a new ArrayBuffer. Allocate |byte_length| bytes.
  /// Allocated memory will be owned by a created ArrayBuffer and
  /// will be deallocated when it is garbage-collected,
  /// unless the object is externalized.
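  ///
  /// A minimal usage sketch, assuming a live pinned handle scope `scope`:
  ///
  /// ```ignore
  /// let ab = v8::ArrayBuffer::new(scope, 16);
  /// assert_eq!(ab.byte_length(), 16);
  /// ```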
  #[inline(always)]
  pub fn new<'s>(
    scope: &PinScope<'s, '_, ()>,
    byte_length: usize,
  ) -> Local<'s, ArrayBuffer> {
    unsafe {
      scope.cast_local(|sd| {
        v8__ArrayBuffer__New__with_byte_length(
          sd.get_isolate_ptr(),
          byte_length,
        )
      })
    }
    .unwrap()
  }

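  /// Creates a new ArrayBuffer over an existing, shared `BackingStore`.
  ///
  /// A minimal sketch, assuming a pinned handle scope `scope`; `make_shared`
  /// on the returned `UniqueRef<BackingStore>` is assumed from the support
  /// types:
  ///
  /// ```ignore
  /// let store = v8::ArrayBuffer::new_backing_store_from_vec(vec![1, 2, 3])
  ///   .make_shared();
  /// let ab = v8::ArrayBuffer::with_backing_store(scope, &store);
  /// assert_eq!(ab.byte_length(), 3);
  /// ```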
  #[inline(always)]
  pub fn with_backing_store<'s>(
    scope: &PinScope<'s, '_, ()>,
    backing_store: &SharedRef<BackingStore>,
  ) -> Local<'s, ArrayBuffer> {
    unsafe {
      scope.cast_local(|sd| {
        v8__ArrayBuffer__New__with_backing_store(
          sd.get_isolate_ptr(),
          backing_store,
        )
      })
    }
    .unwrap()
  }

  /// Data length in bytes.
  #[inline(always)]
  pub fn byte_length(&self) -> usize {
    unsafe { v8__ArrayBuffer__ByteLength(self) }
  }

  /// Returns true if this ArrayBuffer may be detached.
  #[inline(always)]
  pub fn is_detachable(&self) -> bool {
    unsafe { v8__ArrayBuffer__IsDetachable(self) }
  }

  /// Returns true if this ArrayBuffer was detached.
  #[inline(always)]
  pub fn was_detached(&self) -> bool {
    // A detached ArrayBuffer always has a byte length of zero, so a non-empty
    // buffer cannot have been detached and the FFI call can be skipped.
    if self.byte_length() != 0 {
      return false;
    }
    unsafe { v8__ArrayBuffer__WasDetached(self) }
  }

  /// Detaches this ArrayBuffer and all its views (typed arrays).
  /// Detaching sets the byte length of the buffer and all typed arrays to zero,
  /// preventing JavaScript from ever accessing the underlying backing store.
  /// The ArrayBuffer should have been externalized and must be detachable.
  /// Returns `None` if the key didn't pass the `[[ArrayBufferDetachKey]]`
  /// check, and `Some(true)` otherwise.
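  ///
  /// A minimal sketch of detaching with a detach key, assuming a pinned
  /// handle scope `scope` and a detachable buffer `ab`:
  ///
  /// ```ignore
  /// let key = v8::String::new(scope, "key").unwrap();
  /// ab.set_detach_key(key.into());
  /// assert_eq!(ab.detach(Some(key.into())), Some(true));
  /// assert!(ab.was_detached());
  /// ```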
  #[inline(always)]
  pub fn detach(&self, key: Option<Local<Value>>) -> Option<bool> {
    // V8 terminates when the ArrayBuffer is not detachable. Non-detachable
    // buffers are buffers that are in use by WebAssembly or asm.js.
    if self.is_detachable() {
      let key = key.map_or(null(), |v| &*v as *const Value);
      unsafe { v8__ArrayBuffer__Detach(self, key) }.into()
    } else {
      Some(true)
    }
  }

  /// Sets the `[[ArrayBufferDetachKey]]`.
  #[inline(always)]
  pub fn set_detach_key(&self, key: Local<Value>) {
    unsafe { v8__ArrayBuffer__SetDetachKey(self, &*key) };
  }

  /// More efficient shortcut for GetBackingStore()->Data().
  /// The returned pointer is valid as long as the ArrayBuffer is alive.
  #[inline(always)]
  pub fn data(&self) -> Option<NonNull<c_void>> {
    let raw_ptr = unsafe { v8__ArrayBuffer__Data(self) };
    NonNull::new(raw_ptr)
  }

  /// Get a shared pointer to the backing store of this array buffer. This
  /// pointer coordinates the lifetime management of the internal storage
  /// with any live ArrayBuffers on the heap, even across isolates. The embedder
  /// should not attempt to manage lifetime of the storage through other means.
  #[inline(always)]
  pub fn get_backing_store(&self) -> SharedRef<BackingStore> {
    unsafe { v8__ArrayBuffer__GetBackingStore(self) }
  }

  /// Returns a new standalone BackingStore that is allocated using the array
  /// buffer allocator of the isolate. The result can be later passed to
  /// ArrayBuffer::New.
  ///
  /// If the allocator returns nullptr, then the function may cause GCs in the
  /// given isolate and re-try the allocation. If GCs do not help, then the
  /// function will crash with an out-of-memory error.
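  ///
  /// A minimal sketch, assuming a mutable isolate handle `isolate`:
  ///
  /// ```ignore
  /// let store = v8::ArrayBuffer::new_backing_store(isolate, 1024);
  /// assert_eq!(store.byte_length(), 1024);
  /// ```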
  #[inline(always)]
  pub fn new_backing_store(
    scope: &mut Isolate,
    byte_length: usize,
  ) -> UniqueRef<BackingStore> {
    unsafe {
      UniqueRef::from_raw(v8__ArrayBuffer__NewBackingStore__with_byte_length(
        (*scope).as_real_ptr(),
        byte_length,
      ))
    }
  }

  /// Returns a new standalone BackingStore that takes over the ownership of
  /// the given buffer.
  ///
  /// The destructor of the BackingStore frees owned buffer memory.
  ///
  /// The result can be later passed to ArrayBuffer::New. The raw pointer
  /// to the buffer must not be passed again to any V8 API function.
  #[inline(always)]
  pub fn new_backing_store_from_boxed_slice(
    data: Box<[u8]>,
  ) -> UniqueRef<BackingStore> {
    Self::new_backing_store_from_bytes(data)
  }

  /// Returns a new standalone BackingStore that takes over the ownership of
  /// the given buffer.
  ///
  /// The destructor of the BackingStore frees owned buffer memory.
  ///
  /// The result can be later passed to ArrayBuffer::New. The raw pointer
  /// to the buffer must not be passed again to any V8 API function.
  #[inline(always)]
  pub fn new_backing_store_from_vec(data: Vec<u8>) -> UniqueRef<BackingStore> {
    Self::new_backing_store_from_bytes(data)
  }

  /// Returns a new standalone BackingStore backed by a container that dereferences
  /// to a mutable slice of bytes. The object is dereferenced once, and the resulting slice's
  /// memory is used for the lifetime of the buffer.
  ///
  /// This method may be called with most single-ownership containers that implement `AsMut<[u8]>`, including
  /// `Box<[u8]>` and `Vec<u8>`. This will also support most other mutable bytes containers (including `bytes::BytesMut`),
  /// though these buffers will need to be boxed to manage ownership of memory.
  ///
  /// ```
  /// // Vector of bytes
  /// let backing_store = v8::ArrayBuffer::new_backing_store_from_bytes(vec![1, 2, 3]);
  /// // Boxed slice of bytes
  /// let boxed_slice: Box<[u8]> = vec![1, 2, 3].into_boxed_slice();
  /// let backing_store = v8::ArrayBuffer::new_backing_store_from_bytes(boxed_slice);
  /// // BytesMut from bytes crate
  /// let backing_store = v8::ArrayBuffer::new_backing_store_from_bytes(Box::new(bytes::BytesMut::new()));
  /// ```
  #[inline(always)]
  pub fn new_backing_store_from_bytes<T>(
    mut bytes: T,
  ) -> UniqueRef<BackingStore>
  where
    T: sealed::Rawable,
  {
    let len = bytes.byte_len();

    let (ptr, slice) = T::into_raw(bytes);

    unsafe extern "C" fn drop_rawable<T: sealed::Rawable>(
      _ptr: *mut c_void,
      len: usize,
      data: *mut c_void,
    ) {
      // SAFETY: We know that data is a raw T from above
      unsafe { T::drop_raw(data as _, len) }
    }

    // SAFETY: We are extending the lifetime of a slice, but we're locking away
    // the box we dereferenced it from, so there's no way to get another
    // mutable reference.
    unsafe {
      Self::new_backing_store_from_ptr(
        slice as _,
        len,
        drop_rawable::<T>,
        ptr as _,
      )
    }
  }

  /// Returns a new standalone BackingStore backed by the given pointer.
  ///
  /// SAFETY: This API consumes raw pointers so is inherently
  /// unsafe. Usually you should use new_backing_store_from_boxed_slice.
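  ///
  /// A minimal sketch of handing V8 a leaked boxed slice together with a
  /// deleter that reclaims it (the deleter runs when the backing store is
  /// destroyed):
  ///
  /// ```
  /// use std::ffi::c_void;
  ///
  /// unsafe extern "C" fn deleter(data: *mut c_void, len: usize, _: *mut c_void) {
  ///   // SAFETY: reconstructs the boxed slice leaked below.
  ///   drop(unsafe {
  ///     Box::from_raw(std::ptr::slice_from_raw_parts_mut(data as *mut u8, len))
  ///   });
  /// }
  ///
  /// let data: Box<[u8]> = vec![0u8; 64].into_boxed_slice();
  /// let len = data.len();
  /// let ptr = Box::into_raw(data) as *mut u8;
  /// let store = unsafe {
  ///   v8::ArrayBuffer::new_backing_store_from_ptr(
  ///     ptr as *mut c_void,
  ///     len,
  ///     deleter,
  ///     std::ptr::null_mut(),
  ///   )
  /// };
  /// assert_eq!(store.byte_length(), 64);
  /// ```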
  #[inline(always)]
  pub unsafe fn new_backing_store_from_ptr(
    data_ptr: *mut c_void,
    byte_length: usize,
    deleter_callback: BackingStoreDeleterCallback,
    deleter_data: *mut c_void,
  ) -> UniqueRef<BackingStore> {
    unsafe {
      UniqueRef::from_raw(v8__ArrayBuffer__NewBackingStore__with_data(
        data_ptr,
        byte_length,
        deleter_callback,
        deleter_data,
      ))
    }
  }
}

impl DataView {
  /// Returns a new DataView.
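  ///
  /// A minimal usage sketch, assuming a pinned handle scope `scope`:
  ///
  /// ```ignore
  /// let ab = v8::ArrayBuffer::new(scope, 8);
  /// let dv = v8::DataView::new(scope, ab, 0, 8);
  /// assert_eq!(dv.byte_length(), 8);
  /// ```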
  #[inline(always)]
  pub fn new<'s>(
    scope: &PinScope<'s, '_, ()>,
    arraybuffer: Local<'s, ArrayBuffer>,
    byte_offset: usize,
    length: usize,
  ) -> Local<'s, DataView> {
    unsafe {
      scope
        .cast_local(|_| v8__DataView__New(&*arraybuffer, byte_offset, length))
    }
    .unwrap()
  }
}