// Copyright 2019-2021 the Deno authors. All rights reserved. MIT license.
use crate::function::FunctionCallbackInfo;
use crate::isolate_create_params::raw;
use crate::isolate_create_params::CreateParams;
use crate::promise::PromiseRejectMessage;
use crate::scope::data::ScopeData;
use crate::support::BuildTypeIdHasher;
use crate::support::MapFnFrom;
use crate::support::MapFnTo;
use crate::support::Opaque;
use crate::support::ToCFn;
use crate::support::UnitType;
use crate::wasm::trampoline;
use crate::wasm::WasmStreaming;
use crate::Array;
use crate::CallbackScope;
use crate::Context;
use crate::FixedArray;
use crate::Function;
use crate::HandleScope;
use crate::Local;
use crate::Message;
use crate::Module;
use crate::Object;
use crate::Promise;
use crate::ScriptOrModule;
use crate::String;
use crate::Value;

use std::any::Any;
use std::any::TypeId;

use std::collections::HashMap;
use std::ffi::c_void;
use std::fmt::{self, Debug, Formatter};
use std::mem::MaybeUninit;
use std::ops::Deref;
use std::ops::DerefMut;
use std::os::raw::c_char;
use std::ptr::null_mut;
use std::ptr::NonNull;
use std::sync::Arc;
use std::sync::Mutex;

/// Policy for running microtasks:
///   - explicit: microtasks are invoked with the
///               Isolate::PerformMicrotaskCheckpoint() method;
///   - auto: microtasks are invoked when the script call depth decrements
///           to zero.
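///
/// A minimal sketch of driving microtasks explicitly; illustrative only
/// (assumes an already-initialized `isolate`):
///
/// ```ignore
/// isolate.set_microtasks_policy(v8::MicrotasksPolicy::Explicit);
/// // ...run scripts that enqueue microtasks (promise reactions, etc.)...
/// isolate.perform_microtask_checkpoint(); // drain the queue manually
/// ```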
#[derive(Debug, Clone, Copy, PartialEq)]
#[repr(C)]
pub enum MicrotasksPolicy {
  Explicit = 0,
  // Scoped = 1 (RAII) is omitted for now, doesn't quite map to idiomatic Rust.
  Auto = 2,
}

/// PromiseHook with type Init is called when a new promise is
/// created. When a new promise is created as part of the chain in the
/// case of Promise.then or in the intermediate promises created by
/// Promise.{race, all}/AsyncFunctionAwait, we pass the parent promise
/// otherwise we pass undefined.
///
/// PromiseHook with type Resolve is called at the beginning of
/// resolve or reject function defined by CreateResolvingFunctions.
///
/// PromiseHook with type Before is called at the beginning of the
/// PromiseReactionJob.
///
/// PromiseHook with type After is called right at the end of the
/// PromiseReactionJob.
#[derive(Debug, Clone, Copy, PartialEq)]
#[repr(C)]
pub enum PromiseHookType {
  Init,
  Resolve,
  Before,
  After,
}

pub type MessageCallback = extern "C" fn(Local<Message>, Local<Value>);

pub type PromiseHook =
  extern "C" fn(PromiseHookType, Local<Promise>, Local<Value>);

pub type PromiseRejectCallback = extern "C" fn(PromiseRejectMessage);

/// HostInitializeImportMetaObjectCallback is called the first time import.meta
/// is accessed for a module. Subsequent access will reuse the same value.
///
/// The method combines two implementation-defined abstract operations into one:
/// HostGetImportMetaProperties and HostFinalizeImportMeta.
///
/// The embedder should use v8::Object::CreateDataProperty to add properties on
/// the meta object.
pub type HostInitializeImportMetaObjectCallback =
  extern "C" fn(Local<Context>, Local<Module>, Local<Object>);

/// HostImportModuleDynamicallyWithImportAssertionsCallback is called when we
/// require the embedder to load a module. This is used as part of the dynamic
/// import syntax.
///
/// The referrer contains metadata about the script/module that calls
/// import.
///
/// The specifier is the name of the module that should be imported.
/// The embedder must compile, instantiate, evaluate the Module, and
/// obtain its namespace object.
///
/// The Promise returned from this function is forwarded to userland
/// JavaScript. The embedder must resolve this promise with the module
/// namespace object. In case of an exception, the embedder must reject
/// this promise with the exception. If the promise creation itself
/// fails (e.g. due to stack overflow), the embedder must propagate
/// that exception by returning an empty MaybeLocal.
pub type HostImportModuleDynamicallyWithImportAssertionsCallback =
  extern "C" fn(
    Local<Context>,
    Local<ScriptOrModule>,
    Local<String>,
    Local<FixedArray>,
  ) -> *mut Promise;

pub type InterruptCallback =
  extern "C" fn(isolate: &mut Isolate, data: *mut c_void);

pub type NearHeapLimitCallback = extern "C" fn(
  data: *mut c_void,
  current_heap_limit: usize,
  initial_heap_limit: usize,
) -> usize;

pub type OomErrorCallback =
  extern "C" fn(location: *const c_char, is_heap_oom: bool);

/// Collection of V8 heap information.
///
/// Instances of this class can be passed to v8::Isolate::GetHeapStatistics to
/// get heap statistics from V8.
// Must be >= sizeof(v8::HeapStatistics), see v8__HeapStatistics__CONSTRUCT().
#[repr(C)]
#[derive(Debug)]
pub struct HeapStatistics([usize; 16]);

// Windows x64 ABI: MaybeLocal<Value> returned on the stack.
#[cfg(target_os = "windows")]
pub type PrepareStackTraceCallback<'s> = extern "C" fn(
  *mut *const Value,
  Local<'s, Context>,
  Local<'s, Value>,
  Local<'s, Array>,
) -> *mut *const Value;

// System V ABI: MaybeLocal<Value> returned in a register.
#[cfg(not(target_os = "windows"))]
pub type PrepareStackTraceCallback<'s> = extern "C" fn(
  Local<'s, Context>,
  Local<'s, Value>,
  Local<'s, Array>,
) -> *const Value;

extern "C" {
  fn v8__Isolate__New(params: *const raw::CreateParams) -> *mut Isolate;
  fn v8__Isolate__Dispose(this: *mut Isolate);
  fn v8__Isolate__SetData(this: *mut Isolate, slot: u32, data: *mut c_void);
  fn v8__Isolate__GetData(this: *const Isolate, slot: u32) -> *mut c_void;
  fn v8__Isolate__GetNumberOfDataSlots(this: *const Isolate) -> u32;
  fn v8__Isolate__Enter(this: *mut Isolate);
  fn v8__Isolate__Exit(this: *mut Isolate);
  fn v8__Isolate__ClearKeptObjects(isolate: *mut Isolate);
  fn v8__Isolate__LowMemoryNotification(isolate: *mut Isolate);
  fn v8__Isolate__GetHeapStatistics(this: *mut Isolate, s: *mut HeapStatistics);
  fn v8__Isolate__SetCaptureStackTraceForUncaughtExceptions(
    this: *mut Isolate,
    capture: bool,
    frame_limit: i32,
  );
  fn v8__Isolate__AddMessageListener(
    isolate: *mut Isolate,
    callback: MessageCallback,
  ) -> bool;
  fn v8__Isolate__AddNearHeapLimitCallback(
    isolate: *mut Isolate,
    callback: NearHeapLimitCallback,
    data: *mut c_void,
  );
  fn v8__Isolate__RemoveNearHeapLimitCallback(
    isolate: *mut Isolate,
    callback: NearHeapLimitCallback,
    heap_limit: usize,
  );
  fn v8__Isolate__SetOOMErrorHandler(
    isolate: *mut Isolate,
    callback: OomErrorCallback,
  );
  fn v8__Isolate__SetPrepareStackTraceCallback(
    isolate: *mut Isolate,
    callback: PrepareStackTraceCallback,
  );
  fn v8__Isolate__SetPromiseHook(isolate: *mut Isolate, hook: PromiseHook);
  fn v8__Isolate__SetPromiseRejectCallback(
    isolate: *mut Isolate,
    callback: PromiseRejectCallback,
  );
  fn v8__Isolate__SetHostInitializeImportMetaObjectCallback(
    isolate: *mut Isolate,
    callback: HostInitializeImportMetaObjectCallback,
  );
  fn v8__Isolate__SetHostImportModuleDynamicallyCallback(
    isolate: *mut Isolate,
    callback: HostImportModuleDynamicallyWithImportAssertionsCallback,
  );
  fn v8__Isolate__RequestInterrupt(
    isolate: *const Isolate,
    callback: InterruptCallback,
    data: *mut c_void,
  );
  fn v8__Isolate__TerminateExecution(isolate: *const Isolate);
  fn v8__Isolate__IsExecutionTerminating(isolate: *const Isolate) -> bool;
  fn v8__Isolate__CancelTerminateExecution(isolate: *const Isolate);
  fn v8__Isolate__GetMicrotasksPolicy(
    isolate: *const Isolate,
  ) -> MicrotasksPolicy;
  fn v8__Isolate__SetMicrotasksPolicy(
    isolate: *mut Isolate,
    policy: MicrotasksPolicy,
  );
  fn v8__Isolate__PerformMicrotaskCheckpoint(isolate: *mut Isolate);
  fn v8__Isolate__EnqueueMicrotask(
    isolate: *mut Isolate,
    function: *const Function,
  );
  fn v8__Isolate__SetAllowAtomicsWait(isolate: *mut Isolate, allow: bool);
  fn v8__Isolate__SetWasmStreamingCallback(
    isolate: *mut Isolate,
    callback: extern "C" fn(*const FunctionCallbackInfo),
  );
  fn v8__Isolate__HasPendingBackgroundTasks(isolate: *const Isolate) -> bool;

  fn v8__HeapProfiler__TakeHeapSnapshot(
    isolate: *mut Isolate,
    callback: extern "C" fn(*mut c_void, *const u8, usize) -> bool,
    arg: *mut c_void,
  );

  fn v8__HeapStatistics__CONSTRUCT(s: *mut MaybeUninit<HeapStatistics>);
  fn v8__HeapStatistics__total_heap_size(s: *const HeapStatistics) -> usize;
  fn v8__HeapStatistics__total_heap_size_executable(
    s: *const HeapStatistics,
  ) -> usize;
  fn v8__HeapStatistics__total_physical_size(s: *const HeapStatistics)
    -> usize;
  fn v8__HeapStatistics__total_available_size(
    s: *const HeapStatistics,
  ) -> usize;
  fn v8__HeapStatistics__total_global_handles_size(
    s: *const HeapStatistics,
  ) -> usize;
  fn v8__HeapStatistics__used_global_handles_size(
    s: *const HeapStatistics,
  ) -> usize;
  fn v8__HeapStatistics__used_heap_size(s: *const HeapStatistics) -> usize;
  fn v8__HeapStatistics__heap_size_limit(s: *const HeapStatistics) -> usize;
  fn v8__HeapStatistics__malloced_memory(s: *const HeapStatistics) -> usize;
  fn v8__HeapStatistics__external_memory(s: *const HeapStatistics) -> usize;
  fn v8__HeapStatistics__peak_malloced_memory(
    s: *const HeapStatistics,
  ) -> usize;
  fn v8__HeapStatistics__number_of_native_contexts(
    s: *const HeapStatistics,
  ) -> usize;
  fn v8__HeapStatistics__number_of_detached_contexts(
    s: *const HeapStatistics,
  ) -> usize;
  fn v8__HeapStatistics__does_zap_garbage(s: *const HeapStatistics) -> usize;
}

/// Isolate represents an isolated instance of the V8 engine.  V8 isolates have
/// completely separate states.  Objects from one isolate must not be used in
/// other isolates.  The embedder can create multiple isolates and use them in
/// parallel in multiple threads.  An isolate can be entered by at most one
/// thread at any given time.  The Locker/Unlocker API must be used to
/// synchronize.
///
/// rusty_v8 note: Unlike in the C++ API, the Isolate is entered when it is
/// constructed and exited when dropped.
#[repr(C)]
#[derive(Debug)]
pub struct Isolate(Opaque);

impl Isolate {
  const ANNEX_SLOT: u32 = 0;
  const CURRENT_SCOPE_DATA_SLOT: u32 = 1;
  const INTERNAL_SLOT_COUNT: u32 = 2;

  /// Creates a new isolate.  Does not change the currently entered
  /// isolate.
  ///
  /// When an isolate is no longer used its resources should be freed
  /// by calling V8::dispose().  Using the delete operator is not allowed.
  ///
  /// V8::initialize() must have run prior to this.
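  ///
  /// A minimal bootstrap sketch; illustrative only, since the exact
  /// platform-initialization signatures differ between rusty_v8 versions:
  ///
  /// ```ignore
  /// let platform = v8::new_default_platform().unwrap();
  /// v8::V8::initialize_platform(platform);
  /// v8::V8::initialize();
  ///
  /// let isolate = &mut v8::Isolate::new(Default::default());
  /// let scope = &mut v8::HandleScope::new(isolate);
  /// let context = v8::Context::new(scope);
  /// ```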
  #[allow(clippy::new_ret_no_self)]
  pub fn new(params: CreateParams) -> OwnedIsolate {
    crate::V8::assert_initialized();
    let (raw_create_params, create_param_allocations) = params.finalize();
    let cxx_isolate = unsafe { v8__Isolate__New(&raw_create_params) };
    let mut owned_isolate = OwnedIsolate::new(cxx_isolate);
    ScopeData::new_root(&mut owned_isolate);
    owned_isolate.create_annex(create_param_allocations);
    unsafe {
      owned_isolate.enter();
    }
    owned_isolate
  }

  /// Initial configuration parameters for a new Isolate.
  pub fn create_params() -> CreateParams {
    CreateParams::default()
  }

  pub fn thread_safe_handle(&self) -> IsolateHandle {
    IsolateHandle::new(self)
  }

  /// See [`IsolateHandle::terminate_execution`]
  pub fn terminate_execution(&self) -> bool {
    self.thread_safe_handle().terminate_execution()
  }

  /// See [`IsolateHandle::cancel_terminate_execution`]
  pub fn cancel_terminate_execution(&self) -> bool {
    self.thread_safe_handle().cancel_terminate_execution()
  }

  /// See [`IsolateHandle::is_execution_terminating`]
  pub fn is_execution_terminating(&self) -> bool {
    self.thread_safe_handle().is_execution_terminating()
  }

  pub(crate) fn create_annex(
    &mut self,
    create_param_allocations: Box<dyn Any>,
  ) {
    let annex_arc = Arc::new(IsolateAnnex::new(self, create_param_allocations));
    let annex_ptr = Arc::into_raw(annex_arc);
    unsafe {
      assert!(v8__Isolate__GetData(self, Self::ANNEX_SLOT).is_null());
      v8__Isolate__SetData(self, Self::ANNEX_SLOT, annex_ptr as *mut c_void);
    };
  }

  fn get_annex(&self) -> &IsolateAnnex {
    unsafe {
      &*(v8__Isolate__GetData(self, Self::ANNEX_SLOT) as *const _
        as *const IsolateAnnex)
    }
  }

  fn get_annex_mut(&mut self) -> &mut IsolateAnnex {
    unsafe {
      &mut *(v8__Isolate__GetData(self, Self::ANNEX_SLOT) as *mut IsolateAnnex)
    }
  }

  fn get_annex_arc(&self) -> Arc<IsolateAnnex> {
    let annex_ptr = self.get_annex();
    let annex_arc = unsafe { Arc::from_raw(annex_ptr) };
    Arc::into_raw(annex_arc.clone());
    annex_arc
  }

  /// Associate embedder-specific data with the isolate. `slot` has to be
  /// between 0 and `Isolate::get_number_of_data_slots()`.
  unsafe fn set_data(&mut self, slot: u32, ptr: *mut c_void) {
    v8__Isolate__SetData(self, slot + Self::INTERNAL_SLOT_COUNT, ptr)
  }

  /// Retrieve embedder-specific data from the isolate.
  /// Returns NULL if SetData has never been called for the given `slot`.
  fn get_data(&self, slot: u32) -> *mut c_void {
    unsafe { v8__Isolate__GetData(self, slot + Self::INTERNAL_SLOT_COUNT) }
  }

  /// Returns the maximum number of available embedder data slots. Valid slots
  /// are in the range of 0 - `Isolate::get_number_of_data_slots() - 1`.
  fn get_number_of_data_slots(&self) -> u32 {
    unsafe {
      v8__Isolate__GetNumberOfDataSlots(self) - Self::INTERNAL_SLOT_COUNT
    }
  }

  /// Returns a pointer to the `ScopeData` struct for the current scope.
  pub(crate) fn get_current_scope_data(&self) -> Option<NonNull<ScopeData>> {
    let scope_data_ptr =
      unsafe { v8__Isolate__GetData(self, Self::CURRENT_SCOPE_DATA_SLOT) };
    NonNull::new(scope_data_ptr).map(NonNull::cast)
  }

  /// Updates the slot that stores a `ScopeData` pointer for the current scope.
  pub(crate) fn set_current_scope_data(
    &mut self,
    scope_data: Option<NonNull<ScopeData>>,
  ) {
    let scope_data_ptr = scope_data
      .map(NonNull::cast)
      .map(NonNull::as_ptr)
      .unwrap_or_else(null_mut);
    unsafe {
      v8__Isolate__SetData(self, Self::CURRENT_SCOPE_DATA_SLOT, scope_data_ptr)
    };
  }

  /// Get a reference to embedder data added with `set_slot()`.
  pub fn get_slot<T: 'static>(&self) -> Option<&T> {
    let b = self.get_annex().slots.get(&TypeId::of::<T>())?;
    let r = <dyn Any>::downcast_ref::<T>(&**b).unwrap();
    Some(r)
  }

  /// Get a mutable reference to embedder data added with `set_slot()`.
  pub fn get_slot_mut<T: 'static>(&mut self) -> Option<&mut T> {
    let b = self.get_annex_mut().slots.get_mut(&TypeId::of::<T>())?;
    let r = <dyn Any>::downcast_mut::<T>(&mut **b).unwrap();
    Some(r)
  }

  /// Use with Isolate::get_slot and Isolate::get_slot_mut to associate state
  /// with an Isolate.
  ///
  /// This method gives ownership of value to the Isolate. Exactly one object of
  /// each type can be associated with an Isolate. If called more than once with
  /// an object of the same type, the earlier version will be dropped and
  /// replaced.
  ///
  /// Returns true if value was set without replacing an existing value.
  ///
  /// The value will be dropped when the isolate is dropped.
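  ///
  /// A short sketch of slot usage; illustrative only (`Counter` is a made-up
  /// embedder type):
  ///
  /// ```ignore
  /// struct Counter(u32);
  /// assert!(isolate.set_slot(Counter(0)));
  /// isolate.get_slot_mut::<Counter>().unwrap().0 += 1;
  /// assert_eq!(isolate.get_slot::<Counter>().unwrap().0, 1);
  /// let counter: Counter = isolate.remove_slot().unwrap();
  /// ```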
  pub fn set_slot<T: 'static>(&mut self, value: T) -> bool {
    self
      .get_annex_mut()
      .slots
      .insert(Any::type_id(&value), Box::new(value))
      .is_none()
  }

  /// Removes the embedder data added with `set_slot()` and returns it if it exists.
  pub fn remove_slot<T: 'static>(&mut self) -> Option<T> {
    let b = self.get_annex_mut().slots.remove(&TypeId::of::<T>())?;
    let v: T = *b.downcast::<T>().unwrap();
    Some(v)
  }

  /// Sets this isolate as the entered one for the current thread.
  /// Saves the previously entered one (if any), so that it can be
  /// restored when exiting.  Re-entering an isolate is allowed.
  ///
  /// rusty_v8 note: Unlike in the C++ API, the isolate is entered when it is
  /// constructed and exited when dropped.
  pub unsafe fn enter(&mut self) {
    v8__Isolate__Enter(self)
  }

  /// Exits this isolate by restoring the previously entered one in the
  /// current thread.  The isolate may still stay the same, if it was
  /// entered more than once.
  ///
  /// Requires: self == Isolate::GetCurrent().
  ///
  /// rusty_v8 note: Unlike in the C++ API, the isolate is entered when it is
  /// constructed and exited when dropped.
  pub unsafe fn exit(&mut self) {
    v8__Isolate__Exit(self)
  }

  /// Clears the set of objects held strongly by the heap. This set of
  /// objects are originally built when a WeakRef is created or
  /// successfully dereferenced.
  ///
  /// This is invoked automatically after microtasks are run. See
  /// MicrotasksPolicy for when microtasks are run.
  ///
  /// This needs to be manually invoked only if the embedder is manually
  /// running microtasks via a custom MicrotaskQueue class's PerformCheckpoint.
  /// In that case, it is the embedder's responsibility to make this call at a
  /// time which does not interrupt synchronous ECMAScript code execution.
  pub fn clear_kept_objects(&mut self) {
    unsafe { v8__Isolate__ClearKeptObjects(self) }
  }

  /// Optional notification that the system is running low on memory.
  /// V8 uses these notifications to attempt to free memory.
  pub fn low_memory_notification(&mut self) {
    unsafe { v8__Isolate__LowMemoryNotification(self) }
  }

  /// Get statistics about the heap memory usage.
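  ///
  /// A short sketch; illustrative only:
  ///
  /// ```ignore
  /// let mut stats = v8::HeapStatistics::default();
  /// isolate.get_heap_statistics(&mut stats);
  /// println!("{} / {}", stats.used_heap_size(), stats.heap_size_limit());
  /// ```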
  pub fn get_heap_statistics(&mut self, s: &mut HeapStatistics) {
    unsafe { v8__Isolate__GetHeapStatistics(self, s) }
  }

  /// Tells V8 to capture the current stack trace when an uncaught exception
  /// occurs and report it to the message listeners. The option is off by
  /// default.
  pub fn set_capture_stack_trace_for_uncaught_exceptions(
    &mut self,
    capture: bool,
    frame_limit: i32,
  ) {
    unsafe {
      v8__Isolate__SetCaptureStackTraceForUncaughtExceptions(
        self,
        capture,
        frame_limit,
      )
    }
  }

  /// Adds a message listener (errors only).
  ///
  /// The same message listener can be added more than once and in that
  /// case it will be called more than once for each message.
  ///
  /// The exception object will be passed to the callback.
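  ///
  /// A sketch of a listener; illustrative only (a `CallbackScope` is created
  /// from the message so it can be inspected, as rusty_v8's own tests do):
  ///
  /// ```ignore
  /// extern "C" fn on_message(
  ///   message: v8::Local<v8::Message>,
  ///   _exception: v8::Local<v8::Value>,
  /// ) {
  ///   let scope = &mut unsafe { v8::CallbackScope::new(message) };
  ///   let text = message.get(scope);
  ///   eprintln!("uncaught: {}", text.to_rust_string_lossy(scope));
  /// }
  ///
  /// isolate.add_message_listener(on_message);
  /// ```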
  pub fn add_message_listener(&mut self, callback: MessageCallback) -> bool {
    unsafe { v8__Isolate__AddMessageListener(self, callback) }
  }

  /// This specifies the callback called when the stack property of Error
  /// is accessed.
  ///
  /// PrepareStackTraceCallback is called when the stack property of an error is
  /// first accessed. The return value will be used as the stack value. If this
  /// callback is registered, the |Error.prepareStackTrace| API will be disabled.
  /// |sites| is an array of call sites, specified in
  /// https://v8.dev/docs/stack-trace-api
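  ///
  /// A sketch that replaces every stack with a fixed string; illustrative
  /// only (the closure must not capture anything):
  ///
  /// ```ignore
  /// isolate.set_prepare_stack_trace_callback(|scope, _error, _sites| {
  ///   v8::String::new(scope, "<redacted>").unwrap().into()
  /// });
  /// ```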
  pub fn set_prepare_stack_trace_callback<'s>(
    &mut self,
    callback: impl MapFnTo<PrepareStackTraceCallback<'s>>,
  ) {
    // Note: the C++ API returns a MaybeLocal but V8 asserts at runtime when
    // it's empty. That is, you can't return None and that's why the Rust API
    // expects Local<Value> instead of Option<Local<Value>>.
    unsafe {
      v8__Isolate__SetPrepareStackTraceCallback(self, callback.map_fn_to())
    };
  }

  /// Set the PromiseHook callback for various promise lifecycle
  /// events.
  pub fn set_promise_hook(&mut self, hook: PromiseHook) {
    unsafe { v8__Isolate__SetPromiseHook(self, hook) }
  }

  /// Set callback to notify about promise reject with no handler, or
  /// revocation of such a previous notification once the handler is added.
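  ///
  /// A sketch of a reject-tracking callback; illustrative only:
  ///
  /// ```ignore
  /// extern "C" fn on_reject(msg: v8::PromiseRejectMessage) {
  ///   match msg.get_event() {
  ///     v8::PromiseRejectEvent::PromiseRejectWithNoHandler => {
  ///       // Record the unhandled rejection.
  ///     }
  ///     _ => {
  ///       // A handler was attached later; clear any previous record.
  ///     }
  ///   }
  /// }
  ///
  /// isolate.set_promise_reject_callback(on_reject);
  /// ```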
  pub fn set_promise_reject_callback(
    &mut self,
    callback: PromiseRejectCallback,
  ) {
    unsafe { v8__Isolate__SetPromiseRejectCallback(self, callback) }
  }

  /// This specifies the callback called by the upcoming import.meta
  /// language feature to retrieve host-defined metadata for a module.
  pub fn set_host_initialize_import_meta_object_callback(
    &mut self,
    callback: HostInitializeImportMetaObjectCallback,
  ) {
    unsafe {
      v8__Isolate__SetHostInitializeImportMetaObjectCallback(self, callback)
    }
  }

  /// This specifies the callback called by the upcoming dynamic
  /// import() language feature to load modules.
  pub fn set_host_import_module_dynamically_callback(
    &mut self,
    callback: HostImportModuleDynamicallyWithImportAssertionsCallback,
  ) {
    unsafe {
      v8__Isolate__SetHostImportModuleDynamicallyCallback(self, callback)
    }
  }

  /// Add a callback to invoke in case the heap size is close to the heap limit.
  /// If multiple callbacks are added, only the most recently added callback is
  /// invoked.
  #[allow(clippy::not_unsafe_ptr_arg_deref)] // False positive.
  pub fn add_near_heap_limit_callback(
    &mut self,
    callback: NearHeapLimitCallback,
    data: *mut c_void,
  ) {
    unsafe { v8__Isolate__AddNearHeapLimitCallback(self, callback, data) };
  }

  /// Remove the given callback and restore the heap limit to the given limit.
  /// If the given limit is zero, then it is ignored. If the current heap size
  /// is greater than the given limit, then the heap limit is restored to the
  /// minimal limit that is possible for the current heap size.
  pub fn remove_near_heap_limit_callback(
    &mut self,
    callback: NearHeapLimitCallback,
    heap_limit: usize,
  ) {
    unsafe {
      v8__Isolate__RemoveNearHeapLimitCallback(self, callback, heap_limit)
    };
  }

  pub fn set_oom_error_handler(&mut self, callback: OomErrorCallback) {
    unsafe { v8__Isolate__SetOOMErrorHandler(self, callback) };
  }

  /// Returns the policy controlling how Microtasks are invoked.
  pub fn get_microtasks_policy(&self) -> MicrotasksPolicy {
    unsafe { v8__Isolate__GetMicrotasksPolicy(self) }
  }

  /// Sets the policy controlling how Microtasks are invoked.
  pub fn set_microtasks_policy(&mut self, policy: MicrotasksPolicy) {
    unsafe { v8__Isolate__SetMicrotasksPolicy(self, policy) }
  }

  /// Runs the default MicrotaskQueue until it gets empty and performs other
  /// microtask checkpoint steps, such as calling ClearKeptObjects. Asserts
  /// that the MicrotasksPolicy is not kScoped. Any exceptions thrown by
  /// microtask callbacks are swallowed.
  pub fn perform_microtask_checkpoint(&mut self) {
    unsafe { v8__Isolate__PerformMicrotaskCheckpoint(self) }
  }

  /// An alias for PerformMicrotaskCheckpoint.
  #[deprecated(note = "Use Isolate::perform_microtask_checkpoint() instead")]
  pub fn run_microtasks(&mut self) {
    self.perform_microtask_checkpoint()
  }

  /// Enqueues the callback to the default MicrotaskQueue.
  pub fn enqueue_microtask(&mut self, microtask: Local<Function>) {
    unsafe { v8__Isolate__EnqueueMicrotask(self, &*microtask) }
  }

  /// Set whether calling Atomics.wait (a function that may block) is allowed in
  /// this isolate. This can also be configured via
  /// CreateParams::allow_atomics_wait.
  pub fn set_allow_atomics_wait(&mut self, allow: bool) {
    unsafe { v8__Isolate__SetAllowAtomicsWait(self, allow) }
  }

  /// Embedder injection point for `WebAssembly.compileStreaming(source)`.
  /// The expectation is that the embedder sets it at most once.
  ///
  /// The callback receives the source argument (string, Promise, etc.)
  /// and an instance of [WasmStreaming]. The [WasmStreaming] instance
  /// can outlive the callback and is used to feed data chunks to V8
  /// asynchronously.
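  ///
  /// A sketch that feeds a complete module in one chunk; illustrative only
  /// (`WASM_BYTES` is a placeholder for real wasm binary data, and the
  /// closure must not capture anything since it is required to be zero-sized):
  ///
  /// ```ignore
  /// const WASM_BYTES: &[u8] = b"\0asm..."; // placeholder, not a valid module
  /// isolate.set_wasm_streaming_callback(|_scope, _source, mut streaming| {
  ///   streaming.on_bytes_received(WASM_BYTES);
  ///   streaming.finish();
  /// });
  /// ```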
  pub fn set_wasm_streaming_callback<F>(&mut self, _: F)
  where
    F: UnitType + Fn(&mut HandleScope, Local<Value>, WasmStreaming),
  {
    unsafe { v8__Isolate__SetWasmStreamingCallback(self, trampoline::<F>()) }
  }

  /// Returns true if there is ongoing background work within V8 that will
  /// eventually post a foreground task, like asynchronous WebAssembly
  /// compilation.
  pub fn has_pending_background_tasks(&self) -> bool {
    unsafe { v8__Isolate__HasPendingBackgroundTasks(self) }
  }

  /// Disposes the isolate.  The isolate must not be entered by any
  /// thread to be disposable.
  unsafe fn dispose(&mut self) {
    // Drop the scope stack.
    ScopeData::drop_root(self);

    // Set the `isolate` pointer inside the annex struct to null, so any
    // IsolateHandle that outlives the isolate will know that it can't call
    // methods on the isolate.
    let annex = self.get_annex_mut();
    {
      let _lock = annex.isolate_mutex.lock().unwrap();
      annex.isolate = null_mut();
    }

    // Clear slots and drop owned objects that were taken out of `CreateParams`.
    annex.create_param_allocations = Box::new(());
    annex.slots.clear();

    // Subtract one from the Arc<IsolateAnnex> reference count.
    Arc::from_raw(annex);
    self.set_data(0, null_mut());

    // No test case in rusty_v8 shows this, but there have been situations in
    // Deno where dropping the annex before this point caused a segfault.
    v8__Isolate__Dispose(self)
  }

  /// Take a heap snapshot. The callback is invoked one or more times
  /// with byte slices containing the snapshot serialized as JSON.
  /// It's the callback's responsibility to reassemble them into
  /// a single document, e.g., by writing them to a file.
  /// Note that Chrome DevTools refuses to load snapshots without
  /// a .heapsnapshot suffix.
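  ///
  /// A sketch that collects the chunks into one buffer; illustrative only:
  ///
  /// ```ignore
  /// let mut json = Vec::new();
  /// isolate.take_heap_snapshot(|chunk| {
  ///   json.extend_from_slice(chunk);
  ///   true // keep receiving chunks
  /// });
  /// // `json` now holds the complete snapshot document.
  /// ```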
  pub fn take_heap_snapshot<F>(&mut self, mut callback: F)
  where
    F: FnMut(&[u8]) -> bool,
  {
    extern "C" fn trampoline<F>(
      arg: *mut c_void,
      data: *const u8,
      size: usize,
    ) -> bool
    where
      F: FnMut(&[u8]) -> bool,
    {
      let p = arg as *mut F;
      let callback = unsafe { &mut *p };
      let slice = unsafe { std::slice::from_raw_parts(data, size) };
      callback(slice)
    }

    let arg = &mut callback as *mut F as *mut c_void;
    unsafe { v8__HeapProfiler__TakeHeapSnapshot(self, trampoline::<F>, arg) }
  }
}

pub(crate) struct IsolateAnnex {
  create_param_allocations: Box<dyn Any>,
  slots: HashMap<TypeId, Box<dyn Any>, BuildTypeIdHasher>,
  // The `isolate` and `isolate_mutex` fields are there so an `IsolateHandle`
  // (which may outlive the isolate itself) can determine whether the isolate
  // is still alive, and if so, get a reference to it. Safety rules:
  // - The 'main thread' must lock the mutex and reset `isolate` to null just
  //   before the isolate is disposed.
  // - Any other thread must lock the mutex while it's reading/using the
  //   `isolate` pointer.
  isolate: *mut Isolate,
  isolate_mutex: Mutex<()>,
}

impl IsolateAnnex {
  fn new(
    isolate: &mut Isolate,
    create_param_allocations: Box<dyn Any>,
  ) -> Self {
    Self {
      create_param_allocations,
      slots: HashMap::default(),
      isolate,
      isolate_mutex: Mutex::new(()),
    }
  }
}

impl Debug for IsolateAnnex {
  fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
    f.debug_struct("IsolateAnnex")
      .field("isolate", &self.isolate)
      .field("isolate_mutex", &self.isolate_mutex)
      .finish()
  }
}

/// IsolateHandle is a thread-safe reference to an Isolate. Its main use is to
/// terminate execution of a running isolate from another thread.
///
/// It is created with Isolate::thread_safe_handle().
///
/// IsolateHandle implements Clone, Send, and Sync.
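///
/// A sketch of terminating a runaway script from a watchdog thread;
/// illustrative only:
///
/// ```ignore
/// let handle = isolate.thread_safe_handle();
/// std::thread::spawn(move || {
///   std::thread::sleep(std::time::Duration::from_secs(1));
///   handle.terminate_execution();
/// });
/// // ...run the script on this thread; it is terminated after ~1 second.
/// ```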
#[derive(Clone, Debug)]
pub struct IsolateHandle(Arc<IsolateAnnex>);

unsafe impl Send for IsolateHandle {}
unsafe impl Sync for IsolateHandle {}

impl IsolateHandle {
  // This function is marked unsafe because it must be called only with either
  // IsolateAnnex::isolate_mutex locked, or from the main thread associated
  // with the V8 isolate.
  pub(crate) unsafe fn get_isolate_ptr(&self) -> *mut Isolate {
    self.0.isolate
  }

  fn new(isolate: &Isolate) -> Self {
    Self(isolate.get_annex_arc())
  }

  /// Forcefully terminate the current thread of JavaScript execution
  /// in the given isolate.
  ///
  /// This method can be used by any thread even if that thread has not
  /// acquired the V8 lock with a Locker object.
  ///
  /// Returns false if Isolate was already destroyed.
  pub fn terminate_execution(&self) -> bool {
    let _lock = self.0.isolate_mutex.lock().unwrap();
    if self.0.isolate.is_null() {
      false
    } else {
      unsafe { v8__Isolate__TerminateExecution(self.0.isolate) };
      true
    }
  }

  /// Resume execution capability in the given isolate, whose execution
  /// was previously forcefully terminated using TerminateExecution().
  ///
  /// When execution is forcefully terminated using TerminateExecution(),
  /// the isolate can not resume execution until all JavaScript frames
  /// have propagated the uncatchable exception which is generated.  This
  /// method allows the program embedding the engine to handle the
  /// termination event and resume execution capability, even if
  /// JavaScript frames remain on the stack.
  ///
  /// This method can be used by any thread even if that thread has not
  /// acquired the V8 lock with a Locker object.
  ///
  /// Returns false if Isolate was already destroyed.
  pub fn cancel_terminate_execution(&self) -> bool {
    let _lock = self.0.isolate_mutex.lock().unwrap();
    if self.0.isolate.is_null() {
      false
    } else {
      unsafe { v8__Isolate__CancelTerminateExecution(self.0.isolate) };
      true
    }
  }

  /// Is V8 terminating JavaScript execution.
  ///
  /// Returns true if JavaScript execution is currently terminating
  /// because of a call to TerminateExecution.  In that case there are
  /// still JavaScript frames on the stack and the termination
  /// exception is still active.
  ///
  /// Returns false if Isolate was already destroyed.
  pub fn is_execution_terminating(&self) -> bool {
    let _lock = self.0.isolate_mutex.lock().unwrap();
    if self.0.isolate.is_null() {
      false
    } else {
      unsafe { v8__Isolate__IsExecutionTerminating(self.0.isolate) }
    }
  }

  /// Request V8 to interrupt long running JavaScript code and invoke
  /// the given |callback| passing the given |data| to it. After |callback|
  /// returns control will be returned to the JavaScript code.
  /// There may be a number of interrupt requests in flight.
  /// Can be called from another thread without acquiring a |Locker|.
  /// Registered |callback| must not reenter interrupted Isolate.
  ///
  /// Returns false if Isolate was already destroyed.
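  ///
  /// A sketch of requesting an interrupt; illustrative only (the callback
  /// must be `extern "C"` and must not re-enter the isolate):
  ///
  /// ```ignore
  /// extern "C" fn on_interrupt(
  ///   _isolate: &mut v8::Isolate,
  ///   _data: *mut std::ffi::c_void,
  /// ) {
  ///   // Inspect shared state, set flags, etc.
  /// }
  ///
  /// handle.request_interrupt(on_interrupt, std::ptr::null_mut());
  /// ```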
  // Clippy warns that this method is dereferencing a raw pointer, but it is
  // not: https://github.com/rust-lang/rust-clippy/issues/3045
  #[allow(clippy::not_unsafe_ptr_arg_deref)]
  pub fn request_interrupt(
    &self,
    callback: InterruptCallback,
    data: *mut c_void,
  ) -> bool {
    let _lock = self.0.isolate_mutex.lock().unwrap();
    if self.0.isolate.is_null() {
      false
    } else {
      unsafe { v8__Isolate__RequestInterrupt(self.0.isolate, callback, data) };
      true
    }
  }
}

/// Same as Isolate but gets disposed when it goes out of scope.
#[derive(Debug)]
pub struct OwnedIsolate {
  cxx_isolate: NonNull<Isolate>,
}

impl OwnedIsolate {
  pub(crate) fn new(cxx_isolate: *mut Isolate) -> Self {
    let cxx_isolate = NonNull::new(cxx_isolate).unwrap();
    Self { cxx_isolate }
  }
}

impl Drop for OwnedIsolate {
  fn drop(&mut self) {
    unsafe {
      self.exit();
      self.cxx_isolate.as_mut().dispose()
    }
  }
}

impl Deref for OwnedIsolate {
  type Target = Isolate;
  fn deref(&self) -> &Self::Target {
    unsafe { self.cxx_isolate.as_ref() }
  }
}

impl DerefMut for OwnedIsolate {
  fn deref_mut(&mut self) -> &mut Self::Target {
    unsafe { self.cxx_isolate.as_mut() }
  }
}

impl HeapStatistics {
  pub fn total_heap_size(&self) -> usize {
    unsafe { v8__HeapStatistics__total_heap_size(self) }
  }

  pub fn total_heap_size_executable(&self) -> usize {
    unsafe { v8__HeapStatistics__total_heap_size_executable(self) }
  }

  pub fn total_physical_size(&self) -> usize {
    unsafe { v8__HeapStatistics__total_physical_size(self) }
  }

  pub fn total_available_size(&self) -> usize {
    unsafe { v8__HeapStatistics__total_available_size(self) }
  }

  pub fn total_global_handles_size(&self) -> usize {
    unsafe { v8__HeapStatistics__total_global_handles_size(self) }
  }

  pub fn used_global_handles_size(&self) -> usize {
    unsafe { v8__HeapStatistics__used_global_handles_size(self) }
  }

  pub fn used_heap_size(&self) -> usize {
    unsafe { v8__HeapStatistics__used_heap_size(self) }
  }

  pub fn heap_size_limit(&self) -> usize {
    unsafe { v8__HeapStatistics__heap_size_limit(self) }
  }

  pub fn malloced_memory(&self) -> usize {
    unsafe { v8__HeapStatistics__malloced_memory(self) }
  }

  pub fn external_memory(&self) -> usize {
    unsafe { v8__HeapStatistics__external_memory(self) }
  }

  pub fn peak_malloced_memory(&self) -> usize {
    unsafe { v8__HeapStatistics__peak_malloced_memory(self) }
  }

  pub fn number_of_native_contexts(&self) -> usize {
    unsafe { v8__HeapStatistics__number_of_native_contexts(self) }
  }

  pub fn number_of_detached_contexts(&self) -> usize {
    unsafe { v8__HeapStatistics__number_of_detached_contexts(self) }
  }

  /// Returns a 0/1 boolean, which signifies whether V8 overwrites heap
  /// garbage with a bit pattern.
  pub fn does_zap_garbage(&self) -> usize {
    unsafe { v8__HeapStatistics__does_zap_garbage(self) }
  }
}

impl Default for HeapStatistics {
  fn default() -> Self {
    let mut s = MaybeUninit::<Self>::uninit();
    unsafe {
      v8__HeapStatistics__CONSTRUCT(&mut s);
      s.assume_init()
    }
  }
}

impl<'s, F> MapFnFrom<F> for PrepareStackTraceCallback<'s>
where
  F: UnitType
    + Fn(
      &mut HandleScope<'s>,
      Local<'s, Value>,
      Local<'s, Array>,
    ) -> Local<'s, Value>,
{
  // Windows x64 ABI: MaybeLocal<Value> returned on the stack.
  #[cfg(target_os = "windows")]
  fn mapping() -> Self {
    let f = |ret_ptr, context, error, sites| {
      let mut scope: CallbackScope = unsafe { CallbackScope::new(context) };
      let r = (F::get())(&mut scope, error, sites);
      unsafe { std::ptr::write(ret_ptr, &*r as *const _) };
      ret_ptr
    };
    f.to_c_fn()
  }

  // System V ABI: MaybeLocal<Value> returned in a register.
  #[cfg(not(target_os = "windows"))]
  fn mapping() -> Self {
    let f = |context, error, sites| {
      let mut scope: CallbackScope = unsafe { CallbackScope::new(context) };
      let r = (F::get())(&mut scope, error, sites);
      &*r as *const _
    };
    f.to_c_fn()
  }
}