// gluon_vm/thread.rs

1//! The thread/vm type
2use std::{
3    any::{Any, TypeId},
4    cmp::Ordering,
5    fmt,
6    marker::Unpin,
7    mem,
8    ops::{Add, Deref, DerefMut, Div, Mul, Sub},
9    pin::Pin,
10    ptr,
11    result::Result as StdResult,
12    slice,
13    sync::{
14        self,
15        atomic::{self, AtomicBool},
16        Arc, Mutex, MutexGuard, RwLock,
17    },
18    usize,
19};
20
21use futures::{
22    future::{self, Either, Ready},
23    ready,
24    task::{self, Poll},
25    Future,
26};
27
28use async_trait::async_trait;
29
30use crate::base::{
31    pos::Line,
32    symbol::Symbol,
33    types::{self, Alias, ArcType},
34};
35
36use crate::{
37    api::{Getable, Pushable, ValueRef, VmType},
38    compiler::UpvarInfo,
39    gc::{self, CloneUnrooted, DataDef, Gc, GcPtr, GcRef, Generation, Move},
40    interner::InternedStr,
41    macros::MacroEnv,
42    source_map::LocalIter,
43    stack::{
44        ClosureState, ExternCallState, ExternState, Frame, Lock, Stack, StackFrame, StackState,
45        State,
46    },
47    types::*,
48    value::{
49        BytecodeFunction, Callable, ClosureData, ClosureDataDef, ClosureInitDef, Def,
50        ExternFunction, PartialApplicationDataDef, RecordDef, UninitializedRecord,
51        UninitializedVariantDef, Userdata, Value, ValueRepr,
52        ValueRepr::{Closure, Data, Float, Function, Int, PartialApplication, String},
53        VariantDef,
54    },
55    vm::{GlobalVmState, GlobalVmStateBuilder, ThreadSlab, VmEnvInstance},
56    Error, Result, Variants,
57};
58
59pub use crate::{gc::Trace, stack::PopValue};
60
/// A future that is either already completed (`Ready`) or still running (`F`).
pub type FutureValue<F> = Either<Ready<<F as Future>::Output>, F>;
62
/// Future which drives a gluon thread until its current execution completes.
///
/// The thread is kept in an `Option` so `poll` can move it out and hand it
/// back to the caller once execution has finished.
pub struct Execute<T> {
    thread: Option<T>,
}
66
67impl<T> Execute<T>
68where
69    T: Deref<Target = Thread>,
70{
71    pub fn new(thread: T) -> Execute<T> {
72        Execute {
73            thread: Some(thread),
74        }
75    }
76
77    pub fn root(&mut self) -> Execute<RootedThread> {
78        Execute {
79            thread: self.thread.as_ref().map(|t| t.root_thread()),
80        }
81    }
82}
83
impl<'vm, T> Future for Execute<T>
where
    T: Deref<Target = Thread> + Unpin,
    T: VmRoot<'vm>,
{
    type Output = Result<RootedValue<T>>;

    // Returns `T` so that it can be reused by the caller
    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
        let value = {
            let thread = self
                .thread
                .as_ref()
                .expect("cannot poll Execute future after it has succeded");
            // `resume` drives the thread; `ready!` propagates `Pending` until it finishes
            let mut context = ready!(thread.resume(cx))?;
            // The result of the execution is left on top of the stack
            context.stack.pop()
        };

        // Move the thread out so the caller can reuse it; polling again now panics
        let thread = self.thread.take().unwrap();
        // SAFETY `value` is produced (and owned) by `thread`
        unsafe { Poll::Ready(Ok(thread.root_value_with_self(&value))) }
    }
}
107
/// Like [`Execute`] but, on error, resets the stack to the top level and attaches a stacktrace.
pub struct ExecuteTop<T>(pub Execute<T>);
109
impl<'vm, T> Future for ExecuteTop<T>
where
    T: Deref<Target = Thread> + Unpin,
    T: VmRoot<'vm>,
{
    type Output = Result<RootedValue<T>>;

    // Returns `T` so that it can be reused by the caller
    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
        match ready!(Pin::new(&mut self.0).poll(cx)) {
            Ok(x) => Ok(x).into(),
            Err(mut err) => {
                let thread = self
                    .0
                    .thread
                    .as_ref()
                    .expect("cannot poll Execute future after it has succeded");
                // Unwind the thread's stack back to the top level so the thread
                // remains usable, collecting a stacktrace along the way
                let mut context = thread.context();
                let stack = StackFrame::<State>::current(&mut context.stack);
                let new_trace = reset_stack(stack, 1)?;
                // Only panics carry a stacktrace slot to fill in
                if let Error::Panic(_, ref mut trace) = err {
                    *trace = Some(new_trace);
                }
                Err(err).into()
            }
        }
    }
}
138
/// Enum signaling a successful or unsuccessful call to an extern function.
/// If an error occurred the error message is expected to be on the top of the stack.
#[derive(Eq, PartialEq)]
#[repr(C)]
pub enum Status {
    Ok,
    Yield,
    Error,
}
148
/// A rooted value
pub struct RootedValue<T>
where
    T: VmRootInternal,
{
    // The thread (or handle to one) whose GC owns `value`
    vm: T,
    // True while this handle has an entry registered in `vm.rooted_values`
    rooted: bool,
    value: Value,
}
158
impl<T> Deref for RootedValue<T>
where
    T: VmRootInternal,
{
    type Target = Value;
    // Transparent access to the rooted `Value`
    fn deref(&self) -> &Value {
        &self.value
    }
}
168
unsafe impl<T> Trace for RootedValue<T>
where
    T: VmRootInternal,
{
    // Rooting/unrooting delegates to the same bookkeeping used by `Drop`/`Clone`
    unsafe fn root(&mut self) {
        self.root_();
    }
    unsafe fn unroot(&mut self) {
        self.unroot_();
    }
    fn trace(&self, gc: &mut Gc) {
        self.value.trace(gc);
    }
}
183
impl<T> Clone for RootedValue<T>
where
    T: VmRootInternal + Clone,
{
    // Cloning registers an additional root for the same value
    fn clone(&self) -> Self {
        // SAFETY `self.vm` owns the value already, we just create another root
        unsafe { RootedValue::new(self.vm.clone(), &self.value) }
    }
}
193
194impl<T, U> PartialEq<RootedValue<U>> for RootedValue<T>
195where
196    T: VmRootInternal,
197    U: VmRootInternal,
198{
199    fn eq(&self, other: &RootedValue<U>) -> bool {
200        self.value == other.value
201    }
202}
203
impl<T> Drop for RootedValue<T>
where
    T: VmRootInternal,
{
    fn drop(&mut self) {
        // Only release the root if it has not already been handed off
        // (e.g. by `into_owned`, which calls `mem::forget`)
        if self.rooted {
            self.unroot_();
        }
    }
}
214
215impl<T> fmt::Debug for RootedValue<T>
216where
217    T: VmRootInternal,
218{
219    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
220        write!(f, "{:?}", self.value)
221    }
222}
223
224impl<T> RootedValue<T>
225where
226    T: VmRootInternal,
227{
228    pub fn re_root<'vm, U>(&self, vm: U) -> Result<RootedValue<U>>
229    where
230        U: VmRoot<'vm>,
231    {
232        // SAFETY deep cloning ensures that `vm` owns the value
233        unsafe {
234            let value = {
235                let value = vm.deep_clone_value(&self.vm, &self.value)?;
236                value.get_value().clone_unrooted()
237            };
238
239            Ok(RootedValue::new(vm, &value))
240        }
241    }
242
243    // SAFETY The value must be owned by `vm`'s GC
244    unsafe fn new(vm: T, value: &Value) -> Self {
245        vm.rooted_values
246            .write()
247            .unwrap()
248            .push(value.clone_unrooted());
249        RootedValue {
250            vm,
251            rooted: true,
252            value: value.clone_unrooted(),
253        }
254    }
255
256    pub fn get_value(&self) -> &Value {
257        &self.value
258    }
259
260    pub fn get_variant(&self) -> Variants {
261        Variants::new(&self.value)
262    }
263
264    pub fn vm(&self) -> &T {
265        &self.vm
266    }
267
268    pub fn vm_mut(&mut self) -> &mut T {
269        &mut self.vm
270    }
271
272    pub fn clone_vm(&self) -> T
273    where
274        T: Clone,
275    {
276        self.vm.clone()
277    }
278
279    /// looks up the field at the given offset
280    pub fn get<'vm>(&'vm self, index: usize) -> Option<RootedValue<T>>
281    where
282        T: VmRoot<'vm>,
283    {
284        match self.get_variant().as_ref() {
285            ValueRef::Data(ref v) => v.get_variant(index).map(|value| self.vm.root_value(value)),
286            _ => None,
287        }
288    }
289
290    /// looks up the record field with the given name
291    pub fn get_field<'vm>(&'vm self, name: &str) -> Option<RootedValue<T>>
292    where
293        T: VmRoot<'vm>,
294    {
295        match self.get_variant().as_ref() {
296            ValueRef::Data(ref v) => v
297                .lookup_field(&*self.vm, name)
298                .map(|value| self.vm.root_value(value)),
299            _ => None,
300        }
301    }
302
303    pub fn as_ref(&self) -> RootedValue<&Thread> {
304        self.vm.root_value(self.get_variant())
305    }
306
307    unsafe fn root_(&mut self) {
308        self.vm.root_vm();
309        let mut rooted_values = self.vm.rooted_values.write().unwrap();
310        assert!(self.rooted);
311        self.rooted = true;
312        rooted_values.push(self.value.clone_unrooted());
313    }
314
315    fn unroot_(&mut self) {
316        self.vm.unroot_vm();
317        let mut rooted_values = self.vm.rooted_values.write().unwrap();
318        self.rooted = false;
319        let i = rooted_values
320            .iter()
321            .position(|p| p.obj_eq(&self.value))
322            .unwrap_or_else(|| ice!("Rooted value has already been dropped"));
323        rooted_values.swap_remove(i);
324    }
325
326    pub fn into_owned(self) -> RootedValue<RootedThread> {
327        let value = RootedValue {
328            vm: self.vm.root_thread(),
329            rooted: self.rooted,
330            value: unsafe { self.value.clone_unrooted() },
331        };
332        mem::forget(self);
333        value
334    }
335}
336
impl<'vm> RootedValue<&'vm Thread> {
    // Returns the thread reference with its full `'vm` lifetime
    // (the inherent `vm()` getter only borrows from `self`)
    pub fn vm_(&self) -> &'vm Thread {
        self.vm
    }
}
342
/// The roots scanned during a collection: a thread plus its stack, which is
/// borrowed separately because the thread's context is already locked.
struct Roots<'b> {
    vm: &'b GcPtr<Thread>,
    stack: &'b Stack,
}
unsafe impl<'b> Trace for Roots<'b> {
    // `Roots` is a transient view used only while collecting; it is never
    // rooted or unrooted itself
    unsafe fn unroot(&mut self) {
        unreachable!()
    }
    unsafe fn root(&mut self) {
        unreachable!()
    }

    fn trace(&self, gc: &mut Gc) {
        // Since this vm's stack is already borrowed in self we need to manually mark it to prevent
        // it from being traced normally
        gc.mark(&self.vm);
        self.stack.trace(gc);

        // Traverse the vm's fields, avoiding the stack which is traced above
        self.vm.trace_fields_except_stack(gc);
    }
}
365
impl<'b> crate::gc::CollectScope for Roots<'b> {
    // Runs `sweep` with every child thread's context locked so their heaps
    // can be swept in the same collection
    fn scope<F>(&self, gc: &mut Gc, sweep: F)
    where
        F: FnOnce(&mut Gc),
    {
        // We need to pretend that the threads lives for longer than it does on the stack or we
        // can't move the RwLockGuard into the vec. This does end up safe in the end because we
        // never leak any lifetimes outside of this function
        unsafe {
            let locks = self.mark_child_roots(gc);

            // Remove any threads that aren't marked as they are about to be collected

            sweep(gc);

            // `sweep` all child gcs
            for (_, mut context, _) in locks {
                context.gc.sweep();
            }
        }
    }
}
388
impl<'b> Roots<'b> {
    /// Traces every transitive child thread of `self.vm`, locking each child's
    /// context and returning the guards so the locks stay held (and the heaps
    /// can be swept) for the remainder of the collection.
    unsafe fn mark_child_roots(
        &self,
        gc: &mut Gc,
    ) -> Vec<(
        sync::RwLockReadGuard<ThreadSlab>,
        MutexGuard<Context>,
        GcPtr<Thread>,
    )> {
        // Worklist of threads still to visit
        let mut stack: Vec<GcPtr<Thread>> = Vec::new();
        let mut locks: Vec<(_, _, GcPtr<Thread>)> = Vec::new();

        let child_threads = self.vm.child_threads.read().unwrap();
        stack.extend(child_threads.iter().map(|(_, t)| t.clone()));

        while let Some(thread_ptr) = stack.pop() {
            // Skip threads whose context we already locked (pointer identity)
            if locks.iter().any(|&(_, _, ref lock_thread)| {
                &*thread_ptr as *const Thread == &**lock_thread as *const Thread
            }) {
                continue;
            }

            // Detach the lifetime from `thread_ptr` so the guards below can be
            // stored alongside it in `locks` (see the comment in `scope`)
            let thread = &*(&*thread_ptr as *const Thread);

            let context = thread.context.lock().unwrap();

            let child_threads = thread.child_threads.read().unwrap();
            stack.extend(child_threads.iter().map(|(_, t)| t.clone()));

            // Since we locked the context we need to scan the thread using `Roots` rather than
            // letting it be scanned normally
            Roots {
                vm: &thread_ptr,
                stack: &context.stack,
            }
            .trace(gc);

            Vec::push(&mut locks, (child_threads, context, thread_ptr));
        }
        locks
    }
}
431
// All threads MUST be allocated in the garbage collected heap. This is necessary as a thread
// calling collect need to mark itself if it is on the garbage collected heap and it has no way of
// knowing whether it is or not. So the only way of allowing it to mark itself is to disallow it to
// be allocated anywhere else.
/// Representation of the virtual machine
#[cfg_attr(feature = "serde_derive", derive(DeserializeState, SerializeState))]
#[cfg_attr(
    feature = "serde_derive",
    serde(
        deserialize_state = "crate::serialization::DeSeed<'gc>",
        de_parameters = "'gc"
    )
)]
#[cfg_attr(
    feature = "serde_derive",
    serde(serialize_state = "crate::serialization::SeSeed")
)]
pub struct Thread {
    #[cfg_attr(
        feature = "serde_derive",
        serde(state_with = "crate::base::serialization::shared")
    )]
    global_state: Arc<GlobalVmState>,
    // The parent of this thread, if it exists must live at least as long as this thread as this
    // thread can refer to any value in the parent thread
    #[cfg_attr(feature = "serde_derive", serde(state))]
    parent: Option<GcPtr<Thread>>,

    // Values explicitly rooted via `RootedValue`; traced as GC roots
    #[cfg_attr(feature = "serde_derive", serde(state))]
    rooted_values: RwLock<Vec<Value>>,

    /// All threads which this thread have spawned in turn. Necessary as this thread needs to scan
    /// the roots of all its children as well since those may contain references to this threads
    /// garbage collected values
    #[cfg_attr(feature = "serde_derive", serde(skip))]
    pub(crate) child_threads: RwLock<ThreadSlab>,
    // Default to an invalid index so we panic reliably if it is not filled in when deserializing
    #[cfg_attr(feature = "serde_derive", serde(skip, default = "usize::max_value"))]
    pub(crate) thread_index: usize,

    // Execution state (stack + heap); locked while the thread runs
    #[cfg_attr(feature = "serde_derive", serde(state))]
    context: Mutex<Context>,

    // Set by `interrupt()` to cooperatively stop execution
    #[cfg_attr(feature = "serde_derive", serde(skip))]
    interrupt: AtomicBool,
}
478
impl fmt::Debug for Thread {
    // Threads are identified by address since they have no meaningful name
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Thread({:p})", self)
    }
}
484
// Allows `Thread` values to be stored in the VM as opaque userdata
impl Userdata for Thread {}
486
impl VmType for Thread {
    type Type = Self;
}
490
unsafe impl Trace for Thread {
    unsafe fn root(&mut self) {
        // Thread is always behind a `GcPtr`
    }
    unsafe fn unroot(&mut self) {
        // Ditto
    }
    fn trace(&self, gc: &mut Gc) {
        // The stack is traced here since nothing else has it borrowed;
        // during collection `Roots::trace` handles it instead
        self.trace_fields_except_stack(gc);
        self.context.lock().unwrap().stack.trace(gc);
    }
}
503
504impl PartialEq for Thread {
505    fn eq(&self, other: &Thread) -> bool {
506        self as *const _ == other as *const _
507    }
508}
509
impl VmType for RootedThread {
    type Type = Thread;
}
513
impl<'vm> Pushable for RootedThread where for doc purposes only
520
impl<'vm, 'value> Getable<'vm, 'value> for RootedThread {
    impl_getable_simple!();

    // Extracts and roots a thread from a VM value; any other variant is a VM bug
    fn from_value(_: &'vm Thread, value: Variants<'value>) -> Self {
        match value.as_ref() {
            ValueRef::Thread(thread) => thread.root_thread(),
            _ => ice!("ValueRef is not a Thread"),
        }
    }
}
531
/// An instance of `Thread` which is rooted. See the `Thread` type for documentation on interacting
/// with the type.
#[derive(Debug)]
#[cfg_attr(feature = "serde_derive", derive(SerializeState))]
#[cfg_attr(
    feature = "serde_derive",
    serde(serialize_state = "crate::serialization::SeSeed")
)]
pub struct RootedThread {
    #[cfg_attr(feature = "serde_derive", serde(state))]
    thread: GcPtr<Thread>,
    // False after `root_`/`unroot_` hand-offs; prevents double unrooting on drop
    #[cfg_attr(feature = "serde_derive", serde(skip))]
    rooted: bool,
}
546
#[cfg(feature = "serde_derive")]
impl<'de, 'gc> serde::de::DeserializeState<'de, crate::serialization::DeSeed<'gc>>
    for RootedThread
{
    // Deserializes the inner `GcPtr<Thread>` via a proxy struct and then roots it
    fn deserialize_state<D>(
        seed: &mut crate::serialization::DeSeed<'gc>,
        deserializer: D,
    ) -> StdResult<Self, <D as serde::Deserializer<'de>>::Error>
    where
        D: serde::Deserializer<'de>,
    {
        #[derive(DeserializeState)]
        #[serde(
            deserialize_state = "crate::serialization::DeSeed<'gc>",
            de_parameters = "'gc"
        )]
        pub struct RootedThreadProxy {
            #[serde(state)]
            thread: GcPtr<Thread>,
        }

        let RootedThreadProxy { thread } =
            RootedThreadProxy::deserialize_state(seed, deserializer)?;

        Ok(thread.root_thread())
    }
}
574
impl Drop for Thread {
    fn drop(&mut self) {
        // Make sure that context reference is dropped before the Gc itself as the RwLock is dropped
        // when the Gc is dropped
        let mut gc_to_drop = {
            // The child threads need to refer to `self` so drop the gc (and thus the child threads)
            // first so that `self` is valid while dropping them
            let context = self.context.get_mut().unwrap_or_else(|err| {
                // Ignore poisoning since we don't need to interact with the Gc values, only
                // drop them
                err.into_inner()
            });
            ::std::mem::replace(&mut context.gc, Gc::new(Generation::default(), 0))
        };

        // SAFETY GcPtr's may not leak outside of the `Thread` so we can safely clear it when
        // dropping the thread
        unsafe {
            gc_to_drop.clear();
        }

        // Unregister this thread from its parent (or the global generation-0 slab)
        let mut parent_threads = self.parent_threads();
        parent_threads.remove(self.thread_index);
    }
}
600
impl Drop for RootedThread {
    fn drop(&mut self) {
        if self.rooted && self.unroot_() {
            // The last RootedThread was dropped, there is no way to refer to the global state any
            // longer so drop everything
            let mut gc_ref = self.thread.global_state.gc.lock().unwrap_or_else(|err| {
                // Ignore poisoning since we don't need to interact with the Gc values, only
                // drop them
                err.into_inner()
            });
            let mut gc_to_drop = std::mem::replace(&mut *gc_ref, Gc::new(Generation::default(), 0));
            // Make sure that the RefMut is dropped before the Gc itself as the RwLock is dropped
            // when the Gc is dropped
            drop(gc_ref);

            // Macros can contain unrooted thread references via the database so we must drop those first
            self.global_state.get_macros().clear();

            // SAFETY GcPtr's may not leak outside of the `Thread` so we can safely clear it when
            // dropping the thread
            unsafe {
                gc_to_drop.clear();
            }
        }
    }
}
627
impl Deref for RootedThread {
    type Target = Thread;
    // A `RootedThread` is used exactly like a `Thread` reference
    fn deref(&self) -> &Thread {
        &self.thread
    }
}
634
impl Clone for RootedThread {
    // Cloning creates an additional root (bumps the global reference count)
    fn clone(&self) -> RootedThread {
        self.root_thread()
    }
}
640
unsafe impl Trace for RootedThread {
    unsafe fn root(&mut self) {
        self.root_();
    }
    unsafe fn unroot(&mut self) {
        self.unroot_();
    }
    fn trace(&self, gc: &mut Gc) {
        self.thread.trace(gc);
    }
}
652
impl RootedThread {
    /// Creates a new virtual machine with an empty global environment
    pub fn new() -> RootedThread {
        RootedThread::with_global_state(GlobalVmStateBuilder::default().build())
    }

    /// Creates a new virtual machine using `global_state` as its global environment.
    pub fn with_global_state(mut global_state: GlobalVmState) -> RootedThread {
        let context = Mutex::new(Context::new(
            global_state.gc.get_mut().unwrap().new_child_gc(),
        ));
        let global_state = Arc::new(global_state);
        let thread = Thread {
            parent: None,
            context,
            global_state: global_state.clone(),
            rooted_values: RwLock::new(Vec::new()),
            child_threads: Default::default(),
            // Invalid index; the real slab key is assigned below
            interrupt: AtomicBool::new(false),
            thread_index: usize::max_value(),
        };

        let ptr = unsafe {
            // Bootstrap: allocate the root thread in a temporary Gc, then move
            // that Gc into the global state the thread itself holds
            let mut gc = Gc::new(Generation::default(), usize::MAX);
            let mut ptr = gc
                .alloc_owned(Move(thread))
                .expect("Not enough memory to allocate thread")
                .unrooted();
            *ptr.global_state.gc.lock().unwrap() = gc;

            // Register the thread in the generation-0 slab and record its key
            let mut parent_threads = global_state.generation_0_threads.write().unwrap();
            let entry = parent_threads.vacant_entry();
            ptr.thread_index = entry.key();
            let ptr = GcPtr::from(ptr);
            entry.insert(ptr.unrooted());
            ptr
        };

        let vm = ptr.root_thread();

        // Enter the top level scope
        {
            let mut context = vm.context.lock().unwrap();
            StackFrame::<State>::new_frame(&mut context.stack, 0, State::Unknown).unwrap();
        }
        vm
    }

    /// Converts a `RootedThread` into a raw pointer allowing to be passed through a C api.
    /// The reference count for the thread is not modified
    pub fn into_raw(self) -> *const Thread {
        assert!(self.rooted);
        let ptr: *const Thread = &*self.thread;
        ::std::mem::forget(self);
        ptr
    }

    /// Converts a raw pointer into a `RootedThread`.
    /// The reference count for the thread is not modified so it is up to the caller to ensure that
    /// the count is correct.
    pub unsafe fn from_raw(ptr: *const Thread) -> RootedThread {
        RootedThread {
            thread: GcPtr::from_raw(ptr),
            rooted: true,
        }
    }

    // Re-acquires a root for a handle that was previously unrooted
    fn root_(&mut self) {
        assert!(!self.rooted);
        self.rooted = true;
        self.global_state
            .thread_reference_count
            .fetch_add(1, atomic::Ordering::Relaxed);
    }

    // Releases this handle's root. Returns `true` if this was the last root,
    // in which case the caller must tear down the global state.
    fn unroot_(&mut self) -> bool {
        let root_count = {
            if !self.rooted {
                return false;
            }
            self.rooted = false;
            let root_count = self
                .global_state
                .thread_reference_count
                .fetch_sub(1, atomic::Ordering::Release);
            assert!(root_count > 0);
            root_count - 1
        };

        root_count == 0
    }
}
744
impl Thread {
    /// Spawns a new gluon thread with its own stack and heap but while still sharing the same
    /// global environment
    pub fn new_thread(&self) -> Result<RootedThread> {
        let vm = Thread {
            global_state: self.global_state.clone(),
            // SAFETY assumption: `self` lives in the GC heap (see the comment
            // above the `Thread` struct), so taking a GcPtr to it is valid
            parent: Some(unsafe { GcPtr::from_raw(self) }),
            context: Mutex::new(Context::new(self.owned_context().gc.new_child_gc())),
            rooted_values: RwLock::new(Vec::new()),
            child_threads: Default::default(),
            interrupt: AtomicBool::new(false),
            thread_index: usize::max_value(),
        };
        // Enter the top level scope
        {
            let mut context = vm.owned_context();
            StackFrame::<State>::new_frame(&mut context.stack, 0, State::Unknown).unwrap();
        }
        let ptr = {
            let mut context = self.context();
            let mut ptr = context.alloc_owned(Move(vm))?;

            unsafe {
                // Register the child in this thread's slab so its roots are
                // scanned during collection
                let mut parent_threads = self.child_threads.write().unwrap();
                let entry = parent_threads.vacant_entry();
                ptr.thread_index = entry.key();
                let ptr = GcRef::from(ptr).unrooted();
                entry.insert(ptr.clone_unrooted());
                ptr
            }
        };

        Ok(ptr.root_thread())
    }

    /// Roots `self`, extending the lifetime of this thread until at least the returned
    /// `RootedThread` is dropped
    pub fn root_thread(&self) -> RootedThread {
        unsafe {
            let thread = GcPtr::from_raw(self);

            // Using a relaxed ordering is alright here, as knowledge of the
            // original reference prevents other threads from erroneously deleting
            // the object.
            let old_count = self
                .global_state
                .thread_reference_count
                .fetch_add(1, atomic::Ordering::Relaxed);

            // Guard against refcount overflow (same scheme as `Arc`)
            const MAX_REFCOUNT: usize = std::isize::MAX as usize;
            if old_count > MAX_REFCOUNT {
                std::process::abort();
            }

            RootedThread {
                thread,
                rooted: true,
            }
        }
    }

    pub fn spawner(&self) -> Option<&(dyn futures::task::Spawn + Send + Sync)> {
        self.global_env().spawner()
    }

    /// Retrieves the global called `name`.
    ///
    /// # Examples
    ///
    /// Bind the `(+)` function in gluon's prelude standard library
    /// to an `add` function in rust
    ///
    /// ```rust
    /// # use gluon::{new_vm_async, Thread, ThreadExt};
    /// # use gluon::vm::api::{FunctionRef, Hole, OpaqueValue};
    /// # #[tokio::main]
    /// # async fn main() {
    ///
    /// # if ::std::env::var("GLUON_PATH").is_err() {
    /// #     ::std::env::set_var("GLUON_PATH", "..");
    /// # }
    ///
    /// let vm = new_vm_async().await;
    ///
    /// vm.run_expr_async::<OpaqueValue<&Thread, Hole>>("example", r#" import! std.int "#)
    ///     .await
    ///     .unwrap_or_else(|err| panic!("{}", err));
    /// let mut add: FunctionRef<fn(i32, i32) -> i32> =
    ///     vm.get_global("std.int.num.(+)").unwrap();
    /// let result = add.call_async(1, 2).await;
    /// assert_eq!(result, Ok(3));
    /// # }
    /// ```
    ///
    /// # Errors
    ///
    /// if the global does not exist or it does not have the correct type.
    ///
    pub fn get_global<'vm, T>(&'vm self, name: &str) -> Result<T>
    where
        T: for<'value> Getable<'vm, 'value> + VmType,
    {
        use crate::check::check_signature;

        let expected = T::make_type(self);

        let env = self.get_env();
        let (value, actual) = env.get_binding(name)?;

        // Finally check that type of the returned value is correct
        if check_signature(&env, &expected, &actual) {
            Ok(T::from_value(self, Variants::new(&value)))
        } else {
            Err(Error::WrongType(expected, actual))
        }
    }

    /// Returns the type of the global called `name` without retrieving its value.
    pub fn get_global_type(&self, name: &str) -> Result<ArcType> {
        let env = self.get_env();
        let (_value, actual) = env.get_binding(name)?;
        Ok(actual)
    }

    /// Retrieves type information about the type `name`. Types inside records can be accessed
    /// using dot notation (std.prelude.Option)
    pub fn find_type_info(&self, name: &str) -> Result<types::Alias<Symbol, ArcType>> {
        let env = self.get_env();
        env.find_type_info(name)
    }

    /// Returns the gluon type that was bound to `T`
    pub fn get_type<T: ?Sized + Any>(&self) -> Option<ArcType> {
        self.global_env().get_type::<T>()
    }

    /// Registers the type `T` as being a gluon type called `name` with generic arguments `args`
    pub fn register_type<T: ?Sized + Any>(&self, name: &str, args: &[&str]) -> Result<ArcType> {
        self.global_env().register_type::<T>(name, args)
    }
    /// Registers `alias` under `name` for the Rust type identified by `id`
    pub fn register_type_as(
        &self,
        name: Symbol,
        alias: Alias<Symbol, ArcType>,
        id: TypeId,
    ) -> Result<ArcType> {
        self.global_env().register_type_as(name, alias, id)
    }

    pub fn get_cache_alias(&self, name: &str) -> Option<ArcType> {
        self.global_env().get_cache_alias(name)
    }

    pub fn cache_alias(&self, alias: Alias<Symbol, ArcType>) -> ArcType {
        self.global_env().cache_alias(alias)
    }

    /// Locks and retrieves the global environment of the vm
    pub fn get_env<'b>(&'b self) -> VmEnvInstance<'b> {
        self.global_env().get_env(self)
    }

    #[doc(hidden)]
    pub fn get_lookup_env<'t>(&'t self) -> VmEnvInstance<'t> {
        self.global_env().get_lookup_env(self)
    }

    /// Retrieves the macros defined for this vm
    pub fn get_macros(&self) -> &MacroEnv {
        self.global_env().get_macros()
    }

    /// Runs a garbage collection.
    pub fn collect(&self) {
        let mut context = self.owned_context();
        self.collect_with_context(&mut context);
    }

    fn collect_with_context(&self, context: &mut OwnedContext) {
        // `context` must belong to `self`; `with_roots` relies on this
        debug_assert!(ptr::eq::<Thread>(self, context.thread));
        self.with_roots(context, |gc, roots| unsafe {
            gc.collect(roots);
        })
    }

    /// Pushes a value to the top of the stack
    pub fn push<'vm, T>(&'vm self, v: T) -> Result<()>
    where
        T: Pushable<'vm>,
    {
        let mut context = self.current_context();
        v.vm_push(&mut context)
    }

    /// Removes the top value from the stack
    pub fn pop(&self) {
        self.owned_context().stack.pop();
    }

    pub fn allocated_memory(&self) -> usize {
        self.owned_context().gc.allocated_memory()
    }

    pub fn set_memory_limit(&self, memory_limit: usize) {
        self.owned_context().gc.set_memory_limit(memory_limit)
    }

    /// Requests that the currently executing gluon code stop at its next check point
    pub fn interrupt(&self) {
        self.interrupt.store(true, atomic::Ordering::Relaxed)
    }

    pub fn interrupted(&self) -> bool {
        self.interrupt.load(atomic::Ordering::Relaxed)
    }

    #[doc(hidden)]
    pub fn global_env(&self) -> &Arc<GlobalVmState> {
        &self.global_state
    }

    pub fn current_context(&self) -> ActiveThread {
        ActiveThread {
            thread: self,
            context: Some(self.context().context),
        }
    }

    fn owned_context(&self) -> OwnedContext {
        self.context()
    }

    // Traces everything except the stack; the stack is traced separately since
    // it may already be borrowed during collection (see `Roots`)
    fn trace_fields_except_stack(&self, gc: &mut Gc) {
        if gc.generation().is_root() {
            self.global_state.trace(gc);
        }
        self.rooted_values.read().unwrap().trace(gc);
        self.child_threads.read().unwrap().trace(gc);
    }

    // The slab this thread is registered in: the parent's children, or the
    // global generation-0 slab for root threads
    pub(crate) fn parent_threads(&self) -> sync::RwLockWriteGuard<ThreadSlab> {
        match self.parent {
            Some(ref parent) => parent.child_threads.write().unwrap(),
            None => self.global_state.generation_0_threads.write().unwrap(),
        }
    }

    fn with_roots<F, R>(&self, context: &mut Context, f: F) -> R
    where
        F: for<'b> FnOnce(&mut Gc, Roots<'b>) -> R,
    {
        // For this to be safe we require that the received stack is the same one that is in this
        // VM
        {
            let self_context: *const _ = &self.context;
            let context: *const _ = context;
            assert!(unsafe {
                context as usize >= self_context as usize
                    && context as usize <= self_context.offset(1) as usize
            });
        }
        let ptr = unsafe {
            // Threads must only be on the garbage collectors heap which makes this safe
            GcPtr::from_raw(self)
        };
        let roots = Roots {
            vm: &ptr,
            stack: &context.stack,
        };
        f(&mut context.gc, roots)
    }
}
1015
/// Types which can serve as a (potentially owned) root for a `Thread`.
pub trait VmRoot<'a>: VmRootInternal + 'a {
    /// Creates a new root from `thread`.
    fn new_root(thread: &'a Thread) -> Self;
}
1019
/// Internal rooting operations shared by all `VmRoot` implementors.
pub trait VmRootInternal: Deref<Target = Thread> + Clone {
    /// Roots the thread itself, keeping it alive.
    fn root_vm(&mut self);

    /// Reverses a previous `root_vm` call.
    fn unroot_vm(&mut self);

    /// Roots a value
    ///
    /// # Safety
    ///
    /// `value` must be kept alive by this thread's garbage collector for as
    /// long as the returned `RootedValue` exists (see call sites for the
    /// invariant they uphold).
    unsafe fn root_value_with_self(self, value: &Value) -> RootedValue<Self>
    where
        Self: Sized,
    {
        RootedValue::new(self, value)
    }
}
1033
// A plain borrow is already kept alive by the borrow checker, so it can act
// as a root directly.
impl<'a> VmRoot<'a> for &'a Thread {
    fn new_root(thread: &'a Thread) -> Self {
        thread
    }
}
1039
impl<'a> VmRootInternal for &'a Thread {
    // No-ops: the `&Thread` borrow itself keeps the thread alive.
    fn root_vm(&mut self) {}

    fn unroot_vm(&mut self) {}
}
1045
// An owned root is created by rooting the thread.
impl<'a> VmRoot<'a> for RootedThread {
    fn new_root(thread: &'a Thread) -> Self {
        thread.root_thread()
    }
}
1051
impl VmRootInternal for RootedThread {
    // Delegates to `RootedThread`'s internal rooting helpers.
    fn root_vm(&mut self) {
        self.root_();
    }

    fn unroot_vm(&mut self) {
        self.unroot_();
    }
}
1061
/// Internal functions for interacting with threads. These functions should be considered both
/// unsafe and unstable.
#[async_trait]
pub trait ThreadInternal: Sized
where
    Self: ::std::borrow::Borrow<Thread>,
{
    /// Locks and retrieves this thread's stack
    fn context(&self) -> OwnedContext;

    /// Roots a value
    fn root_value<'vm, T>(&'vm self, value: Variants) -> RootedValue<T>
    where
        T: VmRoot<'vm>;

    /// Evaluates a zero argument function (a thunk)
    async fn call_thunk(&self, closure: &GcPtr<ClosureData>) -> Result<RootedValue<RootedThread>>;

    /// Like `call_thunk` but on failure resets the stack to the frame depth
    /// it had before the call and attaches the unwound frames as a stacktrace
    /// to `Error::Panic` errors.
    async fn call_thunk_top(
        &self,
        closure: &GcPtr<ClosureData>,
    ) -> Result<RootedValue<RootedThread>>
    where
        Self: Send + Sync,
    {
        let self_ = RootedThread::new_root(self.borrow());
        // Frame depth to restore if the call fails
        let level = self_.context().stack.get_frames().len();

        self.call_thunk(closure).await.or_else(move |mut err| {
            let mut context = self_.context();
            let stack = StackFrame::<State>::current(&mut context.stack);
            let new_trace = reset_stack(stack, level)?;
            if let Error::Panic(_, ref mut trace) = err {
                *trace = Some(new_trace);
            }
            Err(err)
        })
    }

    /// Executes an `IO` action
    async fn execute_io(&self, value: Variants<'_>) -> Result<RootedValue<RootedThread>>;

    /// Like `execute_io` but resets the stack on failure (see `call_thunk_top`)
    async fn execute_io_top(&self, value: Variants<'_>) -> Result<RootedValue<RootedThread>>
    where
        Self: Send + Sync,
    {
        let self_ = RootedThread::new_root(self.borrow());
        let level = self_.context().stack.get_frames().len();
        self.execute_io(value).await.or_else(move |mut err| {
            let mut context = self_.context();
            let stack = StackFrame::<State>::current(&mut context.stack);
            let new_trace = reset_stack(stack, level)?;
            if let Error::Panic(_, ref mut trace) = err {
                *trace = Some(new_trace);
            }
            Err(err)
        })
    }

    /// Calls a function on the stack.
    /// When this function is called it is expected that the function exists at
    /// `stack.len() - args - 1` and that the arguments are of the correct type
    fn call_function<'b>(
        &'b self,
        cx: &mut task::Context<'_>,
        stack: OwnedContext<'b>,
        args: VmIndex,
    ) -> Poll<Result<Option<OwnedContext<'b>>>>;

    /// Resumes execution of the frames currently on this thread's stack
    fn resume(&self, cx: &mut task::Context<'_>) -> Poll<Result<OwnedContext>>;

    /// Clones `value`, which is owned by `owner`, into this thread's heap
    fn deep_clone_value(&self, owner: &Thread, value: &Value) -> Result<RootedValue<&Thread>>;

    /// Returns whether values may be passed between `self` and `other`
    /// without a full clone
    fn can_share_values_with(&self, gc: &mut Gc, other: &Thread) -> bool;
}
1137
#[async_trait]
impl ThreadInternal for Thread {
    fn context(&self) -> OwnedContext {
        OwnedContext {
            thread: self,
            context: self.context.lock().unwrap(),
        }
    }

    /// Roots a value
    fn root_value<'vm, T>(&'vm self, value: Variants) -> RootedValue<T>
    where
        T: VmRoot<'vm>,
    {
        // NOTE(review): assumes the `Variants` value is alive in this thread
        // for the duration of the call — confirm at call sites
        unsafe { T::new_root(self).root_value_with_self(value.get_value()) }
    }

    async fn call_thunk(&self, closure: &GcPtr<ClosureData>) -> Result<RootedValue<RootedThread>> {
        // Manual poll-state machine: the first poll starts executing the
        // closure; if that suspends, an `Execute` future is stored and later
        // polls drive it to completion.
        let mut fut = None;
        future::poll_fn(|cx| match &mut fut {
            None => {
                let mut context = self.owned_context();
                // Push the closure and enter its frame with zero arguments
                context.stack.push(construct_gc!(Closure(@&closure)));
                StackFrame::<State>::current(&mut context.stack).enter_scope(
                    0,
                    &*construct_gc!(ClosureState {
                        @closure: gc::Borrow::new(closure),
                        instruction_index: 0,
                    }),
                )?;
                let mut context = match context.execute(cx) {
                    Poll::Pending => {
                        fut = Some(Execute::new(self.root_thread()));
                        return Poll::Pending;
                    }
                    Poll::Ready(Ok(context)) => {
                        context.expect("call_module to have the stack remaining")
                    }
                    Poll::Ready(Err(err)) => return Err(err).into(),
                };
                // The thunk's result is left on top of the stack
                let value = self.root_value(context.stack.last().unwrap());
                context.stack.pop();
                Ok(value).into()
            }
            Some(fut) => Pin::new(fut).poll(cx),
        })
        .await
    }

    /// Calls a module, allowed to run IO expressions
    async fn execute_io(&self, value: Variants<'_>) -> Result<RootedValue<RootedThread>> {
        trace!("Run IO {:?}", value);

        let mut fut = None;
        future::poll_fn(|cx| {
            match &mut fut {
                None => {
                    let mut context = self.context();
                    // Dummy value to fill the place of the function for TailCall
                    context
                        .stack
                        .extend(&[Variants::int(0), value.clone(), Variants::int(0)]);

                    context
                        .borrow_mut()
                        .enter_scope(2, &State::Unknown, false)?;
                    context = match self.call_function(cx, context, 1) {
                        Poll::Pending => {
                            fut = Some(Execute::new(self.root_thread()));
                            return Poll::Pending;
                        }
                        Poll::Ready(Ok(context)) => {
                            context.expect("call_module to have the stack remaining")
                        }
                        Poll::Ready(Err(err)) => return Err(err).into(),
                    };
                    // Root the result, then clear the scratch values pushed above
                    let result = self.root_value(context.stack.last().unwrap());
                    context.stack.pop();
                    {
                        let mut context = context.borrow_mut();
                        context.stack.clear();
                    }
                    let _ = context.exit_scope();
                    Ok(result).into()
                }
                Some(fut) => Pin::new(fut).poll(cx),
            }
        })
        .await
    }

    /// Calls a function on the stack.
    /// When this function is called it is expected that the function exists at
    /// `stack.len() - args - 1` and that the arguments are of the correct type
    fn call_function<'b>(
        &'b self,
        cx: &mut task::Context<'_>,
        mut context: OwnedContext<'b>,
        args: VmIndex,
    ) -> Poll<Result<Option<OwnedContext<'b>>>> {
        context.borrow_mut().do_call(args)?;
        context.execute(cx)
    }

    fn resume(&self, cx: &mut task::Context<'_>) -> Poll<Result<OwnedContext>> {
        let mut context = self.owned_context();
        if let Some(poll_fn) = context.poll_fns.last() {
            // Re-mark the suspended extern frames so their futures are polled
            // again by `execute`
            let frame_offset = poll_fn.frame_index as usize;
            for frame in &mut context.stack.get_frames_mut()[frame_offset..] {
                match frame.state {
                    State::Extern(ref mut e) => {
                        assert!(
                            e.call_state == ExternCallState::Pending
                                || e.call_state == ExternCallState::Poll
                        );
                        e.call_state = ExternCallState::Poll
                    }
                    _ => (),
                }
            }
        }
        if context.stack.get_frames().len() == 1 {
            // Only the top level frame left means that the thread has finished
            return Err(Error::Dead).into();
        }
        context = ready!(context.execute(cx))?.expect("Resume called on the top frame");
        Ok(context).into()
    }

    fn deep_clone_value(&self, owner: &Thread, value: &Value) -> Result<RootedValue<&Thread>> {
        let mut context = self.owned_context();
        // A full clone is required when the two threads may not share values
        let full_clone = !self.can_share_values_with(&mut context.gc, owner);
        let mut cloner = crate::value::Cloner::new(self, &mut context.gc);
        if full_clone {
            cloner.force_full_clone();
        }
        let value = cloner.deep_clone(value)?;
        // SAFETY `value` is just cloned and therefore tied to `self`
        unsafe { Ok(self.root_value_with_self(value.get_value())) }
    }

    fn can_share_values_with(&self, gc: &mut Gc, other: &Thread) -> bool {
        if self as *const Thread == other as *const Thread {
            return true;
        }
        // If the threads do not share the same global state then they are disjoint and can't share
        // values
        if &*self.global_state as *const GlobalVmState
            != &*other.global_state as *const GlobalVmState
        {
            return false;
        }
        // Otherwise the threads might be able to share values but only if they are on the same
        // branch of the generation tree (see src/gc.rs)
        // Search from the thread which MAY be a child to the parent. If `parent` could not be
        // found then the threads must be in different branches of the tree
        let self_gen = gc.generation();
        let other_gen = other.context.lock().unwrap().gc.generation();
        let (parent, mut child) = if self_gen.is_parent_of(other_gen) {
            (self, other)
        } else {
            (other, self)
        };
        while let Some(ref next) = child.parent {
            if &**next as *const Thread == parent as *const Thread {
                return true;
            }
            child = next;
        }
        false
    }
}
1310
/// Callback invoked by the VM for debug events (see `HookFlags`); returning
/// `Poll::Pending` suspends execution (propagated through `ready!`).
pub type HookFn = Box<dyn FnMut(&Thread, DebugInfo) -> Poll<Result<()>> + Send + Sync>;
1312
/// Information about the thread's execution state, passed to the debug hook
/// (`HookFn`) when it is invoked.
pub struct DebugInfo<'a> {
    // The stack at the time the hook fired
    stack: &'a Stack,
    // The event which caused the hook to fire
    state: HookFlags,
}
1317
1318impl fmt::Debug for DebugInfo<'_> {
1319    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1320        f.debug_struct("DebugInfo")
1321            .field("state", &self.state())
1322            .field(
1323                "stack_infos",
1324                &(0..self.stack_info_len())
1325                    .map(|i| self.stack_info(i).unwrap())
1326                    .collect::<Vec<_>>(),
1327            )
1328            .finish()
1329    }
1330}
1331
1332impl<'a> DebugInfo<'a> {
1333    /// Returns the reason for the hook being called
1334    pub fn state(&self) -> HookFlags {
1335        self.state
1336    }
1337
1338    /// Returns a struct which can be queried about information about the stack
1339    /// at a specific level where `0` is the currently executing frame.
1340    pub fn stack_info(&self, level: usize) -> Option<StackInfo> {
1341        let frames = self.stack.get_frames();
1342        if level < frames.len() {
1343            Some(StackInfo {
1344                info: self,
1345                index: frames.len() - level - 1,
1346            })
1347        } else {
1348            None
1349        }
1350    }
1351
1352    pub fn stack_info_len(&self) -> usize {
1353        self.stack.get_frames().len()
1354    }
1355}
1356
/// A queryable view of a single stack frame, created via
/// `DebugInfo::stack_info`.
pub struct StackInfo<'a> {
    info: &'a DebugInfo<'a>,
    // Index into the stack's frames (`0` is the bottom-most frame)
    index: usize,
}
1361
1362impl fmt::Debug for StackInfo<'_> {
1363    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1364        f.debug_struct("DebugInfo")
1365            .field("frame", &self.frame())
1366            .field("line", &self.line())
1367            .field("source_name", &self.source_name())
1368            .field("function_name", &self.function_name())
1369            .field("locals", &self.locals().collect::<Vec<_>>())
1370            .field("upvars", &self.upvars())
1371            .finish()
1372    }
1373}
1374
impl<'a> StackInfo<'a> {
    // The raw frame this `StackInfo` refers to
    fn frame(&self) -> &Frame {
        &self.info.stack.get_frames()[self.index]
    }

    // For frames except the top we subtract one to account for the `Call` instruction adding one
    fn instruction_index(&self, instruction_index: usize) -> usize {
        if self.info.stack.get_frames().len() - 1 == self.index {
            instruction_index
        } else {
            instruction_index - 1
        }
    }

    /// Returns the line which created the current instruction of this frame
    pub fn line(&self) -> Option<Line> {
        let frame = self.frame();
        match frame.state {
            State::Closure(ClosureState {
                ref closure,
                instruction_index,
            }) => closure
                .function
                .debug_info
                .source_map
                .line(self.instruction_index(instruction_index)),
            // Only closure frames carry source map information
            _ => None,
        }
    }

    /// Returns the name of the source which defined the function executing at this frame
    pub fn source_name(&self) -> &str {
        match self.frame().state {
            State::Closure(ClosureState { ref closure, .. }) => {
                &closure.function.debug_info.source_name
            }
            _ => "<unknown>",
        }
    }

    /// Returns the name of the function executing at this frame
    pub fn function_name(&self) -> Option<&str> {
        match self.frame().state {
            State::Unknown => None,
            State::Closure(ClosureState { ref closure, .. }) => {
                Some(closure.function.name.declared_name())
            }
            State::Extern(ref function) => Some(function.function.id.declared_name()),
        }
    }

    /// Returns an iterator over all locals available at the current executing instruction
    pub fn locals(&self) -> LocalIter {
        let frame = self.frame();
        match frame.state {
            State::Closure(ClosureState {
                ref closure,
                instruction_index,
            }) => closure
                .function
                .debug_info
                .local_map
                .locals(self.instruction_index(instruction_index)),
            // Non-closure frames have no local variable information
            _ => LocalIter::empty(),
        }
    }

    /// Returns a slice with information about the values bound to this closure
    pub fn upvars(&self) -> &[UpvarInfo] {
        match self.frame().state {
            State::Closure(ClosureState { ref closure, .. }) => &closure.function.debug_info.upvars,
            _ => &[],
        }
    }
}
1450
bitflags::bitflags! {
    /// Events which cause the debug hook (`HookFn`) to be called
    #[derive(Default)]
    pub struct HookFlags: u8 {
        /// Call the hook when execution moves to a new line
        const LINE_FLAG = 0b01;
        /// Call the hook when a function is called
        const CALL_FLAG = 0b10;
    }
}
1460
/// State for the (optional) debug hook of a `Context`.
#[derive(Default)]
struct Hook {
    // The installed hook callback, if any
    function: Option<HookFn>,
    // Which events trigger the hook
    flags: HookFlags,
    // The index of the last executed instruction
    previous_instruction_index: usize,
}
1468
/// Type-erased callback which polls a future returned from an extern function
/// (created by `Context::return_future`).
type PollFnInner<'a> = Box<
    dyn for<'vm> FnMut(
            &mut task::Context<'_>,
            &'vm Thread,
        ) -> Poll<super::Result<OwnedContext<'vm>>>
        + Send
        + 'a,
>;
1477
/// A pending extern-function future paired with the stack frame it belongs to.
struct PollFn {
    poll_fn: PollFnInner<'static>,
    // Index of the stack frame whose extern call produced this future
    frame_index: VmIndex,
}
1482
#[cfg_attr(feature = "serde_derive", derive(DeserializeState, SerializeState))]
#[cfg_attr(
    feature = "serde_derive",
    serde(
        deserialize_state = "crate::serialization::DeSeed<'gc>",
        de_parameters = "'gc"
    )
)]
#[cfg_attr(
    feature = "serde_derive",
    serde(serialize_state = "crate::serialization::SeSeed")
)]
/// The mutable state belonging to a single thread: its value stack, its
/// garbage collected heap, the debug hook and any pending extern futures.
pub struct Context {
    #[cfg_attr(feature = "serde_derive", serde(state))]
    pub(crate) stack: Stack,
    #[cfg_attr(feature = "serde_derive", serde(state))]
    pub(crate) gc: Gc,
    #[cfg_attr(feature = "serde_derive", serde(skip))]
    hook: Hook,

    /// Stack of polling functions used for extern functions returning futures
    #[cfg_attr(feature = "serde_derive", serde(skip))]
    poll_fns: Vec<PollFn>,
}
1507
1508impl Context {
1509    fn new(gc: Gc) -> Context {
1510        Context {
1511            gc: gc,
1512            stack: Stack::new(),
1513            hook: Hook {
1514                function: None,
1515                flags: HookFlags::empty(),
1516                previous_instruction_index: usize::max_value(),
1517            },
1518            poll_fns: Vec::new(),
1519        }
1520    }
1521
1522    pub fn push_new_data(
1523        &mut self,
1524        thread: &Thread,
1525        tag: VmTag,
1526        fields: usize,
1527    ) -> Result<Variants> {
1528        let value = {
1529            let fields = &self.stack[self.stack.len() - fields as VmIndex..];
1530            Variants::from(alloc(
1531                &mut self.gc,
1532                thread,
1533                &self.stack,
1534                Def {
1535                    tag: tag,
1536                    elems: fields,
1537                },
1538            )?)
1539        };
1540        self.stack.pop_many(fields as u32);
1541        self.stack.push(value.clone());
1542        Ok(value)
1543    }
1544
1545    pub fn push_new_record(
1546        &mut self,
1547        thread: &Thread,
1548        fields: usize,
1549        field_names: &[InternedStr],
1550    ) -> Result<Variants> {
1551        let value = {
1552            let fields = &self.stack[self.stack.len() - fields as VmIndex..];
1553            Variants::from(alloc(
1554                &mut self.gc,
1555                thread,
1556                &self.stack,
1557                RecordDef {
1558                    elems: fields,
1559                    fields: field_names,
1560                },
1561            )?)
1562        };
1563        self.stack.pop_many(fields as u32);
1564        self.stack.push(value.clone());
1565        Ok(value)
1566    }
1567
1568    pub fn alloc_with<D>(&mut self, thread: &Thread, data: D) -> Result<GcRef<D::Value>>
1569    where
1570        D: DataDef + Trace,
1571        D::Value: Sized + Any,
1572    {
1573        alloc(&mut self.gc, thread, &self.stack, data)
1574    }
1575
1576    pub fn alloc_ignore_limit<D>(&mut self, data: D) -> GcRef<D::Value>
1577    where
1578        D: DataDef + Trace,
1579        D::Value: Sized + Any,
1580    {
1581        self.gc.alloc_ignore_limit(data)
1582    }
1583
1584    pub fn set_hook(&mut self, hook: Option<HookFn>) -> Option<HookFn> {
1585        mem::replace(&mut self.hook.function, hook)
1586    }
1587
1588    pub fn set_hook_mask(&mut self, flags: HookFlags) {
1589        self.hook.flags = flags;
1590    }
1591
1592    pub fn set_max_stack_size(&mut self, limit: VmIndex) {
1593        self.stack.set_max_stack_size(limit);
1594    }
1595
1596    pub fn stacktrace(&self, frame_level: usize) -> crate::stack::Stacktrace {
1597        self.stack.stacktrace(frame_level)
1598    }
1599
1600    /// "Returns a future", letting the virtual machine know that `future` must be resolved to
1601    /// produce the actual value.
1602    ///
1603    /// # Safety
1604    ///
1605    /// This function is unsafe because the `vm` lifetime must not outlive the lifetime of the
1606    /// `Thread`
1607    pub unsafe fn return_future<'vm, F>(&mut self, mut future: F, lock: Lock, frame_index: VmIndex)
1608    where
1609        F: Future + Send + 'vm,
1610        F::Output: Pushable<'vm>,
1611    {
1612        let poll_fn: PollFnInner<'_> = Box::new(move |cx: &mut task::Context<'_>, vm: &Thread| {
1613            // `future` is moved into the closure, which is boxed and therefore pinned
1614            let value = ready!(Pin::new_unchecked(&mut future).poll(cx));
1615
1616            let mut context = vm.current_context();
1617            let result = {
1618                context.stack().release_lock(lock);
1619                let context =
1620                    mem::transmute::<&mut ActiveThread<'_>, &mut ActiveThread<'vm>>(&mut context);
1621                value.vm_push(context)
1622            };
1623            Poll::Ready(result.map(|()| context.into_owned()))
1624        });
1625        self.poll_fns.push(PollFn {
1626            frame_index,
1627            poll_fn: mem::transmute::<PollFnInner<'_>, PollFnInner<'static>>(poll_fn),
1628        });
1629    }
1630}
1631
impl<'b> OwnedContext<'b> {
    /// Allocates `data` in this thread's heap, returning a shared reference.
    pub fn alloc<D>(&mut self, data: D) -> Result<GcRef<D::Value>>
    where
        D: DataDef + Trace,
        D::Value: Sized + Any,
    {
        self.alloc_owned(data).map(GcRef::from)
    }

    /// Allocates `data` in this thread's heap, returning an owned reference.
    pub fn alloc_owned<D>(&mut self, data: D) -> Result<gc::OwnedGcRef<D::Value>>
    where
        D: DataDef + Trace,
        D::Value: Sized + Any,
    {
        let thread = self.thread;
        // Split the borrow so `gc` can be borrowed mutably while `stack`
        // serves as a root for a potential collection
        let Context {
            ref mut gc,
            ref stack,
            ..
        } = **self;
        alloc_owned(gc, thread, &stack, data)
    }

    /// Returns debug information for the current stack (with no hook event
    /// set).
    pub fn debug_info(&self) -> DebugInfo {
        DebugInfo {
            stack: &self.stack,
            state: HookFlags::empty(),
        }
    }

    /// Returns the current number of frames on the stack.
    pub fn frame_level(&self) -> usize {
        self.stack.get_frames().len()
    }

    /// Returns the current (topmost) stack frame viewed with state `T`.
    pub fn stack_frame<T>(&mut self) -> StackFrame<T>
    where
        T: StackState,
    {
        StackFrame::current(&mut self.stack)
    }
}
1673
/// Allocates `def` in `gc`, rooting `thread` and `stack` for the collection
/// the allocation may trigger. Shared-reference variant of `alloc_owned`.
pub(crate) fn alloc<'gc, D>(
    gc: &'gc mut Gc,
    thread: &Thread,
    stack: &Stack,
    def: D,
) -> Result<GcRef<'gc, D::Value>>
where
    D: DataDef + Trace,
    D::Value: Sized + Any,
{
    alloc_owned(gc, thread, stack, def).map(GcRef::from)
}
1686
/// Allocates `def` in `gc`, rooting `thread` and `stack` so that a garbage
/// collection triggered by the allocation can trace them.
pub(crate) fn alloc_owned<'gc, D>(
    gc: &'gc mut Gc,
    thread: &Thread,
    stack: &Stack,
    def: D,
) -> Result<gc::OwnedGcRef<'gc, D::Value>>
where
    D: DataDef + Trace,
    D::Value: Sized + Any,
{
    unsafe {
        // SAFETY Threads must only be on the garbage collectors heap which
        // makes this safe
        let ptr = GcPtr::from_raw(thread);
        let roots = Roots {
            vm: &ptr,
            stack: stack,
        };
        gc.alloc_and_collect(roots, def)
    }
}
1707
/// A `Context` locked for exclusive use, together with the thread that owns
/// it.
pub struct OwnedContext<'b> {
    thread: &'b Thread,
    context: MutexGuard<'b, Context>,
}
1712
// `OwnedContext` transparently dereferences to the `Context` it guards
impl<'b> Deref for OwnedContext<'b> {
    type Target = Context;
    fn deref(&self) -> &Context {
        &self.context
    }
}
1719
impl<'b> DerefMut for OwnedContext<'b> {
    fn deref_mut(&mut self) -> &mut Context {
        &mut self.context
    }
}
1725
1726impl<'b> OwnedContext<'b> {
    /// Pops the current stack frame, returning the context when an enclosing
    /// frame remains and `Err(())` when there was no frame to return to.
    fn exit_scope(mut self) -> StdResult<OwnedContext<'b>, ()> {
        // `is_ok()` ends the borrow of `self.stack` before `self` is moved
        let exists = StackFrame::<State>::current(&mut self.stack)
            .exit_scope()
            .is_ok();
        if exists {
            Ok(self)
        } else {
            Err(())
        }
    }
1737
    /// Interpreter driver loop: repeatedly executes the topmost frame until
    /// control should return to the caller (`Ok(Some(_))`), the executed
    /// frames are exhausted (`Ok(None)`), or execution suspends on a future
    /// (`Poll::Pending`).
    fn execute(mut self, cx: &mut task::Context<'_>) -> Poll<Result<Option<OwnedContext<'b>>>> {
        let mut context = self.borrow_mut();
        // Return when the starting frame is finished
        loop {
            if context.thread.interrupted() {
                return Err(Error::Interrupted).into();
            }
            trace!("STACK\n{:?}", context.stack.stack().get_frames());
            let state = &context.stack.frame().state;

            // Invoke the debug hook when a call is just starting (extern call
            // in its `Start` state or a closure at its first instruction)
            if context.hook.flags.contains(HookFlags::CALL_FLAG) {
                match state {
                    State::Extern(ExternState {
                        call_state: ExternCallState::Start,
                        ..
                    })
                    | State::Closure(ClosureState {
                        instruction_index: 0,
                        ..
                    }) => {
                        let thread = context.thread;
                        if let Some(ref mut hook) = context.hook.function {
                            let info = DebugInfo {
                                stack: &context.stack.stack(),
                                state: HookFlags::CALL_FLAG,
                            };
                            ready!(hook(thread, info))?
                        }
                    }
                    _ => (),
                }
            }

            match state {
                State::Unknown => {
                    return Ok(Some(self)).into();
                }

                State::Extern(ext) if ext.is_locked() && context.poll_fns.is_empty() => {
                    // The frame is locked and there is no futures to poll => The owner of the
                    // frame is up the call stack so we should return and let them handle it.
                    return Ok(Some(self)).into();
                }

                State::Extern(ext) => {
                    let mut ext = unsafe { ext.clone_unrooted() };
                    // We are currently in the poll call of this extern function.
                    // Return control to the caller.
                    if ext.call_state == ExternCallState::InPoll {
                        return Ok(Some(self)).into();
                    }
                    if ext.call_state == ExternCallState::Pending {
                        return Err(format!("Thread is already in use in another task").into())
                            .into();
                    }
                    // Resynchronize with the frame that owns the topmost
                    // pending future, which may be lower on the stack
                    if ext.call_state == ExternCallState::Poll {
                        if let Some(frame_index) = context.poll_fns.last().map(|f| f.frame_index) {
                            unsafe {
                                ext = ExternState::from_state(
                                    &context.stack.stack().get_frames()[frame_index as usize].state,
                                )
                                .clone_unrooted();
                            }
                        }
                    }
                    match &mut context.stack.frame_mut().state {
                        State::Extern(ext) => ext.call_state = ExternCallState::Poll,
                        _ => unreachable!(),
                    }

                    self = ready!(self.execute_function(cx, ext.call_state, &ext.function))?;
                    context = self.borrow_mut();
                }

                State::Closure(ClosureState {
                    closure,
                    instruction_index,
                }) => {
                    let instruction_index = *instruction_index;

                    debug!(
                        "Continue with {}\nAt: {}/{}\n{:?}",
                        closure.function.name,
                        instruction_index,
                        closure.function.instructions.len(),
                        &context.stack[..]
                    );

                    let closure_context = context.from_state();
                    match ready!(closure_context.execute_())? {
                        Some(new_context) => context = new_context,
                        None => return Ok(None).into(),
                    }
                }
            };
        }
    }
1835
    /// Drives a call to an extern (Rust) function for the frame on top of the
    /// stack.
    ///
    /// `ExternCallState::Start` performs the initial call while `Poll` resumes
    /// a future which a previous call registered in `poll_fns`. When the call
    /// completes, the extern frame is popped and its result value is left on
    /// top of the caller's stack.
    fn execute_function(
        mut self,
        cx: &mut task::Context<'_>,
        call_state: ExternCallState,
        function: &ExternFunction,
    ) -> Poll<Result<OwnedContext<'b>>> {
        debug!(
            "CALL EXTERN {} {:?} {} {:?}",
            function.id,
            call_state,
            self.poll_fns.len(),
            &self.stack.current_frame::<ExternState>()[..],
        );

        let mut status = Status::Ok;
        match call_state {
            ExternCallState::Start => {
                // Make sure that the stack is not borrowed during the external function call
                // Necessary since we do not know what will happen during the function call
                let thread = self.thread;
                drop(self);
                status = (function.function)(thread);

                if status == Status::Yield {
                    return Poll::Pending;
                }

                // Re-acquire the context now that the extern call has returned
                self = thread.owned_context();

                if status == Status::Error {
                    // The extern function is expected to have pushed its error
                    // message (a string) before returning `Status::Error`
                    return match self.stack.pop().get_repr() {
                        String(s) => {
                            Err(Error::Panic(s.to_string(), Some(self.stack.stacktrace(0)))).into()
                        }
                        _ => Err(Error::Message(format!(
                            "Unexpected error calling function `{}`",
                            function.id
                        )))
                        .into(),
                    };
                }

                // The `poll_fn` at the top may be for a stack frame at a lower level, return to the
                // state loop to ensure that we are executing the frame at the top of the stack
                if !self.poll_fns.is_empty() {
                    return Ok(self).into();
                }
            }

            ExternCallState::Poll => {
                if let Some(mut poll_fn) = self.poll_fns.pop() {
                    let frame_offset = poll_fn.frame_index as usize;

                    // Mark the frame as in-poll so re-entrant calls can see it
                    match self.stack.get_frames_mut()[frame_offset].state {
                        State::Extern(ref mut e) => e.call_state = ExternCallState::InPoll,
                        _ => unreachable!(),
                    }
                    let thread = self.thread;
                    drop(self);
                    // Poll the future that was returned from the initial call to this extern function
                    debug!("POLL EXTERN {}", function.id);
                    match (poll_fn.poll_fn)(cx, thread) {
                        Poll::Ready(Ok(context)) => {
                            debug!("READY EXTERN {}", function.id);
                            self = context;
                        }
                        Poll::Pending => {
                            debug!("NOT READY EXTERN {}", function.id);
                            self = thread.owned_context();
                            match self.stack.get_frames_mut()[frame_offset].state {
                                State::Extern(ref mut e) => e.call_state = ExternCallState::Pending,
                                _ => unreachable!(),
                            }
                            // Restore `poll_fn` so it can be polled again
                            self.poll_fns.push(poll_fn);
                            return Poll::Pending;
                        }
                        Poll::Ready(Err(err)) => return Err(err).into(),
                    }
                }
            }
            // Handled outside of this function
            ExternCallState::Pending => unreachable!(),
            ExternCallState::InPoll => unreachable!(),
        }

        // The function call is done at this point so remove any extra values from the frame and
        // return the value at the top of the stack
        let result = self.stack.pop();
        {
            let mut stack = self.stack.current_frame();
            debug_assert!(
                match stack.frame().state {
                    State::Extern(ref e) => e.function.id == function.id,
                    _ => false,
                },
                "Attempted to pop {:?} but {} was expected",
                stack.frame(),
                function.id
            );

            stack.clear();
        }
        self = self.exit_scope().map_err(|_| {
            Error::Message(format!(
                "Popped the last frame or a locked frame in execute_function: {}",
                function.id
            ))
        })?;
        self.stack.pop(); // Pop function
        self.stack.push(result);

        debug!(
            "EXIT EXTERN {} {:?}",
            function.id,
            &self.stack.current_frame::<State>()[..]
        );

        match status {
            Status::Ok => Ok(self).into(),
            Status::Yield => Poll::Pending,
            Status::Error => match self.stack.pop().get_repr() {
                String(s) => {
                    Err(Error::Panic(s.to_string(), Some(self.stack.stacktrace(0)))).into()
                }
                _ => Err(Error::Message(format!(
                    "Unexpected error calling function `{}`",
                    function.id
                )))
                .into(),
            },
        }
    }
1969
1970    fn borrow_mut(&mut self) -> ExecuteContext<State> {
1971        let thread = self.thread;
1972        let context = &mut **self;
1973        ExecuteContext {
1974            thread,
1975            gc: &mut context.gc,
1976            stack: StackFrame::current(&mut context.stack),
1977            hook: &mut context.hook,
1978            poll_fns: &context.poll_fns,
1979        }
1980    }
1981}
1982
/// Creates a `Variants` whose lifetime is branded with the borrow of `gc`.
///
/// # Safety
///
/// Borrowing `gc` prevents collections while the returned variant is alive,
/// but the caller must ensure `value` is actually reachable from a root of
/// this gc (e.g. the stack) — TODO confirm with `Variants::with_root`'s
/// contract.
unsafe fn lock_gc<'gc>(gc: &'gc Gc, value: &Value) -> Variants<'gc> {
    Variants::with_root(value, gc)
}
1986
// SAFETY By branding the variant with the gc lifetime we prevent mutation in the gc.
// Since that implies that no collection can occur while the variant is alive it is
// safe to clone the value (to disconnect the previous lifetime from the stack)
// and store the value unrooted
/// Re-roots `$value` against `$context.gc`, detaching its lifetime from the stack.
macro_rules! transfer {
    ($context: expr, $value: expr) => {
        unsafe { lock_gc(&$context.gc, $value) }
    };
}
1996
/// Clones a `GcPtr` into a `GcRef` whose lifetime is branded with the borrow
/// of `gc`.
///
/// # Safety
///
/// Same contract as [`lock_gc`]: the borrow of `gc` blocks collection while
/// the reference is alive, but `value` must be reachable from a root of this
/// gc — TODO confirm.
unsafe fn lock_gc_ptr<'gc, T: ?Sized>(gc: &'gc Gc, value: &GcPtr<T>) -> GcRef<'gc, T> {
    GcRef::with_root(value.clone_unrooted(), gc)
}
2000
/// Pointer counterpart of `transfer!`: re-roots a `GcPtr` against `$context.gc`.
macro_rules! transfer_ptr {
    ($context: expr, $value: expr) => {
        unsafe { lock_gc_ptr(&$context.gc, $value) }
    };
}
2006
/// Borrowed execution state used while running the interpreter.
///
/// `S` is the statically known state of the current stack frame; it defaults
/// to `ClosureState` since bytecode execution operates on closure frames.
pub struct ExecuteContext<'b, 'gc, S: StackState = ClosureState> {
    pub thread: &'b Thread,
    pub stack: StackFrame<'b, S>,
    pub gc: &'gc mut Gc,
    // Line-hook state shared with the owning context (see `run_hook`)
    hook: &'b mut Hook,
    // Futures registered by extern functions; inspected to detect suspended frames
    poll_fns: &'b [PollFn],
}
2014
2015impl<'b, 'gc, S> ExecuteContext<'b, 'gc, S>
2016where
2017    S: StackState,
2018{
2019    pub fn alloc<D>(&mut self, data: D) -> Result<GcRef<D::Value>>
2020    where
2021        D: DataDef + Trace,
2022        D::Value: Sized + Any,
2023    {
2024        alloc(&mut self.gc, self.thread, &self.stack.stack(), data)
2025    }
2026
2027    pub fn push_new_data(&mut self, tag: VmTag, fields: usize) -> Result<Variants> {
2028        let value = {
2029            let fields = &self.stack[self.stack.len() - fields as VmIndex..];
2030            Variants::from(alloc(
2031                &mut self.gc,
2032                self.thread,
2033                &self.stack.stack(),
2034                Def {
2035                    tag: tag,
2036                    elems: fields,
2037                },
2038            )?)
2039        };
2040        self.stack.pop_many(fields as u32);
2041        self.stack.push(value.clone());
2042        Ok(value)
2043    }
2044
2045    pub fn push_new_record(
2046        &mut self,
2047        fields: usize,
2048        field_names: &[InternedStr],
2049    ) -> Result<Variants> {
2050        let value = {
2051            let fields = &self.stack[self.stack.len() - fields as VmIndex..];
2052            Variants::from(alloc(
2053                &mut self.gc,
2054                self.thread,
2055                &self.stack.stack(),
2056                RecordDef {
2057                    elems: fields,
2058                    fields: field_names,
2059                },
2060            )?)
2061        };
2062        self.stack.pop_many(fields as u32);
2063        self.stack.push(value.clone());
2064        Ok(value)
2065    }
2066
2067    pub fn push_new_alloc<D>(&mut self, def: D) -> Result<Variants>
2068    where
2069        D: DataDef + Trace,
2070        D::Value: Sized + Any,
2071        for<'a> Variants<'a>: From<GcRef<'a, D::Value>>,
2072    {
2073        let value = Variants::from(alloc(&mut self.gc, self.thread, &self.stack.stack(), def)?);
2074        self.stack.push(value.clone());
2075        Ok(value)
2076    }
2077}
2078
2079impl<'b, 'gc> ExecuteContext<'b, 'gc> {
    /// Interpreter loop: executes the bytecode of the closure frame currently
    /// on top of the stack until it returns or transfers control.
    ///
    /// Returns `Ready(Ok(Some(context)))` when execution should continue in
    /// the frame now on top of the stack, `Ready(Ok(None))` when the last
    /// frame was popped, and `Pending` when the line hook yielded.
    fn execute_(mut self) -> Poll<Result<Option<ExecuteContext<'b, 'gc, State>>>> {
        let state = &self.stack.frame().state;
        // SAFETY(review): the closure appears to be kept rooted by the stack
        // frame for the duration of this call -- TODO confirm
        let function = unsafe { state.closure.function.clone_unrooted() };
        {
            trace!(
                ">>>\nEnter frame {}: {:?}\n{:?}",
                function.name,
                &self.stack[..],
                self.stack.frame()
            );
        }

        let instructions = &function.instructions[..];
        let mut program_counter = ProgramCounter::new(state.instruction_index, instructions);
        loop {
            // SAFETY Safe since we exit the loop when encountring the Return instruction
            // that we know exists since we could construct a `ProgramCounter`
            let instr = unsafe { program_counter.instruction() };
            let instruction_index = program_counter.instruction_index;
            program_counter.step();

            debug_instruction(&self.stack, instruction_index, instr);

            // Run the line hook (debugger support) if one is installed
            if !self.hook.flags.is_empty() && self.hook.flags.contains(HookFlags::LINE_FLAG) {
                ready!(self.run_hook(&function, instruction_index))?;
            }

            match instr {
                Push(i) => {
                    let v = match self.stack.get(i as usize) {
                        Some(v) => transfer!(self, v),
                        None => {
                            return Err(Error::Panic(
                                format!("ICE: Stack push out of bounds in {}", function.name),
                                Some(self.stack.stack().stacktrace(0)),
                            ))
                            .into();
                        }
                    };
                    self.stack.push(v);
                }
                PushInt(i) => {
                    self.stack.push(Int(i));
                }
                PushByte(b) => {
                    self.stack.push(ValueRepr::Byte(b));
                }
                PushString(string_index) => {
                    self.stack.push(
                        &*construct_gc!(String(@function.strings[string_index as usize].inner())),
                    );
                }
                PushFloat(f) => self.stack.push(Float(f.into())),
                Call(args) => {
                    // Record the resume point before transferring control
                    self.stack
                        .set_instruction_index(program_counter.instruction_index);
                    return self.do_call(args).map(Some).into();
                }
                TailCall(mut args) => {
                    let mut amount = self.stack.len() - args;
                    if self.stack.frame().excess {
                        amount += 1;
                        match self.stack.excess_args() {
                            Some(excess) => {
                                let excess = transfer_ptr!(self, excess);
                                trace!("TailCall: Push excess args {:?}", excess.fields);
                                self.stack.extend(&excess.fields);
                                args += excess.fields.len() as VmIndex;
                            }
                            None => ice!("Expected excess args"),
                        }
                    }
                    debug_assert!(
                        self.stack.frame().state.closure.function.name == function.name,
                        "Attempted to pop {:?} but `{}` was expected",
                        self.stack.frame().state,
                        function.name
                    );
                    // Pop this frame and splice the call arguments into the
                    // caller's stack before performing the call
                    let mut context = self.exit_scope().unwrap_or_else(|x| x);
                    debug!(
                        "Clearing {} {} {:?}",
                        context.stack.len(),
                        amount,
                        &context.stack[..]
                    );
                    let end = context.stack.len() - args - 1;
                    context.stack.remove_range(end - amount, end);
                    trace!("{:?}", &context.stack[..]);
                    return context.do_call(args).map(Some).into();
                }
                ConstructVariant { tag, args } => {
                    let d = {
                        if args == 0 {
                            // Variants without arguments are represented as bare tags
                            Variants::tag(tag)
                        } else {
                            let fields = &self.stack[self.stack.len() - args..];
                            Variants::from(alloc(
                                &mut self.gc,
                                self.thread,
                                &self.stack.stack(),
                                Def {
                                    tag: tag,
                                    elems: fields,
                                },
                            )?)
                        }
                    };
                    self.stack.pop_many(args);
                    self.stack.push(d);
                }
                ConstructPolyVariant { tag, args } => {
                    let d = {
                        let tag = &function.strings[tag as usize];
                        let fields = &self.stack[self.stack.len() - args..];
                        Variants::from(alloc(
                            &mut self.gc,
                            self.thread,
                            &self.stack.stack(),
                            VariantDef {
                                // Sentinel numeric tag; polymorphic variants are
                                // presumably identified by `poly_tag` instead -- TODO confirm
                                tag: 10_000_000,
                                poly_tag: Some(tag),
                                elems: fields,
                            },
                        )?)
                    };
                    self.stack.pop_many(args);
                    self.stack.push(d);
                }
                ConstructRecord { record, args } => {
                    let d = {
                        if args == 0 {
                            Variants::tag(0)
                        } else {
                            let fields = &self.stack[self.stack.len() - args..];
                            let field_names = &function.records[record as usize];
                            Variants::from(alloc(
                                self.gc,
                                self.thread,
                                &self.stack.stack(),
                                RecordDef {
                                    elems: fields,
                                    fields: field_names,
                                },
                            )?)
                        }
                    };
                    self.stack.pop_many(args);
                    self.stack.push(d);
                }
                NewVariant { tag, args } => {
                    // Allocate an uninitialized variant; presumably filled in
                    // later via `CloseData` -- TODO confirm
                    let d = {
                        if args == 0 {
                            Variants::tag(tag)
                        } else {
                            Variants::from(alloc(
                                &mut self.gc,
                                self.thread,
                                &self.stack.stack(),
                                UninitializedVariantDef {
                                    tag: tag,
                                    elems: args as usize,
                                },
                            )?)
                        }
                    };
                    self.stack.push(d);
                }
                NewRecord { record, args } => {
                    let d = {
                        if args == 0 {
                            Variants::tag(0)
                        } else {
                            let field_names = &function.records[record as usize];
                            Variants::from(alloc(
                                &mut self.gc,
                                self.thread,
                                &self.stack.stack(),
                                UninitializedRecord {
                                    elems: args as usize,
                                    fields: field_names,
                                },
                            )?)
                        }
                    };
                    self.stack.push(d);
                }
                CloseData { index } => {
                    match self.stack[index].get_repr() {
                        Data(data) => {
                            // Unique access is safe as the record is only reachable from this
                            // thread and none of those places will use it until after we have
                            // closed it
                            unsafe {
                                let mut data = data.unrooted();
                                let start = self.stack.len() - data.fields.len() as VmIndex;
                                for (var, value) in
                                    data.as_mut().fields.iter_mut().zip(&self.stack[start..])
                                {
                                    *var = value.clone_unrooted();
                                }
                                self.stack.pop_many(data.fields.len() as VmIndex);
                            }
                        }
                        x => ice!("Expected closure, got {:?}", x),
                    }
                }
                ConstructArray(args) => {
                    let d = {
                        let fields = &self.stack[self.stack.len() - args..];
                        alloc(
                            &mut self.gc,
                            self.thread,
                            &self.stack.stack(),
                            crate::value::ArrayDef(fields),
                        )?
                    };
                    self.stack.pop_many(args);
                    self.stack.push(Variants::from(d));
                }
                GetOffset(i) => match self.stack.pop().get_repr() {
                    Data(data) => {
                        let v = &data.fields[i as usize];
                        self.stack.push(v);
                    }
                    x => return Err(Error::Message(format!("GetOffset on {:?}", x))).into(),
                },
                GetField(i) => {
                    let field = &function.strings[i as usize];
                    match self.stack.pop().get_repr() {
                        Data(data) => {
                            let v = GcRef::new(data).get_field(field).unwrap_or_else(|| {
                                error!("{}", self.stack.stack().stacktrace(0));
                                ice!("Field `{}` does not exist", field)
                            });
                            self.stack.push(v);
                        }
                        x => {
                            return Err(Error::Message(format!("GetField on {:?}", x))).into();
                        }
                    }
                }
                TestTag(tag) => {
                    let data_tag = match self.stack.top().get_repr() {
                        Data(data) => data.tag(),
                        ValueRepr::Tag(tag) => *tag,
                        data => {
                            return Err(Error::Message(format!(
                                "Op TestTag called on non data type: {:?}",
                                data
                            )))
                            .into();
                        }
                    };
                    // Push 1/0 as the boolean result of the tag comparison
                    self.stack
                        .push(ValueRepr::Tag(if data_tag == tag { 1 } else { 0 }));
                }
                TestPolyTag(string_index) => {
                    let expected_tag = &function.strings[string_index as usize];
                    let data_tag = match self.stack.top().get_repr() {
                        Data(ref data) => data.poly_tag(),
                        _ => {
                            return Err(Error::Message(
                                "Op TestTag called on non data type".to_string(),
                            ))
                            .into();
                        }
                    };
                    debug_assert!(
                        data_tag.is_some(),
                        "ICE: Polymorphic match on non-polymorphic variant {:#?}\n{:p}",
                        self.stack.top(),
                        match self.stack.top().get_repr() {
                            Data(ref data) => &**data,
                            _ => unreachable!(),
                        }
                    );

                    let value = ValueRepr::Tag(if data_tag == Some(expected_tag) { 1 } else { 0 });
                    self.stack.push(value);
                }
                Split => {
                    match self.stack.pop().get_repr() {
                        Data(data) => {
                            self.stack.extend(&data.fields);
                        }
                        // Zero argument variant
                        ValueRepr::Tag(_) => (),
                        _ => {
                            return Err(Error::Message(
                                "Op Split called on non data type".to_string(),
                            ))
                            .into();
                        }
                    }
                }
                Jump(i) => {
                    program_counter.jump(i as usize);
                    continue;
                }
                // Conditional jump: falls through only on tag 0 (false)
                CJump(i) => match self.stack.pop().get_repr() {
                    ValueRepr::Tag(0) => (),
                    _ => {
                        program_counter.jump(i as usize);
                        continue;
                    }
                },
                Pop(n) => self.stack.pop_many(n),
                Slide(n) => {
                    trace!("{:?}", &self.stack[..]);
                    self.stack.slide(n);
                }
                MakeClosure {
                    function_index,
                    upvars,
                } => {
                    let closure = {
                        let args = &self.stack[self.stack.len() - upvars..];
                        let func = &function.inner_functions[function_index as usize];
                        Variants::from(alloc(
                            &mut self.gc,
                            self.thread,
                            &self.stack.stack(),
                            ClosureDataDef(func, args.iter()),
                        )?)
                    };
                    self.stack.pop_many(upvars);
                    self.stack.push(closure);
                }
                NewClosure {
                    function_index,
                    upvars,
                } => {
                    let closure = {
                        // Use dummy variables until it is filled
                        let func = &function.inner_functions[function_index as usize];
                        Variants::from(alloc(
                            &mut self.gc,
                            self.thread,
                            &self.stack.stack(),
                            construct_gc!(ClosureInitDef(@func, upvars as usize)),
                        )?)
                    };
                    self.stack.push(closure);
                }
                CloseClosure(n) => {
                    let i = self.stack.len() - n - 1;
                    match self.stack[i].get_repr() {
                        Closure(closure) => {
                            // Unique access should be safe as this closure should not be shared as
                            // it has just been allocated and havent even had its upvars set yet
                            // (which is done here).
                            unsafe {
                                let mut closure = closure.clone_unrooted();
                                let start = self.stack.len() - closure.upvars.len() as VmIndex;
                                for (var, value) in
                                    closure.as_mut().upvars.iter_mut().zip(&self.stack[start..])
                                {
                                    *var = value.clone_unrooted();
                                }
                            }
                            let pop = closure.upvars.len() as VmIndex + 1;
                            self.stack.pop_many(pop); //Remove the closure
                        }
                        x => ice!("Expected closure, got {:?}", x),
                    }
                }
                PushUpVar(i) => {
                    let v = transfer!(self, self.stack.get_upvar(i).get_value());
                    self.stack.push(v);
                }
                AddInt => binop_int(self.thread, &mut self.stack, VmInt::checked_add)?,
                SubtractInt => binop_int(self.thread, &mut self.stack, VmInt::checked_sub)?,
                MultiplyInt => binop_int(self.thread, &mut self.stack, VmInt::checked_mul)?,
                DivideInt => binop_int(self.thread, &mut self.stack, VmInt::checked_div)?,
                IntLT => binop_bool(self.thread, &mut self.stack, |l: VmInt, r| l < r)?,
                IntEQ => binop_bool(self.thread, &mut self.stack, |l: VmInt, r| l == r)?,

                AddByte => binop_byte(self.thread, &mut self.stack, u8::checked_add)?,
                SubtractByte => binop_byte(self.thread, &mut self.stack, u8::checked_sub)?,
                MultiplyByte => binop_byte(self.thread, &mut self.stack, u8::checked_mul)?,
                DivideByte => binop_byte(self.thread, &mut self.stack, u8::checked_div)?,
                ByteLT => binop_bool(self.thread, &mut self.stack, |l: u8, r| l < r)?,
                ByteEQ => binop_bool(self.thread, &mut self.stack, |l: u8, r| l == r)?,

                AddFloat => binop_f64(self.thread, &mut self.stack, f64::add)?,
                SubtractFloat => binop_f64(self.thread, &mut self.stack, f64::sub)?,
                MultiplyFloat => binop_f64(self.thread, &mut self.stack, f64::mul)?,
                DivideFloat => binop_f64(self.thread, &mut self.stack, f64::div)?,
                FloatLT => binop_bool(self.thread, &mut self.stack, |l: f64, r| l < r)?,
                FloatEQ => binop_bool(self.thread, &mut self.stack, |l: f64, r| l == r)?,

                Return => {
                    drop(program_counter);
                    break;
                }
            }
        }
        // Return handling: pop the finished frame and slide its return value
        // down into the caller's stack
        let len = self.stack.len();
        let frame_has_excess = self.stack.frame().excess;

        // We might not get access to the frame above the current as it could be locked
        debug_assert!(
            self.stack.frame().state.closure.function.name == function.name,
            "Attempted to pop {:?} but `{}` was expected",
            self.stack.frame().state,
            function.name
        );
        let (stack_exists, mut context) = {
            let r = self.exit_scope();
            (
                r.is_ok(),
                match r {
                    Ok(context) => context,
                    Err(context) => context,
                },
            )
        };
        debug!("Return {} {:?}", function.name, context.stack.top());

        context.stack.slide(len);
        if frame_has_excess {
            // If the function that just finished had extra arguments we need to call the result of
            // the call with the extra arguments
            match transfer!(context, &context.stack[context.stack.len() - 2]).get_repr() {
                Data(excess) => {
                    trace!("Push excess args {:?}", &excess.fields);
                    context.stack.slide(1);
                    context.stack.extend(&excess.fields);
                    let excess_fields_len = excess.fields.len() as VmIndex;
                    Poll::Ready(context.do_call(excess_fields_len).map(|x| Some(x)))
                }
                x => ice!("Expected excess arguments found {:?}", x),
            }
        } else {
            Poll::Ready(Ok(if stack_exists { Some(context) } else { None }))
        }
    }
2517
2518    fn run_hook(&mut self, function: &BytecodeFunction, index: usize) -> Poll<Result<()>> {
2519        if let Some(ref mut hook) = self.hook.function {
2520            let current_line = function.debug_info.source_map.line(index);
2521            let previous_line = function
2522                .debug_info
2523                .source_map
2524                .line(self.hook.previous_instruction_index);
2525            self.hook.previous_instruction_index = index;
2526            if current_line != previous_line {
2527                self.stack.frame_mut().state.instruction_index = index;
2528                let info = DebugInfo {
2529                    stack: &self.stack.stack(),
2530                    state: HookFlags::LINE_FLAG,
2531                };
2532                ready!(hook(self.thread, info))?
2533            }
2534        }
2535        Ok(()).into()
2536    }
2537}
2538
2539impl<'b, 'gc> ExecuteContext<'b, 'gc, State> {
2540    fn from_state<T>(self) -> ExecuteContext<'b, 'gc, T>
2541    where
2542        T: StackState,
2543    {
2544        ExecuteContext {
2545            thread: self.thread,
2546            stack: self.stack.from_state(),
2547            gc: self.gc,
2548            hook: self.hook,
2549            poll_fns: self.poll_fns,
2550        }
2551    }
2552}
2553
impl<'b, 'gc, S> ExecuteContext<'b, 'gc, S>
where
    S: StackState,
{
    /// Erases the statically known frame state `S`, yielding a context whose
    /// frame state is the dynamically tagged `State` (inverse of `from_state`).
    fn to_state(self) -> ExecuteContext<'b, 'gc, State> {
        ExecuteContext {
            thread: self.thread,
            stack: self.stack.to_state(),
            gc: self.gc,
            hook: self.hook,
            poll_fns: self.poll_fns,
        }
    }

    /// Pushes a new call frame holding `args` arguments with the frame state
    /// `state`. `excess` marks that extra arguments were packed into a data
    /// value below the frame (see `call_function_with_upvars`).
    fn enter_scope<T>(
        self,
        args: VmIndex,
        state: &T,
        excess: bool,
    ) -> Result<ExecuteContext<'b, 'gc, T>>
    where
        T: StackState,
    {
        let stack = self.stack.enter_scope_excess(args, state, excess)?;
        // Reset the line-hook tracking so the first instruction of the new
        // frame can never be mistaken for the previously hooked line
        // (`max_value` never maps to a real instruction index).
        self.hook.previous_instruction_index = usize::max_value();
        Ok(ExecuteContext {
            thread: self.thread,
            stack,
            gc: self.gc,
            hook: self.hook,
            poll_fns: self.poll_fns,
        })
    }

    /// Pops the current call frame.
    ///
    /// Returns `Ok` with the caller's frame when the scope was exited and
    /// `Err` with the (re-acquired) current frame when it could not be exited.
    /// NOTE(review): presumably the `Err` case is the outermost or a locked
    /// frame — confirm against `StackFrame::exit_scope`.
    fn exit_scope(
        self,
    ) -> StdResult<ExecuteContext<'b, 'gc, State>, ExecuteContext<'b, 'gc, State>> {
        match self.stack.exit_scope() {
            Ok(stack) => {
                if self.hook.flags.bits() != 0 {
                    // Subtract 1 to compensate for the `Call` instruction adding one earlier
                    // ensuring that the line hook runs after function calls
                    if let State::Closure(ref state) = stack.frame().state {
                        self.hook.previous_instruction_index =
                            state.instruction_index.saturating_sub(1);
                    }
                }
                Ok(ExecuteContext {
                    thread: self.thread,
                    stack,
                    gc: self.gc,
                    hook: self.hook,
                    poll_fns: self.poll_fns,
                })
            }
            Err(stack) => Err(ExecuteContext {
                thread: self.thread,
                stack: StackFrame::current(stack),
                gc: self.gc,
                hook: self.hook,
                poll_fns: self.poll_fns,
            }),
        }
    }

    /// Enters a bytecode closure, starting execution at instruction 0.
    fn enter_closure(
        self,
        closure: &GcPtr<ClosureData>,
        excess: bool,
    ) -> Result<ExecuteContext<'b, 'gc, State>> {
        info!("Call {} {:?}", closure.function.name, &self.stack[..]);
        Ok(self
            .enter_scope(
                closure.function.args,
                &*construct_gc!(ClosureState {
                    @closure,
                    instruction_index: 0,
                }),
                excess,
            )?
            .to_state())
    }

    /// Enters an extern (native Rust) function.
    fn enter_extern(
        self,
        ext: &GcPtr<ExternFunction>,
        excess: bool,
    ) -> Result<ExecuteContext<'b, 'gc, State>> {
        // The arguments plus the function value itself must already be on the
        // stack.
        assert!(self.stack.len() >= ext.args + 1);
        info!("Call {} {:?}", ext.id, &self.stack[..]);
        Ok(self
            .enter_scope(ext.args, &*ExternState::new(ext), excess)?
            .to_state())
    }

    /// Calls `callable` (which expects `required_args`), handling currying:
    ///
    /// * exact application enters the function via `enter_scope`,
    /// * under-application allocates a `PartialApplication` capturing the
    ///   supplied arguments,
    /// * over-application packs the excess arguments into a data value kept
    ///   below the frame (recorded by the `excess` flag).
    fn call_function_with_upvars(
        mut self,
        args: VmIndex,
        required_args: VmIndex,
        callable: &Callable,
        enter_scope: impl FnOnce(Self, bool) -> Result<ExecuteContext<'b, 'gc, State>>,
    ) -> Result<ExecuteContext<'b, 'gc, State>> {
        trace!("cmp {} {} {:?} {:?}", args, required_args, callable, {
            let function_index = self.stack.len() - 1 - args;
            &(*self.stack)[(function_index + 1) as usize..]
        });
        match args.cmp(&required_args) {
            Ordering::Equal => enter_scope(self, false),
            Ordering::Less => {
                // Too few arguments: capture them in a partial-application
                // value which replaces the function and its arguments on the
                // stack.
                let app = {
                    let fields = &self.stack[self.stack.len() - args..];
                    let def = construct_gc!(PartialApplicationDataDef(@gc::Borrow::new(callable), fields));
                    Variants::from(alloc(&mut self.gc, self.thread, &self.stack.stack(), def)?)
                };
                self.stack.pop_many(args + 1);
                self.stack.push(app);
                Ok(self.to_state())
            }
            Ordering::Greater => {
                // Too many arguments: move the excess into a freshly allocated
                // data value.
                let excess_args = args - required_args;
                let d = {
                    let fields = &self.stack[self.stack.len() - excess_args..];
                    alloc(
                        &mut self.gc,
                        self.thread,
                        &self.stack.stack(),
                        Def {
                            tag: 0,
                            elems: fields,
                        },
                    )?
                };
                self.stack.pop_many(excess_args);
                // Insert the excess args before the actual closure so it does not get
                // collected
                let offset = self.stack.len() - required_args - 1;
                self.stack
                    .insert_slice(offset, slice::from_ref(Variants::from(d).get_value()));
                trace!(
                    "xxxxxx {:?}\n{:?}",
                    &(*self.stack)[..],
                    self.stack.stack().get_frames()
                );
                enter_scope(self, true)
            }
        }
    }

    /// Calls the value at `stack[len - 1 - args]` with the topmost `args`
    /// values as arguments. The callee may be a closure, an extern function or
    /// a partial application; any other value is an error.
    fn do_call(mut self, args: VmIndex) -> Result<ExecuteContext<'b, 'gc, State>> {
        let function_index = self.stack.len() - 1 - args;
        debug!(
            "Do call {:?} {:?}",
            self.stack[function_index],
            &(*self.stack)[(function_index + 1) as usize..]
        );
        // SAFETY: the value at `function_index` is not popped or overwritten in
        // this scope, so it stays rooted by the stack for as long as the
        // unrooted clone is in use.
        let value = unsafe { self.stack[function_index].get_repr().clone_unrooted() };
        match &value {
            Closure(closure) => {
                let callable = construct_gc!(Callable::Closure(@gc::Borrow::new(closure)));
                self.call_function_with_upvars(
                    args,
                    closure.function.args,
                    &callable,
                    |self_, excess| self_.enter_closure(closure, excess),
                )
            }
            Function(f) => {
                let callable = construct_gc!(Callable::Extern(@gc::Borrow::new(f)));
                self.call_function_with_upvars(args, f.args, &callable, |self_, excess| {
                    self_.enter_extern(f, excess)
                })
            }
            PartialApplication(app) => {
                // Re-apply with the previously captured arguments inserted
                // below the new ones and the combined argument count.
                let total_args = app.args.len() as VmIndex + args;
                let offset = self.stack.len() - args;
                self.stack.insert_slice(offset, &app.args);
                self.call_function_with_upvars(
                    total_args,
                    app.function.args(),
                    &app.function,
                    |self_, excess| match &app.function {
                        Callable::Closure(closure) => self_.enter_closure(closure, excess),
                        Callable::Extern(f) => self_.enter_extern(f, excess),
                    },
                )
            }
            x => Err(Error::Message(format!("Cannot call {:?}", x))),
        }
    }
}
2745
2746#[inline(always)]
2747fn binop_int<'b, 'c, F, T>(
2748    vm: &'b Thread,
2749    stack: &'b mut StackFrame<'c, ClosureState>,
2750    f: F,
2751) -> Result<()>
2752where
2753    F: FnOnce(T, T) -> Option<VmInt>,
2754    T: for<'d, 'e> Getable<'d, 'e> + fmt::Debug,
2755{
2756    binop(vm, stack, |l, r| {
2757        Ok(ValueRepr::Int(f(l, r).ok_or_else(|| {
2758            Error::Message("Arithmetic overflow".into())
2759        })?))
2760    })
2761}
2762
2763#[inline(always)]
2764fn binop_f64<'b, 'c, F, T>(
2765    vm: &'b Thread,
2766    stack: &'b mut StackFrame<'c, ClosureState>,
2767    f: F,
2768) -> Result<()>
2769where
2770    F: FnOnce(T, T) -> f64,
2771    T: for<'d, 'e> Getable<'d, 'e> + fmt::Debug,
2772{
2773    binop(vm, stack, |l, r| Ok(ValueRepr::Float(f(l, r))))
2774}
2775
2776#[inline(always)]
2777fn binop_byte<'b, 'c, F, T>(
2778    vm: &'b Thread,
2779    stack: &'b mut StackFrame<'c, ClosureState>,
2780    f: F,
2781) -> Result<()>
2782where
2783    F: FnOnce(T, T) -> Option<u8>,
2784    T: for<'d, 'e> Getable<'d, 'e> + fmt::Debug,
2785{
2786    binop(vm, stack, |l, r| {
2787        Ok(ValueRepr::Byte(f(l, r).ok_or_else(|| {
2788            Error::Message("Arithmetic overflow".into())
2789        })?))
2790    })
2791}
2792
2793#[inline(always)]
2794fn binop_bool<'b, 'c, F, T>(
2795    vm: &'b Thread,
2796    stack: &'b mut StackFrame<'c, ClosureState>,
2797    f: F,
2798) -> Result<()>
2799where
2800    F: FnOnce(T, T) -> bool,
2801    T: for<'d, 'e> Getable<'d, 'e> + fmt::Debug,
2802{
2803    binop(vm, stack, |l, r| {
2804        Ok(ValueRepr::Tag(if f(l, r) { 1 } else { 0 }))
2805    })
2806}
2807
/// Pops the two topmost stack values, applies `f` to them (the left operand
/// is the deeper of the two) and stores the produced value where the left
/// operand was, shrinking the stack by one.
#[inline(always)]
fn binop<'b, 'c, F, T>(
    vm: &'b Thread,
    stack: &'b mut StackFrame<'c, ClosureState>,
    f: F,
) -> Result<()>
where
    F: FnOnce(T, T) -> Result<ValueRepr>,
    T: for<'d, 'e> Getable<'d, 'e> + fmt::Debug,
{
    assert!(stack.len() >= 2);
    // `unwrap` is fine: the assert above guarantees both slots exist.
    let r = stack.get_value(vm, stack.len() - 1).unwrap();
    let l = stack.get_value(vm, stack.len() - 2).unwrap();
    let result = f(l, r)?;
    // Replace the two operands with the single result.
    stack.pop();
    *stack.last_mut().unwrap() = result.into();
    Ok(())
}
2826
/// Logs `instr` (about to execute at `index`) together with the stack depth
/// and the operand it will read, at `trace` level.
///
/// The arguments of `trace!` are only evaluated when trace logging is
/// enabled, so the stack reads inside the `match` cost nothing otherwise.
fn debug_instruction(stack: &StackFrame<ClosureState>, index: usize, instr: Instruction) {
    trace!(
        "{:?}: {:?} -> {:?} {:?}",
        index,
        instr,
        stack.len(),
        match instr {
            Push(i) => {
                let x = stack.get_variant(i);
                // Dump the whole stack when the slot is unexpectedly missing.
                if x.is_none() {
                    trace!("{:?}", &stack[..])
                }
                x
            }
            PushUpVar(i) => Some(stack.get_upvar(i)),
            NewClosure { .. } | MakeClosure { .. } => Some(Variants::int(stack.len() as VmInt)),
            _ => None,
        }
    );
}
2847
/// A handle to a [`Thread`] whose context (stack, gc, hooks) is currently
/// locked by the caller.
pub struct ActiveThread<'vm> {
    thread: &'vm Thread,
    // `None` only transiently, while the lock is released in `release_for`.
    context: Option<MutexGuard<'vm, Context>>,
}
2852
impl<'vm> ActiveThread<'vm> {
    /// Temporarily releases this thread's context lock while running `f`,
    /// re-acquiring it afterwards. Useful when `f` itself needs to lock the
    /// context (directly or indirectly).
    pub fn release_for<R>(&mut self, f: impl FnOnce() -> R) -> R {
        // Drop the `MutexGuard` before calling `f` so `f` cannot deadlock on
        // the context lock.
        self.context = None;
        let r = f();
        *self = self.thread.current_context();
        r
    }

    /// The thread this context belongs to.
    pub fn thread(&self) -> &'vm Thread {
        self.thread
    }

    /// Pushes `v` onto the value stack.
    pub fn push<'a, T>(&mut self, v: T)
    where
        T: crate::stack::StackPrimitive,
    {
        self.context.as_mut().unwrap().stack.push(v);
    }

    /// Converts this handle into an `OwnedContext`, transferring the held
    /// context guard.
    pub(crate) fn into_owned(self) -> OwnedContext<'vm> {
        OwnedContext {
            thread: self.thread,
            context: self.context.expect("context"),
        }
    }

    /// Pops the topmost value off the value stack.
    pub fn pop<'a>(&'a mut self) -> PopValue<'a> {
        self.context.as_mut().unwrap().stack.pop_value()
    }

    /// Returns the topmost value on the stack without popping it.
    /// NOTE(review): `stack.len() - 1` underflows if the stack is empty —
    /// callers presumably guarantee a non-empty stack; confirm.
    pub(crate) fn last<'a>(&'a self) -> Option<Variants<'a>> {
        let stack = &self.context.as_ref().unwrap().stack;
        let last = stack.len() - 1;
        stack.get_variant(last)
    }

    // For gluon_codegen
    /// Builds an `ExecuteContext` viewing the current frame of this thread.
    pub fn context(&mut self) -> ExecuteContext<State> {
        let thread = self.thread;
        let context = &mut **self.context.as_mut().expect("context");
        ExecuteContext {
            thread,
            gc: &mut context.gc,
            stack: StackFrame::current(&mut context.stack),
            hook: &mut context.hook,
            poll_fns: &context.poll_fns,
        }
    }

    // For gluon_codegen
    #[doc(hidden)]
    pub(crate) fn stack(&mut self) -> &mut Stack {
        &mut self.context.as_mut().unwrap().stack
    }

    /// Suspends the current extern call, resuming it with the value produced
    /// by `future` once it completes.
    ///
    /// # Safety
    ///
    /// NOTE(review): `future` is only bound by `'vm` rather than `'static`;
    /// the caller must guarantee it remains valid until the VM has polled it
    /// to completion — confirm against `Context::return_future`.
    pub unsafe fn return_future<F>(&mut self, future: F, lock: Lock, frame_index: VmIndex)
    where
        F: Future + Send + 'vm,
        F::Output: Pushable<'vm>,
    {
        self.context
            .as_mut()
            .expect("context")
            .return_future(future, lock, frame_index)
    }
}
2919#[doc(hidden)]
2920pub fn reset_stack(mut stack: StackFrame<State>, level: usize) -> Result<crate::stack::Stacktrace> {
2921    let trace = stack.stack().stacktrace(level);
2922    while stack.stack().get_frames().len() > level {
2923        stack = match stack.exit_scope() {
2924            Ok(s) => s,
2925            Err(_) => return Err(format!("Attempted to exit scope above current").into()),
2926        };
2927    }
2928    Ok(trace)
2929}
2930
/// A cursor over a function's bytecode instructions.
struct ProgramCounter<'a> {
    // Index of the instruction that `instruction` will read next.
    instruction_index: usize,
    instructions: &'a [Instruction],
}
2935
impl<'a> ProgramCounter<'a> {
    /// Creates a counter positioned at `instruction_index`.
    ///
    /// # Panics
    ///
    /// Panics if `instruction_index` is out of bounds or if `instructions`
    /// does not end with `Return` — the invariants that make the unchecked
    /// access in `instruction` sound.
    fn new(instruction_index: usize, instructions: &'a [Instruction]) -> Self {
        assert!(instruction_index < instructions.len());
        // As long as we end with a `Return` instruction and `step` is not called after `Return` is
        // we do not need to bounds check on `step`
        assert!(instructions.last() == Some(&Return));
        ProgramCounter {
            instruction_index,
            instructions,
        }
    }

    /// Reads the current instruction without a bounds check.
    ///
    /// # Safety
    ///
    /// The caller must not have stepped past the trailing `Return`
    /// instruction; together with the asserts in `new` and the bounds check
    /// in `jump` this keeps `instruction_index` in bounds.
    #[inline(always)]
    unsafe fn instruction(&self) -> Instruction {
        *self.instructions.get_unchecked(self.instruction_index)
    }

    /// Advances to the next instruction (must not be called after `Return`).
    #[inline(always)]
    fn step(&mut self) {
        self.instruction_index += 1;
    }

    /// Jumps to `index`, panicking if it is out of bounds.
    #[inline(always)]
    fn jump(&mut self, index: usize) {
        assert!(index < self.instructions.len());
        self.instruction_index = index;
    }
}
2964
#[cfg(test)]
mod tests {
    use super::*;

    /// `RootedThread` must be `Send` so a VM can be moved between OS threads.
    #[test]
    fn send_vm() {
        fn assert_send<T: Send>(_: T) {}
        assert_send(RootedThread::new());
    }
}