// shape_vm/executor/vm_impl/init.rs
1use super::super::*;
2
3impl VirtualMachine {
4    pub fn new(config: VMConfig) -> Self {
5        let debugger = if config.debug_mode {
6            Some(VMDebugger::new())
7        } else {
8            None
9        };
10
11        let gc = GarbageCollector::new(config.gc_config.clone());
12
13        // Initialize builtin schema IDs (overwritten from loaded bytecode registry
14        // in `load_program`).
15        let (registry, builtin_schemas) =
16            shape_runtime::type_schema::TypeSchemaRegistry::with_stdlib_types_and_builtin_ids();
17
18        let mut program = BytecodeProgram::new();
19        program.type_schema_registry = registry;
20
21        let mut vm = Self {
22            config,
23            program,
24            ip: 0,
25            stack: (0..crate::constants::DEFAULT_STACK_CAPACITY)
26                .map(|_| ValueWord::none())
27                .collect(),
28            sp: 0,
29            module_bindings: Vec::new(),
30            call_stack: Vec::with_capacity(crate::constants::DEFAULT_CALL_STACK_CAPACITY),
31            loop_stack: Vec::new(),
32            timeframe_stack: Vec::new(),
33            debugger,
34            gc,
35            instruction_count: 0,
36            exception_handlers: Vec::new(),
37            builtin_schemas,
38            last_error_line: None,
39            last_error_file: None,
40            last_uncaught_exception: None,
41            module_init_done: false,
42            output_buffer: None,
43            module_registry: shape_runtime::module_exports::ModuleExportRegistry::new(),
44            module_fn_table: Vec::new(),
45            function_name_index: HashMap::new(),
46            extension_methods: HashMap::new(),
47            merged_schema_cache: HashMap::new(),
48            interrupt: Arc::new(AtomicU8::new(0)),
49            future_id_counter: 0,
50            async_scope_stack: Vec::new(),
51            task_scheduler: task_scheduler::TaskScheduler::new(),
52            foreign_fn_handles: Vec::new(),
53            function_hashes: Vec::new(),
54            function_hash_raw: Vec::new(),
55            function_id_by_hash: HashMap::new(),
56            function_entry_points: Vec::new(),
57            program_entry_ip: 0,
58            resource_usage: None,
59            time_travel: None,
60            #[cfg(feature = "gc")]
61            gc_heap: None,
62            #[cfg(feature = "jit")]
63            jit_compiled: false,
64            #[cfg(feature = "jit")]
65            jit_dispatch_table: std::collections::HashMap::new(),
66            tier_manager: None,
67            pending_resume: None,
68            pending_frame_resume: None,
69            metrics: None,
70            feedback_vectors: Vec::new(),
71            megamorphic_cache: crate::megamorphic_cache::MegamorphicCache::new(),
72        };
73
74        // VM-native stdlib modules are always available, independent of
75        // user-installed extension plugins.
76        // VM-side modules (state, transport, remote) live in shape-vm.
77        vm.register_stdlib_module(state_builtins::create_state_module());
78        vm.register_stdlib_module(create_transport_module_exports());
79        vm.register_stdlib_module(create_remote_module_exports());
80        // shape-runtime canonical registry covers all non-VM modules.
81        for module in shape_runtime::stdlib::all_stdlib_modules() {
82            vm.register_stdlib_module(module);
83        }
84
85        // Initialise metrics collector when requested.
86        if vm.config.metrics_enabled {
87            vm.metrics = Some(crate::metrics::VmMetrics::new());
88        }
89
90        // Auto-initialise the tracing GC heap when requested.
91        #[cfg(feature = "gc")]
92        if vm.config.use_tracing_gc {
93            vm.init_gc_heap();
94        }
95
96        vm
97    }
98
99    /// Attach resource limits to this VM. The dispatch loop will enforce them.
100    pub fn with_resource_limits(mut self, limits: crate::resource_limits::ResourceLimits) -> Self {
101        let mut usage = crate::resource_limits::ResourceUsage::new(limits);
102        usage.start();
103        self.resource_usage = Some(usage);
104        self
105    }
106
107    /// Initialize the GC heap for this VM instance (gc feature only).
108    ///
109    /// Sets up the GcHeap and registers it as the thread-local heap so
110    /// ValueWord::heap_box() and ValueSlot::from_heap() can allocate through it.
111    /// Also configures the GC threshold from the VM's GCConfig.
112    #[cfg(feature = "gc")]
113    pub fn init_gc_heap(&mut self) {
114        let heap = shape_gc::GcHeap::new();
115        self.gc_heap = Some(heap);
116        // Set thread-local GC heap pointer AFTER the move into self.gc_heap
117        // so the pointer remains valid for the VM's lifetime.
118        if let Some(ref mut heap) = self.gc_heap {
119            unsafe { shape_gc::set_thread_gc_heap(heap as *mut _) };
120        }
121    }
122
123    /// Set the interrupt flag (shared with Ctrl+C handler).
124    pub fn set_interrupt(&mut self, flag: Arc<AtomicU8>) {
125        self.interrupt = flag;
126    }
127
128    /// Enable time-travel debugging with the given capture mode and history limit.
129    pub fn enable_time_travel(&mut self, mode: time_travel::CaptureMode, max_entries: usize) {
130        self.time_travel = Some(time_travel::TimeTravel::new(mode, max_entries));
131    }
132
133    /// Disable time-travel debugging and discard history.
134    pub fn disable_time_travel(&mut self) {
135        self.time_travel = None;
136    }
137
138    /// Mark this VM as having been JIT-compiled selectively.
139    ///
140    /// Call this after using `shape_jit::JITCompiler::compile_program_selective`
141    /// externally to JIT-compile functions that benefit from native execution.
142    /// The caller is responsible for performing the compilation via `shape-jit`
143    /// (which depends on `shape-vm`, so the dependency flows one way).
144    ///
145    /// # Example (in a crate that depends on both `shape-vm` and `shape-jit`):
146    ///
147    /// ```ignore
148    /// let mut compiler = shape_jit::JITCompiler::new()?;
149    /// let (_jitted_fn, _table) = compiler.compile_program_selective("main", vm.program())?;
150    /// vm.set_jit_compiled();
151    /// ```
152    #[cfg(feature = "jit")]
153    pub fn set_jit_compiled(&mut self) {
154        self.jit_compiled = true;
155    }
156
157    /// Returns whether selective JIT compilation has been applied to this VM.
158    #[cfg(feature = "jit")]
159    pub fn is_jit_compiled(&self) -> bool {
160        self.jit_compiled
161    }
162
163    /// Register a JIT-compiled function in the dispatch table.
164    ///
165    /// After registration, calls to this function_id will attempt JIT dispatch
166    /// before falling back to bytecode interpretation.
167    #[cfg(feature = "jit")]
168    pub fn register_jit_function(&mut self, function_id: u16, ptr: JitFnPtr) {
169        self.jit_dispatch_table.insert(function_id, ptr);
170        self.jit_compiled = true;
171    }
172
173    /// Get the JIT dispatch table for inspection or external use.
174    #[cfg(feature = "jit")]
175    pub fn jit_dispatch_table(&self) -> &std::collections::HashMap<u16, JitFnPtr> {
176        &self.jit_dispatch_table
177    }
178
179    /// Enable tiered compilation for this VM.
180    ///
181    /// Must be called after `load_program()` so the function count is known.
182    /// The caller is responsible for spawning a background compilation thread
183    /// that reads from the request channel and sends results back.
184    ///
185    /// Returns `(request_rx, result_tx)` that the background thread should use.
186    pub fn enable_tiered_compilation(
187        &mut self,
188    ) -> (
189        std::sync::mpsc::Receiver<crate::tier::CompilationRequest>,
190        std::sync::mpsc::Sender<crate::tier::CompilationResult>,
191    ) {
192        let function_count = self.program.functions.len();
193        let mut mgr = crate::tier::TierManager::new(function_count, true);
194
195        let (req_tx, req_rx) = std::sync::mpsc::channel();
196        let (res_tx, res_rx) = std::sync::mpsc::channel();
197        mgr.set_channels(req_tx, res_rx);
198
199        self.tier_manager = Some(mgr);
200        (req_rx, res_tx)
201    }
202
203    /// Get a reference to the tier manager, if tiered compilation is enabled.
204    pub fn tier_manager(&self) -> Option<&crate::tier::TierManager> {
205        self.tier_manager.as_ref()
206    }
207
208    /// Poll the tier manager for completed background JIT compilations.
209    ///
210    /// Completed compilations are applied by `TierManager::poll_completions()`,
211    /// which updates its internal `native_code_table`. The JIT dispatch fast
212    /// path in `op_call` reads from `tier_mgr.get_native_code()`.
213    ///
214    /// Called every 1024 instructions from the dispatch loop (same cadence as
215    /// interrupt and GC safepoint checks).
216    pub(crate) fn poll_tier_completions(&mut self) {
217        if let Some(ref mut tier_mgr) = self.tier_manager {
218            // poll_completions() reads from the compilation_rx channel and
219            // updates native_code_table internally.
220            let completions = tier_mgr.poll_completions();
221
222            // Record tier transition events in metrics if enabled.
223            if let Some(ref mut metrics) = self.metrics {
224                for result in &completions {
225                    if result.native_code.is_some() {
226                        let from_tier = match result.compiled_tier {
227                            crate::tier::Tier::BaselineJit => 0,   // was Interpreted
228                            crate::tier::Tier::OptimizingJit => 1, // was BaselineJit
229                            crate::tier::Tier::Interpreted => continue,
230                        };
231                        let to_tier = match result.compiled_tier {
232                            crate::tier::Tier::BaselineJit => 1,
233                            crate::tier::Tier::OptimizingJit => 2,
234                            crate::tier::Tier::Interpreted => continue,
235                        };
236                        metrics.record_tier_event(crate::metrics::TierEvent {
237                            function_id: result.function_id,
238                            from_tier,
239                            to_tier,
240                            call_count: tier_mgr.get_call_count(result.function_id),
241                            timestamp_us: metrics.elapsed_us(),
242                        });
243                    }
244                }
245            }
246        }
247    }
248
249    /// Get or create a feedback vector for the current function.
250    /// Returns None if tiered compilation is disabled.
251    #[inline]
252    pub(crate) fn current_feedback_vector(
253        &mut self,
254    ) -> Option<&mut crate::feedback::FeedbackVector> {
255        let func_id = self.call_stack.last()?.function_id? as usize;
256        if func_id >= self.feedback_vectors.len() {
257            return None;
258        }
259        if self.feedback_vectors[func_id].is_none() {
260            if self.tier_manager.is_none() {
261                return None;
262            }
263            self.feedback_vectors[func_id] =
264                Some(crate::feedback::FeedbackVector::new(func_id as u16));
265        }
266        self.feedback_vectors[func_id].as_mut()
267    }
268
269    /// Access the feedback vectors (for JIT compilation).
270    pub fn feedback_vectors(&self) -> &[Option<crate::feedback::FeedbackVector>] {
271        &self.feedback_vectors
272    }
273
274    /// Get a reference to the loaded program (for external JIT compilation).
275    pub fn program(&self) -> &BytecodeProgram {
276        &self.program
277    }
278
279    /// Get a reference to the time-travel debugger, if enabled.
280    pub fn time_travel(&self) -> Option<&time_travel::TimeTravel> {
281        self.time_travel.as_ref()
282    }
283
284    /// Get a mutable reference to the time-travel debugger, if enabled.
285    pub fn time_travel_mut(&mut self) -> Option<&mut time_travel::TimeTravel> {
286        self.time_travel.as_mut()
287    }
288
289    /// Get a reference to the extension module registry.
290    pub fn module_registry(&self) -> &shape_runtime::module_exports::ModuleExportRegistry {
291        &self.module_registry
292    }
293
294    /// Generate a unique future ID for spawned async tasks
295    pub(crate) fn next_future_id(&mut self) -> u64 {
296        self.future_id_counter += 1;
297        self.future_id_counter
298    }
299
300    /// Get function ID for fast repeated calls (avoids name lookup in hot loops)
301    pub fn get_function_id(&self, name: &str) -> Option<u16> {
302        self.program
303            .functions
304            .iter()
305            .position(|f| f.name == name)
306            .map(|id| id as u16)
307    }
308}