miden_processor/fast/
call_and_dyn.rs

use alloc::{sync::Arc, vec::Vec};

use miden_core::{
    FMP_ADDR, FMP_INIT_VALUE, Program, ZERO,
    mast::{CallNode, MastForest, MastNodeExt, MastNodeId},
    stack::MIN_STACK_DEPTH,
    utils::range,
};

use crate::{
    AsyncHost, ContextId, ErrorContext, ExecutionError,
    continuation_stack::ContinuationStack,
    err_ctx,
    fast::{
        ExecutionContextInfo, FastProcessor, INITIAL_STACK_TOP_IDX, STACK_BUFFER_SIZE, Tracer,
        trace_state::NodeExecutionState,
    },
};

impl FastProcessor {
    /// Executes a Call node from the start.
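    ///
    /// For both `call` and `syscall`, the current execution context is saved and the stack is
    /// truncated to 16 elements; a `call` then switches to a fresh context, while a `syscall`
    /// switches to the root context after verifying that the callee is a kernel procedure.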
    #[expect(clippy::too_many_arguments)]
    #[inline(always)]
    pub(super) fn start_call_node(
        &mut self,
        call_node: &CallNode,
        current_node_id: MastNodeId,
        program: &Program,
        current_forest: &Arc<MastForest>,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<(), ExecutionError> {
        tracer.start_clock_cycle(
            self,
            NodeExecutionState::Start(current_node_id),
            continuation_stack,
            current_forest,
        );

        // Execute any decorators that run before entering the node.
        self.execute_before_enter_decorators(current_node_id, current_forest, host)?;

        let err_ctx = err_ctx!(current_forest, call_node, host);

        let callee_hash = current_forest
            .get_node_by_id(call_node.callee())
            .ok_or(ExecutionError::MastNodeNotFoundInForest { node_id: call_node.callee() })?
            .digest();

        self.save_context_and_truncate_stack(tracer);

        if call_node.is_syscall() {
            // check if the callee is in the kernel
            if !program.kernel().contains_proc(callee_hash) {
                return Err(ExecutionError::syscall_target_not_in_kernel(callee_hash, &err_ctx));
            }
            tracer.record_kernel_proc_access(callee_hash);

            // set the system registers to the syscall context
            self.ctx = ContextId::root();
        } else {
            let new_ctx: ContextId = self.get_next_ctx_id();

            // Set the system registers to the callee context.
            self.ctx = new_ctx;
            self.caller_hash = callee_hash;

            // Initialize the frame pointer in memory for the new context.
            self.memory
                .write_element(new_ctx, FMP_ADDR, FMP_INIT_VALUE, &err_ctx)
                .map_err(ExecutionError::MemoryError)?;
            tracer.record_memory_write_element(FMP_INIT_VALUE, FMP_ADDR, new_ctx, self.clk);
        }

        // Push the callee onto the continuation stack.
        continuation_stack.push_finish_call(current_node_id);
        continuation_stack.push_start_node(call_node.callee());
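        // Note: the continuation stack is processed LIFO, so the callee's start entry (pushed
        // last) runs first, and the call's finish phase runs after the callee completes.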

        // Corresponds to the row inserted for the CALL or SYSCALL operation added to the trace.
        self.increment_clk(tracer);

        Ok(())
    }

    /// Executes the finish phase of a Call node.
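    ///
    /// This corresponds to the END row of the call: the caller's execution context, including
    /// its overflow stack, is restored before execution continues.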
    #[inline(always)]
    pub(super) fn finish_call_node(
        &mut self,
        node_id: MastNodeId,
        current_forest: &Arc<MastForest>,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<(), ExecutionError> {
        tracer.start_clock_cycle(
            self,
            NodeExecutionState::End(node_id),
            continuation_stack,
            current_forest,
        );

        // When the `no_err_ctx` feature is enabled, the `err_ctx!` macro expands to `()` and
        // ignores its arguments, which would leave this binding unused; the leading underscore
        // suppresses the warning in those feature combinations.
        let _call_node = current_forest[node_id].unwrap_call();
        let err_ctx = err_ctx!(current_forest, _call_node, host);

        // When returning from a function call or a syscall, restore the context of the system
        // registers and the operand stack to what it was prior to the call.
        self.restore_context(tracer, &err_ctx)?;

        // Corresponds to the row inserted for the END operation added to the trace.
        self.increment_clk(tracer);
        self.execute_after_exit_decorators(node_id, current_forest, host)
    }

    /// Executes a Dyn node from the start.
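    ///
    /// The element on top of the stack is interpreted as the memory address at which the hash of
    /// the callee is stored; this address is dropped from the stack before control is transferred
    /// to the callee.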
    #[inline(always)]
    pub(super) async fn start_dyn_node(
        &mut self,
        current_node_id: MastNodeId,
        current_forest: &mut Arc<MastForest>,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<(), ExecutionError> {
        tracer.start_clock_cycle(
            self,
            NodeExecutionState::Start(current_node_id),
            continuation_stack,
            current_forest,
        );

        // Execute any decorators that run before entering the node.
        self.execute_before_enter_decorators(current_node_id, current_forest, host)?;

        let dyn_node = current_forest[current_node_id].unwrap_dyn();

        let err_ctx = err_ctx!(&current_forest, dyn_node, host);

        // Retrieve the callee hash from memory, using the stack top as the memory address.
        let callee_hash = {
            let mem_addr = self.stack_get(0);
            let word = self
                .memory
                .read_word(self.ctx, mem_addr, self.clk, &err_ctx)
                .map_err(ExecutionError::MemoryError)?;
            tracer.record_memory_read_word(word, mem_addr, self.ctx, self.clk);

            word
        };

        // Drop the memory address from the stack. This needs to be done before saving the context.
        self.decrement_stack_size(tracer);

        // For dyncall,
        // - save the context and reset it,
        // - initialize the frame pointer in memory for the new context.
        if dyn_node.is_dyncall() {
            let new_ctx: ContextId = self.get_next_ctx_id();

            // Save the current state, and update the system registers.
            self.save_context_and_truncate_stack(tracer);

            self.ctx = new_ctx;
            self.caller_hash = callee_hash;

            // Initialize the frame pointer in memory for the new context.
            self.memory
                .write_element(new_ctx, FMP_ADDR, FMP_INIT_VALUE, &err_ctx)
                .map_err(ExecutionError::MemoryError)?;
            tracer.record_memory_write_element(FMP_INIT_VALUE, FMP_ADDR, new_ctx, self.clk);
        }

        // Update continuation stack
        // -----------------------------
        continuation_stack.push_finish_dyn(current_node_id);

        // If the callee is not in the program's MAST forest, try to find a MAST forest for it in
        // the host (corresponding to an external library loaded in the host); if none is found,
        // return an error.
        match current_forest.find_procedure_root(callee_hash) {
            Some(callee_id) => {
                continuation_stack.push_start_node(callee_id);
            },
            None => {
                let (root_id, new_forest) = self
                    .load_mast_forest(
                        callee_hash,
                        host,
                        ExecutionError::dynamic_node_not_found,
                        &err_ctx,
                    )
                    .await?;
                tracer.record_mast_forest_resolution(root_id, &new_forest);

                // Push the current forest onto the continuation stack so that we can return to it
                continuation_stack.push_enter_forest(current_forest.clone());

                // Push the root node of the external MAST forest onto the continuation stack.
                continuation_stack.push_start_node(root_id);

                // Set the new MAST forest as current
                *current_forest = new_forest;
            },
        }

        // Increment the clock, corresponding to the row inserted for the DYN or DYNCALL operation
        // added to the trace.
        self.increment_clk(tracer);

        Ok(())
    }

    /// Executes the finish phase of a Dyn node.
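    ///
    /// The caller's execution context is restored only for `dyncall`; a plain `dyn` executes in
    /// the caller's context, so there is nothing to restore.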
    #[inline(always)]
    pub(super) fn finish_dyn_node(
        &mut self,
        node_id: MastNodeId,
        current_forest: &Arc<MastForest>,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<(), ExecutionError> {
        tracer.start_clock_cycle(
            self,
            NodeExecutionState::End(node_id),
            continuation_stack,
            current_forest,
        );

        let dyn_node = current_forest[node_id].unwrap_dyn();
        let err_ctx = err_ctx!(current_forest, dyn_node, host);

        // For dyncall, restore the context.
        if dyn_node.is_dyncall() {
            self.restore_context(tracer, &err_ctx)?;
        }

        // Corresponds to the row inserted for the END operation added to the trace.
        self.increment_clk(tracer);
        self.execute_after_exit_decorators(node_id, current_forest, host)
    }

    // HELPERS
    // ----------------------------------------------------------------------------------------------

    /// Returns the next context ID that would be created given the current state.
    ///
    /// Note: this only applies to the context created by a `CALL` or `DYNCALL` operation; a
    /// `SYSCALL` never creates a new context, as it always switches back to the root context.
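    ///
    /// For example, if the `CALL` operation executes at `clk = 41`, the new context is assigned
    /// ID 42; context IDs are derived from the clock cycle at which the new context begins.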
    pub fn get_next_ctx_id(&self) -> ContextId {
        (self.clk + 1).into()
    }

    /// Saves the current execution context and truncates the stack to 16 elements in preparation
    /// for starting a new execution context.
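    ///
    /// Illustrative sketch of the stack buffer for a depth-20 stack (top of stack on the right):
    ///
    /// ```text
    /// before: [ .. | o0 o1 o2 o3 | t0 .. t15 | .. ]   depth 20
    /// after:  [ .. |  0  0  0  0 | t0 .. t15 | .. ]   depth 16, overflow_stack = [o0, o1, o2, o3]
    /// ```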
    fn save_context_and_truncate_stack(&mut self, tracer: &mut impl Tracer) {
        let overflow_stack = if self.stack_size() > MIN_STACK_DEPTH {
            // save the overflow stack, and zero out the buffer.
            //
            // Note: we need to zero the overflow buffer, since the new context expects ZEROs to
            // be pulled in if it decrements the stack size (e.g. by executing a `drop`).
            let overflow_stack =
                self.stack[self.stack_bot_idx..self.stack_top_idx - MIN_STACK_DEPTH].to_vec();
            self.stack[self.stack_bot_idx..self.stack_top_idx - MIN_STACK_DEPTH].fill(ZERO);

            overflow_stack
        } else {
            Vec::new()
        };

        self.stack_bot_idx = self.stack_top_idx - MIN_STACK_DEPTH;

        self.call_stack.push(ExecutionContextInfo {
            overflow_stack,
            ctx: self.ctx,
            fn_hash: self.caller_hash,
        });

        tracer.start_context();
    }

    /// Restores the execution context to the state it was in before the last `call`, `syscall` or
    /// `dyncall`.
    ///
    /// This includes restoring the overflow stack and the system parameters.
    ///
    /// # Errors
    /// - Returns an error if the operand stack depth is greater than 16 when the context is
    ///   restored.
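    ///
    /// For example, a callee that returns with 17 elements on the operand stack fails with an
    /// invalid-stack-depth error; any overflow elements must be dropped before returning.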
    fn restore_context(
        &mut self,
        tracer: &mut impl Tracer,
        err_ctx: &impl ErrorContext,
    ) -> Result<(), ExecutionError> {
        // when a call/dyncall/syscall node ends, stack depth must be exactly 16.
        if self.stack_size() > MIN_STACK_DEPTH {
            return Err(ExecutionError::invalid_stack_depth_on_return(self.stack_size(), err_ctx));
        }

        let ctx_info = self
            .call_stack
            .pop()
            .expect("execution context stack should never be empty when restoring context");

        // restore the overflow stack
        self.restore_overflow_stack(&ctx_info);

        // restore system parameters
        self.ctx = ctx_info.ctx;
        self.caller_hash = ctx_info.fn_hash;

        tracer.restore_context();

        Ok(())
    }

    /// Restores the overflow stack from a previous context.
    ///
    /// If necessary, moves the stack in the buffer to make room for the overflow stack to be
    /// restored.
    ///
    /// # Preconditions
    /// - The current stack depth is exactly `MIN_STACK_DEPTH` (16).
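    ///
    /// For example, if `stack_bot_idx` is 3 but 10 overflow elements must be restored, the stack
    /// is first shifted up within the buffer so that, after the copy, `stack_bot_idx` lands back
    /// at its initial position.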
    #[inline(always)]
    fn restore_overflow_stack(&mut self, ctx_info: &ExecutionContextInfo) {
        let target_overflow_len = ctx_info.overflow_stack.len();

        // Check if there's enough room to restore the overflow stack in the current stack buffer.
        if target_overflow_len > self.stack_bot_idx {
            // There's not enough room to restore the overflow stack, so we have to move the
            // location of the stack in the buffer. We reset it so that after restoring the
            // overflow stack, the stack_bot_idx is at its original position (i.e.
            // INITIAL_STACK_TOP_IDX - 16).
            let new_stack_top_idx =
                core::cmp::min(INITIAL_STACK_TOP_IDX + target_overflow_len, STACK_BUFFER_SIZE - 1);

            self.reset_stack_in_buffer(new_stack_top_idx);
        }

        // Restore the overflow stack.
        self.stack[range(self.stack_bot_idx - target_overflow_len, target_overflow_len)]
            .copy_from_slice(&ctx_info.overflow_stack);
        self.stack_bot_idx -= target_overflow_len;
    }
}