miden_processor/fast/call_and_dyn.rs

use alloc::{sync::Arc, vec::Vec};

use miden_core::{
    FMP_ADDR, FMP_INIT_VALUE, Program, ZERO,
    mast::{CallNode, MastForest, MastNodeExt, MastNodeId},
    stack::MIN_STACK_DEPTH,
    utils::range,
};

use crate::{
    AsyncHost, ContextId, ErrorContext, ExecutionError,
    continuation_stack::ContinuationStack,
    err_ctx,
    fast::{
        ExecutionContextInfo, FastProcessor, INITIAL_STACK_TOP_IDX, STACK_BUFFER_SIZE, Tracer,
        trace_state::NodeExecutionState,
    },
};

impl FastProcessor {
    /// Executes a Call node from the start.
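    ///
    /// For a `call`, execution moves into a fresh context; for a `syscall`, execution moves to
    /// the root context, and the callee must be a procedure in the program's kernel.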
    #[allow(clippy::too_many_arguments)]
    #[inline(always)]
    pub(super) fn start_call_node(
        &mut self,
        call_node: &CallNode,
        current_node_id: MastNodeId,
        program: &Program,
        current_forest: &Arc<MastForest>,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<(), ExecutionError> {
        tracer.start_clock_cycle(
            self,
            NodeExecutionState::Start(current_node_id),
            continuation_stack,
            current_forest,
        );

        // Execute decorators that should be executed before entering the node
        self.execute_before_enter_decorators(current_node_id, current_forest, host)?;

        let err_ctx = err_ctx!(current_forest, call_node, host);

        let callee_hash = current_forest
            .get_node_by_id(call_node.callee())
            .ok_or(ExecutionError::MastNodeNotFoundInForest { node_id: call_node.callee() })?
            .digest();

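        // Save the caller's execution context and truncate the operand stack to 16 elements;
        // the callee starts with an empty overflow stack in both the call and syscall cases.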
        self.save_context_and_truncate_stack(tracer);

        if call_node.is_syscall() {
            // check if the callee is in the kernel
            if !program.kernel().contains_proc(callee_hash) {
                return Err(ExecutionError::syscall_target_not_in_kernel(callee_hash, &err_ctx));
            }
            tracer.record_kernel_proc_access(callee_hash);

            // set the system registers to the syscall context
            self.ctx = ContextId::root();
        } else {
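            // Derive a fresh, unique context ID from the current clock cycle.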
            let new_ctx: ContextId = self.get_next_ctx_id();

            // Set the system registers to the callee context.
            self.ctx = new_ctx;
            self.caller_hash = callee_hash;

            // Initialize the frame pointer in memory for the new context.
            self.memory
                .write_element(new_ctx, FMP_ADDR, FMP_INIT_VALUE, &err_ctx)
                .map_err(ExecutionError::MemoryError)?;
            tracer.record_memory_write_element(FMP_INIT_VALUE, FMP_ADDR, new_ctx, self.clk);
        }

        // Push the finish phase for this node onto the continuation stack, followed by the
        // callee, so that the callee executes next and the CALL or SYSCALL node is finalized
        // once it returns.
        continuation_stack.push_finish_call(current_node_id);
        continuation_stack.push_start_node(call_node.callee());

        // Corresponds to the row inserted for the CALL or SYSCALL operation added to the trace.
        self.increment_clk(tracer);

        Ok(())
    }

    /// Executes the finish phase of a Call node.
    #[inline(always)]
    pub(super) fn finish_call_node(
        &mut self,
        node_id: MastNodeId,
        current_forest: &Arc<MastForest>,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<(), ExecutionError> {
        tracer.start_clock_cycle(
            self,
            NodeExecutionState::End(node_id),
            continuation_stack,
            current_forest,
        );

        let call_node = current_forest[node_id].unwrap_call();
        let err_ctx = err_ctx!(current_forest, call_node, host);

        // When returning from a function call or a syscall, restore the system registers and the
        // operand stack to their states prior to the call.
        self.restore_context(tracer, &err_ctx)?;

        // Corresponds to the row inserted for the END operation added to the trace.
        self.increment_clk(tracer);
        self.execute_after_exit_decorators(node_id, current_forest, host)
    }

    /// Executes a Dyn node from the start.
    #[inline(always)]
    pub(super) async fn start_dyn_node(
        &mut self,
        current_node_id: MastNodeId,
        current_forest: &mut Arc<MastForest>,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<(), ExecutionError> {
        tracer.start_clock_cycle(
            self,
            NodeExecutionState::Start(current_node_id),
            continuation_stack,
            current_forest,
        );

        // Execute decorators that should be executed before entering the node
        self.execute_before_enter_decorators(current_node_id, current_forest, host)?;

        let dyn_node = current_forest[current_node_id].unwrap_dyn();
        let err_ctx = err_ctx!(&current_forest, dyn_node, host);

        // Retrieve the callee hash from memory, using the stack top as the memory address.
        let callee_hash = {
            let mem_addr = self.stack_get(0);
            let word = self
                .memory
                .read_word(self.ctx, mem_addr, self.clk, &err_ctx)
                .map_err(ExecutionError::MemoryError)?;
            tracer.record_memory_read_word(word, mem_addr, self.ctx, self.clk);

            word
        };

        // Drop the memory address from the stack. This needs to be done before saving the context.
        self.decrement_stack_size(tracer);

        // For dyncall:
        // - save the context and reset it,
        // - initialize the frame pointer in memory for the new context.
        if dyn_node.is_dyncall() {
            let new_ctx: ContextId = self.get_next_ctx_id();

            // Save the current state, and update the system registers.
            self.save_context_and_truncate_stack(tracer);

            self.ctx = new_ctx;
            self.caller_hash = callee_hash;

            // Initialize the frame pointer in memory for the new context.
            self.memory
                .write_element(new_ctx, FMP_ADDR, FMP_INIT_VALUE, &err_ctx)
                .map_err(ExecutionError::MemoryError)?;
            tracer.record_memory_write_element(FMP_INIT_VALUE, FMP_ADDR, new_ctx, self.clk);
        }

        // Update continuation stack
        // -----------------------------
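        // Push the finish phase for this Dyn node first: the continuation stack is processed in
        // LIFO order, so the callee (pushed next, below) executes before the node is finalized.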
        continuation_stack.push_finish_dyn(current_node_id);

        // if the callee is not in the program's MAST forest, try to find a MAST forest for it in
        // the host (corresponding to an external library loaded in the host); if none are found,
        // return an error.
        match current_forest.find_procedure_root(callee_hash) {
            Some(callee_id) => {
                continuation_stack.push_start_node(callee_id);
            },
            None => {
                let (root_id, new_forest) = self
                    .load_mast_forest(
                        callee_hash,
                        host,
                        ExecutionError::dynamic_node_not_found,
                        &err_ctx,
                    )
                    .await?;
                tracer.record_mast_forest_resolution(root_id, &new_forest);

                // Push current forest to the continuation stack so that we can return to it
                continuation_stack.push_enter_forest(current_forest.clone());

                // Push the root node of the external MAST forest onto the continuation stack.
                continuation_stack.push_start_node(root_id);

                // Set the new MAST forest as current
                *current_forest = new_forest;
            },
        }

        // Increment the clock, corresponding to the row inserted for the DYN or DYNCALL operation
        // added to the trace.
        self.increment_clk(tracer);

        Ok(())
    }

    /// Executes the finish phase of a Dyn node.
    #[inline(always)]
    pub(super) fn finish_dyn_node(
        &mut self,
        node_id: MastNodeId,
        current_forest: &Arc<MastForest>,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<(), ExecutionError> {
        tracer.start_clock_cycle(
            self,
            NodeExecutionState::End(node_id),
            continuation_stack,
            current_forest,
        );

        let dyn_node = current_forest[node_id].unwrap_dyn();
        let err_ctx = err_ctx!(current_forest, dyn_node, host);
        // For dyncall, restore the context.
        if dyn_node.is_dyncall() {
            self.restore_context(tracer, &err_ctx)?;
        }

        // Corresponds to the row inserted for the END operation added to the trace.
        self.increment_clk(tracer);
        self.execute_after_exit_decorators(node_id, current_forest, host)
    }

    // HELPERS
    // --------------------------------------------------------------------------------------------

    /// Returns the next context ID that would be created given the current state.
    ///
    /// Note: this only applies to the context created by a `CALL` or `DYNCALL` operation; a
    /// `SYSCALL` does not create a new context, since it always goes back to the root context.
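    ///
    /// Because the clock strictly increases and the `CALL`/`DYNCALL` operation itself consumes a
    /// clock cycle, `clk + 1` yields an ID that no previously created context can have.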
    pub fn get_next_ctx_id(&self) -> ContextId {
        (self.clk + 1).into()
    }

    /// Saves the current execution context and truncates the stack to 16 elements in preparation
    /// for starting a new execution context.
    fn save_context_and_truncate_stack(&mut self, tracer: &mut impl Tracer) {
        let overflow_stack = if self.stack_size() > MIN_STACK_DEPTH {
            // Save the overflow stack, and zero out the buffer.
            //
            // Note: we need to zero the overflow buffer, since the new context expects ZEROs to
            // be pulled in if it decrements the stack size (e.g. by executing a `drop`).
            let overflow_stack =
                self.stack[self.stack_bot_idx..self.stack_top_idx - MIN_STACK_DEPTH].to_vec();
            self.stack[self.stack_bot_idx..self.stack_top_idx - MIN_STACK_DEPTH].fill(ZERO);

            overflow_stack
        } else {
            Vec::new()
        };

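        // Raise the stack bottom so the new context sees exactly MIN_STACK_DEPTH (16) elements.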
        self.stack_bot_idx = self.stack_top_idx - MIN_STACK_DEPTH;

        self.call_stack.push(ExecutionContextInfo {
            overflow_stack,
            ctx: self.ctx,
            fn_hash: self.caller_hash,
        });

        tracer.start_context();
    }

    /// Restores the execution context to the state it was in before the last `call`, `syscall` or
    /// `dyncall`.
    ///
    /// This includes restoring the overflow stack and the system parameters.
    ///
    /// # Errors
    /// - Returns an error if the operand stack depth is greater than `MIN_STACK_DEPTH` (16) when
    ///   the context ends.
    fn restore_context(
        &mut self,
        tracer: &mut impl Tracer,
        err_ctx: &impl ErrorContext,
    ) -> Result<(), ExecutionError> {
        // when a call/dyncall/syscall node ends, stack depth must be exactly 16.
        if self.stack_size() > MIN_STACK_DEPTH {
            return Err(ExecutionError::invalid_stack_depth_on_return(self.stack_size(), err_ctx));
        }

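        // Each `call`/`dyncall`/`syscall` pushed one entry onto the call stack, so a matching
        // entry must exist here; an empty call stack indicates a bug in the processor.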
        let ctx_info = self
            .call_stack
            .pop()
            .expect("execution context stack should never be empty when restoring context");

        // restore the overflow stack
        self.restore_overflow_stack(&ctx_info);

        // restore system parameters
        self.ctx = ctx_info.ctx;
        self.caller_hash = ctx_info.fn_hash;

        tracer.restore_context();

        Ok(())
    }

    /// Restores the overflow stack from a previous context.
    ///
    /// If necessary, moves the stack in the buffer to make room for the overflow stack to be
    /// restored.
    ///
    /// # Preconditions
    /// - The current stack depth is exactly `MIN_STACK_DEPTH` (16).
    #[inline(always)]
    fn restore_overflow_stack(&mut self, ctx_info: &ExecutionContextInfo) {
        let target_overflow_len = ctx_info.overflow_stack.len();

        // Check if there's enough room to restore the overflow stack in the current stack buffer.
        if target_overflow_len > self.stack_bot_idx {
            // There's not enough room to restore the overflow stack, so we have to move the
            // location of the stack in the buffer. We reset it so that, after restoring the
            // overflow stack, stack_bot_idx ends up at its original position (i.e.
            // INITIAL_STACK_TOP_IDX - 16). If the overflow stack is too large even for that, the
            // new stack top is capped at STACK_BUFFER_SIZE - 1.
            let new_stack_top_idx =
                core::cmp::min(INITIAL_STACK_TOP_IDX + target_overflow_len, STACK_BUFFER_SIZE - 1);

            self.reset_stack_in_buffer(new_stack_top_idx);
        }

        // Restore the overflow stack below the current stack bottom.
        self.stack[range(self.stack_bot_idx - target_overflow_len, target_overflow_len)]
            .copy_from_slice(&ctx_info.overflow_stack);
        self.stack_bot_idx -= target_overflow_len;
    }
}
351}