use alloc::{sync::Arc, vec::Vec};

use miden_core::{
    FMP_ADDR, FMP_INIT_VALUE, Program, ZERO,
    mast::{CallNode, MastForest, MastNodeExt, MastNodeId},
    stack::MIN_STACK_DEPTH,
    utils::range,
};

use crate::{
    AsyncHost, ContextId, ErrorContext, ExecutionError,
    continuation_stack::ContinuationStack,
    err_ctx,
    fast::{
        ExecutionContextInfo, FastProcessor, INITIAL_STACK_TOP_IDX, STACK_BUFFER_SIZE, Tracer,
        trace_state::NodeExecutionState,
    },
};

impl FastProcessor {
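    /// Starts the execution of a CALL or SYSCALL node.
    ///
    /// Saves the current execution context and truncates the stack to the minimum depth. For a
    /// SYSCALL, execution switches back to the root context and the callee must be a procedure
    /// in the program's kernel; for a CALL, a fresh context is created and its frame pointer is
    /// initialized. The callee and the corresponding "finish call" continuation are then pushed
    /// onto the continuation stack.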
    #[expect(clippy::too_many_arguments)]
    #[inline(always)]
    pub(super) fn start_call_node(
        &mut self,
        call_node: &CallNode,
        current_node_id: MastNodeId,
        program: &Program,
        current_forest: &Arc<MastForest>,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<(), ExecutionError> {
        tracer.start_clock_cycle(
            self,
            NodeExecutionState::Start(current_node_id),
            continuation_stack,
            current_forest,
        );

        self.execute_before_enter_decorators(current_node_id, current_forest, host)?;

        let err_ctx = err_ctx!(current_forest, call_node, host);

        let callee_hash = current_forest
            .get_node_by_id(call_node.callee())
            .ok_or(ExecutionError::MastNodeNotFoundInForest { node_id: call_node.callee() })?
            .digest();

        self.save_context_and_truncate_stack(tracer);

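        // A SYSCALL executes in the root context and may only target a procedure in the
        // program's kernel; a CALL gets a fresh context with a freshly initialized frame
        // pointer (FMP).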
        if call_node.is_syscall() {
            if !program.kernel().contains_proc(callee_hash) {
                return Err(ExecutionError::syscall_target_not_in_kernel(callee_hash, &err_ctx));
            }
            tracer.record_kernel_proc_access(callee_hash);

            self.ctx = ContextId::root();
        } else {
            let new_ctx: ContextId = self.get_next_ctx_id();

            self.ctx = new_ctx;
            self.caller_hash = callee_hash;

            self.memory
                .write_element(new_ctx, FMP_ADDR, FMP_INIT_VALUE, &err_ctx)
                .map_err(ExecutionError::MemoryError)?;
            tracer.record_memory_write_element(FMP_INIT_VALUE, FMP_ADDR, new_ctx, self.clk);
        }

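        // Schedule the "finish call" continuation before the callee so that it runs only after
        // the callee (pushed on top of it) has completed.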
        continuation_stack.push_finish_call(current_node_id);
        continuation_stack.push_start_node(call_node.callee());

        self.increment_clk(tracer);

        Ok(())
    }

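    /// Finishes the execution of a CALL or SYSCALL node by restoring the execution context that
    /// was saved when the node was started, and then executing the node's after-exit decorators.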
    #[inline(always)]
    pub(super) fn finish_call_node(
        &mut self,
        node_id: MastNodeId,
        current_forest: &Arc<MastForest>,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<(), ExecutionError> {
        tracer.start_clock_cycle(
            self,
            NodeExecutionState::End(node_id),
            continuation_stack,
            current_forest,
        );

        let _call_node = current_forest[node_id].unwrap_call();
        let err_ctx = err_ctx!(current_forest, _call_node, host);
        self.restore_context(tracer, &err_ctx)?;

        self.increment_clk(tracer);
        self.execute_after_exit_decorators(node_id, current_forest, host)
    }

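    /// Starts the execution of a DYN or DYNCALL node.
    ///
    /// The hash of the callee is read from memory at the address found on top of the stack, and
    /// that address is then dropped from the stack. For a DYNCALL, the current execution context
    /// is additionally saved and a fresh context is started. The callee is resolved in the
    /// current MAST forest if possible; otherwise the host is asked for the forest containing it.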
    #[inline(always)]
    pub(super) async fn start_dyn_node(
        &mut self,
        current_node_id: MastNodeId,
        current_forest: &mut Arc<MastForest>,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<(), ExecutionError> {
        tracer.start_clock_cycle(
            self,
            NodeExecutionState::Start(current_node_id),
            continuation_stack,
            current_forest,
        );

        self.execute_before_enter_decorators(current_node_id, current_forest, host)?;

        let dyn_node = current_forest[current_node_id].unwrap_dyn();

        let err_ctx = err_ctx!(&current_forest, dyn_node, host);

        let callee_hash = {
            let mem_addr = self.stack_get(0);
            let word = self
                .memory
                .read_word(self.ctx, mem_addr, self.clk, &err_ctx)
                .map_err(ExecutionError::MemoryError)?;
            tracer.record_memory_read_word(word, mem_addr, self.ctx, self.clk);

            word
        };

        self.decrement_stack_size(tracer);

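        // DYNCALL, like CALL, saves the current context and starts a fresh one with an
        // initialized frame pointer; a plain DYN executes the callee in the current context.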
        if dyn_node.is_dyncall() {
            let new_ctx: ContextId = self.get_next_ctx_id();

            self.save_context_and_truncate_stack(tracer);

            self.ctx = new_ctx;
            self.caller_hash = callee_hash;

            self.memory
                .write_element(new_ctx, FMP_ADDR, FMP_INIT_VALUE, &err_ctx)
                .map_err(ExecutionError::MemoryError)?;
            tracer.record_memory_write_element(FMP_INIT_VALUE, FMP_ADDR, new_ctx, self.clk);
        }

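        // Schedule the "finish dyn" continuation, then resolve the callee: it is either a
        // procedure root in the current forest, or it must be loaded from the host as a
        // separate MAST forest.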
        continuation_stack.push_finish_dyn(current_node_id);

        match current_forest.find_procedure_root(callee_hash) {
            Some(callee_id) => {
                continuation_stack.push_start_node(callee_id);
            },
            None => {
                let (root_id, new_forest) = self
                    .load_mast_forest(
                        callee_hash,
                        host,
                        ExecutionError::dynamic_node_not_found,
                        &err_ctx,
                    )
                    .await?;
                tracer.record_mast_forest_resolution(root_id, &new_forest);

                continuation_stack.push_enter_forest(current_forest.clone());

                continuation_stack.push_start_node(root_id);

                *current_forest = new_forest;
            },
        }

        self.increment_clk(tracer);

        Ok(())
    }

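    /// Finishes the execution of a DYN or DYNCALL node. For a DYNCALL, the execution context
    /// saved when the node was started is restored; the node's after-exit decorators are then
    /// executed.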
    #[inline(always)]
    pub(super) fn finish_dyn_node(
        &mut self,
        node_id: MastNodeId,
        current_forest: &Arc<MastForest>,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<(), ExecutionError> {
        tracer.start_clock_cycle(
            self,
            NodeExecutionState::End(node_id),
            continuation_stack,
            current_forest,
        );

        let dyn_node = current_forest[node_id].unwrap_dyn();
        let err_ctx = err_ctx!(current_forest, dyn_node, host);
        if dyn_node.is_dyncall() {
            self.restore_context(tracer, &err_ctx)?;
        }

        self.increment_clk(tracer);
        self.execute_after_exit_decorators(node_id, current_forest, host)
    }

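    /// Returns the ID to be used for the next execution context.
    ///
    /// New contexts are identified by the clock cycle at which they are entered (the cycle
    /// following the current one), which guarantees that each context gets a unique ID.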
    pub fn get_next_ctx_id(&self) -> ContextId {
        (self.clk + 1).into()
    }

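    /// Saves the current execution context onto the call stack and truncates the stack to the
    /// minimum depth.
    ///
    /// Any elements below the top 16 (the overflow table) are moved into the saved context and
    /// zeroed out in the stack buffer.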
    fn save_context_and_truncate_stack(&mut self, tracer: &mut impl Tracer) {
        let overflow_stack = if self.stack_size() > MIN_STACK_DEPTH {
            let overflow_stack =
                self.stack[self.stack_bot_idx..self.stack_top_idx - MIN_STACK_DEPTH].to_vec();
            self.stack[self.stack_bot_idx..self.stack_top_idx - MIN_STACK_DEPTH].fill(ZERO);

            overflow_stack
        } else {
            Vec::new()
        };

        self.stack_bot_idx = self.stack_top_idx - MIN_STACK_DEPTH;

        self.call_stack.push(ExecutionContextInfo {
            overflow_stack,
            ctx: self.ctx,
            fn_hash: self.caller_hash,
        });

        tracer.start_context();
    }

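    /// Restores the execution context saved by the matching call to
    /// `save_context_and_truncate_stack`.
    ///
    /// Returns an error if the stack depth is greater than the minimum depth at the time of the
    /// return. The saved overflow elements are copied back below the top of the stack, and the
    /// caller's context ID and function hash are reinstated.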
    fn restore_context(
        &mut self,
        tracer: &mut impl Tracer,
        err_ctx: &impl ErrorContext,
    ) -> Result<(), ExecutionError> {
        if self.stack_size() > MIN_STACK_DEPTH {
            return Err(ExecutionError::invalid_stack_depth_on_return(self.stack_size(), err_ctx));
        }

        let ctx_info = self
            .call_stack
            .pop()
            .expect("execution context stack should never be empty when restoring context");

        self.restore_overflow_stack(&ctx_info);

        self.ctx = ctx_info.ctx;
        self.caller_hash = ctx_info.fn_hash;

        tracer.restore_context();

        Ok(())
    }

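    /// Copies the overflow elements recorded in `ctx_info` back into the stack buffer, directly
    /// below the current stack bottom.
    ///
    /// If the saved overflow elements do not fit below `stack_bot_idx`, the stack is first moved
    /// within the buffer to make room for them.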
    #[inline(always)]
    fn restore_overflow_stack(&mut self, ctx_info: &ExecutionContextInfo) {
        let target_overflow_len = ctx_info.overflow_stack.len();

        if target_overflow_len > self.stack_bot_idx {
            let new_stack_top_idx =
                core::cmp::min(INITIAL_STACK_TOP_IDX + target_overflow_len, STACK_BUFFER_SIZE - 1);

            self.reset_stack_in_buffer(new_stack_top_idx);
        }

        self.stack[range(self.stack_bot_idx - target_overflow_len, target_overflow_len)]
            .copy_from_slice(&ctx_info.overflow_stack);
        self.stack_bot_idx -= target_overflow_len;
    }
}