use alloc::{sync::Arc, vec::Vec};

use miden_core::{
    FMP_ADDR, FMP_INIT_VALUE, Program, ZERO,
    mast::{CallNode, MastForest, MastNodeExt, MastNodeId},
    stack::MIN_STACK_DEPTH,
    utils::range,
};

use crate::{
    AsyncHost, ContextId, ErrorContext, ExecutionError,
    continuation_stack::ContinuationStack,
    err_ctx,
    fast::{
        ExecutionContextInfo, FastProcessor, INITIAL_STACK_TOP_IDX, STACK_BUFFER_SIZE, Tracer,
        trace_state::NodeExecutionState,
    },
};

impl FastProcessor {
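    /// Starts the execution of a CALL or SYSCALL node.
    ///
    /// The current execution context is saved and the stack is truncated to MIN_STACK_DEPTH
    /// elements. For a syscall, the callee must be present in the program's kernel, and execution
    /// switches to the root context; otherwise a new context is created and FMP_INIT_VALUE is
    /// written to FMP_ADDR in that context's memory. Finally, the finish-call continuation and
    /// the callee are pushed onto the continuation stack.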
    #[allow(clippy::too_many_arguments)]
    #[inline(always)]
    pub(super) fn start_call_node(
        &mut self,
        call_node: &CallNode,
        current_node_id: MastNodeId,
        program: &Program,
        current_forest: &Arc<MastForest>,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<(), ExecutionError> {
        tracer.start_clock_cycle(
            self,
            NodeExecutionState::Start(current_node_id),
            continuation_stack,
            current_forest,
        );

        self.execute_before_enter_decorators(current_node_id, current_forest, host)?;

        let err_ctx = err_ctx!(current_forest, call_node, host);

        let callee_hash = current_forest
            .get_node_by_id(call_node.callee())
            .ok_or(ExecutionError::MastNodeNotFoundInForest { node_id: call_node.callee() })?
            .digest();

        self.save_context_and_truncate_stack(tracer);

        if call_node.is_syscall() {
            if !program.kernel().contains_proc(callee_hash) {
                return Err(ExecutionError::syscall_target_not_in_kernel(callee_hash, &err_ctx));
            }
            tracer.record_kernel_proc_access(callee_hash);

            self.ctx = ContextId::root();
        } else {
            let new_ctx: ContextId = self.get_next_ctx_id();

            self.ctx = new_ctx;
            self.caller_hash = callee_hash;

            self.memory
                .write_element(new_ctx, FMP_ADDR, FMP_INIT_VALUE, &err_ctx)
                .map_err(ExecutionError::MemoryError)?;
            tracer.record_memory_write_element(FMP_INIT_VALUE, FMP_ADDR, new_ctx, self.clk);
        }

        continuation_stack.push_finish_call(current_node_id);
        continuation_stack.push_start_node(call_node.callee());

        self.increment_clk(tracer);

        Ok(())
    }

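    /// Finishes the execution of a CALL or SYSCALL node by restoring the execution context that
    /// was saved when the node started executing.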
    #[inline(always)]
    pub(super) fn finish_call_node(
        &mut self,
        node_id: MastNodeId,
        current_forest: &Arc<MastForest>,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<(), ExecutionError> {
        tracer.start_clock_cycle(
            self,
            NodeExecutionState::End(node_id),
            continuation_stack,
            current_forest,
        );

        let call_node = current_forest[node_id].unwrap_call();
        let err_ctx = err_ctx!(current_forest, call_node, host);
        self.restore_context(tracer, &err_ctx)?;

        self.increment_clk(tracer);
        self.execute_after_exit_decorators(node_id, current_forest, host)
    }

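    /// Starts the execution of a DYN or DYNCALL node.
    ///
    /// The hash of the callee is read from memory at the address found on top of the stack, and
    /// that address is then dropped from the stack. For a DYNCALL, the current execution context
    /// is saved and a new context is created, as for a regular CALL. If the callee cannot be
    /// found in the current MAST forest, it is resolved through the host and the current forest
    /// is replaced with the forest that contains it.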
    #[inline(always)]
    pub(super) async fn start_dyn_node(
        &mut self,
        current_node_id: MastNodeId,
        current_forest: &mut Arc<MastForest>,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<(), ExecutionError> {
        tracer.start_clock_cycle(
            self,
            NodeExecutionState::Start(current_node_id),
            continuation_stack,
            current_forest,
        );

        self.execute_before_enter_decorators(current_node_id, current_forest, host)?;

        let dyn_node = current_forest[current_node_id].unwrap_dyn();

        let err_ctx = err_ctx!(&current_forest, dyn_node, host);

        let callee_hash = {
            let mem_addr = self.stack_get(0);
            let word = self
                .memory
                .read_word(self.ctx, mem_addr, self.clk, &err_ctx)
                .map_err(ExecutionError::MemoryError)?;
            tracer.record_memory_read_word(word, mem_addr, self.ctx, self.clk);

            word
        };

        self.decrement_stack_size(tracer);

        if dyn_node.is_dyncall() {
            let new_ctx: ContextId = self.get_next_ctx_id();

            self.save_context_and_truncate_stack(tracer);

            self.ctx = new_ctx;
            self.caller_hash = callee_hash;

            self.memory
                .write_element(new_ctx, FMP_ADDR, FMP_INIT_VALUE, &err_ctx)
                .map_err(ExecutionError::MemoryError)?;
            tracer.record_memory_write_element(FMP_INIT_VALUE, FMP_ADDR, new_ctx, self.clk);
        };

        continuation_stack.push_finish_dyn(current_node_id);

        match current_forest.find_procedure_root(callee_hash) {
            Some(callee_id) => {
                continuation_stack.push_start_node(callee_id);
            },
            None => {
                let (root_id, new_forest) = self
                    .load_mast_forest(
                        callee_hash,
                        host,
                        ExecutionError::dynamic_node_not_found,
                        &err_ctx,
                    )
                    .await?;
                tracer.record_mast_forest_resolution(root_id, &new_forest);

                continuation_stack.push_enter_forest(current_forest.clone());

                continuation_stack.push_start_node(root_id);

                *current_forest = new_forest;
            },
        }

        self.increment_clk(tracer);

        Ok(())
    }

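    /// Finishes the execution of a DYN or DYNCALL node. For a DYNCALL, the execution context
    /// saved when the node started executing is restored.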
    #[inline(always)]
    pub(super) fn finish_dyn_node(
        &mut self,
        node_id: MastNodeId,
        current_forest: &Arc<MastForest>,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<(), ExecutionError> {
        tracer.start_clock_cycle(
            self,
            NodeExecutionState::End(node_id),
            continuation_stack,
            current_forest,
        );

        let dyn_node = current_forest[node_id].unwrap_dyn();
        let err_ctx = err_ctx!(current_forest, dyn_node, host);
        if dyn_node.is_dyncall() {
            self.restore_context(tracer, &err_ctx)?;
        }

        self.increment_clk(tracer);
        self.execute_after_exit_decorators(node_id, current_forest, host)
    }

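    /// Returns the ID of the next execution context, derived from the current clock cycle
    /// (`clk + 1`).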
    pub fn get_next_ctx_id(&self) -> ContextId {
        (self.clk + 1).into()
    }

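    /// Saves the current execution context (overflow stack, context ID and caller hash) onto the
    /// call stack, and truncates the stack to its top MIN_STACK_DEPTH elements, zeroing out the
    /// saved overflow portion of the stack buffer.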
    fn save_context_and_truncate_stack(&mut self, tracer: &mut impl Tracer) {
        let overflow_stack = if self.stack_size() > MIN_STACK_DEPTH {
            let overflow_stack =
                self.stack[self.stack_bot_idx..self.stack_top_idx - MIN_STACK_DEPTH].to_vec();
            self.stack[self.stack_bot_idx..self.stack_top_idx - MIN_STACK_DEPTH].fill(ZERO);

            overflow_stack
        } else {
            Vec::new()
        };

        self.stack_bot_idx = self.stack_top_idx - MIN_STACK_DEPTH;

        self.call_stack.push(ExecutionContextInfo {
            overflow_stack,
            ctx: self.ctx,
            fn_hash: self.caller_hash,
        });

        tracer.start_context();
    }

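    /// Restores the execution context saved by the matching call to
    /// `save_context_and_truncate_stack`.
    ///
    /// Returns an error if the stack depth is greater than MIN_STACK_DEPTH when returning from
    /// the callee.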
    fn restore_context(
        &mut self,
        tracer: &mut impl Tracer,
        err_ctx: &impl ErrorContext,
    ) -> Result<(), ExecutionError> {
        if self.stack_size() > MIN_STACK_DEPTH {
            return Err(ExecutionError::invalid_stack_depth_on_return(self.stack_size(), err_ctx));
        }

        let ctx_info = self
            .call_stack
            .pop()
            .expect("execution context stack should never be empty when restoring context");

        self.restore_overflow_stack(&ctx_info);

        self.ctx = ctx_info.ctx;
        self.caller_hash = ctx_info.fn_hash;

        tracer.restore_context();

        Ok(())
    }

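    /// Copies the saved overflow stack back into the stack buffer, directly below the top
    /// MIN_STACK_DEPTH elements, and moves `stack_bot_idx` down accordingly.
    ///
    /// If there is not enough room below `stack_bot_idx` to hold the saved overflow elements,
    /// the stack is first repositioned within the buffer via `reset_stack_in_buffer`.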
    #[inline(always)]
    fn restore_overflow_stack(&mut self, ctx_info: &ExecutionContextInfo) {
        let target_overflow_len = ctx_info.overflow_stack.len();

        if target_overflow_len > self.stack_bot_idx {
            let new_stack_top_idx =
                core::cmp::min(INITIAL_STACK_TOP_IDX + target_overflow_len, STACK_BUFFER_SIZE - 1);

            self.reset_stack_in_buffer(new_stack_top_idx);
        }

        self.stack[range(self.stack_bot_idx - target_overflow_len, target_overflow_len)]
            .copy_from_slice(&ctx_info.overflow_stack);
        self.stack_bot_idx -= target_overflow_len;
    }
}