use alloc::{sync::Arc, vec::Vec};
use core::cmp::min;

use memory::Memory;
use miden_air::RowIndex;
use miden_core::{
    Decorator, DecoratorIterator, EMPTY_WORD, Felt, ONE, Operation, Program, StackOutputs,
    WORD_SIZE, Word, ZERO,
    mast::{
        BasicBlockNode, CallNode, ExternalNode, JoinNode, LoopNode, MastForest, MastNode,
        MastNodeId, OP_GROUP_SIZE, OpBatch, SplitNode,
    },
    stack::MIN_STACK_DEPTH,
    utils::range,
};

use crate::{
    AdviceInputs, AdviceProvider, AsyncHost, ContextId, ErrorContext, ExecutionError, FMP_MIN,
    ProcessState, SYSCALL_FMP_MIN,
    chiplets::Ace,
    continuation_stack::{Continuation, ContinuationStack},
    err_ctx,
};

mod memory;

mod circuit_eval;
mod crypto_ops;
mod field_ops;
mod fri_ops;
mod horner_ops;
mod io_ops;
mod stack_ops;
mod sys_ops;
mod u32_ops;

#[cfg(test)]
mod tests;

/// The size of the buffer in which the stack is stored, in field elements.
const STACK_BUFFER_SIZE: usize = 6850;

/// The initial position of the top of the stack within the stack buffer.
const INITIAL_STACK_TOP_IDX: usize = 250;

/// The size of a word, as a field element.
const WORD_SIZE_FELT: Felt = Felt::new(4);

/// The size of a double word, as a field element.
const DOUBLE_WORD_SIZE: Felt = Felt::new(8);

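/// A fast processor which executes programs against an async host.
///
/// The stack lives in a fixed-size buffer; the elements of the current execution context sit
/// between `stack_bot_idx` and `stack_top_idx`. The processor is optimized for execution speed
/// and does not build an execution trace.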
#[derive(Debug)]
pub struct FastProcessor {
    /// The buffer in which the stack is stored.
    pub(super) stack: [Felt; STACK_BUFFER_SIZE],
    /// The index one past the top element of the stack in the buffer.
    stack_top_idx: usize,
    /// The index of the bottom element of the stack in the buffer.
    stack_bot_idx: usize,
    /// The number of stack size changes that may still occur before the next bounds check of the
    /// stack buffer is required.
    bounds_check_counter: usize,

    /// The current clock cycle.
    pub(super) clk: RowIndex,

    /// The current execution context ID.
    pub(super) ctx: ContextId,

    /// The free memory pointer.
    pub(super) fmp: Felt,

    /// Whether the processor is currently executing a syscall.
    in_syscall: bool,

    /// The hash of the procedure that initiated the current execution context.
    pub(super) caller_hash: Word,

    /// The advice provider.
    pub(super) advice: AdviceProvider,

    /// The processor's memory.
    pub(super) memory: Memory,

    /// State used for arithmetic circuit evaluation (the `EvalCircuit` operation).
    pub(super) ace: Ace,

    /// Execution contexts saved on `call`, `syscall`, and `dyncall`, and restored on return.
    call_stack: Vec<ExecutionContextInfo>,

    /// Whether the processor is running in debug mode.
    in_debug_mode: bool,
}

impl FastProcessor {
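    /// Creates a new `FastProcessor` instance with the given stack inputs and no advice inputs.
    ///
    /// The last element of `stack_inputs` ends up on top of the stack.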
    pub fn new(stack_inputs: &[Felt]) -> Self {
        Self::initialize(stack_inputs, AdviceInputs::default(), false)
    }

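    /// Creates a new `FastProcessor` instance with the given stack and advice inputs.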
    pub fn new_with_advice_inputs(stack_inputs: &[Felt], advice_inputs: AdviceInputs) -> Self {
        Self::initialize(stack_inputs, advice_inputs, false)
    }

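    /// Creates a new `FastProcessor` instance, set to debug mode, with the given stack and
    /// advice inputs.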
    pub fn new_debug(stack_inputs: &[Felt], advice_inputs: AdviceInputs) -> Self {
        Self::initialize(stack_inputs, advice_inputs, true)
    }

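    /// Shared constructor behind all `new_*` variants.
    ///
    /// # Panics
    /// Panics if `stack_inputs` contains more than `MIN_STACK_DEPTH` elements.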
    fn initialize(stack_inputs: &[Felt], advice_inputs: AdviceInputs, in_debug_mode: bool) -> Self {
        assert!(stack_inputs.len() <= MIN_STACK_DEPTH);

        let stack_top_idx = INITIAL_STACK_TOP_IDX;
        let stack = {
            let mut stack = [ZERO; STACK_BUFFER_SIZE];
            let bottom_idx = stack_top_idx - stack_inputs.len();

            stack[bottom_idx..stack_top_idx].copy_from_slice(stack_inputs);
            stack
        };

        let stack_bot_idx = stack_top_idx - MIN_STACK_DEPTH;

        let bounds_check_counter = stack_bot_idx;
        Self {
            advice: advice_inputs.into(),
            stack,
            stack_top_idx,
            stack_bot_idx,
            bounds_check_counter,
            clk: 0_u32.into(),
            ctx: 0_u32.into(),
            fmp: Felt::new(FMP_MIN),
            in_syscall: false,
            caller_hash: EMPTY_WORD,
            memory: Memory::new(),
            call_stack: Vec::new(),
            ace: Ace::default(),
            in_debug_mode,
        }
    }

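    /// Returns the elements currently on the stack; the top of the stack is the last element of
    /// the returned slice.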
    pub fn stack(&self) -> &[Felt] {
        &self.stack[self.stack_bot_idx..self.stack_top_idx]
    }

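    /// Returns the stack element at the specified index, where index 0 is the top of the stack.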
    #[inline(always)]
    pub fn stack_get(&self, idx: usize) -> Felt {
        self.stack[self.stack_top_idx - idx - 1]
    }

    /// Returns a mutable reference to the stack element at the specified index, where index 0 is
    /// the top of the stack.
    #[inline(always)]
    pub fn stack_get_mut(&mut self, idx: usize) -> &mut Felt {
        &mut self.stack[self.stack_top_idx - idx - 1]
    }

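    /// Returns the word made up of the stack elements at indices `start_idx + 3` down to
    /// `start_idx`, where index 0 is the top of the stack. In other words, the element at
    /// `start_idx` becomes the last element of the returned word.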
    #[inline(always)]
    pub fn stack_get_word(&self, start_idx: usize) -> Word {
        debug_assert!(start_idx < MIN_STACK_DEPTH);

        let word_start_idx = self.stack_top_idx - start_idx - 4;
        let result: [Felt; WORD_SIZE] =
            self.stack[range(word_start_idx, WORD_SIZE)].try_into().unwrap();
        result.into()
    }

    /// Returns the current depth of the stack.
    #[inline(always)]
    pub fn stack_depth(&self) -> u32 {
        (self.stack_top_idx - self.stack_bot_idx) as u32
    }

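    /// Writes the given element to the stack at the specified index, where index 0 is the top of
    /// the stack.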
    #[inline(always)]
    pub fn stack_write(&mut self, idx: usize, element: Felt) {
        self.stack[self.stack_top_idx - idx - 1] = element
    }

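    /// Writes the given word to the stack, mirroring `stack_get_word`: the last element of the
    /// word lands at index `start_idx`, where index 0 is the top of the stack.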
    #[inline(always)]
    pub fn stack_write_word(&mut self, start_idx: usize, word: &Word) {
        debug_assert!(start_idx < MIN_STACK_DEPTH);

        let word_start_idx = self.stack_top_idx - start_idx - 4;
        let source: [Felt; WORD_SIZE] = (*word).into();
        self.stack[range(word_start_idx, WORD_SIZE)].copy_from_slice(&source)
    }

    /// Swaps the stack elements at the specified indices, where index 0 is the top of the stack.
    #[inline(always)]
    pub fn stack_swap(&mut self, idx1: usize, idx2: usize) {
        let a = self.stack_get(idx1);
        let b = self.stack_get(idx2);
        self.stack_write(idx1, b);
        self.stack_write(idx2, a);
    }

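    /// Executes the given program against the provided host and returns the stack outputs
    /// together with the final state of the advice provider.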
    pub async fn execute(
        mut self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<(StackOutputs, AdviceProvider), ExecutionError> {
        let stack_outputs = self.execute_impl(program, host).await?;

        Ok((stack_outputs, self.advice))
    }

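    /// Executes the given program and returns the stack outputs.
    ///
    /// Execution is driven by an explicit continuation stack rather than by recursion: each MAST
    /// node is started, its children (if any) are scheduled, and a matching "finish" continuation
    /// runs once they complete. At the end, the remaining stack is converted into `StackOutputs`,
    /// which fails if more than `MIN_STACK_DEPTH` elements are left on the stack.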
    async fn execute_impl(
        &mut self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<StackOutputs, ExecutionError> {
        let mut continuation_stack = ContinuationStack::new(program);
        let mut current_forest = program.mast_forest().clone();

        while let Some(processing_step) = continuation_stack.pop_continuation() {
            match processing_step {
                Continuation::StartNode(node_id) => {
                    let node = current_forest.get_node_by_id(node_id).unwrap();
                    match node {
                        MastNode::Block(basic_block_node) => {
                            self.execute_basic_block_node(
                                basic_block_node,
                                node_id,
                                &current_forest,
                                host,
                            )
                            .await?
                        },
                        MastNode::Join(join_node) => self.start_join_node(
                            join_node,
                            node_id,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                        )?,
                        MastNode::Split(split_node) => self.start_split_node(
                            split_node,
                            node_id,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                        )?,
                        MastNode::Loop(loop_node) => self.start_loop_node(
                            loop_node,
                            node_id,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                        )?,
                        MastNode::Call(call_node) => self.start_call_node(
                            call_node,
                            node_id,
                            program,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                        )?,
                        MastNode::Dyn(_dyn_node) => {
                            self.start_dyn_node(
                                node_id,
                                &mut current_forest,
                                &mut continuation_stack,
                                host,
                            )
                            .await?
                        },
                        MastNode::External(_external_node) => {
                            self.execute_external_node(
                                node_id,
                                &mut current_forest,
                                &mut continuation_stack,
                                host,
                            )
                            .await?
                        },
                    }
                },
                Continuation::FinishJoin(node_id) => {
                    self.finish_join_node(node_id, &current_forest, host)?
                },
                Continuation::FinishSplit(node_id) => {
                    self.finish_split_node(node_id, &current_forest, host)?
                },
                Continuation::FinishLoop(node_id) => {
                    self.finish_loop_node(node_id, &current_forest, &mut continuation_stack, host)?
                },
                Continuation::FinishCall(node_id) => {
                    self.finish_call_node(node_id, &current_forest, host)?
                },
                Continuation::FinishDyn(node_id) => {
                    self.finish_dyn_node(node_id, &current_forest, host)?
                },
                Continuation::EnterForest(previous_forest) => {
                    current_forest = previous_forest;
                },
            }
        }

        StackOutputs::new(
            self.stack[self.stack_bot_idx..self.stack_top_idx]
                .iter()
                .rev()
                .copied()
                .collect(),
        )
        .map_err(|_| {
            ExecutionError::OutputStackOverflow(
                self.stack_top_idx - self.stack_bot_idx - MIN_STACK_DEPTH,
            )
        })
    }

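    /// Starts executing a Join node: runs its `before_enter` decorators, advances the clock
    /// (mirroring the row the trace-generating processor adds for the JOIN operation), and
    /// schedules the first child, then the second child, followed by the finish continuation.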
    #[inline(always)]
    fn start_join_node(
        &mut self,
        join_node: &JoinNode,
        node_id: MastNodeId,
        current_forest: &MastForest,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        self.execute_before_enter_decorators(node_id, current_forest, host)?;

        self.clk += 1_u32;

        continuation_stack.push_finish_join(node_id);
        continuation_stack.push_start_node(join_node.second());
        continuation_stack.push_start_node(join_node.first());
        Ok(())
    }

    #[inline(always)]
    fn finish_join_node(
        &mut self,
        node_id: MastNodeId,
        current_forest: &MastForest,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        self.clk += 1_u32;

        self.execute_after_exit_decorators(node_id, current_forest, host)
    }

    #[inline(always)]
    fn start_split_node(
        &mut self,
        split_node: &SplitNode,
        node_id: MastNodeId,
        current_forest: &MastForest,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        self.execute_before_enter_decorators(node_id, current_forest, host)?;

        self.clk += 1_u32;

        let condition = self.stack_get(0);

        self.decrement_stack_size();

        continuation_stack.push_finish_split(node_id);
        if condition == ONE {
            continuation_stack.push_start_node(split_node.on_true());
        } else if condition == ZERO {
            continuation_stack.push_start_node(split_node.on_false());
        } else {
            let err_ctx = err_ctx!(current_forest, split_node, host);
            return Err(ExecutionError::not_binary_value_if(condition, &err_ctx));
        };
        Ok(())
    }

    #[inline(always)]
    fn finish_split_node(
        &mut self,
        node_id: MastNodeId,
        current_forest: &MastForest,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        self.clk += 1_u32;

        self.execute_after_exit_decorators(node_id, current_forest, host)
    }

    #[inline(always)]
    fn start_loop_node(
        &mut self,
        loop_node: &LoopNode,
        current_node_id: MastNodeId,
        current_forest: &MastForest,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        self.execute_before_enter_decorators(current_node_id, current_forest, host)?;

        self.clk += 1_u32;

        let condition = self.stack_get(0);

        self.decrement_stack_size();

        if condition == ONE {
            continuation_stack.push_finish_loop(current_node_id);
            continuation_stack.push_start_node(loop_node.body());
        } else if condition == ZERO {
            self.clk += 1_u32;
        } else {
            let err_ctx = err_ctx!(current_forest, loop_node, host);
            return Err(ExecutionError::not_binary_value_loop(condition, &err_ctx));
        }
        Ok(())
    }

    #[inline(always)]
    fn finish_loop_node(
        &mut self,
        current_node_id: MastNodeId,
        current_forest: &MastForest,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        let condition = self.stack_get(0);
        self.decrement_stack_size();

        let loop_node = current_forest[current_node_id].unwrap_loop();
        if condition == ONE {
            self.clk += 1_u32;
            continuation_stack.push_finish_loop(current_node_id);
            continuation_stack.push_start_node(loop_node.body());
        } else if condition == ZERO {
            self.clk += 1_u32;

            self.execute_after_exit_decorators(current_node_id, current_forest, host)?;
        } else {
            let err_ctx = err_ctx!(current_forest, loop_node, host);
            return Err(ExecutionError::not_binary_value_loop(condition, &err_ctx));
        }
        Ok(())
    }

    #[inline(always)]
    fn start_call_node(
        &mut self,
        call_node: &CallNode,
        current_node_id: MastNodeId,
        program: &Program,
        current_forest: &MastForest,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        self.execute_before_enter_decorators(current_node_id, current_forest, host)?;

        let err_ctx = err_ctx!(current_forest, call_node, host);

        self.clk += 1_u32;

        // calls and syscalls are not allowed while already inside a syscall
        if self.in_syscall {
            let instruction = if call_node.is_syscall() { "syscall" } else { "call" };
            return Err(ExecutionError::CallInSyscall(instruction));
        }

        let callee_hash = current_forest
            .get_node_by_id(call_node.callee())
            .ok_or(ExecutionError::MastNodeNotFoundInForest { node_id: call_node.callee() })?
            .digest();

        self.save_context_and_truncate_stack();

        if call_node.is_syscall() {
            if !program.kernel().contains_proc(callee_hash) {
                return Err(ExecutionError::syscall_target_not_in_kernel(callee_hash, &err_ctx));
            }

            self.ctx = ContextId::root();
            self.fmp = SYSCALL_FMP_MIN.into();
            self.in_syscall = true;
        } else {
            self.ctx = self.clk.into();
            self.fmp = Felt::new(FMP_MIN);
            self.caller_hash = callee_hash;
        }

        continuation_stack.push_finish_call(current_node_id);
        continuation_stack.push_start_node(call_node.callee());
        Ok(())
    }

    #[inline(always)]
    fn finish_call_node(
        &mut self,
        node_id: MastNodeId,
        current_forest: &MastForest,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        let call_node = current_forest[node_id].unwrap_call();
        let err_ctx = err_ctx!(current_forest, call_node, host);
        self.restore_context(&err_ctx)?;

        self.clk += 1_u32;

        self.execute_after_exit_decorators(node_id, current_forest, host)
    }

    #[inline(always)]
    async fn start_dyn_node(
        &mut self,
        current_node_id: MastNodeId,
        current_forest: &mut Arc<MastForest>,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        self.execute_before_enter_decorators(current_node_id, current_forest, host)?;

        self.clk += 1_u32;

        let dyn_node = current_forest[current_node_id].unwrap_dyn();

        // dyncall is not allowed inside a syscall
        if dyn_node.is_dyncall() && self.in_syscall {
            return Err(ExecutionError::CallInSyscall("dyncall"));
        }

        let err_ctx = err_ctx!(&current_forest, dyn_node, host);

        // the callee hash is read from memory, at the address on top of the stack
        let callee_hash = {
            let mem_addr = self.stack_get(0);
            self.memory
                .read_word(self.ctx, mem_addr, self.clk, &err_ctx)
                .map_err(ExecutionError::MemoryError)?
        };

        self.decrement_stack_size();

        // dyncall (but not dyn) starts a new execution context
        if dyn_node.is_dyncall() {
            self.save_context_and_truncate_stack();
            self.ctx = self.clk.into();
            self.fmp = Felt::new(FMP_MIN);
            self.caller_hash = callee_hash;
        };

        continuation_stack.push_finish_dyn(current_node_id);

        // if the callee is not in the current forest, ask the host for the forest that contains it
        match current_forest.find_procedure_root(callee_hash) {
            Some(callee_id) => {
                continuation_stack.push_start_node(callee_id);
            },
            None => {
                let (root_id, new_forest) = self
                    .load_mast_forest(
                        callee_hash,
                        host,
                        ExecutionError::dynamic_node_not_found,
                        &err_ctx,
                    )
                    .await?;

                continuation_stack.push_enter_forest(current_forest.clone());

                continuation_stack.push_start_node(root_id);

                *current_forest = new_forest;
            },
        }
        Ok(())
    }

    #[inline(always)]
    fn finish_dyn_node(
        &mut self,
        node_id: MastNodeId,
        current_forest: &MastForest,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        let dyn_node = current_forest[node_id].unwrap_dyn();
        let err_ctx = err_ctx!(current_forest, dyn_node, host);
        if dyn_node.is_dyncall() {
            self.restore_context(&err_ctx)?;
        }

        self.clk += 1_u32;

        self.execute_after_exit_decorators(node_id, current_forest, host)
    }

    #[inline(always)]
    async fn execute_external_node(
        &mut self,
        node_id: MastNodeId,
        current_forest: &mut Arc<MastForest>,
        continuation_stack: &mut ContinuationStack,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        self.execute_before_enter_decorators(node_id, current_forest, host)?;

        let external_node = current_forest[node_id].unwrap_external();
        let (root_id, new_mast_forest) = self.resolve_external_node(external_node, host).await?;

        continuation_stack.push_enter_forest(current_forest.clone());

        continuation_stack.push_start_node(root_id);

        self.execute_after_exit_decorators(node_id, current_forest, host)?;

        *current_forest = new_mast_forest;

        Ok(())
    }

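    /// Executes all operation batches of a basic block, running the decorators registered for
    /// specific operation indices as they are reached, and finally running any decorators that
    /// remain after the last operation.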
    #[inline(always)]
    async fn execute_basic_block_node(
        &mut self,
        basic_block_node: &BasicBlockNode,
        node_id: MastNodeId,
        program: &MastForest,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        self.execute_before_enter_decorators(node_id, program, host)?;

        self.clk += 1_u32;

        let mut batch_offset_in_block = 0;
        let mut op_batches = basic_block_node.op_batches().iter();
        let mut decorator_ids = basic_block_node.decorator_iter();

        if let Some(first_op_batch) = op_batches.next() {
            self.execute_op_batch(
                basic_block_node,
                first_op_batch,
                &mut decorator_ids,
                batch_offset_in_block,
                program,
                host,
            )
            .await?;
            batch_offset_in_block += first_op_batch.ops().len();
        }

        for op_batch in op_batches {
            self.clk += 1_u32;

            self.execute_op_batch(
                basic_block_node,
                op_batch,
                &mut decorator_ids,
                batch_offset_in_block,
                program,
                host,
            )
            .await?;
            batch_offset_in_block += op_batch.ops().len();
        }

        self.clk += 1_u32;

        for &decorator_id in decorator_ids {
            let decorator = program
                .get_decorator_by_id(decorator_id)
                .ok_or(ExecutionError::DecoratorNotFoundInForest { decorator_id })?;
            self.execute_decorator(decorator, host)?;
        }

        self.execute_after_exit_decorators(node_id, program, host)
    }

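    /// Executes all operations in a single operation batch.
    ///
    /// Besides executing the operations, this keeps the clock consistent with the cycle count of
    /// the trace-generating processor: operations with immediate values occupy an extra group,
    /// and the number of groups in a batch is padded to a power of two.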
    #[inline(always)]
    async fn execute_op_batch(
        &mut self,
        basic_block: &BasicBlockNode,
        batch: &OpBatch,
        decorators: &mut DecoratorIterator<'_>,
        batch_offset_in_block: usize,
        program: &MastForest,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        let op_counts = batch.op_counts();
        let mut op_idx_in_group = 0;
        let mut group_idx = 0;
        let mut next_group_idx = 1;

        // the number of groups in a batch is always padded to the next power of two
        let num_batch_groups = batch.num_groups().next_power_of_two();

        for (op_idx_in_batch, op) in batch.ops().iter().enumerate() {
            // execute any decorators attached to this operation before the operation itself
            while let Some(&decorator_id) =
                decorators.next_filtered(batch_offset_in_block + op_idx_in_batch)
            {
                let decorator = program
                    .get_decorator_by_id(decorator_id)
                    .ok_or(ExecutionError::DecoratorNotFoundInForest { decorator_id })?;
                self.execute_decorator(decorator, host)?;
            }

            let op_idx_in_block = batch_offset_in_block + op_idx_in_batch;
            let err_ctx = err_ctx!(program, basic_block, host, op_idx_in_block);

            match op {
                Operation::Emit(event_id) => self.op_emit(*event_id, host, &err_ctx).await?,
                _ => {
                    self.execute_op(op, op_idx_in_block, program, host, &err_ctx)?;
                },
            }

            // an operation with an immediate value takes up an extra operation group
            let has_imm = op.imm_value().is_some();
            if has_imm {
                next_group_idx += 1;
            }

            // if this was the last operation in the current group, move on to the next group
            if op_idx_in_group == op_counts[group_idx] - 1 {
                if has_imm {
                    // account for the extra cycle consumed when an operation carrying an
                    // immediate value ends a group
                    debug_assert!(op_idx_in_group < OP_GROUP_SIZE - 1, "invalid op index");
                    self.clk += 1_u32;
                }

                group_idx = next_group_idx;
                next_group_idx += 1;
                op_idx_in_group = 0;
            } else {
                op_idx_in_group += 1;
            }

            self.clk += 1_u32;
        }

        // account for any remaining groups in the batch that were not consumed above
        self.clk += (num_batch_groups - group_idx) as u32;

        Ok(())
    }

    fn execute_before_enter_decorators(
        &mut self,
        node_id: MastNodeId,
        current_forest: &MastForest,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        let node = current_forest
            .get_node_by_id(node_id)
            .expect("internal error: node id {node_id} not found in current forest");

        for &decorator_id in node.before_enter() {
            self.execute_decorator(&current_forest[decorator_id], host)?;
        }

        Ok(())
    }

    fn execute_after_exit_decorators(
        &mut self,
        node_id: MastNodeId,
        current_forest: &MastForest,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        let node = current_forest
            .get_node_by_id(node_id)
            .expect("internal error: node id {node_id} not found in current forest");

        for &decorator_id in node.after_exit() {
            self.execute_decorator(&current_forest[decorator_id], host)?;
        }

        Ok(())
    }

    fn execute_decorator(
        &mut self,
        decorator: &Decorator,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        match decorator {
            Decorator::Debug(options) => {
                if self.in_debug_mode {
                    let process = &mut self.state();
                    host.on_debug(process, options)?;
                }
            },
            Decorator::AsmOp(_assembly_op) => {
                // do nothing
            },
            Decorator::Trace(id) => {
                let process = &mut self.state();
                host.on_trace(process, *id)?;
            },
        };
        Ok(())
    }

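    /// Executes the given operation.
    ///
    /// # Panics
    /// Panics if the operation is a control flow operation (those are handled by the node-level
    /// methods), or if it is an `Emit` operation (which requires async and is handled separately
    /// in `execute_op_batch()`).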
    #[inline(always)]
    fn execute_op(
        &mut self,
        operation: &Operation,
        op_idx: usize,
        program: &MastForest,
        host: &mut impl AsyncHost,
        err_ctx: &impl ErrorContext,
    ) -> Result<(), ExecutionError> {
        // a bounds check counter of zero indicates that the stack has reached the start or the
        // end of its fixed-size buffer (see `update_bounds_check_counter()`)
        if self.bounds_check_counter == 0 {
            let err_str = if self.stack_top_idx - MIN_STACK_DEPTH == 0 {
                "stack underflow"
            } else {
                "stack overflow"
            };
            return Err(ExecutionError::FailedToExecuteProgram(err_str));
        }

        match operation {
            Operation::Noop => {
                // do nothing
            },
            Operation::Assert(err_code) => self.op_assert(*err_code, host, program, err_ctx)?,
            Operation::FmpAdd => self.op_fmpadd(),
            Operation::FmpUpdate => self.op_fmpupdate()?,
            Operation::SDepth => self.op_sdepth(),
            Operation::Caller => self.op_caller()?,
            Operation::Clk => self.op_clk()?,
            Operation::Emit(_event_id) => {
                panic!("emit instruction requires async, so is not supported by execute_op()")
            },

            Operation::Join => unreachable!("control flow operation"),
            Operation::Split => unreachable!("control flow operation"),
            Operation::Loop => unreachable!("control flow operation"),
            Operation::Call => unreachable!("control flow operation"),
            Operation::SysCall => unreachable!("control flow operation"),
            Operation::Dyn => unreachable!("control flow operation"),
            Operation::Dyncall => unreachable!("control flow operation"),
            Operation::Span => unreachable!("control flow operation"),
            Operation::Repeat => unreachable!("control flow operation"),
            Operation::Respan => unreachable!("control flow operation"),
            Operation::End => unreachable!("control flow operation"),
            Operation::Halt => unreachable!("control flow operation"),

            Operation::Add => self.op_add()?,
            Operation::Neg => self.op_neg()?,
            Operation::Mul => self.op_mul()?,
            Operation::Inv => self.op_inv(err_ctx)?,
            Operation::Incr => self.op_incr()?,
            Operation::And => self.op_and(err_ctx)?,
            Operation::Or => self.op_or(err_ctx)?,
            Operation::Not => self.op_not(err_ctx)?,
            Operation::Eq => self.op_eq()?,
            Operation::Eqz => self.op_eqz()?,
            Operation::Expacc => self.op_expacc(),
            Operation::Ext2Mul => self.op_ext2mul(),

            Operation::U32split => self.op_u32split(),
            Operation::U32add => self.op_u32add(err_ctx)?,
            Operation::U32add3 => self.op_u32add3(err_ctx)?,
            Operation::U32sub => self.op_u32sub(op_idx, err_ctx)?,
            Operation::U32mul => self.op_u32mul(err_ctx)?,
            Operation::U32madd => self.op_u32madd(err_ctx)?,
            Operation::U32div => self.op_u32div(err_ctx)?,
            Operation::U32and => self.op_u32and(err_ctx)?,
            Operation::U32xor => self.op_u32xor(err_ctx)?,
            Operation::U32assert2(err_code) => self.op_u32assert2(*err_code, err_ctx)?,

            Operation::Pad => self.op_pad(),
            Operation::Drop => self.decrement_stack_size(),
            Operation::Dup0 => self.dup_nth(0),
            Operation::Dup1 => self.dup_nth(1),
            Operation::Dup2 => self.dup_nth(2),
            Operation::Dup3 => self.dup_nth(3),
            Operation::Dup4 => self.dup_nth(4),
            Operation::Dup5 => self.dup_nth(5),
            Operation::Dup6 => self.dup_nth(6),
            Operation::Dup7 => self.dup_nth(7),
            Operation::Dup9 => self.dup_nth(9),
            Operation::Dup11 => self.dup_nth(11),
            Operation::Dup13 => self.dup_nth(13),
            Operation::Dup15 => self.dup_nth(15),
            Operation::Swap => self.op_swap(),
            Operation::SwapW => self.swapw_nth(1),
            Operation::SwapW2 => self.swapw_nth(2),
            Operation::SwapW3 => self.swapw_nth(3),
            Operation::SwapDW => self.op_swap_double_word(),
            Operation::MovUp2 => self.rotate_left(3),
            Operation::MovUp3 => self.rotate_left(4),
            Operation::MovUp4 => self.rotate_left(5),
            Operation::MovUp5 => self.rotate_left(6),
            Operation::MovUp6 => self.rotate_left(7),
            Operation::MovUp7 => self.rotate_left(8),
            Operation::MovUp8 => self.rotate_left(9),
            Operation::MovDn2 => self.rotate_right(3),
            Operation::MovDn3 => self.rotate_right(4),
            Operation::MovDn4 => self.rotate_right(5),
            Operation::MovDn5 => self.rotate_right(6),
            Operation::MovDn6 => self.rotate_right(7),
            Operation::MovDn7 => self.rotate_right(8),
            Operation::MovDn8 => self.rotate_right(9),
            Operation::CSwap => self.op_cswap(err_ctx)?,
            Operation::CSwapW => self.op_cswapw(err_ctx)?,

            Operation::Push(element) => self.op_push(*element),
            Operation::AdvPop => self.op_advpop(err_ctx)?,
            Operation::AdvPopW => self.op_advpopw(err_ctx)?,
            Operation::MLoadW => self.op_mloadw(err_ctx)?,
            Operation::MStoreW => self.op_mstorew(err_ctx)?,
            Operation::MLoad => self.op_mload(err_ctx)?,
            Operation::MStore => self.op_mstore(err_ctx)?,
            Operation::MStream => self.op_mstream(err_ctx)?,
            Operation::Pipe => self.op_pipe(err_ctx)?,

            Operation::HPerm => self.op_hperm(),
            Operation::MpVerify(err_code) => self.op_mpverify(*err_code, program, err_ctx)?,
            Operation::MrUpdate => self.op_mrupdate(err_ctx)?,
            Operation::FriE2F4 => self.op_fri_ext2fold4()?,
            Operation::HornerBase => self.op_horner_eval_base(err_ctx)?,
            Operation::HornerExt => self.op_horner_eval_ext(err_ctx)?,
            Operation::EvalCircuit => self.op_eval_circuit(err_ctx)?,
        }

        Ok(())
    }

    async fn load_mast_forest<E>(
        &mut self,
        node_digest: Word,
        host: &mut impl AsyncHost,
        get_mast_forest_failed: impl Fn(Word, &E) -> ExecutionError,
        err_ctx: &E,
    ) -> Result<(MastNodeId, Arc<MastForest>), ExecutionError>
    where
        E: ErrorContext,
    {
        let mast_forest = host
            .get_mast_forest(&node_digest)
            .ok_or_else(|| get_mast_forest_failed(node_digest, err_ctx))?;

        let root_id = mast_forest
            .find_procedure_root(node_digest)
            .ok_or(ExecutionError::malfored_mast_forest_in_host(node_digest, err_ctx))?;

        // merge the advice map of the loaded forest into the advice provider
        self.advice
            .extend_map(mast_forest.advice_map())
            .map_err(|err| ExecutionError::advice_error(err, self.clk, err_ctx))?;

        Ok((root_id, mast_forest))
    }

    async fn resolve_external_node(
        &mut self,
        external_node: &ExternalNode,
        host: &mut impl AsyncHost,
    ) -> Result<(MastNodeId, Arc<MastForest>), ExecutionError> {
        let (root_id, mast_forest) = self
            .load_mast_forest(
                external_node.digest(),
                host,
                ExecutionError::no_mast_forest_with_procedure,
                &(),
            )
            .await?;

        // an external node that resolves to another external node would loop forever
        if mast_forest[root_id].is_external() {
            return Err(ExecutionError::CircularExternalNode(external_node.digest()));
        }

        Ok((root_id, mast_forest))
    }

    /// Increases the stack depth by one. The caller is responsible for writing the new top
    /// element.
    #[inline(always)]
    fn increment_stack_size(&mut self) {
        self.stack_top_idx += 1;
        self.update_bounds_check_counter();
    }

    /// Decreases the stack depth by one, dropping the top element. The stack never shrinks below
    /// `MIN_STACK_DEPTH` elements: when necessary, the bottom index follows the top index down.
    #[inline(always)]
    fn decrement_stack_size(&mut self) {
        self.stack_top_idx -= 1;
        self.stack_bot_idx = min(self.stack_bot_idx, self.stack_top_idx - MIN_STACK_DEPTH);
        self.update_bounds_check_counter();
    }

    /// Returns the number of elements currently on the stack.
    #[inline(always)]
    fn stack_size(&self) -> usize {
        self.stack_top_idx - self.stack_bot_idx
    }

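    /// Decrements the bounds check counter and, when it reaches zero, resets it to the number of
    /// stack size changes that can still occur before the stack buffer could underflow or
    /// overflow (whichever limit is closer). If the buffer boundary has already been reached, the
    /// counter stays at zero and the next call to `execute_op()` reports the failure.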
    #[inline(always)]
    fn update_bounds_check_counter(&mut self) {
        self.bounds_check_counter -= 1;

        if self.bounds_check_counter == 0 {
            self.bounds_check_counter =
                min(self.stack_top_idx - MIN_STACK_DEPTH, STACK_BUFFER_SIZE - self.stack_top_idx);
        }
    }

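    /// Saves the current execution context to the call stack and truncates the stack to
    /// `MIN_STACK_DEPTH` elements.
    ///
    /// All elements beyond the top `MIN_STACK_DEPTH` ones are moved into the saved context's
    /// overflow table and zeroed out in the stack buffer; `restore_context()` puts them back.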
    fn save_context_and_truncate_stack(&mut self) {
        let overflow_stack = if self.stack_size() > MIN_STACK_DEPTH {
            let overflow_stack =
                self.stack[self.stack_bot_idx..self.stack_top_idx - MIN_STACK_DEPTH].to_vec();
            self.stack[self.stack_bot_idx..self.stack_top_idx - MIN_STACK_DEPTH].fill(ZERO);

            overflow_stack
        } else {
            Vec::new()
        };

        self.stack_bot_idx = self.stack_top_idx - MIN_STACK_DEPTH;

        self.call_stack.push(ExecutionContextInfo {
            overflow_stack,
            ctx: self.ctx,
            fn_hash: self.caller_hash,
            fmp: self.fmp,
        });
    }

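    /// Restores the execution context saved by the matching `save_context_and_truncate_stack()`
    /// call: the overflow elements are copied back below the current stack, and the context ID,
    /// free memory pointer, and caller hash are restored.
    ///
    /// Returns an error if the stack holds more than `MIN_STACK_DEPTH` elements when the context
    /// ends.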
    fn restore_context(&mut self, err_ctx: &impl ErrorContext) -> Result<(), ExecutionError> {
        // when a call, syscall, or dyncall returns, the stack must be back to its minimum depth
        if self.stack_size() > MIN_STACK_DEPTH {
            return Err(ExecutionError::invalid_stack_depth_on_return(self.stack_size(), err_ctx));
        }

        let ctx_info = self
            .call_stack
            .pop()
            .expect("execution context stack should never be empty when restoring context");

        // restore the overflow stack of the caller's context
        {
            let overflow_len = ctx_info.overflow_stack.len();
            if overflow_len > self.stack_bot_idx {
                return Err(ExecutionError::FailedToExecuteProgram(
                    "stack underflow when restoring context",
                ));
            }

            self.stack[range(self.stack_bot_idx - overflow_len, overflow_len)]
                .copy_from_slice(&ctx_info.overflow_stack);
            self.stack_bot_idx -= overflow_len;
        }

        self.ctx = ctx_info.ctx;
        self.fmp = ctx_info.fmp;
        self.in_syscall = false;
        self.caller_hash = ctx_info.fn_hash;

        Ok(())
    }

    /// Executes the given program synchronously by driving `execute()` on a current-thread tokio
    /// runtime. Only available in tests and with the `testing` feature.
    #[cfg(any(test, feature = "testing"))]
    pub fn execute_sync(
        self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<StackOutputs, ExecutionError> {
        let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();

        let (stack_outputs, _advice) = rt.block_on(self.execute(program, host))?;

        Ok(stack_outputs)
    }

    /// Same as `execute_sync()`, but takes `&mut self` so that the processor state can be
    /// inspected after execution. Only available in tests and with the `testing` feature.
    #[cfg(any(test, feature = "testing"))]
    pub fn execute_sync_mut(
        &mut self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<StackOutputs, ExecutionError> {
        let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();

        rt.block_on(self.execute_impl(program, host))
    }
}

/// A wrapper around a mutable reference to a `FastProcessor`, used to expose the processor's
/// state as a `ProcessState`.
#[derive(Debug)]
pub struct FastProcessState<'a> {
    pub(super) processor: &'a mut FastProcessor,
}

impl FastProcessor {
    /// Returns the processor's state as a `ProcessState`.
    #[inline(always)]
    pub fn state(&mut self) -> ProcessState<'_> {
        ProcessState::Fast(FastProcessState { processor: self })
    }
}

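/// Information about an execution context, saved on `call`, `syscall`, and `dyncall`, and
/// restored when the corresponding context ends.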
#[derive(Debug)]
struct ExecutionContextInfo {
    /// Stack elements beyond the top `MIN_STACK_DEPTH` at the time the context was saved.
    overflow_stack: Vec<Felt>,
    /// The context ID of the saved context.
    ctx: ContextId,
    /// The caller hash of the saved context.
    fn_hash: Word,
    /// The free memory pointer of the saved context.
    fmp: Felt,
}