1use std::{
2 collections::{BTreeSet, VecDeque},
3 rc::Rc,
4};
5
6use miden_assembly::SourceManager;
7use miden_core::{
8 mast::{MastNode, MastNodeId},
9 operations::AssemblyOp,
10};
11use miden_processor::{
12 ContextId, Continuation, ExecutionError, FastProcessor, Felt, ResumeContext, StackOutputs,
13 operation::Operation, trace::RowIndex,
14};
15
16use super::{DebuggerHost, ExecutionTrace, TraceMonitor};
17use crate::{
18 Breakpoint, BreakpointType,
19 debug::{CallFrame, CallStack, ControlFlowOp, DebugVarTracker, StepInfo},
20};
21
/// Drives `fut` with a single poll using a no-op waker and returns its output.
///
/// # Panics
/// Panics if the future is not ready on the first poll; callers use this only
/// for futures known to resolve synchronously (e.g. the processor's `step`).
fn poll_immediately<T>(fut: impl std::future::Future<Output = T>) -> T {
    let noop_waker = std::task::Waker::noop();
    let mut task_cx = std::task::Context::from_waker(noop_waker);
    let mut pinned = std::pin::pin!(fut);
    let std::task::Poll::Ready(value) = pinned.as_mut().poll(&mut task_cx) else {
        panic!("future was expected to complete immediately")
    };
    value
}
37
/// Interactive single-step executor used by the debugger.
///
/// Wraps a [`FastProcessor`] together with the bookkeeping needed to pause
/// between cycles: the resumable continuation, a snapshot of the operand
/// stack, the reconstructed call stack, and tracked debug variables.
pub struct DebugExecutor {
    /// The underlying processor that executes the program.
    pub processor: FastProcessor,
    /// Host providing trace handlers, parameterized over the source manager.
    pub host: DebuggerHost<dyn miden_assembly::SourceManager>,
    /// Continuation for the next cycle; `None` once execution has finished.
    pub resume_ctx: Option<ResumeContext>,

    /// Snapshot of the operand stack after the most recent step.
    pub current_stack: Vec<Felt>,
    /// The VM operation executed on the most recent step, if any.
    pub current_op: Option<Operation>,
    /// Assembly-level op info for the most recent step, if available.
    pub current_asmop: Option<AssemblyOp>,

    /// Final stack outputs; populated when execution completes.
    pub stack_outputs: StackOutputs,
    /// All execution contexts observed so far.
    pub contexts: BTreeSet<ContextId>,
    /// Root execution context, carried into the final [`ExecutionTrace`].
    pub root_context: ContextId,
    /// The context the processor is currently executing in.
    pub current_context: ContextId,
    /// Reconstructed procedure call stack, advanced on every step.
    pub callstack: CallStack,
    /// Name of the procedure currently executing, if known.
    pub current_proc: Option<Rc<str>>,
    /// Tracker for source-level debug variables.
    pub debug_vars: DebugVarTracker,
    /// Number of debug-variable events recorded on the last step.
    pub last_debug_var_count: usize,
    /// Most recently executed operations (capped at 5 entries).
    pub recent: VecDeque<Operation>,
    /// Number of cycles stepped so far.
    pub cycle: usize,
    /// Set once execution has finished or errored.
    pub stopped: bool,
}
83
84pub(crate) fn extract_current_op(
87 ctx: &ResumeContext,
88) -> (Option<Operation>, Option<MastNodeId>, Option<usize>, Option<ControlFlowOp>) {
89 let forest = ctx.current_forest();
90 for cont in ctx.continuation_stack().iter_continuations_for_next_clock() {
91 match cont {
92 Continuation::ResumeBasicBlock {
93 node_id,
94 batch_index,
95 op_idx_in_batch,
96 } => {
97 let node = &forest[*node_id];
98 if let MastNode::Block(block) = node {
99 let mut global_idx = 0;
101 for batch in &block.op_batches()[..*batch_index] {
102 global_idx += batch.ops().len();
103 }
104 global_idx += op_idx_in_batch;
105 let op = block.op_batches()[*batch_index].ops().get(*op_idx_in_batch).copied();
106 return (op, Some(*node_id), Some(global_idx), None);
107 }
108 }
109 Continuation::Respan {
110 node_id,
111 batch_index,
112 } => {
113 let node = &forest[*node_id];
114 if let MastNode::Block(block) = node {
115 let mut global_idx = 0;
116 for batch in &block.op_batches()[..*batch_index] {
117 global_idx += batch.ops().len();
118 }
119 return (None, Some(*node_id), Some(global_idx), Some(ControlFlowOp::Respan));
120 }
121 }
122 Continuation::StartNode(node_id) => {
123 let control = match &forest[*node_id] {
124 MastNode::Block(_) => Some(ControlFlowOp::Span),
125 MastNode::Join(_) => Some(ControlFlowOp::Join),
126 MastNode::Split(_) => Some(ControlFlowOp::Split),
127 _ => None,
128 };
129 return (None, Some(*node_id), None, control);
130 }
131 Continuation::FinishBasicBlock(_)
132 | Continuation::FinishJoin(_)
133 | Continuation::FinishSplit(_)
134 | Continuation::FinishLoop { .. }
135 | Continuation::FinishCall(_)
136 | Continuation::FinishDyn(_)
137 | Continuation::FinishExternal(_) => {
138 return (None, None, None, Some(ControlFlowOp::End));
139 }
140 other if other.increments_clk() => {
141 return (None, None, None, None);
142 }
143 _ => continue,
144 }
145 }
146 (None, None, None, None)
147}
148
149impl DebugExecutor {
150 pub fn procedure_has_debug_vars(&self, procedure: &str) -> bool {
153 let Some(resume_ctx) = self.resume_ctx.as_ref() else {
154 return false;
155 };
156
157 let forest = resume_ctx.current_forest();
158 for (node_idx, node) in forest.nodes().iter().enumerate() {
159 let MastNode::Block(block) = node else {
160 continue;
161 };
162 let node_id = MastNodeId::new_unchecked(node_idx as u32);
163 for op_idx in 0..block.num_operations() as usize {
164 if forest.debug_vars_for_operation(node_id, op_idx).is_empty() {
165 continue;
166 }
167 if forest
168 .get_assembly_op(node_id, Some(op_idx))
169 .is_some_and(|op| op.context_name() == procedure)
170 {
171 return true;
172 }
173 }
174 }
175
176 false
177 }
178
179 pub fn register_trace_monitor_for(&mut self, monitor: TraceMonitor, event: super::TraceEvent) {
180 self.host.register_trace_handler(event, move |state, event| {
181 monitor.handle_event(state.clock(), event)
182 });
183 }
184
    /// Executes a single VM cycle and refreshes all debugger-visible state
    /// (stack snapshot, current op/asmop, call stack, debug variables).
    ///
    /// Returns the [`CallFrame`] exited on this step, if any, so callers can
    /// implement "finish"-style breakpoints. Returns `Ok(None)` when the
    /// executor is already stopped, when execution completes on this step, or
    /// when no frame was exited.
    pub fn step(&mut self) -> Result<Option<CallFrame>, ExecutionError> {
        if self.stopped {
            self.last_debug_var_count = 0;
            return Ok(None);
        }

        // Take ownership of the continuation; a fresh one is handed back by
        // the processor when execution has not yet finished.
        let resume_ctx = match self.resume_ctx.take() {
            Some(ctx) => ctx,
            None => {
                self.stopped = true;
                self.last_debug_var_count = 0;
                return Ok(None);
            }
        };

        // Decode what is about to execute BEFORE stepping, so the op/asmop we
        // report corresponds to the cycle being executed.
        let (op, node_id, op_idx, control) = extract_current_op(&resume_ctx);
        let asmop = node_id
            .and_then(|nid| resume_ctx.current_forest().get_assembly_op(nid, op_idx).cloned());

        // Collect debug-variable events attached to this exact operation.
        let debug_var_infos: Vec<_> = if let (Some(nid), Some(idx)) = (node_id, op_idx) {
            let forest = resume_ctx.current_forest();
            forest
                .debug_vars_for_operation(nid, idx)
                .iter()
                .filter_map(|vid| forest.debug_var(*vid).cloned())
                .collect()
        } else {
            vec![]
        };

        // The processor's step future is expected to resolve synchronously.
        match poll_immediately(self.processor.step(&mut self.host, resume_ctx)) {
            Ok(Some(new_ctx)) => {
                self.resume_ctx = Some(new_ctx);
                self.cycle += 1;

                let state = self.processor.state();
                let ctx = state.ctx();
                self.current_stack = state.get_stack_state();

                // Record a context switch if one occurred on this cycle.
                if self.current_context != ctx {
                    self.contexts.insert(ctx);
                    self.current_context = ctx;
                }

                self.current_op = op;
                self.current_asmop = asmop.clone();
                if let Some(asmop) = asmop.as_ref() {
                    self.current_proc = Some(Rc::from(asmop.context_name()));
                }

                // Maintain a ring buffer of the 5 most recent operations.
                if let Some(op) = op {
                    if self.recent.len() == 5 {
                        self.recent.pop_front();
                    }
                    self.recent.push_back(op);
                }

                let step_info = StepInfo {
                    op,
                    control,
                    asmop: self.current_asmop.as_ref(),
                    clk: RowIndex::from(self.cycle as u32),
                    ctx: self.current_context,
                };
                // Advance the call stack; yields the frame exited, if any.
                let exited = self.callstack.next(&step_info);

                let debug_var_count = debug_var_infos.len();
                self.debug_vars
                    .record_events(RowIndex::from(self.cycle as u32), debug_var_infos);
                self.debug_vars.update_to_cycle(RowIndex::from(self.cycle as u32));
                self.last_debug_var_count = debug_var_count;

                Ok(exited)
            }
            Ok(None) => {
                // Program completed: capture the final stack (top 16 elements)
                // as the execution outputs.
                self.stopped = true;
                self.last_debug_var_count = 0;
                let state = self.processor.state();
                self.current_stack = state.get_stack_state();

                let len = self.current_stack.len().min(16);
                self.stack_outputs =
                    StackOutputs::new(&self.current_stack[..len]).expect("invalid stack outputs");
                Ok(None)
            }
            Err(err) => {
                self.stopped = true;
                self.last_debug_var_count = 0;
                Err(err)
            }
        }
    }
292
    /// Steps repeatedly until `breakpoint` is hit, execution stops, or an
    /// error occurs.
    ///
    /// `trace_monitor` is consulted only for trace-event breakpoints;
    /// `source_manager` resolves source locations for location-based checks.
    pub fn step_until(
        &mut self,
        breakpoint: BreakpointType,
        trace_monitor: Option<TraceMonitor>,
        source_manager: &dyn SourceManager,
    ) -> Result<(), ExecutionError> {
        let start_cycle = self.cycle;
        let start_clock = self.processor.state().clock();
        // Wrap the requested type in a throwaway breakpoint so its matching
        // helpers (`should_break_*`, `cycles_to_skip`) can be reused below.
        let breakpoint = Breakpoint {
            id: 0,
            creation_cycle: start_cycle,
            ty: breakpoint,
        };
        let start_asmop = self.current_asmop.clone();
        while !self.stopped {
            // `Finish` breakpoints fire when a breakable frame is exited.
            match self.step()? {
                Some(exited)
                    if exited.should_break_on_exit() && breakpoint.ty == BreakpointType::Finish =>
                {
                    return Ok(());
                }
                _ => (),
            }

            // Trace breakpoints fire once the monitored event id has been
            // observed since stepping started.
            if let BreakpointType::Trace(event_id) = breakpoint.ty
                && let Some(trace_monitor) = trace_monitor.as_ref()
                && trace_monitor.has_event_occurred_since(start_clock, |event| event == event_id)
            {
                return Ok(());
            }

            // Snapshot current op, asmop-boundary flag, procedure name, and
            // source location for the remaining checks.
            let (op, is_op_boundary, proc, loc) = {
                let op = self.current_op;
                let is_boundary = self.current_asmop.as_ref().map(|_info| true).unwrap_or(false);
                let (proc, loc) = match self.callstack.current_frame() {
                    Some(frame) => {
                        let loc = frame
                            .recent()
                            .back()
                            .and_then(|detail| detail.resolve(source_manager))
                            .cloned();
                        (frame.procedure(""), loc)
                    }
                    None => (None, None),
                };
                (op, is_boundary, proc, loc)
            };

            // VM-opcode breakpoints.
            if let Some(op) = op
                && breakpoint.should_break_for(&op)
            {
                return Ok(());
            }

            // Assembly-opcode breakpoints only fire on asmop boundaries.
            if is_op_boundary
                && let Some(asmop) = self.current_asmop.as_ref()
                && matches!(breakpoint.ty, BreakpointType::AsmOpcode(asm_opcode) if asmop.op() == asm_opcode)
            {
                return Ok(());
            }

            // Cycle-count breakpoints: stop once enough cycles were stepped.
            let current_cycle = self.cycle;
            let cycles_stepped = current_cycle - start_cycle;
            if let Some(n) = breakpoint.cycles_to_skip(current_cycle)
                && cycles_stepped >= n
            {
                return Ok(());
            }

            // `Next` stops at the first asmop boundary past the starting asmop.
            if cycles_stepped > 0
                && is_op_boundary
                && matches!(&breakpoint.ty, BreakpointType::Next)
                && self.current_asmop != start_asmop
            {
                return Ok(());
            }

            // Source-location breakpoints.
            if let Some(loc) = loc.as_ref()
                && breakpoint.should_break_at(loc)
            {
                return Ok(());
            }

            // Procedure-name breakpoints.
            if let Some(proc) = proc.as_deref()
                && breakpoint.should_break_in(proc)
            {
                return Ok(());
            }
        }

        Ok(())
    }
391
392 pub fn into_execution_trace(self) -> ExecutionTrace {
394 ExecutionTrace {
395 root_context: self.root_context,
396 last_cycle: RowIndex::from(self.cycle as u32),
397 processor: self.processor,
398 outputs: self.stack_outputs,
399 }
400 }
401}
402
#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use miden_assembly::DefaultSourceManager;

    use super::*;
    use crate::exec::Executor;

    /// Steps a program with nested `exec` calls and verifies that the call
    /// stack reaches the innermost procedure.
    #[test]
    fn callstack_tracks_nested_frame_trace_events() {
        let sources = Arc::new(DefaultSourceManager::default());
        let program = miden_assembly::Assembler::new(sources.clone())
            .assemble_program(
                r#"
proc inner
    nop
end

proc outer
    trace.240
    nop
    exec.inner
    trace.252
    nop
end

begin
    trace.240
    nop
    exec.outer
    trace.252
    nop
end
"#,
            )
            .unwrap();

        let mut debug_exec = Executor::new(Vec::<Felt>::new()).into_debug(&program, sources);
        let mut max_depth = 0;
        let mut inner_seen = false;
        let mut snapshots = Vec::new();

        // Bounded stepping: stop as soon as the inner frame shows up (or the
        // program halts) so a regression cannot hang the test.
        for _ in 0..64 {
            debug_exec.step().unwrap();
            let stack = debug_exec.callstack.frames();
            max_depth = max_depth.max(stack.len());

            let names: Vec<String> = stack
                .iter()
                .map(|frame| match frame.procedure("").map(|name| name.to_string()) {
                    Some(name) => name,
                    None => "<unknown>".to_string(),
                })
                .collect();
            snapshots.push(names);

            let deepest_is_inner = stack
                .last()
                .and_then(|frame| frame.procedure(""))
                .is_some_and(|name| name.contains("inner"));
            inner_seen |= stack.len() >= 3 && deepest_is_inner;

            if inner_seen || debug_exec.stopped {
                break;
            }
        }

        assert!(
            max_depth >= 3,
            "expected nested main -> outer -> inner frames, max depth was {max_depth}"
        );
        assert!(
            inner_seen,
            "expected innermost frame to resolve to inner; snapshots: {snapshots:?}"
        );
    }
}