use crate::machine_assign::MachineAssign;
use ckb_traits::{CellDataProvider, ExtensionProvider, HeaderProvider};
use ckb_vm::cost_model::estimate_cycles;
use ckb_vm::{
    Error, Memory, Register,
    bytes::Bytes,
    decoder::{Decoder, build_decoder},
    instructions::{execute, extract_opcode, insts},
    machine::{CoreMachine, Machine, SupportMachine},
    registers::A7,
};
use gdbstub::{
    arch::Arch,
    common::Signal,
    conn::{Connection, ConnectionExt},
    stub::{
        SingleThreadStopReason,
        run_blocking::{BlockingEventLoop, Event, WaitForStopReasonError},
    },
    target::{
        Target, TargetError, TargetResult,
        ext::{
            base::{
                BaseOps,
                single_register_access::{SingleRegisterAccess, SingleRegisterAccessOps},
                singlethread::{
                    SingleThreadBase, SingleThreadRangeStepping, SingleThreadRangeSteppingOps, SingleThreadResume,
                    SingleThreadResumeOps, SingleThreadSingleStep, SingleThreadSingleStepOps,
                },
            },
            breakpoints::{
                Breakpoints, BreakpointsOps, HwWatchpoint, HwWatchpointOps, SwBreakpoint, SwBreakpointOps, WatchKind,
            },
            catch_syscalls::{CatchSyscallPosition, CatchSyscalls, CatchSyscallsOps, SyscallNumbers},
        },
    },
};
use gdbstub_arch::riscv::Riscv64;
use gdbstub_arch::riscv::reg::id::RiscvRegId;
use std::collections::HashSet;
use std::fmt::Debug;
use std::marker::PhantomData;

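/// Execution mode requested by the GDB client: single-step, free-running
/// continue, or range stepping over the half-open interval `[start, end)`.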
#[derive(Debug, Clone)]
pub enum ExecMode {
    Step,
    Continue,
    RangeStep(u64, u64),
}

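/// Syscall-catch filter driven by the GDB `catch syscall` machinery: catch
/// nothing, catch every syscall, or catch only the listed syscall numbers.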
pub enum FilteredSyscalls {
    None,
    All,
    Filter(HashSet<u64>),
}

impl FilteredSyscalls {
    pub fn filtered(&self, syscall_number: &u64) -> bool {
        match self {
            FilteredSyscalls::None => false,
            FilteredSyscalls::All => true,
            FilteredSyscalls::Filter(filter) => filter.contains(syscall_number),
        }
    }
}

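/// A gdbstub `Target` wrapping a CKB-VM [`MachineAssign`]. It tracks the
/// client's breakpoints, watchpoints, and syscall-catch filter, plus the
/// memory addresses touched by the most recently executed instruction.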
pub struct GdbStubHandler<DL>
where
    DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static,
{
    pub machine: MachineAssign<DL>,
    exec_mode: ExecMode,
    decoder: Decoder,
    breakpoints: Vec<u64>,
    catch_syscalls: FilteredSyscalls,
    watchpoints: Vec<(u64, WatchKind)>,
    memory_writes: Vec<u64>,
    memory_reads: Vec<u64>,
}

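/// Outcome of driving the VM for one or more steps, translated into a
/// gdbstub stop reason by the blocking event loop below.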
#[derive(Debug, Clone, PartialEq, Eq)]
enum VmEvent {
    IncomingData,
    DoneStep,
    Exited(u8),
    Break,
    WatchWrite(u64),
    WatchRead(u64),
    CatchSyscall(u64),
    Error(Error),
}

impl<DL> GdbStubHandler<DL>
where
    DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static,
{
    pub fn new(machine: MachineAssign<DL>) -> Self {
        let decoder = build_decoder::<u64>(machine.isa(), machine.version());
        Self {
            machine,
            decoder,
            exec_mode: ExecMode::Continue,
            breakpoints: vec![],
            catch_syscalls: FilteredSyscalls::None,
            watchpoints: vec![],
            memory_writes: vec![],
            memory_reads: vec![],
        }
    }

    fn clear_memory_ops(&mut self) {
        self.memory_writes.clear();
        self.memory_reads.clear();
    }
}

impl<DL> GdbStubHandler<DL>
where
    DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static,
{
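    /// Runs the VM to completion without a debugger attached, returning the
    /// exit code and the total cycles consumed by the scheduler.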
    pub fn run_till_exited(mut self) -> Result<(i8, u64), Error> {
        while self.machine.running() {
            self.step_inner()?;
        }
        Ok((self.machine.exit_code(), self.machine.scheduler.consumed_cycles()))
    }

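    /// Decodes the instruction at the current PC and returns its opcode
    /// without executing it; returns `None` if decoding fails.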
    fn next_opcode(&mut self) -> Option<u16> {
        let pc = self.machine.pc().to_u64();
        let memory = self.machine.memory_mut();
        let inst = self.decoder.decode(memory, pc).ok()?;
        Some(extract_opcode(inst))
    }

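    /// Decodes and executes a single instruction, charging its estimated
    /// cycle cost and clearing the per-step memory access log beforehand.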
    fn step_inner(&mut self) -> Result<(), Error> {
        let instruction = {
            let pc = self.machine.pc().to_u64();
            let memory = self.machine.memory_mut();
            self.decoder.decode(memory, pc)?
        };
        let cycles = estimate_cycles(instruction);
        self.machine.add_cycles(cycles)?;
        self.clear_memory_ops();
        execute(instruction, self)
    }

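    /// Executes one instruction and reports anything the debugger may need to
    /// stop for: VM exit, a pending syscall matching the catch filter
    /// (reported before the `ecall` executes), a breakpoint at the new PC, or
    /// a memory access recorded during the instruction.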
    fn step(&mut self) -> Option<VmEvent> {
        if self.machine.reset_signal() {
            self.decoder.reset_instructions_cache()
        }
        if !self.machine.running() {
            return Some(VmEvent::Exited(self.machine.exit_code() as u8));
        }
        match self.step_inner() {
            Ok(_) => {
                if let Some(opcode) = self.next_opcode() {
                    if opcode == insts::OP_ECALL {
                        let number = self.machine.registers()[A7].clone();
                        if self.catch_syscalls.filtered(&number) {
                            return Some(VmEvent::CatchSyscall(number));
                        }
                    }
                }
                if self.breakpoints.contains(self.machine.pc()) {
                    return Some(VmEvent::Break);
                }
                if !self.memory_writes.is_empty() {
                    return Some(VmEvent::WatchWrite(self.memory_writes.pop().unwrap()));
                }
                if !self.memory_reads.is_empty() {
                    return Some(VmEvent::WatchRead(self.memory_reads.pop().unwrap()));
                }
                None
            }
            Err(e) => Some(VmEvent::Error(e)),
        }
    }

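    /// Drives the VM according to the current [`ExecMode`], checking for
    /// incoming GDB traffic every 1024 executed instructions so the client
    /// can interrupt long-running code.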
    fn execute(&mut self, mut poll_incoming_data: impl FnMut() -> bool) -> VmEvent {
        if poll_incoming_data() {
            return VmEvent::IncomingData;
        }
        match self.exec_mode.clone() {
            ExecMode::Step => self.step().unwrap_or(VmEvent::DoneStep),
            ExecMode::Continue => {
                let mut executed_cycles = 0;
                loop {
                    if let Some(event) = self.step() {
                        let mut continue_step = true;
                        match event {
                            VmEvent::DoneStep | VmEvent::Exited(_) | VmEvent::Break | VmEvent::Error(_) => {
                                continue_step = false;
                            }
                            _ => {}
                        };
                        if !continue_step {
                            break event;
                        }
                    }

                    executed_cycles += 1;
                    if executed_cycles % 1024 == 0 && poll_incoming_data() {
                        break VmEvent::IncomingData;
                    }
                }
            }
            ExecMode::RangeStep(start, end) => {
                let mut executed_cycles = 0;
                loop {
                    if let Some(event) = self.step() {
                        break event;
                    }

                    if !(start.to_u64()..end.to_u64()).contains(&self.machine.pc().to_u64()) {
                        break VmEvent::DoneStep;
                    }

                    executed_cycles += 1;
                    if executed_cycles % 1024 == 0 && poll_incoming_data() {
                        break VmEvent::IncomingData;
                    }
                }
            }
        }
    }
}

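// gdbstub `Target` implementation: a single-threaded RISC-V 64 target that
// advertises the breakpoint and syscall-catch extensions.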
impl<DL> Target for GdbStubHandler<DL>
where
    DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static,
{
    type Arch = Riscv64;
    type Error = Error;

    fn base_ops(&mut self) -> BaseOps<Self::Arch, Self::Error> {
        BaseOps::SingleThread(self)
    }

    fn support_breakpoints(&mut self) -> Option<BreakpointsOps<'_, Self>> {
        Some(self)
    }

    fn support_catch_syscalls(&mut self) -> Option<CatchSyscallsOps<'_, Self>> {
        Some(self)
    }
}

impl<DL> SingleThreadBase for GdbStubHandler<DL>
where
    DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static,
{
    fn read_registers(&mut self, regs: &mut <Self::Arch as Arch>::Registers) -> TargetResult<(), Self> {
        for (i, val) in self.machine.registers().iter().enumerate() {
            regs.x[i] = val.clone().to_u64();
        }
        regs.pc = self.machine.pc().clone().to_u64();
        Ok(())
    }

    fn write_registers(&mut self, regs: &<Self::Arch as Arch>::Registers) -> TargetResult<(), Self> {
        regs.x.iter().enumerate().for_each(|(i, val)| {
            self.machine.set_register(i, val.clone());
        });
        self.machine.update_pc(regs.pc.clone());
        self.machine.commit_pc();
        Ok(())
    }

    fn read_addrs(&mut self, start_addr: <Self::Arch as Arch>::Usize, data: &mut [u8]) -> TargetResult<usize, Self> {
        for i in 0..data.len() {
            data[i] =
                self.machine.memory_mut().load8(&(start_addr.to_u64() + i as u64)).map_err(TargetError::Fatal)?.to_u8();
        }
        Ok(data.len())
    }

    fn write_addrs(&mut self, start_addr: <Self::Arch as Arch>::Usize, data: &[u8]) -> TargetResult<(), Self> {
        self.machine.memory_mut().store_bytes(start_addr.to_u64(), data).map_err(TargetError::Fatal)
    }

    fn support_single_register_access(&mut self) -> Option<SingleRegisterAccessOps<'_, (), Self>> {
        Some(self)
    }

    fn support_resume(&mut self) -> Option<SingleThreadResumeOps<'_, Self>> {
        Some(self)
    }
}

impl<DL> SingleRegisterAccess<()> for GdbStubHandler<DL>
where
    DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static,
{
    fn read_register(
        &mut self,
        _tid: (),
        reg_id: <Self::Arch as Arch>::RegId,
        buf: &mut [u8],
    ) -> TargetResult<usize, Self> {
        let value = match reg_id {
            RiscvRegId::Pc => self.machine.pc(),
            RiscvRegId::Gpr(idx) => &self.machine.registers()[idx as usize],
            _ => return Err(TargetError::Fatal(Error::External(format!("Invalid register id: {:?}", reg_id)))),
        };
        buf.copy_from_slice(&value.to_u64().to_le_bytes()[0..(u64::BITS as usize / 8)]);
        Ok(buf.len())
    }

    fn write_register(&mut self, _tid: (), reg_id: <Self::Arch as Arch>::RegId, val: &[u8]) -> TargetResult<(), Self> {
        let mut u64_buf = [0u8; 8];
        u64_buf[0..val.len()].copy_from_slice(val);
        let v = u64::from_le_bytes(u64_buf);
        match reg_id {
            RiscvRegId::Pc => {
                self.machine.update_pc(v);
                self.machine.commit_pc();
            }
            RiscvRegId::Gpr(idx) => {
                self.machine.set_register(idx as usize, v);
            }
            _ => return Err(TargetError::Fatal(Error::External(format!("Invalid register id: {:?}", reg_id)))),
        };

        Ok(())
    }
}

impl<DL> SingleThreadResume for GdbStubHandler<DL>
where
    DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static,
{
    fn resume(&mut self, signal: Option<Signal>) -> Result<(), Self::Error> {
        if signal.is_some() {
            return Err(Error::External("no support for continuing with signal".to_string()));
        }
        self.exec_mode = ExecMode::Continue;
        Ok(())
    }

    fn support_single_step(&mut self) -> Option<SingleThreadSingleStepOps<'_, Self>> {
        Some(self)
    }

    fn support_range_step(&mut self) -> Option<SingleThreadRangeSteppingOps<'_, Self>> {
        Some(self)
    }
}

impl<DL> SingleThreadRangeStepping for GdbStubHandler<DL>
where
    DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static,
{
    fn resume_range_step(&mut self, start: u64, end: u64) -> Result<(), Self::Error> {
        self.exec_mode = ExecMode::RangeStep(start, end);
        Ok(())
    }
}

impl<DL> SingleThreadSingleStep for GdbStubHandler<DL>
where
    DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static,
{
    fn step(&mut self, signal: Option<Signal>) -> Result<(), Self::Error> {
        if signal.is_some() {
            return Err(Error::External("no support for stepping with signal".to_string()));
        }
        self.exec_mode = ExecMode::Step;
        Ok(())
    }
}

impl<DL> Breakpoints for GdbStubHandler<DL>
where
    DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static,
{
    fn support_sw_breakpoint(&mut self) -> Option<SwBreakpointOps<'_, Self>> {
        Some(self)
    }

    fn support_hw_watchpoint(&mut self) -> Option<HwWatchpointOps<'_, Self>> {
        Some(self)
    }
}

impl<DL> SwBreakpoint for GdbStubHandler<DL>
where
    DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static,
{
    fn add_sw_breakpoint(
        &mut self,
        addr: <Self::Arch as Arch>::Usize,
        _kind: <Self::Arch as Arch>::BreakpointKind,
    ) -> TargetResult<bool, Self> {
        self.breakpoints.push(addr);
        Ok(true)
    }

    fn remove_sw_breakpoint(
        &mut self,
        addr: <Self::Arch as Arch>::Usize,
        _kind: <Self::Arch as Arch>::BreakpointKind,
    ) -> TargetResult<bool, Self> {
        match self.breakpoints.iter().position(|x| *x == addr) {
            None => return Ok(false),
            Some(pos) => self.breakpoints.remove(pos),
        };

        Ok(true)
    }
}

impl<DL> HwWatchpoint for GdbStubHandler<DL>
where
    DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static,
{
    fn add_hw_watchpoint(
        &mut self,
        addr: <Self::Arch as Arch>::Usize,
        _len: <Self::Arch as Arch>::Usize,
        kind: WatchKind,
    ) -> TargetResult<bool, Self> {
        self.watchpoints.push((addr, kind));
        Ok(true)
    }

    fn remove_hw_watchpoint(
        &mut self,
        addr: <Self::Arch as Arch>::Usize,
        _len: <Self::Arch as Arch>::Usize,
        kind: WatchKind,
    ) -> TargetResult<bool, Self> {
        match self.watchpoints.iter().position(|(a, k)| *a == addr && *k == kind) {
            None => return Ok(false),
            Some(pos) => self.watchpoints.remove(pos),
        };

        Ok(true)
    }
}

impl<DL> CatchSyscalls for GdbStubHandler<DL>
where
    DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static,
{
    fn enable_catch_syscalls(
        &mut self,
        filter: Option<SyscallNumbers<'_, <Self::Arch as Arch>::Usize>>,
    ) -> TargetResult<(), Self> {
        self.catch_syscalls = match filter {
            Some(numbers) => FilteredSyscalls::Filter(numbers.collect()),
            None => FilteredSyscalls::All,
        };
        Ok(())
    }

    fn disable_catch_syscalls(&mut self) -> TargetResult<(), Self> {
        self.catch_syscalls = FilteredSyscalls::None;
        Ok(())
    }
}

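/// Blocking event loop glue that lets gdbstub drive a [`GdbStubHandler`]
/// over any boxed [`ConnectionExt`] (for example a TCP stream).
///
/// A minimal usage sketch, assuming a `machine: MachineAssign<DL>` and a
/// `tcp_stream` whose type implements `ConnectionExt<Error = std::io::Error>`;
/// names not defined in this file are illustrative only:
///
/// ```ignore
/// use gdbstub::stub::GdbStub;
///
/// let conn: Box<dyn ConnectionExt<Error = std::io::Error>> = Box::new(tcp_stream);
/// let mut handler = GdbStubHandler::new(machine);
/// // Blocks until the GDB client disconnects, the VM exits, or an error occurs.
/// let _reason = GdbStub::new(conn)
///     .run_blocking::<GdbStubHandlerEventLoop<DL>>(&mut handler)
///     .expect("gdb session failed");
/// ```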
#[derive(Default)]
pub struct GdbStubHandlerEventLoop<DL>
where
    DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static,
{
    dl: PhantomData<DL>,
}

impl<DL> BlockingEventLoop for GdbStubHandlerEventLoop<DL>
where
    DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static,
{
    type Target = GdbStubHandler<DL>;
    type Connection = Box<dyn ConnectionExt<Error = std::io::Error>>;
    type StopReason = SingleThreadStopReason<u64>;

    fn on_interrupt(
        _target: &mut Self::Target,
    ) -> Result<Option<SingleThreadStopReason<u64>>, <GdbStubHandler<DL> as Target>::Error> {
        Ok(Some(SingleThreadStopReason::Signal(Signal::SIGINT)))
    }

    #[allow(clippy::type_complexity)]
    fn wait_for_stop_reason(
        target: &mut Self::Target,
        conn: &mut Self::Connection,
    ) -> Result<
        Event<SingleThreadStopReason<u64>>,
        WaitForStopReasonError<<Self::Target as Target>::Error, <Self::Connection as Connection>::Error>,
    > {
        let poll_incoming_data = || conn.peek().map(|b| b.is_some()).unwrap_or(true);

        Ok(match target.execute(poll_incoming_data) {
            VmEvent::IncomingData => {
                let byte = conn.read().map_err(WaitForStopReasonError::Connection)?;
                Event::IncomingData(byte)
            }
            VmEvent::DoneStep => Event::TargetStopped(SingleThreadStopReason::DoneStep),
            VmEvent::Exited(code) => Event::TargetStopped(SingleThreadStopReason::Exited(code)),
            VmEvent::Break => Event::TargetStopped(SingleThreadStopReason::SwBreak(())),
            VmEvent::WatchRead(addr) => Event::TargetStopped(SingleThreadStopReason::Watch {
                tid: (),
                kind: WatchKind::Read,
                addr: addr.to_u64(),
            }),
            VmEvent::WatchWrite(addr) => Event::TargetStopped(SingleThreadStopReason::Watch {
                tid: (),
                kind: WatchKind::Write,
                addr: addr.to_u64(),
            }),
            VmEvent::CatchSyscall(number) => Event::TargetStopped(SingleThreadStopReason::CatchSyscall {
                tid: None,
                number: number.to_u64(),
                position: CatchSyscallPosition::Entry,
            }),
            VmEvent::Error(e) => return Err(WaitForStopReasonError::Target(e)),
        })
    }
}

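// Memory access is forwarded to the wrapped machine, but the `load8`..`load64`
// and `store8`..`store64` accessors also record the touched address in
// `memory_reads` / `memory_writes` so `step()` can report watch events.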
impl<DL> Memory for GdbStubHandler<DL>
where
    DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static,
{
    type REG = u64;

    fn new() -> Self {
        todo!()
    }

    fn new_with_memory(_: usize) -> Self {
        todo!()
    }

    fn memory_size(&self) -> usize {
        self.machine.memory().memory_size()
    }

    fn load_bytes(&mut self, addr: u64, size: u64) -> Result<Bytes, ckb_vm::Error> {
        self.machine.memory_mut().load_bytes(addr, size)
    }

    fn lr(&self) -> &<Self as Memory>::REG {
        self.machine.memory().lr()
    }

    fn set_lr(&mut self, addr: &<Self as Memory>::REG) {
        self.machine.memory_mut().set_lr(addr)
    }

    fn init_pages(
        &mut self,
        addr: u64,
        size: u64,
        flags: u8,
        source: Option<Bytes>,
        offset_from_addr: u64,
    ) -> Result<(), Error> {
        self.machine.memory_mut().init_pages(addr, size, flags, source, offset_from_addr)
    }

    fn fetch_flag(&mut self, page: u64) -> Result<u8, Error> {
        self.machine.memory_mut().fetch_flag(page)
    }

    fn set_flag(&mut self, page: u64, flag: u8) -> Result<(), Error> {
        self.machine.memory_mut().set_flag(page, flag)
    }

    fn clear_flag(&mut self, page: u64, flag: u8) -> Result<(), Error> {
        self.machine.memory_mut().clear_flag(page, flag)
    }

    fn store_byte(&mut self, addr: u64, size: u64, value: u8) -> Result<(), Error> {
        self.machine.memory_mut().store_byte(addr, size, value)
    }

    fn store_bytes(&mut self, addr: u64, value: &[u8]) -> Result<(), Error> {
        self.machine.memory_mut().store_bytes(addr, value)
    }

    fn execute_load16(&mut self, addr: u64) -> Result<u16, Error> {
        self.machine.memory_mut().execute_load16(addr)
    }

    fn execute_load32(&mut self, addr: u64) -> Result<u32, Error> {
        self.machine.memory_mut().execute_load32(addr)
    }

    fn load8(&mut self, addr: &Self::REG) -> Result<Self::REG, Error> {
        let result = self.machine.memory_mut().load8(addr)?;
        self.memory_reads.push(addr.clone());
        Ok(result)
    }

    fn load16(&mut self, addr: &Self::REG) -> Result<Self::REG, Error> {
        let result = self.machine.memory_mut().load16(addr)?;
        self.memory_reads.push(addr.clone());
        Ok(result)
    }

    fn load32(&mut self, addr: &Self::REG) -> Result<Self::REG, Error> {
        let result = self.machine.memory_mut().load32(addr)?;
        self.memory_reads.push(addr.clone());
        Ok(result)
    }

    fn load64(&mut self, addr: &Self::REG) -> Result<Self::REG, Error> {
        let result = self.machine.memory_mut().load64(addr)?;
        self.memory_reads.push(addr.clone());
        Ok(result)
    }

    fn store8(&mut self, addr: &Self::REG, value: &Self::REG) -> Result<(), Error> {
        self.machine.memory_mut().store8(addr, value)?;
        self.memory_writes.push(addr.clone());
        Ok(())
    }

    fn store16(&mut self, addr: &Self::REG, value: &Self::REG) -> Result<(), Error> {
        self.machine.memory_mut().store16(addr, value)?;
        self.memory_writes.push(addr.clone());
        Ok(())
    }

    fn store32(&mut self, addr: &Self::REG, value: &Self::REG) -> Result<(), Error> {
        self.machine.memory_mut().store32(addr, value)?;
        self.memory_writes.push(addr.clone());
        Ok(())
    }

    fn store64(&mut self, addr: &Self::REG, value: &Self::REG) -> Result<(), Error> {
        self.machine.memory_mut().store64(addr, value)?;
        self.memory_writes.push(addr.clone());
        Ok(())
    }
}

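// CoreMachine delegates to the wrapped machine, except that `memory()` and
// `memory_mut()` return `self` so instruction execution goes through the
// instrumented `Memory` implementation above.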
impl<DL> CoreMachine for GdbStubHandler<DL>
where
    DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static,
{
    type REG = u64;
    type MEM = Self;

    fn pc(&self) -> &Self::REG {
        self.machine.pc()
    }

    fn update_pc(&mut self, pc: Self::REG) {
        self.machine.update_pc(pc)
    }

    fn commit_pc(&mut self) {
        self.machine.commit_pc()
    }

    fn memory(&self) -> &Self::MEM {
        self
    }

    fn memory_mut(&mut self) -> &mut Self::MEM {
        self
    }

    fn registers(&self) -> &[Self::REG] {
        self.machine.registers()
    }

    fn set_register(&mut self, idx: usize, value: Self::REG) {
        self.machine.set_register(idx, value)
    }

    fn version(&self) -> u32 {
        self.machine.version()
    }

    fn isa(&self) -> u8 {
        self.machine.isa()
    }
}

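// Syscalls and breakpoint instructions are forwarded unchanged to the
// underlying machine.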
impl<DL> Machine for GdbStubHandler<DL>
where
    DL: CellDataProvider + HeaderProvider + ExtensionProvider + Send + Sync + Clone + 'static,
{
    fn ecall(&mut self) -> Result<(), Error> {
        self.machine.ecall()
    }

    fn ebreak(&mut self) -> Result<(), Error> {
        self.machine.ebreak()
    }
}