ckb_vm/machine/
mod.rs

#[cfg(has_asm)]
pub mod asm;
pub mod trace;

use std::fmt::{self, Display};
use std::sync::atomic::{AtomicU8, Ordering};
use std::sync::Arc;

use bytes::Bytes;

use super::debugger::Debugger;
use super::decoder::{build_decoder, Decoder};
use super::elf::{parse_elf, LoadingAction, ProgramMetadata};
use super::instructions::{execute, Instruction, Register};
use super::memory::{load_c_string_byte_by_byte, Memory};
use super::syscalls::Syscalls;
use super::{
    registers::{A0, A7, REGISTER_ABI_NAMES, SP},
    Error, ISA_MOP, RISCV_GENERAL_REGISTER_NUMBER, RISCV_MAX_MEMORY,
};

// Version 0 is the initially launched CKB VM; it is used on the CKB Lina mainnet.
pub const VERSION0: u32 = 0;
// Version 1 fixes known bugs discovered in version 0:
// * It's not possible to read the last byte in the VM memory;
// * https://github.com/nervosnetwork/ckb-vm/issues/92
// * https://github.com/nervosnetwork/ckb-vm/issues/97
// * https://github.com/nervosnetwork/ckb-vm/issues/98
// * https://github.com/nervosnetwork/ckb-vm/issues/106
pub const VERSION1: u32 = 1;
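// Version 2 introduces further behavior changes; one visible in this file is
// the early exit for oversized argv arrays in initialize_stack below.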
pub const VERSION2: u32 = 2;

/// This is the core part of the RISC-V machine that deals only with data; it
/// is extracted from Machine so we can handle lifetime logic in dynamic
/// syscall support.
pub trait CoreMachine {
    type REG: Register;
    type MEM: Memory<REG = Self::REG>;

    fn pc(&self) -> &Self::REG;
    fn update_pc(&mut self, pc: Self::REG);
    fn commit_pc(&mut self);
    fn memory(&self) -> &Self::MEM;
    fn memory_mut(&mut self) -> &mut Self::MEM;
    fn registers(&self) -> &[Self::REG];
    fn set_register(&mut self, idx: usize, value: Self::REG);

    // The currently running machine version, used to support compatible
    // behavior in case of bug fixes.
    fn version(&self) -> u32;
    fn isa(&self) -> u8;
}

/// This is the core trait describing a full RISC-V machine. The instruction
/// package only needs to deal with the functions in this trait.
pub trait Machine: CoreMachine {
    fn ecall(&mut self) -> Result<(), Error>;
    fn ebreak(&mut self) -> Result<(), Error>;
}

/// This trait extends CoreMachine with additional support such as ELF
/// loading and cycle accounting, which might be needed on the Rust side of
/// the logic, e.g. in runners or syscall implementations.
pub trait SupportMachine: CoreMachine {
    // Current execution cycles; it's up to the actual implementation to
    // call add_cycles for each instruction/operation to provide cycles.
    // An implementation might also choose not to do this, effectively
    // disabling the feature.
    fn cycles(&self) -> u64;
    fn set_cycles(&mut self, cycles: u64);
    fn max_cycles(&self) -> u64;
    fn set_max_cycles(&mut self, cycles: u64);

    fn running(&self) -> bool;
    fn set_running(&mut self, running: bool);

    // Erase all the states of the virtual machine.
    fn reset(&mut self, max_cycles: u64);
    fn reset_signal(&mut self) -> bool;

    fn add_cycles(&mut self, cycles: u64) -> Result<(), Error> {
        let new_cycles = self
            .cycles()
            .checked_add(cycles)
            .ok_or(Error::CyclesOverflow)?;
        if new_cycles > self.max_cycles() {
            return Err(Error::CyclesExceeded);
        }
        self.set_cycles(new_cycles);
        Ok(())
    }

    fn add_cycles_no_checking(&mut self, cycles: u64) -> Result<(), Error> {
        let new_cycles = self
            .cycles()
            .checked_add(cycles)
            .ok_or(Error::CyclesOverflow)?;
        self.set_cycles(new_cycles);
        Ok(())
    }

    fn load_elf_inner(&mut self, program: &Bytes, update_pc: bool) -> Result<u64, Error> {
        let version = self.version();
        let metadata = parse_elf::<Self::REG>(program, version)?;
        self.load_binary(program, &metadata, update_pc)
    }

    fn load_elf(&mut self, program: &Bytes, update_pc: bool) -> Result<u64, Error> {
        // Allows overriding load_elf while keeping the real function body in
        // load_elf_inner:
        //
        // impl SupportMachine for Somebody {
        //     fn load_elf(&mut self, program: &Bytes, update_pc: bool) -> Result<u64, Error> {
        //         // Do something before load_elf
        //         let r = self.load_elf_inner(program, update_pc);
        //         // Do something after
        //         return r;
        //     }
        // }
        self.load_elf_inner(program, update_pc)
    }

    fn load_binary_inner(
        &mut self,
        program: &Bytes,
        metadata: &ProgramMetadata,
        update_pc: bool,
    ) -> Result<u64, Error> {
        let version = self.version();
        let mut bytes: u64 = 0;
        for action in &metadata.actions {
            let LoadingAction {
                addr,
                size,
                flags,
                source,
                offset_from_addr,
            } = action;

            self.memory_mut().init_pages(
                *addr,
                *size,
                *flags,
                Some(program.slice(source.start as usize..source.end as usize)),
                *offset_from_addr,
            )?;
            if version < VERSION1 {
                self.memory_mut().store_byte(*addr, *offset_from_addr, 0)?;
            }
            bytes = bytes
                .checked_add(source.end - source.start)
                .ok_or_else(|| {
                    Error::Unexpected(String::from("The bytes count overflowed on loading elf"))
                })?;
        }
        if update_pc {
            self.update_pc(Self::REG::from_u64(metadata.entry));
            self.commit_pc();
        }
        Ok(bytes)
    }

    fn load_binary(
        &mut self,
        program: &Bytes,
        metadata: &ProgramMetadata,
        update_pc: bool,
    ) -> Result<u64, Error> {
        // Similar to load_elf, this provides a way to adjust the behavior of load_binary_inner.
        self.load_binary_inner(program, metadata, update_pc)
    }

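    // For reference, initialize_stack below produces the following layout
    // (from high to low addresses; the VERSION1+ items are absent on
    // version 0):
    //
    //   stack_start + stack_size
    //     argv[0] string bytes + NUL terminator
    //     ...
    //     argv[argc - 1] string bytes + NUL terminator
    //     padding so that SP ends up 16-byte aligned (VERSION1+)
    //     null pointer terminating the argv array (VERSION1+)
    //     argv[argc - 1] pointer
    //     ...
    //     argv[0] pointer
    //     argc                          <- SP after initialize_stack returns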
    fn initialize_stack(
        &mut self,
        args: impl ExactSizeIterator<Item = Result<Bytes, Error>>,
        stack_start: u64,
        stack_size: u64,
    ) -> Result<u64, Error> {
        // When the sections of a program are reordered, writing data in high
        // memory would cause unnecessary changes. At the same time, for CKB,
        // argc is always 0 and the memory is initialized to 0, so the memory
        // writes can be safely skipped.
        //
        // Note that when "chaos_mode" is enabled and "argv" is empty, reading
        // "argc" will return unexpected data. This situation is not very common.
        //
        // See https://github.com/nervosnetwork/ckb-vm/issues/106 for more details.
        if self.version() >= VERSION1 && args.len() == 0 {
            let argc_size = u64::from(Self::REG::BITS / 8);
            let origin_sp = stack_start + stack_size;
            let unaligned_sp_address = origin_sp - argc_size;
            let aligned_sp_address = unaligned_sp_address & (!15);
            let used_bytes = origin_sp - aligned_sp_address;
            self.set_register(SP, Self::REG::from_u64(aligned_sp_address));
            return Ok(used_bytes);
        }

        // We are enforcing WXorX now, so there's no need to call init_pages here
        // since all the required bits are already set.
        self.set_register(SP, Self::REG::from_u64(stack_start + stack_size));
        // The first value in this array is argc, followed by the address
        // (pointer) of each argv object.
        let mut values = vec![Self::REG::from_u64(args.len() as u64)];
        for arg in args {
            let arg = arg?;
            let len = Self::REG::from_u64(arg.len() as u64 + 1);
            let address = self.registers()[SP].overflowing_sub(&len);

            self.memory_mut().store_bytes(address.to_u64(), &arg)?;
            self.memory_mut()
                .store_byte(address.to_u64() + arg.len() as u64, 1, 0)?;

            values.push(address.clone());
            self.set_register(SP, address.clone());

            if self.version() >= VERSION2 && address.to_u64() < stack_start {
                // Provides an early exit for a large argv array.
                return Err(Error::MemOutOfStack);
            }
        }
        if self.version() >= VERSION1 {
            // There are 2 standard requirements on the initialized stack:
            // 1. argv[argc] should contain a null pointer, hence we are
            // pushing another 0 to the values array;
            values.push(Self::REG::zero());
            // 2. SP must be aligned to a 16-byte boundary. Also, considering
            // that _start will read argc from SP and argv from SP + 8, we
            // have to factor in the alignment here first, then push the values.
            let values_bytes =
                Self::REG::from_u64(Self::REG::BITS as u64 / 8 * values.len() as u64);
            let unaligned_sp_address = self.registers()[SP].overflowing_sub(&values_bytes).to_u64();
            // Perform alignment at a 16-byte boundary towards the lower address.
            let aligned_sp_address = unaligned_sp_address & (!15);
            let aligned_bytes = unaligned_sp_address - aligned_sp_address;
            self.set_register(
                SP,
                self.registers()[SP].overflowing_sub(&Self::REG::from_u64(aligned_bytes)),
            );
        }
        // Since we are dealing with a stack, we need to push items in reverse
        // order.
        for value in values.iter().rev() {
            let address =
                self.registers()[SP].overflowing_sub(&Self::REG::from_u8(Self::REG::BITS / 8));
            if self.version() >= VERSION1 {
                if Self::REG::BITS == 64 {
                    self.memory_mut().store64(&address, value)?;
                } else {
                    self.memory_mut().store32(&address, value)?;
                }
            } else {
                self.memory_mut().store32(&address, value)?;
            }
            self.set_register(SP, address);
        }
        if self.registers()[SP].to_u64() < stack_start {
            // Args exceed the stack size.
            return Err(Error::MemOutOfStack);
        }
        Ok(stack_start + stack_size - self.registers()[SP].to_u64())
    }

    #[cfg(feature = "pprof")]
    fn code(&self) -> &Bytes;
}

#[derive(Default)]
pub struct DefaultCoreMachine<R, M> {
    registers: [R; RISCV_GENERAL_REGISTER_NUMBER],
    pc: R,
    next_pc: R,
    reset_signal: bool,
    memory: M,
    cycles: u64,
    max_cycles: u64,
    running: bool,
    isa: u8,
    version: u32,
    #[cfg(feature = "pprof")]
    code: Bytes,
}

impl<R: Register, M: Memory<REG = R>> CoreMachine for DefaultCoreMachine<R, M> {
    type REG = R;
    type MEM = M;
    fn pc(&self) -> &Self::REG {
        &self.pc
    }

    fn update_pc(&mut self, pc: Self::REG) {
        self.next_pc = pc;
    }

    fn commit_pc(&mut self) {
        self.pc = self.next_pc.clone();
    }

    fn memory(&self) -> &Self::MEM {
        &self.memory
    }

    fn memory_mut(&mut self) -> &mut Self::MEM {
        &mut self.memory
    }

    fn registers(&self) -> &[Self::REG] {
        &self.registers
    }

    fn set_register(&mut self, idx: usize, value: Self::REG) {
        self.registers[idx] = value;
    }

    fn isa(&self) -> u8 {
        self.isa
    }

    fn version(&self) -> u32 {
        self.version
    }
}

impl<R: Register, M: Memory<REG = R>> SupportMachine for DefaultCoreMachine<R, M> {
    fn cycles(&self) -> u64 {
        self.cycles
    }

    fn set_cycles(&mut self, cycles: u64) {
        self.cycles = cycles;
    }

    fn max_cycles(&self) -> u64 {
        self.max_cycles
    }

    fn set_max_cycles(&mut self, max_cycles: u64) {
        self.max_cycles = max_cycles;
    }

    fn reset(&mut self, max_cycles: u64) {
        self.registers = Default::default();
        self.pc = Default::default();
        self.memory = M::new_with_memory(self.memory().memory_size());
        self.cycles = 0;
        self.max_cycles = max_cycles;
        self.reset_signal = true;
        self.memory_mut().set_lr(&R::from_u64(u64::MAX));
    }

    fn reset_signal(&mut self) -> bool {
        let ret = self.reset_signal;
        self.reset_signal = false;
        ret
    }

    fn running(&self) -> bool {
        self.running
    }

    fn set_running(&mut self, running: bool) {
        self.running = running;
    }

    fn load_binary(
        &mut self,
        program: &Bytes,
        metadata: &ProgramMetadata,
        update_pc: bool,
    ) -> Result<u64, Error> {
        #[cfg(feature = "pprof")]
        {
            self.code = program.clone();
        }
        self.load_binary_inner(program, metadata, update_pc)
    }

    fn load_elf(&mut self, program: &Bytes, update_pc: bool) -> Result<u64, Error> {
        #[cfg(feature = "pprof")]
        {
            self.code = program.clone();
        }
        self.load_elf_inner(program, update_pc)
    }

    #[cfg(feature = "pprof")]
    fn code(&self) -> &Bytes {
        &self.code
    }
}

impl<R: Register, M: Memory> DefaultCoreMachine<R, M> {
    pub fn new(isa: u8, version: u32, max_cycles: u64) -> Self {
        Self::new_with_memory(isa, version, max_cycles, RISCV_MAX_MEMORY)
    }

    pub fn new_with_memory(isa: u8, version: u32, max_cycles: u64, memory_size: usize) -> Self {
        Self {
            registers: Default::default(),
            pc: Default::default(),
            next_pc: Default::default(),
            reset_signal: Default::default(),
            memory: M::new_with_memory(memory_size),
            cycles: Default::default(),
            max_cycles,
            running: Default::default(),
            isa,
            version,
            #[cfg(feature = "pprof")]
            code: Default::default(),
        }
    }

    pub fn set_max_cycles(&mut self, cycles: u64) {
        self.max_cycles = cycles;
    }

    pub fn take_memory(self) -> M {
        self.memory
    }
}
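
// Example: constructing a 64-bit core machine (a minimal comment sketch;
// `SparseMemory` lives in this crate's memory module and `ISA_IMC` among the
// ISA flags, so the exact import paths here are assumptions):
//
//     use ckb_vm::{DefaultCoreMachine, SparseMemory, ISA_IMC};
//
//     let core = DefaultCoreMachine::<u64, SparseMemory<u64>>::new(
//         ISA_IMC, VERSION1, 200_000_000);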

pub type InstructionCycleFunc = dyn Fn(Instruction) -> u64 + Send + Sync;

pub struct DefaultMachine<Inner> {
    inner: Inner,
    pause: Pause,

    // We have run benchmarks on secp256k1 verification; the performance
    // cost of the Box wrapper here is negligible, hence we are sticking
    // with the Box solution for simplicity now. Later, if this becomes an
    // issue, we can change to static dispatch.
    instruction_cycle_func: Box<InstructionCycleFunc>,
    debugger: Option<Box<dyn Debugger<Inner>>>,
    syscalls: Vec<Box<dyn Syscalls<Inner>>>,
    exit_code: i8,
}

impl<Inner: CoreMachine> CoreMachine for DefaultMachine<Inner> {
    type REG = <Inner as CoreMachine>::REG;
    type MEM = <Inner as CoreMachine>::MEM;

    fn pc(&self) -> &Self::REG {
        self.inner.pc()
    }

    fn update_pc(&mut self, pc: Self::REG) {
        self.inner.update_pc(pc);
    }

    fn commit_pc(&mut self) {
        self.inner.commit_pc();
    }

    fn memory(&self) -> &Self::MEM {
        self.inner.memory()
    }

    fn memory_mut(&mut self) -> &mut Self::MEM {
        self.inner.memory_mut()
    }

    fn registers(&self) -> &[Self::REG] {
        self.inner.registers()
    }

    fn set_register(&mut self, idx: usize, value: Self::REG) {
        self.inner.set_register(idx, value)
    }

    fn isa(&self) -> u8 {
        self.inner.isa()
    }

    fn version(&self) -> u32 {
        self.inner.version()
    }
}

impl<Inner: SupportMachine> SupportMachine for DefaultMachine<Inner> {
    fn cycles(&self) -> u64 {
        self.inner.cycles()
    }

    fn set_cycles(&mut self, cycles: u64) {
        self.inner.set_cycles(cycles)
    }

    fn max_cycles(&self) -> u64 {
        self.inner.max_cycles()
    }

    fn set_max_cycles(&mut self, max_cycles: u64) {
        self.inner.set_max_cycles(max_cycles)
    }

    fn reset(&mut self, max_cycles: u64) {
        self.inner_mut().reset(max_cycles);
    }

    fn reset_signal(&mut self) -> bool {
        self.inner_mut().reset_signal()
    }

    fn running(&self) -> bool {
        self.inner.running()
    }

    fn set_running(&mut self, running: bool) {
        self.inner.set_running(running);
    }

    fn load_binary(
        &mut self,
        program: &Bytes,
        metadata: &ProgramMetadata,
        update_pc: bool,
    ) -> Result<u64, Error> {
        self.inner.load_binary(program, metadata, update_pc)
    }

    fn load_elf(&mut self, program: &Bytes, update_pc: bool) -> Result<u64, Error> {
        self.inner.load_elf(program, update_pc)
    }

    #[cfg(feature = "pprof")]
    fn code(&self) -> &Bytes {
        self.inner.code()
    }
}

impl<Inner: SupportMachine> Machine for DefaultMachine<Inner> {
    fn ecall(&mut self) -> Result<(), Error> {
        let code = self.registers()[A7].to_u64();
        match code {
            93 => {
                // exit
                self.exit_code = self.registers()[A0].to_i8();
                self.set_running(false);
                Ok(())
            }
            _ => {
                for syscall in &mut self.syscalls {
                    let processed = syscall.ecall(&mut self.inner)?;
                    if processed {
                        if self.cycles() > self.max_cycles() {
                            return Err(Error::CyclesExceeded);
                        }
                        return Ok(());
                    }
                }
                Err(Error::InvalidEcall(code))
            }
        }
    }

    fn ebreak(&mut self) -> Result<(), Error> {
        if let Some(debugger) = &mut self.debugger {
            debugger.ebreak(&mut self.inner)
        } else {
            // Unlike ecall, the default behavior of an EBREAK operation is
            // a no-op.
            Ok(())
        }
    }
}
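
// Example: a custom syscall handler dispatched by the ecall loop above (a
// minimal comment sketch; the syscall number 2177 is an assumption for
// illustration):
//
//     struct DebugSyscall;
//
//     impl<Mac: SupportMachine> Syscalls<Mac> for DebugSyscall {
//         fn initialize(&mut self, _machine: &mut Mac) -> Result<(), Error> {
//             Ok(())
//         }
//
//         fn ecall(&mut self, machine: &mut Mac) -> Result<bool, Error> {
//             if machine.registers()[A7].to_u64() != 2177 {
//                 return Ok(false); // not ours, let other handlers try
//             }
//             // ... inspect or mutate machine state here ...
//             Ok(true)
//         }
//     }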

impl<Inner: CoreMachine> Display for DefaultMachine<Inner> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        writeln!(f, "pc  : 0x{:16X}", self.pc().to_u64())?;
        for (i, name) in REGISTER_ABI_NAMES.iter().enumerate() {
            write!(f, "{:4}: 0x{:16X}", name, self.registers()[i].to_u64())?;
            if (i + 1) % 4 == 0 {
                writeln!(f)?;
            } else {
                write!(f, " ")?;
            }
        }
        Ok(())
    }
}

impl<Inner: SupportMachine> DefaultMachine<Inner> {
    pub fn load_program(
        &mut self,
        program: &Bytes,
        args: impl ExactSizeIterator<Item = Result<Bytes, Error>>,
    ) -> Result<u64, Error> {
        let elf_bytes = self.load_elf(program, true)?;
        let stack_bytes = self.initialize(args)?;
        let bytes = elf_bytes.checked_add(stack_bytes).ok_or_else(|| {
            Error::Unexpected(String::from(
                "The bytes count overflowed on loading program",
            ))
        })?;
        Ok(bytes)
    }

    pub fn load_program_with_metadata(
        &mut self,
        program: &Bytes,
        metadata: &ProgramMetadata,
        args: impl ExactSizeIterator<Item = Result<Bytes, Error>>,
    ) -> Result<u64, Error> {
        let elf_bytes = self.load_binary(program, metadata, true)?;
        let stack_bytes = self.initialize(args)?;
        let bytes = elf_bytes.checked_add(stack_bytes).ok_or_else(|| {
            Error::Unexpected(String::from(
                "The bytes count overflowed on loading program",
            ))
        })?;
        Ok(bytes)
    }

    fn initialize(
        &mut self,
        args: impl ExactSizeIterator<Item = Result<Bytes, Error>>,
    ) -> Result<u64, Error> {
        for syscall in &mut self.syscalls {
            syscall.initialize(&mut self.inner)?;
        }
        if let Some(debugger) = &mut self.debugger {
            debugger.initialize(&mut self.inner)?;
        }
        let memory_size = self.memory().memory_size();
        let stack_size = memory_size / 4;
        let stack_bytes =
            self.initialize_stack(args, (memory_size - stack_size) as u64, stack_size as u64)?;
        // Make sure SP is 16-byte aligned.
        if self.inner.version() >= VERSION1 {
            debug_assert!(self.registers()[SP].to_u64() % 16 == 0);
        }
        Ok(stack_bytes)
    }

    pub fn take_inner(self) -> Inner {
        self.inner
    }

    pub fn pause(&self) -> Pause {
        self.pause.clone()
    }

    pub fn set_pause(&mut self, pause: Pause) {
        self.pause = pause;
    }

    pub fn exit_code(&self) -> i8 {
        self.exit_code
    }

    pub fn instruction_cycle_func(&self) -> &InstructionCycleFunc {
        &self.instruction_cycle_func
    }

    pub fn inner_mut(&mut self) -> &mut Inner {
        &mut self.inner
    }

    // This is the most naive way of running the VM: it only decodes each
    // instruction and runs it; no optimization is performed here. It might
    // not be practical in production, but it serves as a baseline and
    // reference implementation.
    pub fn run(&mut self) -> Result<i8, Error> {
        if self.isa() & ISA_MOP != 0 && self.version() == VERSION0 {
            return Err(Error::InvalidVersion);
        }
        let mut decoder = build_decoder::<Inner::REG>(self.isa(), self.version());
        self.set_running(true);
        while self.running() {
            if self.pause.has_interrupted() {
                self.pause.free();
                return Err(Error::Pause);
            }
            if self.reset_signal() {
                decoder.reset_instructions_cache();
            }
            self.step(&mut decoder)?;
        }
        Ok(self.exit_code())
    }

    pub fn step(&mut self, decoder: &mut Decoder) -> Result<(), Error> {
        let instruction = {
            let pc = self.pc().to_u64();
            let memory = self.memory_mut();
            decoder.decode(memory, pc)?
        };
        let cycles = self.instruction_cycle_func()(instruction);
        self.add_cycles(cycles)?;
        execute(instruction, self)
    }
}

pub struct DefaultMachineBuilder<Inner> {
    inner: Inner,
    instruction_cycle_func: Box<InstructionCycleFunc>,
    debugger: Option<Box<dyn Debugger<Inner>>>,
    syscalls: Vec<Box<dyn Syscalls<Inner>>>,
    pause: Pause,
}

impl<Inner> DefaultMachineBuilder<Inner> {
    pub fn new(inner: Inner) -> Self {
        Self {
            inner,
            instruction_cycle_func: Box::new(|_| 0),
            debugger: None,
            syscalls: vec![],
            pause: Pause::new(),
        }
    }

    pub fn instruction_cycle_func(
        mut self,
        instruction_cycle_func: Box<InstructionCycleFunc>,
    ) -> Self {
        self.instruction_cycle_func = instruction_cycle_func;
        self
    }

    pub fn syscall(mut self, syscall: Box<dyn Syscalls<Inner>>) -> Self {
        self.syscalls.push(syscall);
        self
    }

    pub fn pause(mut self, pause: Pause) -> Self {
        self.pause = pause;
        self
    }

    pub fn debugger(mut self, debugger: Box<dyn Debugger<Inner>>) -> Self {
        self.debugger = Some(debugger);
        self
    }

    pub fn build(self) -> DefaultMachine<Inner> {
        DefaultMachine {
            inner: self.inner,
            pause: self.pause,
            instruction_cycle_func: self.instruction_cycle_func,
            debugger: self.debugger,
            syscalls: self.syscalls,
            exit_code: 0,
        }
    }
}
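
// Example: assembling and running a machine (a minimal comment sketch; `core`
// is a DefaultCoreMachine as constructed above, and `program` is assumed to
// be a `Bytes` holding a RISC-V ELF binary):
//
//     let mut machine = DefaultMachineBuilder::new(core)
//         .instruction_cycle_func(Box::new(|_| 1))
//         .build();
//     machine.load_program(&program, vec![Ok(Bytes::from("main"))].into_iter())?;
//     let exit_code = machine.run()?;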

#[derive(Clone, Default)]
pub struct Pause {
    s: Arc<AtomicU8>,
}

impl Pause {
    pub fn new() -> Self {
        Self {
            s: Arc::new(AtomicU8::new(0)),
        }
    }

    pub fn interrupt(&self) {
        self.s.store(1, Ordering::SeqCst);
    }

    pub fn has_interrupted(&self) -> bool {
        self.s.load(Ordering::SeqCst) != 0
    }

    pub fn get_raw_ptr(&self) -> *mut u8 {
        &*self.s as *const _ as *mut u8
    }

    pub fn free(&mut self) {
        self.s.store(0, Ordering::SeqCst);
    }
}
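
// Example: interrupting a running machine from another thread (a minimal
// comment sketch; `machine` is a DefaultMachine as built above):
//
//     let pause = machine.pause();
//     std::thread::spawn(move || {
//         std::thread::sleep(std::time::Duration::from_secs(1));
//         pause.interrupt();
//     });
//     // Once the flag is observed, run() returns Err(Error::Pause); the run
//     // loop clears the flag via free(), so the machine can be resumed later.
//     let result = machine.run();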

pub struct FlattenedArgsReader<'a, M: Memory> {
    memory: &'a mut M,
    argc: M::REG,
    argv: M::REG,
    cidx: M::REG,
}

impl<'a, M: Memory> FlattenedArgsReader<'a, M> {
    pub fn new(memory: &'a mut M, argc: M::REG, argv: M::REG) -> Self {
        Self {
            memory,
            argc,
            argv,
            cidx: M::REG::zero(),
        }
    }
}

impl<'a, M: Memory> Iterator for FlattenedArgsReader<'a, M> {
    type Item = Result<Bytes, Error>;

    fn next(&mut self) -> Option<Self::Item> {
        if self.cidx.ge(&self.argc).to_u8() == 1 {
            return None;
        }
        let addr = match M::REG::BITS {
            32 => self.memory.load32(&self.argv),
            64 => self.memory.load64(&self.argv),
            _ => unreachable!(),
        };
        if let Err(err) = addr {
            return Some(Err(err));
        }
        let addr = addr.unwrap();
        let cstr = load_c_string_byte_by_byte(self.memory, &addr);
        if let Err(err) = cstr {
            return Some(Err(err));
        }
        let cstr = cstr.unwrap();
        self.cidx = self.cidx.overflowing_add(&M::REG::from_u8(1));
        self.argv = self
            .argv
            .overflowing_add(&M::REG::from_u8(M::REG::BITS / 8));
        Some(Ok(cstr))
    }
}

impl<'a, M: Memory> ExactSizeIterator for FlattenedArgsReader<'a, M> {
    fn len(&self) -> usize {
        self.argc.to_u64() as usize
    }
}
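
// Example: lazily reading argc/argv out of one machine's memory to initialize
// another (a minimal comment sketch; the `parent`/`child` machines and the
// A0/A1 argc/argv register convention are assumptions for illustration):
//
//     let argc = parent.registers()[A0].clone();
//     let argv = parent.registers()[A1].clone();
//     let args = FlattenedArgsReader::new(parent.memory_mut(), argc, argv);
//     child.load_program(&program, args)?;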

#[cfg(test)]
mod tests {
    use std::sync::atomic::AtomicU8;

    #[test]
    fn test_atomicu8() {
        // Assert that the AtomicU8 type has the same in-memory representation
        // as u8. This ensures that Pause::get_raw_ptr() works properly.
        assert_eq!(std::mem::size_of::<AtomicU8>(), 1);
    }
}