unicorn_engine/lib.rs

//! Bindings for the Unicorn emulator.
//!
//! # Example use
//!
//! ```rust
//! use unicorn_engine::{
//!     RegisterARM,
//!     unicorn_const::{Arch, Mode, Prot, SECOND_SCALE},
//! };
//!
//! fn emulate() {
//!     let arm_code32 = [0x17, 0x00, 0x40, 0xe2]; // sub r0, r0, #23
//!
//!     let mut emu = unicorn_engine::Unicorn::new(Arch::ARM, Mode::LITTLE_ENDIAN)
//!         .expect("failed to initialize Unicorn instance");
//!     emu.mem_map(0x1000, 0x4000, Prot::ALL)
//!         .expect("failed to map code page");
//!     emu.mem_write(0x1000, &arm_code32)
//!         .expect("failed to write instructions");
//!
//!     emu.reg_write(RegisterARM::R0, 123)
//!         .expect("failed to write to R0");
//!     emu.reg_write(RegisterARM::R5, 1337)
//!         .expect("failed to write to R5");
//!
//!     emu.emu_start(
//!         0x1000,
//!         (0x1000 + arm_code32.len()) as u64,
//!         10 * SECOND_SCALE,
//!         1000,
//!     )
//!     .unwrap();
//!     assert_eq!(emu.reg_read(RegisterARM::R0), Ok(100));
//!     assert_eq!(emu.reg_read(RegisterARM::R5), Ok(1337));
//! }
//! ```

#![no_std]

#[macro_use]
extern crate alloc;

use alloc::{boxed::Box, rc::Rc, vec::Vec};
use core::{cell::UnsafeCell, ffi::c_void, ptr};

#[macro_use]
pub mod unicorn_const;
pub use unicorn_const::*;
pub mod hook; // public so that consumers can name and reuse the hook types

#[cfg(test)]
mod tests;

#[derive(Debug)]
pub struct Context {
    context: *mut uc_context,
}

impl Context {
    #[must_use]
    pub const fn is_initialized(&self) -> bool {
        !self.context.is_null()
    }

    pub fn reg_read<T: Into<i32>>(&self, regid: T) -> Result<u64, uc_error> {
        let mut value = 0;
        unsafe { uc_context_reg_read(self.context, regid.into(), (&raw mut value).cast()) }
            .and(Ok(value))
    }

    pub fn reg_write<T: Into<i32>>(&mut self, regid: T, value: u64) -> Result<(), uc_error> {
        unsafe { uc_context_reg_write(self.context, regid.into(), (&raw const value).cast()) }
            .into()
    }
}

impl Drop for Context {
    fn drop(&mut self) {
        if self.is_initialized() {
            unsafe {
                uc_context_free(self.context);
            }
        }
        self.context = ptr::null_mut();
    }
}

pub struct MmioCallbackScope<'a> {
    pub regions: Vec<(u64, u64)>,
    pub read_callback: Option<Box<dyn hook::IsUcHook<'a> + 'a>>,
    pub write_callback: Option<Box<dyn hook::IsUcHook<'a> + 'a>>,
}

impl MmioCallbackScope<'_> {
    fn has_regions(&self) -> bool {
        !self.regions.is_empty()
    }

    fn unmap(&mut self, begin: u64, size: u64) {
        let end: u64 = begin + size;
        self.regions = self
            .regions
            .iter()
            .flat_map(|(b, s)| {
                let e = *b + *s;
                if begin > *b {
                    if begin >= e {
                        // The unmapped region is completely after this region
                        vec![(*b, *s)]
                    } else if end >= e {
                        // The unmapped region overlaps with the end of this region
                        vec![(*b, begin - *b)]
                    } else {
                        // The unmapped region is in the middle of this region;
                        // the second remainder starts at `end` (an exclusive bound)
                        vec![(*b, begin - *b), (end, e - end)]
                    }
                } else if end > *b {
                    if end >= e {
                        // The unmapped region completely contains this region
                        vec![]
                    } else {
                        // The unmapped region overlaps with the start of this region
                        vec![(end, e - end)]
                    }
                } else {
                    // The unmapped region is completely before this region
                    vec![(*b, *s)]
                }
            })
            .collect();
    }
}

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct UcHookId(uc_hook);

pub struct UnicornInner<'a, D> {
    pub handle: *mut uc_engine,
    pub ffi: bool,
    pub arch: Arch,
    /// To keep ownership over the hooks for this uc instance's lifetime.
    pub hooks: Vec<(UcHookId, Box<dyn hook::IsUcHook<'a> + 'a>)>,
    /// To keep ownership over the mmio callbacks for this uc instance's lifetime.
    pub mmio_callbacks: Vec<MmioCallbackScope<'a>>,
    pub data: D,
}

impl<D> Drop for UnicornInner<'_, D> {
    fn drop(&mut self) {
        if !self.ffi && !self.handle.is_null() {
            unsafe { uc_close(self.handle) };
        }
        self.handle = ptr::null_mut();
    }
}

/// A Unicorn emulator instance.
///
/// Cloning this instance is cheap: it only clones the inner `Rc`.
pub struct Unicorn<'a, D: 'a> {
    inner: Rc<UnsafeCell<UnicornInner<'a, D>>>,
}

impl<'a> Unicorn<'a, ()> {
    /// Create a new instance of the unicorn engine for the specified architecture
    /// and hardware mode.
    pub fn new(arch: Arch, mode: Mode) -> Result<Unicorn<'a, ()>, uc_error> {
        Self::new_with_data(arch, mode, ())
    }

    /// # Safety
    /// The function has to be called with a valid [`uc_engine`] pointer
    /// that was previously allocated by a call to [`uc_open`].
    /// Calling the function with a non-null pointer value that
    /// does not point to a unicorn instance will cause undefined
    /// behavior.
    pub unsafe fn from_handle(handle: *mut uc_engine) -> Result<Unicorn<'a, ()>, uc_error> {
        unsafe { Self::from_handle_with_data(handle, ()) }
    }
}

impl<'a, D> Unicorn<'a, D>
where
    D: 'a,
{
    /// Create a new instance of the unicorn engine for the specified architecture
    /// and hardware mode.
    pub fn new_with_data(arch: Arch, mode: Mode, data: D) -> Result<Unicorn<'a, D>, uc_error> {
        let mut handle = ptr::null_mut();
        unsafe { uc_open(arch, mode, &mut handle) }.and_then(|| {
            Ok(Unicorn {
                inner: Rc::new(UnsafeCell::from(UnicornInner {
                    handle,
                    ffi: false,
                    arch,
                    data,
                    hooks: vec![],
                    mmio_callbacks: vec![],
                })),
            })
        })
    }

    /// # Safety
    /// The function has to be called with a valid [`uc_engine`] pointer
    /// that was previously allocated by a call to [`uc_open`].
    /// Calling the function with a non-null pointer value that
    /// does not point to a unicorn instance will cause undefined
    /// behavior.
    pub unsafe fn from_handle_with_data(
        handle: *mut uc_engine,
        data: D,
    ) -> Result<Unicorn<'a, D>, uc_error> {
        if handle.is_null() {
            return Err(uc_error::HANDLE);
        }
        let mut arch = 0;
        let err = unsafe { uc_query(handle, Query::ARCH, &mut arch) };
        if err != uc_error::OK {
            return Err(err);
        }
        Ok(Unicorn {
            inner: Rc::new(UnsafeCell::from(UnicornInner {
                handle,
                ffi: true,
                arch: arch.try_into()?,
                data,
                hooks: vec![],
                mmio_callbacks: vec![],
            })),
        })
    }
}

impl<D> core::fmt::Debug for Unicorn<'_, D> {
    fn fmt(&self, formatter: &mut core::fmt::Formatter) -> core::fmt::Result {
        write!(formatter, "Unicorn {{ uc: {:p} }}", self.get_handle())
    }
}

impl<D> Clone for Unicorn<'_, D> {
    fn clone(&self) -> Self {
        Self {
            inner: Rc::clone(&self.inner),
        }
    }
}

impl<'a, D> Unicorn<'a, D> {
    fn inner(&self) -> &UnicornInner<'a, D> {
        unsafe { self.inner.get().as_ref().unwrap() }
    }

    fn inner_mut(&mut self) -> &mut UnicornInner<'a, D> {
        unsafe { self.inner.get().as_mut().unwrap() }
    }

    /// Return whatever data was passed during initialization.
    ///
    /// For an example, have a look at `utils::init_emu_with_heap`, where
    /// a struct is passed that is used by a custom allocator.
    #[must_use]
    pub fn get_data(&self) -> &D {
        &self.inner().data
    }

    /// Return a mutable reference to whatever data was passed during initialization.
    #[must_use]
    pub fn get_data_mut(&mut self) -> &mut D {
        &mut self.inner_mut().data
    }

    /// Return the architecture of the current emulator.
    #[must_use]
    pub fn get_arch(&self) -> Arch {
        self.inner().arch
    }

    /// Return the handle of the current emulator.
    #[must_use]
    pub fn get_handle(&self) -> *mut uc_engine {
        self.inner().handle
    }

    /// Returns a vector with the memory regions that are mapped in the emulator.
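    ///
    /// A minimal sketch, assuming an ARM build (the `MemRegion` fields mirror
    /// the C API's begin/end/perms layout):
    ///
    /// ```no_run
    /// use unicorn_engine::{Unicorn, unicorn_const::{Arch, Mode, Prot}};
    ///
    /// let mut emu = Unicorn::new(Arch::ARM, Mode::LITTLE_ENDIAN).unwrap();
    /// emu.mem_map(0x1000, 0x4000, Prot::ALL).unwrap();
    /// for region in emu.mem_regions().unwrap() {
    ///     // one entry per mapped range
    ///     let _ = (region.begin, region.end, region.perms);
    /// }
    /// ```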
    pub fn mem_regions(&self) -> Result<Vec<MemRegion>, uc_error> {
        let mut nb_regions = 0;
        let mut p_regions = ptr::null_mut();
        unsafe { uc_mem_regions(self.get_handle(), &raw mut p_regions, &mut nb_regions) }.and_then(
            || {
                let mut regions = Vec::new();
                for i in 0..nb_regions {
                    // copy each C array element into an owned `MemRegion`
                    regions.push(unsafe { core::mem::transmute_copy(&*p_regions.add(i as usize)) });
                }
                unsafe { uc_free(p_regions.cast()) };
                Ok(regions)
            },
        )
    }

    /// Read a range of bytes from memory at the specified emulated physical address.
    pub fn mem_read(&self, address: u64, buf: &mut [u8]) -> Result<(), uc_error> {
        unsafe {
            uc_mem_read(
                self.get_handle(),
                address,
                buf.as_mut_ptr().cast(),
                buf.len().try_into().unwrap(),
            )
        }
        .into()
    }

    /// Return a range of bytes from memory at the specified emulated physical address as a vector.
    pub fn mem_read_as_vec(&self, address: u64, size: usize) -> Result<Vec<u8>, uc_error> {
        let mut buf = vec![0; size];
        unsafe {
            uc_mem_read(
                self.get_handle(),
                address,
                buf.as_mut_ptr().cast(),
                size.try_into().unwrap(),
            )
        }
        .and(Ok(buf))
    }

    /// Read a range of bytes from memory at the specified emulated virtual address.
    pub fn vmem_read(&self, address: u64, prot: Prot, buf: &mut [u8]) -> Result<(), uc_error> {
        unsafe {
            uc_vmem_read(
                self.get_handle(),
                address,
                prot,
                buf.as_mut_ptr() as _,
                buf.len(),
            )
        }
        .into()
    }

    /// Return a range of bytes from memory at the specified emulated virtual address as a vector.
    pub fn vmem_read_as_vec(
        &self,
        address: u64,
        prot: Prot,
        size: usize,
    ) -> Result<Vec<u8>, uc_error> {
        let mut buf = vec![0; size];
        unsafe {
            uc_vmem_read(
                self.get_handle(),
                address,
                prot,
                buf.as_mut_ptr() as _,
                buf.len(),
            )
        }
        .and(Ok(buf))
    }

    /// Write the data in `bytes` to the emulated physical address `address`.
    pub fn mem_write(&mut self, address: u64, bytes: &[u8]) -> Result<(), uc_error> {
        unsafe {
            uc_mem_write(
                self.get_handle(),
                address,
                bytes.as_ptr().cast(),
                bytes.len().try_into().unwrap(),
            )
        }
        .into()
    }

    /// Translate a virtual address to a physical address.
    pub fn vmem_translate(&mut self, address: u64, prot: Prot) -> Result<u64, uc_error> {
        let mut physical: u64 = 0;
        let err = unsafe { uc_vmem_translate(self.get_handle(), address, prot, &mut physical) };
        if err != uc_error::OK {
            return Err(err);
        }
        Ok(physical)
    }

    /// Map an existing memory region in the emulator at the specified address.
    ///
    /// # Safety
    ///
    /// This function is marked unsafe because it is the responsibility of the caller to
    /// ensure that `size` matches the size of the passed buffer; an invalid `size` value will
    /// likely cause a crash in unicorn.
    ///
    /// `address` must be aligned to 4 KiB, or this will return `uc_error::ARG`.
    ///
    /// `size` must be a multiple of 4 KiB, or this will return `uc_error::ARG`.
    ///
    /// `ptr` is a pointer to the provided memory region that will be used by the emulator.
    pub unsafe fn mem_map_ptr(
        &mut self,
        address: u64,
        size: u64,
        perms: Prot,
        ptr: *mut c_void,
    ) -> Result<(), uc_error> {
        unsafe { uc_mem_map_ptr(self.get_handle(), address, size, perms.0 as _, ptr).into() }
    }

    /// Map a memory region in the emulator at the specified address.
    ///
    /// `address` must be aligned to 4 KiB, or this will return `uc_error::ARG`.
    /// `size` must be a multiple of 4 KiB, or this will return `uc_error::ARG`.
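    ///
    /// A minimal map/write/read roundtrip sketch, assuming an ARM build:
    ///
    /// ```no_run
    /// use unicorn_engine::{Unicorn, unicorn_const::{Arch, Mode, Prot}};
    ///
    /// let mut emu = Unicorn::new(Arch::ARM, Mode::LITTLE_ENDIAN).unwrap();
    /// emu.mem_map(0x1000, 0x4000, Prot::ALL).unwrap();
    /// emu.mem_write(0x1000, &[0xde, 0xad, 0xbe, 0xef]).unwrap();
    /// let mut buf = [0u8; 4];
    /// emu.mem_read(0x1000, &mut buf).unwrap();
    /// assert_eq!(buf, [0xde, 0xad, 0xbe, 0xef]);
    /// ```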
    pub fn mem_map(&mut self, address: u64, size: u64, perms: Prot) -> Result<(), uc_error> {
        unsafe { uc_mem_map(self.get_handle(), address, size, perms.0 as _) }.into()
    }

    /// Map in an MMIO region backed by callbacks.
    ///
    /// `address` must be aligned to 4 KiB, or this will return `uc_error::ARG`.
    /// `size` must be a multiple of 4 KiB, or this will return `uc_error::ARG`.
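    ///
    /// A minimal sketch of a fake device whose reads return a constant,
    /// assuming an ARM build:
    ///
    /// ```no_run
    /// use unicorn_engine::{Unicorn, unicorn_const::{Arch, Mode}};
    ///
    /// let mut emu = Unicorn::new(Arch::ARM, Mode::LITTLE_ENDIAN).unwrap();
    /// emu.mmio_map(
    ///     0xF000_0000,
    ///     0x1000,
    ///     // a real device would decode `offset` and `size`
    ///     Some(|_uc: &mut Unicorn<()>, _offset: u64, _size: usize| 42u64),
    ///     // writes are simply dropped here
    ///     Some(|_uc: &mut Unicorn<()>, _offset: u64, _size: usize, _value: u64| {}),
    /// )
    /// .unwrap();
    /// ```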
    pub fn mmio_map<R, W>(
        &mut self,
        address: u64,
        size: u64,
        read_callback: Option<R>,
        write_callback: Option<W>,
    ) -> Result<(), uc_error>
    where
        R: FnMut(&mut Unicorn<'_, D>, u64, usize) -> u64 + 'a,
        W: FnMut(&mut Unicorn<'_, D>, u64, usize, u64) + 'a,
    {
        let mut read_data = read_callback.map(|c| {
            Box::new(hook::UcHook {
                callback: c,
                uc: Rc::downgrade(&self.inner),
            })
        });
        let mut write_data = write_callback.map(|c| {
            Box::new(hook::UcHook {
                callback: c,
                uc: Rc::downgrade(&self.inner),
            })
        });

        let (read_cb, user_data_read) = read_data.as_mut().map_or((None, ptr::null_mut()), |d| {
            (
                Some(hook::mmio_read_callback_proxy::<D, R> as _),
                core::ptr::from_mut(d.as_mut()).cast(),
            )
        });

        let (write_cb, user_data_write) =
            write_data.as_mut().map_or((None, ptr::null_mut()), |d| {
                (
                    Some(hook::mmio_write_callback_proxy::<D, W> as _),
                    core::ptr::from_mut(d.as_mut()).cast(),
                )
            });

        unsafe {
            uc_mmio_map(
                self.get_handle(),
                address,
                size,
                read_cb,
                user_data_read,
                write_cb,
                user_data_write,
            )
        }
        .and_then(|| {
            let rd = read_data.map(|c| c as Box<dyn hook::IsUcHook>);
            let wd = write_data.map(|c| c as Box<dyn hook::IsUcHook>);
            self.inner_mut().mmio_callbacks.push(MmioCallbackScope {
                regions: vec![(address, size)],
                read_callback: rd,
                write_callback: wd,
            });

            Ok(())
        })
    }

    /// Map in a read-only MMIO region backed by a callback.
    ///
    /// `address` must be aligned to 4 KiB, or this will return `uc_error::ARG`.
    /// `size` must be a multiple of 4 KiB, or this will return `uc_error::ARG`.
    pub fn mmio_map_ro<F>(&mut self, address: u64, size: u64, callback: F) -> Result<(), uc_error>
    where
        F: FnMut(&mut Unicorn<D>, u64, usize) -> u64 + 'a,
    {
        self.mmio_map(
            address,
            size,
            Some(callback),
            None::<fn(&mut Unicorn<D>, u64, usize, u64)>,
        )
    }

    /// Map in a write-only MMIO region backed by a callback.
    ///
    /// `address` must be aligned to 4 KiB, or this will return `uc_error::ARG`.
    /// `size` must be a multiple of 4 KiB, or this will return `uc_error::ARG`.
    pub fn mmio_map_wo<F>(&mut self, address: u64, size: u64, callback: F) -> Result<(), uc_error>
    where
        F: FnMut(&mut Unicorn<D>, u64, usize, u64) + 'a,
    {
        self.mmio_map(
            address,
            size,
            None::<fn(&mut Unicorn<D>, u64, usize) -> u64>,
            Some(callback),
        )
    }

    /// Unmap a memory region.
    ///
    /// `address` must be aligned to 4 KiB, or this will return `uc_error::ARG`.
    /// `size` must be a multiple of 4 KiB, or this will return `uc_error::ARG`.
    pub fn mem_unmap(&mut self, address: u64, size: u64) -> Result<(), uc_error> {
        let err = unsafe { uc_mem_unmap(self.get_handle(), address, size) };
        self.mmio_unmap(address, size);
        err.into()
    }

    fn mmio_unmap(&mut self, address: u64, size: u64) {
        for scope in &mut self.inner_mut().mmio_callbacks {
            scope.unmap(address, size);
        }
        self.inner_mut()
            .mmio_callbacks
            .retain(MmioCallbackScope::has_regions);
    }

    /// Set the memory permissions for an existing memory region.
    ///
    /// `address` must be aligned to 4 KiB, or this will return `uc_error::ARG`.
    /// `size` must be a multiple of 4 KiB, or this will return `uc_error::ARG`.
    pub fn mem_protect(&mut self, address: u64, size: u64, perms: Prot) -> Result<(), uc_error> {
        unsafe { uc_mem_protect(self.get_handle(), address, size, perms.0 as _) }.into()
    }

    /// Write an unsigned value to a register.
    pub fn reg_write<T: Into<i32>>(&mut self, regid: T, value: u64) -> Result<(), uc_error> {
        unsafe { uc_reg_write(self.get_handle(), regid.into(), (&raw const value).cast()) }.into()
    }

    /// Write values into a batch of registers.
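    ///
    /// A minimal sketch, assuming an ARM build:
    ///
    /// ```no_run
    /// use unicorn_engine::{RegisterARM, Unicorn, unicorn_const::{Arch, Mode}};
    ///
    /// let emu = Unicorn::new(Arch::ARM, Mode::LITTLE_ENDIAN).unwrap();
    /// let regids = [RegisterARM::R0, RegisterARM::R1];
    /// let values = [1u64, 2];
    /// // `count` must match the number of registers and values
    /// emu.reg_write_batch(&regids, &values, 2).unwrap();
    /// ```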
    pub fn reg_write_batch<T>(
        &self,
        regids: &[T],
        values: &[u64],
        count: i32,
    ) -> Result<(), uc_error>
    where
        T: Copy + Into<i32>,
    {
        let mut values_ptrs = vec![core::ptr::null::<u64>(); count as usize];
        let mut regids = regids
            .iter()
            .map(|regid| (*regid).into())
            .collect::<Vec<i32>>();
        // build the array of per-value pointers that the C API expects
        for i in 0..values.len() {
            values_ptrs[i] = &raw const values[i];
        }
        unsafe {
            uc_reg_write_batch(
                self.get_handle(),
                regids.as_mut_ptr(),
                values_ptrs.as_ptr().cast::<*mut c_void>(),
                count,
            )
        }
        .into()
    }

    /// Write variable sized values into registers.
    ///
    /// The user has to make sure that the buffer length matches the register size.
    /// This adds support for registers >64 bit (GDTR/IDTR, XMM, YMM, ZMM (x86); Q, V (arm64)).
    pub fn reg_write_long<T: Into<i32>>(&self, regid: T, value: &[u8]) -> Result<(), uc_error> {
        unsafe { uc_reg_write(self.get_handle(), regid.into(), value.as_ptr().cast()) }.into()
    }

    /// Read an unsigned value from a register.
    ///
    /// Not to be used with registers larger than 64 bit.
    pub fn reg_read<T: Into<i32>>(&self, regid: T) -> Result<u64, uc_error> {
        let mut value = 0;
        unsafe { uc_reg_read(self.get_handle(), regid.into(), (&raw mut value).cast()) }
            .and(Ok(value))
    }

    /// Read a batch of registers.
    ///
    /// Not to be used with registers larger than 64 bit.
    pub fn reg_read_batch<T>(&self, regids: &mut [T], count: i32) -> Result<Vec<u64>, uc_error>
    where
        T: Copy + Into<i32>,
    {
        unsafe {
            let mut addrs_vec = vec![0u64; count as usize];
            let addrs = addrs_vec.as_mut_slice();
            let mut regids = regids
                .iter()
                .map(|regid| (*regid).into())
                .collect::<Vec<i32>>();
            // each element initially holds a pointer to itself; unicorn then
            // overwrites it in place with the value of the matching register
            for i in 0..count {
                addrs[i as usize] = &raw mut addrs[i as usize] as u64;
            }
            let res = uc_reg_read_batch(
                self.get_handle(),
                regids.as_mut_ptr(),
                addrs.as_mut_ptr().cast::<*mut c_void>(),
                count,
            );
            match res {
                uc_error::OK => Ok(addrs_vec),
                _ => Err(res),
            }
        }
    }

    /// Read 128, 256 or 512 bit register value into heap allocated byte array.
    ///
    /// This adds safe support for registers >64 bit (GDTR/IDTR, XMM, YMM, ZMM, ST (x86); Q, V
    /// (arm64)).
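    ///
    /// A minimal sketch, assuming the `arch_x86` feature is enabled:
    ///
    /// ```no_run
    /// use unicorn_engine::{RegisterX86, Unicorn, unicorn_const::{Arch, Mode}};
    ///
    /// let emu = Unicorn::new(Arch::X86, Mode::MODE_64).unwrap();
    /// let xmm0 = emu.reg_read_long(RegisterX86::XMM0).unwrap();
    /// assert_eq!(xmm0.len(), 16); // XMM registers are 128 bit wide
    /// ```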
    pub fn reg_read_long<T: Into<i32>>(&self, regid: T) -> Result<Box<[u8]>, uc_error> {
        let curr_reg_id = regid.into();
        let curr_arch = self.get_arch();

        let value_size = match curr_arch {
            #[cfg(feature = "arch_x86")]
            Arch::X86 => Self::value_size_x86(curr_reg_id)?,
            #[cfg(feature = "arch_aarch64")]
            Arch::ARM64 => Self::value_size_arm64(curr_reg_id)?,
            _ => Err(uc_error::ARCH)?,
        };
        let mut value = vec![0; value_size];
        unsafe { uc_reg_read(self.get_handle(), curr_reg_id, value.as_mut_ptr().cast()) }
            .and_then(|| Ok(value.into_boxed_slice()))
    }

    /// Read an ARM coprocessor register.
    pub fn reg_read_arm_coproc(&self, reg: &mut RegisterARMCP) -> Result<(), uc_error> {
        let curr_arch = self.get_arch();
        match curr_arch {
            #[cfg(feature = "arch_arm")]
            Arch::ARM => {}
            _ => return Err(uc_error::ARCH),
        }

        unsafe {
            uc_reg_read(
                self.get_handle(),
                RegisterARM::CP_REG.into(),
                core::ptr::from_mut(reg).cast(),
            )
        }
        .into()
    }

    /// Write an ARM coprocessor register.
    pub fn reg_write_arm_coproc(&mut self, reg: &RegisterARMCP) -> Result<(), uc_error> {
        let curr_arch = self.get_arch();
        match curr_arch {
            #[cfg(feature = "arch_arm")]
            Arch::ARM => {}
            _ => return Err(uc_error::ARCH),
        }

        unsafe {
            uc_reg_write(
                self.get_handle(),
                RegisterARM::CP_REG.into(),
                core::ptr::from_ref(reg).cast(),
            )
        }
        .into()
    }

    /// Read an ARM64 coprocessor register.
    pub fn reg_read_arm64_coproc(&self, reg: &mut RegisterARM64CP) -> Result<(), uc_error> {
        let curr_arch = self.get_arch();
        match curr_arch {
            #[cfg(feature = "arch_aarch64")]
            Arch::ARM64 => {}
            _ => return Err(uc_error::ARCH),
        }

        unsafe {
            uc_reg_read(
                self.get_handle(),
                RegisterARM64::CP_REG.into(),
                core::ptr::from_mut(reg).cast(),
            )
        }
        .and(Ok(()))
    }

    /// Write an ARM64 coprocessor register.
    pub fn reg_write_arm64_coproc(&mut self, reg: &RegisterARM64CP) -> Result<(), uc_error> {
        let curr_arch = self.get_arch();
        match curr_arch {
            #[cfg(feature = "arch_aarch64")]
            Arch::ARM64 => {}
            _ => return Err(uc_error::ARCH),
        }

        unsafe {
            uc_reg_write(
                self.get_handle(),
                RegisterARM64::CP_REG.into(),
                core::ptr::from_ref(reg).cast(),
            )
        }
        .and(Ok(()))
    }

    #[cfg(feature = "arch_aarch64")]
    fn value_size_arm64(curr_reg_id: i32) -> Result<usize, uc_error> {
        match curr_reg_id {
            r if (RegisterARM64::Q0 as i32..=RegisterARM64::Q31 as i32).contains(&r)
                || (RegisterARM64::V0 as i32..=RegisterARM64::V31 as i32).contains(&r) =>
            {
                Ok(16)
            }
            _ => Err(uc_error::ARG),
        }
    }

    #[cfg(feature = "arch_x86")]
    fn value_size_x86(curr_reg_id: i32) -> Result<usize, uc_error> {
        match curr_reg_id {
            r if (RegisterX86::XMM0 as i32..=RegisterX86::XMM31 as i32).contains(&r) => Ok(16),
            r if (RegisterX86::YMM0 as i32..=RegisterX86::YMM31 as i32).contains(&r) => Ok(32),
            r if (RegisterX86::ZMM0 as i32..=RegisterX86::ZMM31 as i32).contains(&r) => Ok(64),
            r if r == RegisterX86::GDTR as i32
                || r == RegisterX86::IDTR as i32
                || (RegisterX86::ST0 as i32..=RegisterX86::ST7 as i32).contains(&r) =>
            {
                Ok(10)
            }
            _ => Err(uc_error::ARG),
        }
    }

    /// Read a signed 32-bit value from a register.
    pub fn reg_read_i32<T: Into<i32>>(&self, regid: T) -> Result<i32, uc_error> {
        let mut value = 0;
        unsafe { uc_reg_read(self.get_handle(), regid.into(), (&raw mut value).cast()) }
            .and(Ok(value))
    }

    /// Add a code hook.
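    ///
    /// A minimal sketch that traces every executed instruction, assuming an
    /// ARM build:
    ///
    /// ```no_run
    /// use unicorn_engine::{Unicorn, unicorn_const::{Arch, Mode}};
    ///
    /// let mut emu = Unicorn::new(Arch::ARM, Mode::LITTLE_ENDIAN).unwrap();
    /// let hook_id = emu
    ///     .add_code_hook(0x1000, 0x2000, |_uc, address, size| {
    ///         // invoked once per instruction within [0x1000, 0x2000]
    ///         let _ = (address, size);
    ///     })
    ///     .unwrap();
    /// emu.remove_hook(hook_id).unwrap();
    /// ```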
    pub fn add_code_hook<F>(
        &mut self,
        begin: u64,
        end: u64,
        callback: F,
    ) -> Result<UcHookId, uc_error>
    where
        F: FnMut(&mut Unicorn<D>, u64, u32) + 'a,
    {
        let mut hook_id = 0;
        let mut user_data = Box::new(hook::UcHook {
            callback,
            uc: Rc::downgrade(&self.inner),
        });

        unsafe {
            uc_hook_add(
                self.get_handle(),
                (&raw mut hook_id).cast(),
                HookType::CODE.0 as i32,
                hook::code_hook_proxy::<D, F> as _,
                core::ptr::from_mut(user_data.as_mut()).cast(),
                begin,
                end,
            )
        }
        .and_then(|| {
            let hook_id = UcHookId(hook_id);
            self.inner_mut().hooks.push((hook_id, user_data));
            Ok(hook_id)
        })
    }

    /// Add a block hook.
    pub fn add_block_hook<F>(
        &mut self,
        begin: u64,
        end: u64,
        callback: F,
    ) -> Result<UcHookId, uc_error>
    where
        F: FnMut(&mut Unicorn<D>, u64, u32) + 'a,
    {
        let mut hook_id = 0;
        let mut user_data = Box::new(hook::UcHook {
            callback,
            uc: Rc::downgrade(&self.inner),
        });

        unsafe {
            uc_hook_add(
                self.get_handle(),
                (&raw mut hook_id).cast(),
                HookType::BLOCK.0 as i32,
                hook::block_hook_proxy::<D, F> as _,
                core::ptr::from_mut(user_data.as_mut()).cast(),
                begin,
                end,
            )
        }
        .and_then(|| {
            let hook_id = UcHookId(hook_id);
            self.inner_mut().hooks.push((hook_id, user_data));
            Ok(hook_id)
        })
    }

    /// Add a memory hook.
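    ///
    /// A minimal sketch that observes all memory accesses, assuming an ARM
    /// build (the boolean return value is only consulted for invalid
    /// accesses):
    ///
    /// ```no_run
    /// use unicorn_engine::{Unicorn, unicorn_const::{Arch, HookType, Mode}};
    ///
    /// let mut emu = Unicorn::new(Arch::ARM, Mode::LITTLE_ENDIAN).unwrap();
    /// emu.add_mem_hook(HookType::MEM_ALL, 0, u64::MAX, |_uc, mem_type, address, size, value| {
    ///     // `mem_type` distinguishes reads, writes and fetches
    ///     let _ = (mem_type, address, size, value);
    ///     false
    /// })
    /// .unwrap();
    /// ```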
    pub fn add_mem_hook<F>(
        &mut self,
        hook_type: HookType,
        begin: u64,
        end: u64,
        callback: F,
    ) -> Result<UcHookId, uc_error>
    where
        F: FnMut(&mut Unicorn<D>, MemType, u64, usize, i64) -> bool + 'a,
    {
        if hook_type & (HookType::MEM_ALL | HookType::MEM_READ_AFTER) != hook_type {
            return Err(uc_error::ARG);
        }

        let mut hook_id = 0;
        let mut user_data = Box::new(hook::UcHook {
            callback,
            uc: Rc::downgrade(&self.inner),
        });

        unsafe {
            uc_hook_add(
                self.get_handle(),
                (&raw mut hook_id).cast(),
                hook_type.0 as i32,
                hook::mem_hook_proxy::<D, F> as _,
                core::ptr::from_mut(user_data.as_mut()).cast(),
                begin,
                end,
            )
        }
        .and_then(|| {
            let hook_id = UcHookId(hook_id);
            self.inner_mut().hooks.push((hook_id, user_data));
            Ok(hook_id)
        })
    }

    /// Add an interrupt hook.
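    ///
    /// A minimal sketch that stops emulation on the first interrupt (e.g. an
    /// ARM `svc`), assuming an ARM build:
    ///
    /// ```no_run
    /// use unicorn_engine::{Unicorn, unicorn_const::{Arch, Mode}};
    ///
    /// let mut emu = Unicorn::new(Arch::ARM, Mode::LITTLE_ENDIAN).unwrap();
    /// emu.add_intr_hook(|uc, intno| {
    ///     // `intno` is the interrupt/exception number
    ///     let _ = intno;
    ///     uc.emu_stop().unwrap();
    /// })
    /// .unwrap();
    /// ```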
    pub fn add_intr_hook<F>(&mut self, callback: F) -> Result<UcHookId, uc_error>
    where
        F: FnMut(&mut Unicorn<D>, u32) + 'a,
    {
        let mut hook_id = 0;
        let mut user_data = Box::new(hook::UcHook {
            callback,
            uc: Rc::downgrade(&self.inner),
        });

        unsafe {
            uc_hook_add(
                self.get_handle(),
                (&raw mut hook_id).cast(),
                HookType::INTR.0 as i32,
                hook::intr_hook_proxy::<D, F> as _,
                core::ptr::from_mut(user_data.as_mut()).cast(),
                0,
                0,
            )
        }
        .and_then(|| {
            let hook_id = UcHookId(hook_id);
            self.inner_mut().hooks.push((hook_id, user_data));
            Ok(hook_id)
        })
    }

    /// Add a hook for invalid instructions.
    pub fn add_insn_invalid_hook<F>(&mut self, callback: F) -> Result<UcHookId, uc_error>
    where
        F: FnMut(&mut Unicorn<D>) -> bool + 'a,
    {
        let mut hook_id = 0;
        let mut user_data = Box::new(hook::UcHook {
            callback,
            uc: Rc::downgrade(&self.inner),
        });

        unsafe {
            uc_hook_add(
                self.get_handle(),
                (&raw mut hook_id).cast(),
                HookType::INSN_INVALID.0 as i32,
                hook::insn_invalid_hook_proxy::<D, F> as _,
                core::ptr::from_mut(user_data.as_mut()).cast(),
                0,
                0,
            )
        }
        .and_then(|| {
            let hook_id = UcHookId(hook_id);
            self.inner_mut().hooks.push((hook_id, user_data));
            Ok(hook_id)
        })
    }

    /// Add hook for x86 IN instruction.
    #[cfg(feature = "arch_x86")]
    pub fn add_insn_in_hook<F>(&mut self, callback: F) -> Result<UcHookId, uc_error>
    where
        F: FnMut(&mut Unicorn<D>, u32, usize) -> u32 + 'a,
    {
        let mut hook_id = 0;
        let mut user_data = Box::new(hook::UcHook {
            callback,
            uc: Rc::downgrade(&self.inner),
        });

        unsafe {
            uc_hook_add(
                self.get_handle(),
                (&raw mut hook_id).cast(),
                HookType::INSN.0 as i32,
                hook::insn_in_hook_proxy::<D, F> as _,
                core::ptr::from_mut(user_data.as_mut()).cast(),
                0,
                0,
                X86Insn::IN,
            )
        }
        .and_then(|| {
            let hook_id = UcHookId(hook_id);
            self.inner_mut().hooks.push((hook_id, user_data));
            Ok(hook_id)
        })
    }

    /// Add hook for x86 OUT instruction.
    #[cfg(feature = "arch_x86")]
    pub fn add_insn_out_hook<F>(&mut self, callback: F) -> Result<UcHookId, uc_error>
    where
        F: FnMut(&mut Unicorn<D>, u32, usize, u32) + 'a,
    {
        let mut hook_id = 0;
        let mut user_data = Box::new(hook::UcHook {
            callback,
            uc: Rc::downgrade(&self.inner),
        });

        unsafe {
            uc_hook_add(
                self.get_handle(),
                (&raw mut hook_id).cast(),
                HookType::INSN.0 as i32,
                hook::insn_out_hook_proxy::<D, F> as _,
                core::ptr::from_mut(user_data.as_mut()).cast(),
                0,
                0,
                X86Insn::OUT,
            )
        }
        .and_then(|| {
            let hook_id = UcHookId(hook_id);
            self.inner_mut().hooks.push((hook_id, user_data));
            Ok(hook_id)
        })
    }

    /// Add hook for x86 SYSCALL or SYSENTER.
    #[cfg(feature = "arch_x86")]
    pub fn add_insn_sys_hook<F>(
        &mut self,
        insn_type: X86Insn,
        begin: u64,
        end: u64,
        callback: F,
    ) -> Result<UcHookId, uc_error>
    where
        F: FnMut(&mut Unicorn<D>) + 'a,
    {
        let mut hook_id = 0;
        let mut user_data = Box::new(hook::UcHook {
            callback,
            uc: Rc::downgrade(&self.inner),
        });

        unsafe {
            uc_hook_add(
                self.get_handle(),
                (&raw mut hook_id).cast(),
                HookType::INSN.0 as i32,
                hook::insn_sys_hook_proxy::<D, F> as _,
                core::ptr::from_mut(user_data.as_mut()).cast(),
                begin,
                end,
                insn_type,
            )
        }
        .and_then(|| {
            let hook_id = UcHookId(hook_id);
            self.inner_mut().hooks.push((hook_id, user_data));
            Ok(hook_id)
        })
    }

    /// Add hook for ARM64 MRS/MSR/SYS/SYSL instructions.
    ///
    /// If the callback returns true, the read/write to the system register is skipped (even
    /// though that may cause exceptions!). Note that only one callback per instruction is allowed.
    #[cfg(feature = "arch_aarch64")]
    pub fn add_insn_sys_hook_arm64<F>(
        &mut self,
        insn_type: Arm64Insn,
        begin: u64,
        end: u64,
        callback: F,
    ) -> Result<UcHookId, uc_error>
    where
        F: FnMut(&mut Unicorn<D>, RegisterARM64, &RegisterARM64CP) -> bool + 'a,
    {
        let mut hook_id = 0;
        let mut user_data = Box::new(hook::UcHook {
            callback,
            uc: Rc::downgrade(&self.inner),
        });

        unsafe {
            uc_hook_add(
                self.get_handle(),
                (&raw mut hook_id).cast(),
                HookType::INSN.0 as i32,
                hook::insn_sys_hook_proxy_arm64::<D, F> as _,
                core::ptr::from_mut(user_data.as_mut()).cast(),
                begin,
                end,
                insn_type,
            )
        }
        .and_then(|| {
            let hook_id = UcHookId(hook_id);
            self.inner_mut().hooks.push((hook_id, user_data));
            Ok(hook_id)
        })
    }

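    /// Add a hook for TLB lookups.
    ///
    /// The callback receives the virtual address and the access type, and
    /// returns the translation, or `None` to report the address as unmapped.
    /// It is only consulted when the virtual TLB mode is selected via
    /// [`Self::ctl_set_tlb_type`].
    ///
    /// A minimal identity-mapping sketch, assuming an ARM build (the
    /// `TlbEntry` fields are assumed to mirror the C API's `paddr`/`perms`
    /// layout):
    ///
    /// ```no_run
    /// use unicorn_engine::{Unicorn, unicorn_const::{Arch, Mode, Prot, TlbEntry}};
    ///
    /// let mut emu = Unicorn::new(Arch::ARM, Mode::LITTLE_ENDIAN).unwrap();
    /// emu.add_tlb_hook(0, u64::MAX, |_uc, vaddr, _mem_type| {
    ///     // translate 1:1; return None to signal an unmapped access
    ///     Some(TlbEntry { paddr: vaddr, perms: Prot::ALL })
    /// })
    /// .unwrap();
    /// ```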
    pub fn add_tlb_hook<F>(
        &mut self,
        begin: u64,
        end: u64,
        callback: F,
    ) -> Result<UcHookId, uc_error>
    where
        F: FnMut(&mut Unicorn<D>, u64, MemType) -> Option<TlbEntry> + 'a,
    {
        let mut hook_id = 0;
        let mut user_data = Box::new(hook::UcHook {
            callback,
            uc: Rc::downgrade(&self.inner),
        });

        unsafe {
            uc_hook_add(
                self.get_handle(),
                (&raw mut hook_id).cast(),
                HookType::TLB_FILL.0 as i32,
                hook::tlb_lookup_hook_proxy::<D, F> as _,
                core::ptr::from_mut(user_data.as_mut()).cast(),
                begin,
                end,
            )
        }
        .and_then(|| {
            let hook_id = UcHookId(hook_id);
            self.inner_mut().hooks.push((hook_id, user_data));
            Ok(hook_id)
        })
    }

    pub fn add_tcg_hook<F>(
        &mut self,
        code: TcgOpCode,
        flag: TcgOpFlag,
        begin: u64,
        end: u64,
        callback: F,
    ) -> Result<UcHookId, uc_error>
    where
        F: FnMut(&mut Unicorn<D>, u64, u64, u64, usize) + 'a,
    {
        let mut hook_id = 0;
        let mut user_data = Box::new(hook::UcHook {
            callback,
            uc: Rc::downgrade(&self.inner),
        });

        unsafe {
            uc_hook_add(
                self.get_handle(),
                (&raw mut hook_id).cast(),
                HookType::TCG_OPCODE.0 as i32,
                hook::tcg_proxy::<D, F> as _,
                core::ptr::from_mut(user_data.as_mut()).cast(),
                begin,
                end,
                code as i32,
                flag.0 as i32,
            )
            .and_then(|| {
                let hook_id = UcHookId(hook_id);
                self.inner_mut().hooks.push((hook_id, user_data));
                Ok(hook_id)
            })
        }
    }

    /// Add a hook for edge-generated events.
    ///
    /// Callback parameters: `(uc, cur_tb, prev_tb)`.
    pub fn add_edge_gen_hook<F>(
        &mut self,
        begin: u64,
        end: u64,
        callback: F,
    ) -> Result<UcHookId, uc_error>
    where
        F: FnMut(&mut Unicorn<D>, &mut TranslationBlock, &mut TranslationBlock) + 'a,
    {
        let mut hook_id = 0;
        let mut user_data = Box::new(hook::UcHook {
            callback,
            uc: Rc::downgrade(&self.inner),
        });

        unsafe {
            uc_hook_add(
                self.get_handle(),
                (&raw mut hook_id).cast(),
                HookType::EDGE_GENERATED.0 as i32,
                hook::edge_gen_hook_proxy::<D, F> as _,
                core::ptr::from_mut(user_data.as_mut()).cast(),
                begin,
                end,
            )
        }
        .and_then(|| {
            let hook_id = UcHookId(hook_id);
            self.inner_mut().hooks.push((hook_id, user_data));
            Ok(hook_id)
        })
    }

    /// Remove a hook.
    ///
    /// `hook_id` is the value returned by `add_*_hook` functions.
    pub fn remove_hook(&mut self, hook_id: UcHookId) -> Result<(), uc_error> {
        // drop the hook
        let inner = self.inner_mut();
        inner.hooks.retain(|(id, _)| id != &hook_id);

        unsafe { uc_hook_del(inner.handle, hook_id.0) }.into()
    }

    /// Allocate and return an empty Unicorn context.
    ///
    /// To be populated via `context_save`.
    pub fn context_alloc(&self) -> Result<Context, uc_error> {
        let mut empty_context = ptr::null_mut();
        unsafe { uc_context_alloc(self.get_handle(), &raw mut empty_context) }.and(Ok(Context {
            context: empty_context,
        }))
    }

    /// Save current Unicorn context to previously allocated Context struct.
    pub fn context_save(&self, context: &mut Context) -> Result<(), uc_error> {
        unsafe { uc_context_save(self.get_handle(), context.context) }.into()
    }

    /// Allocate and return a Context struct initialized with the current CPU context.
    ///
    /// This can be used for fast rollbacks with `context_restore`.
    /// In case of many non-concurrent context saves, use `context_alloc` and
    /// `context_save` individually to avoid unnecessary allocations.
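    ///
    /// A minimal save/restore roundtrip sketch, assuming an ARM build:
    ///
    /// ```no_run
    /// use unicorn_engine::{RegisterARM, Unicorn, unicorn_const::{Arch, Mode}};
    ///
    /// let mut emu = Unicorn::new(Arch::ARM, Mode::LITTLE_ENDIAN).unwrap();
    /// emu.reg_write(RegisterARM::R0, 123).unwrap();
    /// let ctx = emu.context_init().unwrap(); // snapshot
    /// emu.reg_write(RegisterARM::R0, 456).unwrap();
    /// emu.context_restore(&ctx).unwrap(); // rollback
    /// assert_eq!(emu.reg_read(RegisterARM::R0), Ok(123));
    /// ```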
    pub fn context_init(&self) -> Result<Context, uc_error> {
        let mut new_context = ptr::null_mut();
        unsafe {
            uc_context_alloc(self.get_handle(), &raw mut new_context).and_then(|| {
                uc_context_save(self.get_handle(), new_context)
                    .and(Ok(Context {
                        context: new_context,
                    }))
                    .inspect_err(|_| {
                        uc_context_free(new_context);
                    })
            })
        }
    }

    /// Restore a previously saved Unicorn context.
    ///
    /// Perform a quick rollback of the CPU context, including registers and some
    /// internal metadata. Contexts may not be shared across engine instances with
    /// differing arches or modes. Memory has to be restored manually, if needed.
    pub fn context_restore(&self, context: &Context) -> Result<(), uc_error> {
        unsafe { uc_context_restore(self.get_handle(), context.context) }.into()
    }

    /// Emulate machine code for a specified duration.
    ///
    /// `begin` is the address where to start the emulation. The emulation stops if `until`
    /// is hit. `timeout` specifies a duration in microseconds after which the emulation is
    /// stopped (infinite execution if set to 0). `count` is the maximum number of instructions
    /// to emulate (emulate all the available instructions if set to 0).
    pub fn emu_start(
        &mut self,
        begin: u64,
        until: u64,
        timeout: u64,
        count: usize,
    ) -> Result<(), uc_error> {
        unsafe { uc_emu_start(self.get_handle(), begin, until, timeout, count as _) }.into()
    }

    /// Stop the emulation.
    ///
    /// This is usually called from a callback function in hooks.
    /// NOTE: For now, this will stop the execution only after the current block.
    pub fn emu_stop(&mut self) -> Result<(), uc_error> {
        unsafe { uc_emu_stop(self.get_handle()).into() }
    }

    /// Query the internal status of the engine.
    ///
    /// Supported queries: `MODE`, `PAGE_SIZE`, `ARCH`.
    pub fn query(&self, query: Query) -> Result<usize, uc_error> {
        let mut result = 0;
        unsafe { uc_query(self.get_handle(), query, &mut result) }.and(Ok(result))
    }

    /// Get the `i32` register id of the program counter for the specified architecture.
    ///
    /// If an architecture is not compiled in, this function will return `uc_error::ARCH`.
    const fn arch_to_pc_register(arch: Arch, mode: Mode) -> Result<i32, uc_error> {
        match arch {
            #[cfg(feature = "arch_x86")]
            Arch::X86 => match mode {
                Mode::MODE_16 => Ok(RegisterX86::IP as _),
                Mode::MODE_32 => Ok(RegisterX86::EIP as _),
                Mode::MODE_64 => Ok(RegisterX86::RIP as _),
                _ => Err(uc_error::ARCH),
            },
            #[cfg(feature = "arch_arm")]
            Arch::ARM => Ok(RegisterARM::PC as i32),
            #[cfg(feature = "arch_aarch64")]
            Arch::ARM64 => Ok(RegisterARM64::PC as i32),
            #[cfg(feature = "arch_mips")]
            Arch::MIPS => Ok(RegisterMIPS::PC as i32),
            #[cfg(feature = "arch_sparc")]
            Arch::SPARC => Ok(RegisterSPARC::PC as i32),
            #[cfg(feature = "arch_m68k")]
            Arch::M68K => Ok(RegisterM68K::PC as i32),
            #[cfg(feature = "arch_ppc")]
            Arch::PPC => Ok(RegisterPPC::PC as i32),
            #[cfg(feature = "arch_riscv")]
            Arch::RISCV => Ok(RegisterRISCV::PC as i32),
            #[cfg(feature = "arch_s390x")]
            Arch::S390X => Ok(RegisterS390X::PC as i32),
            #[cfg(feature = "arch_tricore")]
            Arch::TRICORE => Ok(RegisterTRICORE::PC as i32),
            // returns `uc_error::ARCH` for `Arch::MAX` and any
            // other architecture that is not compiled in
            _ => Err(uc_error::ARCH),
        }
    }

    /// Gets the current program counter for this `unicorn` instance.
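    ///
    /// A minimal sketch, assuming an ARM build:
    ///
    /// ```no_run
    /// use unicorn_engine::{Unicorn, unicorn_const::{Arch, Mode}};
    ///
    /// let mut emu = Unicorn::new(Arch::ARM, Mode::LITTLE_ENDIAN).unwrap();
    /// emu.set_pc(0x1000).unwrap();
    /// assert_eq!(emu.pc_read(), Ok(0x1000));
    /// ```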
    pub fn pc_read(&self) -> Result<u64, uc_error> {
        let arch = self.get_arch();
        let mode = self.ctl_get_mode()?;

        self.reg_read(Self::arch_to_pc_register(arch, mode)?)
    }

    /// Sets the program counter for this `unicorn` instance.
    pub fn set_pc(&mut self, value: u64) -> Result<(), uc_error> {
        let arch = self.get_arch();
        let mode = self.ctl_get_mode()?;

        self.reg_write(Self::arch_to_pc_register(arch, mode)?, value)
    }

    pub fn ctl_get_mode(&self) -> Result<Mode, uc_error> {
        let mut result = 0;
        unsafe {
            uc_ctl(
                self.get_handle(),
                UC_CTL_READ!(ControlType::UC_MODE),
                &mut result,
            )
        }
        .and_then(|| Mode::try_from(result))
    }

    pub fn ctl_get_page_size(&self) -> Result<u32, uc_error> {
        let mut result = 0;
        unsafe {
            uc_ctl(
                self.get_handle(),
                UC_CTL_READ!(ControlType::UC_PAGE_SIZE),
                &mut result,
            )
        }
        .and(Ok(result))
    }

    pub fn ctl_set_page_size(&mut self, page_size: u32) -> Result<(), uc_error> {
        unsafe {
            uc_ctl(
                self.get_handle(),
                UC_CTL_WRITE!(ControlType::UC_PAGE_SIZE),
                page_size,
            )
        }
        .into()
    }

    pub fn ctl_get_arch(&self) -> Result<Arch, uc_error> {
        let mut result = 0;
        unsafe {
            uc_ctl(
                self.get_handle(),
                UC_CTL_READ!(ControlType::UC_ARCH),
                &mut result,
            )
        }
        .and_then(|| Arch::try_from(result as usize))
    }

    pub fn ctl_get_timeout(&self) -> Result<u64, uc_error> {
        let mut result = 0;
        unsafe {
            uc_ctl(
                self.get_handle(),
                UC_CTL_READ!(ControlType::UC_TIMEOUT),
                &mut result,
            )
        }
        .and(Ok(result))
    }

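    /// Enable the use of the exits list.
    ///
    /// While enabled, emulation stops at the addresses configured via
    /// [`Self::ctl_set_exits`] instead of the `until` argument of
    /// [`Self::emu_start`].
    ///
    /// A minimal sketch, assuming an ARM build:
    ///
    /// ```no_run
    /// use unicorn_engine::{Unicorn, unicorn_const::{Arch, Mode, Prot}};
    ///
    /// let mut emu = Unicorn::new(Arch::ARM, Mode::LITTLE_ENDIAN).unwrap();
    /// emu.mem_map(0x1000, 0x4000, Prot::ALL).unwrap();
    /// emu.ctl_exits_enable().unwrap();
    /// emu.ctl_set_exits(&[0x1008]).unwrap();
    /// // with exits in use, the `until` argument is not consulted
    /// emu.emu_start(0x1000, 0, 0, 0).unwrap();
    /// ```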
    pub fn ctl_exits_enable(&mut self) -> Result<(), uc_error> {
        unsafe {
            uc_ctl(
                self.get_handle(),
                UC_CTL_WRITE!(ControlType::UC_USE_EXITS),
                1,
            )
        }
        .into()
    }

    pub fn ctl_exits_disable(&mut self) -> Result<(), uc_error> {
        unsafe {
            uc_ctl(
                self.get_handle(),
                UC_CTL_WRITE!(ControlType::UC_USE_EXITS),
                0,
            )
        }
        .into()
    }

    pub fn ctl_get_exits_count(&self) -> Result<usize, uc_error> {
        let mut result = 0;
        unsafe {
            uc_ctl(
                self.get_handle(),
                UC_CTL_READ!(ControlType::UC_EXITS_CNT),
                &mut result,
            )
        }
        .and(Ok(result))
    }

    pub fn ctl_get_exits(&self) -> Result<Vec<u64>, uc_error> {
        let exits_count = self.ctl_get_exits_count()?;
        let mut exits = Vec::with_capacity(exits_count);
        unsafe {
            uc_ctl(
                self.get_handle(),
                UC_CTL_READ!(ControlType::UC_EXITS),
                exits.as_mut_ptr(),
                exits_count,
            )
        }
        .and_then(|| unsafe {
            // unicorn has filled the buffer; mark its elements as initialized
            exits.set_len(exits_count);
            Ok(exits)
        })
    }

    pub fn ctl_set_exits(&mut self, exits: &[u64]) -> Result<(), uc_error> {
        unsafe {
            uc_ctl(
                self.get_handle(),
                UC_CTL_WRITE!(ControlType::UC_EXITS),
                exits.as_ptr(),
                exits.len(),
            )
        }
        .into()
    }

    pub fn ctl_get_cpu_model(&self) -> Result<i32, uc_error> {
        let mut result = 0;
        unsafe {
            uc_ctl(
                self.get_handle(),
                UC_CTL_READ!(ControlType::CPU_MODEL),
                &mut result,
            )
        }
        .and(Ok(result))
    }

    pub fn ctl_set_cpu_model(&mut self, cpu_model: i32) -> Result<(), uc_error> {
        unsafe {
            uc_ctl(
                self.get_handle(),
                UC_CTL_WRITE!(ControlType::CPU_MODEL),
                cpu_model,
            )
        }
        .into()
    }

    pub fn ctl_remove_cache(&mut self, address: u64, end: u64) -> Result<(), uc_error> {
        unsafe {
            uc_ctl(
                self.get_handle(),
                UC_CTL_WRITE!(ControlType::TB_REMOVE_CACHE),
                address,
                end,
            )
        }
        .into()
    }

    pub fn ctl_request_cache(
        &self,
        address: u64,
        tb: Option<&mut TranslationBlock>,
    ) -> Result<(), uc_error> {
        let tb_ptr = tb.map_or(ptr::null_mut(), core::ptr::from_mut);
        unsafe {
            uc_ctl(
                self.get_handle(),
                UC_CTL_READ_WRITE!(ControlType::TB_REQUEST_CACHE),
                address,
                tb_ptr,
            )
        }
        .into()
    }

    pub fn ctl_flush_tb(&mut self) -> Result<(), uc_error> {
        unsafe { uc_ctl(self.get_handle(), UC_CTL_WRITE!(ControlType::TB_FLUSH)) }.into()
    }

    pub fn ctl_flush_tlb(&mut self) -> Result<(), uc_error> {
        unsafe { uc_ctl(self.get_handle(), UC_CTL_WRITE!(ControlType::TLB_FLUSH)) }.into()
    }

    pub fn ctl_set_context_mode(&mut self, mode: ContextMode) -> Result<(), uc_error> {
        unsafe {
            uc_ctl(
                self.get_handle(),
                UC_CTL_WRITE!(ControlType::CONTEXT_MODE),
                mode,
            )
        }
        .into()
    }

    pub fn ctl_set_tlb_type(&mut self, t: TlbType) -> Result<(), uc_error> {
        unsafe {
            uc_ctl(
                self.get_handle(),
                UC_CTL_WRITE!(ControlType::TLB_TYPE),
                t as i32,
            )
        }
        .into()
    }
}