ckb_vm/memory/
mod.rs

1use super::{
2    bits::{rounddown, roundup},
3    Error, Register, RISCV_PAGESIZE,
4};
5use bytes::Bytes;
6use std::cmp::min;
7use std::ptr;
8
9pub mod flat;
10pub mod sparse;
11pub mod wxorx;
12
13pub use ckb_vm_definitions::{
14    memory::{FLAG_DIRTY, FLAG_EXECUTABLE, FLAG_FREEZED, FLAG_WRITABLE, FLAG_WXORX_BIT},
15    MEMORY_FRAME_PAGE_SHIFTS, RISCV_MAX_MEMORY, RISCV_PAGE_SHIFTS,
16};
17
18#[inline(always)]
19pub fn round_page_down(x: u64) -> u64 {
20    rounddown(x, RISCV_PAGESIZE as u64)
21}
22
23#[inline(always)]
24pub fn round_page_up(x: u64) -> u64 {
25    roundup(x, RISCV_PAGESIZE as u64)
26}
27
28pub type Page = [u8; RISCV_PAGESIZE];
29
/// Abstraction over guest memory used by the VM.
///
/// Implementations (see the `flat`, `sparse` and `wxorx` submodules) provide
/// page-granular storage plus a per-page flag byte (see the `FLAG_*`
/// constants re-exported above).
pub trait Memory {
    /// Register type used by the RISC-V load/store methods below, so
    /// enough information can be passed around for JIT support.
    type REG: Register;

    /// Creates a memory instance with the implementation's default size.
    fn new() -> Self;
    /// Creates a memory instance of `memory_size` bytes.
    fn new_with_memory(memory_size: usize) -> Self;
    /// Initializes the pages covering `[addr, addr + size)` with `flags`,
    /// writing `source` data `offset_from_addr` bytes past `addr`.
    /// See `fill_page_data` in this module for the expected fill layout
    /// (leading gap and trailing remainder zero-filled).
    fn init_pages(
        &mut self,
        addr: u64,
        size: u64,
        flags: u8,
        source: Option<Bytes>,
        offset_from_addr: u64,
    ) -> Result<(), Error>;
    /// Returns the flag byte of page index `page`.
    fn fetch_flag(&mut self, page: u64) -> Result<u8, Error>;
    /// Sets the bits of `flag` on page index `page`.
    fn set_flag(&mut self, page: u64, flag: u8) -> Result<(), Error>;
    /// Clears the bits of `flag` on page index `page`.
    fn clear_flag(&mut self, page: u64, flag: u8) -> Result<(), Error>;
    /// Total memory size in bytes.
    fn memory_size(&self) -> usize;
    /// Number of pages (memory size shifted down by the page-size shift).
    fn memory_pages(&self) -> usize {
        self.memory_size() >> RISCV_PAGE_SHIFTS
    }

    // This is in fact just memset
    fn store_byte(&mut self, addr: u64, size: u64, value: u8) -> Result<(), Error>;
    /// Copies `value` into guest memory starting at `addr`.
    fn store_bytes(&mut self, addr: u64, value: &[u8]) -> Result<(), Error>;
    /// Reads `size` bytes from guest memory starting at `addr`.
    fn load_bytes(&mut self, addr: u64, size: u64) -> Result<Bytes, Error>;
    /// 16-bit load used for instruction fetch (implementations may enforce
    /// the executable flag here — see the `wxorx` submodule).
    fn execute_load16(&mut self, addr: u64) -> Result<u16, Error>;
    /// 32-bit load used for instruction fetch; same caveat as `execute_load16`.
    fn execute_load32(&mut self, addr: u64) -> Result<u32, Error>;

    // Methods below are used to implement RISC-V instructions, to make JIT
    // possible, we need to use register type here so as to pass enough
    // information around.
    fn load8(&mut self, addr: &Self::REG) -> Result<Self::REG, Error>;
    fn load16(&mut self, addr: &Self::REG) -> Result<Self::REG, Error>;
    fn load32(&mut self, addr: &Self::REG) -> Result<Self::REG, Error>;
    fn load64(&mut self, addr: &Self::REG) -> Result<Self::REG, Error>;

    fn store8(&mut self, addr: &Self::REG, value: &Self::REG) -> Result<(), Error>;
    fn store16(&mut self, addr: &Self::REG, value: &Self::REG) -> Result<(), Error>;
    fn store32(&mut self, addr: &Self::REG, value: &Self::REG) -> Result<(), Error>;
    fn store64(&mut self, addr: &Self::REG, value: &Self::REG) -> Result<(), Error>;

    // Load reservation address for atomic extension.
    /// Returns the current load-reservation address (LR/SC support).
    fn lr(&self) -> &Self::REG;
    /// Updates the load-reservation address (LR/SC support).
    fn set_lr(&mut self, value: &Self::REG);
}
75
76#[inline(always)]
77pub fn fill_page_data<M: Memory>(
78    memory: &mut M,
79    addr: u64,
80    size: u64,
81    source: Option<Bytes>,
82    offset_from_addr: u64,
83) -> Result<(), Error> {
84    let mut written_size = 0;
85    if offset_from_addr > 0 {
86        let real_size = min(size, offset_from_addr);
87        memory.store_byte(addr, real_size, 0)?;
88        written_size += real_size;
89    }
90    if let Some(source) = source {
91        let real_size = min(size - written_size, source.len() as u64);
92        if real_size > 0 {
93            memory.store_bytes(addr + written_size, &source[0..real_size as usize])?;
94            written_size += real_size;
95        }
96    }
97    if written_size < size {
98        memory.store_byte(addr + written_size, size - written_size, 0)?;
99    }
100    Ok(())
101}
102
103// `size` should be none zero u64
104pub fn get_page_indices(addr: u64, size: u64) -> Result<(u64, u64), Error> {
105    debug_assert!(size > 0);
106    let (addr_end, overflowed) = addr.overflowing_add(size);
107    if overflowed {
108        return Err(Error::MemOutOfBound);
109    }
110    if addr_end > RISCV_MAX_MEMORY as u64 {
111        return Err(Error::MemOutOfBound);
112    }
113    let page = addr >> RISCV_PAGE_SHIFTS;
114    let page_end = (addr_end - 1) >> RISCV_PAGE_SHIFTS;
115    Ok((page, page_end))
116}
117
118pub fn check_permission<M: Memory>(
119    memory: &mut M,
120    page_indices: &(u64, u64),
121    flag: u8,
122) -> Result<(), Error> {
123    for page in page_indices.0..=page_indices.1 {
124        let page_flag = memory.fetch_flag(page)?;
125        if (page_flag & FLAG_WXORX_BIT) != (flag & FLAG_WXORX_BIT) {
126            return Err(Error::MemWriteOnExecutablePage);
127        }
128    }
129    Ok(())
130}
131
132pub fn set_dirty<M: Memory>(memory: &mut M, page_indices: &(u64, u64)) -> Result<(), Error> {
133    for page in page_indices.0..=page_indices.1 {
134        memory.set_flag(page, FLAG_DIRTY)?
135    }
136    Ok(())
137}
138
// Keep this in a central place to allow for future optimization
#[inline(always)]
/// Fills `slice` with `value`.
///
/// `slice::fill` specializes to a `memset` intrinsic for `u8`, so this is
/// as fast as the previous `ptr::write_bytes` version while avoiding
/// `unsafe` entirely.
pub fn memset(slice: &mut [u8], value: u8) {
    slice.fill(value);
}
147
148pub fn load_c_string_byte_by_byte<M: Memory>(
149    memory: &mut M,
150    addr: &M::REG,
151) -> Result<Bytes, Error> {
152    let mut buffer = Vec::new();
153    let mut addr = addr.clone();
154    loop {
155        let byte = memory.load8(&addr)?.to_u8();
156        if byte == 0 {
157            break;
158        }
159        buffer.push(byte);
160        addr = addr.overflowing_add(&M::REG::from_u8(1));
161    }
162    Ok(Bytes::from(buffer))
163}