ckb_vm/memory/sparse.rs

use super::super::{Error, Register, RISCV_MAX_MEMORY, RISCV_PAGESIZE, RISCV_PAGE_SHIFTS};
use super::{fill_page_data, memset, round_page_down, Memory, Page, FLAG_DIRTY};

use bytes::Bytes;
use std::cmp::min;
use std::marker::PhantomData;

const INVALID_PAGE_INDEX: u16 = 0xFFFF;

/// A sparse flat memory implementation that allocates pages only when they
/// are requested; beyond that, it performs no permission checking.
pub struct SparseMemory<R> {
    // Stores the index of each page within the `pages` vector. If a page has
    // not been initialized yet, the corresponding position holds
    // INVALID_PAGE_INDEX. Since a u16 takes 2 bytes, this adds 64KB of extra
    // storage cost, assuming we have 128MB of memory.
    indices: Vec<u16>,
    pages: Vec<Page>,
    flags: Vec<u8>,
    memory_size: usize,
    riscv_pages: usize,
    load_reservation_address: R,
    _inner: PhantomData<R>,
}

impl<R> SparseMemory<R> {
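    /// Returns a mutable reference to the page containing `aligned_addr`,
    /// allocating a zero-filled page on first access. `aligned_addr` must be
    /// page aligned; addresses past the configured memory size are rejected.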
    fn fetch_page(&mut self, aligned_addr: u64) -> Result<&mut Page, Error> {
        let page = aligned_addr / RISCV_PAGESIZE as u64;
        if page >= self.riscv_pages as u64 {
            return Err(Error::MemOutOfBound);
        }
        let mut index = self.indices[page as usize];
        if index == INVALID_PAGE_INDEX {
            self.pages.push([0; RISCV_PAGESIZE]);
            index = (self.pages.len() - 1) as u16;
            self.indices[page as usize] = index;
        }
        Ok(&mut self.pages[index as usize])
    }
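    /// Loads a `bytes`-wide little-endian value starting at `addr`. The read
    /// may straddle a page boundary, in which case the low-order bytes come
    /// from the first page and the remaining bytes from the next one.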
    fn load(&mut self, addr: u64, bytes: u64) -> Result<u64, Error> {
        debug_assert!(bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
        let page_addr = round_page_down(addr);
        let first_page_bytes = min(bytes, RISCV_PAGESIZE as u64 - (addr - page_addr));
        let mut shift = 0;
        let mut value: u64 = 0;
        {
            let page = self.fetch_page(page_addr)?;
            for &byte in page
                .iter()
                .skip((addr - page_addr) as usize)
                .take(first_page_bytes as usize)
            {
                value |= u64::from(byte) << shift;
                shift += 8;
            }
        }
        let second_page_bytes = bytes - first_page_bytes;
        if second_page_bytes > 0 {
            let second_page = self.fetch_page(page_addr + RISCV_PAGESIZE as u64)?;
            for &byte in second_page.iter().take(second_page_bytes as usize) {
                value |= u64::from(byte) << shift;
                shift += 8;
            }
        }
        Ok(value)
    }
}

impl<R: Register> Memory for SparseMemory<R> {
    type REG = R;

    fn new() -> Self {
        Self::new_with_memory(RISCV_MAX_MEMORY)
    }

    fn new_with_memory(memory_size: usize) -> Self {
        assert!(memory_size <= RISCV_MAX_MEMORY);
        assert!(memory_size % RISCV_PAGESIZE == 0);
        Self {
            indices: vec![INVALID_PAGE_INDEX; memory_size / RISCV_PAGESIZE],
            pages: Vec::new(),
            flags: vec![0; memory_size / RISCV_PAGESIZE],
            memory_size,
            riscv_pages: memory_size / RISCV_PAGESIZE,
            load_reservation_address: R::from_u64(u64::MAX),
            _inner: PhantomData,
        }
    }

    fn init_pages(
        &mut self,
        addr: u64,
        size: u64,
        _flags: u8,
        source: Option<Bytes>,
        offset_from_addr: u64,
    ) -> Result<(), Error> {
        fill_page_data(self, addr, size, source, offset_from_addr)
    }

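    // Per-page flags (e.g. FLAG_DIRTY) live in the separate `flags` vector,
    // so they can be queried and updated without allocating the page itself.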
    fn fetch_flag(&mut self, page: u64) -> Result<u8, Error> {
        if page < self.riscv_pages as u64 {
            Ok(self.flags[page as usize])
        } else {
            Err(Error::MemOutOfBound)
        }
    }

    fn set_flag(&mut self, page: u64, flag: u8) -> Result<(), Error> {
        if page < self.riscv_pages as u64 {
            self.flags[page as usize] |= flag;
            Ok(())
        } else {
            Err(Error::MemOutOfBound)
        }
    }

    fn clear_flag(&mut self, page: u64, flag: u8) -> Result<(), Error> {
        if page < self.riscv_pages as u64 {
            self.flags[page as usize] &= !flag;
            Ok(())
        } else {
            Err(Error::MemOutOfBound)
        }
    }

    fn memory_size(&self) -> usize {
        self.memory_size
    }

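    // The fixed-width loads below all funnel through `load`, which assembles
    // the value little-endian and transparently handles page-crossing reads.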
    fn load8(&mut self, addr: &Self::REG) -> Result<Self::REG, Error> {
        let v = self.load(addr.to_u64(), 1).map(|v| v as u8)?;
        Ok(Self::REG::from_u8(v))
    }

    fn load16(&mut self, addr: &Self::REG) -> Result<Self::REG, Error> {
        let v = self.load(addr.to_u64(), 2).map(|v| v as u16)?;
        Ok(Self::REG::from_u16(v))
    }

    fn load32(&mut self, addr: &Self::REG) -> Result<Self::REG, Error> {
        let v = self.load(addr.to_u64(), 4).map(|v| v as u32)?;
        Ok(Self::REG::from_u32(v))
    }

    fn load64(&mut self, addr: &Self::REG) -> Result<Self::REG, Error> {
        let v = self.load(addr.to_u64(), 8)?;
        Ok(Self::REG::from_u64(v))
    }

    fn execute_load16(&mut self, addr: u64) -> Result<u16, Error> {
        self.load(addr, 2).map(|v| v as u16)
    }

    fn execute_load32(&mut self, addr: u64) -> Result<u32, Error> {
        self.load(addr, 4).map(|v| v as u32)
    }

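    /// Copies `value` into memory starting at `addr`, splitting the copy at
    /// page boundaries and marking every touched page dirty.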
    fn store_bytes(&mut self, addr: u64, value: &[u8]) -> Result<(), Error> {
        let mut remaining_data = value;
        let mut current_page_addr = round_page_down(addr);
        let mut current_page_offset = addr - current_page_addr;
        while !remaining_data.is_empty() {
            let page = self.fetch_page(current_page_addr)?;
            let bytes = min(
                RISCV_PAGESIZE as u64 - current_page_offset,
                remaining_data.len() as u64,
            );
            let slice =
                &mut page[current_page_offset as usize..(current_page_offset + bytes) as usize];
            slice.copy_from_slice(&remaining_data[..bytes as usize]);
            self.set_flag(current_page_addr >> RISCV_PAGE_SHIFTS, FLAG_DIRTY)?;

            remaining_data = &remaining_data[bytes as usize..];
            current_page_addr += RISCV_PAGESIZE as u64;
            current_page_offset = 0;
        }
        Ok(())
    }

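    /// Fills `size` bytes of memory starting at `addr` with `value` (a
    /// memset), page by page, marking every touched page dirty.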
    fn store_byte(&mut self, addr: u64, size: u64, value: u8) -> Result<(), Error> {
        let mut current_page_addr = round_page_down(addr);
        let mut current_page_offset = addr - current_page_addr;
        let mut remaining_size = size;
        while remaining_size > 0 {
            let page = self.fetch_page(current_page_addr)?;
            let bytes = min(RISCV_PAGESIZE as u64 - current_page_offset, remaining_size);
            memset(
                &mut page[current_page_offset as usize..(current_page_offset + bytes) as usize],
                value,
            );
            self.set_flag(current_page_addr >> RISCV_PAGE_SHIFTS, FLAG_DIRTY)?;

            remaining_size -= bytes;
            current_page_addr += RISCV_PAGESIZE as u64;
            current_page_offset = 0;
        }
        Ok(())
    }

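    /// Reads `size` bytes starting at `addr` into a freshly allocated buffer.
    /// The whole range is bounds-checked up front (with overflow-safe
    /// arithmetic) before any page is touched.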
    fn load_bytes(&mut self, addr: u64, size: u64) -> Result<Bytes, Error> {
        if size == 0 {
            return Ok(Bytes::new());
        }
        if addr.checked_add(size).ok_or(Error::MemOutOfBound)? > self.memory_size() as u64 {
            return Err(Error::MemOutOfBound);
        }
        let mut current_page_addr = round_page_down(addr);
        let mut current_page_offset = addr - current_page_addr;
        let mut need_read_len = size;
        let mut out_value = Vec::<u8>::with_capacity(size as usize);
        while need_read_len != 0 {
            let page = self.fetch_page(current_page_addr)?;
            let bytes = min(RISCV_PAGESIZE as u64 - current_page_offset, need_read_len);
            out_value.extend(
                &page[current_page_offset as usize..(current_page_offset + bytes) as usize],
            );
            need_read_len -= bytes;
            current_page_addr += RISCV_PAGESIZE as u64;
            current_page_offset = 0;
        }
        Ok(Bytes::from(out_value))
    }

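    // The fixed-width stores below decompose the value into its little-endian
    // byte sequence before delegating to `store_bytes`; e.g. storing the
    // 32-bit value 0x0403_0201 writes the bytes [0x01, 0x02, 0x03, 0x04].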
    fn store8(&mut self, addr: &Self::REG, value: &Self::REG) -> Result<(), Error> {
        self.store_bytes(addr.to_u64(), &[value.to_u8()])
    }

    fn store16(&mut self, addr: &Self::REG, value: &Self::REG) -> Result<(), Error> {
        let value = value.to_u16();
        // RISC-V is little-endian by specification
        self.store_bytes(addr.to_u64(), &[(value & 0xFF) as u8, (value >> 8) as u8])
    }

    fn store32(&mut self, addr: &Self::REG, value: &Self::REG) -> Result<(), Error> {
        let value = value.to_u32();
        // RISC-V is little-endian by specification
        self.store_bytes(
            addr.to_u64(),
            &[
                (value & 0xFF) as u8,
                ((value >> 8) & 0xFF) as u8,
                ((value >> 16) & 0xFF) as u8,
                ((value >> 24) & 0xFF) as u8,
            ],
        )
    }

    fn store64(&mut self, addr: &Self::REG, value: &Self::REG) -> Result<(), Error> {
        let value = value.to_u64();
        // RISC-V is little-endian by specification
        self.store_bytes(
            addr.to_u64(),
            &[
                (value & 0xFF) as u8,
                ((value >> 8) & 0xFF) as u8,
                ((value >> 16) & 0xFF) as u8,
                ((value >> 24) & 0xFF) as u8,
                ((value >> 32) & 0xFF) as u8,
                ((value >> 40) & 0xFF) as u8,
                ((value >> 48) & 0xFF) as u8,
                ((value >> 56) & 0xFF) as u8,
            ],
        )
    }

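    // Accessors for the load reservation address used by the RISC-V A
    // extension's LR/SC instructions; the constructor initializes it to
    // u64::MAX as a "no reservation held" sentinel.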
    fn lr(&self) -> &Self::REG {
        &self.load_reservation_address
    }

    fn set_lr(&mut self, value: &Self::REG) {
        self.load_reservation_address = value.clone();
    }
}
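
// A minimal usage sketch, not part of the original test suite: it assumes
// `u64` implements `Register`, as it does for the RV64 machines in this
// crate, and only exercises code defined in this file.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn store_load_roundtrip_across_page_boundary() {
        let mut memory = SparseMemory::<u64>::new();
        // No page is allocated until a read or write touches it.
        assert_eq!(memory.pages.len(), 0);
        // A 4-byte store starting 2 bytes before a page boundary must span
        // two pages, and the value is laid out little-endian.
        let addr = RISCV_PAGESIZE as u64 - 2;
        memory
            .store_bytes(addr, &[0x01, 0x02, 0x03, 0x04])
            .unwrap();
        assert_eq!(memory.pages.len(), 2);
        assert_eq!(memory.load32(&addr).unwrap(), 0x0403_0201);
        // Both touched pages are now marked dirty.
        assert_eq!(memory.fetch_flag(0).unwrap() & FLAG_DIRTY, FLAG_DIRTY);
        assert_eq!(memory.fetch_flag(1).unwrap() & FLAG_DIRTY, FLAG_DIRTY);
    }
}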