use fugue::ir::{Address, AddressSpace, AddressValue};

use iset::IntervalSet;
use std::sync::Arc;
use thiserror::Error;

use crate::flat::{self, Access, FlatState};
use crate::traits::{State, StateOps, StateValue};

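/// Errors produced by the chunked state: allocator failures, plus errors
/// forwarded from the flat backing store.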
#[derive(Debug, Error, Clone)]
pub enum Error {
    #[error(transparent)]
    Backing(flat::Error),
    #[error("not enough free space to allocate {0} bytes")]
    NotEnoughFreeSpace(usize),
    #[error("attempt to access unmanaged region of `{size}` bytes at {address}")]
    AccessUnmanaged { address: Address, size: usize },
    #[error("attempt to free unmanaged region at {0}")]
    FreeUnmanaged(Address),
    #[error("attempt to reallocate unmanaged region at {0}")]
    ReallocateUnmanaged(Address),
    #[error("access at {address} of `{size}` bytes spans multiple allocations")]
    HeapOverflow { address: Address, size: usize },
}

impl Error {
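    /// Rebases the relative offsets reported by the flat backing into
    /// absolute addresses before surfacing the error to callers.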
    fn backing(base: Address, e: flat::Error) -> Self {
        Self::Backing(match e {
            flat::Error::OOBRead { address, size } => flat::Error::OOBRead {
                address: address + base,
                size,
            },
            flat::Error::OOBWrite { address, size } => flat::Error::OOBWrite {
                address: address + base,
                size,
            },
            flat::Error::AccessViolation {
                address,
                access,
                size,
            } => flat::Error::AccessViolation {
                address: address + base,
                access,
                size,
            },
        })
    }
}

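/// Book-keeping for a single chunk: its offset and size relative to the start
/// of the managed region, and whether it is currently allocated.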
#[derive(Debug, Clone)]
enum ChunkStatus {
    Taken { offset: usize, size: usize },
    Free { offset: usize, size: usize },
}

impl ChunkStatus {
    fn free(offset: usize, size: usize) -> Self {
        Self::Free { offset, size }
    }

    fn taken(offset: usize, size: usize) -> Self {
        Self::Taken { offset, size }
    }

    fn is_free(&self) -> bool {
        matches!(self, Self::Free { .. })
    }

    fn is_taken(&self) -> bool {
        matches!(self, Self::Taken { .. })
    }

    fn offset(&self) -> usize {
        match self {
            Self::Free { offset, .. } | Self::Taken { offset, .. } => *offset,
        }
    }

    fn size(&self) -> usize {
        match self {
            Self::Free { size, .. } | Self::Taken { size, .. } => *size,
        }
    }
}

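/// An ordered, first-fit free list over a contiguous region. Adjacent free
/// chunks are coalesced when chunks are freed or resized.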
#[derive(Debug, Clone)]
#[repr(transparent)]
struct ChunkList(Vec<ChunkStatus>);

impl ChunkList {
    fn new(size: usize) -> Self {
        Self(vec![ChunkStatus::free(0, size)])
    }

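    /// First-fit allocation: takes the first free chunk large enough to hold
    /// `size` bytes, splitting off any remainder as a new free chunk. Returns
    /// the offset of the allocation, or `None` if nothing fits.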
    fn allocate(&mut self, size: usize) -> Option<usize> {
        for i in 0..self.0.len() {
            if self.0[i].is_free() {
                let free_size = self.0[i].size();
                if free_size == size {
                    let offset = self.0[i].offset();
                    self.0[i] = ChunkStatus::taken(offset, size);
                    return Some(offset);
                } else if free_size > size {
                    let offset = self.0[i].offset();
                    self.0[i] = ChunkStatus::taken(offset, size);
                    self.0
                        .insert(i + 1, ChunkStatus::free(offset + size, free_size - size));
                    return Some(offset);
                }
            }
        }
        None
    }

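    /// Resizes the taken chunk at `offset`. Shrinking always succeeds in
    /// place; growing first tries to absorb the following and the preceding
    /// free chunks, then falls back to a fresh allocation (in which case the
    /// caller must copy the contents over). Returns the new offset together
    /// with the old size, or `None` on failure.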
    fn reallocate(&mut self, offset: usize, new_size: usize) -> Option<(usize, usize)> {
        for i in 0..self.0.len() {
            if self.0[i].is_taken() && self.0[i].offset() == offset {
                let mut size = self.0[i].size();
                let old_size = size;

                if new_size == old_size {
                    return Some((offset, old_size));
                } else if new_size < old_size {
                    // Shrink in place; the freed tail either merges into the
                    // following free chunk or becomes a new free chunk.
                    self.0[i] = ChunkStatus::taken(offset, new_size);
                    let diff = old_size - new_size;

                    if i < self.0.len() - 1 && self.0[i + 1].is_free() {
                        let upd_offset = self.0[i + 1].offset() - diff;
                        let upd_size = self.0[i + 1].size() + diff;

                        self.0[i + 1] = ChunkStatus::free(upd_offset, upd_size);
                    } else {
                        self.0
                            .insert(i + 1, ChunkStatus::free(offset + new_size, diff));
                    }

                    return Some((offset, old_size));
                }

                // Grow: first try to absorb the following free chunk.
                let mut spos = i;
                let mut epos = i;

                if i < self.0.len() - 1 && self.0[i + 1].is_free() {
                    size += self.0[i + 1].size();
                    epos = i + 1;
                }

                if size > new_size {
                    self.0[spos] = ChunkStatus::taken(offset, new_size);
                    self.0[epos] = ChunkStatus::free(offset + new_size, size - new_size);
                    return Some((offset, old_size));
                } else if size == new_size {
                    self.0[spos] = ChunkStatus::taken(offset, new_size);
                    self.0.remove(epos);
                    return Some((offset, old_size));
                }

                // Then try to absorb the preceding free chunk as well. Keep
                // the original `offset` intact so that the fallback path
                // below can still locate and free the existing chunk.
                let mut new_offset = offset;

                if i > 0 && self.0[i - 1].is_free() {
                    new_offset = self.0[i - 1].offset();
                    size += self.0[i - 1].size();
                    spos = i - 1;
                }

                if size > new_size {
                    self.0[spos] = ChunkStatus::taken(new_offset, new_size);
                    self.0[spos + 1] =
                        ChunkStatus::free(new_offset + new_size, size - new_size);
                    // Only drop the following chunk if it was actually absorbed.
                    if epos == i + 1 {
                        self.0.remove(i + 1);
                    }
                    return Some((new_offset, old_size));
                } else if size == new_size {
                    self.0[spos] = ChunkStatus::taken(new_offset, new_size);
                    self.0.remove(i);
                    // After the removal above, the absorbed following chunk
                    // (if any) has shifted down to index `i`.
                    if epos == i + 1 {
                        self.0.remove(i);
                    }
                    return Some((new_offset, old_size));
                }

                // In-place growth failed: fall back to a fresh allocation and
                // release the original chunk; the caller is responsible for
                // copying the contents over.
                if let Some(alloc_offset) = self.allocate(new_size) {
                    self.deallocate(offset);
                    return Some((alloc_offset, old_size));
                } else {
                    return None;
                }
            }
        }
        None
    }

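    /// Frees the taken chunk at `offset`, merging it with any free
    /// neighbours, and returns its size.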
    fn deallocate(&mut self, offset: usize) -> Option<usize> {
        for i in 0..self.0.len() {
            if self.0[i].is_taken() && self.0[i].offset() == offset {
                let mut spos = i;
                let mut offset = offset;
                let mut size = self.0[i].size();
                let old_size = size;

                if i < self.0.len() - 1 && self.0[i + 1].is_free() {
                    size += self.0[i + 1].size();
                    self.0.remove(i + 1);
                }

                if i > 0 && self.0[i - 1].is_free() {
                    offset = self.0[i - 1].offset();
                    size += self.0[i - 1].size();
                    self.0.remove(i);
                    spos = i - 1;
                }

                self.0[spos] = ChunkStatus::free(offset, size);
                return Some(old_size);
            }
        }
        None
    }
}

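/// A heap-like state over a contiguous region: a `FlatState` backing managed
/// by a first-fit `ChunkList`, with per-byte permissions guarding each
/// allocation and an interval set recording the live regions.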
#[derive(Debug, Clone)]
pub struct ChunkState<T: StateValue> {
    base_address: Address,
    chunks: ChunkList,
    regions: IntervalSet<Address>,
    backing: FlatState<T>,
    space: Arc<AddressSpace>,
}

impl<T: StateValue> AsRef<Self> for ChunkState<T> {
    #[inline(always)]
    fn as_ref(&self) -> &Self {
        self
    }
}

impl<T: StateValue> AsMut<Self> for ChunkState<T> {
    #[inline(always)]
    fn as_mut(&mut self) -> &mut Self {
        self
    }
}

impl<T: StateValue> AsRef<FlatState<T>> for ChunkState<T> {
    #[inline(always)]
    fn as_ref(&self) -> &FlatState<T> {
        &self.backing
    }
}

impl<T: StateValue> AsMut<FlatState<T>> for ChunkState<T> {
    #[inline(always)]
    fn as_mut(&mut self) -> &mut FlatState<T> {
        &mut self.backing
    }
}

impl<T: StateValue> ChunkState<T> {
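    /// Creates an empty chunked state of `size` bytes rooted at `base_address`
    /// within `space`. The backing starts out read-only; bytes only become
    /// writable while they belong to a live allocation.
    ///
    /// A minimal usage sketch (not compiled here; it assumes a concrete
    /// `StateValue` such as `u8` and an already-constructed `AddressSpace`):
    ///
    /// ```ignore
    /// let mut heap = ChunkState::<u8>::new(space, 0x1000u64, 0x8000);
    /// let ptr = heap.allocate(64)?;
    /// heap.set_values(ptr, &[0u8; 64])?;
    /// let ptr = heap.reallocate(ptr, 128)?;
    /// heap.deallocate(ptr)?;
    /// ```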
    pub fn new<A>(space: Arc<AddressSpace>, base_address: A, size: usize) -> Self
    where
        A: Into<Address>,
    {
        Self {
            base_address: base_address.into(),
            chunks: ChunkList::new(size),
            regions: IntervalSet::new(),
            backing: FlatState::read_only(space.clone(), size),
            space,
        }
    }

    pub fn base_address(&self) -> Address {
        self.base_address
    }

    pub fn inner(&self) -> &FlatState<T> {
        &self.backing
    }

    pub fn inner_mut(&mut self) -> &mut FlatState<T> {
        &mut self.backing
    }

    pub fn allocate(&mut self, size: usize) -> Result<Address, Error> {
        self.allocate_with(size, |_, _| ())
    }

    pub fn allocate_all(&mut self) -> Result<(), Error> {
        self.allocate_with(self.len() - 1, |_, _| ()).map(|_| ())
    }

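    /// Allocates `size` usable bytes and hands the freshly mapped region to
    /// `f` for initialisation. One extra guard byte is reserved past the end
    /// of every allocation and left non-writable, so writes that run off the
    /// end of a chunk fault as access violations rather than corrupting the
    /// neighbouring allocation.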
    #[inline]
    pub fn allocate_with<F>(&mut self, size: usize, f: F) -> Result<Address, Error>
    where
        F: FnOnce(Address, &mut [T]),
    {
        let offset = self
            .chunks
            .allocate(size + 1)
            .ok_or_else(|| Error::NotEnoughFreeSpace(size))?;
        let address = self.base_address + offset;

        self.backing.permissions_mut().set_region(
            &Address::from(offset as u64),
            size,
            Access::ReadWrite,
        );
        self.backing
            .permissions_mut()
            .clear_byte(&(Address::from(offset as u64) + size), Access::Write);

        self.regions.insert(address..address + size);

        let view = self
            .backing
            .view_values_mut(Address::from(offset as u64), size)
            .map_err(Error::Backing)?;

        f(address, view);

        Ok(address)
    }

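    /// Grows or shrinks the allocation that starts exactly at `address`,
    /// copying its contents if the underlying chunk has to move. Returns the
    /// (possibly relocated) address of the allocation.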
    pub fn reallocate<A>(&mut self, address: A, size: usize) -> Result<Address, Error>
    where
        A: Into<Address>,
    {
        let address = address.into();
        let interval = self
            .regions
            .overlap(address)
            .next()
            .ok_or_else(|| Error::ReallocateUnmanaged(address))?;

        if interval.start != address {
            return Err(Error::ReallocateUnmanaged(address));
        }

        let old_offset = address - self.base_address;
        let old_size = interval.end - interval.start;

        if !self
            .backing
            .permissions()
            .all_readable(&old_offset, old_size.into())
        {
            return Err(Error::Backing(flat::Error::AccessViolation {
                address: AddressValue::new(self.space.clone(), address.into()),
                access: Access::Read,
                size,
            }));
        }

        let (offset, _old_size) = self
            .chunks
            .reallocate(old_offset.into(), size + 1)
            .ok_or_else(|| Error::NotEnoughFreeSpace(size))?;

        let new_address = self.base_address + offset;

        // Revoke write access on the old region before granting it on the
        // new one: when a grow-in-place absorbs the preceding free chunk the
        // two regions overlap, and clearing afterwards would clobber the
        // fresh permissions.
        self.backing.permissions_mut().clear_region(
            &old_offset,
            old_size.into(),
            Access::Write,
        );

        self.backing.permissions_mut().set_region(
            &Address::from(offset as u64),
            size,
            Access::ReadWrite,
        );
        self.backing
            .permissions_mut()
            .clear_byte(&(Address::from(offset as u64) + size), Access::Write);

        let offset = Address::from(offset as u64);
        if old_offset != offset {
            self.backing
                .copy_values(old_offset, offset, old_size.into())
                .map_err(Error::Backing)?;
        }

        self.regions.remove(interval);
        self.regions.insert(new_address..new_address + size);

        Ok(new_address)
    }

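    /// Releases the allocation that starts exactly at `address` and revokes
    /// write access to the region, so stale writes through dangling pointers
    /// fault instead of silently succeeding.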
    pub fn deallocate<A>(&mut self, address: A) -> Result<(), Error>
    where
        A: Into<Address>,
    {
        let address = address.into();
        let interval = self
            .regions
            .overlap(address)
            .next()
            .ok_or_else(|| Error::FreeUnmanaged(address))?;

        if interval.start != address {
            return Err(Error::FreeUnmanaged(address));
        }

        let offset = address - self.base_address;
        self.chunks
            .deallocate(offset.into())
            .ok_or_else(|| Error::FreeUnmanaged(address))?;

        let size = usize::from(interval.end - interval.start);

        self.backing
            .permissions_mut()
            .clear_region(&offset, size, Access::Write);

        self.regions.remove(interval);

        Ok(())
    }

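    /// Translates a managed address into an offset into the backing store,
    /// rejecting accesses that touch unmanaged memory or that span more than
    /// one allocation.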
    pub(crate) fn translate_checked<A>(&self, address: A, size: usize) -> Result<usize, Error>
    where
        A: Into<Address>,
    {
        let address = address.into();
        let mut regions = self.regions.iter(address..address + size);

        let _region = regions
            .next()
            .ok_or(Error::AccessUnmanaged { address, size })?;

        if regions.next().is_some() {
            return Err(Error::HeapOverflow { address, size });
        }

        Ok(usize::from(address - self.base_address))
    }
}

impl<V: StateValue> State for ChunkState<V> {
    type Error = Error;

    fn fork(&self) -> Self {
        Self {
            base_address: self.base_address,
            chunks: self.chunks.clone(),
            regions: self.regions.clone(),
            backing: self.backing.fork(),
            space: self.space.clone(),
        }
    }

    fn restore(&mut self, other: &Self) {
        self.base_address = other.base_address;
        self.chunks = other.chunks.clone();
        self.regions = other.regions.clone();
        self.backing.restore(&other.backing);
    }
}

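// Every access below is funnelled through `translate_checked`, so reads and
// writes that touch unmanaged memory or span multiple allocations are
// rejected before they reach the flat backing.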
impl<V: StateValue> StateOps for ChunkState<V> {
    type Value = V;

    fn len(&self) -> usize {
        self.backing.len()
    }

    fn copy_values<F, T>(&mut self, from: F, to: T, size: usize) -> Result<(), Error>
    where
        F: Into<Address>,
        T: Into<Address>,
    {
        let from = self.translate_checked(from, size)?;
        let to = self.translate_checked(to, size)?;

        self.backing
            .copy_values(from as u64, to as u64, size)
            .map_err(|e| Error::backing(self.base_address, e))
    }

    fn get_values<A>(&self, address: A, values: &mut [Self::Value]) -> Result<(), Error>
    where
        A: Into<Address>,
    {
        let size = values.len();
        let address = self.translate_checked(address, size)?;

        self.backing
            .get_values(address as u64, values)
            .map_err(|e| Error::backing(self.base_address, e))
    }

    fn view_values<A>(&self, address: A, n: usize) -> Result<&[Self::Value], Error>
    where
        A: Into<Address>,
    {
        let address = self.translate_checked(address, n)?;

        self.backing
            .view_values(address as u64, n)
            .map_err(|e| Error::backing(self.base_address, e))
    }

    fn view_values_mut<A>(&mut self, address: A, n: usize) -> Result<&mut [Self::Value], Error>
    where
        A: Into<Address>,
    {
        let address = self.translate_checked(address, n)?;
        let base_address = self.base_address;

        self.backing
            .view_values_mut(address as u64, n)
            .map_err(|e| Error::backing(base_address, e))
    }

    fn set_values<A>(&mut self, address: A, values: &[Self::Value]) -> Result<(), Error>
    where
        A: Into<Address>,
    {
        let size = values.len();
        let address = self.translate_checked(address, size)?;

        self.backing
            .set_values(address as u64, values)
            .map_err(|e| Error::backing(self.base_address, e))
    }
}