use std::sync::{
atomic::{AtomicU64, Ordering},
Arc, RwLock,
};
use imbl::HashMap as ImHashMap;
use crate::{
emulation::{
memory::{
region::{MemoryProtection, MemoryRegion, SectionInfo},
statics::StaticFieldStorage,
},
EmValue, EmulationError, HeapRef, ManagedHeap,
},
metadata::token::Token,
Error, Result,
};
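/// A cheaply cloneable, `Arc`-backed handle to a [`ManagedHeap`], allowing
/// multiple address spaces to share a single managed heap.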
#[derive(Clone, Debug)]
pub struct SharedHeap {
inner: Arc<ManagedHeap>,
}
impl SharedHeap {
#[must_use]
pub fn new(max_size: usize) -> Self {
Self {
inner: Arc::new(ManagedHeap::new(max_size)),
}
}
pub fn from_heap(heap: ManagedHeap) -> Self {
Self {
inner: Arc::new(heap),
}
}
#[must_use]
pub fn heap(&self) -> &ManagedHeap {
&self.inner
}
#[must_use]
pub fn ref_count(&self) -> usize {
Arc::strong_count(&self.inner)
}
#[must_use]
pub fn is_unique(&self) -> bool {
Arc::strong_count(&self.inner) == 1
}
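/// Forks the underlying heap into a new, independent `SharedHeap`.
///
/// Allocations made in the fork are not visible to the original heap (and
/// vice versa).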
pub fn fork(&self) -> Result<Self> {
Ok(Self {
inner: Arc::new(self.inner.fork()?),
})
}
}
impl Default for SharedHeap {
fn default() -> Self {
Self::new(64 * 1024 * 1024)
}
}
impl std::ops::Deref for SharedHeap {
type Target = ManagedHeap;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
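/// Bookkeeping for a pinned managed array whose elements are addressable
/// through raw memory reads and writes at `base_addr`.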
#[derive(Clone, Debug)]
struct PinnedArrayEntry {
array_ref: HeapRef,
base_addr: u64,
byte_length: usize,
element_size: usize,
}
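/// An emulated address space combining a shared managed heap, mapped memory
/// regions, static field storage, page-level protection overrides, monitor
/// locks, and pinned arrays.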
#[derive(Debug)]
pub struct AddressSpace {
heap: SharedHeap,
regions: RwLock<Vec<MemoryRegion>>,
statics: StaticFieldStorage,
next_address: AtomicU64,
size: u64,
protection_overrides: RwLock<ImHashMap<u64, MemoryProtection>>,
monitor_locks: RwLock<ImHashMap<u64, u32>>,
pinned_arrays: RwLock<ImHashMap<u64, PinnedArrayEntry>>,
}
impl AddressSpace {
const PAGE_SIZE: u64 = super::page::PAGE_SIZE as u64;
#[must_use]
pub fn new() -> Self {
Self::with_config(64 * 1024 * 1024, 0x1_0000_0000)
}
#[must_use]
pub fn with_config(heap_size: usize, address_space_size: u64) -> Self {
Self {
heap: SharedHeap::new(heap_size),
regions: RwLock::new(Vec::new()),
statics: StaticFieldStorage::new(),
next_address: AtomicU64::new(0x1000_0000),
size: address_space_size,
protection_overrides: RwLock::new(ImHashMap::new()),
monitor_locks: RwLock::new(ImHashMap::new()),
pinned_arrays: RwLock::new(ImHashMap::new()),
}
}
#[must_use]
pub fn with_heap(heap: SharedHeap) -> Self {
Self {
heap,
regions: RwLock::new(Vec::new()),
statics: StaticFieldStorage::new(),
next_address: AtomicU64::new(0x1000_0000),
size: 0x1_0000_0000,
protection_overrides: RwLock::new(ImHashMap::new()),
monitor_locks: RwLock::new(ImHashMap::new()),
pinned_arrays: RwLock::new(ImHashMap::new()),
}
}
#[must_use]
pub fn heap(&self) -> &SharedHeap {
&self.heap
}
#[must_use]
pub fn managed_heap(&self) -> &ManagedHeap {
self.heap.heap()
}
#[must_use]
pub fn statics(&self) -> &StaticFieldStorage {
&self.statics
}
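/// Enters (or re-enters) the monitor for `object_id` and returns the new
/// lock count.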
pub fn monitor_enter(&self, object_id: u64) -> u32 {
let mut locks = self.monitor_locks.write().unwrap();
let count = locks.get(&object_id).copied().unwrap_or(0) + 1;
*locks = locks.update(object_id, count);
count
}
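/// Exits one level of the monitor for `object_id`.
///
/// Returns `false` if the object was not locked.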
pub fn monitor_exit(&self, object_id: u64) -> bool {
let mut locks = self.monitor_locks.write().unwrap();
match locks.get(&object_id).copied() {
Some(count) if count > 1 => {
*locks = locks.update(object_id, count - 1);
true
}
Some(1) => {
*locks = locks.without(&object_id);
true
}
_ => false,
}
}
#[must_use]
pub fn monitor_is_locked(&self, object_id: u64) -> bool {
self.monitor_locks
.read()
.map(|l| l.get(&object_id).copied().unwrap_or(0) > 0)
.unwrap_or(false)
}
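/// Maps `region` at a fixed `address`, rejecting any overlap with an
/// existing mapping.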
pub fn map_at(&self, address: u64, region: MemoryRegion) -> Result<()> {
let mut regions = self.regions.write().map_err(|_| {
Error::from(EmulationError::InternalError {
description: "region lock poisoned".to_string(),
})
})?;
for existing in regions.iter() {
if Self::regions_overlap(existing, &region) {
return Err(EmulationError::InvalidAddress {
address,
reason: "region overlaps with existing mapping".to_string(),
}
.into());
}
}
regions.push(region);
Ok(())
}
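/// Maps `region` at the next free, page-aligned address and returns the
/// chosen base. PE images must be mapped with [`Self::map_at`] instead.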
pub fn map(&self, region: MemoryRegion) -> Result<u64> {
let size = region.size();
let aligned_size = (size + 0xFFF) & !0xFFF;
let base = self
.next_address
.fetch_add(aligned_size as u64, Ordering::SeqCst);
if region.is_pe_image() {
return Err(EmulationError::InternalError {
description: "PE images must use map_at with explicit base address".to_string(),
}
.into());
}
let region = region.with_base(base);
self.map_at(base, region)?;
Ok(base)
}
pub fn unmap(&self, base: u64) -> Result<()> {
let mut regions = self.regions.write().map_err(|_| {
Error::from(EmulationError::InternalError {
description: "region lock poisoned".to_string(),
})
})?;
if let Some(pos) = regions.iter().position(|r| r.base() == base) {
regions.remove(pos);
Ok(())
} else {
Err(EmulationError::InvalidAddress {
address: base,
reason: "no region at this address".to_string(),
}
.into())
}
}
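/// Reads `len` bytes starting at `address`, consulting pinned arrays first
/// and then the mapped regions.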
pub fn read(&self, address: u64, len: usize) -> Result<Vec<u8>> {
if let Some(result) = self.read_pinned(address, len) {
return result;
}
let regions = self.regions.read().map_err(|_| {
Error::from(EmulationError::InternalError {
description: "region lock poisoned".to_string(),
})
})?;
for region in regions.iter() {
if region.contains_range(address, len) {
return region.read(address, len).ok_or_else(|| {
EmulationError::InvalidAddress {
address,
reason: "read failed".to_string(),
}
.into()
});
}
}
Err(EmulationError::InvalidAddress {
address,
reason: "address not mapped".to_string(),
}
.into())
}
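/// Writes `data` starting at `address`, consulting pinned arrays first and
/// then the mapped regions.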
pub fn write(&self, address: u64, data: &[u8]) -> Result<()> {
if let Some(result) = self.write_pinned(address, data) {
return result;
}
let regions = self.regions.read().map_err(|_| {
Error::from(EmulationError::InternalError {
description: "region lock poisoned".to_string(),
})
})?;
for region in regions.iter() {
if region.contains_range(address, data.len()) {
if region.write(address, data) {
return Ok(());
}
return Err(EmulationError::InvalidAddress {
address,
reason: "write failed (possibly read-only)".to_string(),
}
.into());
}
}
Err(EmulationError::InvalidAddress {
address,
reason: "address not mapped".to_string(),
}
.into())
}
#[must_use]
pub fn is_valid(&self, address: u64) -> bool {
if let Ok(pins) = self.pinned_arrays.read() {
for entry in pins.values() {
let end = entry.base_addr + entry.byte_length as u64;
if address >= entry.base_addr && address < end {
return true;
}
}
}
let Ok(regions) = self.regions.read() else {
return false;
};
regions.iter().any(|r| r.contains(address))
}
#[must_use]
pub fn get_region(&self, address: u64) -> Option<MemoryRegion> {
let regions = self.regions.read().ok()?;
regions.iter().find(|r| r.contains(address)).cloned()
}
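/// Returns the effective protection at `address`, preferring a page-level
/// override over the owning region's protection.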
#[must_use]
pub fn get_protection(&self, address: u64) -> Option<MemoryProtection> {
let page_addr = address & !(Self::PAGE_SIZE - 1);
if let Ok(overrides) = self.protection_overrides.read() {
if let Some(&prot) = overrides.get(&page_addr) {
return Some(prot);
}
}
let regions = self.regions.read().ok()?;
regions
.iter()
.find(|r| r.contains(address))
.and_then(|r| r.protection_at(address).ok())
}
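/// Overrides protection at page granularity for `address..address + size`
/// and returns the protection previously in effect at `address` (or `None`
/// if the address is not mapped).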
pub fn set_protection(
&self,
address: u64,
size: usize,
new_protection: MemoryProtection,
) -> Option<MemoryProtection> {
let start_page = address & !(Self::PAGE_SIZE - 1);
let old_protection = if let Ok(overrides) = self.protection_overrides.read() {
if overrides.contains_key(&start_page) {
drop(overrides);
self.get_protection(address)?
} else {
drop(overrides);
let region_prot = self.get_protection(address)?;
if region_prot.contains(MemoryProtection::EXECUTE) {
MemoryProtection::READ_EXECUTE
} else {
region_prot
}
}
} else {
self.get_protection(address)?
};
let end_addr = address.saturating_add(size as u64);
let end_page = (end_addr + Self::PAGE_SIZE - 1) & !(Self::PAGE_SIZE - 1);
if let Ok(mut overrides) = self.protection_overrides.write() {
let mut page = start_page;
while page < end_page {
overrides.insert(page, new_protection);
page += Self::PAGE_SIZE;
}
}
Some(old_protection)
}
pub fn get_static(&self, field_token: Token) -> Result<Option<EmValue>> {
self.statics.get(field_token)
}
pub fn set_static(&self, field_token: Token, value: EmValue) -> Result<()> {
self.statics.set(field_token, value)
}
pub fn alloc_unmanaged(&self, size: usize) -> Result<u64> {
let region = MemoryRegion::unmanaged_alloc(0, size);
self.map(region)
}
pub fn free_unmanaged(&self, address: u64) -> Result<()> {
let regions = self.regions.read().map_err(|_| {
Error::from(EmulationError::InternalError {
description: "region lock poisoned".to_string(),
})
})?;
let is_unmanaged = regions
.iter()
.any(|r| r.base() == address && r.is_unmanaged_alloc());
drop(regions);
if is_unmanaged {
self.unmap(address)
} else {
Err(EmulationError::InvalidAddress {
address,
reason: "not an unmanaged allocation".to_string(),
}
.into())
}
}
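/// Reserves a page-aligned range of the address space without mapping it
/// and returns its base address.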
pub fn reserve_address_range(&self, size: usize) -> u64 {
let aligned_size = (size + 0xFFF) & !0xFFF;
self.next_address
.fetch_add(aligned_size as u64, Ordering::SeqCst)
}
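/// Registers a pinned managed array so that raw reads and writes against
/// `base_addr` are routed to the array's elements.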
pub fn register_pinned_array(
&self,
base_addr: u64,
array_ref: HeapRef,
element_size: usize,
element_count: usize,
) -> Result<()> {
let entry = PinnedArrayEntry {
array_ref,
base_addr,
byte_length: element_size * element_count,
element_size,
};
let mut pins = self.pinned_arrays.write().map_err(|_| {
Error::from(EmulationError::LockPoisoned {
description: "pinned arrays",
})
})?;
pins.insert(base_addr, entry);
Ok(())
}
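/// Serves a read from a pinned array when the whole range falls inside one
/// pinned entry; returns `None` to fall through to region-based reads.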
fn read_pinned(&self, addr: u64, len: usize) -> Option<Result<Vec<u8>>> {
if len == 0 {
return None;
}
let pins = self.pinned_arrays.read().ok()?;
if pins.is_empty() {
return None;
}
for entry in pins.values() {
let end = entry.base_addr + entry.byte_length as u64;
if addr >= entry.base_addr && addr + len as u64 <= end {
return Some(self.read_pinned_bytes(entry, addr, len));
}
}
None
}
fn read_pinned_bytes(
&self,
entry: &PinnedArrayEntry,
addr: u64,
len: usize,
) -> Result<Vec<u8>> {
let byte_offset = (addr - entry.base_addr) as usize;
let heap = self.managed_heap();
let mut result = vec![0u8; len];
if entry.element_size == 1 {
for (i, slot) in result.iter_mut().enumerate().take(len) {
let elem_idx = byte_offset + i;
match heap.get_array_element(entry.array_ref, elem_idx) {
Ok(EmValue::I32(v)) => {
#[allow(clippy::cast_sign_loss)]
{
*slot = (v & 0xFF) as u8;
}
}
Ok(_) | Err(_) => *slot = 0,
}
}
} else {
let start_elem = byte_offset / entry.element_size;
let end_elem = (byte_offset + len).div_ceil(entry.element_size);
let mut elem_buf = vec![0u8; entry.element_size];
for elem_idx in start_elem..end_elem {
Self::emvalue_to_bytes(
&heap
.get_array_element(entry.array_ref, elem_idx)
.unwrap_or(EmValue::I32(0)),
&mut elem_buf,
);
let elem_byte_start = elem_idx * entry.element_size;
for (j, &b) in elem_buf.iter().enumerate() {
let abs_byte = elem_byte_start + j;
if abs_byte >= byte_offset && abs_byte < byte_offset + len {
result[abs_byte - byte_offset] = b;
}
}
}
}
Ok(result)
}
fn write_pinned(&self, addr: u64, data: &[u8]) -> Option<Result<()>> {
if data.is_empty() {
return None;
}
let pins = self.pinned_arrays.read().ok()?;
if pins.is_empty() {
return None;
}
for entry in pins.values() {
let end = entry.base_addr + entry.byte_length as u64;
if addr >= entry.base_addr && addr + data.len() as u64 <= end {
return Some(self.write_pinned_bytes(entry, addr, data));
}
}
None
}
fn write_pinned_bytes(&self, entry: &PinnedArrayEntry, addr: u64, data: &[u8]) -> Result<()> {
let byte_offset = (addr - entry.base_addr) as usize;
let heap = self.managed_heap();
if entry.element_size == 1 {
for (i, &byte) in data.iter().enumerate() {
let elem_idx = byte_offset + i;
heap.set_array_element(entry.array_ref, elem_idx, EmValue::I32(i32::from(byte)))?;
}
} else {
let start_elem = byte_offset / entry.element_size;
let end_elem = (byte_offset + data.len()).div_ceil(entry.element_size);
for elem_idx in start_elem..end_elem {
let elem_byte_start = elem_idx * entry.element_size;
let mut elem_buf = vec![0u8; entry.element_size];
Self::emvalue_to_bytes(
&heap
.get_array_element(entry.array_ref, elem_idx)
.unwrap_or(EmValue::I32(0)),
&mut elem_buf,
);
for (j, byte) in elem_buf.iter_mut().enumerate() {
let abs_byte = elem_byte_start + j;
if abs_byte >= byte_offset && abs_byte < byte_offset + data.len() {
*byte = data[abs_byte - byte_offset];
}
}
let value = Self::bytes_to_emvalue(&elem_buf);
heap.set_array_element(entry.array_ref, elem_idx, value)?;
}
}
Ok(())
}
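/// Serializes an `EmValue` into little-endian bytes, truncating or
/// zero-filling to the buffer length.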
fn emvalue_to_bytes(value: &EmValue, buf: &mut [u8]) {
match value {
EmValue::I32(v) => {
let bytes = v.to_le_bytes();
let copy_len = buf.len().min(4);
buf[..copy_len].copy_from_slice(&bytes[..copy_len]);
}
EmValue::I64(v) | EmValue::NativeInt(v) => {
let bytes = v.to_le_bytes();
let copy_len = buf.len().min(8);
buf[..copy_len].copy_from_slice(&bytes[..copy_len]);
}
EmValue::F32(v) => {
let bytes = v.to_le_bytes();
let copy_len = buf.len().min(4);
buf[..copy_len].copy_from_slice(&bytes[..copy_len]);
}
EmValue::F64(v) => {
let bytes = v.to_le_bytes();
let copy_len = buf.len().min(8);
buf[..copy_len].copy_from_slice(&bytes[..copy_len]);
}
_ => buf.fill(0),
}
}
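/// Reconstructs an `EmValue` from little-endian bytes by length: 1, 2, and
/// 4 bytes become `I32`; 8 bytes become `I64`; anything else yields `I32(0)`.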
fn bytes_to_emvalue(bytes: &[u8]) -> EmValue {
match bytes.len() {
1 => EmValue::I32(i32::from(bytes[0])),
2 => EmValue::I32(i32::from(i16::from_le_bytes([bytes[0], bytes[1]]))),
4 => EmValue::I32(i32::from_le_bytes([bytes[0], bytes[1], bytes[2], bytes[3]])),
8 => EmValue::I64(i64::from_le_bytes([
bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7],
])),
_ => EmValue::I32(0),
}
}
pub fn copy_block(&self, dest: u64, src: u64, size: usize) -> Result<()> {
if size == 0 {
return Ok(());
}
let src_data = self.read(src, size)?;
self.write(dest, &src_data)
}
pub fn init_block(&self, address: u64, value: u8, size: usize) -> Result<()> {
if size == 0 {
return Ok(());
}
let data = vec![value; size];
self.write(address, &data)
}
pub fn map_pe_image(
&self,
data: &[u8],
preferred_base: u64,
sections: Vec<SectionInfo>,
name: impl Into<String>,
) -> Result<u64> {
let region = MemoryRegion::pe_image(preferred_base, data, sections, name);
self.map_at(preferred_base, region)?;
Ok(preferred_base)
}
pub fn map_data(&self, address: u64, data: &[u8], label: impl Into<String>) -> Result<()> {
let region = MemoryRegion::mapped_data(address, data, label, MemoryProtection::READ_WRITE);
self.map_at(address, region)
}
#[must_use]
pub fn regions(&self) -> Vec<(u64, usize, String)> {
match self.regions.read() {
Ok(regions) => regions
.iter()
.map(|r| (r.base(), r.size(), r.label().to_string()))
.collect(),
Err(_) => Vec::new(),
}
}
#[must_use]
pub fn mapped_size(&self) -> usize {
match self.regions.read() {
Ok(regions) => regions.iter().map(MemoryRegion::size).sum(),
Err(_) => 0,
}
}
fn regions_overlap(a: &MemoryRegion, b: &MemoryRegion) -> bool {
let a_start = a.base();
let a_end = a.end();
let b_start = b.base();
let b_end = b.end();
a_start < b_end && b_start < a_end
}
pub fn alloc_string(&self, value: &str) -> Result<HeapRef> {
self.heap.alloc_string(value)
}
pub fn get_string(&self, heap_ref: HeapRef) -> Result<std::sync::Arc<str>> {
self.heap.get_string(heap_ref)
}
pub fn alloc_object(&self, type_token: Token) -> Result<HeapRef> {
self.heap.alloc_object(type_token)
}
pub fn get_field(&self, heap_ref: HeapRef, field_token: Token) -> Result<EmValue> {
self.heap.get_field(heap_ref, field_token)
}
pub fn set_field(&self, heap_ref: HeapRef, field_token: Token, value: EmValue) -> Result<()> {
self.heap.set_field(heap_ref, field_token, value)
}
}
impl Default for AddressSpace {
fn default() -> Self {
Self::new()
}
}
impl AddressSpace {
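/// Creates an address space with the same mapped regions and allocation
/// cursor but a fresh default heap, empty statics, and no protection
/// overrides, monitor locks, or pinned arrays.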
#[must_use]
pub fn spawn_fresh(&self) -> Self {
let regions = match self.regions.read() {
Ok(r) => r.clone(),
Err(_) => Vec::new(),
};
Self {
heap: SharedHeap::default(),
regions: RwLock::new(regions),
statics: StaticFieldStorage::new(),
next_address: AtomicU64::new(self.next_address.load(Ordering::SeqCst)),
size: self.size,
protection_overrides: RwLock::new(ImHashMap::new()),
monitor_locks: RwLock::new(ImHashMap::new()),
pinned_arrays: RwLock::new(ImHashMap::new()),
}
}
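/// Creates an isolated copy of this address space: regions, heap, and
/// statics are forked, while protection overrides, monitor locks, and
/// pinned arrays are copied.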
pub fn fork(&self) -> Result<Self> {
let regions = self
.regions
.read()
.map_err(|_| EmulationError::LockPoisoned {
description: "address space regions",
})?
.iter()
.map(|region| region.fork())
.collect::<std::result::Result<Vec<_>, _>>()?;
let protection_overrides = self
.protection_overrides
.read()
.map_err(|_| EmulationError::LockPoisoned {
description: "address space protection overrides",
})?
.clone();
let monitor_locks = self
.monitor_locks
.read()
.map_err(|_| EmulationError::LockPoisoned {
description: "address space monitor locks",
})?
.clone();
let pinned_arrays = self
.pinned_arrays
.read()
.map_err(|_| EmulationError::LockPoisoned {
description: "address space pinned arrays",
})?
.clone();
Ok(Self {
heap: self.heap.fork()?,
regions: RwLock::new(regions),
statics: self.statics.fork()?,
next_address: AtomicU64::new(self.next_address.load(Ordering::SeqCst)),
size: self.size,
protection_overrides: RwLock::new(protection_overrides),
monitor_locks: RwLock::new(monitor_locks),
pinned_arrays: RwLock::new(pinned_arrays),
})
}
}
#[cfg(test)]
mod tests {
use crate::{
emulation::{
memory::{
addressspace::{AddressSpace, SharedHeap},
region::MemoryProtection,
},
EmValue,
},
metadata::token::Token,
};
#[test]
fn test_address_space_creation() {
let space = AddressSpace::new();
assert!(space.regions().is_empty());
}
#[test]
fn test_map_and_read_data() {
let space = AddressSpace::new();
let data = vec![0xDE, 0xAD, 0xBE, 0xEF];
space.map_data(0x1000, &data, "test").unwrap();
let read = space.read(0x1000, 4).unwrap();
assert_eq!(read, data);
}
#[test]
fn test_write_data() {
let space = AddressSpace::new();
space.map_data(0x1000, &[0u8; 16], "test").unwrap();
space.write(0x1000, &[0xCA, 0xFE]).unwrap();
let read = space.read(0x1000, 2).unwrap();
assert_eq!(read, vec![0xCA, 0xFE]);
}
#[test]
fn test_static_fields() {
let space = AddressSpace::new();
let field = Token::new(0x04000001);
assert!(space.get_static(field).unwrap().is_none());
space.set_static(field, EmValue::I32(42)).unwrap();
assert_eq!(space.get_static(field).unwrap(), Some(EmValue::I32(42)));
}
#[test]
fn test_shared_heap() {
let space1 = AddressSpace::new();
let str_ref = space1.alloc_string("Hello").unwrap();
let space2 = AddressSpace::with_heap(space1.heap().clone());
let s1 = space1.get_string(str_ref).unwrap();
let s2 = space2.get_string(str_ref).unwrap();
assert_eq!(&*s1, "Hello");
assert_eq!(&*s2, "Hello");
let str_ref2 = space2.alloc_string("World").unwrap();
let s3 = space1.get_string(str_ref2).unwrap();
assert_eq!(&*s3, "World");
}
#[test]
fn test_unmanaged_alloc() {
let space = AddressSpace::new();
let addr = space.alloc_unmanaged(256).unwrap();
assert!(space.is_valid(addr));
space.write(addr, &[1, 2, 3, 4]).unwrap();
let data = space.read(addr, 4).unwrap();
assert_eq!(data, vec![1, 2, 3, 4]);
space.free_unmanaged(addr).unwrap();
assert!(!space.is_valid(addr));
}
#[test]
fn test_heap_delegation() {
let space = AddressSpace::new();
let str_ref = space.alloc_string("Test").unwrap();
let s = space.get_string(str_ref).unwrap();
assert_eq!(&*s, "Test");
let type_token = Token::new(0x02000001);
let field_token = Token::new(0x04000001);
let obj_ref = space.alloc_object(type_token).unwrap();
space
.set_field(obj_ref, field_token, EmValue::I32(100))
.unwrap();
let value = space.get_field(obj_ref, field_token).unwrap();
assert_eq!(value, EmValue::I32(100));
}
#[test]
fn test_fork_memory_isolation() {
let space = AddressSpace::new();
space.map_data(0x1000, &[1, 2, 3, 4], "test").unwrap();
let forked = space.fork().unwrap();
assert_eq!(space.read(0x1000, 4).unwrap(), vec![1, 2, 3, 4]);
assert_eq!(forked.read(0x1000, 4).unwrap(), vec![1, 2, 3, 4]);
forked.write(0x1000, &[0xFF, 0xFE]).unwrap();
assert_eq!(space.read(0x1000, 4).unwrap(), vec![1, 2, 3, 4]);
assert_eq!(forked.read(0x1000, 4).unwrap(), vec![0xFF, 0xFE, 3, 4]);
}
#[test]
fn test_fork_heap_isolation() {
let space = AddressSpace::new();
let str_ref = space.alloc_string("Original").unwrap();
let forked = space.fork().unwrap();
assert_eq!(&*space.get_string(str_ref).unwrap(), "Original");
assert_eq!(&*forked.get_string(str_ref).unwrap(), "Original");
let new_ref = forked.alloc_string("Forked").unwrap();
assert_eq!(&*forked.get_string(new_ref).unwrap(), "Forked");
assert!(space.get_string(new_ref).is_err());
}
#[test]
fn test_fork_statics_isolation() {
let space = AddressSpace::new();
let field = Token::new(0x04000001);
space.set_static(field, EmValue::I32(42)).unwrap();
let forked = space.fork().unwrap();
assert_eq!(space.get_static(field).unwrap(), Some(EmValue::I32(42)));
assert_eq!(forked.get_static(field).unwrap(), Some(EmValue::I32(42)));
forked.set_static(field, EmValue::I32(100)).unwrap();
assert_eq!(space.get_static(field).unwrap(), Some(EmValue::I32(42)));
assert_eq!(forked.get_static(field).unwrap(), Some(EmValue::I32(100)));
}
#[test]
fn test_fork_protection_isolation() {
let space = AddressSpace::new();
space.map_data(0x1000, &vec![0u8; 0x2000], "test").unwrap();
space.set_protection(0x1000, 0x1000, MemoryProtection::READ_EXECUTE);
let forked = space.fork().unwrap();
assert_eq!(
space.get_protection(0x1000),
Some(MemoryProtection::READ_EXECUTE)
);
assert_eq!(
forked.get_protection(0x1000),
Some(MemoryProtection::READ_EXECUTE)
);
forked.set_protection(0x1000, 0x1000, MemoryProtection::READ_WRITE);
assert_eq!(
space.get_protection(0x1000),
Some(MemoryProtection::READ_EXECUTE)
);
assert_eq!(
forked.get_protection(0x1000),
Some(MemoryProtection::READ_WRITE)
);
}
#[test]
fn test_multiple_forks_isolation() {
let space = AddressSpace::new();
let field = Token::new(0x04000001);
space.set_static(field, EmValue::I32(1)).unwrap();
let fork1 = space.fork().unwrap();
let fork2 = space.fork().unwrap();
fork1.set_static(field, EmValue::I32(10)).unwrap();
fork2.set_static(field, EmValue::I32(20)).unwrap();
assert_eq!(space.get_static(field).unwrap(), Some(EmValue::I32(1)));
assert_eq!(fork1.get_static(field).unwrap(), Some(EmValue::I32(10)));
assert_eq!(fork2.get_static(field).unwrap(), Some(EmValue::I32(20)));
}
#[test]
fn test_shared_heap_fork() {
let heap = SharedHeap::new(1024 * 1024);
let str_ref = heap.alloc_string("Hello").unwrap();
let forked = heap.fork().unwrap();
assert_eq!(&*heap.get_string(str_ref).unwrap(), "Hello");
assert_eq!(&*forked.get_string(str_ref).unwrap(), "Hello");
let new_ref = forked.alloc_string("World").unwrap();
assert_eq!(&*forked.get_string(new_ref).unwrap(), "World");
assert!(heap.get_string(new_ref).is_err());
}
}