corevm_engine/mem.rs

use crate::{
	get_work_output_len, hash_raw, AddressKind, InnerVm, OuterVm, OutputBuffers, PageMapper,
	ProgramData, MAX_TOTAL_OUTPUT_BLOB_SIZE,
};
use corevm_host::{OutputStream, PageSegment, Range, RangeSet, PAGE_SIZE};
use jam_pvm_common::ApiError;
use jam_types::{max_exports, Hash, VecMap, VecSet, SEGMENT_LEN};
use log::{debug, trace};

/// Inner VM's memory manager.
///
/// Allocates and deallocates inner VM's memory pages and handles page faults.
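///
/// A rough lifecycle sketch (hypothetical caller; the `inner_vm`, `outer_vm` and other
/// arguments are assumed to be provided by the host integration):
///
/// ```ignore
/// let mut mem = MemoryManager::new(
/// 	inner_vm, &mut outer_vm, program, heap_pages, resident_pages, export_count, auth_output_len,
/// )?;
/// // On a guest page fault, materialize the faulting page.
/// mem.touch(fault_address).unwrap();
/// // Capture guest output streams.
/// mem.append_output(OutputStream::Stdout, src_address, len).unwrap();
/// // At the end of the run, export the modified pages and output segments.
/// let (heap_pages, resident_pages, imported, touched, num_pages, stream_len) =
/// 	mem.export(&mut outer_vm)?;
/// ```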
pub struct MemoryManager<I: InnerVm> {
	pub(crate) inner_vm: I,
	program: ProgramData,
	/// All memory pages that were read from/written to across all program runs but haven't been
	/// unmapped yet.
	resident_pages: RangeSet,
	/// Memory pages that were imported by the builder.
	imported_pages: VecMap<u64, PageSegment>,
	/// Memory pages that were touched (faulted) during the current program run.
	///
	/// Includes deallocated pages.
	touched_pages: VecSet<u64>,
	touched_ro_pages: VecSet<u64>,
	touched_imported_pages: VecSet<u64>,
	/// Should be equal to `WorkItem::export_count`.
	export_count: usize,
	/// Guest program's output streams.
	output: OutputBuffers,
	heap_page_mapper: PageMapper,
	auth_output_len: usize,
}

impl<I: InnerVm> MemoryManager<I> {
	pub fn new<O: OuterVm>(
		inner_vm: I,
		outer_vm: &mut O,
		program: ProgramData,
		heap_pages: RangeSet,
		resident_pages: RangeSet,
		export_count: usize,
		auth_output_len: usize,
	) -> Result<Self, ApiError> {
		let heap_address_range = program.heap_range();
		let heap_page_range =
			Range::new(heap_address_range.start / PAGE_SIZE, heap_address_range.end / PAGE_SIZE);
		let heap_page_mapper = PageMapper::new(heap_pages, heap_page_range);
		assert!(export_count <= max_exports() as usize);
		let mut man = Self {
			inner_vm,
			program,
			resident_pages,
			imported_pages: Default::default(),
			touched_pages: Default::default(),
			touched_ro_pages: Default::default(),
			touched_imported_pages: Default::default(),
			export_count,
			output: Default::default(),
			heap_page_mapper,
			auth_output_len,
		};
		man.import_pages(outer_vm);
		Ok(man)
	}

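	/// Allocates a heap block of at least `size` bytes, rounded up to a whole number of pages.
	///
	/// Returns the page-aligned guest address and the actual, page-multiple size, or `None` if
	/// the request cannot be satisfied. For example, assuming a 4 KiB `PAGE_SIZE`, a 5000-byte
	/// request maps two pages and reports a size of 8192 bytes.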
	pub fn alloc(&mut self, size: u64) -> Option<(u64, u64)> {
		let size = size.checked_next_multiple_of(PAGE_SIZE)?;
		let num_pages = size / PAGE_SIZE;
		let page_range = self.heap_page_mapper.map(num_pages)?;
		let address = page_range.start * PAGE_SIZE;
		let size = (page_range.end - page_range.start) * PAGE_SIZE;
		Some((address, size))
	}

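	/// Frees a previously allocated heap block: unmaps its pages, drops them from the resident
	/// set and voids them in the inner VM.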
	pub fn dealloc(&mut self, address: u64, size: u64) -> Result<(), ApiError> {
		let start_page = address / PAGE_SIZE;
		let end_page = address.saturating_add(size).div_ceil(PAGE_SIZE);
		self.heap_page_mapper.unmap(start_page, end_page);
		self.resident_pages.remove(&Range::new(start_page, end_page));
		self.inner_vm.void(start_page, end_page - start_page)?;
		trace!("Freed guest memory block {:#x}..{:#x}", address, address + size);
		Ok(())
	}

	/// Handles a page fault at the specified `address`.
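	///
	/// Depending on the address class and the remaining export/output budget, this either
	/// initializes the page in the inner VM and records it as touched, returns
	/// [`PageFaultError::PageFault`] when the page must be supplied by the builder or would not
	/// fit into the work-package exports or work output, or returns
	/// [`PageFaultError::ApiError`] for out-of-range addresses.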
	pub fn touch(&mut self, address: u64) -> Result<(), PageFaultError> {
		let Some(class) = self.program.classify_address(address) else {
			// Memory map doesn't have this address.
			trace!("Page fault at out-of-range address {:#x}", address);
			return Err(ApiError::OutOfBounds.into());
		};
		// We don't add RO data pages to `self.touched_pages` because they don't need to be
		// exported.
		let page = address / PAGE_SIZE;
		if class == AddressKind::RoData {
			if self.touched_ro_pages.contains(&address) {
				return Ok(());
			}
			trace!("Page fault at {page}/{address:#x}/{class:?}");
			self.inner_vm.poke_ro_data_page(address, &self.program)?;
			self.touched_ro_pages.insert(address);
			return Ok(());
		}
		// RW data, stack and heap need to be exported.
		if self
			.num_exported_pages()
			.checked_add(self.output.segment_count())
			.unwrap_or(usize::MAX) >=
			self.export_count
		{
			// The page will not fit into work package exports.
			trace!(
				"Page fault at {page}/{address:#x}/{class:?}: \
                The page will not fit into work package exports"
			);
			return Err(PageFaultError::PageFault { page, num_pages: 1 });
		}
		let resident_page_inserted = if self.resident_pages.contains_index(page) {
			// We know this page.
			let Some(segment) = self.imported_pages.get(&address) else {
				// This page was allocated by some previous work package, but the builder hasn't
				// imported it into the current work package. We error out so that the builder
				// imports this page for the next program run.
				trace!("Page fault at {page}/{address:#x}/{class:?} (missing page)");
				return Err(PageFaultError::PageFault { page, num_pages: 1 });
			};
			trace!("Page fault at {page}/{address:#x}/{class:?} (resident page)");
			self.inner_vm.zero_poke(segment.page(), address)?;
			self.touched_imported_pages.insert(address);
			false
		} else {
			// We don't know this page. Initialize the page based on the address range.
			match class {
				AddressKind::RoData => {
					// RO pages are handled above.
					unreachable!()
				},
				AddressKind::RwData => {
					trace!("Page fault at {page}/{address:#x}/{class:?}");
					self.inner_vm.poke_rw_data_page(address, &self.program)?;
				},
				AddressKind::Stack => {
					trace!("Page fault at {page}/{address:#x}/{class:?}");
					self.inner_vm.zero_stack_page(address, &self.program)?;
				},
				AddressKind::Heap => {
					if !self.heap_page_mapper.is_mapped(address / PAGE_SIZE) {
						panic!("Page fault at {page}/{address:#x}/{class:?} (unmapped)");
					}
					trace!("Page fault at {page}/{address:#x}/{class:?}");
					self.inner_vm.zero_heap_page(address, &self.program)?;
				},
			}
			self.resident_pages.insert(Range::new(page, page + 1));
			true
		};
		let touched_pages_inserted = self.touched_pages.insert(address);
		// Check that we're still within the maximum bounds of the work output.
		if (resident_page_inserted || touched_pages_inserted) &&
			self.check_work_output_len().is_err()
		{
			// The page will not fit into the work output.
			trace!("Page fault at {page}/{address:#x}/{class:?}: The page will not fit into work output");
			// Revert all insertions that we made.
			if resident_page_inserted {
				self.resident_pages.remove(&Range::new(page, page + 1));
			}
			if touched_pages_inserted {
				self.touched_pages.remove(&address);
			}
			return Err(PageFaultError::PageFault { page, num_pages: 1 });
		}
		Ok(())
	}

	fn check_work_output_len(&self) -> Result<(), ()> {
		let work_output_len = get_work_output_len(
			&self.heap_page_mapper.pages,
			&self.resident_pages,
			self.touched_imported_pages.len(),
			self.num_exported_pages(),
		);
		if work_output_len + self.auth_output_len >= MAX_TOTAL_OUTPUT_BLOB_SIZE {
			return Err(());
		}
		Ok(())
	}

	fn num_exported_pages(&self) -> usize {
		self.touched_pages
			.iter()
			.filter(|address| {
				// Filter out deallocated pages.
				let page = *address / PAGE_SIZE;
				self.resident_pages.contains_index(page)
			})
			.count()
	}

	/// Appends data to the guest program's output stream.
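	///
	/// A zero `len` always succeeds, and a null `inner_src` only verifies that `len` bytes
	/// would fit, so the guest can probe the output limits without forcing page faults.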
	pub fn append_output(
		&mut self,
		stream: OutputStream,
		inner_src: u64,
		len: u64,
	) -> Result<(), AppendOutputError> {
		if len == 0 {
			// Succeed regardless of the validity of `inner_src` to minimize potential page faults.
			return Ok(());
		}
		let Some(new_segment_count) = self.output.segment_count_after(stream, len as usize) else {
			// The `len` is too large for appending to the current buffer.
			trace!("Output limit reached while appending {len} byte(s) to {stream:?}");
			return Err(AppendOutputError::OutputLimitReached);
		};
		if self.num_exported_pages().saturating_add(new_segment_count) >= self.export_count {
			trace!("Output limit reached while appending {len} byte(s) to {stream:?}");
			// TODO @ivan This error can trigger in the middle of the execution because we've run
			// out of exports, however, at the end of the execution the program might deallocate
			// some pages and free space can be reused for the output.
			return Err(AppendOutputError::OutputLimitReached);
		}
		if inner_src == 0 {
			// Confirm that the operation can be carried out successfully when repeated with a
			// valid address.
			trace!("Confirm {len} byte(s) can be appended to {stream:?} without reaching the output limit");
			return Ok(());
		}
		// Touch the pages first because they might have been swapped out in the previous run.
		let page_aligned_inner_src = inner_src - inner_src % PAGE_SIZE;
		let page_aligned_inner_src_end = inner_src
			.checked_add(len)
			.and_then(|x| x.checked_next_multiple_of(PAGE_SIZE))
			.ok_or(AppendOutputError::ApiError(ApiError::OutOfBounds))?;
		let num_pages = (page_aligned_inner_src_end - page_aligned_inner_src) / PAGE_SIZE;
		for address in
			(page_aligned_inner_src..page_aligned_inner_src_end).step_by(PAGE_SIZE as usize)
		{
			let page = address / PAGE_SIZE;
			if self.touched_pages.contains(&address) && self.resident_pages.contains_index(page) {
				// We already handled this page fault.
				continue;
			}
			self.touch(address).map_err(|e| match e {
				// Replace the single-page fault with one that covers all the pages we need
				// in order to make progress.
				PageFaultError::PageFault { .. } => AppendOutputError::PageFault {
					page: page_aligned_inner_src / PAGE_SIZE,
					num_pages,
				},
				e => e.into(),
			})?;
		}
		let buf = self.output.pre_allocate(stream, len as usize);
		self.inner_vm.peek_into(buf, inner_src)?;
		trace!(
			"Appended {len} bytes to {stream:?}: src={inner_src:#x}, utf8={:?}",
			if matches!(stream, OutputStream::Stdout | OutputStream::Stderr) {
				core::str::from_utf8(buf)
			} else {
				Ok("")
			}
		);
		Ok(())
	}

	/// Exports the modified pages as segments.
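	///
	/// Returns, in order: the heap page set, the resident page set, hashes of the imported
	/// pages that were touched, hashes of all touched pages (a zero hash marks a freed page),
	/// the number of exported memory pages, and the per-stream output lengths.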
	pub fn export<O: OuterVm>(
		&mut self,
		outer_vm: &mut O,
	) -> Result<
		(RangeSet, RangeSet, VecMap<u64, Hash>, VecMap<u64, Hash>, u32, [u32; OutputStream::COUNT]),
		ApiError,
	> {
		let stream_len = self.output.stream_len();
		let output_len = self.output.total_len() as u32;
		// Export all touched pages.
		let mut touched_pages: VecMap<u64, Hash> = VecMap::new();
		let mut num_exported_pages = 0;
		for address in self.touched_pages.iter() {
			let page = address / PAGE_SIZE;
			let hash = if self.resident_pages.contains_index(page) {
				let mut segment = PageSegment::zero(*address);
				self.inner_vm.peek_into(segment.page_mut(), *address)?;
				outer_vm.export(segment.as_ref())?;
				num_exported_pages += 1;
				trace!("Exported regular page {:#x}", address);
				hash_raw(segment.as_ref())
			} else {
				trace!("Not exporting freed regular page {:#x}", address);
				// The page was freed.
				[0; 32]
			};
			// Update page hash.
			touched_pages.insert(*address, hash);
		}
		// Export output as segments.
		self.output.export_segments(|segment| outer_vm.export(&segment[..]))?;
		let num_output_segments = output_len.div_ceil(SEGMENT_LEN as u32) as usize;
		// The total number of exported segments must equal `WorkItem::export_count` for the
		// work output to be considered valid. Hence we pad with zero-filled segments at null
		// addresses.
		let total_segment_count = num_exported_pages + num_output_segments;
		for _ in total_segment_count..self.export_count {
			let segment = PageSegment::null();
			outer_vm.export(segment.as_ref())?;
		}
		debug!(
			"Exported page(s): {:?}",
			touched_pages
				.iter()
				.map(|(address, _)| {
					let page = address / PAGE_SIZE;
					Range::new(page, page + 1)
				})
				.collect::<RangeSet>()
		);
		debug!(
			"Exported {} segments: {} memory page(s), {} output segment(s), {} null segment(s)",
			self.export_count,
			num_exported_pages,
			output_len.div_ceil(SEGMENT_LEN as u32),
			self.export_count.saturating_sub(total_segment_count)
		);
		let resident_pages = core::mem::take(&mut self.resident_pages);
		let heap_pages = core::mem::take(&mut self.heap_page_mapper.pages);
		let touched_imported_pages = self
			.imported_pages
			.iter()
			.filter(|(address, _segment)| self.touched_imported_pages.contains(address))
			.map(|(address, segment)| (*address, hash_raw(segment.as_ref())))
			.collect();
		Ok((
			heap_pages,
			resident_pages,
			touched_imported_pages,
			touched_pages,
			num_exported_pages as u32,
			stream_len,
		))
	}

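	/// Imports every page segment supplied by the builder from the outer VM and records it in
	/// `imported_pages`, panicking on segments whose address is out of range or not resident.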
	fn import_pages<O: OuterVm>(&mut self, outer_vm: &mut O) {
		let mut i = 0;
		while let Some(page_segment) = outer_vm.import(i) {
			let address = page_segment.address();
			let page = address / PAGE_SIZE;
			let Some(class) = self.program.classify_address(address) else {
				panic!("Failed to import segment {i}: Invalid page {page}/{address:#x}");
			};
			if !self.resident_pages.contains_index(page) {
				panic!(
					"Failed to import segment {i}: Unknown page {page}/{address:#x}/{class:?}, known pages {:?}",
					self.resident_pages
				);
			}
			trace!("Imported page {page}/{address:#x}/{class:?}");
			self.imported_pages.insert(address, page_segment);
			i += 1;
		}
		debug!(
			"Imported page(s): {:?}",
			self.imported_pages
				.iter()
				.map(|(address, _)| {
					let page = address / PAGE_SIZE;
					Range::new(page, page + 1)
				})
				.collect::<RangeSet>()
		);
	}
}

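/// Error returned by [`MemoryManager::touch`].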
#[derive(Debug)]
pub enum PageFaultError {
	ApiError(ApiError),
	PageFault { page: u64, num_pages: u64 },
}

impl From<ApiError> for PageFaultError {
	fn from(e: ApiError) -> Self {
		Self::ApiError(e)
	}
}

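/// Error returned by [`MemoryManager::append_output`].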
#[derive(Debug)]
pub enum AppendOutputError {
	ApiError(ApiError),
	PageFault { page: u64, num_pages: u64 },
	OutputLimitReached,
}

impl From<ApiError> for AppendOutputError {
	fn from(e: ApiError) -> Self {
		Self::ApiError(e)
	}
}

impl From<PageFaultError> for AppendOutputError {
	fn from(e: PageFaultError) -> Self {
		match e {
			PageFaultError::ApiError(e) => Self::ApiError(e),
			PageFaultError::PageFault { page, num_pages } => Self::PageFault { page, num_pages },
		}
	}
}

#[cfg(test)]
mod tests {
	use super::*;

	use alloc::{vec, vec::Vec};
	use jam_pvm_common::InvokeOutcome;
	use jam_types::{ServiceId, SignedGas};
	use polkavm::MemoryMapBuilder;
	use rand::{seq::IndexedRandom, Rng, RngCore};

	#[test]
	fn touch_and_append_output_works() {
		let mut rng = rand::rng();
		let (mut mem, _) = MemoryManager::with_data(
			{
				let len = rng.random_range(0..=2 * PAGE_SIZE);
				let mut data = vec![0_u8; len as usize];
				rng.fill_bytes(&mut data[..]);
				data
			},
			{
				let len = rng.random_range(0..=2 * PAGE_SIZE);
				let mut data = vec![0_u8; len as usize];
				rng.fill_bytes(&mut data[..]);
				data
			},
		);
		let ro_page_range = {
			let r = mem.program.ro_data_range();
			let ro_start_page = r.start / PAGE_SIZE;
			let ro_end_page = r.end / PAGE_SIZE;
			ro_start_page..ro_end_page
		};
		let ro_data_page_range = {
			let r = mem.program.ro_data_range();
			let data_len = mem.program.ro_data.len() as u64;
			let ro_start_page = r.start / PAGE_SIZE;
			let ro_end_page = (r.start + data_len).div_ceil(PAGE_SIZE);
			ro_start_page..ro_end_page
		};
		let rw_page_range = {
			let r = mem.program.rw_data_range();
			let rw_start_page = r.start / PAGE_SIZE;
			let rw_end_page = r.end / PAGE_SIZE;
			rw_start_page..rw_end_page
		};
		let rw_data_page_range = {
			let r = mem.program.rw_data_range();
			let data_len = mem.program.rw_data.len() as u64;
			let rw_start_page = r.start / PAGE_SIZE;
			let rw_end_page = (r.start + data_len).div_ceil(PAGE_SIZE);
			rw_start_page..rw_end_page
		};
		let stack_page_range = {
			let r = mem.program.stack_range();
			let stack_start_page = r.start / PAGE_SIZE;
			let stack_end_page = r.end / PAGE_SIZE;
			stack_start_page..stack_end_page
		};
		let heap_page_range = {
			let r = mem.program.heap_range();
			let heap_start_page = r.start / PAGE_SIZE;
			let heap_end_page = r.end / PAGE_SIZE;
			heap_start_page..heap_end_page
		};
		assert_eq!(None, mem.program.classify_address(0));
		assert!(matches!(mem.touch(0), Err(PageFaultError::ApiError(ApiError::OutOfBounds))));
		// Check that `touch` initializes the pages and stores them in `resident_pages` and
		// `touched_pages`.
		//
		// - Stack and heap pages are always zeroed.
		// - RO/RW data pages are either initialized with the static data from the PolkaVM
		//   program or are zeroed if the address goes beyond the static data.
		macro_rules! check_touch {
			($range: expr, readonly = $read_only: expr, alloc = $alloc: expr, data = $data: expr$(,)*) => {{
				let range = $range;
				let data = $data;
				let (page, address) = if $alloc {
					let (address, _size) = mem.alloc(PAGE_SIZE as u64).unwrap();
					let page = address / PAGE_SIZE as u64;
					(page, address)
				} else {
					let page = rng.random_range(range.clone());
					let address = page * PAGE_SIZE as u64;
					(page, address)
				};
				mem.resident_pages = Default::default();
				mem.touched_pages = Default::default();
				mem.inner_vm.calls.clear();
				assert!(mem.touch(address).is_ok());
				if data.is_empty() {
					assert_eq!(
						[Call::Zero { page, num_pages: 1 }].as_slice(),
						mem.inner_vm.calls.as_slice()
					);
				} else {
					let start = (page - range.start) as usize * PAGE_SIZE as usize;
					let end = (start + PAGE_SIZE as usize).min(data.len());
					let outer_src = data[start..end].to_vec();
					assert_eq!(
						[
							Call::Zero { page, num_pages: 1 },
							Call::Poke { outer_src, inner_dst: address }
						]
						.as_slice(),
						mem.inner_vm.calls.as_slice()
					);
				}
				if !$read_only {
					assert_eq!(
						[Range::new(page, page + 1)].as_slice(),
						mem.resident_pages.as_slice()
					);
					assert_eq!(VecSet::from_iter([address]), mem.touched_pages);
				}
			}};
		}
		for _ in 0..1000 {
			// RO pages with no data.
			check_touch!(
				ro_data_page_range.end..ro_page_range.end,
				readonly = true,
				alloc = false,
				data = [0_u8; 0],
			);
			// RO pages with data.
			check_touch!(
				ro_data_page_range.start..ro_data_page_range.end,
				readonly = true,
				alloc = false,
				data = mem.program.ro_data.clone(),
			);
			// RW pages with no data.
			check_touch!(
				rw_data_page_range.end..rw_page_range.end,
				readonly = false,
				alloc = false,
				data = [0_u8; 0],
			);
			// RW pages with data.
			check_touch!(
				rw_data_page_range.start..rw_data_page_range.end,
				readonly = false,
				alloc = false,
				data = mem.program.rw_data.clone(),
			);
			// Stack pages.
			check_touch!(
				stack_page_range.start..stack_page_range.end,
				readonly = false,
				alloc = false,
				data = [0_u8; 0],
			);
			// Heap pages.
			check_touch!(
				heap_page_range.start..heap_page_range.end,
				readonly = false,
				alloc = true,
				data = [0_u8; 0],
			);
			// Append output.
			{
				mem.resident_pages = Default::default();
				mem.touched_pages = Default::default();
				mem.touched_ro_pages = Default::default();
				mem.inner_vm.calls.clear();
				let stream = *OutputStream::ALL.choose(&mut rng).unwrap();
				let len = rng.random_range(1..=PAGE_SIZE);
				let (address, _size) = mem.alloc(len).unwrap();
				let page_aligned_address = address - address % PAGE_SIZE;
				let start_page = page_aligned_address / PAGE_SIZE;
				let end_page = (address + len).next_multiple_of(PAGE_SIZE) / PAGE_SIZE;
				assert!(mem.append_output(stream, address, len).is_ok());
				let mut calls = Vec::new();
				if len != 0 {
					for page in start_page..end_page {
						calls.push(Call::Zero { page, num_pages: 1 });
					}
					calls.push(Call::PeekInto { inner_src: address, len });
				}
				assert_eq!(calls.as_slice(), mem.inner_vm.calls.as_slice());
			}
		}
	}

	#[test]
	fn page_limit_on_touch_works() {
		let (mut mem, _) = MemoryManager::new_for_tests();
		let mut num_touched = 0;
		for _ in 0..max_exports() {
			let (address, _size) = mem.alloc(PAGE_SIZE).unwrap();
			let result = mem.touch(address);
			assert!(mem.touched_pages.len() <= max_exports() as usize);
			assert_eq!(Ok(()), mem.check_work_output_len());
			if result.is_err() {
				break;
			}
			num_touched += 1;
		}
		assert_ne!(0, num_touched);
	}

	#[test]
	fn page_limit_on_output_works() {
		let _ = env_logger::try_init();
		let (mut mem, mut outer_vm) = MemoryManager::new_for_tests();
		mem.inner_vm.randomize_memory = true;
		assert!(
			mem.program.heap_range().end - mem.program.heap_range().start > max_exports() as u64
		);
		let mut rng = rand::rng();
		let (address, _len) = mem.alloc(PAGE_SIZE).unwrap();
		loop {
			let stream = *OutputStream::ALL.choose(&mut rng).unwrap();
			let len = rng.random_range(0..=PAGE_SIZE);
			let result = mem.append_output(stream, address, len);
			assert_eq!(Ok(()), mem.check_work_output_len());
			match result {
				Ok(()) => {},
				Err(AppendOutputError::OutputLimitReached) => {
					break;
				},
				Err(e) => {
					panic!("Unexpected error {e:?}");
				},
			}
		}
		for _ in 0..10 {
			let stream = *OutputStream::ALL.choose(&mut rng).unwrap();
			assert!(mem.append_output(stream, 0, 0).is_ok());
		}
		assert!(mem.touched_pages.len() <= max_exports() as usize);
		assert_eq!(Ok(()), mem.check_work_output_len());
		let (_, _, _, _, num_memory_pages, stream_len) = mem.export(&mut outer_vm).unwrap();
		let output_len: u32 = stream_len.iter().sum();
		let num_output_segments = output_len.div_ceil(SEGMENT_LEN as u32) as usize;
		assert_eq!(num_memory_pages as usize + num_output_segments, outer_vm.real_export_count());
		assert!(outer_vm.exports.len() <= max_exports() as usize);
	}

	#[test]
	fn append_output_edge_cases() {
		let (mut mem, _) = MemoryManager::new_for_tests();
		let mut rng = rand::rng();
		let stream = *OutputStream::ALL.choose(&mut rng).unwrap();
		let result = mem.append_output(stream, mem.program.heap_range().start, u64::MAX);
		assert!(
			matches!(result, Err(AppendOutputError::OutputLimitReached)),
			"Result = {result:?}"
		);
		for kind in AddressKind::ALL {
			let address = mem.program.address_range(kind).end;
			if mem.program.classify_address(address).is_some() {
				// Blocks are laid out in memory one after another.
				continue;
			}
			assert!(
				matches!(
					mem.append_output(stream, address, 1),
					Err(AppendOutputError::ApiError(ApiError::OutOfBounds)),
				),
				"address kind = {kind:?}",
			);
		}
	}

	impl MemoryManager<TestInnerVm> {
		fn new_for_tests() -> (Self, TestOuterVm) {
			Self::with_data(Default::default(), Default::default())
		}

		fn with_data(ro_data: Vec<u8>, rw_data: Vec<u8>) -> (Self, TestOuterVm) {
			let inner_vm = TestInnerVm::default();
			let mut outer_vm = TestOuterVm::default();
			let memory_map = MemoryMapBuilder::new(PAGE_SIZE as u32)
				.ro_data_size((PAGE_SIZE * 10) as u32)
				.rw_data_size((PAGE_SIZE * 10) as u32)
				.stack_size((PAGE_SIZE * 10) as u32)
				.build()
				.expect("Failed to build memory map");
			let program_data = ProgramData::new(&memory_map, ro_data.into(), rw_data.into());
			let man = Self::new(
				inner_vm,
				&mut outer_vm,
				program_data,
				Default::default(),
				Default::default(),
				max_exports() as usize,
				0,
			)
			.unwrap();
			(man, outer_vm)
		}
	}

	#[derive(Default)]
	struct TestInnerVm {
		calls: Vec<Call>,
		randomize_memory: bool,
	}

	impl InnerVm for TestInnerVm {
		fn void(&mut self, page: u64, num_pages: u64) -> Result<(), ApiError> {
			self.calls.push(Call::Void { page, num_pages });
			Ok(())
		}

		fn zero(&mut self, page: u64, num_pages: u64) -> Result<(), ApiError> {
			self.calls.push(Call::Zero { page, num_pages });
			Ok(())
		}

		fn poke(&mut self, outer_src: &[u8], inner_dst: u64) -> Result<(), ApiError> {
			self.calls.push(Call::Poke { outer_src: outer_src.to_vec(), inner_dst });
			Ok(())
		}

		fn peek_into(&mut self, outer_dst: &mut [u8], inner_src: u64) -> Result<(), ApiError> {
			self.calls.push(Call::PeekInto { len: outer_dst.len() as u64, inner_src });
			if self.randomize_memory {
				rand::rng().fill_bytes(outer_dst);
			}
			Ok(())
		}

		fn expunge(self) -> Result<u64, ApiError> {
			unreachable!()
		}

		fn invoke(
			&mut self,
			_gas: SignedGas,
			_regs: [u64; 13],
		) -> Result<(InvokeOutcome, SignedGas, [u64; 13]), ApiError> {
			unreachable!()
		}
	}

	#[derive(Debug, PartialEq, Eq)]
	enum Call {
		Void { page: u64, num_pages: u64 },
		Zero { page: u64, num_pages: u64 },
		Poke { outer_src: Vec<u8>, inner_dst: u64 },
		PeekInto { len: u64, inner_src: u64 },
	}

	#[derive(Default)]
	struct TestOuterVm {
		imports: Vec<PageSegment>,
		exports: Vec<PageSegment>,
	}

	impl TestOuterVm {
		/// The number of exports without counting the trailing null segments.
		fn real_export_count(&self) -> usize {
			let num_null_segments = self
				.exports
				.iter()
				.rev()
				.take_while(|segment| segment.as_ref().iter().all(|&b| b == 0))
				.count();
			self.exports.len() - num_null_segments
		}
	}

	impl OuterVm for TestOuterVm {
		type InnerVm = TestInnerVm;

		fn import(&mut self, i: usize) -> Option<PageSegment> {
			self.imports.get(i).cloned()
		}

		fn export(&mut self, segment: &[u8]) -> Result<(), ApiError> {
			self.exports.push(PageSegment::new(segment.to_vec().try_into().unwrap()));
			Ok(())
		}

		fn lookup(&mut self, _service_id: ServiceId, _hash: &Hash) -> Option<Vec<u8>> {
			unreachable!()
		}

		fn get_export_count(&mut self) -> u16 {
			unreachable!()
		}

		fn get_auth_output_len(&mut self) -> u32 {
			unreachable!()
		}

		fn machine(
			&mut self,
			_code: &[u8],
			_program_counter: u64,
		) -> Result<TestInnerVm, ApiError> {
			unreachable!()
		}
	}
}
806}