gear_sp_allocator/
freeing_bump.rs

1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: Apache-2.0
5
6// Licensed under the Apache License, Version 2.0 (the "License");
7// you may not use this file except in compliance with the License.
8// You may obtain a copy of the License at
9//
10// 	http://www.apache.org/licenses/LICENSE-2.0
11//
12// Unless required by applicable law or agreed to in writing, software
13// distributed under the License is distributed on an "AS IS" BASIS,
14// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15// See the License for the specific language governing permissions and
16// limitations under the License.
17
18//! This module implements a freeing-bump allocator.
19//!
20//! The heap is a continuous linear memory and chunks are allocated using a bump allocator.
21//!
22//! ```ignore
23//! +-------------+-------------------------------------------------+
24//! | <allocated> | <unallocated>                                   |
25//! +-------------+-------------------------------------------------+
26//!               ^
27//!               |_ bumper
28//! ```
29//!
30//! Only allocations with sizes of power of two can be allocated. If the incoming request has a non
31//! power of two size it is increased to the nearest power of two. The power of two of size is
//! referred to as **an order**.
33//!
//! Each allocation has a header immediately preceding it. The header is always 8 bytes and can
35//! be of two types: free and occupied.
36//!
//! For implementing freeing we maintain a linked list for each order. The maximum supported
38//! allocation size is capped, therefore the number of orders and thus the linked lists is as well
39//! limited. Currently, the maximum size of an allocation is 32 MiB.
40//!
41//! When the allocator serves an allocation request it first checks the linked list for the
42//! respective order. If it doesn't have any free chunks, the allocator requests memory from the
43//! bump allocator. In any case the order is stored in the header of the allocation.
44//!
45//! Upon deallocation we get the order of the allocation from its header and then add that
46//! allocation to the linked list for the respective order.
47//!
48//! # Caveats
49//!
50//! This is a fast allocator but it is also dumb. There are specifically two main shortcomings
51//! that the user should keep in mind:
52//!
53//! - Once the bump allocator space is exhausted, there is no way to reclaim the memory. This means
54//!   that it's possible to end up in a situation where there are no live allocations yet a new
55//!   allocation will fail.
56//!
57//!   Let's look into an example. Given a heap of 32 MiB. The user makes a 32 MiB allocation that we
58//!   call `X` . Now the heap is full. Then user deallocates `X`. Since all the space in the bump
59//!   allocator was consumed by the 32 MiB allocation, allocations of all sizes except 32 MiB will
60//!   fail.
61//!
//! - Sizes of allocations are rounded up to the nearest order. That is, an allocation of 2.00001
63//!   MiB will be put into the bucket of 4 MiB. Therefore, any allocation of size `(N, 2N]` will
64//!   take up to `2N`, thus assuming a uniform distribution of allocation sizes, the average amount
65//!   in use of a `2N` space on the heap will be `(3N + ε) / 2`. So average utilization is going to
66//!   be around 75% (`(3N + ε) / 2 / 2N`) meaning that around 25% of the space in allocation will be
67//!   wasted. This is more pronounced (in terms of absolute heap amounts) with larger allocation
68//!   sizes.
69
70use crate::{Error, Memory, MAX_POSSIBLE_ALLOCATION, MAX_WASM_PAGES, PAGE_SIZE};
71use sp_wasm_interface_common::{Pointer, WordSize};
72use std::{
73	cmp::{max, min},
74	mem,
75	ops::{Index, IndexMut, Range},
76};
77
/// The minimal alignment guaranteed by this allocator.
///
/// The alignment of 8 is chosen because it is the maximum size of a primitive type supported by the
/// target version of wasm32: i64's natural alignment is 8.
const ALIGNMENT: u32 = 8;

// Each allocation is prefixed with an 8-byte header which identifies the free-list index
// (the order) to which the chunk belongs.
const HEADER_SIZE: u32 = 8;
87
/// Create an allocator error from a static message.
///
/// Thin helper so call sites can write `error("...")` instead of `Error::Other("...")`.
fn error(msg: &'static str) -> Error {
	Error::Other(msg)
}
92
/// The log target used by all log statements of this module.
const LOG_TARGET: &str = "wasm-heap";

// The minimum possible allocation size is chosen to be 8 bytes because in that case we would have
// easier time to provide the guaranteed alignment of 8.
//
// The maximum possible allocation size is set in the primitives to 32MiB.
//
// N_ORDERS - represents the number of orders supported.
//
// This number corresponds to the number of powers between the minimum possible allocation and
// maximum possible allocation, or: 2^3...2^25 (both ends inclusive, hence 23).
const N_ORDERS: usize = 23;
const MIN_POSSIBLE_ALLOCATION: u32 = 8; // 2^3 bytes, 8 bytes
106
107/// The exponent for the power of two sized block adjusted to the minimum size.
108///
109/// This way, if `MIN_POSSIBLE_ALLOCATION == 8`, we would get:
110///
111/// power_of_two_size | order
112/// 8                 | 0
113/// 16                | 1
114/// 32                | 2
115/// 64                | 3
116/// ...
117/// 16777216          | 21
118/// 33554432          | 22
119///
120/// and so on.
121#[derive(Copy, Clone, PartialEq, Eq, Debug)]
122struct Order(u32);
123
124impl Order {
125	/// Create `Order` object from a raw order.
126	///
127	/// Returns `Err` if it is greater than the maximum supported order.
128	fn from_raw(order: u32) -> Result<Self, Error> {
129		if order < N_ORDERS as u32 {
130			Ok(Self(order))
131		} else {
132			Err(error("invalid order"))
133		}
134	}
135
136	/// Compute the order by the given size
137	///
138	/// The size is clamped, so that the following holds:
139	///
140	/// `MIN_POSSIBLE_ALLOCATION <= size <= MAX_POSSIBLE_ALLOCATION`
141	fn from_size(size: u32) -> Result<Self, Error> {
142		let clamped_size = if size > MAX_POSSIBLE_ALLOCATION {
143			log::warn!(target: LOG_TARGET, "going to fail due to allocating {:?}", size);
144			return Err(Error::RequestedAllocationTooLarge)
145		} else if size < MIN_POSSIBLE_ALLOCATION {
146			MIN_POSSIBLE_ALLOCATION
147		} else {
148			size
149		};
150
151		// Round the clamped size to the next power of two.
152		//
153		// It returns the unchanged value if the value is already a power of two.
154		let power_of_two_size = clamped_size.next_power_of_two();
155
156		// Compute the number of trailing zeroes to get the order. We adjust it by the number of
157		// trailing zeroes in the minimum possible allocation.
158		let order = power_of_two_size.trailing_zeros() - MIN_POSSIBLE_ALLOCATION.trailing_zeros();
159
160		Ok(Self(order))
161	}
162
163	/// Returns the corresponding size in bytes for this order.
164	///
165	/// Note that it is always a power of two.
166	fn size(&self) -> u32 {
167		MIN_POSSIBLE_ALLOCATION << self.0
168	}
169
170	/// Extract the order as `u32`.
171	fn into_raw(self) -> u32 {
172		self.0
173	}
174}
175
/// A special magic value for a pointer in a link that denotes the end of the linked list.
const NIL_MARKER: u32 = u32::MAX;

/// A link between headers in the free list.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Link {
	/// Nil, denotes that there is no next element.
	Nil,
	/// Link to the next element represented as a pointer to a header.
	Ptr(u32),
}

impl Link {
	/// Decodes a raw `u32` into a link; `NIL_MARKER` maps to `Nil`, anything else to `Ptr`.
	fn from_raw(raw: u32) -> Self {
		match raw {
			NIL_MARKER => Self::Nil,
			ptr => Self::Ptr(ptr),
		}
	}

	/// Encodes this link back into its raw `u32` form.
	fn into_raw(self) -> u32 {
		if let Self::Ptr(ptr) = self {
			ptr
		} else {
			NIL_MARKER
		}
	}
}
206
207/// A header of an allocation.
208///
209/// The header is encoded in memory as follows.
210///
211/// ## Free header
212///
213/// ```ignore
214/// 64             32                  0
215//  +--------------+-------------------+
216/// |            0 | next element link |
217/// +--------------+-------------------+
218/// ```
219/// ## Occupied header
220/// ```ignore
221/// 64             32                  0
222//  +--------------+-------------------+
223/// |            1 |             order |
224/// +--------------+-------------------+
225/// ```
226#[derive(Clone, Debug, PartialEq, Eq)]
227enum Header {
228	/// A free header contains a link to the next element to form a free linked list.
229	Free(Link),
230	/// An occupied header has attached order to know in which free list we should put the
231	/// allocation upon deallocation.
232	Occupied(Order),
233}
234
235impl Header {
236	/// Reads a header from memory.
237	///
238	/// Returns an error if the `header_ptr` is out of bounds of the linear memory or if the read
239	/// header is corrupted (e.g. the order is incorrect).
240	fn read_from(memory: &impl Memory, header_ptr: u32) -> Result<Self, Error> {
241		let raw_header = memory.read_le_u64(header_ptr)?;
242
243		// Check if the header represents an occupied or free allocation and extract the header data
244		// by trimming (and discarding) the high bits.
245		let occupied = raw_header & 0x00000001_00000000 != 0;
246		let header_data = raw_header as u32;
247
248		Ok(if occupied {
249			Self::Occupied(Order::from_raw(header_data)?)
250		} else {
251			Self::Free(Link::from_raw(header_data))
252		})
253	}
254
255	/// Write out this header to memory.
256	///
257	/// Returns an error if the `header_ptr` is out of bounds of the linear memory.
258	fn write_into(&self, memory: &mut impl Memory, header_ptr: u32) -> Result<(), Error> {
259		let (header_data, occupied_mask) = match *self {
260			Self::Occupied(order) => (order.into_raw(), 0x00000001_00000000),
261			Self::Free(link) => (link.into_raw(), 0x00000000_00000000),
262		};
263		let raw_header = header_data as u64 | occupied_mask;
264		memory.write_le_u64(header_ptr, raw_header)?;
265		Ok(())
266	}
267
268	/// Returns the order of the allocation if this is an occupied header.
269	fn into_occupied(self) -> Option<Order> {
270		match self {
271			Self::Occupied(order) => Some(order),
272			_ => None,
273		}
274	}
275
276	/// Returns the link to the next element in the free list if this is a free header.
277	fn into_free(self) -> Option<Link> {
278		match self {
279			Self::Free(link) => Some(link),
280			_ => None,
281		}
282	}
283}
284
285/// This struct represents a collection of intrusive linked lists for each order.
286struct FreeLists {
287	heads: [Link; N_ORDERS],
288}
289
290impl FreeLists {
291	/// Creates the free empty lists.
292	fn new() -> Self {
293		Self { heads: [Link::Nil; N_ORDERS] }
294	}
295
296	/// Replaces a given link for the specified order and returns the old one.
297	fn replace(&mut self, order: Order, new: Link) -> Link {
298		let prev = self[order];
299		self[order] = new;
300		prev
301	}
302}
303
304impl Index<Order> for FreeLists {
305	type Output = Link;
306	fn index(&self, index: Order) -> &Link {
307		&self.heads[index.0 as usize]
308	}
309}
310
311impl IndexMut<Order> for FreeLists {
312	fn index_mut(&mut self, index: Order) -> &mut Link {
313		&mut self.heads[index.0 as usize]
314	}
315}
316
/// Memory allocation stats gathered during the lifetime of the allocator.
#[derive(Clone, Debug, Default)]
#[non_exhaustive]
pub struct AllocationStats {
	/// The current number of bytes allocated.
	///
	/// This represents how many bytes are allocated *right now*.
	/// Each allocation is accounted together with its 8-byte header.
	pub bytes_allocated: u32,

	/// The peak number of bytes ever allocated.
	///
	/// This is the maximum the `bytes_allocated` ever reached.
	pub bytes_allocated_peak: u32,

	/// The sum of every allocation ever made.
	///
	/// This increases every time a new allocation is made.
	/// Each allocation is accounted together with its 8-byte header.
	pub bytes_allocated_sum: u128,

	/// The amount of address space (in bytes) used by the allocator.
	///
	/// This is calculated as the difference between the allocator's bumper
	/// and the heap base.
	///
	/// Currently the bumper's only ever incremented, so this is simultaneously
	/// the current value as well as the peak value.
	pub address_space_used: u32,
}
345
346/// Convert the given `size` in bytes into the number of pages.
347///
348/// The returned number of pages is ensured to be big enough to hold memory with the given `size`.
349///
350/// Returns `None` if the number of pages to not fit into `u32`.
351fn pages_from_size(size: u64) -> Option<u32> {
352	u32::try_from((size + PAGE_SIZE as u64 - 1) / PAGE_SIZE as u64).ok()
353}
354
/// An implementation of freeing bump allocator.
///
/// Refer to the module-level documentation for further details.
pub struct FreeingBumpHeapAllocator {
	/// The (aligned) heap base this allocator was created with.
	original_heap_base: u32,
	/// The bump pointer: offset of the first byte never handed out yet.
	bumper: u32,
	/// Per-order intrusive free lists of previously deallocated chunks.
	free_lists: FreeLists,
	/// Set once any operation fails; all subsequent requests are then refused.
	poisoned: bool,
	/// Largest memory size (in bytes) observed so far; used to detect a shrinking memory.
	last_observed_memory_size: u64,
	/// Lifetime allocation statistics.
	stats: AllocationStats,
}

impl Drop for FreeingBumpHeapAllocator {
	fn drop(&mut self) {
		// Emit the final stats for diagnostics when the allocator goes away.
		log::debug!(target: LOG_TARGET, "allocator dropped: {:?}", self.stats)
	}
}
372
373impl FreeingBumpHeapAllocator {
374	/// Creates a new allocation heap which follows a freeing-bump strategy.
375	///
376	/// # Arguments
377	///
378	/// - `heap_base` - the offset from the beginning of the linear memory where the heap starts.
379	pub fn new(heap_base: u32) -> Self {
380		let aligned_heap_base = (heap_base + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT;
381
382		FreeingBumpHeapAllocator {
383			original_heap_base: aligned_heap_base,
384			bumper: aligned_heap_base,
385			free_lists: FreeLists::new(),
386			poisoned: false,
387			last_observed_memory_size: 0,
388			stats: AllocationStats::default(),
389		}
390	}
391
392	/// Gets requested number of bytes to allocate and returns a pointer.
393	/// The maximum size which can be allocated at once is 32 MiB.
394	/// There is no minimum size, but whatever size is passed into
395	/// this function is rounded to the next power of two. If the requested
396	/// size is below 8 bytes it will be rounded up to 8 bytes.
397	///
398	/// The identity or the type of the passed memory object does not matter. However, the size of
399	/// memory cannot shrink compared to the memory passed in previous invocations.
400	///
401	/// NOTE: Once the allocator has returned an error all subsequent requests will return an error.
402	///
403	/// # Arguments
404	///
405	/// - `mem` - a slice representing the linear memory on which this allocator operates.
406	/// - `size` - size in bytes of the allocation request
407	pub fn allocate(
408		&mut self,
409		mem: &mut impl Memory,
410		size: WordSize,
411	) -> Result<Pointer<u8>, Error> {
412		if self.poisoned {
413			return Err(error("the allocator has been poisoned"))
414		}
415
416		let bomb = PoisonBomb { poisoned: &mut self.poisoned };
417
418		Self::observe_memory_size(&mut self.last_observed_memory_size, mem)?;
419		let order = Order::from_size(size)?;
420
421		let header_ptr: u32 = match self.free_lists[order] {
422			Link::Ptr(header_ptr) => {
423				assert!(
424					u64::from(header_ptr + order.size() + HEADER_SIZE) <= mem.size(),
425					"Pointer is looked up in list of free entries, into which
426					only valid values are inserted; qed"
427				);
428
429				// Remove this header from the free list.
430				let next_free = Header::read_from(mem, header_ptr)?
431					.into_free()
432					.ok_or_else(|| error("free list points to a occupied header"))?;
433				self.free_lists[order] = next_free;
434
435				header_ptr
436			},
437			Link::Nil => {
438				// Corresponding free list is empty. Allocate a new item.
439				Self::bump(&mut self.bumper, order.size() + HEADER_SIZE, mem)?
440			},
441		};
442
443		// Write the order in the occupied header.
444		Header::Occupied(order).write_into(mem, header_ptr)?;
445
446		self.stats.bytes_allocated += order.size() + HEADER_SIZE;
447		self.stats.bytes_allocated_sum += u128::from(order.size() + HEADER_SIZE);
448		self.stats.bytes_allocated_peak =
449			max(self.stats.bytes_allocated_peak, self.stats.bytes_allocated);
450		self.stats.address_space_used = self.bumper - self.original_heap_base;
451
452		log::trace!(target: LOG_TARGET, "after allocation: {:?}", self.stats);
453
454		bomb.disarm();
455		Ok(Pointer::new(header_ptr + HEADER_SIZE))
456	}
457
458	/// Deallocates the space which was allocated for a pointer.
459	///
460	/// The identity or the type of the passed memory object does not matter. However, the size of
461	/// memory cannot shrink compared to the memory passed in previous invocations.
462	///
463	/// NOTE: Once the allocator has returned an error all subsequent requests will return an error.
464	///
465	/// # Arguments
466	///
467	/// - `mem` - a slice representing the linear memory on which this allocator operates.
468	/// - `ptr` - pointer to the allocated chunk
469	pub fn deallocate(&mut self, mem: &mut impl Memory, ptr: Pointer<u8>) -> Result<(), Error> {
470		if self.poisoned {
471			return Err(error("the allocator has been poisoned"))
472		}
473
474		let bomb = PoisonBomb { poisoned: &mut self.poisoned };
475
476		Self::observe_memory_size(&mut self.last_observed_memory_size, mem)?;
477
478		let header_ptr = u32::from(ptr)
479			.checked_sub(HEADER_SIZE)
480			.ok_or_else(|| error("Invalid pointer for deallocation"))?;
481
482		let order = Header::read_from(mem, header_ptr)?
483			.into_occupied()
484			.ok_or_else(|| error("the allocation points to an empty header"))?;
485
486		// Update the just freed header and knit it back to the free list.
487		let prev_head = self.free_lists.replace(order, Link::Ptr(header_ptr));
488		Header::Free(prev_head).write_into(mem, header_ptr)?;
489
490		self.stats.bytes_allocated = self
491			.stats
492			.bytes_allocated
493			.checked_sub(order.size() + HEADER_SIZE)
494			.ok_or_else(|| error("underflow of the currently allocated bytes count"))?;
495
496		log::trace!("after deallocation: {:?}", self.stats);
497
498		bomb.disarm();
499		Ok(())
500	}
501
502	/// Returns the allocation stats for this allocator.
503	pub fn stats(&self) -> AllocationStats {
504		self.stats.clone()
505	}
506
507	/// Increases the `bumper` by `size`.
508	///
509	/// Returns the `bumper` from before the increase. Returns an `Error::AllocatorOutOfSpace` if
510	/// the operation would exhaust the heap.
511	fn bump(bumper: &mut u32, size: u32, memory: &mut impl Memory) -> Result<u32, Error> {
512		let required_size = u64::from(*bumper) + u64::from(size);
513
514		if required_size > memory.size() {
515			let required_pages =
516				pages_from_size(required_size).ok_or_else(|| Error::AllocatorOutOfSpace)?;
517
518			let current_pages = memory.pages();
519			let max_pages = memory.max_pages().unwrap_or(MAX_WASM_PAGES);
520			debug_assert!(
521				current_pages < required_pages,
522				"current pages {current_pages} < required pages {required_pages}"
523			);
524
525			if current_pages >= max_pages {
526				log::debug!(
527					target: LOG_TARGET,
528					"Wasm pages ({current_pages}) are already at the maximum.",
529				);
530
531				return Err(Error::AllocatorOutOfSpace)
532			} else if required_pages > max_pages {
533				log::debug!(
534					target: LOG_TARGET,
535					"Failed to grow memory from {current_pages} pages to at least {required_pages}\
536						 pages due to the maximum limit of {max_pages} pages",
537				);
538				return Err(Error::AllocatorOutOfSpace)
539			}
540
541			// Ideally we want to double our current number of pages,
542			// as long as it's less than the absolute maximum we can have.
543			let next_pages = min(current_pages * 2, max_pages);
544			// ...but if even more pages are required then try to allocate that many.
545			let next_pages = max(next_pages, required_pages);
546
547			if memory.grow(next_pages - current_pages).is_err() {
548				log::error!(
549					target: LOG_TARGET,
550					"Failed to grow memory from {current_pages} pages to {next_pages} pages",
551				);
552
553				return Err(Error::AllocatorOutOfSpace)
554			}
555
556			debug_assert_eq!(memory.pages(), next_pages, "Number of pages should have increased!");
557		}
558
559		let res = *bumper;
560		*bumper += size;
561		Ok(res)
562	}
563
564	fn observe_memory_size(
565		last_observed_memory_size: &mut u64,
566		mem: &mut impl Memory,
567	) -> Result<(), Error> {
568		if mem.size() < *last_observed_memory_size {
569			return Err(Error::MemoryShrinked)
570		}
571		*last_observed_memory_size = mem.size();
572		Ok(())
573	}
574}
575
576/// A trait for abstraction of accesses to a wasm linear memory. Used to read or modify the
577/// allocation prefixes.
578///
579/// A wasm linear memory behaves similarly to a vector. The address space doesn't have holes and is
580/// accessible up to the reported size.
581///
582/// The linear memory can grow in size with the wasm page granularity (64KiB), but it cannot shrink.
583trait MemoryExt: Memory {
584	/// Read a u64 from the heap in LE form. Returns an error if any of the bytes read are out of
585	/// bounds.
586	fn read_le_u64(&self, ptr: u32) -> Result<u64, Error> {
587		self.with_access(|memory| {
588			let range =
589				heap_range(ptr, 8, memory.len()).ok_or_else(|| error("read out of heap bounds"))?;
590			let bytes = memory[range]
591				.try_into()
592				.expect("[u8] slice of length 8 must be convertible to [u8; 8]");
593			Ok(u64::from_le_bytes(bytes))
594		})
595	}
596
597	/// Write a u64 to the heap in LE form. Returns an error if any of the bytes written are out of
598	/// bounds.
599	fn write_le_u64(&mut self, ptr: u32, val: u64) -> Result<(), Error> {
600		self.with_access_mut(|memory| {
601			let range = heap_range(ptr, 8, memory.len())
602				.ok_or_else(|| error("write out of heap bounds"))?;
603			let bytes = val.to_le_bytes();
604			memory[range].copy_from_slice(&bytes[..]);
605			Ok(())
606		})
607	}
608
609	/// Returns the full size of the memory in bytes.
610	fn size(&self) -> u64 {
611		debug_assert!(self.pages() <= MAX_WASM_PAGES);
612
613		self.pages() as u64 * PAGE_SIZE as u64
614	}
615}
616
617impl<T: Memory> MemoryExt for T {}
618
/// Computes the `start..end` byte range for an access of `length` bytes at `offset`.
///
/// Returns `None` when `offset + length` overflows `u32` or runs past `heap_len`.
fn heap_range(offset: u32, length: u32, heap_len: usize) -> Option<Range<usize>> {
	let end = offset.checked_add(length)? as usize;
	if end > heap_len {
		return None
	}
	Some(offset as usize..end)
}
628
/// A guard that flags the allocator as poisoned on drop unless explicitly disarmed.
struct PoisonBomb<'a> {
	poisoned: &'a mut bool,
}

impl PoisonBomb<'_> {
	/// Defuses the bomb: consumes the guard without running its destructor.
	fn disarm(self) {
		mem::forget(self)
	}
}

impl Drop for PoisonBomb<'_> {
	fn drop(&mut self) {
		*self.poisoned = true;
	}
}
645
646#[cfg(test)]
647mod tests {
648	use super::*;
649
	/// Makes a pointer out of the given address.
	///
	/// Convenience wrapper to keep the assertions below readable.
	fn to_pointer(address: u32) -> Pointer<u8> {
		Pointer::new(address)
	}
654
	/// A test double for the `Memory` trait backed by a plain `Vec<u8>`.
	#[derive(Debug)]
	struct MemoryInstance {
		// The linear memory contents.
		data: Vec<u8>,
		// Upper bound (in wasm pages) that `grow` is allowed to reach.
		max_wasm_pages: u32,
	}
660
	impl MemoryInstance {
		/// Creates an instance with `pages` wasm pages of zeroed memory and the default
		/// `MAX_WASM_PAGES` growth limit.
		fn with_pages(pages: u32) -> Self {
			Self { data: vec![0; (pages * PAGE_SIZE) as usize], max_wasm_pages: MAX_WASM_PAGES }
		}

		/// Sets the page limit enforced by `grow`.
		fn set_max_wasm_pages(&mut self, max_pages: u32) {
			self.max_wasm_pages = max_pages;
		}
	}
670
671	impl Memory for MemoryInstance {
672		fn with_access<R>(&self, run: impl FnOnce(&[u8]) -> R) -> R {
673			run(&self.data)
674		}
675
676		fn with_access_mut<R>(&mut self, run: impl FnOnce(&mut [u8]) -> R) -> R {
677			run(&mut self.data)
678		}
679
680		fn pages(&self) -> u32 {
681			pages_from_size(self.data.len() as u64).unwrap()
682		}
683
684		fn max_pages(&self) -> Option<u32> {
685			Some(self.max_wasm_pages)
686		}
687
688		fn grow(&mut self, pages: u32) -> Result<(), ()> {
689			if self.pages() + pages > self.max_wasm_pages {
690				Err(())
691			} else {
692				self.data.resize(((self.pages() + pages) * PAGE_SIZE) as usize, 0);
693				Ok(())
694			}
695		}
696	}
697
	#[test]
	fn test_pages_from_size() {
		// Zero bytes need zero pages; any non-zero size rounds up to whole 64 KiB pages.
		assert_eq!(pages_from_size(0).unwrap(), 0);
		assert_eq!(pages_from_size(1).unwrap(), 1);
		assert_eq!(pages_from_size(65536).unwrap(), 1);
		assert_eq!(pages_from_size(65536 + 1).unwrap(), 2);
		assert_eq!(pages_from_size(2 * 65536).unwrap(), 2);
		assert_eq!(pages_from_size(2 * 65536 + 1).unwrap(), 3);
	}
707
	#[test]
	fn should_allocate_properly() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(0);

		// when
		// the 1-byte request is rounded up to the 8-byte minimum chunk
		let ptr = heap.allocate(&mut mem, 1).unwrap();

		// then
		// returned pointer must start right after `HEADER_SIZE`
		assert_eq!(ptr, to_pointer(HEADER_SIZE));
	}
721
	#[test]
	fn should_always_align_pointers_to_multiples_of_8() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		// a heap base of 13 is deliberately misaligned; `new` rounds it up to 16
		let mut heap = FreeingBumpHeapAllocator::new(13);

		// when
		let ptr = heap.allocate(&mut mem, 1).unwrap();

		// then
		// the pointer must start at the next multiple of 8 from 13
		// + the prefix of 8 bytes.
		assert_eq!(ptr, to_pointer(24));
	}
736
	#[test]
	fn should_increment_pointers_properly() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(0);

		// when
		let ptr1 = heap.allocate(&mut mem, 1).unwrap();
		let ptr2 = heap.allocate(&mut mem, 9).unwrap();
		let ptr3 = heap.allocate(&mut mem, 1).unwrap();

		// then
		// a prefix of 8 bytes is prepended to each pointer
		assert_eq!(ptr1, to_pointer(HEADER_SIZE));

		// the prefix of 8 bytes + the content of ptr1 padded to the lowest possible
		// item size of 8 bytes + the prefix of ptr1
		assert_eq!(ptr2, to_pointer(24));

		// ptr2 + its content of 16 bytes (9 rounded up to a power of two) + the prefix of 8 bytes
		assert_eq!(ptr3, to_pointer(24 + 16 + HEADER_SIZE));
	}
759
	#[test]
	fn should_free_properly() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(0);
		let ptr1 = heap.allocate(&mut mem, 1).unwrap();
		// the prefix of 8 bytes is prepended to the pointer
		assert_eq!(ptr1, to_pointer(HEADER_SIZE));

		let ptr2 = heap.allocate(&mut mem, 1).unwrap();
		// the prefix of 8 bytes + the content of ptr 1 is prepended to the pointer
		assert_eq!(ptr2, to_pointer(24));

		// when
		heap.deallocate(&mut mem, ptr2).unwrap();

		// then
		// then the heads table should contain a pointer to the
		// prefix of ptr2 in the leftmost entry (order 0, i.e. the 8-byte bucket)
		assert_eq!(heap.free_lists.heads[0], Link::Ptr(u32::from(ptr2) - HEADER_SIZE));
	}
781
	#[test]
	fn should_deallocate_and_reallocate_properly() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		// the heap base of 13 below gets aligned up to 16
		let padded_offset = 16;
		let mut heap = FreeingBumpHeapAllocator::new(13);

		let ptr1 = heap.allocate(&mut mem, 1).unwrap();
		// the prefix of 8 bytes is prepended to the pointer
		assert_eq!(ptr1, to_pointer(padded_offset + HEADER_SIZE));

		let ptr2 = heap.allocate(&mut mem, 9).unwrap();
		// the padded_offset + the previously allocated ptr (8 bytes prefix +
		// 8 bytes content) + the prefix of 8 bytes which is prepended to the
		// current pointer
		assert_eq!(ptr2, to_pointer(padded_offset + 16 + HEADER_SIZE));

		// when
		heap.deallocate(&mut mem, ptr2).unwrap();
		let ptr3 = heap.allocate(&mut mem, 9).unwrap();

		// then
		// should have re-allocated the freed chunk instead of bumping further
		assert_eq!(ptr3, to_pointer(padded_offset + 16 + HEADER_SIZE));
		assert_eq!(heap.free_lists.heads, [Link::Nil; N_ORDERS]);
	}
808
	#[test]
	fn should_build_linked_list_of_free_areas_properly() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(0);

		let ptr1 = heap.allocate(&mut mem, 8).unwrap();
		let ptr2 = heap.allocate(&mut mem, 8).unwrap();
		let ptr3 = heap.allocate(&mut mem, 8).unwrap();

		// when
		heap.deallocate(&mut mem, ptr1).unwrap();
		heap.deallocate(&mut mem, ptr2).unwrap();
		heap.deallocate(&mut mem, ptr3).unwrap();

		// then
		// the free list is LIFO: the most recently freed chunk sits at the head
		assert_eq!(heap.free_lists.heads[0], Link::Ptr(u32::from(ptr3) - HEADER_SIZE));

		let ptr4 = heap.allocate(&mut mem, 8).unwrap();
		assert_eq!(ptr4, ptr3);

		// popping the head exposes the next free chunk (ptr2's)
		assert_eq!(heap.free_lists.heads[0], Link::Ptr(u32::from(ptr2) - HEADER_SIZE));
	}
832
	#[test]
	fn should_not_allocate_if_too_large() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		mem.set_max_wasm_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(13);

		// when
		// the request rounds up to a full-page chunk which, together with its header
		// and the aligned heap base, cannot fit in a memory capped at one page
		let ptr = heap.allocate(&mut mem, PAGE_SIZE - 13);

		// then
		assert_eq!(Error::AllocatorOutOfSpace, ptr.unwrap_err());
	}
846
	#[test]
	fn should_not_allocate_if_full() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		mem.set_max_wasm_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(0);
		// the first allocation plus its header consume exactly half of the page
		let ptr1 = heap.allocate(&mut mem, (PAGE_SIZE / 2) - HEADER_SIZE).unwrap();
		assert_eq!(ptr1, to_pointer(HEADER_SIZE));

		// when
		let ptr2 = heap.allocate(&mut mem, PAGE_SIZE / 2);

		// then
		// there is no room for another half page incl. its 8 byte prefix
		match ptr2.unwrap_err() {
			Error::AllocatorOutOfSpace => {},
			e => panic!("Expected allocator out of space error, got: {:?}", e),
		}
	}
866
	#[test]
	fn should_allocate_max_possible_allocation_size() {
		// given
		// start with a single page; the allocator grows the memory on demand
		// up to the default `MAX_WASM_PAGES` limit
		let mut mem = MemoryInstance::with_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(0);

		// when
		let ptr = heap.allocate(&mut mem, MAX_POSSIBLE_ALLOCATION).unwrap();

		// then
		assert_eq!(ptr, to_pointer(HEADER_SIZE));
	}
879
	#[test]
	fn should_not_allocate_if_requested_size_too_large() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(0);

		// when
		// one byte above the allocation cap must be rejected
		let ptr = heap.allocate(&mut mem, MAX_POSSIBLE_ALLOCATION + 1);

		// then
		assert_eq!(Error::RequestedAllocationTooLarge, ptr.unwrap_err());
	}
892
	#[test]
	fn should_return_error_when_bumper_greater_than_heap_size() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		mem.set_max_wasm_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(0);

		let mut ptrs = Vec::new();
		// each 32 byte allocation occupies 40 bytes incl. its header, so
		// PAGE_SIZE / 40 allocations consume PAGE_SIZE - 16 bytes of the page
		for _ in 0..(PAGE_SIZE as usize / 40) {
			ptrs.push(heap.allocate(&mut mem, 32).expect("Allocate 32 byte"));
		}

		assert_eq!(heap.stats.bytes_allocated, PAGE_SIZE - 16);
		assert_eq!(heap.bumper, PAGE_SIZE - 16);

		ptrs.into_iter()
			.for_each(|ptr| heap.deallocate(&mut mem, ptr).expect("Deallocate 32 byte"));

		// freeing everything zeroes the live counter but never moves the bumper back
		assert_eq!(heap.stats.bytes_allocated, 0);
		assert_eq!(heap.stats.bytes_allocated_peak, PAGE_SIZE - 16);
		assert_eq!(heap.bumper, PAGE_SIZE - 16);

		// Allocate another 8 byte to use the full heap.
		heap.allocate(&mut mem, 8).expect("Allocate 8 byte");

		// when
		// the `bumper` value is equal to `size` here and any
		// further allocation which would increment the bumper must fail.
		// we try to allocate 8 bytes here, which will increment the
		// bumper since no 8 byte item has been freed before.
		assert_eq!(heap.bumper as u64, mem.size());
		let ptr = heap.allocate(&mut mem, 8);

		// then
		assert_eq!(Error::AllocatorOutOfSpace, ptr.unwrap_err());
	}
929
	#[test]
	fn should_include_prefixes_in_total_heap_size() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(1);

		// when
		// an item size of 16 must be used then (9 rounded up to the next power of two)
		heap.allocate(&mut mem, 9).unwrap();

		// then
		// the stats count the 8-byte header in addition to the chunk itself
		assert_eq!(heap.stats.bytes_allocated, HEADER_SIZE + 16);
	}
943
#[test]
fn should_calculate_total_heap_size_to_zero() {
	// Allocating and immediately freeing must leave zero bytes accounted.
	let mut mem = MemoryInstance::with_pages(1);
	let mut heap = FreeingBumpHeapAllocator::new(13);

	let ptr = heap.allocate(&mut mem, 42).unwrap();
	// The heap base 13 is aligned up to 16, and the pointer sits right after
	// the item's header.
	assert_eq!(ptr, to_pointer(16 + HEADER_SIZE));
	heap.deallocate(&mut mem, ptr).unwrap();

	assert_eq!(heap.stats.bytes_allocated, 0);
}
958
#[test]
fn should_calculate_total_size_of_zero() {
	// Repeated allocate/free cycles must always net out to zero bytes.
	let mut mem = MemoryInstance::with_pages(1);
	let mut heap = FreeingBumpHeapAllocator::new(19);

	for _ in 0..9 {
		let ptr = heap.allocate(&mut mem, 42).unwrap();
		heap.deallocate(&mut mem, ptr).unwrap();
	}

	assert_eq!(heap.stats.bytes_allocated, 0);
}
974
#[test]
fn should_read_and_write_u64_correctly() {
	// A little-endian u64 written at an offset must read back unchanged.
	let mut mem = MemoryInstance::with_pages(1);

	mem.write_le_u64(40, 4480113).unwrap();
	let value = MemoryExt::read_le_u64(&mut mem, 40).unwrap();

	assert_eq!(value, 4480113);
}
987
#[test]
fn should_get_item_size_from_order() {
	// Order 0 corresponds to the smallest item size, 8 bytes.
	let order = Order::from_raw(0).unwrap();

	assert_eq!(order.size(), 8);
}
999
#[test]
fn should_get_max_item_size_from_index() {
	// Order 22 is the highest order and maps to the maximum allocation size.
	let order = Order::from_raw(22).unwrap();

	assert_eq!(order.size() as u32, MAX_POSSIBLE_ALLOCATION);
}
1011
#[test]
fn deallocate_needs_to_maintain_linked_list() {
	let mut mem = MemoryInstance::with_pages(1);
	let mut heap = FreeingBumpHeapAllocator::new(0);

	// Allocate four chunks and free them in reverse order, so the free list
	// for this order ends up yielding them first-to-last again.
	let ptrs: Vec<_> = (0..4).map(|_| heap.allocate(&mut mem, 8).unwrap()).collect();
	for ptr in ptrs.iter().rev() {
		heap.deallocate(&mut mem, *ptr).unwrap();
	}

	// Re-allocating must walk the free list and hand back exactly the same
	// pointers, in the original order.
	let reallocated: Vec<_> = (0..4).map(|_| heap.allocate(&mut mem, 8).unwrap()).collect();
	assert_eq!(ptrs, reallocated);
}
1025
#[test]
fn header_read_write() {
	// Every header variant must survive a write/read round-trip through memory.
	fn roundtrip(header: Header) {
		let mut memory = MemoryInstance::with_pages(1);
		header.write_into(&mut memory, 0).unwrap();

		assert_eq!(Header::read_from(&memory, 0).unwrap(), header);
	}

	roundtrip(Header::Occupied(Order(0)));
	roundtrip(Header::Occupied(Order(1)));
	roundtrip(Header::Free(Link::Nil));
	roundtrip(Header::Free(Link::Ptr(0)));
	roundtrip(Header::Free(Link::Ptr(4)));
}
1042
#[test]
fn poison_oom() {
	// A failed allocation poisons the allocator: afterwards even deallocation
	// of previously returned pointers must error.
	let mut mem = MemoryInstance::with_pages(1);
	mem.set_max_wasm_pages(1);
	let mut heap = FreeingBumpHeapAllocator::new(0);

	let alloc_ptr = heap.allocate(&mut mem, PAGE_SIZE / 2).unwrap();
	assert_eq!(heap.allocate(&mut mem, PAGE_SIZE).unwrap_err(), Error::AllocatorOutOfSpace);

	assert!(heap.poisoned);
	assert!(heap.deallocate(&mut mem, alloc_ptr).is_err());
}
1059
#[test]
fn test_n_orders() {
	// `N_ORDERS` must span exactly the range from the minimum to the maximum
	// possible allocation, doubling the item size with each order.
	assert_eq!(
		MAX_POSSIBLE_ALLOCATION,
		MIN_POSSIBLE_ALLOCATION * 2u32.pow(N_ORDERS as u32 - 1),
	);
}
1068
#[test]
fn accepts_growing_memory() {
	// The allocator must keep serving requests after the backing memory grows.
	let mut mem = MemoryInstance::with_pages(1);
	let mut heap = FreeingBumpHeapAllocator::new(0);

	// Two half-page allocations fill the first page (plus headers).
	for _ in 0..2 {
		heap.allocate(&mut mem, PAGE_SIZE / 2).unwrap();
	}

	mem.grow(1).unwrap();

	heap.allocate(&mut mem, PAGE_SIZE / 2).unwrap();
}
1081
#[test]
fn doesnt_accept_shrinking_memory() {
	// Shrinking the backing memory behind the allocator's back must be
	// detected and reported as `MemoryShrinked`.
	let mut mem = MemoryInstance::with_pages(2);
	let mut heap = FreeingBumpHeapAllocator::new(0);

	heap.allocate(&mut mem, PAGE_SIZE / 2).unwrap();

	// Drop the memory from two pages down to one.
	mem.data.truncate(PAGE_SIZE as usize);

	let err = heap.allocate(&mut mem, PAGE_SIZE / 2).unwrap_err();
	assert!(matches!(err, Error::MemoryShrinked));
}
1096
#[test]
fn should_grow_memory_when_running_out_of_memory() {
	// A request larger than the current memory must make the allocator grow
	// the memory instead of failing.
	let mut mem = MemoryInstance::with_pages(1);
	let mut heap = FreeingBumpHeapAllocator::new(0);

	assert_eq!(mem.pages(), 1);

	// Two pages of payload plus the header do not fit in two pages, so the
	// memory ends up at three pages.
	heap.allocate(&mut mem, PAGE_SIZE * 2).unwrap();

	assert_eq!(mem.pages(), 3);
}
1108}