tp_allocator/
freeing_bump.rs

1// This file is part of Tetcore.
2
3// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: Apache-2.0
5
6// Licensed under the Apache License, Version 2.0 (the "License");
7// you may not use this file except in compliance with the License.
8// You may obtain a copy of the License at
9//
10// 	http://www.apache.org/licenses/LICENSE-2.0
11//
12// Unless required by applicable law or agreed to in writing, software
13// distributed under the License is distributed on an "AS IS" BASIS,
14// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15// See the License for the specific language governing permissions and
16// limitations under the License.
17
18//! This module implements a freeing-bump allocator.
19//!
20//! The heap is a continuous linear memory and chunks are allocated using a bump allocator.
21//!
22//! ```ignore
23//! +-------------+-------------------------------------------------+
24//! | <allocated> | <unallocated>                                   |
25//! +-------------+-------------------------------------------------+
26//!               ^
27//!               |_ bumper
28//! ```
29//!
//! Only allocations with sizes of a power of two can be served. If the incoming request has a non
//! power of two size it is rounded up to the nearest power of two. The exponent of that power of
//! two is referred to as **an order**.
33//!
//! Each allocation has a header immediately preceding it. The header is always 8 bytes and can
//! be of two types: free and occupied.
36//!
//! To implement freeing we maintain a linked list for each order. The maximum supported
//! allocation size is capped, therefore the number of orders — and thus the number of linked
//! lists — is limited as well. Currently, the maximum size of an allocation is 16 MiB.
40//!
41//! When the allocator serves an allocation request it first checks the linked list for the respective
42//! order. If it doesn't have any free chunks, the allocator requests memory from the bump allocator.
43//! In any case the order is stored in the header of the allocation.
44//!
45//! Upon deallocation we get the order of the allocation from its header and then add that
46//! allocation to the linked list for the respective order.
47
48use crate::Error;
49use tetcore_std::{mem, convert::{TryFrom, TryInto}, ops::{Range, Index, IndexMut}};
50use tetcore_wasm_interface::{Pointer, WordSize};
51
/// The minimal alignment guaranteed by this allocator.
///
/// The alignment of 8 is chosen because it is the maximum size of a primitive type supported by the
/// target version of wasm32: i64's natural alignment is 8.
const ALIGNMENT: u32 = 8;

// Each pointer returned by `allocate` is prefixed with 8 bytes (see `Header`), which identify
// the list index (i.e. the order) to which the allocation belongs.
const HEADER_SIZE: u32 = 8;
61
/// Create an allocator error wrapping the given static message.
fn error(msg: &'static str) -> Error {
	Error::Other(msg)
}
66
/// A custom "trace" implementation that is only activated when `feature = std`.
///
/// Uses `wasm-heap` as the default log target.
macro_rules! trace {
	( $( $args:expr ),+ ) => {
		tetcore_std::if_std! {
			log::trace!(target: "wasm-heap", $( $args ),+);
		}
	}
}
77
// The minimum possible allocation size is chosen to be 8 bytes because in that case we would have
// an easier time providing the guaranteed alignment of 8.
//
// The maximum possible allocation size was chosen rather arbitrarily. 16 MiB should be enough for
// everybody.
//
// N_ORDERS - represents the number of orders supported.
//
// This number corresponds to the number of powers between the minimum possible allocation and
// maximum possible allocation, or: 2^3...2^24 (both ends inclusive, hence 22).
const N_ORDERS: usize = 22;
const MAX_POSSIBLE_ALLOCATION: u32 = 16777216; // 2^24 bytes, 16 MiB
const MIN_POSSIBLE_ALLOCATION: u32 = 8; // 2^3 bytes, 8 bytes
91
/// The exponent for the power of two sized block adjusted to the minimum size.
///
/// This way, if `MIN_POSSIBLE_ALLOCATION == 8`, we would get:
///
/// power_of_two_size | order
/// 8                 | 0
/// 16                | 1
/// 32                | 2
/// 64                | 3
/// ...
/// 16777216          | 21
///
/// i.e. each subsequent order doubles the block size, up to the maximum order `N_ORDERS - 1`.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct Order(u32);
107
108impl Order {
109	/// Create `Order` object from a raw order.
110	///
111	/// Returns `Err` if it is greater than the maximum supported order.
112	fn from_raw(order: u32) -> Result<Self, Error> {
113		if order < N_ORDERS as u32 {
114			Ok(Self(order))
115		} else {
116			Err(error("invalid order"))
117		}
118	}
119
120	/// Compute the order by the given size
121	///
122	/// The size is clamped, so that the following holds:
123	///
124	/// `MIN_POSSIBLE_ALLOCATION <= size <= MAX_POSSIBLE_ALLOCATION`
125	fn from_size(size: u32) -> Result<Self, Error> {
126		let clamped_size = if size > MAX_POSSIBLE_ALLOCATION {
127			return Err(Error::RequestedAllocationTooLarge);
128		} else if size < MIN_POSSIBLE_ALLOCATION {
129			MIN_POSSIBLE_ALLOCATION
130		} else {
131			size
132		};
133
134		// Round the clamped size to the next power of two.
135		//
136		// It returns the unchanged value if the value is already a power of two.
137		let power_of_two_size = clamped_size.next_power_of_two();
138
139		// Compute the number of trailing zeroes to get the order. We adjust it by the number of
140		// trailing zeroes in the minimum possible allocation.
141		let order = power_of_two_size.trailing_zeros() - MIN_POSSIBLE_ALLOCATION.trailing_zeros();
142
143		Ok(Self(order))
144	}
145
146	/// Returns the corresponding size in bytes for this order.
147	///
148	/// Note that it is always a power of two.
149	fn size(&self) -> u32 {
150		MIN_POSSIBLE_ALLOCATION << self.0
151	}
152
153	/// Extract the order as `u32`.
154	fn into_raw(self) -> u32 {
155		self.0
156	}
157}
158
/// A special magic value for a pointer in a link that denotes the end of the linked list.
const NIL_MARKER: u32 = u32::max_value();

/// A link between headers in the free list.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Link {
	/// Nil, denotes that there is no next element.
	Nil,
	/// Link to the next element represented as a pointer to a header.
	Ptr(u32),
}

impl Link {
	/// Decode a raw `u32` into a link: the `NIL_MARKER` value maps to `Nil`, every
	/// other value is interpreted as a pointer to a header.
	fn from_raw(raw: u32) -> Self {
		match raw {
			NIL_MARKER => Self::Nil,
			ptr => Self::Ptr(ptr),
		}
	}

	/// Encode this link back into its raw `u32` representation.
	fn into_raw(self) -> u32 {
		if let Self::Ptr(ptr) = self {
			ptr
		} else {
			NIL_MARKER
		}
	}
}
189
/// A header of an allocation.
///
/// The header is encoded in memory as follows.
///
/// ## Free header
///
/// ```ignore
/// 64             32                  0
/// +--------------+-------------------+
/// |            0 | next element link |
/// +--------------+-------------------+
/// ```
///
/// ## Occupied header
///
/// ```ignore
/// 64             32                  0
/// +--------------+-------------------+
/// |            1 |             order |
/// +--------------+-------------------+
/// ```
#[derive(Clone, Debug, PartialEq, Eq)]
enum Header {
	/// A free header contains a link to the next element to form a free linked list.
	Free(Link),
	/// An occupied header has attached order to know in which free list we should put the
	/// allocation upon deallocation.
	Occupied(Order),
}
219
220impl Header {
221	/// Reads a header from memory.
222	///
223	/// Returns an error if the `header_ptr` is out of bounds of the linear memory or if the read
224	/// header is corrupted (e.g. the order is incorrect).
225	fn read_from<M: Memory + ?Sized>(memory: &M, header_ptr: u32) -> Result<Self, Error> {
226		let raw_header = memory.read_le_u64(header_ptr)?;
227
228		// Check if the header represents an occupied or free allocation and extract the header data
229		// by trimming (and discarding) the high bits.
230		let occupied = raw_header & 0x00000001_00000000 != 0;
231		let header_data = raw_header as u32;
232
233		Ok(if occupied {
234			Self::Occupied(Order::from_raw(header_data)?)
235		} else {
236			Self::Free(Link::from_raw(header_data))
237		})
238	}
239
240	/// Write out this header to memory.
241	///
242	/// Returns an error if the `header_ptr` is out of bounds of the linear memory.
243	fn write_into<M: Memory + ?Sized>(&self, memory: &mut M, header_ptr: u32) -> Result<(), Error> {
244		let (header_data, occupied_mask) = match *self {
245			Self::Occupied(order) => (order.into_raw(), 0x00000001_00000000),
246			Self::Free(link) => (link.into_raw(), 0x00000000_00000000),
247		};
248		let raw_header = header_data as u64 | occupied_mask;
249		memory.write_le_u64(header_ptr, raw_header)?;
250		Ok(())
251	}
252
253	/// Returns the order of the allocation if this is an occupied header.
254	fn into_occupied(self) -> Option<Order> {
255		match self {
256			Self::Occupied(order) => Some(order),
257			_ => None,
258		}
259	}
260
261	/// Returns the link to the next element in the free list if this is a free header.
262	fn into_free(self) -> Option<Link> {
263		match self {
264			Self::Free(link) => Some(link),
265			_ => None,
266		}
267	}
268}
269
/// This struct represents a collection of intrusive linked lists for each order.
struct FreeLists {
	// The head link of the free list for each order; `Link::Nil` when that list is empty.
	heads: [Link; N_ORDERS],
}
274
275impl FreeLists {
276	/// Creates the free empty lists.
277	fn new() -> Self {
278		Self {
279			heads: [Link::Nil; N_ORDERS]
280		}
281	}
282
283	/// Replaces a given link for the specified order and returns the old one.
284	fn replace(&mut self, order: Order, new: Link) -> Link {
285		let prev = self[order];
286		self[order] = new;
287		prev
288	}
289}
290
impl Index<Order> for FreeLists {
	type Output = Link;
	// Returns the head link of the free list for the given order. The index is always in
	// bounds since a valid `Order` is `< N_ORDERS` by construction.
	fn index(&self, index: Order) -> &Link {
		&self.heads[index.0 as usize]
	}
}
297
impl IndexMut<Order> for FreeLists {
	// Mutable counterpart of `Index`; same in-bounds guarantee applies.
	fn index_mut(&mut self, index: Order) -> &mut Link {
		&mut self.heads[index.0 as usize]
	}
}
303
/// An implementation of freeing bump allocator.
///
/// Refer to the module-level documentation for further details.
pub struct FreeingBumpHeapAllocator {
	// Offset of the first unallocated byte; only ever grows (bump pointer).
	bumper: u32,
	// Heads of the intrusive free lists, one per order.
	free_lists: FreeLists,
	// Number of bytes currently allocated, including the 8-byte headers.
	total_size: u32,
	// Set once any operation fails; all subsequent operations are then refused.
	poisoned: bool,
}
313
314impl FreeingBumpHeapAllocator {
315	/// Creates a new allocation heap which follows a freeing-bump strategy.
316	///
317	/// # Arguments
318	///
319	/// - `heap_base` - the offset from the beginning of the linear memory where the heap starts.
320	pub fn new(heap_base: u32) -> Self {
321		let aligned_heap_base = (heap_base + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT;
322
323		FreeingBumpHeapAllocator {
324			bumper: aligned_heap_base,
325			free_lists: FreeLists::new(),
326			total_size: 0,
327			poisoned: false,
328		}
329	}
330
331	/// Gets requested number of bytes to allocate and returns a pointer.
332	/// The maximum size which can be allocated at once is 16 MiB.
333	/// There is no minimum size, but whatever size is passed into
334	/// this function is rounded to the next power of two. If the requested
335	/// size is below 8 bytes it will be rounded up to 8 bytes.
336	///
337	/// NOTE: Once the allocator has returned an error all subsequent requests will return an error.
338	///
339	/// # Arguments
340	///
341	/// - `mem` - a slice representing the linear memory on which this allocator operates.
342	/// - `size` - size in bytes of the allocation request
343	pub fn allocate<M: Memory + ?Sized>(
344		&mut self,
345		mem: &mut M,
346		size: WordSize,
347	) -> Result<Pointer<u8>, Error> {
348		if self.poisoned {
349			return Err(error("the allocator has been poisoned"))
350		}
351
352		let bomb = PoisonBomb { poisoned: &mut self.poisoned };
353		let order = Order::from_size(size)?;
354
355		let header_ptr: u32 = match self.free_lists[order] {
356			Link::Ptr(header_ptr) => {
357				assert!(
358					header_ptr + order.size() + HEADER_SIZE <= mem.size(),
359					"Pointer is looked up in list of free entries, into which
360					only valid values are inserted; qed"
361				);
362
363				// Remove this header from the free list.
364				let next_free = Header::read_from(mem, header_ptr)?
365					.into_free()
366					.ok_or_else(|| error("free list points to a occupied header"))?;
367				self.free_lists[order] = next_free;
368
369				header_ptr
370			}
371			Link::Nil => {
372				// Corresponding free list is empty. Allocate a new item.
373				Self::bump(
374					&mut self.bumper,
375					order.size() + HEADER_SIZE,
376					mem.size(),
377				)?
378			}
379		};
380
381		// Write the order in the occupied header.
382		Header::Occupied(order).write_into(mem, header_ptr)?;
383
384		self.total_size += order.size() + HEADER_SIZE;
385		trace!("Heap size is {} bytes after allocation", self.total_size);
386
387		bomb.disarm();
388		Ok(Pointer::new(header_ptr + HEADER_SIZE))
389	}
390
391	/// Deallocates the space which was allocated for a pointer.
392	///
393	/// NOTE: Once the allocator has returned an error all subsequent requests will return an error.
394	///
395	/// # Arguments
396	///
397	/// - `mem` - a slice representing the linear memory on which this allocator operates.
398	/// - `ptr` - pointer to the allocated chunk
399	pub fn deallocate<M: Memory + ?Sized>(&mut self, mem: &mut M, ptr: Pointer<u8>) -> Result<(), Error> {
400		if self.poisoned {
401			return Err(error("the allocator has been poisoned"))
402		}
403
404		let bomb = PoisonBomb { poisoned: &mut self.poisoned };
405
406		let header_ptr = u32::from(ptr)
407			.checked_sub(HEADER_SIZE)
408			.ok_or_else(|| error("Invalid pointer for deallocation"))?;
409
410		let order = Header::read_from(mem, header_ptr)?
411			.into_occupied()
412			.ok_or_else(|| error("the allocation points to an empty header"))?;
413
414		// Update the just freed header and knit it back to the free list.
415		let prev_head = self.free_lists.replace(order, Link::Ptr(header_ptr));
416		Header::Free(prev_head).write_into(mem, header_ptr)?;
417
418		// Do the total_size book keeping.
419		self.total_size = self
420			.total_size
421			.checked_sub(order.size() + HEADER_SIZE)
422			.ok_or_else(|| error("Unable to subtract from total heap size without overflow"))?;
423		trace!("Heap size is {} bytes after deallocation", self.total_size);
424
425		bomb.disarm();
426		Ok(())
427	}
428
429	/// Increases the `bumper` by `size`.
430	///
431	/// Returns the `bumper` from before the increase.
432	/// Returns an `Error::AllocatorOutOfSpace` if the operation
433	/// would exhaust the heap.
434	fn bump(bumper: &mut u32, size: u32, heap_end: u32) -> Result<u32, Error> {
435		if *bumper + size > heap_end {
436			return Err(Error::AllocatorOutOfSpace);
437		}
438
439		let res = *bumper;
440		*bumper += size;
441		Ok(res)
442	}
443}
444
/// A trait for abstraction of accesses to a wasm linear memory. Used to read or modify the
/// allocation prefixes.
///
/// A wasm linear memory behaves similarly to a vector. The address space doesn't have holes and is
/// accessible up to the reported size.
///
/// The linear memory can grow in size with the wasm page granularity (64KiB), but it cannot shrink.
pub trait Memory {
	/// Read a u64 from the heap in LE (little-endian) form. Returns an error if any of the bytes
	/// read are out of bounds.
	fn read_le_u64(&self, ptr: u32) -> Result<u64, Error>;
	/// Write a u64 to the heap in LE (little-endian) form. Returns an error if any of the bytes
	/// written are out of bounds.
	fn write_le_u64(&mut self, ptr: u32, val: u64) -> Result<(), Error>;
	/// Returns the full size of the memory in bytes.
	fn size(&self) -> u32;
}
462
463impl Memory for [u8] {
464	fn read_le_u64(&self, ptr: u32) -> Result<u64, Error> {
465		let range =
466			heap_range(ptr, 8, self.len()).ok_or_else(|| error("read out of heap bounds"))?;
467		let bytes = self[range]
468			.try_into()
469			.expect("[u8] slice of length 8 must be convertible to [u8; 8]");
470		Ok(u64::from_le_bytes(bytes))
471	}
472	fn write_le_u64(&mut self, ptr: u32, val: u64) -> Result<(), Error> {
473		let range =
474			heap_range(ptr, 8, self.len()).ok_or_else(|| error("write out of heap bounds"))?;
475		let bytes = val.to_le_bytes();
476		&mut self[range].copy_from_slice(&bytes[..]);
477		Ok(())
478	}
479	fn size(&self) -> u32 {
480		u32::try_from(self.len()).expect("size of Wasm linear memory is <2^32; qed")
481	}
482}
483
/// Translates `offset`/`length` into a checked `usize` range into the heap.
///
/// Returns `None` if `offset + length` overflows `u32` or extends past `heap_len`.
fn heap_range(offset: u32, length: u32, heap_len: usize) -> Option<Range<usize>> {
	// Overflow-checked end of the span; `?` bails out with `None` on overflow.
	let end = offset.checked_add(length)?;
	if end as usize > heap_len {
		None
	} else {
		Some(offset as usize..end as usize)
	}
}
493
/// A guard that will raise the poisoned flag on drop unless disarmed.
struct PoisonBomb<'a> {
	// Borrowed flag on the allocator; set to `true` if this guard is dropped while armed.
	poisoned: &'a mut bool,
}
498
impl<'a> PoisonBomb<'a> {
	/// Defuse the bomb on the success path.
	///
	/// `mem::forget` skips the `Drop` impl, so the poisoned flag is left untouched.
	fn disarm(self) {
		mem::forget(self)
	}
}
504
impl<'a> Drop for PoisonBomb<'a> {
	fn drop(&mut self) {
		// Reaching `drop` means `disarm` was never called, i.e. an error path was taken,
		// so the allocator is poisoned for all subsequent operations.
		*self.poisoned = true;
	}
}
510
#[cfg(test)]
mod tests {
	use super::*;

	// One wasm page: the usual granularity of linear memory growth.
	const PAGE_SIZE: u32 = 65536;

	/// Makes a pointer out of the given address.
	fn to_pointer(address: u32) -> Pointer<u8> {
		Pointer::new(address)
	}

	#[test]
	fn should_allocate_properly() {
		// given
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(0);

		// when
		let ptr = heap.allocate(&mut mem[..], 1).unwrap();

		// then
		// returned pointer must start right after `HEADER_SIZE`
		assert_eq!(ptr, to_pointer(HEADER_SIZE));
	}

	#[test]
	fn should_always_align_pointers_to_multiples_of_8() {
		// given
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(13);

		// when
		let ptr = heap.allocate(&mut mem[..], 1).unwrap();

		// then
		// the pointer must start at the next multiple of 8 from 13
		// + the prefix of 8 bytes.
		assert_eq!(ptr, to_pointer(24));
	}

	#[test]
	fn should_increment_pointers_properly() {
		// given
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(0);

		// when
		let ptr1 = heap.allocate(&mut mem[..], 1).unwrap();
		let ptr2 = heap.allocate(&mut mem[..], 9).unwrap();
		let ptr3 = heap.allocate(&mut mem[..], 1).unwrap();

		// then
		// a prefix of 8 bytes is prepended to each pointer
		assert_eq!(ptr1, to_pointer(HEADER_SIZE));

		// the prefix of 8 bytes + the content of ptr1 padded to the lowest possible
		// item size of 8 bytes + the prefix of ptr1
		assert_eq!(ptr2, to_pointer(24));

		// ptr2 + its content of 16 bytes + the prefix of 8 bytes
		assert_eq!(ptr3, to_pointer(24 + 16 + HEADER_SIZE));
	}

	#[test]
	fn should_free_properly() {
		// given
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(0);
		let ptr1 = heap.allocate(&mut mem[..], 1).unwrap();
		// the prefix of 8 bytes is prepended to the pointer
		assert_eq!(ptr1, to_pointer(HEADER_SIZE));

		let ptr2 = heap.allocate(&mut mem[..], 1).unwrap();
		// the prefix of 8 bytes + the content of ptr 1 is prepended to the pointer
		assert_eq!(ptr2, to_pointer(24));

		// when
		heap.deallocate(&mut mem[..], ptr2).unwrap();

		// then
		// then the heads table should contain a pointer to the
		// prefix of ptr2 in the leftmost entry
		assert_eq!(heap.free_lists.heads[0], Link::Ptr(u32::from(ptr2) - HEADER_SIZE));
	}

	#[test]
	fn should_deallocate_and_reallocate_properly() {
		// given
		let mut mem = [0u8; PAGE_SIZE as usize];
		// 13 rounded up to the next multiple of the 8-byte alignment.
		let padded_offset = 16;
		let mut heap = FreeingBumpHeapAllocator::new(13);

		let ptr1 = heap.allocate(&mut mem[..], 1).unwrap();
		// the prefix of 8 bytes is prepended to the pointer
		assert_eq!(ptr1, to_pointer(padded_offset + HEADER_SIZE));

		let ptr2 = heap.allocate(&mut mem[..], 9).unwrap();
		// the padded_offset + the previously allocated ptr (8 bytes prefix +
		// 8 bytes content) + the prefix of 8 bytes which is prepended to the
		// current pointer
		assert_eq!(ptr2, to_pointer(padded_offset + 16 + HEADER_SIZE));

		// when
		heap.deallocate(&mut mem[..], ptr2).unwrap();
		let ptr3 = heap.allocate(&mut mem[..], 9).unwrap();

		// then
		// should have re-allocated
		assert_eq!(ptr3, to_pointer(padded_offset + 16 + HEADER_SIZE));
		assert_eq!(heap.free_lists.heads, [Link::Nil; N_ORDERS]);
	}

	#[test]
	fn should_build_linked_list_of_free_areas_properly() {
		// given
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(0);

		let ptr1 = heap.allocate(&mut mem[..], 8).unwrap();
		let ptr2 = heap.allocate(&mut mem[..], 8).unwrap();
		let ptr3 = heap.allocate(&mut mem[..], 8).unwrap();

		// when
		heap.deallocate(&mut mem[..], ptr1).unwrap();
		heap.deallocate(&mut mem[..], ptr2).unwrap();
		heap.deallocate(&mut mem[..], ptr3).unwrap();

		// then
		// the free list is LIFO: the most recently freed chunk is at the head
		assert_eq!(heap.free_lists.heads[0], Link::Ptr(u32::from(ptr3) - HEADER_SIZE));

		let ptr4 = heap.allocate(&mut mem[..], 8).unwrap();
		assert_eq!(ptr4, ptr3);

		assert_eq!(heap.free_lists.heads[0], Link::Ptr(u32::from(ptr2) - HEADER_SIZE));
	}

	#[test]
	fn should_not_allocate_if_too_large() {
		// given
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(13);

		// when
		let ptr = heap.allocate(&mut mem[..], PAGE_SIZE - 13);

		// then
		match ptr.unwrap_err() {
			Error::AllocatorOutOfSpace => {},
			e => panic!("Expected allocator out of space error, got: {:?}", e),
		}
	}

	#[test]
	fn should_not_allocate_if_full() {
		// given
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(0);
		let ptr1 = heap.allocate(&mut mem[..], (PAGE_SIZE / 2) - HEADER_SIZE).unwrap();
		assert_eq!(ptr1, to_pointer(HEADER_SIZE));

		// when
		let ptr2 = heap.allocate(&mut mem[..], PAGE_SIZE / 2);

		// then
		// there is no room for another half page incl. its 8 byte prefix
		match ptr2.unwrap_err() {
			Error::AllocatorOutOfSpace => {},
			e => panic!("Expected allocator out of space error, got: {:?}", e),
		}
	}

	#[test]
	fn should_allocate_max_possible_allocation_size() {
		// given
		// an extra page covers the 8-byte header of the maximum-size allocation
		let mut mem = vec![0u8; (MAX_POSSIBLE_ALLOCATION + PAGE_SIZE) as usize];
		let mut heap = FreeingBumpHeapAllocator::new(0);

		// when
		let ptr = heap.allocate(&mut mem[..], MAX_POSSIBLE_ALLOCATION).unwrap();

		// then
		assert_eq!(ptr, to_pointer(HEADER_SIZE));
	}

	#[test]
	fn should_not_allocate_if_requested_size_too_large() {
		// given
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(0);

		// when
		let ptr = heap.allocate(&mut mem[..], MAX_POSSIBLE_ALLOCATION + 1);

		// then
		match ptr.unwrap_err() {
			Error::RequestedAllocationTooLarge => {},
			e => panic!("Expected allocation size too large error, got: {:?}", e),
		}
	}

	#[test]
	fn should_return_error_when_bumper_greater_than_heap_size() {
		// given
		let mut mem = [0u8; 64];
		let mut heap = FreeingBumpHeapAllocator::new(0);

		let ptr1 = heap.allocate(&mut mem[..], 32).unwrap();
		assert_eq!(ptr1, to_pointer(HEADER_SIZE));
		heap.deallocate(&mut mem[..], ptr1).expect("failed freeing ptr1");
		assert_eq!(heap.total_size, 0);
		assert_eq!(heap.bumper, 40);

		let ptr2 = heap.allocate(&mut mem[..], 16).unwrap();
		assert_eq!(ptr2, to_pointer(48));
		heap.deallocate(&mut mem[..], ptr2).expect("failed freeing ptr2");
		assert_eq!(heap.total_size, 0);
		assert_eq!(heap.bumper, 64);

		// when
		// the `bumper` value is equal to `size` here and any
		// further allocation which would increment the bumper must fail.
		// we try to allocate 8 bytes here, which will increment the
		// bumper since no 8 byte item has been allocated+freed before.
		let ptr = heap.allocate(&mut mem[..], 8);

		// then
		match ptr.unwrap_err() {
			Error::AllocatorOutOfSpace => {},
			e => panic!("Expected allocator out of space error, got: {:?}", e),
		}
	}

	#[test]
	fn should_include_prefixes_in_total_heap_size() {
		// given
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(1);

		// when
		// an item size of 16 must be used then
		heap.allocate(&mut mem[..], 9).unwrap();

		// then
		assert_eq!(heap.total_size, HEADER_SIZE + 16);
	}

	#[test]
	fn should_calculate_total_heap_size_to_zero() {
		// given
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(13);

		// when
		let ptr = heap.allocate(&mut mem[..], 42).unwrap();
		assert_eq!(ptr, to_pointer(16 + HEADER_SIZE));
		heap.deallocate(&mut mem[..], ptr).unwrap();

		// then
		assert_eq!(heap.total_size, 0);
	}

	#[test]
	fn should_calculate_total_size_of_zero() {
		// given
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(19);

		// when
		for _ in 1..10 {
			let ptr = heap.allocate(&mut mem[..], 42).unwrap();
			heap.deallocate(&mut mem[..], ptr).unwrap();
		}

		// then
		assert_eq!(heap.total_size, 0);
	}

	#[test]
	fn should_read_and_write_u64_correctly() {
		// given
		let mut mem = [0u8; PAGE_SIZE as usize];

		// when
		Memory::write_le_u64(mem.as_mut(), 40, 4480113).unwrap();

		// then
		let value = Memory::read_le_u64(mem.as_mut(), 40).unwrap();
		assert_eq!(value, 4480113);
	}

	#[test]
	fn should_get_item_size_from_order() {
		// given
		let raw_order = 0;

		// when
		let item_size = Order::from_raw(raw_order).unwrap().size();

		// then
		assert_eq!(item_size, 8);
	}

	#[test]
	fn should_get_max_item_size_from_index() {
		// given
		let raw_order = 21;

		// when
		let item_size = Order::from_raw(raw_order).unwrap().size();

		// then
		assert_eq!(item_size as u32, MAX_POSSIBLE_ALLOCATION);
	}

	#[test]
	fn deallocate_needs_to_maintain_linked_list() {
		// room for exactly four 8-byte allocations (8 bytes payload + 8 bytes header each)
		let mut mem = [0u8; 8 * 2 * 4 + ALIGNMENT as usize];
		let mut heap = FreeingBumpHeapAllocator::new(0);

		// Allocate and free some pointers
		let ptrs = (0..4).map(|_| heap.allocate(&mut mem[..], 8).unwrap()).collect::<Vec<_>>();
		ptrs.into_iter().for_each(|ptr| heap.deallocate(&mut mem[..], ptr).unwrap());

		// Second time we should be able to allocate all of them again.
		let _ = (0..4).map(|_| heap.allocate(&mut mem[..], 8).unwrap()).collect::<Vec<_>>();
	}

	#[test]
	fn header_read_write() {
		// a header written to memory must decode back to an equal header
		let roundtrip = |header: Header| {
			let mut memory = [0u8; 32];
			header.write_into(memory.as_mut(), 0).unwrap();

			let read_header = Header::read_from(memory.as_mut(), 0).unwrap();
			assert_eq!(header, read_header);
		};

		roundtrip(Header::Occupied(Order(0)));
		roundtrip(Header::Occupied(Order(1)));
		roundtrip(Header::Free(Link::Nil));
		roundtrip(Header::Free(Link::Ptr(0)));
		roundtrip(Header::Free(Link::Ptr(4)));
	}

	#[test]
	fn poison_oom() {
		// given
		// a heap of 32 bytes. Should be enough for two allocations.
		let mut mem = [0u8; 32];
		let mut heap = FreeingBumpHeapAllocator::new(0);

		// when
		assert!(heap.allocate(mem.as_mut(), 8).is_ok());
		let alloc_ptr = heap.allocate(mem.as_mut(), 8).unwrap();
		assert!(heap.allocate(mem.as_mut(), 8).is_err());

		// then
		// the failed allocation must poison the allocator, refusing even deallocations
		assert!(heap.poisoned);
		assert!(heap.deallocate(mem.as_mut(), alloc_ptr).is_err());
	}
}