/* alloc.rs
 *
 * Developed by Tim Walls <tim.walls@snowgoons.com>
 * Copyright (c) All Rights Reserved, Tim Walls
 */
//! A simple dynamic allocator implementation for our embedded devices.
//!
//! Based on a First-Fit algorithm as described by:
//!
//! R. P. Brent. 1989. Efficient implementation of the first-fit strategy for
//! dynamic storage allocation.
//! ACM Trans. Program. Lang. Syst. 11, 3 (July 1989), 388–403.
//! DOI:<https://doi.org/10.1145/65979.65981>
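//!
//! A minimal usage sketch (assuming an AVR target, where this module is
//! installed as the global allocator; `initialise()` must be called before
//! the first allocation is attempted):
//!
//! ```ignore
//! // Set up the heap first; all remaining RAM after .data/.bss is used.
//! avr_oxide::alloc::initialise();
//!
//! // Ordinary allocating types then work as usual:
//! let boxed = avr_oxide::alloc::boxed::Box::new(42u8);
//! let mut bytes = avr_oxide::alloc::vec::Vec::new();
//! bytes.push(0xdeu8);
//! ```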

// Imports ===================================================================
use avr_oxide::deviceconsts::memory;

use core::alloc::{GlobalAlloc, Layout};
use core::cell::UnsafeCell;

#[cfg(target_arch="avr")]
extern crate alloc;
#[cfg(target_arch="avr")]
pub use alloc::vec;
#[cfg(target_arch="avr")]
pub use alloc::boxed;
#[cfg(target_arch="avr")]
pub use alloc::rc;
#[cfg(target_arch="avr")]
pub use alloc::string;


#[cfg(not(target_arch="avr"))]
pub use std::boxed;
#[cfg(not(target_arch="avr"))]
pub use std::vec;
#[cfg(not(target_arch="avr"))]
pub use std::rc;
#[cfg(not(target_arch="avr"))]
pub use std::string;


// Declarations ==============================================================

// Make sure we are word aligned (thus our heap will be as well), and also
// that we have a predictable data representation (since we'll be doing
// pointer mangling into/out of our heap array.)
#[repr(C,align(2))]
struct FirstFitHeap<const SEGS: usize, const S2: usize> {
  /// The heap itself, viewed as a slice of 16-bit words
  v:  &'static mut [i16],
  /// Word index of the first block starting in each segment
  pa: [u16; SEGS],
  /// Segment tree of maximum free-block sizes (st[1] is the root)
  st: [i16; S2],
  /// Number of segments currently in use
  s:  usize,
  /// Number of heap words covered by each segment
  words_per_seg: usize
}
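
// A sketch of the on-heap block format (inferred from the code below,
// following Brent's paper): every block begins with a control word v[p].
// A positive v[p] marks a free block of v[p] words (control word included);
// a negative v[p] marks an allocated block of -v[p] words.  Blocks are
// contiguous, so the next block always starts at p + v[p].abs().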


struct GlobalAllocator<const S:usize, const S2:usize> {
  heap: UnsafeCell<FirstFitHeap<S,S2>>
}

// If this is enabled when we do unit tests, then it will replace the default
// allocator /for the unit test environment/, and our unit tests will then
// bomb out, because our allocator isn't really designed to run on the host
// environment (and will not have been initialised.)
#[cfg(target_arch="avr")]
#[global_allocator]
static mut GLOBAL: GlobalAllocator::<{memory::alloc::SMAX}, {memory::alloc::SMAX * 2}> = GlobalAllocator {
  heap: UnsafeCell::new(FirstFitHeap::new())
};

// Code ======================================================================
#[cfg(target_arch="avr")]
#[alloc_error_handler]
fn _alloc_error(_: core::alloc::Layout) -> ! {
  avr_oxide::oserror::halt(avr_oxide::oserror::OsError::OutOfMemory);
}

/// Allocate memory using the AVRoxide global allocator implementation
#[cfg(target_arch="avr")]
pub unsafe fn alloc(layout: Layout) -> *mut u8 {
  GLOBAL.alloc(layout)
}

/// Allocate memory using the AVRoxide global allocator implementation
#[cfg(not(target_arch="avr"))]
pub unsafe fn alloc(layout: Layout) -> *mut u8 {
  std::alloc::alloc(layout)
}


/// Deallocate memory allocated using the AVRoxide global allocator implementation
#[cfg(target_arch="avr")]
pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
  GLOBAL.dealloc(ptr, layout)
}

/// Deallocate memory allocated using the AVRoxide global allocator implementation
#[cfg(not(target_arch="avr"))]
pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
  std::alloc::dealloc(ptr, layout)
}


/**
 * Must be called to initialise the global allocator before any allocations
 * are attempted.  We will use all remaining memory on the device (after
 * the data/BSS segments) as our heap.  We calculate this using the constant
 * for the current device's memory max, and the `__heap_start` value exported
 * by the linker.
 */
#[cfg(target_arch="avr")]
pub fn initialise() {
  unsafe {
    let heap = avr_oxide::panic_if_none!(GLOBAL.heap.get().as_mut(), avr_oxide::oserror::OsError::InternalError);

    let mut heap_start_hi:u8;
    let mut heap_start_lo:u8;
    core::arch::asm!{
      "ldi {}, hi8(__heap_start)",
      "ldi {}, lo8(__heap_start)",
      out(reg) heap_start_hi,
      out(reg) heap_start_lo
    }

    let heap_start = (heap_start_hi as usize) << 8 | heap_start_lo as usize;

    let heap_size = avr_oxide::deviceconsts::memory::SRAM_END as isize - heap_start as isize;

    // Fail if there is not enough memory for a usefully functioning heap
    if heap_start == 0 || heap_size < avr_oxide::deviceconsts::memory::alloc::MIN_HEAPSIZE as isize {
      avr_oxide::oserror::halt(avr_oxide::oserror::OsError::NotEnoughRam);
    }
    heap.set_heap_location(heap_start as *mut u8, heap_size as usize);
    heap.init();
  }
}

/**
 * Implementation of the Rust GlobalAlloc trait for our First-Fit allocator,
 * allowing it to be used as the Rust global allocator.
 */
unsafe impl<const S:usize, const S2:usize> GlobalAlloc for GlobalAllocator<S,S2> {
  unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
    let heap = avr_oxide::panic_if_none!(self.heap.get().as_mut(), avr_oxide::oserror::OsError::InternalError);

    // Calculate the size we need to allocate for the 'worst case' amount of
    // padding needed to achieve the requested alignment
    let align_words = match layout.align() {
      0 => 0,
      1 => 0,
      _other => (layout.align()/2)-1
    };
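    // For illustration: an alignment of 4 bytes (2 words) needs at most 1
    // word of padding, 8 bytes (4 words) at most 3 words, and so on.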

    let size_words = ((layout.size() + 1) / 2) + align_words;

    // Allocate, including space for padding if needed
    let ptr = avr_oxide::concurrency::interrupt::isolated(|_isotoken|{
      match heap.bl_new(size_words){
        0 => {
          core::ptr::null_mut()
        },
        index => {
          core::mem::transmute(&heap.v[index])
        }
      }
    });

    // Note that while we are updating the heap data directly here, we're
    // only doing it 'inside' the block of memory we just allocated, so it's
    // safe to do so outside the mutual exclusion lock.

    // Now modify the returned pointer so it matches the requested alignment
    match (ptr as usize) % layout.align() {
      0 => ptr, // We already have the requested alignment
      misalignment => {
        // We need to pad.  That means we need to set a flag, a positive number
        // in the word immediately before our returned pointer that will tell
        // us where to find the *actual* starting index.
        // (This works because normally that would be the control word,
        // with a *negative* pointer indicating the allocated size.  So if
        // that field is positive, we know we are not at the beginning of
        // our block)
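        //
        // For illustration, a padded allocation laid out in 16-bit words
        // (q is the word index of the returned pointer):
        //
        //   v[p]          control word, -(n): allocated block of n words
        //   v[p+1 .. q]   padding; the last padding word v[q-1] holds a
        //                 positive flag = the number of padding words
        //   v[q ..]       aligned user data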
        let padding_needed = (layout.align() - misalignment) as isize;

        let returned_ptr = ptr.offset(padding_needed);
        let flag = returned_ptr.offset(-2) as *mut i16; // -2 because 'word'
        *flag = (padding_needed as i16)/2; // Store as words not bytes
        // Note that the spec for layout tells us alignment is *always* a
        // power of two, so this is 'safe'

        returned_ptr
      }
    }
  }

  unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
    let heap = avr_oxide::panic_if_none!(self.heap.get().as_mut(), avr_oxide::oserror::OsError::InternalError);

    // Convert the byte pointer back into a word index into our heap slice
    let index0:usize = core::mem::transmute(&heap.v[0]);
    let mut index = ((ptr as usize) - index0)/2;

    avr_oxide::concurrency::interrupt::isolated(|_isotoken|{
      // Check if this allocation actually contains alignment padding,
      // and if so adjust the pointer
      if heap.v[index-1] > 0 {
        index -= heap.v[index-1] as usize;
      }

      // Now we can dispose knowing that the index is the start of the allocated
      // region.

      heap.bl_dispose(index);
    });
  }
}
impl<const SEGS: usize, const S2: usize> FirstFitHeap<SEGS,S2> {
  /**
   * Create a new instance of the first-fit heap structure.  You must call
   * the init() method before first making any allocations from it.
   */
  const fn new() -> Self {
    FirstFitHeap {
      // The heap starts as an empty slice; real memory is attached later
      // by set_heap_location(), and init() then writes the control word
      // marking the whole heap as a single free block.
      v:  &mut [],
      pa: [0; SEGS],
      st: [0; S2],
      s: 0,
      words_per_seg: 0
    }
  }

  /**
   * Set the location of the heap to an address in memory, of the given size
   * in bytes.  Must be called before `heap.init()`
   */
  fn set_heap_location(&mut self, memory: *mut u8, size_bytes: usize){
    unsafe {
      self.v = core::slice::from_raw_parts_mut(memory as *mut i16, size_bytes/2)
    }
  }

  /**
   * Perform such initialisation as is required to make the heap ready for
   * allocations.  Must be called before any calls to `bl_new()` etc.
   */
  fn init(&mut self) {
    let heapsize_words = self.v.len() as i16;

    if heapsize_words == 0 {
      avr_oxide::oserror::halt(avr_oxide::oserror::OsError::NotEnoughRam);
    }

    self.s     = 1;
    self.st[1] = heapsize_words;
    self.pa[0] = 0;
    self.v[0]  = heapsize_words;
    self.words_per_seg = heapsize_words as usize/SEGS;

    // Allocate a zero-sized dummy first block, so that every real block
    // has a predecessor (see bl_pred())
    self.bl_new(0);
  }

  /**
   * Return the segment which contains the given heap address.
   */
  fn bl_seg(&self, addr: usize) -> usize {
    self.s + (addr/self.words_per_seg)
  }

  /**
   * Double the number of segments in use.  Assumes the current number is
   * at most (SEGS/2).
   */
  #[optimize(speed)]
  fn bl_double(&mut self) {
    let heapsize_max = self.v.len() - 1;

    if self.s > SEGS/2 {
      avr_oxide::oserror::halt(avr_oxide::oserror::OsError::InternalError);
    }

    for i in self.s ..= 2*self.s-1 {
      self.pa[i] = heapsize_max as u16;
    }
    let mut k = self.s;

    while k > 0 {
      for i in 0 ..= k-1 {
        self.st[2*k+i] = self.st[k+i];
        self.st[3*k+i] = 0;
      }

      k /= 2;
    }

    self.st[1] = self.st[2];
    self.s *= 2;
  }

  /**
   * Do the housekeeping necessary if a block with control word `self.v[addr]`
   * will be allocated or merged with a block on its left in a different
   * segment.
   */
  #[optimize(speed)]
  fn bl_fix_left(&mut self, addr: usize) {
    let heapsize_max = self.v.len() - 1;

    let mut j = self.bl_seg(addr);
    if (self.v[addr] <= 0) || (self.st[j] as i16 <= self.v[addr]) {

      let mut pj = self.pa[j-self.s] as usize; // Index of first block in segment
      let mut pn = (j+1-self.s) * self.words_per_seg; // Start of next segment
      if pn > heapsize_max { // Bound last segment to total heap length
        pn = heapsize_max;
      }
      let mut mx = if pj < pn {
        // There is a block starting in this segment
        let mut mx = 1i16;
        while pj < pn {
          if mx < self.v[pj] {
            mx = self.v[pj];
          }
          pj = pj + self.v[pj].abs() as usize;
        }
        mx
      } else {
        // There is no block starting in this segment
        0
      };
      self.st[0] = 0; // Sentinel
      while self.st[j] > mx { // Update segment tree
        self.st[j] = mx;
        let sister = match j % 2 {
          1 => self.st[j-1], // j is odd
          _ => self.st[j+1]  // j is even
        };
        if mx < sister {
          mx = sister;
        }
        j /= 2;
      }
    }
  }

  /**
   * Do the housekeeping necessary after a block with control word `self.v[addr]`
   * is freed or merged with a block on its right, or created by splitting
   * (with addr on the right of the split)
   */
  // We get weird bugs with this routine if we don't have optimize(speed).
  // Reasons why are wholly mysterious.
  #[optimize(speed)]
  fn bl_fix_right(&mut self, addr: usize) {
    let mut j = self.bl_seg(addr);
    // If necessary (and possible) double the number of segments
    while j >= 2*self.s && self.s <= SEGS/2 {
      self.bl_double();
      j = self.bl_seg(addr);
    }
    if self.pa[j-self.s] > addr as u16 {
      self.pa[j-self.s] = addr as u16;
    }
    let vp = self.v[addr];
    self.st[0] = vp; // Sentinel
    while self.st[j] < vp { // Go up branch of segment tree
      self.st[j] = vp;
      j /= 2;
    }
  }

  /**
   * Return the predecessor of block p (there will always be one because of
   * the dummy first block).  The address given must be the index of a
   * control word (as will be the index returned)
   */
  #[optimize(speed)]
  fn bl_pred(&self, addr: usize) -> usize {
    let mut j = self.bl_seg(addr);
    if self.pa[j-self.s] == addr as u16 {
      while self.st[j-1] == 0 {
        j /= 2;
      }
      j -= 1;
      while j<self.s {
        if self.st[2*j+1] > 0 {
          j = 2*j+1;
        } else {
          j *= 2;
        }
      }
    }
    let mut qn = self.pa[j-self.s];
    let mut q  = qn;
    while qn != addr as u16 {
      q = qn;
      qn = qn+self.v[qn as usize].abs() as u16;
    }
    q as usize
  }

  /**
   * Return the index to a block of at least `size` words, or 0 if there
   * is not enough free space.
   */
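  // For illustration (mirroring the unit tests below): on a fresh 512-word
  // heap, bl_new(128) returns index 2, because init()'s dummy block already
  // occupies v[0]; a later bl_dispose(2) returns those words to the pool.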
  #[optimize(speed)]
  fn bl_new(&mut self, size: usize) -> usize {
    if self.st[1] <= size as i16 { // Not enough free space
      0
    } else {
      let n = size + 1; // Length including control word
      let mut j = 1;
      while j < self.s {
        if self.st[2*j] >= n as i16 {
          j *= 2;
        } else {
          j = 2*j+1;
        }
      }
      let mut p = self.pa[j-self.s] as usize; // Index of control word of first block in segment j
      while self.v[p] < n as i16 {
        p = p + self.v[p].abs() as usize;
      }
      // p is now the index of control word of required block
      let vp = self.v[p];
      self.v[p] = -(n as i16); // Flag block as allocated
      let fix_left = vp == self.st[j];
      if vp > n as i16 {
        self.v[p+n] = vp - (n as i16);
        if self.bl_seg(p+n) > j {
          self.bl_fix_right(p+n);
        }
      }
      if fix_left {
        self.bl_fix_left(p);
      }
      p+1 // Return index of first word after control word
    }
  }

  /**
   * Dispose of a block obtained with `bl_new(size)`.  The given address is
   * an index that was returned by `bl_new`.
   */
  #[optimize(speed)]
  fn bl_dispose(&mut self, addr: usize) {
    let heapsize_max = self.v.len() - 1;
    let p = addr-1; // Index of control word
    let vp = -self.v[p];
    if vp > 0 {
      self.v[p] = vp; // Mark the block as free
      let j = self.bl_seg(p);
      let pn = p + vp as usize;
      if pn < heapsize_max && self.v[pn] >= 0 {
        // Next block is empty, so we can merge
        self.v[p] = vp + self.v[pn];
        let jn = self.bl_seg(pn);
        if jn > j {
          self.pa[jn-self.s] = (p as i16 + self.v[p]) as u16;
          self.bl_fix_left(pn);
        }
      }
      let pr = self.bl_pred(p); // Preceding block
      if self.v[pr] >= 0 {
        // Preceding block is empty, so we can merge
        self.v[pr] += self.v[p];
        if self.pa[j-self.s] as usize == p {
          self.pa[j-self.s] = (pr as i16 + self.v[pr]) as u16; // Point to first block in segment
          self.bl_fix_left(p);
        }
        self.bl_fix_right(pr);
      } else if self.v[p] > self.st[j] {
        self.bl_fix_right(p);
      }
    } else {
      // Double-Dispose is an error
      avr_oxide::oserror::halt(avr_oxide::oserror::OsError::InternalError);
    }
  }

  /**
   * Returns the size (in 16-bit words) of a block allocated by a call to
   * `bl_new()`.  Halts if the given block is not currently allocated.
   */
  #[allow(dead_code)]
  fn bl_size(&self, addr: usize) -> usize {
    let p = self.v[addr-1];

    if p > 0 {
      avr_oxide::oserror::halt(avr_oxide::oserror::OsError::InternalError);
    } else {
      -(p+1) as usize
    }
  }

  /**
   * Return the maximum RAM available to allocate, in 16-bit words (i.e. the
   * largest `size` for which `bl_new(size)` can be expected to succeed.)
   * This is approximately the same thing as 'available space', although
   * because of fragmentation the total number of unallocated words may be
   * larger: with two free blocks of 10 and 20 words, this returns 19, even
   * though 30 words are unallocated.
   */
  #[allow(dead_code)]
  fn bl_available(&self) -> usize {
    self.st[1] as usize - 1
  }

  /**
   * Return the number of segments used
   */
  #[allow(dead_code)]
  fn bl_segments(&self) -> usize {
    self.s
  }
}

// Tests =====================================================================
#[cfg(test)]
mod test {
  use std::vec::Vec;
  use avr_oxide::alloc::*;

  #[test]
  fn test_create_heap() {
    let mut memory = [0u8;1024];
    let mut heap = FirstFitHeap::<32,64>::new();
    heap.set_heap_location(&mut memory as *mut u8, 1024);
    heap.init();
    assert_eq!(heap.bl_seg(1), 1);
  }

  #[test]
  fn test_alloc_free() {
    let mut memory = [0u8;1024];
    let mut heap = FirstFitHeap::<32,64>::new();
    heap.set_heap_location(&mut memory as *mut u8, 1024);
    heap.init();

    let init_free = heap.bl_available();
    println!("Heap with {} words available\n", init_free);

    let alloc1 = heap.bl_new(128);
    println!("First allocation, {} bytes @ {}", heap.bl_size(alloc1), alloc1);
    println!("Free space: {}", heap.bl_available());
    println!("Segments: {}", heap.bl_segments());
    assert!(heap.bl_size(alloc1) >= 128);
    assert_eq!(alloc1, 2);

    let alloc2 = heap.bl_new(64);
    println!("Second allocation, {} bytes @ {}", heap.bl_size(alloc2), alloc2);
    println!("Free space: {}", heap.bl_available());
    println!("Segments: {}", heap.bl_segments());
    assert!(heap.bl_size(alloc2) >= 64);
    assert_eq!(alloc2, 131);

    let alloc3 = heap.bl_new(27);
    println!("Third allocation, {} bytes @ {}", heap.bl_size(alloc3), alloc3);
    println!("Free space: {}", heap.bl_available());
    println!("Segments: {}", heap.bl_segments());
    assert!(heap.bl_size(alloc3) >= 27);
    assert_eq!(alloc3, 196);

    let alloc4 = heap.bl_new(256);
    println!("Fourth allocation, {} bytes @ {}", heap.bl_size(alloc4), alloc4);
    println!("Free space: {}", heap.bl_available());
    println!("Segments: {}", heap.bl_segments());
    assert!(heap.bl_size(alloc4) >= 256);
    assert_eq!(alloc4, 224);

    let alloc5 = heap.bl_new(31);
    println!("Fifth allocation, {} bytes @ {}", heap.bl_size(alloc5), alloc5);
    println!("Free space: {}", heap.bl_available());
    println!("Segments: {}", heap.bl_segments());
    assert!(heap.bl_size(alloc5) >= 31);
    assert_eq!(alloc5, 481);

    // OK, we allocated a load, now let's free some
    println!("Freeing allocation @ {}", alloc3);
    heap.bl_dispose(alloc3);
    println!("Free space: {}", heap.bl_available());
    println!("Segments: {}", heap.bl_segments());
    assert_eq!(heap.bl_available(), 27);

    // Now let's allocate a couple more small chunks
    let alloc6 = heap.bl_new(4);
    println!("Sixth allocation, {} bytes @ {}", heap.bl_size(alloc6), alloc6);
    println!("Free space: {}", heap.bl_available());
    println!("Segments: {}", heap.bl_segments());
    assert!(heap.bl_size(alloc6) >= 4);
    assert_eq!(alloc6, 196);

    let alloc7 = heap.bl_new(10);
    println!("Seventh allocation, {} bytes @ {}", heap.bl_size(alloc7), alloc7);
    println!("Free space: {}", heap.bl_available());
    println!("Segments: {}", heap.bl_segments());
    assert!(heap.bl_size(alloc7) >= 10);
    assert_eq!(alloc7, 201);

    // If we got this far, let's free everything and then see how much space
    // we have left
    heap.bl_dispose(alloc1);
    println!("Free space: {}", heap.bl_available());
    heap.bl_dispose(alloc2);
    println!("Free space: {}", heap.bl_available());
    heap.bl_dispose(alloc4);
    println!("Free space: {}", heap.bl_available());
    heap.bl_dispose(alloc5);
    println!("Free space: {}", heap.bl_available());
    heap.bl_dispose(alloc6);
    println!("Free space: {}", heap.bl_available());
    heap.bl_dispose(alloc7);
    println!("Free space: {}", heap.bl_available());

    assert_eq!(heap.bl_available(), init_free);
  }

  fn fillspace(addr: *mut u8, size: usize, value: u8) {
    unsafe {
      for i in 0..size as isize {
        *(addr.offset(i)) = value;
      }
    }
  }

  fn checkspace(addr: *mut u8, size: usize, value: u8) -> bool {
    unsafe {
      for i in 0..size as isize {
        if *(addr.offset(i)) != value {
          return false;
        }
      }
    }
    true
  }

  #[test]
  pub fn test_global_alloc() {
    unsafe {
      let mut memory = [0u8;1024];
      let global = GlobalAllocator {
        heap: UnsafeCell::new(FirstFitHeap::<32,64>::new())
      };
      let heap = global.heap.get().as_mut().unwrap();
      heap.set_heap_location(&mut memory as *mut u8, 1024);
      heap.init();


      // Do an allocation
      let addr = global.alloc(Layout::from_size_align(16,2).unwrap());
      fillspace(addr, 16,0xde);
      println!("Allocated a block @ address: {:?}", addr);
      println!("Free space: {}b", heap.bl_available()*2);
      assert_eq!(addr as usize % 2, 0); // Check the returned pointer has correct alignment

      // Now free it
      global.dealloc(addr, Layout::from_size_align(16,2).unwrap());
      println!("Freed block");
      println!("Free space: {}b", heap.bl_available()*2);

      // Do some other alignments
      println!("Alignment checks");
      // Throwaway allocation to perturb the alignment of what follows
      let _discard = global.alloc(Layout::from_size_align(3, 2).unwrap());
      let addr1 = global.alloc(Layout::from_size_align(175,4).unwrap());
      fillspace(addr1, 175, 0xde);
      println!("Allocated a block @ address: {:?}", addr1);
      println!("Free space: {}b", heap.bl_available()*2);
      assert_eq!(addr1 as usize % 4, 0); // Check the returned pointer has correct alignment

      let addr2 = global.alloc(Layout::from_size_align(230, 8).unwrap());
      fillspace(addr2, 230, 0xde);
      println!("Allocated a block @ address: {:?}", addr2);
      println!("Free space: {}b", heap.bl_available()*2);
      assert_eq!(addr2 as usize % 8, 0); // Check the returned pointer has correct alignment

      let addr3 = global.alloc(Layout::from_size_align(49, 16).unwrap());
      fillspace(addr3, 49, 0xde);
      println!("Allocated a block @ address: {:?}", addr3);
      println!("Free space: {}b", heap.bl_available()*2);
      assert_eq!(addr3 as usize % 16, 0); // Check the returned pointer has correct alignment

      println!("Checking for data corruption");
      assert!(checkspace(addr1, 175,0xde));
      assert!(checkspace(addr2, 230,0xde));
      assert!(checkspace(addr3, 49,0xde));

      println!("Freeing aligned allocations");
      global.dealloc(addr1, Layout::from_size_align(175,4).unwrap());
      assert!(checkspace(addr2, 230,0xde));
      assert!(checkspace(addr3, 49,0xde));
      global.dealloc(addr2, Layout::from_size_align(230, 8).unwrap());
      assert!(checkspace(addr3, 49,0xde));
      global.dealloc(addr3, Layout::from_size_align(49, 16).unwrap());
    }
  }

  #[test]
  pub fn soaktest_global_alloc(){
    // Remember: the allocator works in 16-bit words, not 8-bit bytes,
    // so the heap sizes here are (device heap size in `deviceconsts.rs`)/2.
    soaktest_global_alloc_sized::<64,8,{8*2}>();  // '328p Small
    soaktest_global_alloc_sized::<128,8,{8*2}>(); // '328p Medium
    soaktest_global_alloc_sized::<256,16,{16*2}>(); // '328p Large

    soaktest_global_alloc_sized::<256,32,{32*2}>();  // '4809 Small
    soaktest_global_alloc_sized::<512,64,{64*2}>(); // '4809 Medium
    soaktest_global_alloc_sized::<1536,128,{128*2}>(); // '4809 Large
  }

  pub fn soaktest_global_alloc_sized<const SIZE:usize,const SMAX:usize,const S2:usize>() {
    println!("");
    println!("==========================");
    println!("==  ALLOCATOR SOAKTEST  ==");
    println!("==========================");
    println!("  Heap:     {} words", SIZE);
    println!("  Segments: {} (max)", SMAX);

    let mut memory = [0u16; SIZE];


    unsafe {
      let global = GlobalAllocator {
        heap: UnsafeCell::new(FirstFitHeap::<SMAX,S2>::new())
      };

      let heap = global.heap.get().as_mut().unwrap();
      heap.set_heap_location(&mut memory as *mut u16 as *mut u8, {SIZE*2});
      heap.init();

      for blocksize in 1..SMAX {
        println!("Testing blocks of size {}", blocksize);

        let mut blocks : Vec<*mut u8> = Vec::new();

        let mut i:i32 = 0;
        loop {
          let block = global.alloc(Layout::from_size_align(blocksize,1).unwrap());

          if !block.is_null() {
            fillspace(block,blocksize,i as u8);
            blocks.push(block);
          } else {
            break;
          }
          i += 1;
        }

        println!("  Allocated {} blocks", blocks.len());
        let mut i:i32 = 0;
        for block in blocks {
          assert!(checkspace(block,blocksize,i as u8));
          global.dealloc(block,Layout::from_size_align(blocksize,1).unwrap());
          i += 1;
        }
      }
    }
  }

}