// secmem_alloc/zeroizing_alloc.rs
//! An allocator zeroizing memory on deallocation.
//!
//! This module contains a wrapper for any memory allocator to zeroize memory
//! before deallocation. This allows use both as a [`GlobalAlloc`] and as
//! [`Allocator`].
//!
//! This is safer than zeroizing your secret objects on drop because the
//! allocator approach also zeroizes old memory when the object is only moved
//! in memory but not dropped. This can happen for example when resizing
//! [`Vec`]s.
use crate::macros::{
debug_handleallocerror_precondition, debug_handleallocerror_precondition_valid_layout,
precondition_memory_range,
};
use crate::zeroize::zeroize_mem;
use alloc::alloc::handle_alloc_error;
use allocator_api2::alloc::{AllocError, Allocator};
use core::alloc::{GlobalAlloc, Layout};
use core::ptr::NonNull;
#[cfg(not(feature = "nightly_strict_provenance"))]
use sptr::Strict;
/// A memory-allocator wrapper that wipes (zeroizes) each allocation right
/// before it is handed back to the backend allocator. See the module level
/// documentation.
///
/// With debug assertions enabled, *some* of the safety requirements for
/// using an allocator are checked.
#[derive(Debug, Default)]
pub struct ZeroizeAlloc<A> {
    /// The inner allocator that performs the real (de)allocations.
    backend_alloc: A,
}

impl<A> ZeroizeAlloc<A> {
    /// Wrap `backend_alloc` in a zeroizing allocator: all allocations are
    /// delegated to it, and memory is wiped before being returned to it on
    /// deallocation.
    pub const fn new(backend_alloc: A) -> Self {
        ZeroizeAlloc { backend_alloc }
    }
}
unsafe impl<A: GlobalAlloc> GlobalAlloc for ZeroizeAlloc<A> {
    /// Allocates memory as described by `layout`, delegating to the backend
    /// allocator after debug-only precondition checks.
    ///
    /// # Safety
    /// The caller must uphold the safety contract of [`GlobalAlloc::alloc`].
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // debug assertions
        // SAFETY: the allocator is not allowed to unwind (panic!), so failed
        // preconditions are reported via `handle_alloc_error` instead
        // check that `layout` is a valid layout
        debug_handleallocerror_precondition_valid_layout!(layout);
        // zero sized allocations are not allowed
        debug_handleallocerror_precondition!(layout.size() != 0, layout);
        // SAFETY: caller must uphold the safety contract of `GlobalAlloc::alloc`.
        unsafe { self.backend_alloc.alloc(layout) }
    }

    /// Zeroizes the memory at `ptr` and only then deallocates it with the
    /// backend allocator. The wipe-before-free ordering is the security
    /// property this wrapper exists for.
    ///
    /// # Safety
    /// The caller must uphold the safety contract of [`GlobalAlloc::dealloc`].
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // debug assertions
        // SAFETY: the allocator is not allowed to unwind (panic!)
        // null pointers are never allowed
        debug_handleallocerror_precondition!(!ptr.is_null(), layout);
        // check that `layout` is a valid layout
        debug_handleallocerror_precondition_valid_layout!(layout);
        // zero sized allocations are not allowed
        debug_handleallocerror_precondition!(layout.size() != 0, layout);
        // you can't wrap around the address space
        precondition_memory_range!(ptr, layout.size());
        // NOTE(review): this manual check looks like a duplicate of the
        // `precondition_memory_range!` check directly above — confirm whether
        // that macro already reports overflow via `handle_alloc_error`, in
        // which case this block is redundant.
        if cfg!(debug_assertions) {
            // you can't wrap around the address space
            if ptr.addr().checked_add(layout.size()).is_none() {
                handle_alloc_error(layout);
            }
        }
        // securely wipe the deallocated memory
        // SAFETY: `ptr` is valid for writes of `layout.size()` bytes since it was
        // previously successfully allocated (by the safety assumption on this function)
        // and not yet deallocated
        // SAFETY: `ptr` is at least `layout.align()` byte aligned and this is a power
        // of two
        unsafe {
            zeroize_mem(ptr, layout.size());
        }
        // SAFETY: caller must uphold the safety contract of `GlobalAlloc::dealloc`.
        unsafe { self.backend_alloc.dealloc(ptr, layout) }
    }

    /// Allocates zero-initialised memory as described by `layout`, delegating
    /// to the backend allocator after debug-only precondition checks.
    ///
    /// # Safety
    /// The caller must uphold the safety contract of
    /// [`GlobalAlloc::alloc_zeroed`].
    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
        // debug assertions
        // SAFETY: the allocator is not allowed to unwind (panic!)
        // check that `layout` is a valid layout
        debug_handleallocerror_precondition_valid_layout!(layout);
        // zero sized allocations are not allowed
        debug_handleallocerror_precondition!(layout.size() != 0, layout);
        // SAFETY: caller must uphold the safety contract of
        // `GlobalAlloc::alloc_zeroed`.
        unsafe { self.backend_alloc.alloc_zeroed(layout) }
    }

    // We do not use `backend_alloc.realloc` but instead use the default
    // implementation from `std` (actually `core`), so our zeroizing `dealloc`
    // is used. This can degrade performance for 'smart' allocators that would
    // try to reuse the same allocation in realloc.
    // This is the only safe and secure behaviour we can provide when using an
    // arbitrary backend allocator.
}
unsafe impl<A: Allocator> Allocator for ZeroizeAlloc<A> {
    /// Allocates memory as described by `layout`, delegating to the backend
    /// allocator after a debug-only layout-validity check.
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        // debug assertions
        // check that `layout` is a valid layout
        debug_handleallocerror_precondition_valid_layout!(layout);
        self.backend_alloc.allocate(layout)
    }

    /// Allocates zero-initialised memory as described by `layout`, delegating
    /// to the backend allocator after a debug-only layout-validity check.
    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        // debug assertions
        // check that `layout` is a valid layout
        debug_handleallocerror_precondition_valid_layout!(layout);
        self.backend_alloc.allocate_zeroed(layout)
    }

    /// Zeroizes the memory at `ptr` and only then deallocates it with the
    /// backend allocator. The wipe-before-free ordering is the security
    /// property this wrapper exists for. (`ptr` being `NonNull` means no
    /// null check is needed here, unlike in `GlobalAlloc::dealloc`.)
    ///
    /// # Safety
    /// The caller must uphold the safety contract of [`Allocator::deallocate`].
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        // debug assertions
        // check that `layout` is a valid layout
        debug_handleallocerror_precondition_valid_layout!(layout);
        // securely wipe the deallocated memory
        // SAFETY: `ptr` is valid for writes of `layout.size()` bytes since it was
        // previously successfully allocated and not yet deallocated
        // SAFETY: `ptr` is at least `layout.align()` byte aligned and this is a power
        // of two
        unsafe {
            zeroize_mem(ptr.as_ptr(), layout.size());
        }
        // SAFETY: caller must uphold the safety contract of `Allocator::deallocate`
        unsafe { self.backend_alloc.deallocate(ptr, layout) }
    }

    // We do not use `backend_alloc.grow[_zeroed]/shrink` but instead use the
    // default implementation from `std` (actually `core`), so our zeroizing
    // `deallocate` is used. This can degrade performance for 'smart' allocators
    // that would try to reuse the same allocation for such reallocations.
    // This is the only safe and secure behaviour we can provide when using an
    // arbitrary backend allocator.
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::allocator_api::{Box, Vec};
    use std::alloc::System;

    /// An 8-byte boxed array must round-trip through the zeroizing allocator.
    #[test]
    fn box_allocation_8b() {
        let zeroizing = ZeroizeAlloc::new(System);
        let _boxed = Box::new_in([1u8; 8], &zeroizing);
        // `_boxed` is dropped here, then `zeroizing`
    }

    /// A 9-byte (non-power-of-two) boxed array must round-trip as well.
    #[test]
    fn box_allocation_9b() {
        let zeroizing = ZeroizeAlloc::new(System);
        let _boxed = Box::new_in([1u8; 9], &zeroizing);
        // `_boxed` is dropped here, then `zeroizing`
    }

    /// Zero-sized allocations must be handled without touching the backend.
    #[test]
    fn box_allocation_zst() {
        let zeroizing = ZeroizeAlloc::new(System);
        let _boxed = Box::new_in([(); 8], &zeroizing);
        // `_boxed` is dropped here, then `zeroizing`
    }

    /// A `Vec` backed by the zeroizing allocator must allocate and drop.
    #[test]
    fn vec_allocation_9b() {
        let zeroizing = ZeroizeAlloc::new(System);
        let _buffer = Vec::<u8, _>::with_capacity_in(9, &zeroizing);
        // `_buffer` is dropped here, then `zeroizing`
    }

    /// Repeated growth exercises the realloc path (default grow → our
    /// zeroizing deallocate).
    #[test]
    fn vec_allocation_grow_repeated() {
        let zeroizing = ZeroizeAlloc::new(System);
        let mut buffer = Vec::<u8, _>::with_capacity_in(9, &zeroizing);
        buffer.reserve(1);
        buffer.reserve(7);
        // `buffer` is dropped here, then `zeroizing`
    }

    /// Shrinking exercises the shrink path (default shrink → our zeroizing
    /// deallocate).
    #[test]
    fn vec_allocation_shrink() {
        let zeroizing = ZeroizeAlloc::new(System);
        let mut buffer = Vec::<u8, _>::with_capacity_in(9, &zeroizing);
        buffer.push(255);
        buffer.shrink_to_fit();
        // `buffer` is dropped here, then `zeroizing`
    }

    /// `allocate_zeroed` must hand back memory that reads as all zero bytes.
    #[test]
    fn allocate_zeroed() {
        let zeroizing = ZeroizeAlloc::new(System);
        let layout = Layout::new::<[u8; 16]>();
        let allocation = zeroizing
            .allocate_zeroed(layout)
            .expect("allocation failed");
        let base = allocation.as_ptr() as *const u8;
        for offset in 0..16 {
            // SAFETY: `offset` stays within the 16 bytes just allocated
            let byte: u8 = unsafe { base.add(offset).read() };
            assert_eq!(byte, 0_u8);
        }
        unsafe {
            zeroizing.deallocate(allocation.cast(), layout);
        }
    }
}