use crate::cell::{RootCell, RootObj};
use crate::result::Result;
use crate::stm::{journal::*, Chaperon, Log};
use crate::{as_mut, PSafe, TxInSafe, TxOutSafe};
use std::collections::HashMap;
use std::fs::OpenOptions;
use std::ops::Range;
use std::panic::UnwindSafe;
use std::path::Path;
use std::thread::ThreadId;
use std::{alloc::Layout, mem, ptr};
/// Default pool image size (8 MiB), used by `apply_flags` when the caller
/// supplies no size flag.
pub const DEFAULT_POOL_SIZE: u64 = 8 * 1024 * 1024;
/// Flags accepted by the pool `open*`/`apply_flags` functions.
pub mod open_flags {
/// Create the pool image file (unconditionally re-creates it).
pub const O_C: u32 = 0x00000001;
/// Format the pool image after creation.
pub const O_F: u32 = 0x00000002;
/// Create the pool image file only if it does not already exist.
pub const O_CNE: u32 = 0x00000004;
/// Create and format.
pub const O_CF: u32 = O_C | O_F;
/// Create-if-missing and format.
pub const O_CFNE: u32 = O_CNE | O_F;
// Size flags: bits 4 and above encode the pool capacity in GiB —
// `apply_flags` computes `flags >> 4` and shifts the result left by 30.
pub const O_1GB: u32 = 0x00000010;
pub const O_2GB: u32 = 0x00000020;
pub const O_4GB: u32 = 0x00000040;
pub const O_8GB: u32 = 0x00000080;
pub const O_16GB: u32 = 0x00000100;
pub const O_32GB: u32 = 0x00000200;
pub const O_64GB: u32 = 0x00000400;
pub const O_128GB: u32 = 0x00000800;
pub const O_256GB: u32 = 0x00001000;
pub const O_512GB: u32 = 0x00002000;
pub const O_1TB: u32 = 0x00004000;
pub const O_2TB: u32 = 0x00008000;
pub const O_4TB: u32 = 0x00010000;
pub const O_8TB: u32 = 0x00020000;
pub const O_16TB: u32 = 0x00040000;
pub const O_32TB: u32 = 0x00080000;
pub const O_64TB: u32 = 0x00100000;
}
pub use open_flags::*;
/// Pool metadata flag bit; the name suggests it marks an image that already
/// contains a root object — confirm against the pool-header layout (its use
/// is not visible in this file).
pub const FLAG_HAS_ROOT: u64 = 0x0000_0001;
/// Declares a `static mut Option<&'static mut $ty>` named `$id`, initialized
/// to `None`; used to hold the singleton inner object of an open pool.
#[macro_export]
macro_rules! static_inner_object {
($id:ident, $ty:ty) => {
static mut $id: Option<&'static mut $ty> = None;
};
}
/// Accesses a pool singleton declared with `static_inner_object!`: binds the
/// inner object mutably to `$inner` and evaluates `$body`, panicking with
/// "No memory pool is open" when the static is still `None`.
#[macro_export]
macro_rules! static_inner {
($id:ident, $inner:ident, $body:block) => {
// The expansion is `unsafe` because it mutably borrows a `static mut`;
// callers are responsible for ensuring exclusive access.
unsafe {
if let Some($inner) = &mut $id {
$body
} else {
panic!("No memory pool is open");
}
}
};
}
/// Low-level interface of a persistent memory pool.
///
/// An implementor supplies the primitive allocator hooks ([`pre_alloc`],
/// [`pre_dealloc`], [`pre_realloc`]) and the pool's mapped address range;
/// the trait's default methods build offset/pointer translation, checked
/// dereferencing, transactional allocation, and journal management on top
/// of them.
///
/// # Safety
///
/// Implementations must guarantee that `rng()` (equivalently `start()` and
/// `end()`) reports the exact mapped address range of the pool and that the
/// allocator hooks uphold their contracts; most default methods convert raw
/// offsets into references with caller-chosen lifetimes and are unsound
/// otherwise.
///
/// [`pre_alloc`]: MemPool::pre_alloc
/// [`pre_dealloc`]: MemPool::pre_dealloc
/// [`pre_realloc`]: MemPool::pre_realloc
pub unsafe trait MemPool
where
    Self: 'static + Sized,
{
    /// Opens the pool image at `_path` without constructing a root object.
    fn open_no_root(_path: &str, _flags: u32) -> Result<Self> {
        unimplemented!()
    }

    /// Closes the currently open pool.
    ///
    /// # Safety
    ///
    /// The caller must ensure no references into the pool outlive the close.
    unsafe fn close() -> Result<()> {
        unimplemented!()
    }

    /// Opens the pool image at `_path` and returns its root object of type
    /// `U`, wrapped in a [`RootCell`].
    fn open<'a, U: 'a + PSafe + RootObj<Self>>(
        _path: &str,
        _flags: u32,
    ) -> Result<RootCell<'a, U, Self>> {
        unimplemented!()
    }

    /// Reformats the pool image at `_path`.
    ///
    /// # Safety
    ///
    /// Erases the image's contents; any data in it is lost.
    unsafe fn format(_path: &str) -> Result<()> {
        unimplemented!()
    }

    /// Validates `flags` and creates and/or formats the image file at
    /// `path` accordingly.
    ///
    /// Size flags occupy bits 4 and above (`O_1GB`..`O_64TB`); at most one
    /// may be set, and only together with a create flag. Without a size
    /// flag the file defaults to [`DEFAULT_POOL_SIZE`].
    ///
    /// # Errors
    ///
    /// Returns an error when multiple size flags are set, when a size flag
    /// is given without a create flag, or when file creation/formatting
    /// fails.
    unsafe fn apply_flags(path: &str, flags: u32) -> Result<()> {
        // Bits >= 4 encode the pool size in GiB (O_1GB == 0x10 -> 1).
        let mut size: u64 = flags as u64 >> 4;
        if size.count_ones() > 1 {
            return Err("Cannot have multiple size flags".to_string());
        } else if size == 0 {
            size = DEFAULT_POOL_SIZE;
        } else {
            if flags & (O_C | O_CNE) == 0 {
                return Err("Cannot use size flag without a create flag".to_string());
            }
            size <<= 30; // GiB -> bytes
        }
        // NOTE(review): when no create flag is given, this requests a format
        // only if the file does NOT exist (and O_F is set), so a bare O_F on
        // an existing image never formats it — confirm this is intended.
        let mut format = !Path::new(path).exists() && ((flags & O_F) != 0);
        if ((flags & O_C) != 0) || ((flags & O_CNE != 0) && !Path::new(path).exists()) {
            create_file(path, size)?;
            format = (flags & O_F) != 0;
        }
        if format {
            Self::format(path)?;
        }
        Ok(())
    }

    /// Returns `true` if the `_len` bytes at offset `_off` lie inside an
    /// allocated region. The default implementation accepts everything.
    #[inline]
    fn allocated(_off: u64, _len: usize) -> bool {
        true
    }

    /// Converts a raw pointer into an offset from the pool base, without
    /// checking that it points inside the pool.
    #[inline]
    unsafe fn off_unchecked<T: ?Sized>(x: *const T) -> u64 {
        (x as *const u8 as u64) - Self::start()
    }

    /// Converts offset `off` into a shared reference with a caller-chosen
    /// lifetime. Bounds are only asserted in debug builds or with the
    /// `access_violation_check` feature.
    #[inline]
    unsafe fn get_unchecked<'a, T: 'a + ?Sized>(off: u64) -> &'a T {
        // Reinterpret the absolute address (base + off) as a reference.
        union U<'b, K: 'b + ?Sized> {
            off: u64,
            raw: &'b K,
        }
        #[cfg(any(feature = "access_violation_check", debug_assertions))]
        assert!(Self::allocated(off, 1), "Bad address (0x{:x})", off);
        U { off: Self::start() + off }.raw
    }

    /// Converts offset `off` into a mutable reference with a caller-chosen
    /// lifetime. Bounds are only asserted in debug builds or with the
    /// `access_violation_check` feature.
    #[inline]
    #[track_caller]
    unsafe fn get_mut_unchecked<'a, T: 'a + ?Sized>(off: u64) -> &'a mut T {
        union U<'b, K: 'b + ?Sized> {
            off: u64,
            raw: &'b mut K,
        }
        #[cfg(any(feature = "access_violation_check", debug_assertions))]
        assert!(Self::allocated(off, 1), "Bad address (0x{:x})", off);
        U { off: Self::start() + off }.raw
    }

    /// Converts offset `off` into a shared slice of `len` elements; a zero
    /// `len` yields an empty slice regardless of `off`.
    #[inline]
    unsafe fn deref_slice_unchecked<'a, T: 'a>(off: u64, len: usize) -> &'a [T] {
        if len == 0 {
            &[]
        } else {
            union U<'b, K: 'b> {
                off: u64,
                raw: &'b K,
            }
            let ptr = U {
                off: Self::start() + off,
            }
            .raw;
            let res = std::slice::from_raw_parts(ptr, len);
            // Pass the format arguments directly: `assert!(cond, format!(..))`
            // is deprecated and rejected in edition 2021.
            #[cfg(any(feature = "access_violation_check", debug_assertions))]
            assert!(
                Self::allocated(off, mem::size_of::<T>() * len),
                "Bad address (0x{:x}..0x{:x})",
                off,
                off + (mem::size_of::<T>() * len) as u64 - 1
            );
            res
        }
    }

    /// Converts offset `off` into a mutable slice of `len` elements; a zero
    /// `len` yields an empty slice regardless of `off`.
    #[inline]
    unsafe fn deref_slice_unchecked_mut<'a, T: 'a>(off: u64, len: usize) -> &'a mut [T] {
        if len == 0 {
            &mut []
        } else {
            union U<'b, K: 'b> {
                off: u64,
                raw: &'b mut K,
            }
            let ptr = U {
                off: Self::start() + off,
            }
            .raw;
            let res = std::slice::from_raw_parts_mut(ptr, len);
            #[cfg(any(feature = "access_violation_check", debug_assertions))]
            assert!(
                Self::allocated(off, mem::size_of::<T>() * len),
                "Bad address (0x{:x}..0x{:x})",
                off,
                off + (mem::size_of::<T>() * len) as u64 - 1
            );
            res
        }
    }

    /// Checked version of [`get_unchecked`](MemPool::get_unchecked):
    /// errors when the target range is not allocated.
    #[inline]
    unsafe fn deref<'a, T: 'a>(off: u64) -> Result<&'a T> {
        if Self::allocated(off, mem::size_of::<T>()) {
            Ok(Self::get_unchecked(off))
        } else {
            Err(format!("Bad address (0x{:x})", off))
        }
    }

    /// Checked version of [`get_mut_unchecked`](MemPool::get_mut_unchecked):
    /// errors when the target range is not allocated.
    #[inline]
    unsafe fn deref_mut<'a, T: 'a>(off: u64) -> Result<&'a mut T> {
        if Self::allocated(off, mem::size_of::<T>()) {
            Ok(Self::get_mut_unchecked(off))
        } else {
            Err(format!("Bad address (0x{:x})", off))
        }
    }

    /// Converts a pointer into a pool offset, erroring when the pointer's
    /// start address lies outside the pool range.
    #[inline]
    fn off<T: ?Sized>(x: *const T) -> Result<u64> {
        if Self::valid(unsafe { &*x }) {
            Ok(x as *const u8 as u64 - Self::start())
        } else {
            Err("out of valid range".to_string())
        }
    }

    /// Returns the pool's mapped virtual-address range.
    fn rng() -> Range<u64> {
        Self::start()..Self::end()
    }

    /// Base address of the pool mapping.
    #[inline]
    fn start() -> u64 {
        Self::rng().start
    }

    /// One-past-the-end address of the pool mapping.
    #[inline]
    fn end() -> u64 {
        Self::rng().end
    }

    /// Total pool capacity in bytes.
    fn size() -> usize {
        unimplemented!()
    }

    /// Remaining free bytes in the pool.
    fn available() -> usize {
        unimplemented!()
    }

    /// Bytes currently in use (`size - available`).
    fn used() -> usize {
        Self::size() - Self::available()
    }

    /// Returns `true` if `p`'s address lies inside the pool range.
    /// Note: only the first byte's address is checked, not the value's
    /// full extent.
    #[inline]
    fn valid<T: ?Sized>(p: &T) -> bool {
        let rng = Self::rng();
        let start = p as *const T as *const u8 as u64;
        start >= rng.start && start < rng.end
    }

    /// Returns `true` if the raw address `addr` lies inside the pool range.
    #[inline]
    fn contains(addr: u64) -> bool {
        let rng = Self::rng();
        addr >= rng.start && addr < rng.end
    }

    /// Allocates `size` bytes: reserves via [`pre_alloc`](MemPool::pre_alloc),
    /// registers a drop-on-failure record, then makes the allocation
    /// durable via [`perform`](MemPool::perform). Returns
    /// `(pointer, offset, actual length)`.
    #[inline]
    #[track_caller]
    unsafe fn alloc(size: usize) -> (*mut u8, u64, usize) {
        let (p, off, len) = Self::pre_alloc(size);
        Self::drop_on_failure(off, len);
        Self::perform();
        (p, off, len)
    }

    /// Deallocates `size` bytes at `ptr` and makes the change durable.
    #[inline]
    #[track_caller]
    unsafe fn dealloc(ptr: *mut u8, size: usize) {
        Self::pre_dealloc(ptr, size);
        Self::perform();
    }

    /// Primitive allocation hook: reserves `size` bytes and returns
    /// `(pointer, offset, actual length)` without committing.
    unsafe fn pre_alloc(size: usize) -> (*mut u8, u64, usize);

    /// Primitive deallocation hook: releases `size` bytes at `ptr`
    /// without committing.
    unsafe fn pre_dealloc(ptr: *mut u8, size: usize);

    /// Primitive reallocation hook: tries to resize `*ptr` in place or
    /// move it; returns `true` on success.
    unsafe fn pre_realloc(ptr: *mut *mut u8, size: usize, new_size: usize) -> bool;

    /// Atomically logs a 64-bit store of `_val` into `_obj`.
    unsafe fn log64(_obj: *const u64, _val: u64) {
        unimplemented!()
    }

    /// Marks the `_len` bytes at `_off` for reclamation if the enclosing
    /// operation fails before completion. Default: no-op.
    unsafe fn drop_on_failure(_off: u64, _len: usize) {}

    /// Commits all pending low-level operations. Default: no-op.
    unsafe fn perform() { }

    /// Discards all pending low-level operations. Default: no-op.
    unsafe fn discard() { }

    /// Allocates `size` bytes and zero-fills them; returns null on failure.
    unsafe fn alloc_zeroed(size: usize) -> *mut u8 {
        let (ptr, _, _) = Self::alloc(size);
        if !ptr.is_null() {
            std::ptr::write_bytes(ptr, 0, size);
        }
        ptr
    }

    /// Default reallocation: allocate-new, copy, free-old. Updates `*ptr`
    /// and returns `true` on success; leaves `*ptr` untouched and returns
    /// `false` when the new allocation fails.
    unsafe fn realloc(ptr: *mut *mut u8, size: usize, new_size: usize) -> bool {
        let (new_ptr, _, _) = Self::alloc(new_size);
        if !new_ptr.is_null() {
            std::ptr::copy_nonoverlapping(*ptr, new_ptr, std::cmp::min(size, new_size));
            Self::dealloc(*ptr, size);
            *ptr = new_ptr;
            true
        } else {
            false
        }
    }

    /// Allocates and initializes a `T` in the pool, recording a
    /// drop-on-failure log in journal `j` so the allocation is reclaimed
    /// if the transaction does not complete.
    ///
    /// # Panics
    ///
    /// Panics (via [`atomic_new`](MemPool::atomic_new)) when memory is
    /// exhausted; debug-asserts that `T` is not zero-sized.
    unsafe fn new<'a, T: PSafe + 'a>(x: T, j: &Journal<Self>) -> &'a mut T {
        debug_assert!(mem::size_of::<T>() != 0, "Cannot allocate ZST");
        let mut log = Log::drop_on_failure(u64::MAX, 1, j);
        let (p, off, len) = Self::atomic_new(x);
        log.set(off, len);
        Self::perform();
        p
    }

    /// Allocates a copy of slice `x` in the pool, recording a
    /// drop-on-abort log in the journal.
    unsafe fn new_slice<'a, T: PSafe + 'a>(x: &'a [T], _journal: &Journal<Self>) -> &'a mut [T] {
        debug_assert!(mem::size_of::<T>() != 0, "Cannot allocate ZST");
        debug_assert!(!x.is_empty(), "Cannot allocate empty slice");
        let mut log = Log::drop_on_abort(u64::MAX, 1, _journal);
        let (p, off, size) = Self::atomic_new_slice(x);
        log.set(off, size);
        Self::perform();
        p
    }

    /// Allocates and initializes a `T` without journaling; returns
    /// `(reference, offset, size)`.
    ///
    /// # Panics
    ///
    /// Panics with "Memory exhausted" when allocation fails.
    unsafe fn atomic_new<'a, T: 'a>(x: T) -> (&'a mut T, u64, usize) {
        union U<'b, K: 'b + ?Sized> {
            raw: *mut u8,
            rf: &'b mut K,
        }
        #[cfg(feature = "verbose")]
        println!("  ALLOC  TYPE: {}", std::any::type_name::<T>());
        let size = mem::size_of::<T>();
        let (raw, off, len) = Self::pre_alloc(size);
        if raw.is_null() {
            panic!("Memory exhausted");
        }
        Self::drop_on_failure(off, len);
        let p = U { raw }.rf;
        // Move `x` into place; forget the (uninitialized) previous contents
        // returned by `replace` so they are never dropped.
        mem::forget(ptr::replace(p, x));
        (p, off, size)
    }

    /// Allocates an uninitialized buffer for a copy of `x` and byte-copies
    /// the slice into it; returns `(slice, offset, size)`.
    ///
    /// # Panics
    ///
    /// Panics with "Memory exhausted" when allocation fails.
    unsafe fn atomic_new_slice<'a, T: 'a + PSafe>(x: &'a [T]) -> (&'a mut [T], u64, usize) {
        #[cfg(feature = "verbose")]
        println!(
            "  ALLOC  TYPE: [{}; {}]",
            std::any::type_name::<T>(),
            x.len()
        );
        let (ptr, off, size) = Self::pre_alloc(Layout::for_value(x).size());
        if ptr.is_null() {
            panic!("Memory exhausted");
        }
        Self::drop_on_failure(off, size);
        ptr::copy_nonoverlapping(
            x as *const _ as *const u8,
            ptr,
            x.len() * mem::size_of::<T>(),
        );
        (
            std::slice::from_raw_parts_mut(ptr.cast(), x.len()),
            off,
            size,
        )
    }

    /// Allocates an uninitialized `T` in the pool, journaled with a
    /// drop-on-failure log.
    unsafe fn new_uninit<'a, T: PSafe + 'a>(j: &Journal<Self>) -> &'a mut T {
        let mut log = Log::drop_on_failure(u64::MAX, 1, j);
        let (p, off, size) = Self::atomic_new_uninit();
        log.set(off, size);
        Self::perform();
        p
    }

    /// Allocates `size` uninitialized bytes, journaled with a
    /// drop-on-abort log.
    ///
    /// # Panics
    ///
    /// Panics with "Memory exhausted" when allocation fails.
    unsafe fn new_uninit_for_layout(size: usize, journal: &Journal<Self>) -> *mut u8 {
        #[cfg(feature = "verbose")]
        println!("  ALLOC  {:?}", size);
        let mut log = Log::drop_on_abort(u64::MAX, 1, journal);
        let (p, off, len) = Self::pre_alloc(size);
        if p.is_null() {
            panic!("Memory exhausted");
        }
        log.set(off, len);
        Self::perform();
        p
    }

    /// Allocates an uninitialized `T` without journaling; returns
    /// `(reference, offset, length)`.
    ///
    /// # Panics
    ///
    /// Panics with "Memory exhausted" when allocation fails.
    unsafe fn atomic_new_uninit<'a, T: 'a>() -> (&'a mut T, u64, usize) {
        union U<'b, K: 'b + ?Sized> {
            ptr: *mut u8,
            rf: &'b mut K,
        }
        let (ptr, off, len) = Self::pre_alloc(mem::size_of::<T>());
        if ptr.is_null() {
            panic!("Memory exhausted");
        }
        Self::drop_on_failure(off, len);
        (U { ptr }.rf, off, len)
    }

    /// Allocates uninitialized space sized for the value `x` (without
    /// copying it) and returns it as a `&mut T`.
    ///
    /// # Panics
    ///
    /// Panics with "Memory exhausted" when allocation fails.
    unsafe fn alloc_for_value<'a, T: ?Sized>(x: &T) -> &'a mut T {
        union U<'b, K: 'b + ?Sized> {
            raw: *mut u8,
            rf: &'b mut K,
        }
        let raw = Self::alloc(mem::size_of_val(x));
        if raw.0.is_null() {
            panic!("Memory exhausted");
        }
        U { raw: raw.0 }.rf
    }

    /// Schedules `x` for deallocation through the current journal: logged
    /// as drop-on-abort while a panic is unwinding, otherwise as
    /// drop-on-commit (the actual free is deferred to the journal).
    unsafe fn free<'a, T: PSafe + ?Sized>(x: &mut T) {
        let off = Self::off_unchecked(x);
        let len = mem::size_of_val(x);
        if std::thread::panicking() {
            Log::drop_on_abort(off, len, &mut Journal::<Self>::current(true).unwrap().0);
        } else {
            Log::drop_on_commit(off, len, &mut Journal::<Self>::current(true).unwrap().0);
        }
    }

    /// Schedules a slice for deallocation on commit; empty slices are
    /// ignored.
    unsafe fn free_slice<'a, T: PSafe>(x: &mut [T]) {
        if x.len() > 0 {
            let off = Self::off_unchecked(x);
            Log::drop_on_commit(
                off,
                x.len() * mem::size_of::<T>(),
                &mut Journal::<Self>::current(true).unwrap().0,
            );
        }
    }

    /// Immediately releases `x`'s storage without any journaling.
    unsafe fn free_nolog<'a, T: ?Sized>(x: &T) {
        Self::pre_dealloc(x as *const _ as *mut u8, mem::size_of_val(x))
    }

    /// Runs `f` under the pool's concurrency guard. Default: no guarding,
    /// just calls `f`.
    unsafe fn guarded<T, F: FnOnce() -> T>(f: F) -> T {
        f()
    }

    /// Creates a journal for thread `_tid`. Default: no-op.
    unsafe fn new_journal(_tid: ThreadId) { }

    /// Releases `_journal`'s resources. Default: no-op.
    unsafe fn drop_journal(_journal: &mut Journal<Self>) { }

    /// Returns the per-thread journal table; each entry pairs the journal
    /// with its transaction-nesting counter.
    unsafe fn journals() -> &'static mut HashMap<ThreadId, (&'static Journal<Self>, i32)> {
        unimplemented!()
    }

    /// Recovers the pool to a consistent state after a crash.
    unsafe fn recover() {
        unimplemented!()
    }

    /// Leaves one nesting level of the current transaction; when the
    /// outermost level is reached (counter hits 0), commits and clears the
    /// thread's journal.
    #[inline]
    unsafe fn commit() {
        if let Some(journal) = Journal::<Self>::current(false) {
            journal.1 -= 1;
            if journal.1 == 0 {
                #[cfg(feature = "verbose")]
                println!("{:?}", journal.0);
                let journal = as_mut(journal.0);
                journal.commit();
                journal.clear();
            }
        }
    }

    /// Commits the outermost transaction without clearing the journal
    /// (and without decrementing the nesting counter); used by chaperoned
    /// sessions that clear separately.
    #[inline]
    unsafe fn commit_no_clear() {
        if let Some(journal) = Journal::<Self>::current(false) {
            if journal.1 == 1 {
                #[cfg(feature = "verbose")]
                println!("{:?}", journal.0);
                as_mut(journal.0).commit();
            }
        }
    }

    /// Leaves one nesting level and clears the journal at the outermost
    /// level, without committing or rolling back.
    #[inline]
    unsafe fn clear() {
        if let Some(journal) = Journal::<Self>::current(false) {
            journal.1 -= 1;
            if journal.1 == 0 {
                #[cfg(feature = "verbose")]
                println!("{:?}", journal.0);
                as_mut(journal.0).clear();
            }
        }
    }

    /// Leaves one nesting level; at the outermost level rolls back and
    /// clears the journal. A rollback attempted inside a nested level
    /// escalates with a panic, which propagates the abort outwards.
    #[inline]
    unsafe fn rollback() {
        if let Some(journal) = Journal::<Self>::current(false) {
            journal.1 -= 1;
            if journal.1 == 0 {
                #[cfg(feature = "verbose")]
                println!("{:?}", journal.0);
                let journal = as_mut(journal.0);
                journal.rollback();
                journal.clear();
            } else {
                panic!("Unsuccessful nested transaction");
            }
        }
    }

    /// Rolls back the outermost transaction without clearing the journal;
    /// panics when invoked from a nested level.
    #[inline]
    unsafe fn rollback_no_clear() {
        if let Some(journal) = Journal::<Self>::current(false) {
            if journal.1 == 1 {
                #[cfg(feature = "verbose")]
                println!("{:?}", journal.0);
                as_mut(journal.0).rollback();
            } else {
                panic!("Unsuccessful nested transaction");
            }
        }
    }

    /// Runs `body` in a transaction: commits on normal return, rolls back
    /// (returning `Err`) if `body` panics.
    ///
    /// When a [`Chaperon`] session is active, commit/rollback/clear are
    /// postponed to the chaperon, and a panic escalates instead of being
    /// converted to an `Err`. Transactions nest via the per-journal counter
    /// (`j.1`); only the outermost level actually commits or rolls back.
    #[inline]
    fn transaction<T, F: FnOnce(&Journal<Self>) -> T>(body: F) -> Result<T>
    where
        F: TxInSafe + UnwindSafe,
        T: TxOutSafe,
    {
        // `chaperoned` is written through a raw pointer from inside the
        // closure so the outcome handling below can see it.
        let mut chaperoned = false;
        let cptr = &mut chaperoned as *mut bool;
        let res = std::panic::catch_unwind(move || {
            let chaperon = Chaperon::current();
            if let Some(ptr) = chaperon {
                unsafe {
                    *cptr = true;
                    let mut chaperon = &mut *ptr;
                    // Defer the transaction outcome to the chaperon session.
                    chaperon.postpone(
                        &|| Self::commit_no_clear(),
                        &|| Self::rollback_no_clear(),
                        &|| Self::clear(),
                    );
                    body({
                        let j = Journal::<Self>::current(true).unwrap();
                        j.1 += 1; // enter one nesting level
                        let journal = as_mut(j.0);
                        journal.start_session(&mut chaperon);
                        journal.reset(JOURNAL_COMMITTED);
                        journal
                    })
                }
            } else {
                body({
                    let j = Journal::<Self>::current(true).unwrap();
                    j.1 += 1; // enter one nesting level
                    as_mut(j.0).reset(JOURNAL_COMMITTED);
                    j.0
                })
            }
        });
        unsafe {
            if let Ok(res) = res {
                if !chaperoned {
                    Self::commit();
                }
                Ok(res)
            } else {
                if !chaperoned {
                    Self::rollback();
                    Err("Unsuccessful transaction".to_string())
                } else {
                    // The chaperon owns the outcome; a panic here must not
                    // be swallowed into an Err.
                    panic!("Unsuccessful chaperoned transaction");
                }
            }
        }
    }

    /// Returns a generation counter for the pool; the default is always 0.
    // NOTE(review): `gen` becomes a reserved keyword in edition 2024.
    fn gen() -> u32 {
        0
    }

    /// Prints implementation-specific pool statistics. Default: no-op.
    fn print_info() {}

    /// Returns the pool's memory footprint; default is 0.
    #[cfg(feature = "capture_footprint")]
    fn footprint() -> usize {
        0
    }
}
/// Creates (or reuses) the pool image file at `filename` and resizes it to
/// exactly `size` bytes via `set_len`.
///
/// # Errors
///
/// Any I/O error from opening or resizing is converted to its display
/// string, matching the crate's string-error `Result` alias.
pub(crate) fn create_file(filename: &str, size: u64) -> Result<()> {
    let file = OpenOptions::new()
        .write(true)
        .create(true)
        .open(filename)
        .map_err(|e| e.to_string())?;
    file.set_len(size).map_err(|e| e.to_string())?;
    Ok(())
}
#[cfg(test)]
mod test {
use crate::default::*;
/// Checks panic propagation through nested transactions: a panic in the
/// innermost body must abort every enclosing level (none of the
/// "should not print" lines may run) and surface as an `Err` from the
/// outermost `transaction` call.
#[test]
#[ignore]
fn nested_transactions() {
let _image = BuddyAlloc::open_no_root("nosb.pool", O_CFNE);
if let Err(e) = BuddyAlloc::transaction(|_| {
let _ = BuddyAlloc::transaction(|_| {
let _ = BuddyAlloc::transaction(|_| {
let _ = BuddyAlloc::transaction(|_| {
println!("should print");
// Aborts the innermost transaction; rollback at a nested level
// re-panics, unwinding every outer level in turn.
panic!("intentional");
});
println!("should not print");
});
println!("should not print");
});
println!("should not print");
}) {
println!("Error: '{}'", e);
}
}
}