use std::marker::PhantomData;
use std::ops::Bound;
use crate::Key;
use crate::compaction::CompactionIndex;
use crate::config::Config;
use crate::const_tree::{ConstIter, ConstShard, ConstTree};
use crate::disk_loc::DiskLoc;
use crate::durability::{Bitcask, Durability, Fixed};
use crate::error::{DbError, DbResult};
use crate::fixed::config::FixedConfig;
use crate::hook::{NoHook, TypedWriteHook, ZeroHookAdapter};
use crate::key::Location;
/// Typed, fixed-size-value wrapper around [`ConstTree`].
///
/// `V` is the stored value size in bytes; `T` is the `Copy` type whose bytes
/// are stored there (defaulting to the raw `[u8; V]` itself). Conversions in
/// both directions are raw pointer reads, so every constructor asserts
/// `size_of::<T>() == V` at compile time.
pub struct ZeroTree<
K: Key,
const V: usize,
T: Copy = [u8; V],
H: TypedWriteHook<K, T> = NoHook,
D: Durability = Bitcask,
> {
// Untyped tree operating on raw `[u8; V]` values; write-hook calls are
// routed through `ZeroHookAdapter` so `H` observes typed `T` values.
inner: ConstTree<K, V, ZeroHookAdapter<K, T, H>, D>,
// Ties `T` into the type without storing one.
_marker: PhantomData<T>,
}
impl<K: Key, const V: usize, T: Copy> ZeroTree<K, V, T, NoHook, Bitcask> {
    /// Opens a Bitcask-backed tree at `path` with no write hook installed.
    ///
    /// Fails to compile if `T` does not occupy exactly `V` bytes, which the
    /// zero-copy value conversions rely on.
    pub fn open(path: impl AsRef<std::path::Path>, config: Config) -> DbResult<Self> {
        const { assert!(size_of::<T>() == V) }
        Ok(Self {
            inner: ConstTree::open_hooked(
                path,
                config,
                ZeroHookAdapter {
                    inner: NoHook,
                    _marker: PhantomData,
                },
            )?,
            _marker: PhantomData,
        })
    }
}
impl<K: Key, const V: usize, T: Copy, H: TypedWriteHook<K, T>> ZeroTree<K, V, T, H, Bitcask> {
pub fn open_hooked(
path: impl AsRef<std::path::Path>,
config: Config,
hook: H,
) -> DbResult<Self> {
const { assert!(size_of::<T>() == V) }
let adapter = ZeroHookAdapter {
inner: hook,
_marker: PhantomData,
};
Ok(Self {
inner: ConstTree::open_hooked(path, config, adapter)?,
_marker: PhantomData,
})
}
pub fn close(self) -> DbResult<()> {
self.inner.close()
}
pub fn flush_buffers(&self) -> DbResult<()> {
self.inner.flush_buffers()
}
pub fn config(&self) -> &Config {
self.inner.config()
}
pub fn compact(&self) -> DbResult<usize> {
self.inner.compact()
}
pub fn sync_hints(&self) -> DbResult<()> {
self.inner.sync_hints()
}
pub fn migrate(&self, f: impl Fn(&K, &T) -> crate::MigrateAction<T>) -> DbResult<usize> {
self.inner.migrate(|key, bytes| {
let val: T = from_value_bytes(bytes);
match f(key, &val) {
crate::MigrateAction::Keep => crate::MigrateAction::Keep,
crate::MigrateAction::Update(new) => {
crate::MigrateAction::Update(to_bytes::<V, T>(&new))
}
crate::MigrateAction::Delete => crate::MigrateAction::Delete,
}
})
}
pub(crate) fn replay_init(&self) {
self.inner.replay_init();
}
pub fn as_inner(&self) -> &ConstTree<K, V, ZeroHookAdapter<K, T, H>, Bitcask> {
&self.inner
}
}
impl<K: Key, const V: usize, T: Copy> ZeroTree<K, V, T, NoHook, Fixed> {
    /// Opens a `Fixed`-durability tree with no write hook installed.
    ///
    /// Fails to compile unless `T` is exactly `V` bytes.
    pub fn open(path: impl AsRef<std::path::Path>, config: FixedConfig) -> DbResult<Self> {
        const { assert!(size_of::<T>() == V) }
        Ok(Self {
            inner: ConstTree::open_with_hook(
                path,
                config,
                ZeroHookAdapter {
                    inner: NoHook,
                    _marker: PhantomData,
                },
            )?,
            _marker: PhantomData,
        })
    }
}
impl<K: Key, const V: usize, T: Copy, H: TypedWriteHook<K, T>> ZeroTree<K, V, T, H, Fixed> {
    /// Opens a `Fixed`-durability tree, installing `hook` to receive typed
    /// write callbacks. Fails to compile unless `T` is exactly `V` bytes.
    pub fn open_with_hook(
        path: impl AsRef<std::path::Path>,
        config: FixedConfig,
        hook: H,
    ) -> DbResult<Self> {
        const { assert!(size_of::<T>() == V) }
        let wrapped = ZeroHookAdapter {
            inner: hook,
            _marker: PhantomData,
        };
        let tree = ConstTree::open_with_hook(path, config, wrapped)?;
        Ok(Self {
            inner: tree,
            _marker: PhantomData,
        })
    }

    /// Consumes the tree and closes the underlying store.
    pub fn close(self) -> DbResult<()> {
        self.inner.close()
    }
}
impl<K: Key, const V: usize, T: Copy, H: TypedWriteHook<K, T>, D: Durability>
    ZeroTree<K, V, T, H, D>
{
    /// Looks up `key`, decoding the stored bytes into `T`.
    pub fn get(&self, key: &K) -> Option<T> {
        let bytes = self.inner.get(key)?;
        Some(from_value_bytes::<V, T>(&bytes))
    }

    /// Like [`Self::get`], but a missing key becomes [`DbError::KeyNotFound`].
    pub fn get_or_err(&self, key: &K) -> DbResult<T> {
        self.get(key).ok_or(DbError::KeyNotFound)
    }

    /// Whether `key` is present (delegates to the inner tree).
    pub fn contains(&self, key: &K) -> bool {
        self.inner.contains(key)
    }

    /// The first entry reported by the inner tree, decoded to `T`.
    pub fn first(&self) -> Option<(K, T)> {
        self.inner
            .first()
            .map(|(k, v)| (k, from_value_bytes::<V, T>(&v)))
    }

    /// The last entry reported by the inner tree, decoded to `T`.
    pub fn last(&self) -> Option<(K, T)> {
        self.inner
            .last()
            .map(|(k, v)| (k, from_value_bytes::<V, T>(&v)))
    }

    /// Stores `value` under `key`; returns the previous value, if any.
    pub fn put(&self, key: &K, value: &T) -> DbResult<Option<T>> {
        let bytes = to_bytes::<V, T>(value);
        self.inner
            .put(key, &bytes)
            .map(|opt| opt.map(|b| from_value_bytes::<V, T>(&b)))
    }

    /// Stores `value` under `key` without reporting any prior value.
    pub fn insert(&self, key: &K, value: &T) -> DbResult<()> {
        let bytes = to_bytes::<V, T>(value);
        self.inner.insert(key, &bytes)
    }

    /// Removes `key`; returns the value that was stored, if any.
    pub fn delete(&self, key: &K) -> DbResult<Option<T>> {
        self.inner
            .delete(key)
            .map(|opt| opt.map(|b| from_value_bytes::<V, T>(&b)))
    }

    /// Compare-and-swap: the inner tree compares against the encoded
    /// `expected` bytes and writes `new_value`'s bytes on match.
    pub fn cas(&self, key: &K, expected: &T, new_value: &T) -> DbResult<()> {
        let exp_bytes = to_bytes::<V, T>(expected);
        let new_bytes = to_bytes::<V, T>(new_value);
        self.inner.cas(key, &exp_bytes, &new_bytes)
    }

    /// Applies `f` to the current value (decoded) and stores the result.
    /// Returned value is whatever the inner `update` reports, decoded to `T`
    /// — NOTE(review): whether that is the old or the new value depends on
    /// `ConstTree::update`'s contract; confirm there.
    pub fn update(&self, key: &K, f: impl FnOnce(&T) -> T) -> DbResult<Option<T>> {
        self.inner
            .update(key, |bytes| {
                let val = from_value_bytes::<V, T>(bytes);
                let new_val = f(&val);
                to_bytes::<V, T>(&new_val)
            })
            .map(|opt| opt.map(|b| from_value_bytes::<V, T>(&b)))
    }

    /// Like [`Self::update`] but via the inner tree's `fetch_update`; see
    /// `ConstTree::fetch_update` for which value is returned.
    pub fn fetch_update(&self, key: &K, f: impl FnOnce(&T) -> T) -> DbResult<Option<T>> {
        self.inner
            .fetch_update(key, |bytes| {
                let val = from_value_bytes::<V, T>(bytes);
                let new_val = f(&val);
                to_bytes::<V, T>(&new_val)
            })
            .map(|opt| opt.map(|b| from_value_bytes::<V, T>(&b)))
    }

    /// Runs `f` with mutable access to the shard that owns `shard_key`,
    /// presenting it through the typed [`ZeroShard`] view.
    pub fn atomic<R>(
        &self,
        shard_key: &K,
        f: impl FnOnce(&mut ZeroShard<'_, K, V, T, D>) -> DbResult<R>,
    ) -> DbResult<R> {
        self.inner.atomic(shard_key, |const_shard| {
            // SAFETY(review): `ZeroShard` is `#[repr(transparent)]` over
            // `ConstShard<'_, K, V, NoHook, D>`, but this cast starts from a
            // `ConstShard` parameterized with `ZeroHookAdapter<K, T, H>` —
            // i.e. it also erases the hook type. That is only sound if
            // `ConstShard`'s layout is independent of its hook parameter;
            // confirm against `ConstShard`'s definition.
            let shard = unsafe {
                &mut *(const_shard as *mut ConstShard<'_, K, V, ZeroHookAdapter<K, T, H>, D>
                    as *mut ZeroShard<'_, K, V, T, D>)
            };
            f(shard)
        })
    }

    /// Iterates entries whose key bytes start with `prefix`.
    pub fn prefix_iter(&self, prefix: &[u8]) -> ZeroIter<'_, K, V, T, D::Loc> {
        ZeroIter {
            inner: self.inner.prefix_iter(prefix),
            _marker: PhantomData,
        }
    }

    /// Iterates all entries.
    pub fn iter(&self) -> ZeroIter<'_, K, V, T, D::Loc> {
        ZeroIter {
            inner: self.inner.iter(),
            _marker: PhantomData,
        }
    }

    /// Iterates the half-open range `[start, end)`.
    pub fn range(&self, start: &K, end: &K) -> ZeroIter<'_, K, V, T, D::Loc> {
        self.range_bounds(Bound::Included(start), Bound::Excluded(end))
    }

    /// Iterates entries within arbitrary `Bound`s.
    pub fn range_bounds(&self, start: Bound<&K>, end: Bound<&K>) -> ZeroIter<'_, K, V, T, D::Loc> {
        ZeroIter {
            inner: self.inner.range_bounds(start, end),
            _marker: PhantomData,
        }
    }

    /// Number of entries (delegates to the inner tree).
    pub fn len(&self) -> usize {
        self.inner.len()
    }

    /// Whether the tree holds no entries.
    pub fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }

    /// Index of the shard that owns `key`.
    pub fn shard_for(&self, key: &K) -> usize {
        self.inner.shard_for(key)
    }

    /// Flushes the tree (delegates to the inner tree).
    pub fn flush(&self) -> DbResult<()> {
        self.inner.flush()
    }
}
impl<K: Key, const V: usize, T: Copy + Send + Sync, H: TypedWriteHook<K, T>> CompactionIndex<K>
for ZeroTree<K, V, T, H, Bitcask>
{
fn update_if_match(
&self,
key: &K,
old_loc: crate::disk_loc::DiskLoc,
new_loc: crate::disk_loc::DiskLoc,
) -> bool {
self.inner.update_if_match(key, old_loc, new_loc)
}
fn contains_key(&self, key: &K) -> bool {
self.contains(key)
}
}
/// Iterator over `(K, T)` pairs that decodes each raw `[u8; V]` value as it
/// is yielded.
pub struct ZeroIter<'a, K: Key, const V: usize, T = [u8; V], L: Location = DiskLoc> {
// Underlying byte-level iterator.
inner: ConstIter<'a, K, V, L>,
// Carries the typed-value parameter without storing one.
_marker: PhantomData<T>,
}
impl<'a, K: Key, const V: usize, T: Copy, L: Location> Iterator for ZeroIter<'a, K, V, T, L> {
    type Item = (K, T);

    /// Advances the inner iterator, decoding the raw value bytes into `T`.
    fn next(&mut self) -> Option<Self::Item> {
        match self.inner.next() {
            Some((key, raw)) => Some((key, from_value_bytes::<V, T>(&raw))),
            None => None,
        }
    }
}
impl<'a, K: Key, const V: usize, T: Copy, L: Location> DoubleEndedIterator
    for ZeroIter<'a, K, V, T, L>
{
    /// Advances from the back, decoding the raw value bytes into `T`.
    fn next_back(&mut self) -> Option<Self::Item> {
        match self.inner.next_back() {
            Some((key, raw)) => Some((key, from_value_bytes::<V, T>(&raw))),
            None => None,
        }
    }
}
/// Typed view of a shard, handed to the closure passed to `ZeroTree::atomic`.
///
/// NOTE(review): `#[repr(transparent)]` makes this layout-identical to
/// `ConstShard<'a, K, V, NoHook, D>`, and `ZeroTree::atomic` pointer-casts a
/// `ConstShard<.., ZeroHookAdapter<K, T, H>, ..>` to this type — erasing the
/// hook parameter. That cast is only sound if `ConstShard`'s layout does not
/// depend on its hook type; confirm against `ConstShard`'s definition.
#[repr(transparent)]
pub struct ZeroShard<'a, K: Key, const V: usize, T: Copy = [u8; V], D: Durability = Bitcask> {
// Untyped shard this view decodes values for.
inner: ConstShard<'a, K, V, NoHook, D>,
// Carries the typed-value parameter without storing one.
_marker: PhantomData<T>,
}
impl<K: Key, const V: usize, T: Copy, D: Durability> ZeroShard<'_, K, V, T, D> {
    /// Stores `value` under `key`; returns the previous value, if any.
    pub fn put(&mut self, key: &K, value: &T) -> DbResult<Option<T>> {
        let encoded = to_bytes::<V, T>(value);
        let previous = self.inner.put(key, &encoded)?;
        Ok(previous.map(|raw| from_value_bytes::<V, T>(&raw)))
    }

    /// Stores `value` under `key` without reporting any prior value.
    pub fn insert(&mut self, key: &K, value: &T) -> DbResult<()> {
        let encoded = to_bytes::<V, T>(value);
        self.inner.insert(key, &encoded)
    }

    /// Removes `key`; returns the value that was stored, if any.
    pub fn delete(&mut self, key: &K) -> DbResult<Option<T>> {
        let removed = self.inner.delete(key)?;
        Ok(removed.map(|raw| from_value_bytes::<V, T>(&raw)))
    }

    /// Looks up `key`, decoding the stored bytes into `T`.
    pub fn get(&self, key: &K) -> Option<T> {
        self.inner
            .get(key)
            .map(|raw| from_value_bytes::<V, T>(&raw))
    }

    /// Like [`Self::get`], but a missing key becomes [`DbError::KeyNotFound`].
    pub fn get_or_err(&self, key: &K) -> DbResult<T> {
        match self.get(key) {
            Some(value) => Ok(value),
            None => Err(DbError::KeyNotFound),
        }
    }

    /// Whether `key` is present in this shard.
    pub fn contains(&self, key: &K) -> bool {
        self.inner.contains(key)
    }
}
/// Reinterprets `value`'s in-memory representation as a `V`-byte array.
///
/// The size equality is now enforced at compile time (previously only a
/// `debug_assert_eq!`, so a mismatched instantiation was undefined behavior
/// in release builds). This matches the `const { assert!(...) }` pattern the
/// constructors already use.
#[inline(always)]
pub(crate) fn to_bytes<const V: usize, T: Copy>(value: &T) -> [u8; V] {
    const { assert!(size_of::<T>() == V) }
    // SAFETY: `T` is `Copy` and exactly `V` bytes wide (checked above).
    // `[u8; V]` has alignment 1, so reading it through the `&T` pointer is
    // always sufficiently aligned. NOTE(review): if `T` contains padding,
    // those padding bytes are read here as ordinary `u8`s — confirm stored
    // types are padding-free.
    unsafe { std::ptr::read(std::ptr::from_ref(value).cast()) }
}
/// Reconstructs a `T` from its `V`-byte in-memory representation.
///
/// Fixes a soundness bug: the previous `std::ptr::read` performed an
/// *aligned* read of `T`, but `&[u8; V]` only guarantees alignment 1, so the
/// read was undefined behavior whenever the buffer happened to be misaligned
/// for `T`. `read_unaligned` is correct for any alignment. The size equality
/// is also enforced at compile time instead of `debug_assert_eq!`.
#[inline(always)]
pub(crate) fn from_value_bytes<const V: usize, T: Copy>(bytes: &[u8; V]) -> T {
    const { assert!(size_of::<T>() == V) }
    // SAFETY: sizes match (compile-time check above) and `T: Copy`, so a
    // bitwise read produces a valid, independently-owned `T` as long as the
    // bytes are a valid representation of `T` — which this module guarantees
    // by only storing bytes produced by `to_bytes`.
    unsafe { std::ptr::read_unaligned(bytes.as_ptr().cast()) }
}
/// Exposes a `ZeroTree` as an `armour` [`crate::armour::collection::Collection`]
/// when the value type carries collection metadata and is keyed by its own id.
#[cfg(feature = "armour")]
impl<T, const V: usize, H> crate::armour::collection::Collection
for ZeroTree<T::SelfId, V, T, H, Bitcask>
where
T: crate::CollectionMeta + Copy + Send + Sync,
H: crate::hook::TypedWriteHook<T::SelfId, T>,
T::SelfId: crate::Key + Ord,
{
// Static collection name from the metadata trait.
fn name(&self) -> &str {
T::NAME
}
// Entry count; resolves to the inherent `len` (inherent methods take
// precedence over trait methods), not a recursive call.
fn len(&self) -> usize {
self.len()
}
// Compaction; likewise resolves to the inherent `compact`.
fn compact(&self) -> crate::DbResult<usize> {
self.compact()
}
}