use crate::{
index::Unordered as UnorderedIndex,
journal::contiguous::{Contiguous, Mutable},
merkle::{
self, hasher::Standard as StandardHasher, storage::Storage as MerkleStorage, Graftable,
Location, Position, Readable,
},
qmdb::{
any::{
self,
batch::{DiffEntry, FloorScan},
operation::{update, Operation},
ValueEncoding,
},
current::{
db::{compute_db_root, compute_grafted_leaves},
grafting,
},
operation::Key,
Error,
},
Context,
};
use commonware_codec::Codec;
use commonware_cryptography::{Digest, Hasher};
use commonware_utils::bitmap::{Prunable as BitMap, Readable as BitmapReadable};
use std::{collections::BTreeMap, sync::Arc};
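/// A sparse set of modified bitmap chunks layered over a read-only base
/// bitmap, together with the resulting bit length.
///
/// Chunks are materialized lazily: the first write to a chunk copies it from
/// the base (or zero-fills it when it lies beyond the base), so untouched
/// chunks never need to be stored here.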
#[derive(Clone, Debug, Default)]
pub(crate) struct ChunkOverlay<const N: usize> {
pub(crate) chunks: BTreeMap<usize, [u8; N]>,
pub(crate) len: u64,
}
impl<const N: usize> ChunkOverlay<N> {
const CHUNK_BITS: u64 = BitMap::<N>::CHUNK_SIZE_BITS;
const fn new(len: u64) -> Self {
Self {
chunks: BTreeMap::new(),
len,
}
}
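    /// Returns a mutable reference to chunk `idx`, materializing it on first
    /// access from the base's complete chunks, its trailing partial chunk, or
    /// zeroes when the chunk lies entirely beyond the base.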
fn chunk_mut<B: BitmapReadable<N>>(&mut self, base: &B, idx: usize) -> &mut [u8; N] {
self.chunks.entry(idx).or_insert_with(|| {
let base_len = base.len();
let base_complete = base.complete_chunks();
let base_has_partial = !base_len.is_multiple_of(Self::CHUNK_BITS);
if idx < base_complete {
base.get_chunk(idx)
} else if idx == base_complete && base_has_partial {
base.last_chunk().0
} else {
[0u8; N]
}
})
}
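    /// Sets the bit at `loc`, materializing its chunk from `base` if needed.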
fn set_bit<B: BitmapReadable<N>>(&mut self, base: &B, loc: u64) {
let idx = BitMap::<N>::to_chunk_index(loc);
let rel = (loc % Self::CHUNK_BITS) as usize;
let chunk = self.chunk_mut(base, idx);
chunk[rel / 8] |= 1 << (rel % 8);
}
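    /// Clears the bit at `loc`. Bits in chunks already pruned from `base` are
    /// left untouched.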
fn clear_bit<B: BitmapReadable<N>>(&mut self, base: &B, loc: u64) {
let idx = BitMap::<N>::to_chunk_index(loc);
if idx < base.pruned_chunks() {
return;
}
let rel = (loc % Self::CHUNK_BITS) as usize;
let chunk = self.chunk_mut(base, idx);
chunk[rel / 8] &= !(1 << (rel % 8));
}
pub(crate) fn get(&self, idx: usize) -> Option<&[u8; N]> {
self.chunks.get(&idx)
}
pub(crate) const fn complete_chunks(&self) -> usize {
(self.len / Self::CHUNK_BITS) as usize
}
}
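/// [`FloorScan`] over an activity bitmap: candidates are the set bits at or
/// above the floor, plus every location past the end of the bitmap (whose
/// status has not been recorded yet).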
pub(crate) struct BitmapScan<'a, B, const N: usize> {
bitmap: &'a B,
}
impl<'a, B: BitmapReadable<N>, const N: usize> BitmapScan<'a, B, N> {
pub(crate) const fn new(bitmap: &'a B) -> Self {
Self { bitmap }
}
}
impl<F: Graftable, B: BitmapReadable<N>, const N: usize> FloorScan<F> for BitmapScan<'_, B, N> {
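    /// Returns the next candidate at or above `floor` and below `tip`: the
    /// next set bit within the bitmap if one exists below the bound, else the
    /// first in-range location at or past the bitmap's end.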
fn next_candidate(&mut self, floor: Location<F>, tip: u64) -> Option<Location<F>> {
let loc = *floor;
if loc >= tip {
return None;
}
let bitmap_len = self.bitmap.len();
if loc < bitmap_len {
let bound = bitmap_len.min(tip);
if let Some(idx) = self.bitmap.ones_iter_from(loc).next() {
if idx < bound {
return Some(Location::<F>::new(idx));
}
}
}
if bitmap_len < tip {
let candidate = loc.max(bitmap_len);
if candidate < tip {
return Some(Location::<F>::new(candidate));
}
}
None
}
}
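/// [`MerkleStorage`] view that resolves nodes from an in-memory batch first,
/// falling back to the underlying base storage on a miss.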
struct BatchStorageAdapter<
'a,
F: Graftable,
D: Digest,
R: Readable<Family = F, Digest = D, Error = merkle::Error<F>>,
S: MerkleStorage<F, Digest = D>,
> {
batch: &'a R,
base: &'a S,
_phantom: core::marker::PhantomData<(F, D)>,
}
impl<
'a,
F: Graftable,
D: Digest,
R: Readable<Family = F, Digest = D, Error = merkle::Error<F>>,
S: MerkleStorage<F, Digest = D>,
> BatchStorageAdapter<'a, F, D, R, S>
{
const fn new(batch: &'a R, base: &'a S) -> Self {
Self {
batch,
base,
_phantom: core::marker::PhantomData,
}
}
}
impl<
F: Graftable,
D: Digest,
R: Readable<Family = F, Digest = D, Error = merkle::Error<F>>,
S: MerkleStorage<F, Digest = D>,
> MerkleStorage<F> for BatchStorageAdapter<'_, F, D, R, S>
{
type Digest = D;
async fn size(&self) -> Position<F> {
self.batch.size()
}
async fn get_node(&self, pos: Position<F>) -> Result<Option<D>, merkle::Error<F>> {
if let Some(node) = self.batch.get_node(pos) {
return Ok(Some(node));
}
self.base.get_node(pos).await
}
}
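/// [`Readable`] view layering a merkleized grafted batch over the in-memory
/// grafted tree, so lookups prefer the batch's pending nodes. Proof methods
/// are unreachable because [`compute_current_layer`] never calls them.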
struct BatchOverMem<'a, F: Graftable, D: Digest> {
batch: &'a merkle::batch::MerkleizedBatch<F, D>,
mem: &'a merkle::mem::Mem<F, D>,
}
impl<F: Graftable, D: Digest> Readable for BatchOverMem<'_, F, D> {
type Family = F;
type Digest = D;
type Error = merkle::Error<F>;
fn size(&self) -> Position<F> {
self.batch.size()
}
fn get_node(&self, pos: Position<F>) -> Option<D> {
if let Some(d) = self.batch.get_node(pos) {
return Some(d);
}
self.mem.get_node(pos)
}
fn root(&self) -> D {
self.batch.root()
}
fn pruning_boundary(&self) -> Location<F> {
self.batch.pruning_boundary()
}
fn proof(
&self,
_hasher: &impl crate::merkle::hasher::Hasher<F, Digest = D>,
_loc: Location<F>,
) -> Result<crate::merkle::Proof<F, D>, merkle::Error<F>> {
unreachable!("proof not used in compute_current_layer")
}
fn range_proof(
&self,
_hasher: &impl crate::merkle::hasher::Hasher<F, Digest = D>,
_range: core::ops::Range<Location<F>>,
) -> Result<crate::merkle::Proof<F, D>, merkle::Error<F>> {
unreachable!("range_proof not used in compute_current_layer")
}
}
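/// A batch of staged writes against a current db that has not been
/// merkleized yet. Alongside the inner [`any`] batch it carries the grafted
/// tree and activity bitmap snapshots the batch was created from.
///
/// A typical lifecycle, sketched with type parameters elided (illustrative
/// only; error handling omitted):
///
/// ```ignore
/// let batch = db.to_batch().new_batch();     // start from the db's state
/// let batch = batch.write(key, Some(value)); // stage writes
/// let merkleized = batch.merkleize(&db, None).await?;
/// db.apply_batch(merkleized).await?;         // commit
/// ```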
pub struct UnmerkleizedBatch<F, H, U, const N: usize>
where
F: Graftable,
U: update::Update + Send + Sync,
H: Hasher,
Operation<F, U>: Codec,
{
inner: any::batch::UnmerkleizedBatch<F, H, U>,
grafted_parent: Arc<merkle::batch::MerkleizedBatch<F, H::Digest>>,
bitmap_parent: BitmapBatch<N>,
}
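/// The merkleized form of a batch: the inner operations batch, the grafted
/// tree batch recomputed over the dirtied chunks, the updated activity
/// bitmap, and the canonical root binding all of them.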
pub struct MerkleizedBatch<F: Graftable, D: Digest, U: update::Update + Send + Sync, const N: usize>
where
Operation<F, U>: Send + Sync,
{
pub(crate) inner: Arc<any::batch::MerkleizedBatch<F, D, U>>,
pub(crate) grafted: Arc<merkle::batch::MerkleizedBatch<F, D>>,
pub(crate) bitmap: BitmapBatch<N>,
pub(crate) canonical_root: D,
}
impl<F, H, U, const N: usize> UnmerkleizedBatch<F, H, U, N>
where
F: Graftable,
U: update::Update + Send + Sync,
H: Hasher,
Operation<F, U>: Codec,
{
pub(super) const fn new(
inner: any::batch::UnmerkleizedBatch<F, H, U>,
grafted_parent: Arc<merkle::batch::MerkleizedBatch<F, H::Digest>>,
bitmap_parent: BitmapBatch<N>,
) -> Self {
Self {
inner,
grafted_parent,
bitmap_parent,
}
}
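    /// Stages a write of `key` to `value`; `None` deletes the key.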
pub fn write(mut self, key: U::Key, value: Option<U::Value>) -> Self {
self.inner = self.inner.write(key, value);
self
}
}
impl<F, K, V, H, const N: usize> UnmerkleizedBatch<F, H, update::Unordered<K, V>, N>
where
F: Graftable,
K: Key,
V: ValueEncoding,
H: Hasher,
Operation<F, update::Unordered<K, V>>: Codec,
{
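    /// Returns the value for `key` as seen by this batch's staged writes and
    /// `db`'s prior state.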
pub async fn get<E, C, I>(
&self,
key: &K,
db: &super::db::Db<F, E, C, I, H, update::Unordered<K, V>, N>,
) -> Result<Option<V::Value>, Error<F>>
where
E: Context,
C: Mutable<Item = Operation<F, update::Unordered<K, V>>>,
I: UnorderedIndex<Value = Location<F>> + 'static,
{
self.inner.get(key, &db.any).await
}
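    /// Merkleizes the staged operations (committed with optional `metadata`)
    /// against `db`, then computes the current layer: grafted-tree batch,
    /// activity bitmap, and canonical root.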
pub async fn merkleize<E, C, I>(
self,
db: &super::db::Db<F, E, C, I, H, update::Unordered<K, V>, N>,
metadata: Option<V::Value>,
) -> Result<Arc<MerkleizedBatch<F, H::Digest, update::Unordered<K, V>, N>>, Error<F>>
where
E: Context,
C: Mutable<Item = Operation<F, update::Unordered<K, V>>>,
I: UnorderedIndex<Value = Location<F>> + 'static,
{
let Self {
inner,
grafted_parent,
bitmap_parent,
} = self;
let scan = BitmapScan::new(&bitmap_parent);
let inner = inner
.merkleize_with_floor_scan(&db.any, metadata, scan)
.await?;
compute_current_layer(inner, db, &grafted_parent, &bitmap_parent).await
}
}
impl<F, K, V, H, const N: usize> UnmerkleizedBatch<F, H, update::Ordered<K, V>, N>
where
F: Graftable,
K: Key,
V: ValueEncoding,
H: Hasher,
Operation<F, update::Ordered<K, V>>: Codec,
{
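    /// Returns the value for `key` as seen by this batch's staged writes and
    /// `db`'s prior state.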
pub async fn get<E, C, I>(
&self,
key: &K,
db: &super::db::Db<F, E, C, I, H, update::Ordered<K, V>, N>,
) -> Result<Option<V::Value>, Error<F>>
where
E: Context,
C: Mutable<Item = Operation<F, update::Ordered<K, V>>>,
I: crate::index::Ordered<Value = Location<F>> + 'static,
{
self.inner.get(key, &db.any).await
}
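    /// Merkleizes the staged operations (committed with optional `metadata`)
    /// against `db`, then computes the current layer: grafted-tree batch,
    /// activity bitmap, and canonical root.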
pub async fn merkleize<E, C, I>(
self,
db: &super::db::Db<F, E, C, I, H, update::Ordered<K, V>, N>,
metadata: Option<V::Value>,
) -> Result<Arc<MerkleizedBatch<F, H::Digest, update::Ordered<K, V>, N>>, Error<F>>
where
E: Context,
C: Mutable<Item = Operation<F, update::Ordered<K, V>>>,
I: crate::index::Ordered<Value = Location<F>> + 'static,
{
let Self {
inner,
grafted_parent,
bitmap_parent,
} = self;
let scan = BitmapScan::new(&bitmap_parent);
let inner = inner
.merkleize_with_floor_scan(&db.any, metadata, scan)
.await?;
compute_current_layer(inner, db, &grafted_parent, &bitmap_parent).await
}
}
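/// Builds the bitmap [`ChunkOverlay`] for a batch of `batch_len` operations
/// appended starting at `batch_base`:
/// - activates the batch's commit operation and deactivates the previous
///   commit at `batch_base - 1`,
/// - activates each update the batch wrote within its own range, and
/// - deactivates the location each key previously occupied, preferring the
///   nearest ancestor diff that touched the key over the base location.
///
/// Chunks newly completed by the batch are materialized so they can be
/// grafted even when no bit inside them changed.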
#[allow(clippy::type_complexity)]
fn build_chunk_overlay<F: Graftable, U, B: BitmapReadable<N>, const N: usize>(
base: &B,
batch_len: usize,
batch_base: u64,
diff: &BTreeMap<U::Key, DiffEntry<F, U::Value>>,
ancestor_diffs: &[Arc<BTreeMap<U::Key, DiffEntry<F, U::Value>>>],
) -> ChunkOverlay<N>
where
U: update::Update,
{
let total_bits = base.len() + batch_len as u64;
let mut overlay = ChunkOverlay::new(total_bits);
let commit_loc = batch_base + batch_len as u64 - 1;
overlay.set_bit(base, commit_loc);
overlay.clear_bit(base, batch_base - 1);
for (key, entry) in diff {
if let Some(loc) = entry.loc() {
if *loc >= batch_base && *loc < batch_base + batch_len as u64 {
overlay.set_bit(base, *loc);
}
}
let mut prev_loc = entry.base_old_loc();
for ancestor_diff in ancestor_diffs {
if let Some(ancestor_entry) = ancestor_diff.get(key) {
prev_loc = ancestor_entry.loc();
break;
}
}
if let Some(old) = prev_loc {
overlay.clear_bit(base, *old);
}
}
let parent_complete = base.complete_chunks();
let new_complete = overlay.complete_chunks();
for idx in parent_complete..new_complete {
overlay.chunk_mut(base, idx);
}
overlay
}
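/// Computes the "current" layer for a freshly merkleized operations batch:
/// builds the bitmap overlay, rehashes the grafted leaf for every complete
/// chunk the batch dirtied, merkleizes a grafted-tree batch on top of the
/// parent, and derives the canonical db root from the bitmap, grafted tree,
/// and operations root.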
async fn compute_current_layer<F, E, U, C, I, H, const N: usize>(
inner: Arc<any::batch::MerkleizedBatch<F, H::Digest, U>>,
current_db: &super::db::Db<F, E, C, I, H, U, N>,
grafted_parent: &Arc<merkle::batch::MerkleizedBatch<F, H::Digest>>,
bitmap_parent: &BitmapBatch<N>,
) -> Result<Arc<MerkleizedBatch<F, H::Digest, U, N>>, Error<F>>
where
F: Graftable,
E: Context,
U: update::Update + Send + Sync,
C: Contiguous<Item = Operation<F, U>>,
I: UnorderedIndex<Value = Location<F>>,
H: Hasher,
Operation<F, U>: Codec,
{
let batch_len = inner.journal_batch.items().len();
let batch_base = *inner.new_last_commit_loc + 1 - batch_len as u64;
let overlay = build_chunk_overlay::<F, U, _, N>(
bitmap_parent,
batch_len,
batch_base,
&inner.diff,
&inner.ancestor_diffs,
);
let new_grafted_leaves = overlay.complete_chunks();
let chunks_to_update = overlay
.chunks
.iter()
.filter(|(&idx, _)| idx < new_grafted_leaves)
.map(|(&idx, &chunk)| (idx, chunk));
let ops_tree_adapter =
        BatchStorageAdapter::new(&inner.journal_batch, &current_db.any.log.merkle);
let hasher = StandardHasher::<H>::new();
let new_leaves = compute_grafted_leaves::<F, H, N>(
&hasher,
&ops_tree_adapter,
chunks_to_update,
current_db.thread_pool.as_ref(),
)
.await?;
let grafting_height = grafting::height::<N>();
let grafted_batch = {
let mut grafted_batch = grafted_parent
.new_batch()
.with_pool(current_db.thread_pool.clone());
let old_grafted_leaves = *grafted_parent.leaves() as usize;
for &(chunk_idx, digest) in &new_leaves {
if chunk_idx < old_grafted_leaves {
grafted_batch = grafted_batch
.update_leaf_digest(Location::<F>::new(chunk_idx as u64), digest)
.expect("update_leaf_digest failed");
} else {
grafted_batch = grafted_batch.add_leaf_digest(digest);
}
}
let gh = grafting::GraftedHasher::<F, _>::new(hasher.clone(), grafting_height);
        grafted_batch.merkleize(&current_db.grafted_tree, &gh)
};
let bitmap_batch = BitmapBatch::Layer(Arc::new(BitmapBatchLayer {
parent: bitmap_parent.clone(),
overlay: Arc::new(overlay),
}));
let ops_root = inner.root();
let layered = BatchOverMem {
batch: &grafted_batch,
        mem: &current_db.grafted_tree,
};
let grafted_storage = grafting::Storage::new(&layered, grafting_height, &ops_tree_adapter);
let partial = {
let rem = bitmap_batch.len() % BitmapBatch::<N>::CHUNK_SIZE_BITS;
if rem == 0 {
None
} else {
let idx = new_grafted_leaves;
let chunk = bitmap_batch.get_chunk(idx);
Some((chunk, rem))
}
};
let canonical_root = compute_db_root::<F, H, _, _, _, N>(
&hasher,
&bitmap_batch,
&grafted_storage,
partial,
&ops_root,
)
.await?;
Ok(Arc::new(MerkleizedBatch {
inner,
grafted: grafted_batch,
bitmap: bitmap_batch,
canonical_root,
}))
}
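/// Activity bitmap as seen by a batch: either a plain base bitmap or a chain
/// of [`ChunkOverlay`] layers over one.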
#[derive(Clone, Debug)]
pub(crate) enum BitmapBatch<const N: usize> {
Base(Arc<BitMap<N>>),
Layer(Arc<BitmapBatchLayer<N>>),
}
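/// One [`ChunkOverlay`] layer in a [`BitmapBatch`] chain.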
#[derive(Debug)]
pub(crate) struct BitmapBatchLayer<const N: usize> {
pub(crate) parent: BitmapBatch<N>,
pub(crate) overlay: Arc<ChunkOverlay<N>>,
}
impl<const N: usize> BitmapBatch<N> {
const CHUNK_SIZE_BITS: u64 = BitMap::<N>::CHUNK_SIZE_BITS;
}
impl<const N: usize> BitmapReadable<N> for BitmapBatch<N> {
fn complete_chunks(&self) -> usize {
(self.len() / Self::CHUNK_SIZE_BITS) as usize
}
fn get_chunk(&self, idx: usize) -> [u8; N] {
match self {
Self::Base(bm) => *bm.get_chunk(idx),
Self::Layer(layer) => {
if let Some(&chunk) = layer.overlay.get(idx) {
chunk
} else {
layer.parent.get_chunk(idx)
}
}
}
}
fn last_chunk(&self) -> ([u8; N], u64) {
let total = self.len();
if total == 0 {
return ([0u8; N], 0);
}
let rem = total % Self::CHUNK_SIZE_BITS;
let bits_in_last = if rem == 0 { Self::CHUNK_SIZE_BITS } else { rem };
let idx = if rem == 0 {
self.complete_chunks().saturating_sub(1)
} else {
self.complete_chunks()
};
(self.get_chunk(idx), bits_in_last)
}
fn pruned_chunks(&self) -> usize {
match self {
Self::Base(bm) => bm.pruned_chunks(),
Self::Layer(layer) => layer.parent.pruned_chunks(),
}
}
fn len(&self) -> u64 {
match self {
Self::Base(bm) => BitmapReadable::<N>::len(bm.as_ref()),
Self::Layer(layer) => layer.overlay.len,
}
}
}
impl<const N: usize> BitmapBatch<N> {
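    /// Applies `overlay` to this batch. A uniquely owned base bitmap is
    /// mutated in place; otherwise the overlay is pushed as a new layer.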
pub(super) fn apply_overlay(&mut self, overlay: Arc<ChunkOverlay<N>>) {
if let Self::Base(base) = self {
if let Some(bitmap) = Arc::get_mut(base) {
bitmap.extend_to(overlay.len);
for (&idx, chunk_bytes) in &overlay.chunks {
if idx >= bitmap.pruned_chunks() {
bitmap.set_chunk_by_index(idx, chunk_bytes);
}
}
return;
}
}
let parent = self.clone();
*self = Self::Layer(Arc::new(BitmapBatchLayer { parent, overlay }));
}
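    /// Collapses all overlay layers into a single base bitmap, cloning shared
    /// parents only when they cannot be unwrapped.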
pub(super) fn flatten(&mut self) {
if matches!(self, Self::Base(_)) {
return;
}
let mut owned = std::mem::replace(self, Self::Base(Arc::new(BitMap::default())));
let mut overlays = Vec::new();
let base = loop {
match owned {
Self::Base(bm) => break bm,
Self::Layer(layer) => match Arc::try_unwrap(layer) {
Ok(inner) => {
overlays.push(inner.overlay);
owned = inner.parent;
}
Err(arc) => {
overlays.push(arc.overlay.clone());
owned = arc.parent.clone();
}
},
}
};
let mut bitmap = Arc::try_unwrap(base).unwrap_or_else(|arc| (*arc).clone());
for overlay in overlays.into_iter().rev() {
bitmap.extend_to(overlay.len);
for (&idx, chunk_bytes) in &overlay.chunks {
if idx >= bitmap.pruned_chunks() {
bitmap.set_chunk_by_index(idx, chunk_bytes);
}
}
}
*self = Self::Base(Arc::new(bitmap));
}
}
impl<F: Graftable, D: Digest, U: update::Update + Send + Sync, const N: usize>
MerkleizedBatch<F, D, U, N>
where
Operation<F, U>: Send + Sync,
{
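    /// The canonical root binding the operations tree, grafted tree, and
    /// activity bitmap.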
pub const fn root(&self) -> D {
self.canonical_root
}
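    /// The root of the operations tree alone.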
pub fn ops_root(&self) -> D {
self.inner.root()
}
}
impl<F: Graftable, D: Digest, U: update::Update + Send + Sync, const N: usize>
MerkleizedBatch<F, D, U, N>
where
Operation<F, U>: Codec,
{
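    /// Starts a new [`UnmerkleizedBatch`] layered on this merkleized state.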
pub fn new_batch<H>(self: &Arc<Self>) -> UnmerkleizedBatch<F, H, U, N>
where
H: Hasher<Digest = D>,
{
UnmerkleizedBatch::new(
self.inner.new_batch::<H>(),
Arc::clone(&self.grafted),
self.bitmap.clone(),
)
}
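    /// Returns the value for `key` as seen by this merkleized batch and `db`.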
pub async fn get<E, C, I, H>(
&self,
key: &U::Key,
db: &super::db::Db<F, E, C, I, H, U, N>,
) -> Result<Option<U::Value>, Error<F>>
where
E: Context,
C: Contiguous<Item = Operation<F, U>>,
I: UnorderedIndex<Value = Location<F>> + 'static,
H: Hasher<Digest = D>,
{
self.inner.get(key, &db.any).await
}
}
impl<F, E, C, I, H, U, const N: usize> super::db::Db<F, E, C, I, H, U, N>
where
F: Graftable,
E: Context,
U: update::Update + Send + Sync,
C: Contiguous<Item = Operation<F, U>>,
I: UnorderedIndex<Value = Location<F>>,
H: Hasher,
Operation<F, U>: Codec,
{
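    /// Snapshots the db's state as a [`MerkleizedBatch`] that new batches can
    /// be layered on.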
pub fn to_batch(&self) -> Arc<MerkleizedBatch<F, H::Digest, U, N>> {
let grafted = self.grafted_snapshot();
Arc::new(MerkleizedBatch {
inner: self.any.to_batch(),
grafted,
bitmap: self.status.clone(),
canonical_root: self.root,
})
}
}
#[cfg(any(test, feature = "test-traits"))]
mod trait_impls {
use super::*;
use crate::{
journal::contiguous::Mutable,
qmdb::any::traits::{
BatchableDb, MerkleizedBatch as MerkleizedBatchTrait,
UnmerkleizedBatch as UnmerkleizedBatchTrait,
},
Persistable,
};
use std::future::Future;
type CurrentDb<F, E, C, I, H, U, const N: usize> =
crate::qmdb::current::db::Db<F, E, C, I, H, U, N>;
impl<F, K, V, H, E, C, I, const N: usize>
UnmerkleizedBatchTrait<CurrentDb<F, E, C, I, H, update::Unordered<K, V>, N>>
for UnmerkleizedBatch<F, H, update::Unordered<K, V>, N>
where
F: Graftable,
K: Key,
V: ValueEncoding + 'static,
H: Hasher,
E: Context,
C: Mutable<Item = Operation<F, update::Unordered<K, V>>>
+ Persistable<Error = crate::journal::Error>,
I: UnorderedIndex<Value = Location<F>> + 'static,
Operation<F, update::Unordered<K, V>>: Codec,
{
type Family = F;
type K = K;
type V = V::Value;
type Metadata = V::Value;
type Merkleized = Arc<MerkleizedBatch<F, H::Digest, update::Unordered<K, V>, N>>;
fn write(self, key: K, value: Option<V::Value>) -> Self {
Self::write(self, key, value)
}
fn merkleize(
self,
db: &CurrentDb<F, E, C, I, H, update::Unordered<K, V>, N>,
metadata: Option<V::Value>,
) -> impl Future<Output = Result<Self::Merkleized, crate::qmdb::Error<F>>> {
self.merkleize(db, metadata)
}
}
impl<F, K, V, H, E, C, I, const N: usize>
UnmerkleizedBatchTrait<CurrentDb<F, E, C, I, H, update::Ordered<K, V>, N>>
for UnmerkleizedBatch<F, H, update::Ordered<K, V>, N>
where
F: Graftable,
K: Key,
V: ValueEncoding + 'static,
H: Hasher,
E: Context,
C: Mutable<Item = Operation<F, update::Ordered<K, V>>>
+ Persistable<Error = crate::journal::Error>,
I: crate::index::Ordered<Value = Location<F>> + 'static,
Operation<F, update::Ordered<K, V>>: Codec,
{
type Family = F;
type K = K;
type V = V::Value;
type Metadata = V::Value;
type Merkleized = Arc<MerkleizedBatch<F, H::Digest, update::Ordered<K, V>, N>>;
fn write(self, key: K, value: Option<V::Value>) -> Self {
Self::write(self, key, value)
}
fn merkleize(
self,
db: &CurrentDb<F, E, C, I, H, update::Ordered<K, V>, N>,
metadata: Option<V::Value>,
) -> impl Future<Output = Result<Self::Merkleized, crate::qmdb::Error<F>>> {
self.merkleize(db, metadata)
}
}
impl<F: Graftable, D: Digest, U: update::Update + Send + Sync + 'static, const N: usize>
MerkleizedBatchTrait for Arc<MerkleizedBatch<F, D, U, N>>
where
Operation<F, U>: Codec,
{
type Digest = D;
fn root(&self) -> D {
MerkleizedBatch::root(self)
}
}
impl<F, E, K, V, C, I, H, const N: usize> BatchableDb
for CurrentDb<F, E, C, I, H, update::Unordered<K, V>, N>
where
F: Graftable,
E: Context,
K: Key,
V: ValueEncoding + 'static,
C: Mutable<Item = Operation<F, update::Unordered<K, V>>>
+ Persistable<Error = crate::journal::Error>,
I: UnorderedIndex<Value = Location<F>> + 'static,
H: Hasher,
Operation<F, update::Unordered<K, V>>: Codec,
{
type Family = F;
type K = K;
type V = V::Value;
type Merkleized = Arc<MerkleizedBatch<F, H::Digest, update::Unordered<K, V>, N>>;
type Batch = UnmerkleizedBatch<F, H, update::Unordered<K, V>, N>;
fn new_batch(&self) -> Self::Batch {
self.new_batch()
}
fn apply_batch(
&mut self,
batch: Self::Merkleized,
) -> impl Future<Output = Result<core::ops::Range<Location<F>>, crate::qmdb::Error<F>>>
{
self.apply_batch(batch)
}
}
impl<F, E, K, V, C, I, H, const N: usize> BatchableDb
for CurrentDb<F, E, C, I, H, update::Ordered<K, V>, N>
where
F: Graftable,
E: Context,
K: Key,
V: ValueEncoding + 'static,
C: Mutable<Item = Operation<F, update::Ordered<K, V>>>
+ Persistable<Error = crate::journal::Error>,
I: crate::index::Ordered<Value = Location<F>> + 'static,
H: Hasher,
Operation<F, update::Ordered<K, V>>: Codec,
{
type Family = F;
type K = K;
type V = V::Value;
type Merkleized = Arc<MerkleizedBatch<F, H::Digest, update::Ordered<K, V>, N>>;
type Batch = UnmerkleizedBatch<F, H, update::Ordered<K, V>, N>;
fn new_batch(&self) -> Self::Batch {
self.new_batch()
}
fn apply_batch(
&mut self,
batch: Self::Merkleized,
) -> impl Future<Output = Result<core::ops::Range<Location<F>>, crate::qmdb::Error<F>>>
{
self.apply_batch(batch)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::mmr;
use commonware_utils::bitmap::Prunable as BitMap;
const N: usize = 4;
type Bm = BitMap<N>;
type Location = mmr::Location;
fn make_bitmap(bits: &[bool]) -> Bm {
let mut bm = Bm::new();
for &b in bits {
bm.push(b);
}
bm
}
#[test]
fn chunk_overlay_pushes() {
use crate::qmdb::any::value::FixedEncoding;
use commonware_utils::sequence::FixedBytes;
type K = FixedBytes<4>;
type V = FixedEncoding<u64>;
type U = crate::qmdb::any::operation::update::Unordered<K, V>;
let key1 = FixedBytes::from([1, 0, 0, 0]);
let key2 = FixedBytes::from([2, 0, 0, 0]);
let base = make_bitmap(&[true; 4]);
let mut diff = BTreeMap::new();
diff.insert(
key1,
DiffEntry::Active {
value: 100u64,
                loc: Location::new(4),
                base_old_loc: None,
},
);
diff.insert(
key2,
DiffEntry::Active {
value: 200u64,
                loc: Location::new(99),
                base_old_loc: None,
},
);
let overlay = build_chunk_overlay::<mmr::Family, U, _, N>(&base, 4, 4, &diff, &[]);
let c0 = overlay.get(0).expect("chunk 0 should be dirty");
        // key1's update at loc 4 is active.
        assert_ne!(c0[0] & (1 << 4), 0);
        // Locs 5 and 6 were never set: key2's loc (99) is outside the batch
        // range, so only key1's update and the commit land in this chunk.
        assert_eq!(c0[0] & (1 << 5), 0);
        assert_eq!(c0[0] & (1 << 6), 0);
        // The batch's commit at loc 7 is active.
        assert_ne!(c0[0] & (1 << 7), 0);
        // The previous commit at loc 3 is cleared.
        assert_eq!(c0[0] & (1 << 3), 0);
    }
#[test]
fn chunk_overlay_clears() {
use crate::qmdb::any::value::FixedEncoding;
use commonware_utils::sequence::FixedBytes;
type K = FixedBytes<4>;
type U = crate::qmdb::any::operation::update::Unordered<K, FixedEncoding<u64>>;
let key1 = FixedBytes::from([1, 0, 0, 0]);
let key2 = FixedBytes::from([2, 0, 0, 0]);
let key3 = FixedBytes::from([3, 0, 0, 0]);
let base = make_bitmap(&[true; 64]);
let mut diff: BTreeMap<K, DiffEntry<mmr::Family, u64>> = BTreeMap::new();
diff.insert(
key1,
DiffEntry::Active {
value: 100,
loc: Location::new(70),
base_old_loc: Some(Location::new(5)),
},
);
diff.insert(
key2,
DiffEntry::Deleted {
base_old_loc: Some(Location::new(10)),
},
);
diff.insert(
key3,
DiffEntry::Active {
value: 300,
loc: Location::new(71),
base_old_loc: None,
},
);
let overlay = build_chunk_overlay::<mmr::Family, U, _, N>(&base, 8, 64, &diff, &[]);
let c0 = overlay.get(0).expect("chunk 0 should be dirty");
        // The old locs for key1 (5) and key2 (10) are cleared.
        assert_eq!(c0[0] & (1 << 5), 0);
        assert_eq!(c0[1] & (1 << 2), 0);
        // Neighboring bits (4 and 11) keep their base value.
        assert_eq!(c0[0] & (1 << 4), 1 << 4);
        assert_eq!(c0[1] & (1 << 3), 1 << 3);
    }
#[test]
fn chunk_overlay_preserves_partial_parent_chunk() {
use crate::qmdb::any::value::FixedEncoding;
use commonware_utils::sequence::FixedBytes;
type K = FixedBytes<4>;
type U = crate::qmdb::any::operation::update::Unordered<K, FixedEncoding<u64>>;
let base = make_bitmap(&[true; 20]);
assert_eq!(base.complete_chunks(), 0);
let key1 = FixedBytes::from([1, 0, 0, 0]);
let mut diff = BTreeMap::new();
diff.insert(
key1,
DiffEntry::Active {
value: 42u64,
loc: Location::new(35),
base_old_loc: None,
},
);
let overlay = build_chunk_overlay::<mmr::Family, U, _, N>(&base, 20, 20, &diff, &[]);
let c0 = overlay.get(0).expect("chunk 0 should be in overlay");
assert_eq!(c0[0], 0xFF);
assert_eq!(c0[1], 0xFF);
assert_eq!(c0[2], 0x07);
}
use crate::qmdb::any::batch::{FloorScan, SequentialScan};
#[test]
fn sequential_scan_returns_floor_when_below_tip() {
let mut scan = SequentialScan;
assert_eq!(
scan.next_candidate(Location::new(5), 10),
Some(Location::new(5))
);
}
#[test]
fn sequential_scan_returns_none_at_tip() {
let mut scan = SequentialScan;
assert_eq!(scan.next_candidate(Location::new(10), 10), None);
assert_eq!(scan.next_candidate(Location::new(11), 10), None);
}
#[test]
fn bitmap_scan_all_active() {
let bm = make_bitmap(&[true; 8]);
let mut scan = BitmapScan::<Bm, N>::new(&bm);
for i in 0..8 {
assert_eq!(
scan.next_candidate(Location::new(i), 8),
Some(Location::new(i))
);
}
assert_eq!(scan.next_candidate(Location::new(8), 8), None);
}
#[test]
fn bitmap_scan_all_inactive() {
let bm = make_bitmap(&[false; 8]);
let mut scan = BitmapScan::<Bm, N>::new(&bm);
assert_eq!(scan.next_candidate(Location::new(0), 8), None);
}
#[test]
fn bitmap_scan_skips_inactive() {
let bm = make_bitmap(&[false, false, true, false, true]);
let mut scan = BitmapScan::<Bm, N>::new(&bm);
assert_eq!(
scan.next_candidate(Location::new(0), 5),
Some(Location::new(2))
);
assert_eq!(
scan.next_candidate(Location::new(3), 5),
Some(Location::new(4))
);
assert_eq!(scan.next_candidate(Location::new(5), 5), None);
}
#[test]
fn bitmap_scan_beyond_bitmap_len_returns_candidate() {
let bm = make_bitmap(&[false; 4]);
let mut scan = BitmapScan::<Bm, N>::new(&bm);
assert_eq!(
scan.next_candidate(Location::new(0), 8),
Some(Location::new(4))
);
assert_eq!(
scan.next_candidate(Location::new(6), 8),
Some(Location::new(6))
);
}
#[test]
fn bitmap_scan_respects_tip() {
let bm = make_bitmap(&[false, false, false, true]);
let mut scan = BitmapScan::<Bm, N>::new(&bm);
assert_eq!(scan.next_candidate(Location::new(0), 3), None);
assert_eq!(
scan.next_candidate(Location::new(0), 4),
Some(Location::new(3))
);
}
#[test]
fn bitmap_scan_floor_at_tip() {
let bm = make_bitmap(&[true; 4]);
let mut scan = BitmapScan::<Bm, N>::new(&bm);
assert_eq!(scan.next_candidate(Location::new(4), 4), None);
}
#[test]
fn bitmap_scan_empty_bitmap() {
let bm = Bm::new();
let mut scan = BitmapScan::<Bm, N>::new(&bm);
assert_eq!(
scan.next_candidate(Location::new(0), 5),
Some(Location::new(0))
);
assert_eq!(scan.next_candidate(Location::new(0), 0), None);
}
#[test]
fn test_apply_overlay() {
let base = make_bitmap(&[true; 8]);
let mut batch = BitmapBatch::Base(Arc::new(base));
let mut overlay = ChunkOverlay::new(12);
let mut c0 = [0u8; N];
        c0[0] = 0b1111_0111; // clear bit 3
        c0[1] = 0b0000_0100; // set bit 10
        overlay.chunks.insert(0, c0);
batch.apply_overlay(Arc::new(overlay));
assert!(matches!(batch, BitmapBatch::Base(_)));
assert_eq!(batch.len(), 12);
assert_eq!(batch.get_chunk(0)[0] & (1 << 3), 0);
assert_ne!(batch.get_chunk(0)[1] & (1 << 2), 0);
let BitmapBatch::Base(ref base_arc) = batch else {
panic!("expected Base");
};
let _shared = Arc::clone(base_arc);
let overlay2 = ChunkOverlay::new(12);
batch.apply_overlay(Arc::new(overlay2));
assert!(matches!(batch, BitmapBatch::Layer(_)));
}
}