use crate::{
index::Unordered as UnorderedIndex,
journal::{
contiguous::{Contiguous, Mutable, Reader},
Error as JournalError,
},
merkle::{
self, batch::MIN_TO_PARALLELIZE, hasher::Standard as StandardHasher, mem::Mem,
storage::Storage as MerkleStorage, Location, Position, Readable,
},
metadata::{Config as MConfig, Metadata},
qmdb::{
any::{
self,
operation::{update::Update, Operation},
},
current::{
batch::BitmapBatch,
grafting,
proof::{OperationProof, RangeProof},
},
operation::Operation as _,
Error,
},
Context, Persistable,
};
use commonware_codec::{Codec, CodecShared, DecodeExt};
use commonware_cryptography::{Digest, DigestOf, Hasher};
use commonware_parallel::ThreadPool;
use commonware_utils::{
bitmap::{Prunable as BitMap, Readable as BitmapReadable},
sequence::prefixed_u64::U64,
sync::AsyncMutex,
};
use core::{num::NonZeroU64, ops::Range};
use futures::future::try_join_all;
use rayon::prelude::*;
use std::{collections::BTreeMap, sync::Arc};
use tracing::{error, warn};
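/// Prefix for metadata keys holding pinned node digests of the grafted tree.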
const NODE_PREFIX: u8 = 0;
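/// Prefix for the metadata key holding the number of pruned bitmap chunks.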
const PRUNED_CHUNKS_PREFIX: u8 = 1;
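/// A database that augments [any::db::Db] with a per-operation activity bitmap. Complete
/// bitmap chunks are grafted onto the operations merkle tree, yielding a single root that
/// commits to every operation and to whether it is currently active.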
pub struct Db<
F: merkle::Graftable,
E: Context,
C: Contiguous<Item: CodecShared>,
I: UnorderedIndex<Value = Location<F>>,
H: Hasher,
U: Send + Sync,
const N: usize,
> {
/// The underlying db of operations.
pub(super) any: any::db::Db<F, E, C, I, H, U>,
/// One bit per operation, indicating whether it is currently active.
pub(super) status: BitmapBatch<N>,
/// Merkle tree over the status bitmap's chunks, grafted onto the operations tree.
pub(super) grafted_tree: Mem<F, H::Digest>,
/// Persists the pruned chunk count and the pinned nodes of the grafted tree.
pub(super) metadata: AsyncMutex<Metadata<E, U64, Vec<u8>>>,
/// Optional pool for parallelizing digest computations.
pub(super) thread_pool: Option<ThreadPool>,
/// Cached root of the db.
pub(super) root: DigestOf<H>,
}
impl<F, E, C, I, H, U, const N: usize> Db<F, E, C, I, H, U, N>
where
F: merkle::Graftable,
E: Context,
U: Update,
C: Contiguous<Item = Operation<F, U>>,
I: UnorderedIndex<Value = Location<F>>,
H: Hasher,
Operation<F, U>: Codec,
{
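/// Returns the inactivity floor location: every operation before it is inactive.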
pub const fn inactivity_floor_loc(&self) -> Location<F> {
self.any.inactivity_floor_loc()
}
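/// Returns whether the db is empty.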
pub const fn is_empty(&self) -> bool {
self.any.is_empty()
}
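/// Returns the metadata associated with the last commit, if any.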
pub async fn get_metadata(&self) -> Result<Option<U::Value>, Error<F>> {
self.any.get_metadata().await
}
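/// Returns the location bounds of operations currently retained by the db.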
pub async fn bounds(&self) -> Range<Location<F>> {
self.any.bounds().await
}
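/// Returns true if `proof` proves that `ops` and their status `chunks` begin at
/// `start_loc` in the db with the given `root`.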
pub fn verify_range_proof(
hasher: &mut H,
proof: &RangeProof<F, H::Digest>,
start_loc: Location<F>,
ops: &[Operation<F, U>],
chunks: &[[u8; N]],
root: &H::Digest,
) -> bool {
proof.verify(hasher, start_loc, ops, chunks, root)
}
}
impl<F, E, U, C, I, H, const N: usize> Db<F, E, C, I, H, U, N>
where
F: merkle::Graftable,
E: Context,
U: Update,
C: Contiguous<Item = Operation<F, U>>,
I: UnorderedIndex<Value = Location<F>>,
H: Hasher,
Operation<F, U>: Codec,
{
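/// Returns a storage view that overlays the grafted tree on the operations tree at the
/// grafting height.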
fn grafted_storage(&self) -> impl MerkleStorage<F, Digest = H::Digest> + '_ {
grafting::Storage::new(
&self.grafted_tree,
grafting::height::<N>(),
&self.any.log.merkle,
)
}
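/// Returns the root of the db. Unlike [Self::ops_root], this commits to the activity
/// status of every operation.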
pub const fn root(&self) -> H::Digest {
self.root
}
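/// Returns the root of the underlying operations tree alone.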
pub fn ops_root(&self) -> H::Digest {
self.any.log.root()
}
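/// Returns a merkleized snapshot of the current grafted tree.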
pub(super) fn grafted_snapshot(&self) -> Arc<merkle::batch::MerkleizedBatch<F, H::Digest>> {
merkle::batch::MerkleizedBatch::from_mem(&self.grafted_tree)
}
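/// Returns a new batch layered over the db's current state.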
pub fn new_batch(&self) -> super::batch::UnmerkleizedBatch<F, H, U, N> {
super::batch::UnmerkleizedBatch::new(
self.any.new_batch(),
self.grafted_snapshot(),
self.status.clone(),
)
}
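/// Generates a proof of the operation at `loc` and its current activity status.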
pub(super) async fn operation_proof(
&self,
hasher: &mut H,
loc: Location<F>,
) -> Result<OperationProof<F, H::Digest, N>, Error<F>> {
let storage = self.grafted_storage();
let ops_root = self.any.log.root();
OperationProof::new(hasher, &self.status, &storage, loc, ops_root).await
}
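/// Generates a proof of up to `max_ops` operations starting at `start_loc`, returning the
/// proof together with the operations and the status chunks needed to verify it.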
pub async fn range_proof(
&self,
hasher: &mut H,
start_loc: Location<F>,
max_ops: NonZeroU64,
) -> Result<(RangeProof<F, H::Digest>, Vec<Operation<F, U>>, Vec<[u8; N]>), Error<F>> {
let storage = self.grafted_storage();
let ops_root = self.any.log.root();
RangeProof::new_with_ops(
hasher,
&self.status,
&storage,
&self.any.log,
start_loc,
max_ops,
ops_root,
)
.await
}
}
impl<F, E, U, C, I, H, const N: usize> Db<F, E, C, I, H, U, N>
where
F: merkle::Graftable,
E: Context,
U: Update,
C: Mutable<Item = Operation<F, U>>,
I: UnorderedIndex<Value = Location<F>>,
H: Hasher,
Operation<F, U>: Codec,
{
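/// Generates a proof of up to `max_ops` operations starting at `start_loc` against the
/// operations tree as it was when it contained `historical_size` operations.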
pub async fn ops_historical_proof(
&self,
historical_size: Location<F>,
start_loc: Location<F>,
max_ops: NonZeroU64,
) -> Result<(merkle::Proof<F, H::Digest>, Vec<Operation<F, U>>), Error<F>> {
self.any
.historical_proof(historical_size, start_loc, max_ops)
.await
}
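/// Returns the digests of the operations-tree nodes pinned at `loc`.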
pub async fn pinned_nodes_at(&self, loc: Location<F>) -> Result<Vec<H::Digest>, Error<F>> {
self.any.pinned_nodes_at(loc).await
}
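/// Collapses all overlay layers of the status bitmap into its base.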
pub fn flatten(&mut self) {
self.status.flatten();
}
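/// Prunes operations preceding `prune_loc` from the underlying db, first pruning the status
/// bitmap to the inactivity floor and rebuilding the grafted tree at the new boundary.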
pub async fn prune(&mut self, prune_loc: Location<F>) -> Result<(), Error<F>> {
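// Collapse any overlay layers so the base bitmap can be mutated directly.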
self.flatten();
let BitmapBatch::<N>::Base(base) = &mut self.status else {
unreachable!("flatten() guarantees Base");
};
Arc::make_mut(base).prune_to_bit(*self.any.inactivity_floor_loc);
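// If whole chunks were pruned, prune the grafted tree to the same boundary, carrying
// over the pinned and retained nodes.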
let pruned_chunks = self.status.pruned_chunks() as u64;
if pruned_chunks > 0 {
let prune_loc_grafted = Location::<F>::new(pruned_chunks);
let bounds_start = self.grafted_tree.bounds().start;
let grafted_prune_pos =
Position::try_from(prune_loc_grafted).expect("valid leaf count");
if prune_loc_grafted > bounds_start {
let root = *self.grafted_tree.root();
let size = self.grafted_tree.size();
let mut pinned = BTreeMap::new();
for pos in F::nodes_to_pin(prune_loc_grafted) {
pinned.insert(
pos,
self.grafted_tree
.get_node(pos)
.expect("pinned peak must exist"),
);
}
let mut retained = Vec::with_capacity((*size - *grafted_prune_pos) as usize);
for p in *grafted_prune_pos..*size {
retained.push(
self.grafted_tree
.get_node(Position::new(p))
.expect("retained node must exist"),
);
}
self.grafted_tree =
Mem::from_pruned_with_retained(root, grafted_prune_pos, pinned, retained);
}
}
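// Persist the new pruning boundary before pruning the underlying db.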
self.sync_metadata().await?;
self.any.prune(prune_loc).await
}
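/// Rewinds the db to contain exactly `size` operations. The operation at `size - 1` must
/// carry an inactivity floor (i.e. be a commit). Fails if `size` is 0, exceeds the current
/// size, or falls within the pruned region; rewinding to the current size is a no-op.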
pub async fn rewind(&mut self, size: Location<F>) -> Result<(), Error<F>> {
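// Collapse overlay layers before touching the base bitmap.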
self.flatten();
let rewind_size = *size;
let current_size = *self.any.last_commit_loc + 1;
if rewind_size == current_size {
return Ok(());
}
if rewind_size == 0 || rewind_size > current_size {
return Err(Error::Journal(JournalError::InvalidRewind(rewind_size)));
}
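// The rewind point must not fall within the pruned region of the bitmap.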
let pruned_chunks = self.status.pruned_chunks();
let pruned_bits = (pruned_chunks as u64)
.checked_mul(BitMap::<N>::CHUNK_SIZE_BITS)
.ok_or_else(|| Error::DataCorrupted("pruned ops leaves overflow"))?;
if rewind_size < pruned_bits {
return Err(Error::Journal(JournalError::ItemPruned(rewind_size - 1)));
}
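// The last retained operation must carry an inactivity floor, and that floor must itself
// be unpruned.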
{
let reader = self.any.log.reader().await;
let rewind_last_loc = Location::<F>::new(rewind_size - 1);
let rewind_last_op = reader.read(*rewind_last_loc).await?;
let Some(rewind_floor) = rewind_last_op.has_floor() else {
return Err(Error::<F>::UnexpectedData(rewind_last_loc));
};
if *rewind_floor < pruned_bits {
return Err(Error::<F>::Journal(JournalError::ItemPruned(*rewind_floor)));
}
}
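// Capture the pinned nodes needed to rebuild the pruned grafted tree.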
let pinned_nodes = if pruned_chunks > 0 {
let grafted_leaves = Location::<F>::new(pruned_chunks as u64);
let mut pinned_nodes = Vec::new();
for pos in F::nodes_to_pin(grafted_leaves) {
let digest = self
.grafted_tree
.get_node(pos)
.ok_or(Error::<F>::DataCorrupted("missing grafted pinned node"))?;
pinned_nodes.push(digest);
}
pinned_nodes
} else {
Vec::new()
};
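// Rewind the underlying db; it returns the locations whose activity bits must be restored.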
let restored_locs = self.any.rewind(size).await?;
{
let BitmapBatch::<N>::Base(base) = &mut self.status else {
unreachable!("flatten() guarantees Base");
};
let status: &mut BitMap<N> = Arc::get_mut(base).expect("flatten ensures sole owner");
status.truncate(rewind_size);
for loc in &restored_locs {
status.set_bit(**loc, true);
}
status.set_bit(rewind_size - 1, true);
}
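// Rebuild the grafted tree and recompute the cached root over the rewound bitmap.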
let BitmapBatch::Base(status) = &self.status else {
unreachable!("flatten() guarantees Base");
};
let status = status.as_ref();
let hasher = StandardHasher::<H>::new();
let grafted_tree = build_grafted_tree::<F, H, N>(
&hasher,
status,
&pinned_nodes,
&self.any.log.merkle,
self.thread_pool.as_ref(),
)
.await?;
let storage =
grafting::Storage::new(&grafted_tree, grafting::height::<N>(), &self.any.log.merkle);
let partial_chunk = partial_chunk(status);
let ops_root = self.any.log.root();
let root = compute_db_root(&hasher, status, &storage, partial_chunk, &ops_root).await?;
self.grafted_tree = grafted_tree;
self.root = root;
Ok(())
}
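/// Rewrites and syncs the metadata store with the current pruned chunk count and the pinned
/// node digests of the grafted tree.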
pub(crate) async fn sync_metadata(&self) -> Result<(), Error<F>> {
let mut metadata = self.metadata.lock().await;
metadata.clear();
let key = U64::new(PRUNED_CHUNKS_PREFIX, 0);
metadata.put(
key,
(self.status.pruned_chunks() as u64).to_be_bytes().to_vec(),
);
let pruned_chunks = Location::<F>::new(self.status.pruned_chunks() as u64);
for (i, grafted_pos) in F::nodes_to_pin(pruned_chunks).enumerate() {
let digest = self
.grafted_tree
.get_node(grafted_pos)
.ok_or(Error::<F>::DataCorrupted("missing grafted pinned node"))?;
let key = U64::new(NODE_PREFIX, i as u64);
metadata.put(key, digest.to_vec());
}
metadata.sync().await?;
Ok(())
}
}
impl<F, E, U, C, I, H, const N: usize> Db<F, E, C, I, H, U, N>
where
F: merkle::Graftable,
E: Context,
U: Update,
C: Mutable<Item = Operation<F, U>> + Persistable<Error = JournalError>,
I: UnorderedIndex<Value = Location<F>>,
H: Hasher,
Operation<F, U>: Codec,
{
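/// Commits pending changes to the underlying db.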
pub async fn commit(&self) -> Result<(), Error<F>> {
self.any.commit().await
}
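/// Syncs all db state to disk, including the bitmap metadata.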
pub async fn sync(&self) -> Result<(), Error<F>> {
self.any.sync().await?;
self.sync_metadata().await
}
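/// Destroys the db, removing all of its persistent state.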
pub async fn destroy(self) -> Result<(), Error<F>> {
self.metadata.into_inner().destroy().await?;
self.any.destroy().await
}
}
impl<F, E, U, C, I, H, const N: usize> Db<F, E, C, I, H, U, N>
where
F: merkle::Graftable,
E: Context,
U: Update + 'static,
C: Mutable<Item = Operation<F, U>> + Persistable<Error = JournalError>,
I: UnorderedIndex<Value = Location<F>>,
H: Hasher,
Operation<F, U>: Codec,
{
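/// Applies a merkleized batch on top of the db's current state, updating the status bitmap,
/// grafted tree, and cached root, and returning the range of locations it appended.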
pub async fn apply_batch(
&mut self,
batch: Arc<super::batch::MerkleizedBatch<F, H::Digest, U, N>>,
) -> Result<Range<Location<F>>, Error<F>> {
let db_size = *self.any.last_commit_loc + 1;
let range = self.any.apply_batch(Arc::clone(&batch.inner)).await?;
{
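// Gather bitmap overlays from batch layers that extend beyond the pre-batch size, then
// apply them oldest-first.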
let mut overlays = Vec::new();
let mut current = &batch.bitmap;
while let super::batch::BitmapBatch::Layer(layer) = current {
if layer.overlay.len <= db_size {
break;
}
overlays.push(Arc::clone(&layer.overlay));
current = &layer.parent;
}
for overlay in overlays.into_iter().rev() {
self.status.apply_overlay(overlay);
}
}
self.grafted_tree.apply_batch(&batch.grafted)?;
self.root = batch.canonical_root;
Ok(range)
}
}
impl<F, E, U, C, I, H, const N: usize> Persistable for Db<F, E, C, I, H, U, N>
where
F: merkle::Graftable,
E: Context,
U: Update,
C: Mutable<Item = Operation<F, U>> + Persistable<Error = JournalError>,
I: UnorderedIndex<Value = Location<F>>,
H: Hasher,
Operation<F, U>: Codec,
{
type Error = Error<F>;
async fn commit(&self) -> Result<(), Error<F>> {
Self::commit(self).await
}
async fn sync(&self) -> Result<(), Error<F>> {
Self::sync(self).await
}
async fn destroy(self) -> Result<(), Error<F>> {
self.destroy().await
}
}
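/// Returns the bitmap's last chunk and the number of bits it contains, or None if the
/// bitmap ends on a chunk boundary.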
pub(super) fn partial_chunk<B: BitmapReadable<N>, const N: usize>(
bitmap: &B,
) -> Option<([u8; N], u64)> {
let (last_chunk, next_bit) = bitmap.last_chunk();
if next_bit == BitMap::<N>::CHUNK_SIZE_BITS {
None
} else {
Some((last_chunk, next_bit))
}
}
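/// Combines the operations root and the grafted root into a single db root, folding in the
/// bit count and digest of the partial chunk if one exists.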
pub(super) fn combine_roots<H: Hasher>(
hasher: &StandardHasher<H>,
ops_root: &H::Digest,
grafted_root: &H::Digest,
partial: Option<(u64, &H::Digest)>,
) -> H::Digest {
match partial {
Some((next_bit, last_chunk_digest)) => {
let next_bit = next_bit.to_be_bytes();
hasher.hash([
ops_root.as_ref(),
grafted_root.as_ref(),
next_bit.as_slice(),
last_chunk_digest.as_ref(),
])
}
None => hasher.hash([ops_root.as_ref(), grafted_root.as_ref()]),
}
}
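/// Computes the db root from the status bitmap, the grafted storage, and the operations
/// root, hashing in the partial chunk if one exists.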
pub(super) async fn compute_db_root<
F: merkle::Graftable,
H: Hasher,
B: BitmapReadable<N>,
G: Readable<Family = F, Digest = H::Digest, Error = merkle::Error<F>>,
S: MerkleStorage<F, Digest = H::Digest>,
const N: usize,
>(
hasher: &StandardHasher<H>,
status: &B,
storage: &grafting::Storage<'_, F, H::Digest, G, S>,
partial_chunk: Option<([u8; N], u64)>,
ops_root: &H::Digest,
) -> Result<H::Digest, Error<F>> {
let grafted_root = compute_grafted_root(hasher, status, storage).await?;
let partial = partial_chunk.map(|(chunk, next_bit)| {
let digest = hasher.digest(&chunk);
(next_bit, digest)
});
Ok(combine_roots(
hasher,
ops_root,
&grafted_root,
partial.as_ref().map(|(nb, d)| (*nb, d)),
))
}
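/// Computes the root of the grafted tree from the peaks of `storage`. Pruned chunks are
/// rendered as all-zero and any incomplete chunk is excluded.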
pub(super) async fn compute_grafted_root<
F: merkle::Graftable,
H: Hasher,
B: BitmapReadable<N>,
G: Readable<Family = F, Digest = H::Digest, Error = merkle::Error<F>>,
S: MerkleStorage<F, Digest = H::Digest>,
const N: usize,
>(
hasher: &StandardHasher<H>,
status: &B,
storage: &grafting::Storage<'_, F, H::Digest, G, S>,
) -> Result<H::Digest, Error<F>> {
let size = storage.size().await;
let leaves = Location::try_from(size)?;
let mut peaks: Vec<H::Digest> = Vec::new();
for (peak_pos, _) in F::peaks(size) {
let digest = storage
.get_node(peak_pos)
.await?
.ok_or(merkle::Error::<F>::MissingNode(peak_pos))?;
peaks.push(digest);
}
let grafting_height = grafting::height::<N>();
let complete_chunks = status.complete_chunks() as u64;
let pruned_chunks = status.pruned_chunks() as u64;
Ok(grafting::grafted_root(
hasher,
leaves,
&peaks,
grafting_height,
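// Chunk accessor: pruned chunks read as all-zero; the incomplete chunk (if any) is absent.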
|chunk_idx| {
if chunk_idx < complete_chunks {
if chunk_idx < pruned_chunks {
Some([0u8; N])
} else {
Some(status.get_chunk(chunk_idx as usize))
}
} else {
None
}
},
))
}
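/// Computes the grafted leaf digest for each provided chunk by hashing the chunk with the
/// combined digest of the ops-tree peaks it covers, parallelizing over `pool` when there
/// are at least MIN_TO_PARALLELIZE inputs.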
pub(super) async fn compute_grafted_leaves<F: merkle::Graftable, H: Hasher, const N: usize>(
hasher: &StandardHasher<H>,
ops_tree: &impl MerkleStorage<F, Digest = H::Digest>,
chunks: impl IntoIterator<Item = (usize, [u8; N])>,
pool: Option<&ThreadPool>,
) -> Result<Vec<(usize, H::Digest)>, Error<F>> {
let grafting_height = grafting::height::<N>();
let ops_size = ops_tree.size().await;
let inputs = try_join_all(chunks.into_iter().map(|(chunk_idx, chunk)| async move {
let mut chunk_ops_digest: Option<H::Digest> = None;
for (pos, _) in F::chunk_peaks(ops_size, chunk_idx as u64, grafting_height) {
let digest = ops_tree
.get_node(pos)
.await?
.ok_or(merkle::Error::<F>::MissingGraftedLeaf(pos))?;
chunk_ops_digest = Some(
chunk_ops_digest.map_or(digest, |acc| hasher.hash([acc.as_ref(), digest.as_ref()])),
);
}
let chunk_ops_digest =
chunk_ops_digest.expect("chunk must have at least one covering peak");
Ok::<_, Error<F>>((chunk_idx, chunk_ops_digest, chunk))
}))
.await?;
let zero_chunk = [0u8; N];
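// An all-zero chunk takes the bare ops digest, matching how pruned chunks are rendered in
// compute_grafted_root.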
let graft =
|h: &StandardHasher<H>, chunk_idx: usize, chunk_ops_digest: H::Digest, chunk: [u8; N]| {
if chunk == zero_chunk {
(chunk_idx, chunk_ops_digest)
} else {
(
chunk_idx,
h.hash([chunk.as_slice(), chunk_ops_digest.as_ref()]),
)
}
};
Ok(match pool.filter(|_| inputs.len() >= MIN_TO_PARALLELIZE) {
Some(pool) => pool.install(|| {
inputs
.into_par_iter()
.map_init(
|| hasher.clone(),
|h, (chunk_idx, chunk_ops_digest, chunk)| {
graft(h, chunk_idx, chunk_ops_digest, chunk)
},
)
.collect()
}),
None => inputs
.into_iter()
.map(|(chunk_idx, chunk_ops_digest, chunk)| {
graft(hasher, chunk_idx, chunk_ops_digest, chunk)
})
.collect(),
})
}
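/// Builds an in-memory grafted tree over the complete, unpruned chunks of `bitmap`, seeding
/// it with `pinned_nodes` when the bitmap has already been pruned.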
pub(super) async fn build_grafted_tree<F: merkle::Graftable, H: Hasher, const N: usize>(
hasher: &StandardHasher<H>,
bitmap: &BitMap<N>,
pinned_nodes: &[H::Digest],
ops_tree: &impl MerkleStorage<F, Digest = H::Digest>,
pool: Option<&ThreadPool>,
) -> Result<Mem<F, H::Digest>, Error<F>> {
let grafting_height = grafting::height::<N>();
let pruned_chunks = bitmap.pruned_chunks();
let complete_chunks = bitmap.complete_chunks();
let leaves = compute_grafted_leaves::<F, H, N>(
hasher,
ops_tree,
(pruned_chunks..complete_chunks).map(|chunk_idx| (chunk_idx, *bitmap.get_chunk(chunk_idx))),
pool,
)
.await?;
let grafted_hasher = grafting::GraftedHasher::<F, _>::new(hasher.clone(), grafting_height);
let mut grafted_tree = if pruned_chunks > 0 {
let grafted_pruning_boundary = Location::<F>::new(pruned_chunks as u64);
Mem::from_components(
&grafted_hasher,
Vec::new(),
grafted_pruning_boundary,
pinned_nodes.to_vec(),
)
.map_err(|_| Error::<F>::DataCorrupted("grafted tree rebuild failed"))?
} else {
Mem::new(&grafted_hasher)
};
if !leaves.is_empty() {
let batch = {
let mut batch = grafted_tree.new_batch().with_pool(pool.cloned());
for &(_chunk_idx, digest) in &leaves {
batch = batch.add_leaf_digest(digest);
}
batch.merkleize(&grafted_tree, &grafted_hasher)
};
grafted_tree.apply_batch(&batch)?;
}
Ok(grafted_tree)
}
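/// Initializes the bitmap metadata store in `partition`, returning the store, the recorded
/// pruned chunk count, and the pinned node digests needed to restore the grafted tree.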
pub(super) async fn init_metadata<F: merkle::Graftable, E: Context, D: Digest>(
context: E,
partition: &str,
) -> Result<(Metadata<E, U64, Vec<u8>>, usize, Vec<D>), Error<F>> {
let metadata_cfg = MConfig {
partition: partition.into(),
codec_config: ((0..).into(), ()),
};
let metadata =
Metadata::<_, U64, Vec<u8>>::init(context.with_label("metadata"), metadata_cfg).await?;
let key = U64::new(PRUNED_CHUNKS_PREFIX, 0);
let pruned_chunks = match metadata.get(&key) {
Some(bytes) => u64::from_be_bytes(bytes.as_slice().try_into().map_err(|_| {
error!("pruned chunks value not a valid u64");
Error::<F>::DataCorrupted("pruned chunks value not a valid u64")
})?),
None => {
warn!("bitmap metadata does not contain pruned chunks, initializing as empty");
0
}
} as usize;
let pinned_nodes = if pruned_chunks > 0 {
let pruned_loc = Location::<F>::new(pruned_chunks as u64);
if !pruned_loc.is_valid() {
return Err(Error::DataCorrupted("pruned chunks exceeds MAX_LEAVES"));
}
let mut pinned = Vec::new();
for (index, _pos) in F::nodes_to_pin(pruned_loc).enumerate() {
let metadata_key = U64::new(NODE_PREFIX, index as u64);
let Some(bytes) = metadata.get(&metadata_key) else {
return Err(Error::DataCorrupted(
"missing pinned node in grafted tree metadata",
));
};
let digest = D::decode(bytes.as_ref())
.map_err(|_| Error::<F>::DataCorrupted("invalid pinned node digest"))?;
pinned.push(digest);
}
pinned
} else {
Vec::new()
};
Ok((metadata, pruned_chunks, pinned_nodes))
}
#[cfg(test)]
mod tests {
use super::*;
use commonware_codec::FixedSize;
use commonware_cryptography::{sha256, Sha256};
use commonware_utils::bitmap::Prunable as PrunableBitMap;
const N: usize = sha256::Digest::SIZE;
#[test]
fn partial_chunk_single_bit() {
let mut bm = PrunableBitMap::<N>::new();
bm.push(true);
let result = partial_chunk::<PrunableBitMap<N>, N>(&bm);
assert!(result.is_some());
let (chunk, next_bit) = result.unwrap();
assert_eq!(next_bit, 1);
assert_eq!(chunk[0], 1);
}
#[test]
fn partial_chunk_aligned() {
let mut bm = PrunableBitMap::<N>::new();
for _ in 0..PrunableBitMap::<N>::CHUNK_SIZE_BITS {
bm.push(true);
}
let result = partial_chunk::<PrunableBitMap<N>, N>(&bm);
assert!(result.is_none());
}
#[test]
fn partial_chunk_partial() {
let mut bm = PrunableBitMap::<N>::new();
for _ in 0..(PrunableBitMap::<N>::CHUNK_SIZE_BITS + 5) {
bm.push(true);
}
let result = partial_chunk::<PrunableBitMap<N>, N>(&bm);
assert!(result.is_some());
let (_chunk, next_bit) = result.unwrap();
assert_eq!(next_bit, 5);
}
#[test]
fn combine_roots_deterministic() {
let h1 = StandardHasher::<Sha256>::new();
let h2 = StandardHasher::<Sha256>::new();
let ops = Sha256::hash(b"ops");
let grafted = Sha256::hash(b"grafted");
let r1 = combine_roots(&h1, &ops, &grafted, None);
let r2 = combine_roots(&h2, &ops, &grafted, None);
assert_eq!(r1, r2);
}
#[test]
fn combine_roots_with_partial_differs() {
let h1 = StandardHasher::<Sha256>::new();
let h2 = StandardHasher::<Sha256>::new();
let ops = Sha256::hash(b"ops");
let grafted = Sha256::hash(b"grafted");
let partial_digest = Sha256::hash(b"partial");
let without = combine_roots(&h1, &ops, &grafted, None);
let with = combine_roots(&h2, &ops, &grafted, Some((5, &partial_digest)));
assert_ne!(without, with);
}
#[test]
fn combine_roots_different_ops_root() {
let h1 = StandardHasher::<Sha256>::new();
let h2 = StandardHasher::<Sha256>::new();
let ops_a = Sha256::hash(b"ops_a");
let ops_b = Sha256::hash(b"ops_b");
let grafted = Sha256::hash(b"grafted");
let r1 = combine_roots(&h1, &ops_a, &grafted, None);
let r2 = combine_roots(&h2, &ops_b, &grafted, None);
assert_ne!(r1, r2);
}
}