use std::{
collections::{BTreeMap, HashMap},
ops::RangeBounds,
};
use itertools::Itertools;
use zebra_chain::block::Height;
use zebra_state::{
DiskWriteBatch, SaplingScannedDatabaseEntry, SaplingScannedDatabaseIndex, SaplingScannedResult,
SaplingScanningKey, TransactionIndex, TransactionLocation, TypedColumnFamily, WriteTypedBatch,
};
use crate::storage::{Storage, INSERT_CONTROL_INTERVAL};
/// The name of the column family that stores Sapling scanning results.
pub const SAPLING_TX_IDS: &str = "sapling_tx_ids";

/// The typed column family handle for reading Sapling scanning results.
///
/// A stored value of `None` is a marker entry for a scanned height with no results.
pub type SaplingTxIdsCf<'cf> =
    TypedColumnFamily<'cf, SaplingScannedDatabaseIndex, Option<SaplingScannedResult>>;

/// The typed batch type for writing Sapling scanning results.
pub type WriteSaplingTxIdsBatch<'cf> =
    WriteTypedBatch<'cf, SaplingScannedDatabaseIndex, Option<SaplingScannedResult>, DiskWriteBatch>;
impl Storage {
    // Reading Sapling database entries

    /// Returns the result stored at a single database `index`:
    /// a (sapling key, block height, transaction index) tuple.
    ///
    /// The stored value is itself an `Option`: a stored `None` is a marker entry
    /// with no result, so both "no entry" and "marker entry" are flattened into `None`.
    pub fn sapling_result_for_index(
        &self,
        index: &SaplingScannedDatabaseIndex,
    ) -> Option<SaplingScannedResult> {
        self.sapling_tx_ids_cf().zs_get(index).flatten()
    }

    /// Returns all the stored results for `sapling_key` at `height`,
    /// keyed by each transaction's index within its block.
    ///
    /// Marker entries (stored `None` values) are kept in the returned map.
    pub fn sapling_results_for_key_and_height(
        &self,
        sapling_key: &SaplingScanningKey,
        height: Height,
    ) -> BTreeMap<TransactionIndex, Option<SaplingScannedResult>> {
        // The smallest index range that covers every entry for this key and height.
        let kh_min = SaplingScannedDatabaseIndex::min_for_key_and_height(sapling_key, height);
        let kh_max = SaplingScannedDatabaseIndex::max_for_key_and_height(sapling_key, height);

        self.sapling_results_in_range(kh_min..=kh_max)
            .into_iter()
            .map(|(result_index, txid)| (result_index.tx_loc.index, txid))
            .collect()
    }

    /// Returns all the stored results for `sapling_key`, grouped by height.
    ///
    /// Marker entries (stored `None` values) are dropped, so a height that only
    /// has a marker entry maps to an empty `Vec`.
    pub fn sapling_results_for_key(
        &self,
        sapling_key: &SaplingScanningKey,
    ) -> BTreeMap<Height, Vec<SaplingScannedResult>> {
        // The smallest index range that covers every entry for this key.
        let k_min = SaplingScannedDatabaseIndex::min_for_key(sapling_key);
        let k_max = SaplingScannedDatabaseIndex::max_for_key(sapling_key);

        // Group the raw entries by height.
        let results: HashMap<Height, Vec<Option<SaplingScannedResult>>> = self
            .sapling_results_in_range(k_min..=k_max)
            .into_iter()
            .map(|(index, result)| (index.tx_loc.height, result))
            .into_group_map();

        // Flatten away the `None` markers, keeping only real results.
        results
            .into_iter()
            .map(|(index, vector)| -> (Height, Vec<SaplingScannedResult>) {
                (index, vector.into_iter().flatten().collect())
            })
            .collect()
    }

    /// Returns every Sapling scanning key in the database, with the height of
    /// its last stored entry (result or marker).
    ///
    /// # Panics
    ///
    /// If the same key is visited twice, which would indicate broken reverse
    /// iteration or index ordering.
    pub fn sapling_keys_and_last_scanned_heights(&self) -> HashMap<SaplingScanningKey, Height> {
        let sapling_tx_ids = self.sapling_tx_ids_cf();
        let mut keys = HashMap::new();

        // Iterate backwards from the last entry in the column family: the first
        // entry seen for each key is the one with its highest height.
        let mut last_stored_record = sapling_tx_ids.zs_last_key_value();

        while let Some((last_stored_record_index, _result)) = last_stored_record {
            let sapling_key = last_stored_record_index.sapling_key.clone();
            let height = last_stored_record_index.tx_loc.height;

            let prev_height = keys.insert(sapling_key.clone(), height);
            assert_eq!(
                prev_height, None,
                "unexpected duplicate key: keys must only be inserted once \
last_stored_record_index: {last_stored_record_index:?}",
            );

            // Skip this key's remaining entries by jumping to the entry just
            // before the key's minimum index, i.e. the previous key's last entry.
            last_stored_record = sapling_tx_ids.zs_prev_key_value_strictly_before(
                &SaplingScannedDatabaseIndex::min_for_key(&sapling_key),
            );
        }

        keys
    }

    /// Returns the entries in `range`, ordered by database index.
    fn sapling_results_in_range(
        &self,
        range: impl RangeBounds<SaplingScannedDatabaseIndex>,
    ) -> BTreeMap<SaplingScannedDatabaseIndex, Option<SaplingScannedResult>> {
        self.sapling_tx_ids_cf().zs_items_in_range_ordered(range)
    }

    /// Returns a typed handle to the `sapling_tx_ids` column family.
    ///
    /// # Panics
    ///
    /// If the column family is missing, which is a bug: it is created along
    /// with the database itself.
    pub(crate) fn sapling_tx_ids_cf(&self) -> SaplingTxIdsCf {
        SaplingTxIdsCf::new(&self.db, SAPLING_TX_IDS)
            .expect("column family was created when database was created")
    }

    // Writing database entries

    /// Inserts the scanned `sapling_results` for `sapling_key` at `height`.
    ///
    /// If `sapling_results` is empty, nothing is written for the height, except
    /// that every `INSERT_CONTROL_INTERVAL` blocks a `None` marker entry is
    /// written — presumably so scanning progress is still recorded without
    /// storing an entry per empty height (TODO: confirm against callers).
    ///
    /// # Panics
    ///
    /// If the database write fails.
    pub fn insert_sapling_results(
        &mut self,
        sapling_key: &SaplingScanningKey,
        height: Height,
        sapling_results: BTreeMap<TransactionIndex, SaplingScannedResult>,
    ) {
        let mut batch = self.sapling_tx_ids_cf().new_batch_for_writing();

        // A marker is only needed on interval boundaries, and only when there
        // are no real results to record the height.
        let needs_control_entry =
            height.0 % INSERT_CONTROL_INTERVAL == 0 && sapling_results.is_empty();

        if needs_control_entry {
            batch = batch.insert_sapling_height(sapling_key, height);
        }

        for (index, sapling_result) in sapling_results {
            let index = SaplingScannedDatabaseIndex {
                sapling_key: sapling_key.clone(),
                tx_loc: TransactionLocation::from_parts(height, index),
            };

            let entry = SaplingScannedDatabaseEntry {
                index,
                value: Some(sapling_result),
            };

            batch = batch.zs_insert(&entry.index, &entry.value);
        }

        batch
            .write_batch()
            .expect("unexpected database write failure");
    }

    /// Inserts a new `sapling_key`, writing a marker entry just below its
    /// effective birthday height.
    ///
    /// The birthday defaults to, and is clamped up to, the network's Sapling
    /// activation height: there is nothing to scan before Sapling activated.
    ///
    /// # Panics
    ///
    /// If the database write fails.
    pub(crate) fn insert_sapling_key(
        &mut self,
        sapling_key: &SaplingScanningKey,
        birthday_height: Option<Height>,
    ) {
        let min_birthday_height = self.network().sapling_activation_height();

        // Default, then clamp, the birthday to the Sapling activation height.
        let birthday_height = birthday_height
            .unwrap_or(min_birthday_height)
            .max(min_birthday_height);

        // Mark the height just below the birthday as scanned, so scanning can
        // begin at the birthday itself.
        let skip_up_to_height = birthday_height.previous().unwrap_or(Height::MIN);

        self.sapling_tx_ids_cf()
            .new_batch_for_writing()
            .insert_sapling_height(sapling_key, skip_up_to_height)
            .write_batch()
            .expect("unexpected database write failure");
    }

    /// Deletes every entry (results and markers) for each key in `keys`.
    ///
    /// # Panics
    ///
    /// If the database write fails.
    pub fn delete_sapling_keys(&mut self, keys: Vec<SaplingScanningKey>) {
        self.sapling_tx_ids_cf()
            .new_batch_for_writing()
            .delete_sapling_keys(keys)
            .write_batch()
            .expect("unexpected database write failure");
    }

    /// Deletes every entry for each key in `keys`, then re-inserts a `None`
    /// marker at `Height::MIN`, so each key remains in the database with no
    /// stored results.
    ///
    /// # Panics
    ///
    /// If the database write fails.
    pub(crate) fn delete_sapling_results(&mut self, keys: Vec<SaplingScanningKey>) {
        // Delete and re-insert in a single batch, so the whole operation is atomic.
        let mut batch = self
            .sapling_tx_ids_cf()
            .new_batch_for_writing()
            .delete_sapling_keys(keys.clone());

        for key in &keys {
            batch = batch.insert_sapling_height(key, Height::MIN);
        }

        batch
            .write_batch()
            .expect("unexpected database write failure");
    }
}
/// Utility trait for writing scanned-height marker entries into a batch.
trait InsertSaplingHeight {
    /// Inserts a `None` marker for `sapling_key` at `height`, returning the
    /// updated batch.
    fn insert_sapling_height(self, sapling_key: &SaplingScanningKey, height: Height) -> Self;
}
impl InsertSaplingHeight for WriteSaplingTxIdsBatch<'_> {
    /// Writes a `None` marker for `sapling_key` at `height`, returning the
    /// updated batch.
    fn insert_sapling_height(self, sapling_key: &SaplingScanningKey, height: Height) -> Self {
        // The marker is stored under the minimum index for this key and height.
        let marker_index =
            SaplingScannedDatabaseIndex::min_for_key_and_height(sapling_key, height);

        self.zs_insert(&marker_index, &None)
    }
}
/// Utility trait for deleting all entries for a set of keys from a batch.
trait DeleteSaplingKeys {
    /// Deletes every entry for each key in `sapling_key`, returning the
    /// updated batch.
    fn delete_sapling_keys(self, sapling_key: Vec<SaplingScanningKey>) -> Self;
}
impl DeleteSaplingKeys for WriteSaplingTxIdsBatch<'_> {
    /// Deletes every entry for each key in `sapling_keys`, returning the
    /// updated batch.
    fn delete_sapling_keys(self, sapling_keys: Vec<SaplingScanningKey>) -> Self {
        sapling_keys.iter().fold(self, |batch, key| {
            let start_index = SaplingScannedDatabaseIndex::min_for_key(key);
            let end_index = SaplingScannedDatabaseIndex::max_for_key(key);

            // The range delete excludes its upper bound, so the entry at the
            // maximum index is deleted separately.
            batch
                .zs_delete_range(&start_index, &end_index)
                .zs_delete(&end_index)
        })
    }
}