use std::io::Cursor;
use std::num::NonZeroU64;
use base64::prelude::*;
use bytes::{Buf, BufMut, BytesMut};
use celestia_proto::serializers::cow_str::CowStr;
use nmt_rs::NamespaceMerkleHasher;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use tendermint::crypto::sha256::HASH_SIZE;
use tendermint::{crypto, merkle};
#[cfg(all(feature = "wasm-bindgen", target_arch = "wasm32"))]
use wasm_bindgen::prelude::*;
use crate::consts::appconsts;
use crate::nmt::{Namespace, NamespacedHashExt, NamespacedSha2Hasher, Nmt, RawNamespacedHash};
use crate::state::{AccAddress, AddressTrait};
use crate::{Error, Result};
use crate::{InfoByte, Share};
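/// A merkle hash used to identify a blob's data.
///
/// It is computed over the shares the blob is split into: the shares are
/// grouped into a merkle mountain range of namespaced merkle subtrees, and
/// the commitment is the simple merkle root of those subtree roots. See
/// [`Commitment::from_shares`].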
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[cfg_attr(
all(feature = "wasm-bindgen", target_arch = "wasm32"),
wasm_bindgen(inspectable)
)]
pub struct Commitment {
hash: merkle::Hash,
}
impl Commitment {
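/// Creates a new commitment from a precomputed hash.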
pub fn new(hash: merkle::Hash) -> Self {
Commitment { hash }
}
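/// Creates a commitment for the given blob data.
///
/// The data is validated against the share version, split into shares, and
/// the commitment is computed from them as described in
/// [`Commitment::from_shares`].
///
/// A sketch of the expected usage (marked `ignore`, as the exact re-export
/// paths of `Namespace` and `appconsts` depend on the crate layout):
///
/// ```ignore
/// let namespace = Namespace::new(0, &[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]).unwrap();
/// let commitment = Commitment::from_blob(
///     namespace,
///     b"some data",
///     appconsts::SHARE_VERSION_ZERO,
///     None, // share version 0 forbids a signer
/// )
/// .unwrap();
/// ```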
pub fn from_blob(
namespace: Namespace,
blob_data: &[u8],
share_version: u8,
signer: Option<&AccAddress>,
) -> Result<Commitment> {
validate_blob(share_version, signer.is_some())?;
let shares = split_blob_to_shares(namespace, share_version, blob_data, signer)?;
Self::from_shares(namespace, &shares)
}
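/// Creates a commitment from the shares of a blob.
///
/// The shares are split into a merkle mountain range: a sequence of
/// power-of-two sized subtrees, each at most [`subtree_width`] leaves wide.
/// A namespaced merkle tree root is computed for each subtree, and the
/// commitment is the simple merkle root over those subtree roots.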
pub fn from_shares(namespace: Namespace, mut shares: &[Share]) -> Result<Commitment> {
let subtree_root_threshold = appconsts::SUBTREE_ROOT_THRESHOLD;
let subtree_width = subtree_width(shares.len() as u64, subtree_root_threshold);
let tree_sizes = merkle_mountain_range_sizes(shares.len() as u64, subtree_width);
let mut leaf_sets: Vec<&[_]> = Vec::with_capacity(tree_sizes.len());
for size in tree_sizes {
let (leaves, rest) = shares.split_at(size as usize);
leaf_sets.push(leaves);
shares = rest;
}
let mut subtree_roots: Vec<RawNamespacedHash> = Vec::with_capacity(leaf_sets.len());
for leaf_set in leaf_sets {
let mut tree = Nmt::with_hasher(NamespacedSha2Hasher::with_ignore_max_ns(true));
for leaf_share in leaf_set {
tree.push_leaf(leaf_share.as_ref(), namespace.into())
.map_err(Error::Nmt)?;
}
subtree_roots.push(tree.root().to_array());
}
let hash = merkle::simple_hash_from_byte_vectors::<crypto::default::Sha256>(&subtree_roots);
Ok(Commitment { hash })
}
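/// Returns the hash of the commitment.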
pub fn hash(&self) -> &merkle::Hash {
&self.hash
}
}
#[cfg(all(feature = "wasm-bindgen", target_arch = "wasm32"))]
#[wasm_bindgen]
impl Commitment {
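/// Returns the hash of the commitment.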
#[wasm_bindgen(js_name = hash)]
pub fn js_hash(&self) -> Vec<u8> {
self.hash.to_vec()
}
}
impl From<Commitment> for merkle::Hash {
fn from(commitment: Commitment) -> Self {
commitment.hash
}
}
impl Serialize for Commitment {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let s = BASE64_STANDARD.encode(self.hash);
serializer.serialize_str(&s)
}
}
impl<'de> Deserialize<'de> for Commitment {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let mut buf = [0u8; HASH_SIZE * 2];
let s = CowStr::deserialize(deserializer)?;
let len = BASE64_STANDARD
.decode_slice(s, &mut buf)
.map_err(|e| serde::de::Error::custom(e.to_string()))?;
let hash: merkle::Hash = buf[..len]
.try_into()
.map_err(|_| serde::de::Error::custom("commitment is not the size of a sha256 hash"))?;
Ok(Commitment { hash })
}
}
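/// Validates that the share version is supported and that a signer is
/// provided exactly when the share version requires one: version 1 requires
/// a signer, version 0 forbids it.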
pub(crate) fn validate_blob(share_version: u8, has_signer: bool) -> Result<()> {
if ![appconsts::SHARE_VERSION_ZERO, appconsts::SHARE_VERSION_ONE].contains(&share_version) {
return Err(Error::UnsupportedShareVersion(share_version));
}
if share_version == appconsts::SHARE_VERSION_ZERO && has_signer {
return Err(Error::SignerNotSupported);
}
if share_version == appconsts::SHARE_VERSION_ONE && !has_signer {
return Err(Error::MissingSigner);
}
Ok(())
}
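/// Splits the blob's data into the sparse shares that carry it.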
pub(crate) fn split_blob_to_shares(
namespace: Namespace,
share_version: u8,
blob_data: &[u8],
signer: Option<&AccAddress>,
) -> Result<Vec<Share>> {
let mut shares = Vec::new();
let mut cursor = Cursor::new(blob_data);
while cursor.has_remaining() {
let share = build_sparse_share(namespace, share_version, signer, &mut cursor)?;
shares.push(share);
}
Ok(shares)
}
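/// Builds the next sparse share from the data remaining under the cursor.
///
/// A share consists of the namespace, an info byte (share version plus a
/// "first share of a sequence" flag) and the share's content. The first
/// share of a sequence additionally carries the big-endian `u32` sequence
/// length and, for share version 1, the signer's address. Content shorter
/// than the share is zero-padded up to [`appconsts::SHARE_SIZE`].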
fn build_sparse_share(
namespace: Namespace,
share_version: u8,
signer: Option<&AccAddress>,
data: &mut Cursor<impl AsRef<[u8]>>,
) -> Result<Share> {
let is_first_share = data.position() == 0;
let data_len = cursor_inner_length(data);
let mut bytes = BytesMut::with_capacity(appconsts::SHARE_SIZE);
bytes.put_slice(namespace.as_bytes());
let info_byte = InfoByte::new(share_version, is_first_share)?;
bytes.put_u8(info_byte.as_u8());
if is_first_share {
let data_len = data_len
.try_into()
.map_err(|_| Error::ShareSequenceLenExceeded(data_len))?;
bytes.put_u32(data_len);
if share_version == appconsts::SHARE_VERSION_ONE {
let signer = signer.ok_or(Error::MissingSigner)?;
bytes.put_slice(signer.as_bytes());
}
}
let current_size = bytes.len();
let available_space = appconsts::SHARE_SIZE - current_size;
let read_amount = available_space.min(data.remaining());
bytes.resize(appconsts::SHARE_SIZE, 0);
data.copy_to_slice(&mut bytes[current_size..current_size + read_amount]);
Share::from_raw(&bytes)
}
fn cursor_inner_length(cursor: &Cursor<impl AsRef<[u8]>>) -> usize {
cursor.get_ref().as_ref().len()
}
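/// Computes the sizes of the subtrees in a merkle mountain range with
/// `total_size` leaves, where no subtree has more than `max_tree_size`
/// leaves. Full `max_tree_size` trees are emitted first, then the remainder
/// is covered by decreasing powers of two; e.g. 11 leaves with
/// `max_tree_size = 4` yield `[4, 4, 2, 1]`.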
fn merkle_mountain_range_sizes(mut total_size: u64, max_tree_size: u64) -> Vec<u64> {
let mut tree_sizes = Vec::new();
while total_size != 0 {
if total_size >= max_tree_size {
tree_sizes.push(max_tree_size);
total_size -= max_tree_size;
} else {
// `total_size` is non-zero here, so the conversion cannot fail
let tree_size = round_down_to_power_of_2(total_size.try_into().unwrap())
    .expect("Failed to find previous power of 2");
tree_sizes.push(tree_size);
total_size -= tree_size;
}
}
tree_sizes
}
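/// Returns the size of the smallest power-of-two square that can fit
/// `share_count` shares.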
fn blob_min_square_size(share_count: u64) -> u64 {
round_up_to_power_of_2((share_count as f64).sqrt().ceil() as u64)
.expect("Failed to find minimum blob square size")
}
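/// Returns the maximum leaf count of the subtrees used to compute a blob's
/// commitment: `share_count / subtree_root_threshold` rounded up to the
/// next power of two, capped at the blob's minimum square size.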
fn subtree_width(share_count: u64, subtree_root_threshold: u64) -> u64 {
let mut s = share_count.div_ceil(subtree_root_threshold);
s = round_up_to_power_of_2(s).expect("Failed to find next power of 2");
s.min(blob_min_square_size(share_count))
}
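/// Returns the smallest power of two greater than or equal to `x`, or
/// `None` if it would not fit in a `u64`. Zero rounds up to one.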
fn round_up_to_power_of_2(x: u64) -> Option<u64> {
let mut po2 = 1;
loop {
if po2 >= x {
return Some(po2);
}
// `checked_shl` only fails on a too-large shift amount, so it would let
// `po2` silently wrap to zero and loop forever once `x` exceeds 2^63;
// `checked_mul` fails on value overflow instead
if let Some(next_po2) = po2.checked_mul(2) {
    po2 = next_po2;
} else {
    return None;
}
}
}
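/// Returns the largest power of two smaller than or equal to `x`, or
/// `None` if rounding `x` up to a power of two would overflow a `u64`.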
fn round_down_to_power_of_2(x: NonZeroU64) -> Option<u64> {
let x: u64 = x.into();
match round_up_to_power_of_2(x) {
Some(po2) if po2 == x => Some(x),
Some(po2) => Some(po2 / 2),
_ => None,
}
}
#[cfg(feature = "uniffi")]
mod uniffi_types {
use super::{Commitment as RustCommitment, HASH_SIZE};
use uniffi::Record;
use crate::error::UniffiConversionError;
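/// A merkle hash used to identify a blob's data.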
#[derive(Record)]
pub struct Commitment {
pub sha_hash: Vec<u8>,
}
impl From<RustCommitment> for Commitment {
fn from(value: RustCommitment) -> Self {
Commitment {
sha_hash: value.hash.to_vec(),
}
}
}
impl TryFrom<Commitment> for RustCommitment {
type Error = UniffiConversionError;
fn try_from(value: Commitment) -> Result<Self, Self::Error> {
let hash: [u8; HASH_SIZE] = value
.sha_hash
.try_into()
.map_err(|_| UniffiConversionError::InvalidCommitmentLength)?;
Ok(RustCommitment { hash })
}
}
uniffi::custom_type!(RustCommitment, Commitment);
}
#[cfg(test)]
mod tests {
use super::*;
#[cfg(target_arch = "wasm32")]
use wasm_bindgen_test::wasm_bindgen_test as test;
#[test]
fn test_single_sparse_share_v0() {
let namespace = Namespace::new(0, &[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]).unwrap();
let data = vec![1, 2, 3, 4, 5, 6, 7];
let mut cursor = Cursor::new(&data);
let share = build_sparse_share(namespace, 0, None, &mut cursor).unwrap();
assert!(!cursor.has_remaining());
let (share_ns, share_data) = share.as_ref().split_at(appconsts::NAMESPACE_SIZE);
assert_eq!(share_ns, namespace.as_bytes());
let expected_share_start: &[u8] = &[
    1, // info byte: version 0, first share of the sequence
    0, 0, 0, 7, // sequence len
    1, 2, 3, 4, 5, 6, 7, // data
];
let (share_data, share_padding) = share_data.split_at(expected_share_start.len());
assert_eq!(share_data, expected_share_start);
assert_eq!(
share_padding,
&vec![0; appconsts::FIRST_SPARSE_SHARE_CONTENT_SIZE - data.len()],
);
}
#[test]
fn test_single_sparse_share_v1() {
let namespace = Namespace::new(0, &[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]).unwrap();
let data = vec![1, 2, 3, 4, 5, 6, 7];
let signer = AccAddress::from([9; appconsts::SIGNER_SIZE]);
let mut cursor = Cursor::new(&data);
let share = build_sparse_share(namespace, 1, Some(&signer), &mut cursor).unwrap();
assert!(!cursor.has_remaining());
let (share_ns, share_data) = share.as_ref().split_at(appconsts::NAMESPACE_SIZE);
assert_eq!(share_ns, namespace.as_bytes());
let expected_share_start: &[u8] = &[
    0b00000011, // info byte: version 1, first share of the sequence
    0, 0, 0, 7, // sequence len
    9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, // signer
    1, 2, 3, 4, 5, 6, 7, // data
];
let (share_data, share_padding) = share_data.split_at(expected_share_start.len());
assert_eq!(share_data, expected_share_start);
assert_eq!(
share_padding,
&vec![
0;
appconsts::FIRST_SPARSE_SHARE_CONTENT_SIZE - appconsts::SIGNER_SIZE - data.len()
],
);
}
#[test]
fn test_sparse_share_v0_with_continuation() {
let namespace = Namespace::new(0, &[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]).unwrap();
let continuation_len = 7;
let data = vec![7; appconsts::FIRST_SPARSE_SHARE_CONTENT_SIZE + continuation_len];
let mut cursor = Cursor::new(&data);
let first_share = build_sparse_share(namespace, 0, None, &mut cursor).unwrap();
assert_eq!(
cursor.position(),
appconsts::FIRST_SPARSE_SHARE_CONTENT_SIZE as u64
);
let (share_ns, share_data) = first_share.as_ref().split_at(appconsts::NAMESPACE_SIZE);
assert_eq!(share_ns, namespace.as_bytes());
let (share_info_byte, share_data) = share_data.split_at(appconsts::SHARE_INFO_BYTES);
assert_eq!(share_info_byte, &[1]);
let (share_seq_len, share_data) = share_data.split_at(appconsts::SEQUENCE_LEN_BYTES);
assert_eq!(share_seq_len, &(data.len() as u32).to_be_bytes());
assert_eq!(
share_data,
&vec![7; appconsts::FIRST_SPARSE_SHARE_CONTENT_SIZE]
);
let continuation_share = build_sparse_share(namespace, 0, None, &mut cursor).unwrap();
assert!(!cursor.has_remaining());
let (share_ns, share_data) = continuation_share
.as_ref()
.split_at(appconsts::NAMESPACE_SIZE);
assert_eq!(share_ns, namespace.as_bytes());
let expected_continuation_share_start: &[u8] = &[
    0, // info byte: version 0, not the first share
    7, 7, 7, 7, 7, 7, 7, // data
];
let (share_data, share_padding) =
share_data.split_at(expected_continuation_share_start.len());
assert_eq!(share_data, expected_continuation_share_start);
assert_eq!(
share_padding,
&vec![0; appconsts::CONTINUATION_SPARSE_SHARE_CONTENT_SIZE - continuation_len],
);
}
#[test]
fn test_sparse_share_v0_empty_data() {
let namespace = Namespace::new(0, &[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]).unwrap();
let data = vec![];
let mut cursor = Cursor::new(&data);
let expected_share_start: &[u8] = &[
    1, // info byte: version 0, first share of the sequence
    0, 0, 0, 0, // sequence len
];
let share = build_sparse_share(namespace, 0, None, &mut cursor).unwrap();
assert!(!cursor.has_remaining());
let (share_ns, share_data) = share.as_ref().split_at(appconsts::NAMESPACE_SIZE);
assert_eq!(share_ns, namespace.as_bytes());
let (share_start, share_data) = share_data.split_at(expected_share_start.len());
assert_eq!(share_start, expected_share_start);
assert_eq!(
share_data,
&vec![0; appconsts::FIRST_SPARSE_SHARE_CONTENT_SIZE],
);
}
#[test]
fn merkle_mountain_ranges() {
struct TestCase {
total_size: u64,
square_size: u64,
expected: Vec<u64>,
}
let test_cases = [
TestCase {
total_size: 11,
square_size: 4,
expected: vec![4, 4, 2, 1],
},
TestCase {
total_size: 2,
square_size: 64,
expected: vec![2],
},
TestCase {
total_size: 64,
square_size: 8,
expected: vec![8, 8, 8, 8, 8, 8, 8, 8],
},
TestCase {
total_size: 19,
square_size: 8,
expected: vec![8, 8, 2, 1],
},
];
for case in test_cases {
assert_eq!(
merkle_mountain_range_sizes(case.total_size, case.square_size),
case.expected,
);
}
}
#[test]
fn blob_validation() {
let share_signer_required = appconsts::SHARE_VERSION_ONE;
let share_signer_forbidden = appconsts::SHARE_VERSION_ZERO;
let share_version_unsupported = appconsts::MAX_SHARE_VERSION;
let with_signer = true;
let no_signer = false;
validate_blob(share_signer_forbidden, no_signer).unwrap();
validate_blob(share_signer_required, with_signer).unwrap();
validate_blob(share_version_unsupported, no_signer).unwrap_err();
validate_blob(share_signer_required, no_signer).unwrap_err();
validate_blob(share_signer_forbidden, with_signer).unwrap_err();
}
}