use super::{
committable_column::CommittableColumn, AppendColumnCommitmentsError, ColumnCommitments,
ColumnCommitmentsMismatch, Commitment, DuplicateIdentifiers,
};
use crate::base::{
database::{ColumnField, CommitmentAccessor, OwnedTable, TableRef},
scalar::Scalar,
};
use alloc::vec::Vec;
use core::ops::Range;
use proof_of_sql_parser::Identifier;
use serde::{Deserialize, Serialize};
use snafu::Snafu;
/// Error returned when a [`TableCommitment`] would be given a row range whose
/// end precedes its start (`range.start > range.end`).
#[derive(Debug, Snafu)]
#[snafu(display("cannot create a TableCommitment with a negative range"))]
pub struct NegativeRange;
/// Error returned when the columns provided for a [`TableCommitment`] do not
/// all have the same number of rows.
#[derive(Debug, Snafu)]
#[snafu(display("cannot create a TableCommitment from columns of mixed length"))]
pub struct MixedLengthColumns;
/// Errors that can occur when constructing a [`TableCommitment`] from columns.
#[derive(Debug, Snafu)]
pub enum TableCommitmentFromColumnsError {
    /// The provided columns have differing row counts.
    #[snafu(transparent)]
    MixedLengthColumns {
        source: MixedLengthColumns,
    },
    /// The same column identifier was provided more than once.
    #[snafu(transparent)]
    DuplicateIdentifiers {
        source: DuplicateIdentifiers,
    },
}
/// Errors that can occur when appending rows to a [`TableCommitment`].
#[derive(Debug, Snafu)]
pub enum AppendTableCommitmentError {
    /// The appended columns have differing row counts.
    #[snafu(transparent)]
    MixedLengthColumns {
        source: MixedLengthColumns,
    },
    /// Appending to the inner [`ColumnCommitments`] failed (mismatch or duplicate identifier).
    #[snafu(transparent)]
    AppendColumnCommitments {
        source: AppendColumnCommitmentsError,
    },
}
/// Errors that can occur when adding or subtracting [`TableCommitment`]s.
#[derive(Debug, Snafu)]
pub enum TableCommitmentArithmeticError {
    /// The two operands' column commitments do not match.
    #[snafu(transparent)]
    ColumnMismatch {
        source: ColumnCommitmentsMismatch,
    },
    /// The operation would produce a range whose end precedes its start.
    #[snafu(transparent)]
    NegativeRange {
        source: NegativeRange,
    },
    /// The operands' row ranges are neither contiguous nor aligned at an endpoint.
    #[snafu(display(
        "cannot perform table commitment arithmetic for noncontiguous table commitments"
    ))]
    NonContiguous,
}
/// Commitment to an entire table of data: per-column commitments plus the row
/// range they cover.
#[derive(Clone, Default, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct TableCommitment<C>
where
    C: Commitment,
{
    // Commitments for each column, keyed by identifier.
    column_commitments: ColumnCommitments<C>,
    // Rows covered: `start` is the offset of the first row, `len()` is the row count.
    range: Range<usize>,
}
impl<C: Commitment> TableCommitment<C> {
#[allow(
clippy::missing_panics_doc,
reason = "The assertion ensures that from_accessor should not create columns with a negative range"
)]
pub fn from_accessor_with_max_bounds(
table_ref: TableRef,
columns: &[ColumnField],
accessor: &impl CommitmentAccessor<C>,
) -> Self {
let length = accessor.get_length(table_ref);
let offset = accessor.get_offset(table_ref);
Self::try_new(
ColumnCommitments::from_accessor_with_max_bounds(table_ref, columns, accessor),
offset..offset + length,
)
.expect("from_accessor should not create columns with a negative range")
}
#[cfg(test)]
pub(super) fn column_commitments_mut(&mut self) -> &mut ColumnCommitments<C> {
&mut self.column_commitments
}
pub fn try_new(
column_commitments: ColumnCommitments<C>,
range: Range<usize>,
) -> Result<Self, NegativeRange> {
if range.start <= range.end {
Ok(TableCommitment {
column_commitments,
range,
})
} else {
Err(NegativeRange)
}
}
#[must_use]
pub fn column_commitments(&self) -> &ColumnCommitments<C> {
&self.column_commitments
}
#[must_use]
pub fn range(&self) -> &Range<usize> {
&self.range
}
#[must_use]
pub fn num_columns(&self) -> usize {
self.column_commitments.len()
}
#[must_use]
pub fn num_rows(&self) -> usize {
self.range.len()
}
pub fn try_from_columns_with_offset<'a, COL>(
columns: impl IntoIterator<Item = (&'a Identifier, COL)>,
offset: usize,
setup: &C::PublicSetup<'_>,
) -> Result<TableCommitment<C>, TableCommitmentFromColumnsError>
where
COL: Into<CommittableColumn<'a>>,
{
let (identifiers, committable_columns): (Vec<&Identifier>, Vec<CommittableColumn>) =
columns
.into_iter()
.map(|(identifier, column)| (identifier, column.into()))
.unzip();
let num_rows = num_rows_of_columns(&committable_columns)?;
let column_commitments = ColumnCommitments::try_from_columns_with_offset(
identifiers.into_iter().zip(committable_columns.into_iter()),
offset,
setup,
)?;
Ok(TableCommitment {
column_commitments,
range: offset..offset + num_rows,
})
}
#[allow(
clippy::missing_panics_doc,
reason = "since OwnedTables cannot have columns of mixed length or duplicate identifiers"
)]
pub fn from_owned_table_with_offset<S>(
owned_table: &OwnedTable<S>,
offset: usize,
setup: &C::PublicSetup<'_>,
) -> TableCommitment<C>
where
S: Scalar,
{
Self::try_from_columns_with_offset(owned_table.inner_table(), offset, setup)
.expect("OwnedTables cannot have columns of mixed length or duplicate identifiers")
}
pub fn try_append_rows<'a, COL>(
&mut self,
columns: impl IntoIterator<Item = (&'a Identifier, COL)>,
setup: &C::PublicSetup<'_>,
) -> Result<(), AppendTableCommitmentError>
where
COL: Into<CommittableColumn<'a>>,
{
let (identifiers, committable_columns): (Vec<&Identifier>, Vec<CommittableColumn>) =
columns
.into_iter()
.map(|(identifier, column)| (identifier, column.into()))
.unzip();
let num_rows = num_rows_of_columns(&committable_columns)?;
self.column_commitments.try_append_rows_with_offset(
identifiers.into_iter().zip(committable_columns.into_iter()),
self.range.end,
setup,
)?;
self.range.end += num_rows;
Ok(())
}
pub fn append_owned_table<S>(
&mut self,
owned_table: &OwnedTable<S>,
setup: &C::PublicSetup<'_>,
) -> Result<(), ColumnCommitmentsMismatch>
where
S: Scalar,
{
self.try_append_rows(owned_table.inner_table(), setup)
.map_err(|e| match e {
AppendTableCommitmentError::AppendColumnCommitments { source: e } => match e {
AppendColumnCommitmentsError::Mismatch { source: e } => e,
AppendColumnCommitmentsError::DuplicateIdentifiers { .. } => {
panic!("OwnedTables cannot have duplicate identifiers");
}
},
AppendTableCommitmentError::MixedLengthColumns { .. } => {
panic!("OwnedTables cannot have columns of mixed length");
}
})
}
pub fn try_extend_columns<'a, COL>(
&mut self,
columns: impl IntoIterator<Item = (&'a Identifier, COL)>,
setup: &C::PublicSetup<'_>,
) -> Result<(), TableCommitmentFromColumnsError>
where
COL: Into<CommittableColumn<'a>>,
{
let num_rows = self.range.len();
let (identifiers, committable_columns): (Vec<&Identifier>, Vec<CommittableColumn>) =
columns
.into_iter()
.map(|(identifier, column)| (identifier, column.into()))
.unzip();
let num_rows_of_new_columns = num_rows_of_columns(&committable_columns)?;
if num_rows_of_new_columns != num_rows {
Err(MixedLengthColumns)?;
}
self.column_commitments.try_extend_columns_with_offset(
identifiers.into_iter().zip(committable_columns.into_iter()),
self.range.start,
setup,
)?;
Ok(())
}
pub fn try_add(self, other: Self) -> Result<Self, TableCommitmentArithmeticError>
where
Self: Sized,
{
let range = if self.range.end == other.range.start {
self.range.start..other.range.end
} else if other.range.end == self.range.start {
other.range.start..self.range.end
} else {
return Err(TableCommitmentArithmeticError::NonContiguous);
};
let column_commitments = self.column_commitments.try_add(other.column_commitments)?;
Ok(TableCommitment {
column_commitments,
range,
})
}
pub fn try_sub(self, other: Self) -> Result<Self, TableCommitmentArithmeticError>
where
Self: Sized,
{
if self.range.len() < other.range.len() {
Err(NegativeRange)?;
}
let range = if self.range.start == other.range.start {
other.range.end..self.range.end
} else if self.range.end == other.range.end {
self.range.start..other.range.start
} else {
return Err(TableCommitmentArithmeticError::NonContiguous);
};
let column_commitments = self.column_commitments.try_sub(other.column_commitments)?;
Ok(TableCommitment {
column_commitments,
range,
})
}
}
/// Returns the common row count of the given columns, or [`MixedLengthColumns`]
/// if they disagree. An empty column set has zero rows.
fn num_rows_of_columns<'a>(
    committable_columns: impl IntoIterator<Item = &'a CommittableColumn<'a>>,
) -> Result<usize, MixedLengthColumns> {
    let mut columns = committable_columns.into_iter();
    match columns.next() {
        // No columns at all: the table has no rows.
        None => Ok(0),
        Some(first) => {
            let expected_len = first.len();
            if columns.all(|column| column.len() == expected_len) {
                Ok(expected_len)
            } else {
                Err(MixedLengthColumns)
            }
        }
    }
}
// Tests require the `arrow` and `blitzar` features for record-batch support and
// the commitment backends used by the test scalar/commitment types.
#[cfg(all(test, feature = "arrow", feature = "blitzar"))]
mod tests {
    use super::*;
    use crate::{
        base::{
            commitment::naive_commitment::NaiveCommitment,
            database::{owned_table_utility::*, Column, OwnedColumn},
            map::IndexMap,
            scalar::test_scalar::TestScalar,
        },
        record_batch,
    };
    // `try_new` rejects a range whose end precedes its start.
    #[test]
    #[allow(clippy::reversed_empty_ranges)]
    fn we_cannot_construct_table_commitment_with_negative_range() {
        let try_new_result =
            TableCommitment::<NaiveCommitment>::try_new(ColumnCommitments::default(), 1..0);
        assert!(matches!(try_new_result, Err(NegativeRange)));
    }
    // Construction from (identifier, column) pairs: empty table, zero-row column,
    // and a populated table — each with the expected range/column/row counts.
    #[test]
    fn we_can_construct_table_commitment_from_columns_and_identifiers() {
        let mut empty_columns_iter: IndexMap<Identifier, OwnedColumn<TestScalar>> =
            IndexMap::default();
        let empty_table_commitment =
            TableCommitment::<NaiveCommitment>::try_from_columns_with_offset(
                &empty_columns_iter,
                0,
                &(),
            )
            .unwrap();
        assert_eq!(
            empty_table_commitment.column_commitments(),
            &ColumnCommitments::try_from_columns_with_offset(&empty_columns_iter, 0, &()).unwrap()
        );
        assert_eq!(empty_table_commitment.range(), &(0..0));
        assert_eq!(empty_table_commitment.num_columns(), 0);
        assert_eq!(empty_table_commitment.num_rows(), 0);
        empty_columns_iter.insert("column_a".parse().unwrap(), OwnedColumn::BigInt(vec![]));
        let empty_table_commitment =
            TableCommitment::<NaiveCommitment>::try_from_columns_with_offset(
                &empty_columns_iter,
                1,
                &(),
            )
            .unwrap();
        assert_eq!(
            empty_table_commitment.column_commitments(),
            &ColumnCommitments::try_from_columns_with_offset(&empty_columns_iter, 1, &()).unwrap()
        );
        // A zero-row column at offset 1 yields the empty range 1..1.
        assert_eq!(empty_table_commitment.range(), &(1..1));
        assert_eq!(empty_table_commitment.num_columns(), 1);
        assert_eq!(empty_table_commitment.num_rows(), 0);
        let owned_table = owned_table::<TestScalar>([
            bigint("bigint_id", [1, 5, -5, 0]),
            varchar("varchar_id", ["Lorem", "ipsum", "dolor", "sit"]),
            scalar("scalar_id", [1000, 2000, -1000, 0]),
        ]);
        let table_commitment = TableCommitment::<NaiveCommitment>::try_from_columns_with_offset(
            owned_table.inner_table(),
            2,
            &(),
        )
        .unwrap();
        assert_eq!(
            table_commitment.column_commitments(),
            &ColumnCommitments::try_from_columns_with_offset(owned_table.inner_table(), 2, &())
                .unwrap()
        );
        assert_eq!(table_commitment.range(), &(2..6));
        assert_eq!(table_commitment.num_columns(), 3);
        assert_eq!(table_commitment.num_rows(), 4);
        // The OwnedTable convenience constructor must agree with the columns path.
        let table_commitment_from_owned_table =
            TableCommitment::from_owned_table_with_offset(&owned_table, 2, &());
        assert_eq!(table_commitment_from_owned_table, table_commitment);
    }
    // Duplicate identifiers are rejected both at construction and when extending,
    // leaving the commitment unchanged on failure.
    #[test]
    fn we_cannot_construct_table_commitment_from_duplicate_identifiers() {
        let duplicate_identifier_a = "duplicate_identifier_a".parse().unwrap();
        let duplicate_identifier_b = "duplicate_identifier_b".parse().unwrap();
        let unique_identifier = "unique_identifier".parse().unwrap();
        let empty_column = OwnedColumn::<TestScalar>::BigInt(vec![]);
        let from_columns_result = TableCommitment::<NaiveCommitment>::try_from_columns_with_offset(
            [
                (&duplicate_identifier_a, &empty_column),
                (&unique_identifier, &empty_column),
                (&duplicate_identifier_a, &empty_column),
            ],
            0,
            &(),
        );
        assert!(matches!(
            from_columns_result,
            Err(TableCommitmentFromColumnsError::DuplicateIdentifiers { .. })
        ));
        let mut table_commitment =
            TableCommitment::<NaiveCommitment>::try_from_columns_with_offset(
                [
                    (&duplicate_identifier_a, &empty_column),
                    (&unique_identifier, &empty_column),
                ],
                0,
                &(),
            )
            .unwrap();
        let column_commitments = table_commitment.column_commitments().clone();
        // Extending with an identifier that already exists fails.
        let extend_columns_result =
            table_commitment.try_extend_columns([(&duplicate_identifier_a, &empty_column)], &());
        assert!(matches!(
            extend_columns_result,
            Err(TableCommitmentFromColumnsError::DuplicateIdentifiers { .. })
        ));
        // Extending with a duplicate within the new columns themselves also fails.
        let extend_columns_result = table_commitment.try_extend_columns(
            [
                (&duplicate_identifier_b, &empty_column),
                (&duplicate_identifier_b, &empty_column),
            ],
            &(),
        );
        assert!(matches!(
            extend_columns_result,
            Err(TableCommitmentFromColumnsError::DuplicateIdentifiers { .. })
        ));
        // Failed operations must not have mutated the commitment.
        assert_eq!(table_commitment.num_columns(), 2);
        assert_eq!(table_commitment.column_commitments(), &column_commitments);
    }
    // Mixed-length columns are rejected both at construction and when extending,
    // leaving the commitment unchanged on failure.
    #[test]
    fn we_cannot_construct_table_commitment_from_columns_of_mixed_length() {
        let column_id_a = "column_a".parse().unwrap();
        let column_id_b = "column_b".parse().unwrap();
        let column_id_c = "column_c".parse().unwrap();
        let one_row_column = OwnedColumn::<TestScalar>::BigInt(vec![1]);
        let two_row_column = OwnedColumn::<TestScalar>::BigInt(vec![1, 2]);
        let from_columns_result = TableCommitment::<NaiveCommitment>::try_from_columns_with_offset(
            [
                (&column_id_a, &one_row_column),
                (&column_id_b, &two_row_column),
            ],
            0,
            &(),
        );
        assert!(matches!(
            from_columns_result,
            Err(TableCommitmentFromColumnsError::MixedLengthColumns { .. })
        ));
        let mut table_commitment =
            TableCommitment::<NaiveCommitment>::try_from_columns_with_offset(
                [(&column_id_a, &one_row_column)],
                0,
                &(),
            )
            .unwrap();
        let column_commitments = table_commitment.column_commitments().clone();
        // A new column whose length differs from the table's row count fails.
        let extend_columns_result =
            table_commitment.try_extend_columns([(&column_id_b, &two_row_column)], &());
        assert!(matches!(
            extend_columns_result,
            Err(TableCommitmentFromColumnsError::MixedLengthColumns { .. })
        ));
        // New columns that disagree with each other also fail.
        let extend_columns_result = table_commitment.try_extend_columns(
            [
                (&column_id_b, &one_row_column),
                (&column_id_c, &two_row_column),
            ],
            &(),
        );
        assert!(matches!(
            extend_columns_result,
            Err(TableCommitmentFromColumnsError::MixedLengthColumns { .. })
        ));
        assert_eq!(table_commitment.num_columns(), 1);
        assert_eq!(table_commitment.column_commitments(), &column_commitments);
    }
    // Appending rows (via columns or an OwnedTable) must match committing to the
    // concatenated data in one shot.
    #[test]
    fn we_can_append_rows_to_table_commitment() {
        let bigint_id: Identifier = "bigint_column".parse().unwrap();
        let bigint_data = [1i64, 5, -5, 0, 10];
        let varchar_id: Identifier = "varchar_column".parse().unwrap();
        let varchar_data = ["Lorem", "ipsum", "dolor", "sit", "amet"];
        let scalar_id: Identifier = "scalar_column".parse().unwrap();
        let scalar_data = [1000, 2000, 3000, -1000, 0];
        let initial_columns: OwnedTable<TestScalar> = owned_table([
            bigint(bigint_id, bigint_data[..2].to_vec()),
            varchar(varchar_id, varchar_data[..2].to_vec()),
            scalar(scalar_id, scalar_data[..2].to_vec()),
        ]);
        let mut table_commitment =
            TableCommitment::<NaiveCommitment>::try_from_columns_with_offset(
                initial_columns.inner_table(),
                0,
                &(),
            )
            .unwrap();
        let mut table_commitment_clone = table_commitment.clone();
        let append_columns: OwnedTable<TestScalar> = owned_table([
            bigint(bigint_id, bigint_data[2..].to_vec()),
            varchar(varchar_id, varchar_data[2..].to_vec()),
            scalar(scalar_id, scalar_data[2..].to_vec()),
        ]);
        table_commitment
            .try_append_rows(append_columns.inner_table(), &())
            .unwrap();
        let total_columns: OwnedTable<TestScalar> = owned_table([
            bigint(bigint_id, bigint_data),
            varchar(varchar_id, varchar_data),
            scalar(scalar_id, scalar_data),
        ]);
        let expected_table_commitment =
            TableCommitment::try_from_columns_with_offset(total_columns.inner_table(), 0, &())
                .unwrap();
        assert_eq!(table_commitment, expected_table_commitment);
        // The OwnedTable append path must give the same result.
        table_commitment_clone
            .append_owned_table(&append_columns, &())
            .unwrap();
        assert_eq!(table_commitment, table_commitment_clone);
    }
    // Appending columns whose metadata differs from the committed columns fails
    // with a mismatch error and leaves the commitment unchanged.
    #[test]
    fn we_cannot_append_mismatched_columns_to_table_commitment() {
        let base_table: OwnedTable<TestScalar> = owned_table([
            bigint("column_a", [1, 2, 3, 4]),
            varchar("column_b", ["Lorem", "ipsum", "dolor", "sit"]),
        ]);
        let mut table_commitment =
            TableCommitment::<NaiveCommitment>::try_from_columns_with_offset(
                base_table.inner_table(),
                0,
                &(),
            )
            .unwrap();
        let column_commitments = table_commitment.column_commitments().clone();
        // Same identifiers, but column_a changes type from bigint to varchar.
        let table_diff_type: OwnedTable<TestScalar> = owned_table([
            varchar("column_a", ["5", "6", "7", "8"]),
            varchar("column_b", ["Lorem", "ipsum", "dolor", "sit"]),
        ]);
        assert!(matches!(
            table_commitment.try_append_rows(table_diff_type.inner_table(), &()),
            Err(AppendTableCommitmentError::AppendColumnCommitments {
                source: AppendColumnCommitmentsError::Mismatch {
                    source: ColumnCommitmentsMismatch::ColumnCommitmentMetadata { .. }
                }
            })
        ));
        assert_eq!(table_commitment.num_rows(), 4);
        assert_eq!(table_commitment.column_commitments(), &column_commitments);
    }
    // Appending input that repeats an identifier fails and leaves the commitment unchanged.
    #[test]
    fn we_cannot_append_columns_with_duplicate_identifiers_to_table_commitment() {
        let column_id_a = "column_a".parse().unwrap();
        let column_id_b = "column_b".parse().unwrap();
        let column_data = OwnedColumn::<TestScalar>::BigInt(vec![1, 2, 3]);
        let mut table_commitment =
            TableCommitment::<NaiveCommitment>::try_from_columns_with_offset(
                [(&column_id_a, &column_data), (&column_id_b, &column_data)],
                0,
                &(),
            )
            .unwrap();
        let column_commitments = table_commitment.column_commitments().clone();
        let append_column_result = table_commitment.try_append_rows(
            [
                (&column_id_a, &column_data),
                (&column_id_b, &column_data),
                (&column_id_a, &column_data),
            ],
            &(),
        );
        assert!(matches!(
            append_column_result,
            Err(AppendTableCommitmentError::AppendColumnCommitments {
                source: AppendColumnCommitmentsError::DuplicateIdentifiers { .. }
            })
        ));
        assert_eq!(table_commitment.num_rows(), 3);
        assert_eq!(table_commitment.column_commitments(), &column_commitments);
    }
    // Appending columns of differing lengths fails and leaves the commitment unchanged.
    #[allow(clippy::similar_names)]
    #[test]
    fn we_cannot_append_columns_of_mixed_length_to_table_commitment() {
        let column_id_a: Identifier = "column_a".parse().unwrap();
        let column_id_b: Identifier = "column_b".parse().unwrap();
        let base_table: OwnedTable<TestScalar> = owned_table([
            bigint(column_id_a, [1, 2, 3, 4]),
            varchar(column_id_b, ["Lorem", "ipsum", "dolor", "sit"]),
        ]);
        let mut table_commitment =
            TableCommitment::<NaiveCommitment>::try_from_columns_with_offset(
                base_table.inner_table(),
                0,
                &(),
            )
            .unwrap();
        let column_commitments = table_commitment.column_commitments().clone();
        // Three rows vs two rows — the append must be rejected.
        let column_a_append_data = OwnedColumn::<TestScalar>::BigInt(vec![5, 6, 7]);
        let column_b_append_data =
            OwnedColumn::VarChar(["amet", "consectetur"].map(String::from).to_vec());
        let append_result = table_commitment.try_append_rows(
            [
                (&column_id_a, &column_a_append_data),
                (&column_id_b, &column_b_append_data),
            ],
            &(),
        );
        assert!(matches!(
            append_result,
            Err(AppendTableCommitmentError::MixedLengthColumns { .. })
        ));
        assert_eq!(table_commitment.num_rows(), 4);
        assert_eq!(table_commitment.column_commitments(), &column_commitments);
    }
    // Extending with a new column of matching length matches committing to the
    // full column set in one shot.
    #[test]
    fn we_can_extend_columns_to_table_commitment() {
        let bigint_id: Identifier = "bigint_column".parse().unwrap();
        let bigint_data = [1i64, 5, -5, 0, 10];
        let varchar_id: Identifier = "varchar_column".parse().unwrap();
        let varchar_data = ["Lorem", "ipsum", "dolor", "sit", "amet"];
        let scalar_id: Identifier = "scalar_column".parse().unwrap();
        let scalar_data = [1000, 2000, 3000, -1000, 0];
        let initial_columns: OwnedTable<TestScalar> = owned_table([
            bigint(bigint_id, bigint_data),
            varchar(varchar_id, varchar_data),
        ]);
        let mut table_commitment =
            TableCommitment::<NaiveCommitment>::try_from_columns_with_offset(
                initial_columns.inner_table(),
                2,
                &(),
            )
            .unwrap();
        let new_columns = owned_table::<TestScalar>([scalar(scalar_id, scalar_data)]);
        table_commitment
            .try_extend_columns(new_columns.inner_table(), &())
            .unwrap();
        let expected_columns = owned_table::<TestScalar>([
            bigint(bigint_id, bigint_data),
            varchar(varchar_id, varchar_data),
            scalar(scalar_id, scalar_data),
        ]);
        let expected_table_commitment =
            TableCommitment::try_from_columns_with_offset(expected_columns.inner_table(), 2, &())
                .unwrap();
        assert_eq!(table_commitment, expected_table_commitment);
    }
    // Adding commitments over contiguous ranges (in either operand order) matches
    // committing to the concatenated data.
    #[test]
    fn we_can_add_table_commitments() {
        let bigint_id: Identifier = "bigint_column".parse().unwrap();
        let bigint_data = [1i64, 5, -5, 0, 10];
        let varchar_id: Identifier = "varchar_column".parse().unwrap();
        let varchar_data = ["Lorem", "ipsum", "dolor", "sit", "amet"];
        let scalar_id: Identifier = "scalar_column".parse().unwrap();
        let scalar_data = [1000, 2000, 3000, -1000, 0];
        let columns_a: OwnedTable<TestScalar> = owned_table([
            bigint(bigint_id, bigint_data[..2].to_vec()),
            varchar(varchar_id, varchar_data[..2].to_vec()),
            scalar(scalar_id, scalar_data[..2].to_vec()),
        ]);
        let table_commitment_a = TableCommitment::<NaiveCommitment>::try_from_columns_with_offset(
            columns_a.inner_table(),
            0,
            &(),
        )
        .unwrap();
        let columns_b: OwnedTable<TestScalar> = owned_table([
            bigint(bigint_id, bigint_data[2..].to_vec()),
            varchar(varchar_id, varchar_data[2..].to_vec()),
            scalar(scalar_id, scalar_data[2..].to_vec()),
        ]);
        let table_commitment_b =
            TableCommitment::try_from_columns_with_offset(columns_b.inner_table(), 2, &()).unwrap();
        let columns_sum: OwnedTable<TestScalar> = owned_table([
            bigint(bigint_id, bigint_data),
            varchar(varchar_id, varchar_data),
            scalar(scalar_id, scalar_data),
        ]);
        let table_commitment_sum =
            TableCommitment::try_from_columns_with_offset(columns_sum.inner_table(), 0, &())
                .unwrap();
        assert_eq!(
            table_commitment_a
                .clone()
                .try_add(table_commitment_b.clone())
                .unwrap(),
            table_commitment_sum
        );
        // Addition is symmetric with respect to operand order.
        assert_eq!(
            table_commitment_b.try_add(table_commitment_a).unwrap(),
            table_commitment_sum
        );
    }
    // Adding commitments with mismatched column metadata fails even when the
    // ranges are contiguous.
    #[test]
    fn we_cannot_add_mismatched_table_commitments() {
        let base_table: OwnedTable<TestScalar> = owned_table([
            bigint("column_a", [1, 2, 3, 4]),
            varchar("column_b", ["Lorem", "ipsum", "dolor", "sit"]),
        ]);
        let table_commitment = TableCommitment::<NaiveCommitment>::try_from_columns_with_offset(
            base_table.inner_table(),
            0,
            &(),
        )
        .unwrap();
        let table_diff_type: OwnedTable<TestScalar> = owned_table([
            varchar("column_a", ["5", "6", "7", "8"]),
            varchar("column_b", ["Lorem", "ipsum", "dolor", "sit"]),
        ]);
        let table_commitment_diff_type =
            TableCommitment::try_from_columns_with_offset(table_diff_type.inner_table(), 4, &())
                .unwrap();
        assert!(matches!(
            table_commitment.try_add(table_commitment_diff_type),
            Err(TableCommitmentArithmeticError::ColumnMismatch { .. })
        ));
    }
    // Addition requires the operand ranges to touch exactly end-to-start;
    // disjoint, overlapping, and identical ranges all fail.
    #[test]
    fn we_cannot_add_noncontiguous_table_commitments() {
        let base_table: OwnedTable<TestScalar> = owned_table([
            bigint("column_a", [1, 2, 3, 4]),
            varchar("column_b", ["Lorem", "ipsum", "dolor", "sit"]),
        ]);
        // Base range is 5..9.
        let table_commitment = TableCommitment::<NaiveCommitment>::try_from_columns_with_offset(
            base_table.inner_table(),
            5,
            &(),
        )
        .unwrap();
        let high_disjoint_table_commitment =
            TableCommitment::try_from_columns_with_offset(base_table.inner_table(), 10, &())
                .unwrap();
        assert!(matches!(
            table_commitment
                .clone()
                .try_add(high_disjoint_table_commitment),
            Err(TableCommitmentArithmeticError::NonContiguous)
        ));
        let high_overlapping_table_commitment =
            TableCommitment::try_from_columns_with_offset(base_table.inner_table(), 7, &())
                .unwrap();
        assert!(matches!(
            table_commitment
                .clone()
                .try_add(high_overlapping_table_commitment),
            Err(TableCommitmentArithmeticError::NonContiguous)
        ));
        let equal_range_table_commitment =
            TableCommitment::try_from_columns_with_offset(base_table.inner_table(), 5, &())
                .unwrap();
        assert!(matches!(
            table_commitment
                .clone()
                .try_add(equal_range_table_commitment),
            Err(TableCommitmentArithmeticError::NonContiguous)
        ));
        let low_overlapping_table_commitment =
            TableCommitment::try_from_columns_with_offset(base_table.inner_table(), 3, &())
                .unwrap();
        assert!(matches!(
            table_commitment
                .clone()
                .try_add(low_overlapping_table_commitment),
            Err(TableCommitmentArithmeticError::NonContiguous)
        ));
        let low_disjoint_table_commitment =
            TableCommitment::try_from_columns_with_offset(base_table.inner_table(), 0, &())
                .unwrap();
        assert!(matches!(
            table_commitment
                .clone()
                .try_add(low_disjoint_table_commitment),
            Err(TableCommitmentArithmeticError::NonContiguous)
        ));
    }
    // Subtracting an aligned prefix/suffix recovers the commitment to the
    // remaining rows.
    #[test]
    fn we_can_sub_table_commitments() {
        let bigint_id: Identifier = "bigint_column".parse().unwrap();
        let bigint_data = [1i64, 5, -5, 0, 10];
        let varchar_id: Identifier = "varchar_column".parse().unwrap();
        let varchar_data = ["Lorem", "ipsum", "dolor", "sit", "amet"];
        let scalar_id: Identifier = "scalar_column".parse().unwrap();
        let scalar_data = [1000, 2000, 3000, -1000, 0];
        let columns_low: OwnedTable<TestScalar> = owned_table([
            bigint(bigint_id, bigint_data[..2].to_vec()),
            varchar(varchar_id, varchar_data[..2].to_vec()),
            scalar(scalar_id, scalar_data[..2].to_vec()),
        ]);
        let table_commitment_low =
            TableCommitment::<NaiveCommitment>::try_from_columns_with_offset(
                columns_low.inner_table(),
                0,
                &(),
            )
            .unwrap();
        let columns_high: OwnedTable<TestScalar> = owned_table([
            bigint(bigint_id, bigint_data[2..].to_vec()),
            varchar(varchar_id, varchar_data[2..].to_vec()),
            scalar(scalar_id, scalar_data[2..].to_vec()),
        ]);
        let table_commitment_high =
            TableCommitment::try_from_columns_with_offset(columns_high.inner_table(), 2, &())
                .unwrap();
        let columns_all: OwnedTable<TestScalar> = owned_table([
            bigint(bigint_id, bigint_data),
            varchar(varchar_id, varchar_data),
            scalar(scalar_id, scalar_data),
        ]);
        let table_commitment_all =
            TableCommitment::try_from_columns_with_offset(columns_all.inner_table(), 0, &())
                .unwrap();
        // all - low = high (commitments and range).
        let high_difference = table_commitment_all
            .clone()
            .try_sub(table_commitment_low.clone())
            .unwrap();
        assert_eq!(
            high_difference.column_commitments().commitments(),
            table_commitment_high.column_commitments().commitments()
        );
        assert_eq!(high_difference.range(), table_commitment_high.range());
        // all - high = low.
        let low_difference = table_commitment_all.try_sub(table_commitment_high).unwrap();
        assert_eq!(
            low_difference.column_commitments().commitments(),
            table_commitment_low.column_commitments().commitments()
        );
        assert_eq!(low_difference.range(), table_commitment_low.range());
    }
    // Subtracting commitments with mismatched column metadata fails.
    #[test]
    fn we_cannot_sub_mismatched_table_commitments() {
        let base_table: OwnedTable<TestScalar> = owned_table([
            bigint("column_a", [1, 2, 3, 4]),
            varchar("column_b", ["Lorem", "ipsum", "dolor", "sit"]),
        ]);
        let table_commitment = TableCommitment::<NaiveCommitment>::try_from_columns_with_offset(
            base_table.inner_table(),
            0,
            &(),
        )
        .unwrap();
        let table_diff_type: OwnedTable<TestScalar> = owned_table([
            varchar("column_a", ["1", "2"]),
            varchar("column_b", ["Lorem", "ipsum"]),
        ]);
        let table_commitment_diff_type =
            TableCommitment::try_from_columns_with_offset(table_diff_type.inner_table(), 0, &())
                .unwrap();
        assert!(matches!(
            table_commitment.try_sub(table_commitment_diff_type),
            Err(TableCommitmentArithmeticError::ColumnMismatch { .. })
        ));
    }
    // Subtraction requires the subtrahend's range to share an endpoint with the
    // minuend's; contiguous-but-outside and overlapping ranges all fail.
    #[test]
    fn we_cannot_sub_noncontiguous_table_commitments() {
        let bigint_id: Identifier = "bigint_column".parse().unwrap();
        let bigint_data = [1i64, 5, -5, 0, 10];
        let varchar_id: Identifier = "varchar_column".parse().unwrap();
        let varchar_data = ["Lorem", "ipsum", "dolor", "sit", "amet"];
        let scalar_id: Identifier = "scalar_column".parse().unwrap();
        let scalar_data = [1000, 2000, 3000, -1000, 0];
        let columns_minuend: OwnedTable<TestScalar> = owned_table([
            bigint(bigint_id, bigint_data[..].to_vec()),
            varchar(varchar_id, varchar_data[..].to_vec()),
            scalar(scalar_id, scalar_data[..].to_vec()),
        ]);
        let columns_subtrahend: OwnedTable<TestScalar> = owned_table([
            bigint(bigint_id, bigint_data[..2].to_vec()),
            varchar(varchar_id, varchar_data[..2].to_vec()),
            scalar(scalar_id, scalar_data[..2].to_vec()),
        ]);
        // Minuend range is 4..9.
        let minuend_table_commitment =
            TableCommitment::<NaiveCommitment>::try_from_columns_with_offset(
                columns_minuend.inner_table(),
                4,
                &(),
            )
            .unwrap();
        let high_contiguous_table_commitment =
            TableCommitment::try_from_columns_with_offset(columns_subtrahend.inner_table(), 9, &())
                .unwrap();
        assert!(matches!(
            minuend_table_commitment
                .clone()
                .try_sub(high_contiguous_table_commitment),
            Err(TableCommitmentArithmeticError::NonContiguous)
        ));
        let high_overlapping_table_commitment =
            TableCommitment::try_from_columns_with_offset(columns_subtrahend.inner_table(), 6, &())
                .unwrap();
        assert!(matches!(
            minuend_table_commitment
                .clone()
                .try_sub(high_overlapping_table_commitment),
            Err(TableCommitmentArithmeticError::NonContiguous)
        ));
        let low_overlapping_table_commitment =
            TableCommitment::try_from_columns_with_offset(columns_subtrahend.inner_table(), 3, &())
                .unwrap();
        assert!(matches!(
            minuend_table_commitment
                .clone()
                .try_sub(low_overlapping_table_commitment),
            Err(TableCommitmentArithmeticError::NonContiguous)
        ));
        let low_contiguous_table_commitment =
            TableCommitment::try_from_columns_with_offset(columns_subtrahend.inner_table(), 2, &())
                .unwrap();
        assert!(matches!(
            minuend_table_commitment
                .clone()
                .try_sub(low_contiguous_table_commitment),
            Err(TableCommitmentArithmeticError::NonContiguous)
        ));
    }
    // Subtracting a larger commitment from a smaller one fails with NegativeRange.
    #[test]
    fn we_cannot_sub_commitments_with_negative_difference() {
        let bigint_id: Identifier = "bigint_column".parse().unwrap();
        let bigint_data = [1i64, 5, -5, 0, 10];
        let varchar_id: Identifier = "varchar_column".parse().unwrap();
        let varchar_data = ["Lorem", "ipsum", "dolor", "sit", "amet"];
        let scalar_id: Identifier = "scalar_column".parse().unwrap();
        let scalar_data = [1000, 2000, 3000, -1000, 0];
        let columns_low: OwnedTable<TestScalar> = owned_table([
            bigint(bigint_id, bigint_data[..2].to_vec()),
            varchar(varchar_id, varchar_data[..2].to_vec()),
            scalar(scalar_id, scalar_data[..2].to_vec()),
        ]);
        let table_commitment_low =
            TableCommitment::<NaiveCommitment>::try_from_columns_with_offset(
                columns_low.inner_table(),
                0,
                &(),
            )
            .unwrap();
        let columns_high: OwnedTable<TestScalar> = owned_table([
            bigint(bigint_id, bigint_data[2..].to_vec()),
            varchar(varchar_id, varchar_data[2..].to_vec()),
            scalar(scalar_id, scalar_data[2..].to_vec()),
        ]);
        let table_commitment_high =
            TableCommitment::try_from_columns_with_offset(columns_high.inner_table(), 2, &())
                .unwrap();
        let columns_all: OwnedTable<TestScalar> = owned_table([
            bigint(bigint_id, bigint_data),
            varchar(varchar_id, varchar_data),
            scalar(scalar_id, scalar_data),
        ]);
        let table_commitment_all =
            TableCommitment::try_from_columns_with_offset(columns_all.inner_table(), 0, &())
                .unwrap();
        let try_negative_high_difference_result =
            table_commitment_low.try_sub(table_commitment_all.clone());
        assert!(matches!(
            try_negative_high_difference_result,
            Err(TableCommitmentArithmeticError::NegativeRange { .. })
        ));
        let try_negative_low_difference_result =
            table_commitment_high.try_sub(table_commitment_all);
        assert!(matches!(
            try_negative_low_difference_result,
            Err(TableCommitmentArithmeticError::NegativeRange { .. })
        ));
    }
    // Record-batch construction and append must agree with the columns-based paths.
    #[test]
    fn we_can_create_and_append_table_commitments_with_record_batchs() {
        let batch = record_batch!(
            "a" => [1i64, 2, 3],
            "b" => ["1", "2", "3"],
        );
        let b_scals = ["1".into(), "2".into(), "3".into()];
        let columns = [
            (
                &"a".parse().unwrap(),
                &Column::<TestScalar>::BigInt(&[1, 2, 3]),
            ),
            (
                &"b".parse().unwrap(),
                &Column::<TestScalar>::VarChar((&["1", "2", "3"], &b_scals)),
            ),
        ];
        let mut expected_commitment =
            TableCommitment::<NaiveCommitment>::try_from_columns_with_offset(columns, 0, &())
                .unwrap();
        let mut commitment =
            TableCommitment::<NaiveCommitment>::try_from_record_batch(&batch, &()).unwrap();
        assert_eq!(commitment, expected_commitment);
        let batch2 = record_batch!(
            "a" => [4i64, 5, 6],
            "b" => ["4", "5", "6"],
        );
        let b_scals2 = ["4".into(), "5".into(), "6".into()];
        let columns2 = [
            (
                &"a".parse().unwrap(),
                &Column::<TestScalar>::BigInt(&[4, 5, 6]),
            ),
            (
                &"b".parse().unwrap(),
                &Column::<TestScalar>::VarChar((&["4", "5", "6"], &b_scals2)),
            ),
        ];
        expected_commitment.try_append_rows(columns2, &()).unwrap();
        commitment.try_append_record_batch(&batch2, &()).unwrap();
        assert_eq!(commitment, expected_commitment);
    }
}