use {
crate::crds_data::sanitize_wallclock,
serde::{Deserialize, Serialize},
solana_clock::Slot,
solana_pubkey::Pubkey,
solana_sanitize::{Sanitize, SanitizeError},
std::num::TryFromIntError,
thiserror::Error,
};
#[cfg(feature = "duplicate-shred-rocksdb")]
use {
itertools::Itertools,
solana_ledger::{
blockstore::BlockstoreError,
blockstore_meta::{DuplicateSlotProof, ErasureMeta},
shred::{self, Shred, ShredType},
},
std::{
collections::{hash_map::Entry, HashMap},
convert::TryFrom,
},
};
// Serialized size in bytes of a DuplicateShred whose chunk is empty, i.e. the
// fixed per-message overhead; pinned by test_duplicate_shred_header_size.
#[cfg(feature = "duplicate-shred-rocksdb")]
const DUPLICATE_SHRED_HEADER_SIZE: usize = 63;
// Index type keying DuplicateShred messages (one entry per chunk).
pub(crate) type DuplicateShredIndex = u16;
// Maximum number of DuplicateShred entries; enforcement happens at the call
// sites, which are not visible in this file.
pub(crate) const MAX_DUPLICATE_SHREDS: DuplicateShredIndex = 512;
/// Gossip (CRDS) message carrying one chunk of a bincode-serialized
/// `DuplicateSlotProof` (two conflicting shreds for the same slot).
/// Chunks are produced by `from_shred` and reassembled by `into_shreds`.
///
/// NOTE: bincode encodes fields in declaration order; do not reorder, remove,
/// or resize fields without a wire-format migration.
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct DuplicateShred {
    pub(crate) from: Pubkey,
    pub(crate) wallclock: u64,
    pub(crate) slot: Slot,
    // Unused; presumably retained to keep the serialized layout stable —
    // TODO confirm against older wire formats.
    _unused: u32,
    _unused_shred_type: u8,
    // Total number of chunks the serialized proof was split into.
    num_chunks: u8,
    // Position of this chunk within the proof: 0..num_chunks.
    chunk_index: u8,
    // Raw bytes of this slice of the serialized proof; serde_bytes yields a
    // compact byte-string encoding instead of a per-element sequence.
    #[serde(with = "serde_bytes")]
    chunk: Vec<u8>,
}
impl DuplicateShred {
    /// Total number of chunks in the proof this message belongs to.
    #[inline]
    #[allow(dead_code)]
    pub(crate) fn num_chunks(&self) -> u8 {
        self.num_chunks
    }

    /// Index of this chunk within the proof (0..num_chunks).
    #[inline]
    #[allow(dead_code)]
    pub(crate) fn chunk_index(&self) -> u8 {
        self.chunk_index
    }
}
/// Errors from building (`from_shred`) and reassembling (`into_shreds`)
/// duplicate-slot proofs. Variants gated on "duplicate-shred-rocksdb" only
/// arise in code paths that need the ledger crate. See
/// `Error::is_non_critical` for which variants are expected from
/// adversarial/stale gossip input versus which indicate local failures.
#[derive(Debug, Error)]
pub enum Error {
    #[error("block store save error")]
    #[cfg(feature = "duplicate-shred-rocksdb")]
    BlockstoreInsertFailed(#[from] BlockstoreError),
    #[error("data chunk mismatch")]
    DataChunkMismatch,
    #[error("unable to send duplicate slot to state machine")]
    DuplicateSlotSenderFailure,
    #[error("invalid chunk_index: {chunk_index}, num_chunks: {num_chunks}")]
    InvalidChunkIndex { chunk_index: u8, num_chunks: u8 },
    #[error("invalid duplicate shreds")]
    InvalidDuplicateShreds,
    #[error("invalid duplicate slot proof")]
    InvalidDuplicateSlotProof,
    #[error("invalid erasure meta conflict")]
    InvalidErasureMetaConflict,
    #[error("invalid last index conflict")]
    InvalidLastIndexConflict,
    #[error("invalid shred version: {0}")]
    #[cfg(feature = "duplicate-shred-rocksdb")]
    InvalidShredVersion(u16),
    #[error("invalid signature")]
    #[cfg(feature = "duplicate-shred-rocksdb")]
    InvalidSignature,
    #[error("invalid size limit")]
    InvalidSizeLimit,
    #[error(transparent)]
    #[cfg(feature = "duplicate-shred-rocksdb")]
    InvalidShred(#[from] shred::Error),
    #[error("number of chunks mismatch")]
    NumChunksMismatch,
    #[error("missing data chunk")]
    MissingDataChunk,
    #[error("(de)serialization error")]
    SerializationError(#[from] bincode::Error),
    #[error("shred type mismatch")]
    ShredTypeMismatch,
    #[error("slot mismatch")]
    SlotMismatch,
    #[error("type conversion error")]
    TryFromIntError(#[from] TryFromIntError),
    #[error("unknown slot leader: {0}")]
    #[cfg(feature = "duplicate-shred-rocksdb")]
    UnknownSlotLeader(Slot),
}
impl Error {
    /// Classifies this error for logging/metrics purposes: `true` for errors
    /// that are expected when processing adversarial or stale gossip input,
    /// `false` for errors that indicate a local failure or a malformed proof.
    ///
    /// The match is deliberately exhaustive (no `_` arm) so that adding a new
    /// variant forces a conscious classification here.
    #[allow(dead_code)]
    pub(crate) fn is_non_critical(&self) -> bool {
        match self {
            // Critical: local failure or a proof that should never have been
            // assembled/propagated.
            Self::DataChunkMismatch
            | Self::DuplicateSlotSenderFailure
            | Self::InvalidChunkIndex { .. }
            | Self::InvalidDuplicateSlotProof
            | Self::InvalidSizeLimit
            | Self::NumChunksMismatch
            | Self::MissingDataChunk
            | Self::SerializationError(_)
            | Self::TryFromIntError(_) => false,
            #[cfg(feature = "duplicate-shred-rocksdb")]
            Self::BlockstoreInsertFailed(_)
            | Self::InvalidShred(_)
            | Self::InvalidShredVersion(_)
            | Self::InvalidSignature
            | Self::UnknownSlotLeader(_) => false,
            // Non-critical: routinely produced by untrusted gossip traffic.
            Self::SlotMismatch
            | Self::ShredTypeMismatch
            | Self::InvalidDuplicateShreds
            | Self::InvalidLastIndexConflict
            | Self::InvalidErasureMetaConflict => true,
        }
    }
}
#[cfg(feature = "duplicate-shred-rocksdb")]
// Verifies that `shred1` and `shred2` together constitute a valid
// duplicate-slot proof: same slot and shred version, (optionally) valid
// leader signatures, and one of the recognized conflict patterns below.
// Returns Ok(()) only if the pair proves duplicity.
//
// NOTE: the order of the checks is significant — e.g. the merkle-root
// comparison must run before the shred-type check so that data-vs-coding
// conflicts within the same FEC set are accepted.
fn check_shreds<F>(
    leader_schedule: Option<F>,
    shred1: &Shred,
    shred2: &Shred,
    shred_version: u16,
) -> Result<(), Error>
where
    F: FnOnce(Slot) -> Option<Pubkey>,
{
    // Both shreds must claim the same slot...
    if shred1.slot() != shred2.slot() {
        return Err(Error::SlotMismatch);
    }
    // ...and carry the expected shred version.
    if shred1.version() != shred_version {
        return Err(Error::InvalidShredVersion(shred1.version()));
    }
    if shred2.version() != shred_version {
        return Err(Error::InvalidShredVersion(shred2.version()));
    }
    // When a leader schedule is supplied, both shreds must be signed by the
    // slot leader; otherwise signature verification is skipped.
    if let Some(leader_schedule) = leader_schedule {
        let slot_leader =
            leader_schedule(shred1.slot()).ok_or(Error::UnknownSlotLeader(shred1.slot()))?;
        if !shred1.verify(&slot_leader) || !shred2.verify(&slot_leader) {
            return Err(Error::InvalidSignature);
        }
    }
    // Conflict pattern 1: same FEC set but different merkle roots. If
    // merkle_root() fails on both sides the two Nones compare equal, which is
    // NOT treated as a conflict.
    if shred1.fec_set_index() == shred2.fec_set_index()
        && shred1.merkle_root().ok() != shred2.merkle_root().ok()
    {
        return Ok(());
    }
    // All remaining conflict patterns require shreds of the same type.
    if shred1.shred_type() != shred2.shred_type() {
        return Err(Error::ShredTypeMismatch);
    }
    // Conflict pattern 2: same index but differing payloads.
    if shred1.index() == shred2.index() {
        if shred1.is_shred_duplicate(shred2) {
            return Ok(());
        }
        return Err(Error::InvalidDuplicateShreds);
    }
    // Conflict pattern 3 (data shreds only): one shred is marked
    // last-in-slot while the other claims a higher index in the same slot.
    // Every path in this block returns, so code below only sees coding
    // shreds.
    if shred1.shred_type() == ShredType::Data {
        if shred1.last_in_slot() && shred2.index() > shred1.index() {
            return Ok(());
        }
        if shred2.last_in_slot() && shred1.index() > shred2.index() {
            return Ok(());
        }
        return Err(Error::InvalidLastIndexConflict);
    }
    // Conflict pattern 4 (coding shreds): same FEC set but inconsistent
    // erasure configurations.
    if shred1.fec_set_index() == shred2.fec_set_index()
        && !ErasureMeta::check_erasure_consistency(shred1, shred2)
    {
        return Ok(());
    }
    Err(Error::InvalidErasureMetaConflict)
}
#[cfg(feature = "duplicate-shred-rocksdb")]
/// Builds a duplicate-slot proof from `shred` and the serialized payload of a
/// conflicting shred, validates it via `check_shreds`, and returns it split
/// into gossip-sized `DuplicateShred` chunks.
///
/// Errors if the payloads are identical, the conflict is not a valid proof,
/// `max_size` cannot fit the chunk header, or the proof needs more than
/// `u8::MAX` chunks.
pub(crate) fn from_shred<T: AsRef<[u8]>, F>(
    shred: Shred,
    self_pubkey: Pubkey,
    other_payload: T,
    leader_schedule: Option<F>,
    wallclock: u64,
    max_size: usize,
    shred_version: u16,
) -> Result<impl Iterator<Item = DuplicateShred>, Error>
where
    F: FnOnce(Slot) -> Option<Pubkey>,
    shred::Payload: From<T>,
{
    // Two identical payloads cannot prove duplicity.
    if shred.payload().as_ref() == other_payload.as_ref() {
        return Err(Error::InvalidDuplicateShreds);
    }
    let other_shred = Shred::new_from_serialized_shred(other_payload)?;
    check_shreds(leader_schedule, &shred, &other_shred, shred_version)?;
    let slot = shred.slot();
    let proof = DuplicateSlotProof {
        shred1: shred.into_payload(),
        shred2: other_shred.into_payload(),
    };
    let serialized_proof = bincode::serialize(&proof)?;
    // Each gossip message carries DUPLICATE_SHRED_HEADER_SIZE bytes of fixed
    // overhead; the proof bytes fill whatever remains of max_size. The strict
    // inequality also keeps chunk_size > 0 (chunks(0) would panic).
    if max_size <= DUPLICATE_SHRED_HEADER_SIZE {
        return Err(Error::InvalidSizeLimit);
    }
    let chunk_size = max_size - DUPLICATE_SHRED_HEADER_SIZE;
    let proof_chunks: Vec<Vec<u8>> = serialized_proof
        .chunks(chunk_size)
        .map(Vec::from)
        .collect();
    let num_chunks = u8::try_from(proof_chunks.len())?;
    Ok(proof_chunks
        .into_iter()
        .enumerate()
        .map(move |(chunk_index, chunk)| DuplicateShred {
            from: self_pubkey,
            wallclock,
            slot,
            _unused: 0,
            _unused_shred_type: ShredType::Code.into(),
            num_chunks,
            chunk_index: chunk_index as u8,
            chunk,
        }))
}
#[cfg(feature = "duplicate-shred-rocksdb")]
/// Returns a closure that validates a `DuplicateShred` chunk against the
/// `(slot, num_chunks)` established by the first chunk of a proof.
fn check_chunk(slot: Slot, num_chunks: u8) -> impl Fn(&DuplicateShred) -> Result<(), Error> {
    move |chunk: &DuplicateShred| {
        // All chunks of one proof must agree on the slot...
        if chunk.slot != slot {
            return Err(Error::SlotMismatch);
        }
        // ...and on the total chunk count...
        if chunk.num_chunks != num_chunks {
            return Err(Error::NumChunksMismatch);
        }
        // ...and address a position inside that count.
        if chunk.chunk_index >= num_chunks {
            return Err(Error::InvalidChunkIndex {
                chunk_index: chunk.chunk_index,
                num_chunks,
            });
        }
        Ok(())
    }
}
#[cfg(feature = "duplicate-shred-rocksdb")]
/// Reassembles `DuplicateShred` chunks into the two conflicting shreds of a
/// duplicate-slot proof and re-validates the proof (including signature
/// verification against `slot_leader`).
///
/// Errors if chunks are missing, inconsistent, out of range, fail to
/// deserialize, or do not constitute a valid proof.
pub(crate) fn into_shreds(
    slot_leader: &Pubkey,
    chunks: impl IntoIterator<Item = DuplicateShred>,
    shred_version: u16,
) -> Result<(Shred, Shred), Error> {
    let mut chunks = chunks.into_iter();
    // The first chunk fixes the (slot, num_chunks) all other chunks must
    // match.
    let DuplicateShred {
        slot,
        num_chunks,
        chunk_index,
        chunk,
        ..
    } = chunks.next().ok_or(Error::InvalidDuplicateShreds)?;
    // Validate the first chunk's index as well (subsequent chunks go through
    // check_chunk below). Without this, an out-of-range chunk_index could
    // make data.len() == num_chunks while an in-range index is missing, and
    // the remove(..).unwrap() further down would panic instead of erroring.
    if chunk_index >= num_chunks {
        return Err(Error::InvalidChunkIndex {
            chunk_index,
            num_chunks,
        });
    }
    let check_chunk = check_chunk(slot, num_chunks);
    let mut data = HashMap::new();
    data.insert(chunk_index, chunk);
    for chunk in chunks {
        check_chunk(&chunk)?;
        match data.entry(chunk.chunk_index) {
            Entry::Vacant(entry) => {
                entry.insert(chunk.chunk);
            }
            Entry::Occupied(entry) => {
                // A repeated chunk_index must carry an identical payload.
                if *entry.get() != chunk.chunk {
                    return Err(Error::DataChunkMismatch);
                }
            }
        }
    }
    if data.len() != num_chunks as usize {
        return Err(Error::MissingDataChunk);
    }
    // Every key is < num_chunks and there are exactly num_chunks of them, so
    // all indexes 0..num_chunks are present; reassemble the proof in order.
    let data = (0..num_chunks).map(|k| data.remove(&k).unwrap()).concat();
    let proof: DuplicateSlotProof = bincode::deserialize(&data)?;
    if proof.shred1 == proof.shred2 {
        return Err(Error::InvalidDuplicateSlotProof);
    }
    let shred1 = Shred::new_from_serialized_shred(proof.shred1)?;
    let shred2 = Shred::new_from_serialized_shred(proof.shred2)?;
    if shred1.slot() != slot || shred2.slot() != slot {
        Err(Error::SlotMismatch)
    } else {
        // Re-run the full consistency checks with mandatory signature
        // verification against the known slot leader.
        check_shreds(
            Some(|_| Some(slot_leader).copied()),
            &shred1,
            &shred2,
            shred_version,
        )?;
        Ok((shred1, shred2))
    }
}
impl Sanitize for DuplicateShred {
    /// Gossip-level validation: wallclock within the accepted range, chunk
    /// index addressing one of the advertised chunks, and a valid sender key.
    fn sanitize(&self) -> Result<(), SanitizeError> {
        sanitize_wallclock(self.wallclock)?;
        if self.num_chunks <= self.chunk_index {
            return Err(SanitizeError::IndexOutOfBounds);
        }
        self.from.sanitize()
    }
}
#[cfg(all(test, feature = "duplicate-shred-rocksdb"))]
pub(crate) mod tests {
use {
super::*,
rand::Rng,
solana_entry::entry::Entry,
solana_hash::Hash,
solana_keypair::Keypair,
solana_ledger::shred::{ProcessShredsStats, ReedSolomonCache, Shredder},
solana_signature::Signature,
solana_signer::Signer,
solana_system_transaction::transfer,
std::sync::Arc,
};
#[test]
fn test_duplicate_shred_header_size() {
    // The header is everything except the chunk payload, so a message with
    // an empty chunk must serialize to exactly DUPLICATE_SHRED_HEADER_SIZE
    // bytes. Keep the constant in sync with the struct layout.
    let dup = DuplicateShred {
        from: Pubkey::new_unique(),
        wallclock: u64::MAX,
        slot: Slot::MAX,
        _unused: 0,
        _unused_shred_type: ShredType::Data.into(),
        num_chunks: u8::MAX,
        chunk_index: u8::MAX,
        chunk: Vec::default(),
    };
    let serialized = bincode::serialize(&dup).unwrap();
    assert_eq!(serialized.len(), DUPLICATE_SHRED_HEADER_SIZE);
    assert_eq!(
        bincode::serialized_size(&dup).unwrap(),
        DUPLICATE_SHRED_HEADER_SIZE as u64
    );
}
pub(crate) fn new_rand_shred<R: Rng>(
rng: &mut R,
next_shred_index: u32,
shredder: &Shredder,
keypair: &Keypair,
) -> Shred {
let (mut data_shreds, _) = new_rand_shreds(
rng,
next_shred_index,
next_shred_index,
5,
shredder,
keypair,
true,
);
data_shreds.pop().unwrap()
}
/// Generates one random data shred (the last of a 5-entry batch).
fn new_rand_data_shred<R: Rng>(
    rng: &mut R,
    next_shred_index: u32,
    shredder: &Shredder,
    keypair: &Keypair,
    is_last_in_slot: bool,
) -> Shred {
    let (data_shreds, _coding_shreds) = new_rand_shreds(
        rng,
        next_shred_index,
        next_shred_index,
        5, // num_entries
        shredder,
        keypair,
        is_last_in_slot,
    );
    // Keep only the last data shred of the batch.
    data_shreds.into_iter().last().unwrap()
}
/// Generates the coding shreds of a random last-in-slot batch.
fn new_rand_coding_shreds<R: Rng>(
    rng: &mut R,
    next_shred_index: u32,
    num_entries: usize,
    shredder: &Shredder,
    keypair: &Keypair,
) -> Vec<Shred> {
    let (_data_shreds, coding_shreds) = new_rand_shreds(
        rng,
        next_shred_index,
        next_shred_index,
        num_entries,
        shredder,
        keypair,
        true, // is_last_in_slot
    );
    coding_shreds
}
fn new_rand_shreds<R: Rng>(
rng: &mut R,
next_shred_index: u32,
next_code_index: u32,
num_entries: usize,
shredder: &Shredder,
keypair: &Keypair,
is_last_in_slot: bool,
) -> (Vec<Shred>, Vec<Shred>) {
let entries: Vec<_> = std::iter::repeat_with(|| {
let tx = transfer(
&Keypair::new(), &Pubkey::new_unique(), rng.gen(), Hash::new_unique(), );
Entry::new(
&Hash::new_unique(), 1, vec![tx], )
})
.take(num_entries)
.collect();
shredder.entries_to_merkle_shreds_for_tests(
keypair,
&entries,
is_last_in_slot,
Hash::new_from_array(rng.gen()),
next_shred_index,
next_code_index, &ReedSolomonCache::default(),
&mut ProcessShredsStats::default(),
)
}
fn from_shred_bypass_checks(
shred: Shred,
self_pubkey: Pubkey, other_shred: Shred,
wallclock: u64,
max_size: usize, ) -> Result<impl Iterator<Item = DuplicateShred>, Error> {
let slot = shred.slot();
let proof = DuplicateSlotProof {
shred1: shred.into_payload(),
shred2: other_shred.into_payload(),
};
let data = bincode::serialize(&proof)?;
let chunk_size = max_size - DUPLICATE_SHRED_HEADER_SIZE;
let chunks: Vec<_> = data.chunks(chunk_size).map(Vec::from).collect();
let num_chunks = u8::try_from(chunks.len())?;
let chunks = chunks
.into_iter()
.enumerate()
.map(move |(i, chunk)| DuplicateShred {
from: self_pubkey,
wallclock,
slot,
num_chunks,
chunk_index: i as u8,
chunk,
_unused: 0,
_unused_shred_type: ShredType::Code.into(),
});
Ok(chunks)
}
#[test]
fn test_duplicate_shred_round_trip() {
    // Two distinct data shreds at the same index form a valid proof; it must
    // survive chunking (from_shred) and reassembly (into_shreds) intact.
    let mut rng = rand::thread_rng();
    let leader = Arc::new(Keypair::new());
    let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0);
    let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap();
    let next_shred_index = rng.gen_range(0..32_000);
    let shred1 = new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true);
    let shred2 = new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true);
    let leader_schedule = |s| (s == slot).then(|| leader.pubkey());
    let chunks: Vec<_> = from_shred(
        shred1.clone(),
        Pubkey::new_unique(),
        shred2.payload().clone(),
        Some(leader_schedule),
        rng.gen(), // wallclock
        512,       // max_size
        version,
    )
    .unwrap()
    .collect();
    assert!(chunks.len() > 4);
    let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap();
    assert_eq!(shred1, shred3);
    assert_eq!(shred2, shred4);
}
// A shred paired with a byte-identical copy of itself is not a duplicate
// proof: from_shred must refuse to build one, and a proof assembled while
// bypassing the checks must be rejected by into_shreds.
#[test]
fn test_duplicate_shred_invalid() {
    let mut rng = rand::thread_rng();
    let leader = Arc::new(Keypair::new());
    let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0);
    let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap();
    let next_shred_index = rng.gen_range(0..32_000);
    let leader_schedule = |s| {
        if s == slot {
            Some(leader.pubkey())
        } else {
            None
        }
    };
    let data_shred = new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true);
    let coding_shreds =
        new_rand_coding_shreds(&mut rng, next_shred_index, 10, &shredder, &leader);
    // Each case pairs a shred with itself (data/data and coding/coding).
    let test_cases = vec![
        (data_shred.clone(), data_shred),
        (coding_shreds[0].clone(), coding_shreds[0].clone()),
    ];
    for (shred1, shred2) in test_cases.into_iter() {
        assert_matches!(
            from_shred(
                shred1.clone(),
                Pubkey::new_unique(),
                shred2.payload().clone(),
                Some(leader_schedule),
                rng.gen(),
                512,
                version,
            )
            .err()
            .unwrap(),
            Error::InvalidDuplicateShreds
        );
        let chunks: Vec<_> = from_shred_bypass_checks(
            shred1.clone(),
            Pubkey::new_unique(),
            shred2.clone(),
            rng.gen(),
            512,
        )
        .unwrap()
        .collect();
        assert!(chunks.len() > 4);
        assert_matches!(
            into_shreds(&leader.pubkey(), chunks, version)
                .err()
                .unwrap(),
            Error::InvalidDuplicateSlotProof
        );
    }
}
// A data shred marked last-in-slot paired with a data shred of higher index
// is a valid "last index" conflict; the proof must round-trip through
// from_shred/into_shreds in either shred order.
#[test]
fn test_latest_index_conflict_round_trip() {
    let mut rng = rand::thread_rng();
    let leader = Arc::new(Keypair::new());
    let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0);
    let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap();
    let next_shred_index = rng.gen_range(0..31_000);
    let leader_schedule = |s| {
        if s == slot {
            Some(leader.pubkey())
        } else {
            None
        }
    };
    let test_cases = [
        (
            new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true),
            new_rand_data_shred(
                &mut rng,
                next_shred_index + 30,
                &shredder,
                &leader,
                false,
            ),
        ),
        (
            new_rand_data_shred(&mut rng, next_shred_index + 100, &shredder, &leader, true),
            new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true),
        ),
    ];
    // Each case is exercised in both (shred1, shred2) orders.
    for (shred1, shred2) in test_cases.iter().flat_map(|(a, b)| [(a, b), (b, a)]) {
        let chunks: Vec<_> = from_shred(
            shred1.clone(),
            Pubkey::new_unique(),
            shred2.payload().clone(),
            Some(leader_schedule),
            rng.gen(),
            512,
            version,
        )
        .unwrap()
        .collect();
        assert!(chunks.len() > 4);
        let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap();
        assert_eq!(shred1, &shred3);
        assert_eq!(shred2, &shred4);
    }
}
// Pairs of data shreds where no shred's index exceeds the index of a
// last-in-slot shred do not form a "last index" conflict and must be
// rejected by both from_shred and into_shreds.
#[test]
fn test_latest_index_conflict_invalid() {
    let mut rng = rand::thread_rng();
    let leader = Arc::new(Keypair::new());
    let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0);
    let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap();
    let next_shred_index = rng.gen_range(0..31_000);
    let leader_schedule = |s| {
        if s == slot {
            Some(leader.pubkey())
        } else {
            None
        }
    };
    let test_cases = vec![
        (
            new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, false),
            new_rand_data_shred(&mut rng, next_shred_index + 1, &shredder, &leader, true),
        ),
        (
            new_rand_data_shred(&mut rng, next_shred_index + 1, &shredder, &leader, true),
            new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, false),
        ),
        (
            new_rand_data_shred(&mut rng, next_shred_index + 100, &shredder, &leader, false),
            new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, false),
        ),
        (
            new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, false),
            new_rand_data_shred(&mut rng, next_shred_index + 100, &shredder, &leader, false),
        ),
    ];
    for (shred1, shred2) in test_cases.into_iter() {
        assert_matches!(
            from_shred(
                shred1.clone(),
                Pubkey::new_unique(),
                shred2.payload().clone(),
                Some(leader_schedule),
                rng.gen(),
                512,
                version,
            )
            .err()
            .unwrap(),
            Error::InvalidLastIndexConflict
        );
        let chunks: Vec<_> = from_shred_bypass_checks(
            shred1.clone(),
            Pubkey::new_unique(),
            shred2.clone(),
            rng.gen(),
            512,
        )
        .unwrap()
        .collect();
        assert!(chunks.len() > 4);
        assert_matches!(
            into_shreds(&leader.pubkey(), chunks, version)
                .err()
                .unwrap(),
            Error::InvalidLastIndexConflict
        );
    }
}
// Coding shreds from the same shred index but batches with different entry
// counts carry inconsistent erasure configs — a valid erasure-meta conflict
// that must round-trip through from_shred/into_shreds.
#[test]
fn test_erasure_meta_conflict_round_trip() {
    let mut rng = rand::thread_rng();
    let leader = Arc::new(Keypair::new());
    let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0);
    let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap();
    let next_shred_index = rng.gen_range(0..31_000);
    let leader_schedule = |s| {
        if s == slot {
            Some(leader.pubkey())
        } else {
            None
        }
    };
    let coding_shreds =
        new_rand_coding_shreds(&mut rng, next_shred_index, 10, &shredder, &leader);
    let coding_shreds_bigger =
        new_rand_coding_shreds(&mut rng, next_shred_index, 13, &shredder, &leader);
    let coding_shreds_smaller =
        new_rand_coding_shreds(&mut rng, next_shred_index, 7, &shredder, &leader);
    let test_cases = vec![
        (coding_shreds[0].clone(), coding_shreds_bigger[1].clone()),
        (coding_shreds[0].clone(), coding_shreds_smaller[1].clone()),
    ];
    for (shred1, shred2) in test_cases.into_iter() {
        let chunks: Vec<_> = from_shred(
            shred1.clone(),
            Pubkey::new_unique(),
            shred2.payload().clone(),
            Some(leader_schedule),
            rng.gen(),
            512,
            version,
        )
        .unwrap()
        .collect();
        assert!(chunks.len() > 4);
        let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap();
        assert_eq!(shred1, shred3);
        assert_eq!(shred2, shred4);
    }
}
// Coding-shred pairs that do NOT present inconsistent erasure configs within
// one FEC set — different FEC sets, or shreds of the same (consistent) batch
// — must be rejected with InvalidErasureMetaConflict.
#[test]
fn test_erasure_meta_conflict_invalid() {
    let mut rng = rand::thread_rng();
    let leader = Arc::new(Keypair::new());
    let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0);
    let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap();
    let next_shred_index = rng.gen_range(0..31_000);
    let leader_schedule = |s| {
        if s == slot {
            Some(leader.pubkey())
        } else {
            None
        }
    };
    let coding_shreds =
        new_rand_coding_shreds(&mut rng, next_shred_index, 10, &shredder, &leader);
    let coding_shreds_different_fec =
        new_rand_coding_shreds(&mut rng, next_shred_index + 1, 10, &shredder, &leader);
    let coding_shreds_different_fec_and_size =
        new_rand_coding_shreds(&mut rng, next_shred_index + 1, 13, &shredder, &leader);
    let test_cases = vec![
        (
            coding_shreds[0].clone(),
            coding_shreds_different_fec[1].clone(),
        ),
        (
            coding_shreds[0].clone(),
            coding_shreds_different_fec_and_size[1].clone(),
        ),
        (coding_shreds[0].clone(), coding_shreds[1].clone()),
        (
            coding_shreds_different_fec[0].clone(),
            coding_shreds_different_fec[1].clone(),
        ),
        (
            coding_shreds_different_fec_and_size[0].clone(),
            coding_shreds_different_fec_and_size[1].clone(),
        ),
    ];
    for (shred1, shred2) in test_cases.into_iter() {
        assert_matches!(
            from_shred(
                shred1.clone(),
                Pubkey::new_unique(),
                shred2.payload().clone(),
                Some(leader_schedule),
                rng.gen(),
                512,
                version,
            )
            .err()
            .unwrap(),
            Error::InvalidErasureMetaConflict
        );
        let chunks: Vec<_> = from_shred_bypass_checks(
            shred1.clone(),
            Pubkey::new_unique(),
            shred2.clone(),
            rng.gen(),
            512,
        )
        .unwrap()
        .collect();
        assert!(chunks.len() > 4);
        assert_matches!(
            into_shreds(&leader.pubkey(), chunks, version)
                .err()
                .unwrap(),
            Error::InvalidErasureMetaConflict
        );
    }
}
// Shreds from the same shred/code index but independently generated batches
// have different merkle roots; such pairs — including data-vs-coding pairs —
// are valid merkle-root conflicts and must round-trip.
#[test]
fn test_merkle_root_conflict_round_trip() {
    let mut rng = rand::thread_rng();
    let leader = Arc::new(Keypair::new());
    let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0);
    let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap();
    let next_shred_index = rng.gen_range(0..31_000);
    let leader_schedule = |s| {
        if s == slot {
            Some(leader.pubkey())
        } else {
            None
        }
    };
    let (data_shreds, coding_shreds) = new_rand_shreds(
        &mut rng,
        next_shred_index,
        next_shred_index,
        10,
        &shredder,
        &leader,
        false,
    );
    let (diff_data_shreds, diff_coding_shreds) = new_rand_shreds(
        &mut rng,
        next_shred_index,
        next_shred_index,
        10,
        &shredder,
        &leader,
        false,
    );
    let test_cases = vec![
        (data_shreds[0].clone(), diff_data_shreds[1].clone()),
        (coding_shreds[0].clone(), diff_coding_shreds[1].clone()),
        (data_shreds[0].clone(), diff_coding_shreds[0].clone()),
        (coding_shreds[0].clone(), diff_data_shreds[0].clone()),
    ];
    for (shred1, shred2) in test_cases.into_iter() {
        let chunks: Vec<_> = from_shred(
            shred1.clone(),
            Pubkey::new_unique(),
            shred2.payload().clone(),
            Some(leader_schedule),
            rng.gen(),
            512,
            version,
        )
        .unwrap()
        .collect();
        assert!(chunks.len() > 4);
        let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap();
        assert_eq!(shred1, shred3);
        assert_eq!(shred2, shred4);
    }
}
// Data/coding pairs that fail the merkle-root conflict test (same batch, so
// identical merkle roots, or different FEC sets) fall through to the
// shred-type comparison and must be rejected with ShredTypeMismatch.
#[test]
fn test_merkle_root_conflict_invalid() {
    let mut rng = rand::thread_rng();
    let leader = Arc::new(Keypair::new());
    let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0);
    let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap();
    let next_shred_index = rng.gen_range(0..31_000);
    let leader_schedule = |s| {
        if s == slot {
            Some(leader.pubkey())
        } else {
            None
        }
    };
    let (data_shreds, coding_shreds) = new_rand_shreds(
        &mut rng,
        next_shred_index,
        next_shred_index,
        10,
        &shredder,
        &leader,
        true,
    );
    let (next_data_shreds, next_coding_shreds) = new_rand_shreds(
        &mut rng,
        next_shred_index + 1,
        next_shred_index + 1,
        10,
        &shredder,
        &leader,
        true,
    );
    let test_cases = vec![
        (coding_shreds[0].clone(), data_shreds[0].clone()),
        (data_shreds[0].clone(), coding_shreds[0].clone()),
        (coding_shreds[0].clone(), next_data_shreds[0].clone()),
        (next_coding_shreds[0].clone(), data_shreds[0].clone()),
        (data_shreds[0].clone(), next_coding_shreds[0].clone()),
        (next_data_shreds[0].clone(), coding_shreds[0].clone()),
    ];
    for (shred1, shred2) in test_cases.into_iter() {
        assert_matches!(
            from_shred(
                shred1.clone(),
                Pubkey::new_unique(),
                shred2.payload().clone(),
                Some(leader_schedule),
                rng.gen(),
                512,
                version,
            )
            .err()
            .unwrap(),
            Error::ShredTypeMismatch
        );
        let chunks: Vec<_> = from_shred_bypass_checks(
            shred1.clone(),
            Pubkey::new_unique(),
            shred2.clone(),
            rng.gen(),
            512,
        )
        .unwrap()
        .collect();
        assert!(chunks.len() > 4);
        assert_matches!(
            into_shreds(&leader.pubkey(), chunks, version)
                .err()
                .unwrap(),
            Error::ShredTypeMismatch
        );
    }
}
// Shreds created with a shredder whose version differs from the expected
// shred_version must be rejected with InvalidShredVersion, both when building
// the proof and when reassembling it.
#[test]
fn test_shred_version() {
    let mut rng = rand::thread_rng();
    let leader = Arc::new(Keypair::new());
    let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0);
    let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap();
    let next_shred_index = rng.gen_range(0..31_000);
    let leader_schedule = |s| {
        if s == slot {
            Some(leader.pubkey())
        } else {
            None
        }
    };
    let (data_shreds, coding_shreds) = new_rand_shreds(
        &mut rng,
        next_shred_index,
        next_shred_index,
        10,
        &shredder,
        &leader,
        true,
    );
    // Shredders with off-by-one and off-by-two versions.
    let shredder = Shredder::new(slot, parent_slot, reference_tick, version + 1).unwrap();
    let (wrong_data_shreds_1, wrong_coding_shreds_1) = new_rand_shreds(
        &mut rng,
        next_shred_index,
        next_shred_index,
        10,
        &shredder,
        &leader,
        true,
    );
    let shredder = Shredder::new(slot, parent_slot, reference_tick, version + 2).unwrap();
    let (wrong_data_shreds_2, wrong_coding_shreds_2) = new_rand_shreds(
        &mut rng,
        next_shred_index,
        next_shred_index,
        10,
        &shredder,
        &leader,
        true,
    );
    let test_cases = vec![
        (coding_shreds[0].clone(), wrong_coding_shreds_1[0].clone()),
        (coding_shreds[0].clone(), wrong_data_shreds_1[0].clone()),
        (data_shreds[0].clone(), wrong_coding_shreds_1[0].clone()),
        (data_shreds[0].clone(), wrong_data_shreds_1[0].clone()),
        (
            wrong_coding_shreds_2[0].clone(),
            wrong_coding_shreds_1[0].clone(),
        ),
        (
            wrong_coding_shreds_2[0].clone(),
            wrong_data_shreds_1[0].clone(),
        ),
        (
            wrong_data_shreds_2[0].clone(),
            wrong_coding_shreds_1[0].clone(),
        ),
        (
            wrong_data_shreds_2[0].clone(),
            wrong_data_shreds_1[0].clone(),
        ),
    ];
    for (shred1, shred2) in test_cases.into_iter() {
        assert_matches!(
            from_shred(
                shred1.clone(),
                Pubkey::new_unique(),
                shred2.payload().clone(),
                Some(leader_schedule),
                rng.gen(),
                512,
                version,
            )
            .err()
            .unwrap(),
            Error::InvalidShredVersion(_)
        );
        let chunks: Vec<_> = from_shred_bypass_checks(
            shred1.clone(),
            Pubkey::new_unique(),
            shred2.clone(),
            rng.gen(),
            512,
        )
        .unwrap()
        .collect();
        assert!(chunks.len() > 4);
        assert_matches!(
            into_shreds(&leader.pubkey(), chunks, version)
                .err()
                .unwrap(),
            Error::InvalidShredVersion(_)
        );
    }
}
// Two copies of one shred that differ only in the retransmitter signature do
// not prove duplicity: both construction and reassembly must fail with
// InvalidDuplicateShreds, in either shred order.
#[test]
fn test_retransmitter_signature_invalid() {
    let mut rng = rand::thread_rng();
    let leader = Arc::new(Keypair::new());
    let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0);
    let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap();
    let next_shred_index = rng.gen_range(0..32_000);
    let leader_schedule = |s| {
        if s == slot {
            Some(leader.pubkey())
        } else {
            None
        }
    };
    let data_shred = new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true);
    let coding_shred =
        new_rand_coding_shreds(&mut rng, next_shred_index, 10, &shredder, &leader)[0].clone();
    // Overwrite only the retransmitter signature in each payload copy.
    let mut data_shred_different_retransmitter_payload = data_shred.clone().into_payload();
    shred::layout::set_retransmitter_signature(
        &mut data_shred_different_retransmitter_payload.as_mut(),
        &Signature::new_unique(),
    )
    .unwrap();
    let data_shred_different_retransmitter =
        Shred::new_from_serialized_shred(data_shred_different_retransmitter_payload).unwrap();
    let mut coding_shred_different_retransmitter_payload = coding_shred.clone().into_payload();
    shred::layout::set_retransmitter_signature(
        &mut coding_shred_different_retransmitter_payload.as_mut(),
        &Signature::new_unique(),
    )
    .unwrap();
    let coding_shred_different_retransmitter =
        Shred::new_from_serialized_shred(coding_shred_different_retransmitter_payload).unwrap();
    let test_cases = [
        (data_shred, data_shred_different_retransmitter),
        (coding_shred, coding_shred_different_retransmitter),
    ];
    for (shred1, shred2) in test_cases.iter().flat_map(|(a, b)| [(a, b), (b, a)]) {
        assert_matches!(
            from_shred(
                shred1.clone(),
                Pubkey::new_unique(),
                shred2.payload().clone(),
                Some(leader_schedule),
                rng.gen(),
                512,
                version,
            )
            .err()
            .unwrap(),
            Error::InvalidDuplicateShreds
        );
        let chunks: Vec<_> = from_shred_bypass_checks(
            shred1.clone(),
            Pubkey::new_unique(),
            shred2.clone(),
            rng.gen(),
            512,
        )
        .unwrap()
        .collect();
        assert!(chunks.len() > 4);
        assert_matches!(
            into_shreds(&leader.pubkey(), chunks, version)
                .err()
                .unwrap(),
            Error::InvalidDuplicateShreds
        );
    }
}
}