//! Erasure coding used to make availability data redundant: a payload is encoded into one chunk
//! per validator with a Reed-Solomon code over GF(2^16) (via `novelpoly`), and the chunks are
//! committed to in a Blake2-256 trie so that individual chunks can be verified with merkle
//! branch proofs.

use codec::{Decode, Encode};
use polkadot_node_primitives::{AvailableData, Proof};
use polkadot_primitives::{BlakeTwo256, Hash as H256, HashT};
use sp_core::Blake2Hasher;
use sp_trie::{
	trie_types::{TrieDBBuilder, TrieDBMutBuilderV0 as TrieDBMutBuilder},
	LayoutV0, MemoryDB, Trie, TrieMut, EMPTY_PREFIX,
};
use thiserror::Error;

use novelpoly::{CodeParams, WrappedShard};

/// The maximum number of validators supported: the size of the GF(2^16) field used by the code.
const MAX_VALIDATORS: usize = novelpoly::f2e16::FIELD_SIZE;

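/// Errors that can occur when obtaining chunks, reconstructing data, or handling merkle branch
/// proofs.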
#[derive(Debug, Clone, PartialEq, Error)]
pub enum Error {
	#[error("There are too many validators")]
	TooManyValidators,
	#[error("Expected at least 2 validators")]
	NotEnoughValidators,
	#[error("Validator count mismatches between encoding and decoding")]
	WrongValidatorCount,
	#[error("Not enough chunks to reconstruct message")]
	NotEnoughChunks,
	#[error("Too many chunks present")]
	TooManyChunks,
	#[error("Chunks are not uniform, mismatch in length or are zero sized")]
	NonUniformChunks,
	#[error("Uneven length is not valid for field GF(2^16)")]
	UnevenLength,
	#[error("Chunk is out of bounds: {chunk_index} not included in 0..{n_validators}")]
	ChunkIndexOutOfBounds { chunk_index: usize, n_validators: usize },
	#[error("Reconstructed payload invalid")]
	BadPayload,
	#[error("Unable to decode reconstructed payload: {0}")]
	Decode(#[source] codec::Error),
	#[error("Invalid branch proof")]
	InvalidBranchProof,
	#[error("Branch is out of bounds")]
	BranchOutOfBounds,
	#[error("An unknown error has appeared when reconstructing erasure code chunks")]
	UnknownReconstruction,
	#[error("An unknown error has appeared when deriving code parameters from validator count")]
	UnknownCodeParam,
}

impl From<novelpoly::Error> for Error {
	fn from(error: novelpoly::Error) -> Self {
		match error {
			novelpoly::Error::NeedMoreShards { .. } => Self::NotEnoughChunks,
			novelpoly::Error::ParamterMustBePowerOf2 { .. } => Self::UnevenLength,
			novelpoly::Error::WantedShardCountTooHigh(_) => Self::TooManyValidators,
			novelpoly::Error::WantedShardCountTooLow(_) => Self::NotEnoughValidators,
			novelpoly::Error::PayloadSizeIsZero { .. } => Self::BadPayload,
			novelpoly::Error::InconsistentShardLengths { .. } => Self::NonUniformChunks,
			_ => Self::UnknownReconstruction,
		}
	}
}

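/// Obtain a threshold of chunks that is enough to recover the data.
///
/// Errors if there are more than `MAX_VALIDATORS` or fewer than two validators.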
pub const fn recovery_threshold(n_validators: usize) -> Result<usize, Error> {
	if n_validators > MAX_VALIDATORS {
		return Err(Error::TooManyValidators)
	}
	if n_validators <= 1 {
		return Err(Error::NotEnoughValidators)
	}

	let needed = n_validators.saturating_sub(1) / 3;
	Ok(needed + 1)
}

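/// Obtain the threshold of systematic chunks that is enough to recover the data.
///
/// These are the first `k` chunks of the encoding, where `k` is the systematic parameter of the
/// code derived for `n_validators` (a power of two no greater than `recovery_threshold`).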
pub fn systematic_recovery_threshold(n_validators: usize) -> Result<usize, Error> {
	code_params(n_validators).map(|params| params.k())
}

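/// Derive the parameters of the underlying erasure code for the given number of validators:
/// one chunk per validator, with `recovery_threshold(n_validators)` chunks wanted for recovery.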
fn code_params(n_validators: usize) -> Result<CodeParams, Error> {
	let n_wanted = n_validators;
	let k_wanted = recovery_threshold(n_wanted)?;

	if n_wanted > MAX_VALIDATORS as usize {
		return Err(Error::TooManyValidators)
	}

	CodeParams::derive_parameters(n_wanted, k_wanted).map_err(|e| match e {
		novelpoly::Error::WantedShardCountTooHigh(_) => Error::TooManyValidators,
		novelpoly::Error::WantedShardCountTooLow(_) => Error::NotEnoughValidators,
		_ => Error::UnknownCodeParam,
	})
}

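/// Reconstruct the v1 available data from the set of systematic chunks.
///
/// Provide the first `k` chunks in order. If too few chunks are provided, recovery is not
/// possible.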
pub fn reconstruct_from_systematic_v1(
	n_validators: usize,
	chunks: Vec<Vec<u8>>,
) -> Result<AvailableData, Error> {
	reconstruct_from_systematic(n_validators, chunks)
}

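/// Reconstruct a decodable payload from the set of systematic chunks.
///
/// Provide the first `k` chunks in order; only the first `k` entries of `chunks` are used.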
pub fn reconstruct_from_systematic<T: Decode>(
	n_validators: usize,
	chunks: Vec<Vec<u8>>,
) -> Result<T, Error> {
	let code_params = code_params(n_validators)?;
	let k = code_params.k();

	for chunk_data in chunks.iter().take(k) {
		if chunk_data.len() % 2 != 0 {
			return Err(Error::UnevenLength)
		}
	}

	let bytes = code_params.make_encoder().reconstruct_from_systematic(
		chunks.into_iter().take(k).map(|data| WrappedShard::new(data)).collect(),
	)?;

	Decode::decode(&mut &bytes[..]).map_err(|err| Error::Decode(err))
}

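/// Obtain a set of erasure-coded chunks for v1 `AvailableData`, one for each validator.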
pub fn obtain_chunks_v1(n_validators: usize, data: &AvailableData) -> Result<Vec<Vec<u8>>, Error> {
	obtain_chunks(n_validators, data)
}

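/// Obtain erasure-coded chunks for any SCALE-encodable payload, one for each validator.
///
/// Fails with `Error::BadPayload` if the payload encodes to zero bytes, and with
/// `Error::TooManyValidators` / `Error::NotEnoughValidators` for unsupported validator counts.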
pub fn obtain_chunks<T: Encode>(n_validators: usize, data: &T) -> Result<Vec<Vec<u8>>, Error> {
	let params = code_params(n_validators)?;
	let encoded = data.encode();

	if encoded.is_empty() {
		return Err(Error::BadPayload)
	}

	let shards = params
		.make_encoder()
		.encode::<WrappedShard>(&encoded[..])
		.expect("Payload non-empty, shard sizes are uniform, and validator numbers checked; qed");

	Ok(shards.into_iter().map(|w: WrappedShard| w.into_inner()).collect())
}

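/// Reconstruct the v1 available data from a set of chunks.
///
/// Provide an iterator of chunk data paired with the chunk's index. If too few chunks are
/// provided, recovery is not possible.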
pub fn reconstruct_v1<'a, I: 'a>(n_validators: usize, chunks: I) -> Result<AvailableData, Error>
where
	I: IntoIterator<Item = (&'a [u8], usize)>,
{
	reconstruct(n_validators, chunks)
}

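/// Reconstruct a decodable payload from a set of chunks.
///
/// Provide an iterator of chunk data paired with the chunk's index in `0..n_validators`. Only
/// the first `n_validators` items are considered; if too few chunks are provided, reconstruction
/// fails with `Error::NotEnoughChunks`.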
pub fn reconstruct<'a, I: 'a, T: Decode>(n_validators: usize, chunks: I) -> Result<T, Error>
where
	I: IntoIterator<Item = (&'a [u8], usize)>,
{
	let params = code_params(n_validators)?;
	let mut received_shards: Vec<Option<WrappedShard>> = vec![None; n_validators];
	for (chunk_data, chunk_idx) in chunks.into_iter().take(n_validators) {
		if chunk_data.len() % 2 != 0 {
			return Err(Error::UnevenLength)
		}

		// Reject out-of-range indices instead of panicking on the slice index below.
		if chunk_idx >= n_validators {
			return Err(Error::ChunkIndexOutOfBounds { chunk_index: chunk_idx, n_validators })
		}

		received_shards[chunk_idx] = Some(WrappedShard::new(chunk_data.to_vec()));
	}

	let payload_bytes = params.make_encoder().reconstruct(received_shards)?;

	Decode::decode(&mut &payload_bytes[..]).map_err(|_| Error::BadPayload)
}

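/// An iterator that yields merkle branch proofs, one for each erasure chunk.
///
/// The proofs are produced from an in-memory trie that maps each chunk index (encoded as a
/// `u32`) to the Blake2-256 hash of the chunk.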
pub struct Branches<'a, I> {
	trie_storage: MemoryDB<Blake2Hasher>,
	root: H256,
	chunks: &'a [I],
	current_pos: usize,
}

impl<'a, I: AsRef<[u8]>> Branches<'a, I> {
	/// Get the root of the chunk trie.
	pub fn root(&self) -> H256 {
		self.root
	}
}

impl<'a, I: AsRef<[u8]>> Iterator for Branches<'a, I> {
	type Item = (Proof, &'a [u8]);

	fn next(&mut self) -> Option<Self::Item> {
		use sp_trie::Recorder;

		let mut recorder = Recorder::<LayoutV0<Blake2Hasher>>::new();
		let res = {
			let trie = TrieDBBuilder::new(&self.trie_storage, &self.root)
				.with_recorder(&mut recorder)
				.build();

			(self.current_pos as u32).using_encoded(|s| trie.get(s))
		};

		match res.expect("all nodes in trie present; qed") {
			Some(_) => {
				// The recorder now holds exactly the nodes visited while looking up this index,
				// which form the merkle branch for the chunk.
				let nodes: Vec<Vec<u8>> = recorder.drain().into_iter().map(|r| r.data).collect();
				let chunk = self.chunks.get(self.current_pos).expect(
					"there is a one-to-one mapping of chunks to valid merkle branches; qed",
				);
				self.current_pos += 1;
				Proof::try_from(nodes).ok().map(|proof| (proof, chunk.as_ref()))
			},
			None => None,
		}
	}
}

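/// Construct a merkle trie over the chunks and return an iterator of branch proofs.
///
/// Each chunk's Blake2-256 hash is inserted under its `u32`-encoded index; iterating the
/// returned [`Branches`] yields a `(Proof, chunk)` pair for every chunk.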
pub fn branches<'a, I: 'a>(chunks: &'a [I]) -> Branches<'a, I>
where
	I: AsRef<[u8]>,
{
	let mut trie_storage: MemoryDB<Blake2Hasher> = MemoryDB::default();
	let mut root = H256::default();

	// Construct the trie, keying each chunk's Blake2-256 hash by its `u32`-encoded index.
	{
		let mut trie = TrieDBMutBuilder::new(&mut trie_storage, &mut root).build();
		for (i, chunk) in chunks.as_ref().iter().enumerate() {
			(i as u32).using_encoded(|encoded_index| {
				let chunk_hash = BlakeTwo256::hash(chunk.as_ref());
				trie.insert(encoded_index, chunk_hash.as_ref())
					.expect("a fresh trie stored in memory cannot have errors loading nodes; qed");
			})
		}
	}

	Branches { trie_storage, root, chunks, current_pos: 0 }
}

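/// Verify a merkle branch, yielding the chunk hash committed to at the given index.
///
/// Fails with `Error::InvalidBranchProof` if the proof does not verify against the root, and
/// with `Error::BranchOutOfBounds` if the index has no entry in the trie.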
pub fn branch_hash(root: &H256, branch_nodes: &Proof, index: usize) -> Result<H256, Error> {
	let mut trie_storage: MemoryDB<Blake2Hasher> = MemoryDB::default();
	for node in branch_nodes.iter() {
		(&mut trie_storage as &mut sp_trie::HashDB<_>).insert(EMPTY_PREFIX, node);
	}

	let trie = TrieDBBuilder::new(&trie_storage, &root).build();
	let res = (index as u32).using_encoded(|key| {
		trie.get_with(key, |raw_hash: &[u8]| H256::decode(&mut &raw_hash[..]))
	});

	match res {
		Ok(Some(Ok(hash))) => Ok(hash),
		Ok(Some(Err(_))) => Err(Error::InvalidBranchProof),
		Ok(None) => Err(Error::BranchOutOfBounds),
		Err(_) => Err(Error::InvalidBranchProof),
	}
}

#[cfg(test)]
mod tests {
	use std::sync::Arc;

	use super::*;
	use polkadot_node_primitives::{AvailableData, BlockData, PoV};
	use polkadot_primitives::{HeadData, PersistedValidationData};
	use quickcheck::{Arbitrary, Gen, QuickCheck};

	const KEY_INDEX_NIBBLE_SIZE: usize = 4;

	#[derive(Clone, Debug)]
	struct ArbitraryAvailableData(AvailableData);

	impl Arbitrary for ArbitraryAvailableData {
		fn arbitrary(g: &mut Gen) -> Self {
			let pov_len = (u32::arbitrary(g) % (1024 * 1024)).max(2);

			let pov = (0..pov_len).map(|_| u8::arbitrary(g)).collect();

			let pvd = PersistedValidationData {
				parent_head: HeadData((0..u16::arbitrary(g)).map(|_| u8::arbitrary(g)).collect()),
				relay_parent_number: u32::arbitrary(g),
				relay_parent_storage_root: [u8::arbitrary(g); 32].into(),
				max_pov_size: u32::arbitrary(g),
			};

			ArbitraryAvailableData(AvailableData {
				pov: Arc::new(PoV { block_data: BlockData(pov) }),
				validation_data: pvd,
			})
		}
	}

	#[test]
	fn field_order_is_right_size() {
		assert_eq!(MAX_VALIDATORS, 65536);
	}

	#[test]
	fn round_trip_works() {
		let pov = PoV { block_data: BlockData((0..255).collect()) };

		let available_data = AvailableData { pov: pov.into(), validation_data: Default::default() };
		let chunks = obtain_chunks(10, &available_data).unwrap();

		assert_eq!(chunks.len(), 10);

		// Any 4 chunks (the recovery threshold for 10 validators) should suffice.
		let reconstructed: AvailableData = reconstruct(
			10,
			[(&*chunks[1], 1), (&*chunks[4], 4), (&*chunks[6], 6), (&*chunks[9], 9)]
				.iter()
				.cloned(),
		)
		.unwrap();

		assert_eq!(reconstructed, available_data);
	}

	#[test]
	fn round_trip_systematic_works() {
		fn property(available_data: ArbitraryAvailableData, n_validators: u16) {
			let n_validators = n_validators.max(2);
			let kpow2 = systematic_recovery_threshold(n_validators as usize).unwrap();
			let chunks = obtain_chunks(n_validators as usize, &available_data.0).unwrap();
			assert_eq!(
				reconstruct_from_systematic_v1(
					n_validators as usize,
					chunks.into_iter().take(kpow2).collect()
				)
				.unwrap(),
				available_data.0
			);
		}

		QuickCheck::new().quickcheck(property as fn(ArbitraryAvailableData, u16))
	}

	#[test]
	fn reconstruct_does_not_panic_on_low_validator_count() {
		let reconstructed = reconstruct_v1(1, [].iter().cloned());
		assert_eq!(reconstructed, Err(Error::NotEnoughValidators));
	}

	fn generate_trie_and_generate_proofs(magnitude: u32) {
		let n_validators = 2_u32.pow(magnitude) as usize;
		let pov = PoV { block_data: BlockData(vec![2; n_validators / KEY_INDEX_NIBBLE_SIZE]) };

		let available_data = AvailableData { pov: pov.into(), validation_data: Default::default() };

		let chunks = obtain_chunks(magnitude as usize, &available_data).unwrap();

		assert_eq!(chunks.len() as u32, magnitude);

		let branches = branches(chunks.as_ref());
		let root = branches.root();

		let proofs: Vec<_> = branches.map(|(proof, _)| proof).collect();
		assert_eq!(proofs.len() as u32, magnitude);
		for (i, proof) in proofs.into_iter().enumerate() {
			let encode = Encode::encode(&proof);
			let decode = Decode::decode(&mut &encode[..]).unwrap();
			assert_eq!(proof, decode);
			assert_eq!(encode, Encode::encode(&decode));

			assert_eq!(branch_hash(&root, &proof, i).unwrap(), BlakeTwo256::hash(&chunks[i]));
		}
	}

	#[test]
	fn roundtrip_proof_encoding() {
		for i in 2..16 {
			generate_trie_and_generate_proofs(i);
		}
	}
}