use super::*;
use authorship::claim_slot;
use soil_client::block_builder::{BlockBuilder, BlockBuilderBuilder};
use soil_client::client_api::{BlockchainEvents, Finalizer};
use soil_client::consensus::{NoNetwork as DummyOracle, Proposal, ProposeArgs};
use soil_client::import::{BoxBlockImport, BoxJustificationImport};
use soil_client::transaction_pool::RejectAllTxPool;
use soil_consensus::epochs::{EpochIdentifier, EpochIdentifierPosition};
use soil_consensus::slots::BackoffAuthoringOnFinalizedHeadLagging;
use soil_test::network::{Block as TestBlock, *};
use std::{cell::RefCell, task::Poll};
use subsoil::application_crypto::key_types::BABE;
use subsoil::consensus::babe::{
inherents::{BabeCreateInherentDataProviders, InherentDataProvider},
make_vrf_sign_data, AllowedSlots, AuthorityId, AuthorityPair, Slot,
};
use subsoil::consensus::slots::SlotDuration;
use subsoil::core::crypto::Pair;
use subsoil::keyring::Sr25519Keyring;
use subsoil::keystore::{testing::MemoryKeystore, Keystore};
use subsoil::runtime::{
generic::{Digest, DigestItem},
traits::Block as BlockT,
};
use soil_test_node_runtime_client::DefaultTestClientBuilderExt;
type Item = DigestItem;
type Error = soil_client::blockchain::Error;
type TestClient = soil_test_node_runtime_client::client::Client<
soil_test_node_runtime_client::Backend,
soil_test_node_runtime_client::ExecutorDispatch,
TestBlock,
soil_test_node_runtime_client::runtime::RuntimeApi,
>;
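// Stage at which a test's header mutator runs: `PreSeal` while the proposer is
// still building the block, `PostSeal` when the verifier sees the sealed header.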
#[derive(Copy, Clone, PartialEq)]
enum Stage {
PreSeal,
PostSeal,
}
type Mutator = Arc<dyn Fn(&mut TestHeader, Stage) + Send + Sync>;
type BabeBlockImport = PanickingBlockImport<
crate::BabeBlockImport<
TestBlock,
TestClient,
Arc<TestClient>,
BabeCreateInherentDataProviders<TestBlock>,
soil_consensus::LongestChain<soil_test_node_runtime_client::Backend, TestBlock>,
>,
>;
const SLOT_DURATION_MS: u64 = 1000;
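// Minimal proposer environment: builds empty blocks through the test client's
// block builder and applies the configured mutator to the pre-seal header.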
#[derive(Clone)]
struct DummyFactory {
client: Arc<TestClient>,
epoch_changes: SharedEpochChanges<TestBlock, Epoch>,
mutator: Mutator,
}
struct DummyProposer {
factory: DummyFactory,
parent_hash: Hash,
}
impl Environment<TestBlock> for DummyFactory {
type CreateProposer = future::Ready<Result<DummyProposer, Error>>;
type Proposer = DummyProposer;
type Error = Error;
fn init(&mut self, parent_header: &<TestBlock as BlockT>::Header) -> Self::CreateProposer {
future::ready(Ok(DummyProposer {
factory: self.clone(),
parent_hash: parent_header.hash(),
}))
}
}
impl DummyProposer {
fn propose_with(
&mut self,
pre_digests: Digest,
) -> future::Ready<Result<Proposal<TestBlock>, Error>> {
let block_builder = BlockBuilderBuilder::new(&*self.factory.client)
.on_parent_block(self.parent_hash)
.fetch_parent_block_number(&*self.factory.client)
.unwrap()
.with_inherent_digests(pre_digests)
.build()
.unwrap();
let mut block = match block_builder.build().map_err(|e| e.into()) {
Ok(b) => b.block,
Err(e) => return future::ready(Err(e)),
};
(self.factory.mutator)(&mut block.header, Stage::PreSeal);
future::ready(Ok(Proposal { block, storage_changes: Default::default() }))
}
}
impl Proposer<TestBlock> for DummyProposer {
type Error = Error;
type Proposal = future::Ready<Result<Proposal<TestBlock>, Error>>;
fn propose(mut self, args: ProposeArgs<TestBlock>) -> Self::Proposal {
self.propose_with(args.inherent_digests)
}
}
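// Thread-local mutator picked up by `make_verifier`, so tests can tamper with
// headers on the verification side as well.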
thread_local! {
static MUTATOR: RefCell<Mutator> = RefCell::new(Arc::new(|_, _|()));
}
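// Block import wrapper that panics on any import or check error, so test
// failures surface immediately instead of being silently returned as `Err`s.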
#[derive(Clone)]
pub struct PanickingBlockImport<B>(B);
#[async_trait::async_trait]
impl<BI> BlockImport<TestBlock> for PanickingBlockImport<BI>
where
BI: BlockImport<TestBlock> + Send + Sync,
{
type Error = BI::Error;
async fn import_block(
&self,
block: BlockImportParams<TestBlock>,
) -> Result<ImportResult, Self::Error> {
Ok(self.0.import_block(block).await.expect("importing block failed"))
}
async fn check_block(
&self,
block: BlockCheckParams<TestBlock>,
) -> Result<ImportResult, Self::Error> {
Ok(self.0.check_block(block).await.expect("checking block failed"))
}
}
type BabePeer = Peer<Option<PeerData>, BabeBlockImport>;
#[derive(Default)]
pub struct BabeTestNet {
peers: Vec<BabePeer>,
}
type TestHeader = <TestBlock as BlockT>::Header;
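// Verifier wrapper that applies the post-seal mutator to the header before
// delegating to the real `BabeVerifier`.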
pub struct TestVerifier {
inner: BabeVerifier<TestBlock, PeersFullClient>,
mutator: Mutator,
}
#[async_trait::async_trait]
impl Verifier<TestBlock> for TestVerifier {
async fn verify(
&self,
mut block: BlockImportParams<TestBlock>,
) -> Result<BlockImportParams<TestBlock>, String> {
(self.mutator)(&mut block.header, Stage::PostSeal);
self.inner.verify(block).await
}
}
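// Per-peer state: the BABE link (config plus epoch changes) and the boxed
// block import handed out to the authoring task.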
pub struct PeerData {
link: BabeLink<TestBlock>,
block_import: Mutex<Option<BoxBlockImport<TestBlock>>>,
}
impl TestNetFactory for BabeTestNet {
type Verifier = TestVerifier;
type PeerData = Option<PeerData>;
type BlockImport = BabeBlockImport;
fn make_block_import(
&self,
client: PeersClient,
) -> (
BlockImportAdapter<Self::BlockImport>,
Option<BoxJustificationImport<Block>>,
Option<PeerData>,
) {
let client = client.as_client();
let config = crate::configuration(&*client).expect("config available");
let (_, longest_chain) = TestClientBuilder::new().build_with_longest_chain();
let (block_import, link) = crate::block_import(
config,
client.clone(),
client.clone(),
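// Inherent data providers used by the block import: a system-time
// timestamp plus the slot derived from it at the test slot duration.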
Arc::new(move |_, _| async {
let timestamp = subsoil::timestamp::InherentDataProvider::from_system_time();
let slot = InherentDataProvider::from_timestamp_and_slot_duration(
*timestamp,
SlotDuration::from_millis(SLOT_DURATION_MS),
);
Ok((slot, timestamp))
}) as BabeCreateInherentDataProviders<TestBlock>,
longest_chain,
OffchainTransactionPoolFactory::new(RejectAllTxPool::default()),
)
.expect("can initialize block-import");
let block_import = PanickingBlockImport(block_import);
let data_block_import =
Mutex::new(Some(Box::new(block_import.clone()) as BoxBlockImport<_>));
(
BlockImportAdapter::new(block_import),
None,
Some(PeerData { link, block_import: data_block_import }),
)
}
fn make_verifier(&self, client: PeersClient, maybe_link: &Option<PeerData>) -> Self::Verifier {
let client = client.as_client();
trace!(target: LOG_TARGET, "Creating a verifier");
let data = maybe_link
.as_ref()
.expect("babe link always provided to verifier instantiation");
TestVerifier {
inner: BabeVerifier {
client: client.clone(),
slot_duration: SlotDuration::from_millis(SLOT_DURATION_MS),
config: data.link.config.clone(),
epoch_changes: data.link.epoch_changes.clone(),
telemetry: None,
},
mutator: MUTATOR.with(|m| m.borrow().clone()),
}
}
fn peer(&mut self, i: usize) -> &mut BabePeer {
trace!(target: LOG_TARGET, "Retrieving a peer");
&mut self.peers[i]
}
fn peers(&self) -> &Vec<BabePeer> {
trace!(target: LOG_TARGET, "Retrieving peers");
&self.peers
}
fn peers_mut(&mut self) -> &mut Vec<BabePeer> {
trace!(target: "babe", "Retrieving peers, mutable");
&mut self.peers
}
fn mut_peers<F: FnOnce(&mut Vec<BabePeer>)>(&mut self, closure: F) {
closure(&mut self.peers);
}
}
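// A block carrying no BABE pre-runtime digest must be rejected on import.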
#[tokio::test]
#[should_panic(expected = "No BABE pre-runtime digest found")]
async fn rejects_empty_block() {
subsoil::tracing::try_init_simple();
let mut net = BabeTestNet::new(3);
let block_builder = |builder: BlockBuilder<_, _>| builder.build().unwrap().block;
net.mut_peers(|peers| {
    peers[0].generate_blocks(1, BlockOrigin::NetworkInitialSync, block_builder);
})
}
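// Creates an in-memory keystore holding the given authority's BABE key.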
fn create_keystore(authority: Sr25519Keyring) -> KeystorePtr {
let keystore = MemoryKeystore::new();
keystore
.sr25519_generate_new(BABE, Some(&authority.to_seed()))
.expect("Generates authority key");
keystore.into()
}
async fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static) {
subsoil::tracing::try_init_simple();
let mutator = Arc::new(mutator) as Mutator;
MUTATOR.with(|m| *m.borrow_mut() = mutator.clone());
let net = BabeTestNet::new(3);
let peers = [Sr25519Keyring::Alice, Sr25519Keyring::Bob, Sr25519Keyring::Charlie];
let net = Arc::new(Mutex::new(net));
let mut import_notifications = Vec::new();
let mut babe_futures = Vec::new();
for (peer_id, auth_id) in peers.iter().enumerate() {
let mut net = net.lock();
let peer = net.peer(peer_id);
let client = peer.client().as_client();
let select_chain = peer.select_chain().expect("Full client has select_chain");
let keystore = create_keystore(*auth_id);
let mut got_own = false;
let mut got_other = false;
let data = peer.data.as_ref().expect("babe link set up during initialization");
let environ = DummyFactory {
client: client.clone(),
epoch_changes: data.link.epoch_changes.clone(),
mutator: mutator.clone(),
};
import_notifications.push(
client
.import_notification_stream()
.take_while(move |n| {
future::ready(
n.header.number() < &5 || {
if n.origin == BlockOrigin::Own {
got_own = true;
} else {
got_other = true;
}
!(got_own && got_other)
},
)
})
.for_each(|_| future::ready(())),
);
let client_clone = client.clone();
babe_futures.push(
start_babe(BabeParams {
block_import: data.block_import.lock().take().expect("import set up during init"),
select_chain,
client,
env: environ,
sync_oracle: DummyOracle,
create_inherent_data_providers: Box::new(move |parent, _| {
let parent_header = client_clone.header(parent).ok().flatten().unwrap();
let slot = Slot::from(
find_pre_digest::<TestBlock>(&parent_header).unwrap().slot() + 1,
);
async move { Ok((InherentDataProvider::new(slot),)) }
}),
force_authoring: false,
backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()),
babe_link: data.link.clone(),
keystore,
justification_sync_link: (),
block_proposal_slot_portion: SlotProportion::new(0.5),
max_block_proposal_slot_portion: None,
telemetry: None,
})
.expect("Starts babe"),
);
}
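// Drive the network until every import-notification stream completes,
// panicking as soon as any peer reports a failed verification.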
future::select(
futures::future::poll_fn(move |cx| {
let mut net = net.lock();
net.poll(cx);
for p in net.peers() {
if let Some((h, e)) = p.failed_verifications().into_iter().next() {
panic!("Verification failed for {:?}: {}", h, e);
}
}
Poll::<()>::Pending
}),
future::select(future::join_all(import_notifications), future::join_all(babe_futures)),
)
.await;
}
#[cfg(ignore_flaky_test)]
#[tokio::test]
async fn authoring_blocks() {
run_one_test(|_, _| ()).await;
}
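// Stripping the BABE pre-digest at the pre-seal stage must make import fail.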
#[tokio::test]
#[should_panic(expected = "importing block failed: Other(NoPreRuntimeDigest)")]
async fn rejects_missing_inherent_digest() {
run_one_test(|header: &mut TestHeader, stage| {
let v = std::mem::take(&mut header.digest_mut().logs);
header.digest_mut().logs = v
.into_iter()
.filter(|v| stage == Stage::PostSeal || v.as_babe_pre_digest().is_none())
.collect()
})
.await;
}
#[cfg(ignore_flaky_test)]
#[tokio::test]
#[should_panic(expected = "has a bad seal")]
async fn rejects_missing_seals() {
run_one_test(|header: &mut TestHeader, stage| {
let v = std::mem::take(&mut header.digest_mut().logs);
header.digest_mut().logs = v
.into_iter()
.filter(|v| stage == Stage::PreSeal || v.as_babe_seal().is_none())
.collect()
})
.await;
}
#[tokio::test]
#[should_panic(expected = "Expected epoch change to happen")]
async fn rejects_missing_consensus_digests() {
run_one_test(|header: &mut TestHeader, stage| {
let v = std::mem::take(&mut header.digest_mut().logs);
header.digest_mut().logs = v
.into_iter()
.filter(|v| stage == Stage::PostSeal || v.as_next_epoch_descriptor().is_none())
.collect()
})
.await;
}
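// Seal digests with a wrong engine id or a malformed payload must not decode
// as BABE pre-digests or seals.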
#[test]
fn wrong_consensus_engine_id_rejected() {
subsoil::tracing::try_init_simple();
let sig = AuthorityPair::generate().0.sign(b"");
let bad_seal: Item = DigestItem::Seal([0; 4], sig.to_vec());
assert!(bad_seal.as_babe_pre_digest().is_none());
assert!(bad_seal.as_babe_seal().is_none())
}
#[test]
fn malformed_pre_digest_rejected() {
subsoil::tracing::try_init_simple();
let bad_seal: Item = DigestItem::Seal(BABE_ENGINE_ID, [0; 64].to_vec());
assert!(bad_seal.as_babe_pre_digest().is_none());
}
#[test]
fn sig_is_not_pre_digest() {
subsoil::tracing::try_init_simple();
let sig = AuthorityPair::generate().0.sign(b"");
let bad_seal: Item = DigestItem::Seal(BABE_ENGINE_ID, sig.to_vec());
assert!(bad_seal.as_babe_pre_digest().is_none());
assert!(bad_seal.as_babe_seal().is_some())
}
#[test]
fn claim_epoch_slots() {
const EPOCH_DURATION: u64 = 10;
let authority = Sr25519Keyring::Alice;
let keystore = create_keystore(authority);
let mut epoch: Epoch = subsoil::consensus::babe::Epoch {
start_slot: 0.into(),
authorities: vec![(authority.public().into(), 1)],
randomness: [0; 32],
epoch_index: 1,
duration: EPOCH_DURATION,
config: BabeEpochConfiguration {
c: (3, 10),
allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots,
},
}
.into();
let claim_slot_wrap = |s, e| match claim_slot(Slot::from(s as u64), &e, &keystore) {
None => 0,
Some((PreDigest::Primary(_), _)) => 1,
Some((PreDigest::SecondaryPlain(_), _)) => 2,
Some((PreDigest::SecondaryVRF(_), _)) => 3,
};
let claims: Vec<_> = (0..EPOCH_DURATION)
.into_iter()
.map(|slot| claim_slot_wrap(slot, epoch.clone()))
.collect();
assert_eq!(claims, [1, 2, 2, 1, 2, 2, 2, 2, 2, 1]);
epoch.config.allowed_slots = AllowedSlots::PrimaryAndSecondaryVRFSlots;
let claims: Vec<_> = (0..EPOCH_DURATION)
.into_iter()
.map(|slot| claim_slot_wrap(slot, epoch.clone()))
.collect();
assert_eq!(claims, [1, 3, 3, 1, 3, 3, 3, 3, 3, 1]);
epoch.config.allowed_slots = AllowedSlots::PrimarySlots;
let claims: Vec<_> = (0..EPOCH_DURATION)
.into_iter()
.map(|slot| claim_slot_wrap(slot, epoch.clone()))
.collect();
assert_eq!(claims, [1, 0, 0, 1, 0, 0, 0, 0, 0, 1]);
}
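// Checks that the VRF pre-output embedded in primary and secondary-VRF claims
// matches a signature produced directly through the keystore, including for
// slots beyond the epoch end, where the epoch is re-based via `clone_for_slot`.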
#[test]
fn claim_vrf_check() {
let authority = Sr25519Keyring::Alice;
let keystore = create_keystore(authority);
let public = authority.public();
let epoch: Epoch = subsoil::consensus::babe::Epoch {
start_slot: 0.into(),
authorities: vec![(public.into(), 1)],
randomness: [0; 32],
epoch_index: 1,
duration: 10,
config: BabeEpochConfiguration {
c: (3, 10),
allowed_slots: AllowedSlots::PrimaryAndSecondaryVRFSlots,
},
}
.into();
let pre_digest = match claim_slot(0.into(), &epoch, &keystore).unwrap().0 {
PreDigest::Primary(d) => d,
v => panic!("Unexpected pre-digest variant {:?}", v),
};
let data = make_vrf_sign_data(&epoch.randomness, 0.into(), epoch.epoch_index);
let sign = keystore.sr25519_vrf_sign(AuthorityId::ID, &public, &data).unwrap().unwrap();
assert_eq!(pre_digest.vrf_signature.pre_output, sign.pre_output);
let pre_digest = match claim_slot(1.into(), &epoch, &keystore).unwrap().0 {
PreDigest::SecondaryVRF(d) => d,
v => panic!("Unexpected pre-digest variant {:?}", v),
};
let data = make_vrf_sign_data(&epoch.randomness, 1.into(), epoch.epoch_index);
let sign = keystore.sr25519_vrf_sign(AuthorityId::ID, &public, &data).unwrap().unwrap();
assert_eq!(pre_digest.vrf_signature.pre_output, sign.pre_output);
let slot = Slot::from(103);
let claim = match claim_slot(slot, &epoch, &keystore).unwrap().0 {
PreDigest::Primary(d) => d,
v => panic!("Unexpected claim variant {:?}", v),
};
let fixed_epoch = epoch.clone_for_slot(slot);
let data = make_vrf_sign_data(&epoch.randomness, slot, fixed_epoch.epoch_index);
let sign = keystore.sr25519_vrf_sign(AuthorityId::ID, &public, &data).unwrap().unwrap();
assert_eq!(fixed_epoch.epoch_index, 11);
assert_eq!(claim.vrf_signature.pre_output, sign.pre_output);
let slot = Slot::from(100);
let pre_digest = match claim_slot(slot, &epoch, &keystore).unwrap().0 {
PreDigest::SecondaryVRF(d) => d,
v => panic!("Unexpected claim variant {:?}", v),
};
let fixed_epoch = epoch.clone_for_slot(slot);
let data = make_vrf_sign_data(&epoch.randomness, slot, fixed_epoch.epoch_index);
let sign = keystore.sr25519_vrf_sign(AuthorityId::ID, &public, &data).unwrap().unwrap();
assert_eq!(fixed_epoch.epoch_index, 11);
assert_eq!(pre_digest.vrf_signature.pre_output, sign.pre_output);
}
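// Proposes a block on top of `parent` with a secondary-plain pre-digest for
// `slot` (defaulting to the parent's slot + 1), seals it with a locally
// generated authority key, imports it and returns the post-seal hash.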
async fn propose_and_import_block(
parent: &TestHeader,
slot: Option<Slot>,
proposer_factory: &mut DummyFactory,
block_import: &mut BoxBlockImport<TestBlock>,
) -> Hash {
let mut proposer = proposer_factory.init(parent).await.unwrap();
let slot = slot.unwrap_or_else(|| {
let parent_pre_digest = find_pre_digest::<TestBlock>(parent).unwrap();
parent_pre_digest.slot() + 1
});
let pre_digest = subsoil::runtime::generic::Digest {
logs: vec![Item::babe_pre_digest(PreDigest::SecondaryPlain(SecondaryPlainPreDigest {
authority_index: 0,
slot,
}))],
};
let parent_hash = parent.hash();
let mut block = proposer.propose_with(pre_digest).await.unwrap().block;
let epoch_descriptor = proposer_factory
.epoch_changes
.shared_data()
.epoch_descriptor_for_child_of(
descendent_query(&*proposer_factory.client),
&parent_hash,
*parent.number(),
slot,
)
.unwrap()
.unwrap();
let seal = {
let pair = AuthorityPair::from_seed(&[1; 32]);
let pre_hash = block.header.hash();
let signature = pair.sign(pre_hash.as_ref());
Item::babe_seal(signature)
};
let post_hash = {
block.header.digest_mut().push(seal.clone());
let h = block.header.hash();
block.header.digest_mut().pop();
h
};
let mut import = BlockImportParams::new(BlockOrigin::Own, block.header);
import.post_digests.push(seal);
import.body = Some(block.extrinsics);
import
.insert_intermediate(INTERMEDIATE_KEY, BabeIntermediate::<TestBlock> { epoch_descriptor });
import.fork_choice = Some(ForkChoiceStrategy::LongestChain);
let import_result = block_import.import_block(import).await.unwrap();
match import_result {
ImportResult::Imported(_) => {},
_ => panic!("expected block to be imported"),
}
post_hash
}
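// Proposes and imports `n` consecutive blocks starting from `parent_hash`,
// returning their hashes.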
async fn propose_and_import_blocks(
client: &PeersFullClient,
proposer_factory: &mut DummyFactory,
block_import: &mut BoxBlockImport<TestBlock>,
parent_hash: Hash,
n: usize,
) -> Vec<Hash> {
let mut hashes = Vec::with_capacity(n);
let mut parent_header = client.header(parent_hash).unwrap().unwrap();
for _ in 0..n {
let block_hash =
propose_and_import_block(&parent_header, None, proposer_factory, block_import).await;
hashes.push(block_hash);
parent_header = client.header(block_hash).unwrap().unwrap();
}
hashes
}
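// Importing block #1 registers the genesis epoch: the epoch data resolved for
// its children must equal `Epoch::genesis` at the slot of block #1.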
#[tokio::test]
async fn importing_block_one_sets_genesis_epoch() {
let mut net = BabeTestNet::new(1);
let peer = net.peer(0);
let data = peer.data.as_ref().expect("babe link set up during initialization");
let client = peer.client().as_client();
let mut proposer_factory = DummyFactory {
client: client.clone(),
epoch_changes: data.link.epoch_changes.clone(),
mutator: Arc::new(|_, _| ()),
};
let mut block_import = data.block_import.lock().take().expect("import set up during init");
let genesis_header = client.header(client.chain_info().genesis_hash).unwrap().unwrap();
let block_hash = propose_and_import_block(
&genesis_header,
Some(999.into()),
&mut proposer_factory,
&mut block_import,
)
.await;
let genesis_epoch = Epoch::genesis(&data.link.config, 999.into());
let epoch_changes = data.link.epoch_changes.shared_data();
let epoch_for_second_block = epoch_changes
.epoch_data_for_child_of(descendent_query(&*client), &block_hash, 1, 1000.into(), |slot| {
Epoch::genesis(&data.link.config, slot)
})
.unwrap()
.unwrap();
assert_eq!(epoch_for_second_block, genesis_epoch);
}
#[tokio::test]
async fn revert_prunes_epoch_changes_and_removes_weights() {
let mut net = BabeTestNet::new(1);
let peer = net.peer(0);
let data = peer.data.as_ref().expect("babe link set up during initialization");
let client = peer.client().as_client();
let backend = peer.client().as_backend();
let mut block_import = data.block_import.lock().take().expect("import set up during init");
let epoch_changes = data.link.epoch_changes.clone();
let mut proposer_factory = DummyFactory {
client: client.clone(),
epoch_changes: data.link.epoch_changes.clone(),
mutator: Arc::new(|_, _| ()),
};
let canon = propose_and_import_blocks(
&client,
&mut proposer_factory,
&mut block_import,
client.chain_info().genesis_hash,
21,
)
.await;
let fork1 =
propose_and_import_blocks(&client, &mut proposer_factory, &mut block_import, canon[0], 10)
.await;
let fork2 =
propose_and_import_blocks(&client, &mut proposer_factory, &mut block_import, canon[7], 10)
.await;
let fork3 =
propose_and_import_blocks(&client, &mut proposer_factory, &mut block_import, canon[11], 8)
.await;
assert_eq!(epoch_changes.shared_data().tree().iter().count(), 8);
assert_eq!(epoch_changes.shared_data().tree().roots().count(), 1);
revert(client.clone(), backend, 11).expect("revert should work for baked test scenario");
let actual_nodes =
aux_schema::load_epoch_changes::<Block, TestClient>(&*client, &data.link.config)
.expect("load epoch changes")
.shared_data()
.tree()
.iter()
.map(|(h, _, _)| *h)
.collect::<Vec<_>>();
let expected_nodes = vec![canon[0], canon[6], fork2[4], fork1[5]];
assert_eq!(actual_nodes, expected_nodes);
let weight_data_check = |hashes: &[Hash], expected: bool| {
hashes.iter().all(|hash| {
aux_schema::load_block_weight(&*client, hash).unwrap().is_some() == expected
})
};
assert!(weight_data_check(&canon[..10], true));
assert!(weight_data_check(&canon[10..], false));
assert!(weight_data_check(&fork1, true));
assert!(weight_data_check(&fork2, true));
assert!(weight_data_check(&fork3, false));
}
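// Reverting must never cross the last finalized block: after finalizing
// canon[2], a revert of up to 100 blocks must leave all weight data intact.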
#[tokio::test]
async fn revert_not_allowed_for_finalized() {
let mut net = BabeTestNet::new(1);
let peer = net.peer(0);
let data = peer.data.as_ref().expect("babe link set up during initialization");
let client = peer.client().as_client();
let backend = peer.client().as_backend();
let mut block_import = data.block_import.lock().take().expect("import set up during init");
let mut proposer_factory = DummyFactory {
client: client.clone(),
epoch_changes: data.link.epoch_changes.clone(),
mutator: Arc::new(|_, _| ()),
};
let canon = propose_and_import_blocks(
&client,
&mut proposer_factory,
&mut block_import,
client.chain_info().genesis_hash,
3,
)
.await;
client.finalize_block(canon[2], None, false).unwrap();
revert(client.clone(), backend, 100).expect("revert should work for baked test scenario");
let weight_data_check = |hashes: &[Hash], expected: bool| {
hashes.iter().all(|hash| {
aux_schema::load_block_weight(&*client, hash).unwrap().is_some() == expected
})
};
assert!(weight_data_check(&canon, true));
}
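// As blocks containing epoch changes get finalized, branches of the
// epoch-changes tree that can no longer be reached must be pruned away.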
#[tokio::test]
async fn importing_epoch_change_block_prunes_tree() {
let mut net = BabeTestNet::new(1);
let peer = net.peer(0);
let data = peer.data.as_ref().expect("babe link set up during initialization");
let client = peer.client().as_client();
let mut block_import = data.block_import.lock().take().expect("import set up during init");
let epoch_changes = data.link.epoch_changes.clone();
let mut proposer_factory = DummyFactory {
client: client.clone(),
epoch_changes: data.link.epoch_changes.clone(),
mutator: Arc::new(|_, _| ()),
};
let canon = propose_and_import_blocks(
&client,
&mut proposer_factory,
&mut block_import,
client.chain_info().genesis_hash,
30,
)
.await;
let fork_1 =
propose_and_import_blocks(&client, &mut proposer_factory, &mut block_import, canon[0], 10)
.await;
let fork_2 =
propose_and_import_blocks(&client, &mut proposer_factory, &mut block_import, canon[12], 15)
.await;
let fork_3 =
propose_and_import_blocks(&client, &mut proposer_factory, &mut block_import, canon[18], 10)
.await;
assert_eq!(epoch_changes.shared_data().tree().iter().count(), 9);
assert_eq!(epoch_changes.shared_data().tree().roots().count(), 1);
client.finalize_block(canon[12], None, false).unwrap();
propose_and_import_blocks(
&client,
&mut proposer_factory,
&mut block_import,
client.chain_info().best_hash,
7,
)
.await;
let nodes: Vec<_> = epoch_changes.shared_data().tree().iter().map(|(h, _, _)| *h).collect();
assert!(!nodes.iter().any(|h| fork_1.contains(h)));
assert!(nodes.iter().any(|h| fork_2.contains(h)));
assert!(nodes.iter().any(|h| fork_3.contains(h)));
client.finalize_block(canon[24], None, false).unwrap();
propose_and_import_blocks(
&client,
&mut proposer_factory,
&mut block_import,
client.chain_info().best_hash,
8,
)
.await;
let nodes: Vec<_> = epoch_changes.shared_data().tree().iter().map(|(h, _, _)| *h).collect();
assert!(!nodes.iter().any(|h| fork_2.contains(h)));
assert!(!nodes.iter().any(|h| fork_3.contains(h)));
assert!(nodes.iter().any(|h| *h == canon[18]));
assert!(nodes.iter().any(|h| *h == canon[24]));
}
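// A child block authored in the same slot as its parent must be rejected.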
#[tokio::test]
#[should_panic(expected = "Slot number must increase: parent slot: 999, this slot: 999")]
async fn verify_slots_are_strictly_increasing() {
let mut net = BabeTestNet::new(1);
let peer = net.peer(0);
let data = peer.data.as_ref().expect("babe link set up during initialization");
let client = peer.client().as_client();
let mut block_import = data.block_import.lock().take().expect("import set up during init");
let mut proposer_factory = DummyFactory {
client: client.clone(),
epoch_changes: data.link.epoch_changes.clone(),
mutator: Arc::new(|_, _| ()),
};
let genesis_header = client.header(client.chain_info().genesis_hash).unwrap().unwrap();
let b1 = propose_and_import_block(
&genesis_header,
Some(999.into()),
&mut proposer_factory,
&mut block_import,
)
.await;
let b1 = client.header(b1).unwrap().unwrap();
propose_and_import_block(&b1, Some(999.into()), &mut proposer_factory, &mut block_import).await;
}
#[tokio::test]
async fn obsolete_blocks_aux_data_cleanup() {
let mut net = BabeTestNet::new(1);
let peer = net.peer(0);
let data = peer.data.as_ref().expect("babe link set up during initialization");
let client = peer.client().as_client();
let client_clone = client.clone();
let on_finality = move |summary: &FinalityNotification<TestBlock>| {
aux_storage_cleanup(client_clone.as_ref(), summary)
};
client.register_finality_action(Box::new(on_finality));
let mut proposer_factory = DummyFactory {
client: client.clone(),
epoch_changes: data.link.epoch_changes.clone(),
mutator: Arc::new(|_, _| ()),
};
let mut block_import = data.block_import.lock().take().expect("import set up during init");
let aux_data_check = |hashes: &[Hash], expected: bool| {
hashes.iter().all(|hash| {
aux_schema::load_block_weight(&*peer.client().as_backend(), hash)
.unwrap()
.is_some() == expected
})
};
let fork1_hashes = propose_and_import_blocks(
&client,
&mut proposer_factory,
&mut block_import,
client.chain_info().genesis_hash,
4,
)
.await;
let fork2_hashes = propose_and_import_blocks(
&client,
&mut proposer_factory,
&mut block_import,
client.chain_info().genesis_hash,
2,
)
.await;
let fork3_hashes = propose_and_import_blocks(
&client,
&mut proposer_factory,
&mut block_import,
fork1_hashes[2],
2,
)
.await;
assert!(aux_data_check(&[client.chain_info().genesis_hash], false));
assert!(aux_data_check(&fork1_hashes, true));
assert!(aux_data_check(&fork2_hashes, true));
assert!(aux_data_check(&fork3_hashes, true));
client.finalize_block(fork1_hashes[2], None, true).unwrap();
assert!(aux_data_check(&fork1_hashes[..2], false));
assert!(aux_data_check(&fork1_hashes[2..], true));
assert!(aux_data_check(&fork2_hashes, false));
assert!(aux_data_check(&fork3_hashes, true));
client.finalize_block(fork1_hashes[3], None, true).unwrap();
assert!(aux_data_check(&fork1_hashes[2..3], false));
assert!(aux_data_check(&fork1_hashes[3..], true));
assert!(aux_data_check(&fork3_hashes, false));
}
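// Authoring can resume after an epoch with no blocks: the block authored at
// slot `epoch_length * 3 + 1` falls in epoch 3 even though no block was ever
// produced in epoch 2, the data announced for epoch 2 is reused for epoch 3,
// and the new block announces epoch 4.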
#[tokio::test]
async fn allows_skipping_epochs() {
let mut net = BabeTestNet::new(1);
let peer = net.peer(0);
let data = peer.data.as_ref().expect("babe link set up during initialization");
let client = peer.client().as_client();
let mut block_import = data.block_import.lock().take().expect("import set up during init");
let mut proposer_factory = DummyFactory {
client: client.clone(),
epoch_changes: data.link.epoch_changes.clone(),
mutator: Arc::new(|_, _| ()),
};
let epoch_changes = data.link.epoch_changes.clone();
let epoch_length = data.link.config.epoch_length;
let blocks = propose_and_import_blocks(
&client,
&mut proposer_factory,
&mut block_import,
client.chain_info().genesis_hash,
epoch_length as usize + 1,
)
.await;
let epoch0 = epoch_changes
.shared_data()
.epoch(&EpochIdentifier {
position: EpochIdentifierPosition::Genesis0,
hash: blocks[0],
number: 1,
})
.unwrap()
.clone();
assert_eq!(epoch0.epoch_index, 0);
assert_eq!(epoch0.start_slot, Slot::from(1));
let epoch1 = epoch_changes
.shared_data()
.epoch(&EpochIdentifier {
position: EpochIdentifierPosition::Genesis1,
hash: blocks[0],
number: 1,
})
.unwrap()
.clone();
assert_eq!(epoch1.epoch_index, 1);
assert_eq!(epoch1.start_slot, Slot::from(epoch_length + 1));
let epoch2 = epoch_changes
.shared_data()
.epoch(&EpochIdentifier {
position: EpochIdentifierPosition::Regular,
hash: blocks[epoch_length as usize],
number: epoch_length + 1,
})
.unwrap()
.clone();
assert_eq!(epoch2.epoch_index, 2);
assert_eq!(epoch2.start_slot, Slot::from(epoch_length * 2 + 1));
let last_block = client.expect_header(*blocks.last().unwrap()).unwrap();
let block = propose_and_import_block(
&last_block,
Some((epoch_length * 3 + 1).into()),
&mut proposer_factory,
&mut block_import,
)
.await;
let epoch4 = epoch_changes
.shared_data()
.epoch(&EpochIdentifier {
position: EpochIdentifierPosition::Regular,
hash: block,
number: epoch_length + 2,
})
.unwrap()
.clone();
assert_eq!(epoch4.epoch_index, 4);
assert_eq!(epoch4.start_slot, Slot::from(epoch_length * 4 + 1));
let epoch3 = epoch_changes
.shared_data()
.epoch_data_for_child_of(
descendent_query(&*client),
&block,
epoch_length + 2,
(epoch_length * 3 + 2).into(),
|slot| Epoch::genesis(&data.link.config, slot),
)
.unwrap()
.unwrap();
assert_eq!(epoch3, epoch2);
let epoch4_ = epoch_changes
.shared_data()
.epoch_data_for_child_of(
descendent_query(&*client),
&block,
epoch_length + 2,
(epoch_length * 4 + 1).into(),
|slot| Epoch::genesis(&data.link.config, slot),
)
.unwrap()
.unwrap();
assert_eq!(epoch4, epoch4_);
}
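// Like the test above, but with two sibling blocks on top of the same parent:
// one authored in epoch 2 and one in epoch 3 (skipping epoch 2 on its fork).
// Epoch data must be resolved per fork, and on the non-skipping fork the
// regular epoch 3 announcement is used instead of the reused epoch 2 data.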
#[tokio::test]
async fn allows_skipping_epochs_on_some_forks() {
let mut net = BabeTestNet::new(1);
let peer = net.peer(0);
let data = peer.data.as_ref().expect("babe link set up during initialization");
let client = peer.client().as_client();
let mut block_import = data.block_import.lock().take().expect("import set up during init");
let mut proposer_factory = DummyFactory {
client: client.clone(),
epoch_changes: data.link.epoch_changes.clone(),
mutator: Arc::new(|_, _| ()),
};
let epoch_changes = data.link.epoch_changes.clone();
let epoch_length = data.link.config.epoch_length;
let blocks = propose_and_import_blocks(
&client,
&mut proposer_factory,
&mut block_import,
client.chain_info().genesis_hash,
epoch_length as usize + 1,
)
.await;
let last_block = client.expect_header(*blocks.last().unwrap()).unwrap();
let epoch2_block = propose_and_import_block(
&last_block,
Some((epoch_length * 2 + 1).into()),
&mut proposer_factory,
&mut block_import,
)
.await;
let epoch2 = epoch_changes
.shared_data()
.epoch_data_for_child_of(
descendent_query(&*client),
&epoch2_block,
epoch_length + 2,
(epoch_length * 2 + 2).into(),
|slot| Epoch::genesis(&data.link.config, slot),
)
.unwrap()
.unwrap();
let epoch3_block = propose_and_import_block(
&last_block,
Some((epoch_length * 3 + 1).into()),
&mut proposer_factory,
&mut block_import,
)
.await;
let epoch3_ = epoch_changes
.shared_data()
.epoch_data_for_child_of(
descendent_query(&*client),
&epoch3_block,
epoch_length + 2,
(epoch_length * 3 + 2).into(),
|slot| Epoch::genesis(&data.link.config, slot),
)
.unwrap()
.unwrap();
assert_eq!(epoch3_, epoch2);
let epoch_data = epoch_changes
.shared_data()
.epoch_data_for_child_of(
descendent_query(&*client),
&epoch3_block,
epoch_length + 2,
(epoch_length * 4 + 1).into(),
|slot| Epoch::genesis(&data.link.config, slot),
)
.unwrap()
.unwrap();
assert!(epoch_data != epoch3_);
let epoch_data = epoch_changes
.shared_data()
.epoch_data_for_child_of(
descendent_query(&*client),
&epoch2_block,
epoch_length + 2,
(epoch_length * 4 + 1).into(),
|slot| Epoch::genesis(&data.link.config, slot),
)
.unwrap()
.unwrap();
assert!(epoch_data != epoch3_);
let epoch3 = epoch_changes
.shared_data()
.epoch(&EpochIdentifier {
position: EpochIdentifierPosition::Regular,
hash: epoch2_block,
number: epoch_length + 2,
})
.unwrap()
.clone();
assert_eq!(epoch_data, epoch3);
}