1#![cfg_attr(not(feature = "std"), no_std)]
18
19extern crate alloc;
31
32use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec};
33use codec::{Decode, DecodeLimit, Encode};
34use core::{cmp, marker::PhantomData};
35use cumulus_primitives_core::{
36 relay_chain::{
37 self,
38 vstaging::{ClaimQueueOffset, CoreSelector, DEFAULT_CLAIM_QUEUE_OFFSET},
39 },
40 AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, GetChannelInfo,
41 InboundDownwardMessage, InboundHrmpMessage, ListChannelInfos, MessageSendError,
42 OutboundHrmpMessage, ParaId, PersistedValidationData, UpwardMessage, UpwardMessageSender,
43 XcmpMessageHandler, XcmpMessageSource,
44};
45use cumulus_primitives_parachain_inherent::{MessageQueueChain, ParachainInherentData};
46use frame_support::{
47 defensive,
48 dispatch::{DispatchResult, Pays, PostDispatchInfo},
49 ensure,
50 inherent::{InherentData, InherentIdentifier, ProvideInherent},
51 traits::{Get, HandleMessage},
52 weights::Weight,
53};
54use frame_system::{ensure_none, ensure_root, pallet_prelude::HeaderFor};
55use polkadot_parachain_primitives::primitives::RelayChainBlockNumber;
56use polkadot_runtime_parachains::FeeTracker;
57use scale_info::TypeInfo;
58use sp_core::U256;
59use sp_runtime::{
60 traits::{Block as BlockT, BlockNumberProvider, Hash, One},
61 BoundedSlice, FixedU128, RuntimeDebug, Saturating,
62};
63use xcm::{latest::XcmHash, VersionedLocation, VersionedXcm, MAX_XCM_DECODE_DEPTH};
64use xcm_builder::InspectMessageQueues;
65
66mod benchmarking;
67pub mod migration;
68mod mock;
69#[cfg(test)]
70mod tests;
71pub mod weights;
72
73pub use weights::WeightInfo;
74
75mod unincluded_segment;
76
77pub mod consensus_hook;
78pub mod relay_state_snapshot;
79#[macro_use]
80pub mod validate_block;
81
82use unincluded_segment::{
83 Ancestor, HrmpChannelUpdate, HrmpWatermarkUpdate, OutboundBandwidthLimits, SegmentTracker,
84 UsedBandwidth,
85};
86
87pub use consensus_hook::{ConsensusHook, ExpectParentIncluded};
88pub use cumulus_pallet_parachain_system_proc_macro::register_validate_block;
111pub use relay_state_snapshot::{MessagingStateSnapshot, RelayChainStateProof};
112
113pub use pallet::*;
114
/// Validates the relationship between the relay-chain block number of the current
/// parachain block and the one recorded for the previous parachain block.
///
/// Implementations are expected to abort block production (panic) when the pair of
/// numbers violates their ordering policy; see the provided implementations below.
pub trait CheckAssociatedRelayNumber {
	/// Check the relay-chain block number of the current block (`current`) against
	/// the one stored for the previous parachain block (`previous`).
	fn check_associated_relay_number(
		current: RelayChainBlockNumber,
		previous: RelayChainBlockNumber,
	);
}
132
133pub struct RelayNumberStrictlyIncreases;
138
139impl CheckAssociatedRelayNumber for RelayNumberStrictlyIncreases {
140 fn check_associated_relay_number(
141 current: RelayChainBlockNumber,
142 previous: RelayChainBlockNumber,
143 ) {
144 if current <= previous {
145 panic!("Relay chain block number needs to strictly increase between Parachain blocks!")
146 }
147 }
148}
149
150pub struct AnyRelayNumber;
155
156impl CheckAssociatedRelayNumber for AnyRelayNumber {
157 fn check_associated_relay_number(_: RelayChainBlockNumber, _: RelayChainBlockNumber) {}
158}
159
160pub struct RelayNumberMonotonicallyIncreases;
165
166impl CheckAssociatedRelayNumber for RelayNumberMonotonicallyIncreases {
167 fn check_associated_relay_number(
168 current: RelayChainBlockNumber,
169 previous: RelayChainBlockNumber,
170 ) {
171 if current < previous {
172 panic!("Relay chain block number needs to monotonically increase between Parachain blocks!")
173 }
174 }
175}
176
/// The max length of a DMP message, as dictated by the configured `DmpQueue` handler.
pub type MaxDmpMessageLenOf<T> = <<T as Config>::DmpQueue as HandleMessage>::MaxMessageLen;
179
180pub mod ump_constants {
181 use super::FixedU128;
182
183 pub const THRESHOLD_FACTOR: u32 = 2;
187 pub const EXPONENTIAL_FEE_BASE: FixedU128 = FixedU128::from_rational(105, 100); pub const MESSAGE_SIZE_FEE_BASE: FixedU128 = FixedU128::from_rational(1, 1000); }
193
/// Provides the core selector and claim-queue offset used during block building.
pub trait SelectCore {
	/// The core selector and claim-queue offset for the current block.
	fn selected_core() -> (CoreSelector, ClaimQueueOffset);
	/// The core selector and claim-queue offset for the next block.
	fn select_next_core() -> (CoreSelector, ClaimQueueOffset);
}
201
202pub struct DefaultCoreSelector<T>(PhantomData<T>);
204
205impl<T: frame_system::Config> SelectCore for DefaultCoreSelector<T> {
206 fn selected_core() -> (CoreSelector, ClaimQueueOffset) {
207 let core_selector: U256 = frame_system::Pallet::<T>::block_number().into();
208
209 (CoreSelector(core_selector.byte(0)), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET))
210 }
211
212 fn select_next_core() -> (CoreSelector, ClaimQueueOffset) {
213 let core_selector: U256 = (frame_system::Pallet::<T>::block_number() + One::one()).into();
214
215 (CoreSelector(core_selector.byte(0)), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET))
216 }
217}
218
219pub struct LookaheadCoreSelector<T>(PhantomData<T>);
221
222impl<T: frame_system::Config> SelectCore for LookaheadCoreSelector<T> {
223 fn selected_core() -> (CoreSelector, ClaimQueueOffset) {
224 let core_selector: U256 = frame_system::Pallet::<T>::block_number().into();
225
226 (CoreSelector(core_selector.byte(0)), ClaimQueueOffset(1))
227 }
228
229 fn select_next_core() -> (CoreSelector, ClaimQueueOffset) {
230 let core_selector: U256 = (frame_system::Pallet::<T>::block_number() + One::one()).into();
231
232 (CoreSelector(core_selector.byte(0)), ClaimQueueOffset(1))
233 }
234}
235
236#[frame_support::pallet]
237pub mod pallet {
238 use super::*;
239 use frame_support::pallet_prelude::*;
240 use frame_system::pallet_prelude::*;
241
	/// The pallet struct, expanded by the FRAME `#[pallet]` macros.
	#[pallet::pallet]
	#[pallet::storage_version(migration::STORAGE_VERSION)]
	#[pallet::without_storage_info]
	pub struct Pallet<T>(_);
246
	#[pallet::config]
	pub trait Config: frame_system::Config<OnSetCode = ParachainSetCode<Self>> {
		/// The overarching event type.
		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;

		/// Something which can be notified when the validation data is set
		/// and when a validation code upgrade is applied.
		type OnSystemEvent: OnSystemEvent;

		/// Returns the parachain ID we are running with.
		#[pallet::constant]
		type SelfParaId: Get<ParaId>;

		/// The place where outbound XCMP messages come from. This is queried in `on_finalize`.
		type OutboundXcmpMessageSource: XcmpMessageSource;

		/// Queues inbound downward messages for delayed processing.
		type DmpQueue: HandleMessage;

		/// The weight we reserve at the beginning of the block for processing DMP messages.
		type ReservedDmpWeight: Get<Weight>;

		/// The message handler that will be invoked when messages are received via XCMP.
		type XcmpMessageHandler: XcmpMessageHandler;

		/// The weight we reserve at the beginning of the block for processing XCMP messages.
		type ReservedXcmpWeight: Get<Weight>;

		/// Something that can check the ordering of the relay-chain block numbers
		/// associated with consecutive parachain blocks.
		type CheckAssociatedRelayNumber: CheckAssociatedRelayNumber;

		/// Weight information for this pallet's dispatchables.
		type WeightInfo: WeightInfo;

		/// An entry-point for higher-level logic to manage the backlog of unincluded
		/// parachain blocks (queried via `on_state_proof` in `set_validation_data`).
		type ConsensusHook: ConsensusHook;

		/// Provides the core selector / claim queue offset for block building.
		type SelectCore: SelectCore;
	}
300
	#[pallet::hooks]
	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
		fn on_finalize(_: BlockNumberFor<T>) {
			// Clear per-block flags/signals; the go-ahead signal is consumed below
			// when recording the unincluded-segment ancestor.
			<DidSetValidationCode<T>>::kill();
			<UpgradeRestrictionSignal<T>>::kill();
			let relay_upgrade_go_ahead = <UpgradeGoAhead<T>>::take();

			// The inherent must have run; otherwise the block is invalid.
			let vfp = <ValidationData<T>>::get()
				.expect("set_validation_data inherent needs to be present in every block!");

			LastRelayChainBlockNumber::<T>::put(vfp.relay_parent_number);

			let host_config = match HostConfiguration::<T>::get() {
				Some(ok) => ok,
				None => {
					debug_assert!(
						false,
						"host configuration is promised to set until `on_finalize`; qed",
					);
					return
				},
			};

			// Capture the total outbound bandwidth limits BEFORE the messaging state
			// is mutated below; these bound the unincluded-segment update.
			let total_bandwidth_out = match RelevantMessagingState::<T>::get() {
				Some(s) => OutboundBandwidthLimits::from_relay_chain_state(&s),
				None => {
					debug_assert!(
						false,
						"relevant messaging state is promised to be set until `on_finalize`; \
							qed",
					);
					return
				},
			};

			// Subtract bandwidth already consumed by the unincluded segment from the
			// egress limits stored in `RelevantMessagingState`.
			Self::adjust_egress_bandwidth_limits();

			// Move as many pending upward messages into `UpwardMessages` as the relay
			// chain's remaining queue capacity (count and bytes) allows.
			let (ump_msg_count, ump_total_bytes) = <PendingUpwardMessages<T>>::mutate(|up| {
				let (available_capacity, available_size) = match RelevantMessagingState::<T>::get()
				{
					Some(limits) => (
						limits.relay_dispatch_queue_remaining_capacity.remaining_count,
						limits.relay_dispatch_queue_remaining_capacity.remaining_size,
					),
					None => {
						debug_assert!(
							false,
							"relevant messaging state is promised to be set until `on_finalize`; \
								qed",
						);
						return (0, 0)
					},
				};

				// Also respect the per-candidate message count limit.
				let available_capacity =
					cmp::min(available_capacity, host_config.max_upward_message_num_per_candidate);

				// Count the longest prefix of `up` that fits both the count and the
				// byte budget; `scan` carries (messages used, bytes used).
				let (num, total_size) = up
					.iter()
					.scan((0u32, 0u32), |state, msg| {
						let (cap_used, size_used) = *state;
						let new_cap = cap_used.saturating_add(1);
						let new_size = size_used.saturating_add(msg.len() as u32);
						match available_capacity
							.checked_sub(new_cap)
							.and(available_size.checked_sub(new_size))
						{
							Some(_) => {
								*state = (new_cap, new_size);
								Some(*state)
							},
							_ => None,
						}
					})
					.last()
					.unwrap_or_default();

				// Publish the selected prefix and keep the remainder pending.
				UpwardMessages::<T>::put(&up[..num as usize]);
				*up = up.split_off(num as usize);

				#[cfg(feature = "experimental-ump-signals")]
				Self::send_ump_signal();

				// If the queue drained to at most half (`THRESHOLD_FACTOR`) of its
				// maximum size, decay the delivery fee factor.
				let threshold = host_config
					.max_upward_queue_size
					.saturating_div(ump_constants::THRESHOLD_FACTOR);
				let remaining_total_size: usize = up.iter().map(UpwardMessage::len).sum();
				if remaining_total_size <= threshold as usize {
					Self::decrease_fee_factor(());
				}

				(num, total_size)
			});

			// HRMP channel budget for this candidate: the minimum of the host limit
			// and what was announced in `on_initialize`.
			let maximum_channels = host_config
				.hrmp_max_message_num_per_candidate
				.min(<AnnouncedHrmpMessagesPerCandidate<T>>::take())
				as usize;

			let outbound_messages =
				T::OutboundXcmpMessageSource::take_outbound_messages(maximum_channels)
					.into_iter()
					.map(|(recipient, data)| OutboundHrmpMessage { recipient, data })
					.collect::<Vec<_>>();

			// Record this block as a new ancestor of the unincluded segment, charging
			// the bandwidth (UMP + HRMP) it consumed.
			{
				let hrmp_outgoing = outbound_messages
					.iter()
					.map(|msg| {
						(
							msg.recipient,
							HrmpChannelUpdate { msg_count: 1, total_bytes: msg.data.len() as u32 },
						)
					})
					.collect();
				let used_bandwidth =
					UsedBandwidth { ump_msg_count, ump_total_bytes, hrmp_outgoing };

				let mut aggregated_segment =
					AggregatedUnincludedSegment::<T>::get().unwrap_or_default();
				// Only one block in the segment may consume the go-ahead signal.
				let consumed_go_ahead_signal =
					if aggregated_segment.consumed_go_ahead_signal().is_some() {
						None
					} else {
						relay_upgrade_go_ahead
					};
				// The para head hash is filled in later, during `on_initialize` of the
				// next block (see `replace_para_head_hash` there).
				let ancestor = Ancestor::new_unchecked(used_bandwidth, consumed_go_ahead_signal);

				let watermark = HrmpWatermark::<T>::get();
				let watermark_update = HrmpWatermarkUpdate::new(watermark, vfp.relay_parent_number);

				// Appending panics if limits are exceeded — producing such a block
				// would be invalid anyway.
				aggregated_segment
					.append(&ancestor, watermark_update, &total_bandwidth_out)
					.expect("unincluded segment limits exceeded");
				AggregatedUnincludedSegment::<T>::put(aggregated_segment);
				UnincludedSegment::<T>::append(ancestor);
			}
			HrmpOutboundMessages::<T>::put(outbound_messages);
		}

		fn on_initialize(_n: BlockNumberFor<T>) -> Weight {
			let mut weight = Weight::zero();

			// Only wipe `NewValidationCode` if it was NOT set in the current block
			// (i.e. it is stale from a previous block).
			if !<DidSetValidationCode<T>>::get() {
				NewValidationCode::<T>::kill();
				weight += T::DbWeight::get().writes(1);
			}

			{
				// Now that the parent hash is known, back-fill it into the ancestor
				// recorded for the previous block in `on_finalize`.
				<UnincludedSegment<T>>::mutate(|chain| {
					if let Some(ancestor) = chain.last_mut() {
						let parent = frame_system::Pallet::<T>::parent_hash();
						ancestor.replace_para_head_hash(parent);
					}
				});
				weight += T::DbWeight::get().reads_writes(1, 1);

				// NOTE(review): additional weight accounting whose rationale comment
				// was stripped in this copy; values preserved as-is.
				weight += T::DbWeight::get().reads_writes(3, 2);
			}

			// Remove data that is only valid for a single block; it will be freshly
			// set by this block's `set_validation_data` inherent / finalization.
			ValidationData::<T>::kill();
			ProcessedDownwardMessages::<T>::kill();
			HrmpWatermark::<T>::kill();
			UpwardMessages::<T>::kill();
			HrmpOutboundMessages::<T>::kill();
			CustomValidationHeadData::<T>::kill();

			weight += T::DbWeight::get().writes(6);

			weight += T::DbWeight::get().reads_writes(1, 1);
			// Announce how many HRMP messages this candidate may send, so collators
			// can rely on it; consumed again in `on_finalize`.
			let hrmp_max_message_num_per_candidate = HostConfiguration::<T>::get()
				.map(|cfg| cfg.hrmp_max_message_num_per_candidate)
				.unwrap_or(0);
			<AnnouncedHrmpMessagesPerCandidate<T>>::put(hrmp_max_message_num_per_candidate);

			// Reserve weight proportional to the announced channel budget.
			weight += T::DbWeight::get().reads_writes(
				3 + hrmp_max_message_num_per_candidate as u64,
				4 + hrmp_max_message_num_per_candidate as u64,
			);

			// NOTE(review): the following three accruals correspond to operations
			// documented upstream; the explanatory comments were stripped here and
			// the values are preserved unchanged.
			weight += T::DbWeight::get().reads_writes(1, 1);

			weight += T::DbWeight::get().reads_writes(6, 3);

			weight += T::DbWeight::get().reads(1);

			weight
		}
	}
556
	#[pallet::call]
	impl<T: Config> Pallet<T> {
		/// Set the current validation data.
		///
		/// This should be invoked exactly once per block as an inherent; `on_finalize`
		/// panics if no validation data was set. As a side effect this applies or
		/// discards a pending validation-code upgrade depending on the relay chain's
		/// go-ahead signal, and enqueues the inbound DMP/HRMP messages.
		///
		/// The dispatch origin for this call must be `None` (inherent).
		#[pallet::call_index(0)]
		#[pallet::weight((0, DispatchClass::Mandatory))]
		pub fn set_validation_data(
			origin: OriginFor<T>,
			data: ParachainInherentData,
		) -> DispatchResultWithPostInfo {
			ensure_none(origin)?;
			// The inherent may only run once per block.
			assert!(
				!<ValidationData<T>>::exists(),
				"ValidationData must be updated only once in a block",
			);

			let mut total_weight = Weight::zero();

			let ParachainInherentData {
				validation_data: vfp,
				relay_chain_state,
				downward_messages,
				horizontal_messages,
			} = data;

			// Panics if the relay-parent number violates the configured ordering policy.
			T::CheckAssociatedRelayNumber::check_associated_relay_number(
				vfp.relay_parent_number,
				LastRelayChainBlockNumber::<T>::get(),
			);

			// Verify the relay state proof against the relay parent's storage root.
			let relay_state_proof = RelayChainStateProof::new(
				T::SelfParaId::get(),
				vfp.relay_parent_storage_root,
				relay_chain_state.clone(),
			)
			.expect("Invalid relay chain state proof");

			// The consensus hook inspects the proof and reports how much
			// unincluded-segment capacity is available.
			let (consensus_hook_weight, capacity) =
				T::ConsensusHook::on_state_proof(&relay_state_proof);
			total_weight += consensus_hook_weight;
			total_weight += Self::maybe_drop_included_ancestors(&relay_state_proof, capacity);
			// Deposit a digest log with the relay-parent storage root and number.
			frame_system::Pallet::<T>::deposit_log(
				cumulus_primitives_core::rpsr_digest::relay_parent_storage_root_item(
					vfp.relay_parent_storage_root,
					vfp.relay_parent_number,
				),
			);

			let upgrade_go_ahead_signal = relay_state_proof
				.read_upgrade_go_ahead_signal()
				.expect("Invalid upgrade go ahead signal");

			// If a block in the unincluded segment already consumed a go-ahead
			// signal, the relay chain must still be reporting the same signal.
			let upgrade_signal_in_segment = AggregatedUnincludedSegment::<T>::get()
				.as_ref()
				.and_then(SegmentTracker::consumed_go_ahead_signal);
			if let Some(signal_in_segment) = upgrade_signal_in_segment.as_ref() {
				assert_eq!(upgrade_go_ahead_signal, Some(*signal_in_segment));
			}
			match upgrade_go_ahead_signal {
				// Already handled by an ancestor in the unincluded segment: no-op.
				Some(_signal) if upgrade_signal_in_segment.is_some() => {
				},
				Some(relay_chain::UpgradeGoAhead::GoAhead) => {
					// Apply the pending validation code now.
					assert!(
						<PendingValidationCode<T>>::exists(),
						"No new validation function found in storage, GoAhead signal is not expected",
					);
					let validation_code = <PendingValidationCode<T>>::take();

					frame_system::Pallet::<T>::update_code_in_storage(&validation_code);
					<T::OnSystemEvent as OnSystemEvent>::on_validation_code_applied();
					Self::deposit_event(Event::ValidationFunctionApplied {
						relay_chain_block_num: vfp.relay_parent_number,
					});
				},
				Some(relay_chain::UpgradeGoAhead::Abort) => {
					// The relay chain rejected the upgrade; discard the pending code.
					<PendingValidationCode<T>>::kill();
					Self::deposit_event(Event::ValidationFunctionDiscarded);
				},
				None => {},
			}
			<UpgradeRestrictionSignal<T>>::put(
				relay_state_proof
					.read_upgrade_restriction_signal()
					.expect("Invalid upgrade restriction signal"),
			);
			<UpgradeGoAhead<T>>::put(upgrade_go_ahead_signal);

			let host_config = relay_state_proof
				.read_abridged_host_configuration()
				.expect("Invalid host configuration in relay chain state proof");

			let relevant_messaging_state = relay_state_proof
				.read_messaging_state_snapshot(&host_config)
				.expect("Invalid messaging state in relay chain state proof");

			<ValidationData<T>>::put(&vfp);
			<RelayStateProof<T>>::put(relay_chain_state);
			<RelevantMessagingState<T>>::put(relevant_messaging_state.clone());
			<HostConfiguration<T>>::put(host_config);

			<T::OnSystemEvent as OnSystemEvent>::on_validation_data(&vfp);

			// Enqueue the inbound DMP and HRMP messages carried by the inherent.
			total_weight.saturating_accrue(Self::enqueue_inbound_downward_messages(
				relevant_messaging_state.dmq_mqc_head,
				downward_messages,
			));
			total_weight.saturating_accrue(Self::enqueue_inbound_horizontal_messages(
				&relevant_messaging_state.ingress_channels,
				horizontal_messages,
				vfp.relay_parent_number,
			));

			Ok(PostDispatchInfo { actual_weight: Some(total_weight), pays_fee: Pays::No })
		}

		/// Root-only: directly send an upward message to the relay chain.
		#[pallet::call_index(1)]
		#[pallet::weight((1_000, DispatchClass::Operational))]
		pub fn sudo_send_upward_message(
			origin: OriginFor<T>,
			message: UpwardMessage,
		) -> DispatchResult {
			ensure_root(origin)?;
			// Send errors are intentionally ignored for this root convenience call.
			let _ = Self::send_upward_message(message);
			Ok(())
		}

	}
712
	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config> {
		/// The validation function has been scheduled to apply.
		ValidationFunctionStored,
		/// The validation function was applied as of the contained relay chain block number.
		ValidationFunctionApplied { relay_chain_block_num: RelayChainBlockNumber },
		/// The relay-chain aborted the upgrade process.
		ValidationFunctionDiscarded,
		/// Some downward messages have been received and will be processed.
		DownwardMessagesReceived { count: u32 },
		/// Downward messages were processed using the given weight.
		DownwardMessagesProcessed { weight_used: Weight, dmq_head: relay_chain::Hash },
		/// An upward message was sent to the relay chain.
		UpwardMessageSent { message_hash: Option<XcmHash> },
	}
729
	#[pallet::error]
	pub enum Error<T> {
		/// Attempt to upgrade validation function while existing upgrade pending.
		OverlappingUpgrades,
		/// Polkadot currently prohibits this parachain from upgrading its validation function.
		ProhibitedByPolkadot,
		/// The supplied validation function exceeds the maximum code size the relay
		/// chain is willing to accept.
		TooBig,
		/// The inherent which supplies the validation data did not run this block.
		ValidationDataNotAvailable,
		/// The inherent which supplies the host configuration did not run this block.
		HostConfigurationNotAvailable,
		/// No validation function upgrade is currently scheduled.
		NotScheduled,
		/// No code upgrade has been authorized.
		NothingAuthorized,
		/// The given code upgrade has not been authorized.
		Unauthorized,
	}
750
	/// Ancestors of the currently executing block that have not yet been observed
	/// as included in the relay-chain state (the "unincluded segment").
	#[pallet::storage]
	pub type UnincludedSegment<T: Config> = StorageValue<_, Vec<Ancestor<T::Hash>>, ValueQuery>;

	/// Tracks the bandwidth used by the unincluded segment along with the latest
	/// HRMP watermark; used to limit acceptance of new blocks with respect to
	/// relay-chain constraints.
	#[pallet::storage]
	pub type AggregatedUnincludedSegment<T: Config> =
		StorageValue<_, SegmentTracker<T::Hash>, OptionQuery>;

	/// In case of a scheduled upgrade, the validation code waiting to be applied
	/// once the relay chain signals `GoAhead`.
	#[pallet::storage]
	pub type PendingValidationCode<T: Config> = StorageValue<_, Vec<u8>, ValueQuery>;

	/// Validation code set this block, to be communicated to the collator (and
	/// consequently the relay chain) via `collect_collation_info`.
	#[pallet::storage]
	pub type NewValidationCode<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;

	/// The [`PersistedValidationData`] set for this block via the inherent.
	/// Cleared in `on_initialize` of the next block.
	#[pallet::storage]
	pub type ValidationData<T: Config> = StorageValue<_, PersistedValidationData>;

	/// Whether `NewValidationCode` was set in the current block (guards against
	/// `on_initialize` wiping a freshly set code).
	#[pallet::storage]
	pub type DidSetValidationCode<T: Config> = StorageValue<_, bool, ValueQuery>;

	/// The relay-chain block number associated with the last parachain block.
	#[pallet::storage]
	pub type LastRelayChainBlockNumber<T: Config> =
		StorageValue<_, RelayChainBlockNumber, ValueQuery>;

	/// The relay chain's current restriction (if any) on signalling a validation
	/// code upgrade, as read from the relay state proof.
	#[pallet::storage]
	pub type UpgradeRestrictionSignal<T: Config> =
		StorageValue<_, Option<relay_chain::UpgradeRestriction>, ValueQuery>;

	/// The relay chain's go-ahead signal (if any) for the pending code upgrade,
	/// as read from the relay state proof. Consumed in `on_finalize`.
	#[pallet::storage]
	pub type UpgradeGoAhead<T: Config> =
		StorageValue<_, Option<relay_chain::UpgradeGoAhead>, ValueQuery>;

	/// The relay-chain state proof provided with the inherent for this block.
	#[pallet::storage]
	pub type RelayStateProof<T: Config> = StorageValue<_, sp_trie::StorageProof>;

	/// The snapshot of the relay-chain messaging state relevant to this parachain
	/// (DMQ head, remaining UMP capacity, ingress/egress channels).
	#[pallet::storage]
	pub type RelevantMessagingState<T: Config> = StorageValue<_, MessagingStateSnapshot>;

	/// The abridged host configuration read from the relay state proof.
	#[pallet::storage]
	#[pallet::disable_try_decode_storage]
	pub type HostConfiguration<T: Config> = StorageValue<_, AbridgedHostConfiguration>;

	/// The last downward message queue chain (MQC) head we have observed.
	#[pallet::storage]
	pub type LastDmqMqcHead<T: Config> = StorageValue<_, MessageQueueChain, ValueQuery>;

	/// The last HRMP MQC heads we have observed, per sender para.
	#[pallet::storage]
	pub type LastHrmpMqcHeads<T: Config> =
		StorageValue<_, BTreeMap<ParaId, MessageQueueChain>, ValueQuery>;

	/// Number of downward messages processed in this block (reported to the
	/// collator via `collect_collation_info`).
	#[pallet::storage]
	pub type ProcessedDownwardMessages<T: Config> = StorageValue<_, u32, ValueQuery>;

	/// HRMP watermark recorded for this block.
	#[pallet::storage]
	pub type HrmpWatermark<T: Config> = StorageValue<_, relay_chain::BlockNumber, ValueQuery>;

	/// Outbound HRMP messages published in this block.
	#[pallet::storage]
	pub type HrmpOutboundMessages<T: Config> =
		StorageValue<_, Vec<OutboundHrmpMessage>, ValueQuery>;

	/// Upward messages published in this block (selected from the pending queue).
	#[pallet::storage]
	pub type UpwardMessages<T: Config> = StorageValue<_, Vec<UpwardMessage>, ValueQuery>;

	/// Upward messages queued for sending in a future block.
	#[pallet::storage]
	pub type PendingUpwardMessages<T: Config> = StorageValue<_, Vec<UpwardMessage>, ValueQuery>;

	/// Initial value (1.0) for the upward-message delivery fee factor.
	#[pallet::type_value]
	pub fn UpwardInitialDeliveryFeeFactor() -> FixedU128 {
		FixedU128::from_u32(1)
	}

	/// The multiplicative factor applied to upward-message delivery fees; grown
	/// and decayed via the [`FeeTracker`] implementation below.
	#[pallet::storage]
	pub type UpwardDeliveryFeeFactor<T: Config> =
		StorageValue<_, FixedU128, ValueQuery, UpwardInitialDeliveryFeeFactor>;

	/// The number of HRMP messages we announced (in `on_initialize`) that this
	/// candidate may send; consumed again in `on_finalize`.
	#[pallet::storage]
	pub type AnnouncedHrmpMessagesPerCandidate<T: Config> = StorageValue<_, u32, ValueQuery>;

	/// Optional override for the weight reserved for XCMP message processing
	/// (falls back to `T::ReservedXcmpWeight` when unset).
	#[pallet::storage]
	pub type ReservedXcmpWeightOverride<T: Config> = StorageValue<_, Weight>;

	/// Optional override for the weight reserved for DMP message processing
	/// (falls back to `T::ReservedDmpWeight` when unset).
	#[pallet::storage]
	pub type ReservedDmpWeightOverride<T: Config> = StorageValue<_, Weight>;

	/// Custom head data to report via `collect_collation_info` instead of the
	/// encoded block header. Cleared every block in `on_initialize`.
	#[pallet::storage]
	pub type CustomValidationHeadData<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;
925
	#[pallet::inherent]
	impl<T: Config> ProvideInherent for Pallet<T> {
		type Call = Call<T>;
		type Error = sp_inherents::MakeFatalError<()>;
		const INHERENT_IDENTIFIER: InherentIdentifier =
			cumulus_primitives_parachain_inherent::INHERENT_IDENTIFIER;

		fn create_inherent(data: &InherentData) -> Option<Self::Call> {
			let mut data: ParachainInherentData =
				data.get_data(&Self::INHERENT_IDENTIFIER).ok().flatten().expect(
					"validation function params are always injected into inherent data; qed",
				);

			// Strip messages already handled by earlier blocks before dispatching
			// `set_validation_data` with the remaining ones.
			Self::drop_processed_messages_from_inherent(&mut data);

			Some(Call::set_validation_data { data })
		}

		fn is_inherent(call: &Self::Call) -> bool {
			matches!(call, Call::set_validation_data { .. })
		}
	}
948
	/// Genesis configuration for this pallet; carries no user-settable values.
	#[pallet::genesis_config]
	#[derive(frame_support::DefaultNoBound)]
	pub struct GenesisConfig<T: Config> {
		#[serde(skip)]
		pub _config: core::marker::PhantomData<T>,
	}
955
	#[pallet::genesis_build]
	impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
		fn build(&self) {
			// NOTE(review): writes the well-known `:c` key with an empty value at
			// genesis — presumably a marker consumed by external machinery; confirm
			// against the validate-block / client implementation before relying on it.
			sp_io::storage::set(b":c", &[]);
		}
	}
963}
964
impl<T: Config> Pallet<T> {
	/// Returns the length of the unincluded segment remaining after the block with
	/// the given `included_hash`, as computed by
	/// `unincluded_segment::size_after_included` over the stored segment.
	pub fn unincluded_segment_size_after(included_hash: T::Hash) -> u32 {
		let segment = UnincludedSegment::<T>::get();
		crate::unincluded_segment::size_after_included(included_hash, &segment)
	}
}
978
979impl<T: Config> FeeTracker for Pallet<T> {
980 type Id = ();
981
982 fn get_fee_factor(_: Self::Id) -> FixedU128 {
983 UpwardDeliveryFeeFactor::<T>::get()
984 }
985
986 fn increase_fee_factor(_: Self::Id, message_size_factor: FixedU128) -> FixedU128 {
987 <UpwardDeliveryFeeFactor<T>>::mutate(|f| {
988 *f = f.saturating_mul(
989 ump_constants::EXPONENTIAL_FEE_BASE.saturating_add(message_size_factor),
990 );
991 *f
992 })
993 }
994
995 fn decrease_fee_factor(_: Self::Id) -> FixedU128 {
996 <UpwardDeliveryFeeFactor<T>>::mutate(|f| {
997 *f =
998 UpwardInitialDeliveryFeeFactor::get().max(*f / ump_constants::EXPONENTIAL_FEE_BASE);
999 *f
1000 })
1001 }
1002}
1003
1004impl<T: Config> ListChannelInfos for Pallet<T> {
1005 fn outgoing_channels() -> Vec<ParaId> {
1006 let Some(state) = RelevantMessagingState::<T>::get() else { return Vec::new() };
1007 state.egress_channels.into_iter().map(|(id, _)| id).collect()
1008 }
1009}
1010
1011impl<T: Config> GetChannelInfo for Pallet<T> {
1012 fn get_channel_status(id: ParaId) -> ChannelStatus {
1013 let channels = match RelevantMessagingState::<T>::get() {
1028 None => {
1029 log::warn!("calling `get_channel_status` with no RelevantMessagingState?!");
1030 return ChannelStatus::Closed
1031 },
1032 Some(d) => d.egress_channels,
1033 };
1034 let index = match channels.binary_search_by_key(&id, |item| item.0) {
1041 Err(_) => return ChannelStatus::Closed,
1042 Ok(i) => i,
1043 };
1044 let meta = &channels[index].1;
1045 if meta.msg_count + 1 > meta.max_capacity {
1046 return ChannelStatus::Full
1048 }
1049 let max_size_now = meta.max_total_size - meta.total_size;
1050 let max_size_ever = meta.max_message_size;
1051 ChannelStatus::Ready(max_size_now as usize, max_size_ever as usize)
1052 }
1053
1054 fn get_channel_info(id: ParaId) -> Option<ChannelInfo> {
1055 let channels = RelevantMessagingState::<T>::get()?.egress_channels;
1056 let index = channels.binary_search_by_key(&id, |item| item.0).ok()?;
1057 let info = ChannelInfo {
1058 max_capacity: channels[index].1.max_capacity,
1059 max_total_size: channels[index].1.max_total_size,
1060 max_message_size: channels[index].1.max_message_size,
1061 msg_count: channels[index].1.msg_count,
1062 total_size: channels[index].1.total_size,
1063 };
1064 Some(info)
1065 }
1066}
1067
1068impl<T: Config> Pallet<T> {
1069 fn drop_processed_messages_from_inherent(para_inherent: &mut ParachainInherentData) {
1074 let ParachainInherentData { downward_messages, horizontal_messages, .. } = para_inherent;
1075
1076 let last_relay_block_number = LastRelayChainBlockNumber::<T>::get();
1079
1080 let dmq_processed_num = downward_messages
1082 .iter()
1083 .take_while(|message| message.sent_at <= last_relay_block_number)
1084 .count();
1085 downward_messages.drain(..dmq_processed_num);
1086
1087 for horizontal in horizontal_messages.values_mut() {
1089 let horizontal_processed_num = horizontal
1090 .iter()
1091 .take_while(|message| message.sent_at <= last_relay_block_number)
1092 .count();
1093 horizontal.drain(..horizontal_processed_num);
1094 }
1095
1096 }
1099
	/// Enqueue all inbound downward messages relayed by the collator.
	///
	/// Extends the last-seen downward message-queue chain (MQC) with every message,
	/// hands the messages to `T::DmpQueue` and asserts that the resulting MQC head
	/// equals `expected_dmq_mqc_head` as reported by the relay chain. Returns the
	/// benchmarked weight for processing `downward_messages.len()` messages.
	fn enqueue_inbound_downward_messages(
		expected_dmq_mqc_head: relay_chain::Hash,
		downward_messages: Vec<InboundDownwardMessage>,
	) -> Weight {
		let dm_count = downward_messages.len() as u32;
		let mut dmq_head = <LastDmqMqcHead<T>>::get();

		let weight_used = T::WeightInfo::enqueue_inbound_downward_messages(dm_count);
		if dm_count != 0 {
			Self::deposit_event(Event::DownwardMessagesReceived { count: dm_count });

			// Fold every message into the running MQC head.
			for m in &downward_messages {
				dmq_head.extend_downward(m);
			}
			// Messages exceeding the queue's maximum length are dropped defensively;
			// this should not happen for well-formed relay-chain data.
			let bounded = downward_messages
				.iter()
				.filter_map(|m| match BoundedSlice::try_from(&m.msg[..]) {
					Ok(bounded) => Some(bounded),
					Err(_) => {
						defensive!("Inbound Downward message was too long; dropping");
						None
					},
				});
			T::DmpQueue::handle_messages(bounded);
			<LastDmqMqcHead<T>>::put(&dmq_head);

			Self::deposit_event(Event::DownwardMessagesProcessed {
				weight_used,
				dmq_head: dmq_head.head(),
			});
		}

		// A mismatch means the collator supplied tampered or incomplete downward
		// messages; aborting (panicking) is the only safe reaction.
		assert_eq!(dmq_head.head(), expected_dmq_mqc_head);

		ProcessedDownwardMessages::<T>::put(dm_count);

		weight_used
	}
1155
	/// Process all inbound horizontal (HRMP) messages relayed by the collator.
	///
	/// Asserts that every sender has an open ingress channel, replays the messages
	/// through the per-channel MQC heads, verifies those heads against the relay
	/// chain's expectations, dispatches the messages to `T::XcmpMessageHandler`,
	/// and records the new HRMP watermark. Returns the weight consumed.
	fn enqueue_inbound_horizontal_messages(
		ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)],
		horizontal_messages: BTreeMap<ParaId, Vec<InboundHrmpMessage>>,
		relay_parent_number: relay_chain::BlockNumber,
	) -> Weight {
		// `ingress_channels` is sorted by `ParaId`, so binary search is valid.
		// A message from a para without an open channel is a protocol violation.
		for sender in horizontal_messages.keys() {
			assert!(ingress_channels.binary_search_by_key(sender, |&(s, _)| s).is_ok(),);
		}

		// Flatten per-channel lists and order by `sent_at`, tie-broken by sender,
		// to obtain the canonical processing order.
		let mut horizontal_messages = horizontal_messages
			.into_iter()
			.flat_map(|(sender, channel_contents)| {
				channel_contents.into_iter().map(move |message| (sender, message))
			})
			.collect::<Vec<_>>();
		horizontal_messages.sort_by(|a, b| {
			// First compare by sent-at, then by the sender para id.
			match a.1.sent_at.cmp(&b.1.sent_at) {
				cmp::Ordering::Equal => a.0.cmp(&b.0),
				ord => ord,
			}
		});

		let last_mqc_heads = <LastHrmpMqcHeads<T>>::get();
		let mut running_mqc_heads = BTreeMap::new();
		let mut hrmp_watermark = None;

		{
			// Fold each message into its channel's running MQC head, tracking the
			// highest `sent_at` seen as the candidate watermark.
			for (sender, ref horizontal_message) in &horizontal_messages {
				if hrmp_watermark.map(|w| w < horizontal_message.sent_at).unwrap_or(true) {
					hrmp_watermark = Some(horizontal_message.sent_at);
				}

				running_mqc_heads
					.entry(sender)
					.or_insert_with(|| last_mqc_heads.get(sender).cloned().unwrap_or_default())
					.extend_hrmp(horizontal_message);
			}
		}
		let message_iter = horizontal_messages
			.iter()
			.map(|&(sender, ref message)| (sender, message.sent_at, &message.data[..]));

		let max_weight =
			<ReservedXcmpWeightOverride<T>>::get().unwrap_or_else(T::ReservedXcmpWeight::get);
		let weight_used = T::XcmpMessageHandler::handle_xcmp_messages(message_iter, max_weight);

		// For every open channel the locally computed head must match the relay
		// chain's; channels without new messages fall back to the stored head.
		for (sender, channel) in ingress_channels {
			let cur_head = running_mqc_heads
				.entry(sender)
				.or_insert_with(|| last_mqc_heads.get(sender).cloned().unwrap_or_default())
				.head();
			let target_head = channel.mqc_head.unwrap_or_default();

			assert!(cur_head == target_head);
		}

		<LastHrmpMqcHeads<T>>::put(running_mqc_heads);

		// With no inbound messages, advance the watermark to the relay parent.
		HrmpWatermark::<T>::put(hrmp_watermark.unwrap_or(relay_parent_number));

		weight_used
	}
1251
1252 fn maybe_drop_included_ancestors(
1254 relay_state_proof: &RelayChainStateProof,
1255 capacity: consensus_hook::UnincludedSegmentCapacity,
1256 ) -> Weight {
1257 let mut weight_used = Weight::zero();
1258 let para_head =
1260 relay_state_proof.read_included_para_head().ok().map(|h| T::Hashing::hash(&h.0));
1261
1262 let unincluded_segment_len = <UnincludedSegment<T>>::decode_len().unwrap_or(0);
1263 weight_used += T::DbWeight::get().reads(1);
1264
1265 let included_head = match (para_head, capacity.is_expecting_included_parent()) {
1267 (Some(h), true) => {
1268 assert_eq!(
1269 h,
1270 frame_system::Pallet::<T>::parent_hash(),
1271 "expected parent to be included"
1272 );
1273
1274 h
1275 },
1276 (Some(h), false) => h,
1277 (None, true) => {
1278 frame_system::Pallet::<T>::parent_hash()
1281 },
1282 (None, false) => panic!("included head not present in relay storage proof"),
1283 };
1284
1285 let new_len = {
1286 let para_head_hash = included_head;
1287 let dropped: Vec<Ancestor<T::Hash>> = <UnincludedSegment<T>>::mutate(|chain| {
1288 let idx = chain
1291 .iter()
1292 .position(|block| {
1293 let head_hash = block
1294 .para_head_hash()
1295 .expect("para head hash is updated during block initialization; qed");
1296 head_hash == ¶_head_hash
1297 })
1298 .map_or(0, |idx| idx + 1); chain.drain(..idx).collect()
1301 });
1302 weight_used += T::DbWeight::get().reads_writes(1, 1);
1303
1304 let new_len = unincluded_segment_len - dropped.len();
1305 if !dropped.is_empty() {
1306 <AggregatedUnincludedSegment<T>>::mutate(|agg| {
1307 let agg = agg.as_mut().expect(
1308 "dropped part of the segment wasn't empty, hence value exists; qed",
1309 );
1310 for block in dropped {
1311 agg.subtract(&block);
1312 }
1313 });
1314 weight_used += T::DbWeight::get().reads_writes(1, 1);
1315 }
1316
1317 new_len as u32
1318 };
1319
1320 assert!(new_len < capacity.get(), "no space left for the block in the unincluded segment");
1325 weight_used
1326 }
1327
	/// Shrink the egress bandwidth limits in `RelevantMessagingState` by the
	/// bandwidth already consumed by blocks in the unincluded segment.
	///
	/// A no-op when either the aggregated segment or the messaging state is unset.
	fn adjust_egress_bandwidth_limits() {
		let unincluded_segment = match AggregatedUnincludedSegment::<T>::get() {
			None => return,
			Some(s) => s,
		};

		<RelevantMessagingState<T>>::mutate(|messaging_state| {
			let messaging_state = match messaging_state {
				None => return,
				Some(s) => s,
			};

			let used_bandwidth = unincluded_segment.used_bandwidth();

			// Charge each egress channel for what the segment already sent.
			let channels = &mut messaging_state.egress_channels;
			for (para_id, used) in used_bandwidth.hrmp_outgoing.iter() {
				let i = match channels.binary_search_by_key(para_id, |item| item.0) {
					Ok(i) => i,
					// Channel not present in the relay state: nothing to adjust.
					Err(_) => continue,
				};

				let c = &mut channels[i].1;

				// Clamp at the channel maxima so usage never exceeds capacity.
				c.total_size = (c.total_size + used.total_bytes).min(c.max_total_size);
				c.msg_count = (c.msg_count + used.msg_count).min(c.max_capacity);
			}

			// Likewise shrink the remaining UMP dispatch-queue capacity.
			let upward_capacity = &mut messaging_state.relay_dispatch_queue_remaining_capacity;
			upward_capacity.remaining_count =
				upward_capacity.remaining_count.saturating_sub(used_bandwidth.ump_msg_count);
			upward_capacity.remaining_size =
				upward_capacity.remaining_size.saturating_sub(used_bandwidth.ump_total_bytes);
		});
	}
1367
1368 fn notify_polkadot_of_pending_upgrade(code: &[u8]) {
1372 NewValidationCode::<T>::put(code);
1373 <DidSetValidationCode<T>>::put(true);
1374 }
1375
1376 pub fn max_code_size() -> Option<u32> {
1380 <HostConfiguration<T>>::get().map(|cfg| cfg.max_code_size)
1381 }
1382
1383 pub fn schedule_code_upgrade(validation_function: Vec<u8>) -> DispatchResult {
1385 ensure!(<ValidationData<T>>::exists(), Error::<T>::ValidationDataNotAvailable,);
1389 ensure!(<UpgradeRestrictionSignal<T>>::get().is_none(), Error::<T>::ProhibitedByPolkadot);
1390
1391 ensure!(!<PendingValidationCode<T>>::exists(), Error::<T>::OverlappingUpgrades);
1392 let cfg = HostConfiguration::<T>::get().ok_or(Error::<T>::HostConfigurationNotAvailable)?;
1393 ensure!(validation_function.len() <= cfg.max_code_size as usize, Error::<T>::TooBig);
1394
1395 Self::notify_polkadot_of_pending_upgrade(&validation_function);
1403 <PendingValidationCode<T>>::put(validation_function);
1404 Self::deposit_event(Event::ValidationFunctionStored);
1405
1406 Ok(())
1407 }
1408
1409 pub fn collect_collation_info(header: &HeaderFor<T>) -> CollationInfo {
1417 CollationInfo {
1418 hrmp_watermark: HrmpWatermark::<T>::get(),
1419 horizontal_messages: HrmpOutboundMessages::<T>::get(),
1420 upward_messages: UpwardMessages::<T>::get(),
1421 processed_downward_messages: ProcessedDownwardMessages::<T>::get(),
1422 new_validation_code: NewValidationCode::<T>::get().map(Into::into),
1423 head_data: CustomValidationHeadData::<T>::get()
1426 .map_or_else(|| header.encode(), |v| v)
1427 .into(),
1428 }
1429 }
1430
	/// The core selector and claim-queue offset for the next block, as chosen
	/// by the configured `SelectCore` implementation.
	pub fn core_selector() -> (CoreSelector, ClaimQueueOffset) {
		T::SelectCore::select_next_core()
	}
1435
	/// Store a custom head data blob; `collect_collation_info` will return it
	/// in place of the encoded block header.
	pub fn set_custom_validation_head_data(head_data: Vec<u8>) {
		CustomValidationHeadData::<T>::put(head_data);
	}
1451
1452 #[cfg(feature = "experimental-ump-signals")]
1454 fn send_ump_signal() {
1455 use cumulus_primitives_core::relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR};
1456
1457 UpwardMessages::<T>::mutate(|up| {
1458 up.push(UMP_SEPARATOR);
1459
1460 let core_selector = T::SelectCore::selected_core();
1462 up.push(UMPSignal::SelectCore(core_selector.0, core_selector.1).encode());
1463 });
1464 }
1465
1466 #[cfg(any(feature = "runtime-benchmarks", feature = "std"))]
1471 pub fn open_outbound_hrmp_channel_for_benchmarks_or_tests(target_parachain: ParaId) {
1472 RelevantMessagingState::<T>::put(MessagingStateSnapshot {
1473 dmq_mqc_head: Default::default(),
1474 relay_dispatch_queue_remaining_capacity: Default::default(),
1475 ingress_channels: Default::default(),
1476 egress_channels: vec![(
1477 target_parachain,
1478 cumulus_primitives_core::AbridgedHrmpChannel {
1479 max_capacity: 10,
1480 max_total_size: 10_000_000_u32,
1481 max_message_size: 10_000_000_u32,
1482 msg_count: 5,
1483 total_size: 5_000_000_u32,
1484 mqc_head: None,
1485 },
1486 )],
1487 })
1488 }
1489
1490 #[cfg(any(feature = "runtime-benchmarks", feature = "std"))]
1495 pub fn open_custom_outbound_hrmp_channel_for_benchmarks_or_tests(
1496 target_parachain: ParaId,
1497 channel: cumulus_primitives_core::AbridgedHrmpChannel,
1498 ) {
1499 RelevantMessagingState::<T>::put(MessagingStateSnapshot {
1500 dmq_mqc_head: Default::default(),
1501 relay_dispatch_queue_remaining_capacity: Default::default(),
1502 ingress_channels: Default::default(),
1503 egress_channels: vec![(target_parachain, channel)],
1504 })
1505 }
1506
1507 #[cfg(feature = "runtime-benchmarks")]
1509 pub fn initialize_for_set_code_benchmark(max_code_size: u32) {
1510 let vfp = PersistedValidationData {
1512 parent_head: polkadot_parachain_primitives::primitives::HeadData(Default::default()),
1513 relay_parent_number: 1,
1514 relay_parent_storage_root: Default::default(),
1515 max_pov_size: 1_000,
1516 };
1517 <ValidationData<T>>::put(&vfp);
1518
1519 let host_config = AbridgedHostConfiguration {
1521 max_code_size,
1522 max_head_data_size: 32 * 1024,
1523 max_upward_queue_count: 8,
1524 max_upward_queue_size: 1024 * 1024,
1525 max_upward_message_size: 4 * 1024,
1526 max_upward_message_num_per_candidate: 2,
1527 hrmp_max_message_num_per_candidate: 2,
1528 validation_upgrade_cooldown: 2,
1529 validation_upgrade_delay: 2,
1530 async_backing_params: relay_chain::AsyncBackingParams {
1531 allowed_ancestry_len: 0,
1532 max_candidate_depth: 0,
1533 },
1534 };
1535 <HostConfiguration<T>>::put(host_config);
1536 }
1537}
1538
/// Implements `frame_system::SetCode` by routing code changes through the
/// parachain upgrade-scheduling machinery instead of writing `:code` directly.
pub struct ParachainSetCode<T>(core::marker::PhantomData<T>);

impl<T: Config> frame_system::SetCode<T> for ParachainSetCode<T> {
	fn set_code(code: Vec<u8>) -> DispatchResult {
		// Delegates all validation (size limits, overlap, restriction signal)
		// to the pallet's upgrade scheduler.
		Pallet::<T>::schedule_code_upgrade(code)
	}
}
1546
1547impl<T: Config> Pallet<T> {
1548 pub fn send_upward_message(message: UpwardMessage) -> Result<(u32, XcmHash), MessageSendError> {
1554 let message_len = message.len();
1555 if let Some(cfg) = HostConfiguration::<T>::get() {
1568 if message_len > cfg.max_upward_message_size as usize {
1569 return Err(MessageSendError::TooBig)
1570 }
1571 let threshold =
1572 cfg.max_upward_queue_size.saturating_div(ump_constants::THRESHOLD_FACTOR);
1573 <PendingUpwardMessages<T>>::append(message.clone());
1576 let pending_messages = PendingUpwardMessages::<T>::get();
1577 let total_size: usize = pending_messages.iter().map(UpwardMessage::len).sum();
1578 if total_size > threshold as usize {
1579 let message_size_factor = FixedU128::from((message_len / 1024) as u128)
1581 .saturating_mul(ump_constants::MESSAGE_SIZE_FEE_BASE);
1582 Self::increase_fee_factor((), message_size_factor);
1583 }
1584 } else {
1585 <PendingUpwardMessages<T>>::append(message.clone());
1595 };
1596
1597 let hash = sp_io::hashing::blake2_256(&message);
1600 Self::deposit_event(Event::UpwardMessageSent { message_hash: Some(hash) });
1601 Ok((0, hash))
1602 }
1603
1604 pub fn last_relay_block_number() -> RelayChainBlockNumber {
1607 LastRelayChainBlockNumber::<T>::get()
1608 }
1609}
1610
impl<T: Config> UpwardMessageSender for Pallet<T> {
	// Thin forwarder to the inherent `Pallet::send_upward_message`.
	fn send_upward_message(message: UpwardMessage) -> Result<(u32, XcmHash), MessageSendError> {
		Self::send_upward_message(message)
	}
}
1616
1617impl<T: Config> InspectMessageQueues for Pallet<T> {
1618 fn clear_messages() {
1619 PendingUpwardMessages::<T>::kill();
1620 }
1621
1622 fn get_messages() -> Vec<(VersionedLocation, Vec<VersionedXcm<()>>)> {
1623 use xcm::prelude::*;
1624
1625 let messages: Vec<VersionedXcm<()>> = PendingUpwardMessages::<T>::get()
1626 .iter()
1627 .map(|encoded_message| {
1628 VersionedXcm::<()>::decode_all_with_depth_limit(
1629 MAX_XCM_DECODE_DEPTH,
1630 &mut &encoded_message[..],
1631 )
1632 .unwrap()
1633 })
1634 .collect();
1635
1636 if messages.is_empty() {
1637 vec![]
1638 } else {
1639 vec![(VersionedLocation::from(Location::parent()), messages)]
1640 }
1641 }
1642}
1643
#[cfg(feature = "runtime-benchmarks")]
impl<T: Config> polkadot_runtime_common::xcm_sender::EnsureForParachain for Pallet<T> {
	/// Guarantee an outbound HRMP channel to `para_id` exists, opening a
	/// synthetic one when the channel is currently closed. Benchmarks only.
	fn ensure(para_id: ParaId) {
		if matches!(Self::get_channel_status(para_id), ChannelStatus::Closed) {
			Self::open_outbound_hrmp_channel_for_benchmarks_or_tests(para_id)
		}
	}
}
1652
/// Something that can check the inherents of a block against the relay-chain
/// state proof. Deprecated in favour of the `ConsensusHook` mechanism.
#[deprecated(note = "This trait is deprecated and will be removed by September 2024. \
	Consider switching to `cumulus-pallet-parachain-system::ConsensusHook`")]
pub trait CheckInherents<Block: BlockT> {
	/// Check the inherents of `block`, using `validation_data` (the relay
	/// chain state proof) as context.
	fn check_inherents(
		block: &Block,
		validation_data: &RelayChainStateProof,
	) -> frame_support::inherent::CheckInherentsResult;
}
1666
/// A `CheckInherents` implementation that performs no checks at all;
/// kept for backwards compatibility with the deprecated trait.
#[doc(hidden)]
pub struct DummyCheckInherents<Block>(core::marker::PhantomData<Block>);

#[allow(deprecated)]
impl<Block: BlockT> CheckInherents<Block> for DummyCheckInherents<Block> {
	// Always reports an empty (i.e. successful) result without inspecting
	// the block or the state proof.
	fn check_inherents(
		_: &Block,
		_: &RelayChainStateProof,
	) -> frame_support::inherent::CheckInherentsResult {
		sp_inherents::CheckInherentsResult::new()
	}
}
1680
/// Hooks for reacting to pallet system events. Implemented for tuples of up
/// to 30 members, invoking each member in turn.
#[impl_trait_for_tuples::impl_for_tuples(30)]
pub trait OnSystemEvent {
	/// Called with the relay chain's `PersistedValidationData`.
	/// NOTE(review): the call site is not in this chunk — presumably invoked
	/// when the validation data inherent is processed; confirm at the caller.
	fn on_validation_data(data: &PersistedValidationData);
	/// Called when a pending validation code upgrade has been applied.
	/// NOTE(review): call site not visible here — confirm timing at the caller.
	fn on_validation_code_applied();
}
1696
/// A snapshot of the relay chain as seen by the parachain: a block number
/// together with the state root at that height.
#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Default, RuntimeDebug)]
pub struct RelayChainState {
	/// Relay chain block number.
	pub number: relay_chain::BlockNumber,
	/// Relay chain state root at `number`.
	pub state_root: relay_chain::Hash,
}
1705
/// Provides access to the current relay chain state (block number + state root).
pub trait RelaychainStateProvider {
	/// Returns the current relay chain state.
	fn current_relay_chain_state() -> RelayChainState;

	/// Overrides the relay chain state for benchmarking purposes.
	/// Defaults to a no-op.
	#[cfg(feature = "runtime-benchmarks")]
	fn set_current_relay_chain_state(_state: RelayChainState) {}
}
1722
/// Deprecated alias retained for backwards compatibility.
#[deprecated = "Use `RelaychainDataProvider` instead"]
pub type RelaychainBlockNumberProvider<T> = RelaychainDataProvider<T>;

/// Exposes relay-chain data (block number, state root) taken from the
/// pallet's stored `ValidationData`.
pub struct RelaychainDataProvider<T>(core::marker::PhantomData<T>);
1743
1744impl<T: Config> BlockNumberProvider for RelaychainDataProvider<T> {
1745 type BlockNumber = relay_chain::BlockNumber;
1746
1747 fn current_block_number() -> relay_chain::BlockNumber {
1748 ValidationData::<T>::get()
1749 .map(|d| d.relay_parent_number)
1750 .unwrap_or_else(|| Pallet::<T>::last_relay_block_number())
1751 }
1752
1753 #[cfg(feature = "runtime-benchmarks")]
1754 fn set_block_number(block: Self::BlockNumber) {
1755 let mut validation_data = ValidationData::<T>::get().unwrap_or_else(||
1756 PersistedValidationData {
1758 parent_head: vec![].into(),
1759 relay_parent_number: Default::default(),
1760 max_pov_size: Default::default(),
1761 relay_parent_storage_root: Default::default(),
1762 });
1763 validation_data.relay_parent_number = block;
1764 ValidationData::<T>::put(validation_data)
1765 }
1766}
1767
1768impl<T: Config> RelaychainStateProvider for RelaychainDataProvider<T> {
1769 fn current_relay_chain_state() -> RelayChainState {
1770 ValidationData::<T>::get()
1771 .map(|d| RelayChainState {
1772 number: d.relay_parent_number,
1773 state_root: d.relay_parent_storage_root,
1774 })
1775 .unwrap_or_default()
1776 }
1777
1778 #[cfg(feature = "runtime-benchmarks")]
1779 fn set_current_relay_chain_state(state: RelayChainState) {
1780 let mut validation_data = ValidationData::<T>::get().unwrap_or_else(||
1781 PersistedValidationData {
1783 parent_head: vec![].into(),
1784 relay_parent_number: Default::default(),
1785 max_pov_size: Default::default(),
1786 relay_parent_storage_root: Default::default(),
1787 });
1788 validation_data.relay_parent_number = state.number;
1789 validation_data.relay_parent_storage_root = state.state_root;
1790 ValidationData::<T>::put(validation_data)
1791 }
1792}