1#![cfg_attr(not(feature = "std"), no_std)]
18
19extern crate alloc;
31
32use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec};
33use codec::{Decode, DecodeLimit, Encode};
34use core::{cmp, marker::PhantomData};
35use cumulus_primitives_core::{
36 relay_chain::{
37 self,
38 vstaging::{ClaimQueueOffset, CoreSelector, DEFAULT_CLAIM_QUEUE_OFFSET},
39 },
40 AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, GetChannelInfo,
41 ListChannelInfos, MessageSendError, OutboundHrmpMessage, ParaId, PersistedValidationData,
42 UpwardMessage, UpwardMessageSender, XcmpMessageHandler, XcmpMessageSource,
43};
44use cumulus_primitives_parachain_inherent::{v0, MessageQueueChain, ParachainInherentData};
45use frame_support::{
46 dispatch::{DispatchClass, DispatchResult},
47 ensure,
48 inherent::{InherentData, InherentIdentifier, ProvideInherent},
49 traits::{Get, HandleMessage},
50 weights::Weight,
51};
52use frame_system::{ensure_none, ensure_root, pallet_prelude::HeaderFor};
53use parachain_inherent::{
54 deconstruct_parachain_inherent_data, AbridgedInboundDownwardMessages,
55 AbridgedInboundHrmpMessages, BasicParachainInherentData, InboundMessageId, InboundMessagesData,
56};
57use polkadot_parachain_primitives::primitives::RelayChainBlockNumber;
58use polkadot_runtime_parachains::{FeeTracker, GetMinFeeFactor};
59use scale_info::TypeInfo;
60use sp_runtime::{
61 traits::{Block as BlockT, BlockNumberProvider, Hash, One},
62 FixedU128, RuntimeDebug, SaturatedConversion,
63};
64use xcm::{latest::XcmHash, VersionedLocation, VersionedXcm, MAX_XCM_DECODE_DEPTH};
65use xcm_builder::InspectMessageQueues;
66
67mod benchmarking;
68pub mod migration;
69mod mock;
70#[cfg(test)]
71mod tests;
72pub mod weights;
73
74pub use weights::WeightInfo;
75
76mod unincluded_segment;
77
78pub mod consensus_hook;
79pub mod relay_state_snapshot;
80#[macro_use]
81pub mod validate_block;
82mod descendant_validation;
83pub mod parachain_inherent;
84
85use unincluded_segment::{
86 HrmpChannelUpdate, HrmpWatermarkUpdate, OutboundBandwidthLimits, SegmentTracker,
87};
88
89pub use consensus_hook::{ConsensusHook, ExpectParentIncluded};
90pub use cumulus_pallet_parachain_system_proc_macro::register_validate_block;
113pub use relay_state_snapshot::{MessagingStateSnapshot, RelayChainStateProof};
114pub use unincluded_segment::{Ancestor, UsedBandwidth};
115
116pub use pallet::*;
117
/// Log target used by this pallet's log messages.
const LOG_TARGET: &str = "parachain-system";
119
/// Validates how the relay chain block number of the current parachain block
/// relates to the one of the previous parachain block.
pub trait CheckAssociatedRelayNumber {
    /// Check `current` (this block's relay parent number) against `previous`
    /// (the previous block's relay parent number). Implementations are expected
    /// to panic on violation (see the implementors in this file).
    fn check_associated_relay_number(
        current: RelayChainBlockNumber,
        previous: RelayChainBlockNumber,
    );
}
137
138pub struct RelayNumberStrictlyIncreases;
143
144impl CheckAssociatedRelayNumber for RelayNumberStrictlyIncreases {
145 fn check_associated_relay_number(
146 current: RelayChainBlockNumber,
147 previous: RelayChainBlockNumber,
148 ) {
149 if current <= previous {
150 panic!("Relay chain block number needs to strictly increase between Parachain blocks!")
151 }
152 }
153}
154
155pub struct AnyRelayNumber;
160
161impl CheckAssociatedRelayNumber for AnyRelayNumber {
162 fn check_associated_relay_number(_: RelayChainBlockNumber, _: RelayChainBlockNumber) {}
163}
164
165pub struct RelayNumberMonotonicallyIncreases;
170
171impl CheckAssociatedRelayNumber for RelayNumberMonotonicallyIncreases {
172 fn check_associated_relay_number(
173 current: RelayChainBlockNumber,
174 previous: RelayChainBlockNumber,
175 ) {
176 if current < previous {
177 panic!("Relay chain block number needs to monotonically increase between Parachain blocks!")
178 }
179 }
180}
181
/// Convenience alias for the maximum DMP message length accepted by the
/// configured [`Config::DmpQueue`] handler.
pub type MaxDmpMessageLenOf<T> = <<T as Config>::DmpQueue as HandleMessage>::MaxMessageLen;
184
/// Constants used by the upward-message (UMP) handling logic.
pub mod ump_constants {
    /// Divisor applied to `max_upward_queue_size` (see `on_finalize`) to derive
    /// the queue-size threshold at or below which the UMP delivery fee factor
    /// is decreased again.
    pub const THRESHOLD_FACTOR: u32 = 2;
}
191
/// Strategy for selecting the relay chain core (and claim queue offset) that
/// this parachain builds on.
pub trait SelectCore {
    /// The core selector and claim queue offset for the current block.
    fn selected_core() -> (CoreSelector, ClaimQueueOffset);
    /// The core selector and claim queue offset for the next block.
    fn select_next_core() -> (CoreSelector, ClaimQueueOffset);
}
199
200pub struct DefaultCoreSelector<T>(PhantomData<T>);
202
203impl<T: frame_system::Config> SelectCore for DefaultCoreSelector<T> {
204 fn selected_core() -> (CoreSelector, ClaimQueueOffset) {
205 let core_selector = frame_system::Pallet::<T>::block_number().using_encoded(|b| b[0]);
206
207 (CoreSelector(core_selector), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET))
208 }
209
210 fn select_next_core() -> (CoreSelector, ClaimQueueOffset) {
211 let core_selector =
212 (frame_system::Pallet::<T>::block_number() + One::one()).using_encoded(|b| b[0]);
213
214 (CoreSelector(core_selector), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET))
215 }
216}
217
218pub struct LookaheadCoreSelector<T>(PhantomData<T>);
220
221impl<T: frame_system::Config> SelectCore for LookaheadCoreSelector<T> {
222 fn selected_core() -> (CoreSelector, ClaimQueueOffset) {
223 let core_selector = frame_system::Pallet::<T>::block_number().using_encoded(|b| b[0]);
224
225 (CoreSelector(core_selector), ClaimQueueOffset(1))
226 }
227
228 fn select_next_core() -> (CoreSelector, ClaimQueueOffset) {
229 let core_selector =
230 (frame_system::Pallet::<T>::block_number() + One::one()).using_encoded(|b| b[0]);
231
232 (CoreSelector(core_selector), ClaimQueueOffset(1))
233 }
234}
235
236#[frame_support::pallet]
237pub mod pallet {
238 use super::*;
239 use frame_support::pallet_prelude::*;
240 use frame_system::pallet_prelude::*;
241
    /// The parachain-system pallet type.
    #[pallet::pallet]
    #[pallet::storage_version(migration::STORAGE_VERSION)]
    #[pallet::without_storage_info]
    pub struct Pallet<T>(_);
246
    /// Configuration trait of this pallet.
    #[pallet::config]
    pub trait Config: frame_system::Config<OnSetCode = ParachainSetCode<Self>> {
        /// The overarching runtime event type.
        #[allow(deprecated)]
        type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;

        /// Handler notified about system events (validation data set,
        /// validation code applied — see `set_validation_data`).
        type OnSystemEvent: OnSystemEvent;

        /// This parachain's own `ParaId`.
        #[pallet::constant]
        type SelfParaId: Get<ParaId>;

        /// Source of outbound XCMP (HRMP) messages, drained in `on_finalize`.
        type OutboundXcmpMessageSource: XcmpMessageSource;

        /// Queue that inbound downward messages are handed to.
        type DmpQueue: HandleMessage;

        /// Weight reserved for processing downward messages
        /// (overridable via `ReservedDmpWeightOverride` storage).
        type ReservedDmpWeight: Get<Weight>;

        /// Handler for inbound XCMP (HRMP) messages.
        type XcmpMessageHandler: XcmpMessageHandler;

        /// Weight reserved for processing XCMP messages
        /// (overridable via `ReservedXcmpWeightOverride` storage).
        type ReservedXcmpWeight: Get<Weight>;

        /// Policy validating the relay parent block number progression
        /// between consecutive parachain blocks.
        type CheckAssociatedRelayNumber: CheckAssociatedRelayNumber;

        /// Weight information for this pallet's dispatchables.
        type WeightInfo: WeightInfo;

        /// Consensus hook consulted on each relay state proof; yields the
        /// allowed unincluded-segment capacity.
        type ConsensusHook: ConsensusHook;

        /// Strategy for selecting the core to build on.
        type SelectCore: SelectCore;

        /// Expected number of relay parent descendants to verify in the
        /// inherent (`0` disables descendant verification).
        type RelayParentOffset: Get<u32>;
    }
317
    #[pallet::hooks]
    impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
        fn on_finalize(_: BlockNumberFor<T>) {
            // Reset per-block flags/signals. The go-ahead signal is `take`n
            // (not just read) so it can be recorded into the unincluded-segment
            // ancestor created further below.
            <DidSetValidationCode<T>>::kill();
            <UpgradeRestrictionSignal<T>>::kill();
            let relay_upgrade_go_ahead = <UpgradeGoAhead<T>>::take();

            let vfp = <ValidationData<T>>::get().expect(
                r"Missing required set_validation_data inherent. This inherent must be
                present in every block. This error typically occurs when the set_validation_data
                execution failed and was rejected by the block builder. Check earlier log entries
                for the specific cause of the failure.",
            );

            LastRelayChainBlockNumber::<T>::put(vfp.relay_parent_number);

            let host_config = match HostConfiguration::<T>::get() {
                Some(ok) => ok,
                None => {
                    debug_assert!(
                        false,
                        "host configuration is promised to set until `on_finalize`; qed",
                    );
                    return
                },
            };

            // Capture the total outbound bandwidth limits *before* they are
            // adjusted below, for appending to the unincluded segment.
            let total_bandwidth_out = match RelevantMessagingState::<T>::get() {
                Some(s) => OutboundBandwidthLimits::from_relay_chain_state(&s),
                None => {
                    debug_assert!(
                        false,
                        "relevant messaging state is promised to be set until `on_finalize`; \
                        qed",
                    );
                    return
                },
            };

            // Account for bandwidth already used by the unincluded segment.
            Self::adjust_egress_bandwidth_limits();

            // Move as many pending upward messages as fit into the relay
            // dispatch queue's remaining capacity into `UpwardMessages`.
            let (ump_msg_count, ump_total_bytes) = <PendingUpwardMessages<T>>::mutate(|up| {
                let (available_capacity, available_size) = match RelevantMessagingState::<T>::get()
                {
                    Some(limits) => (
                        limits.relay_dispatch_queue_remaining_capacity.remaining_count,
                        limits.relay_dispatch_queue_remaining_capacity.remaining_size,
                    ),
                    None => {
                        debug_assert!(
                            false,
                            "relevant messaging state is promised to be set until `on_finalize`; \
                            qed",
                        );
                        return (0, 0)
                    },
                };

                let available_capacity =
                    cmp::min(available_capacity, host_config.max_upward_message_num_per_candidate);

                // Greedily count, from the front of the queue, how many
                // messages fit within both the count and the size budget.
                let (num, total_size) = up
                    .iter()
                    .scan((0u32, 0u32), |state, msg| {
                        let (cap_used, size_used) = *state;
                        let new_cap = cap_used.saturating_add(1);
                        let new_size = size_used.saturating_add(msg.len() as u32);
                        match available_capacity
                            .checked_sub(new_cap)
                            .and(available_size.checked_sub(new_size))
                        {
                            Some(_) => {
                                *state = (new_cap, new_size);
                                Some(*state)
                            },
                            _ => None,
                        }
                    })
                    .last()
                    .unwrap_or_default();

                // Publish the selected prefix and keep the rest pending.
                UpwardMessages::<T>::put(&up[..num as usize]);
                *up = up.split_off(num as usize);

                #[cfg(feature = "experimental-ump-signals")]
                Self::send_ump_signal();

                // If the remaining queue dropped to or below half (see
                // `ump_constants::THRESHOLD_FACTOR`) of the max queue size,
                // the delivery fee factor may decrease again.
                let threshold = host_config
                    .max_upward_queue_size
                    .saturating_div(ump_constants::THRESHOLD_FACTOR);
                let remaining_total_size: usize = up.iter().map(UpwardMessage::len).sum();
                if remaining_total_size <= threshold as usize {
                    Self::decrease_fee_factor(());
                }

                (num, total_size)
            });

            // Respect both the host configuration and the per-candidate
            // announcement made in `on_initialize`.
            let maximum_channels = host_config
                .hrmp_max_message_num_per_candidate
                .min(<AnnouncedHrmpMessagesPerCandidate<T>>::take())
                as usize;

            let outbound_messages =
                T::OutboundXcmpMessageSource::take_outbound_messages(maximum_channels)
                    .into_iter()
                    .map(|(recipient, data)| OutboundHrmpMessage { recipient, data })
                    .collect::<Vec<_>>();

            // Record this block's bandwidth usage in the unincluded segment.
            {
                let hrmp_outgoing = outbound_messages
                    .iter()
                    .map(|msg| {
                        (
                            msg.recipient,
                            HrmpChannelUpdate { msg_count: 1, total_bytes: msg.data.len() as u32 },
                        )
                    })
                    .collect();
                let used_bandwidth =
                    UsedBandwidth { ump_msg_count, ump_total_bytes, hrmp_outgoing };

                let mut aggregated_segment =
                    AggregatedUnincludedSegment::<T>::get().unwrap_or_default();
                // Only record the go-ahead signal on this ancestor if no
                // earlier ancestor in the segment consumed it already.
                let consumed_go_ahead_signal =
                    if aggregated_segment.consumed_go_ahead_signal().is_some() {
                        None
                    } else {
                        relay_upgrade_go_ahead
                    };
                let ancestor = Ancestor::new_unchecked(used_bandwidth, consumed_go_ahead_signal);

                let watermark = HrmpWatermark::<T>::get();
                let watermark_update = HrmpWatermarkUpdate::new(watermark, vfp.relay_parent_number);

                aggregated_segment
                    .append(&ancestor, watermark_update, &total_bandwidth_out)
                    .expect("unincluded segment limits exceeded");
                AggregatedUnincludedSegment::<T>::put(aggregated_segment);
                UnincludedSegment::<T>::append(ancestor);
            }
            HrmpOutboundMessages::<T>::put(outbound_messages);
        }

        fn on_initialize(_n: BlockNumberFor<T>) -> Weight {
            let mut weight = Weight::zero();

            // If the previous block did not set new validation code, discard
            // any leftover `NewValidationCode`.
            if !<DidSetValidationCode<T>>::get() {
                NewValidationCode::<T>::kill();
                weight += T::DbWeight::get().writes(1);
            }

            {
                // Fill in the newest unincluded-segment ancestor's para head
                // hash, which only becomes known now (the parent hash).
                <UnincludedSegment<T>>::mutate(|chain| {
                    if let Some(ancestor) = chain.last_mut() {
                        let parent = frame_system::Pallet::<T>::parent_hash();
                        ancestor.replace_para_head_hash(parent);
                    }
                });
                weight += T::DbWeight::get().reads_writes(1, 1);

                weight += T::DbWeight::get().reads_writes(3, 2);
            }

            // Remove the transient values of the previous block.
            ValidationData::<T>::kill();
            ProcessedDownwardMessages::<T>::kill();
            UpwardMessages::<T>::kill();
            HrmpOutboundMessages::<T>::kill();
            CustomValidationHeadData::<T>::kill();
            // NOTE(review): result discarded — `HrmpWatermark` intentionally
            // persists across blocks (it is read again in
            // `enqueue_inbound_horizontal_messages`), but a bare `get()` here
            // looks vestigial beyond accounting for the read below; confirm.
            HrmpWatermark::<T>::get();
            weight += T::DbWeight::get().reads_writes(1, 5);

            // NOTE(review): the following accruals appear to pre-account
            // weight for work performed elsewhere in the block (e.g. the
            // `set_validation_data` inherent); confirm against the benchmarks.
            weight += T::DbWeight::get().reads_writes(1, 1);
            // Announce how many HRMP messages this candidate may send at most.
            let hrmp_max_message_num_per_candidate = HostConfiguration::<T>::get()
                .map(|cfg| cfg.hrmp_max_message_num_per_candidate)
                .unwrap_or(0);
            <AnnouncedHrmpMessagesPerCandidate<T>>::put(hrmp_max_message_num_per_candidate);

            weight += T::DbWeight::get().reads_writes(
                3 + hrmp_max_message_num_per_candidate as u64,
                4 + hrmp_max_message_num_per_candidate as u64,
            );

            weight += T::DbWeight::get().reads_writes(1, 1);

            weight += T::DbWeight::get().reads_writes(6, 3);

            weight += T::DbWeight::get().reads(1);

            weight
        }
    }
583
    #[pallet::call]
    impl<T: Config> Pallet<T> {
        /// Set the current (parachain) validation data for this block.
        ///
        /// Mandatory inherent: verifies the relay chain state proof, applies or
        /// discards a pending validation code upgrade according to the relay
        /// chain's go-ahead signal, stores the relevant messaging state, and
        /// enqueues the inbound downward/horizontal messages.
        ///
        /// Panics (rejecting the block) on any proof/ordering violation — these
        /// checks are consensus-critical.
        #[pallet::call_index(0)]
        #[pallet::weight((0, DispatchClass::Mandatory))]
        pub fn set_validation_data(
            origin: OriginFor<T>,
            data: BasicParachainInherentData,
            inbound_messages_data: InboundMessagesData,
        ) -> DispatchResult {
            ensure_none(origin)?;
            assert!(
                !<ValidationData<T>>::exists(),
                "ValidationData must be updated only once in a block",
            );

            let mut total_weight = Weight::zero();

            let BasicParachainInherentData {
                validation_data: vfp,
                relay_chain_state,
                relay_parent_descendants,
                collator_peer_id: _,
            } = data;

            // Panics if the relay block number progression is violated.
            T::CheckAssociatedRelayNumber::check_associated_relay_number(
                vfp.relay_parent_number,
                LastRelayChainBlockNumber::<T>::get(),
            );

            let relay_state_proof = RelayChainStateProof::new(
                T::SelfParaId::get(),
                vfp.relay_parent_storage_root,
                relay_chain_state.clone(),
            )
            .expect("Invalid relay chain state proof");

            let expected_rp_descendants_num = T::RelayParentOffset::get();

            // Only verify descendants when a relay parent offset is configured.
            if expected_rp_descendants_num > 0 {
                if let Err(err) = descendant_validation::verify_relay_parent_descendants(
                    &relay_state_proof,
                    relay_parent_descendants,
                    vfp.relay_parent_storage_root,
                    expected_rp_descendants_num,
                ) {
                    panic!(
                        "Unable to verify provided relay parent descendants. \
                        expected_rp_descendants_num: {expected_rp_descendants_num} \
                        error: {err:?}"
                    );
                };
            }

            // Consult the consensus hook and prune ancestors that the relay
            // chain has meanwhile included.
            let (consensus_hook_weight, capacity) =
                T::ConsensusHook::on_state_proof(&relay_state_proof);
            total_weight += consensus_hook_weight;
            total_weight += Self::maybe_drop_included_ancestors(&relay_state_proof, capacity);
            // Publish the relay parent storage root as a digest item.
            frame_system::Pallet::<T>::deposit_log(
                cumulus_primitives_core::rpsr_digest::relay_parent_storage_root_item(
                    vfp.relay_parent_storage_root,
                    vfp.relay_parent_number,
                ),
            );

            let upgrade_go_ahead_signal = relay_state_proof
                .read_upgrade_go_ahead_signal()
                .expect("Invalid upgrade go ahead signal");

            // If an ancestor in the unincluded segment already consumed a
            // go-ahead signal, the relay chain must still report the same one.
            let upgrade_signal_in_segment = AggregatedUnincludedSegment::<T>::get()
                .as_ref()
                .and_then(SegmentTracker::consumed_go_ahead_signal);
            if let Some(signal_in_segment) = upgrade_signal_in_segment.as_ref() {
                assert_eq!(upgrade_go_ahead_signal, Some(*signal_in_segment));
            }
            match upgrade_go_ahead_signal {
                // Already handled by an unincluded ancestor — nothing to do.
                Some(_signal) if upgrade_signal_in_segment.is_some() => {
                },
                Some(relay_chain::UpgradeGoAhead::GoAhead) => {
                    assert!(
                        <PendingValidationCode<T>>::exists(),
                        "No new validation function found in storage, GoAhead signal is not expected",
                    );
                    let validation_code = <PendingValidationCode<T>>::take();

                    frame_system::Pallet::<T>::update_code_in_storage(&validation_code);
                    <T::OnSystemEvent as OnSystemEvent>::on_validation_code_applied();
                    Self::deposit_event(Event::ValidationFunctionApplied {
                        relay_chain_block_num: vfp.relay_parent_number,
                    });
                },
                Some(relay_chain::UpgradeGoAhead::Abort) => {
                    <PendingValidationCode<T>>::kill();
                    Self::deposit_event(Event::ValidationFunctionDiscarded);
                },
                None => {},
            }
            <UpgradeRestrictionSignal<T>>::put(
                relay_state_proof
                    .read_upgrade_restriction_signal()
                    .expect("Invalid upgrade restriction signal"),
            );
            <UpgradeGoAhead<T>>::put(upgrade_go_ahead_signal);

            let host_config = relay_state_proof
                .read_abridged_host_configuration()
                .expect("Invalid host configuration in relay chain state proof");

            let relevant_messaging_state = relay_state_proof
                .read_messaging_state_snapshot(&host_config)
                .expect("Invalid messaging state in relay chain state proof");

            <ValidationData<T>>::put(&vfp);
            <RelayStateProof<T>>::put(relay_chain_state);
            <RelevantMessagingState<T>>::put(relevant_messaging_state.clone());
            <HostConfiguration<T>>::put(host_config);

            <T::OnSystemEvent as OnSystemEvent>::on_validation_data(&vfp);

            // Enqueue inbound messages, verifying MQC heads along the way.
            total_weight.saturating_accrue(Self::enqueue_inbound_downward_messages(
                relevant_messaging_state.dmq_mqc_head,
                inbound_messages_data.downward_messages,
            ));
            total_weight.saturating_accrue(Self::enqueue_inbound_horizontal_messages(
                &relevant_messaging_state.ingress_channels,
                inbound_messages_data.horizontal_messages,
                vfp.relay_parent_number,
            ));

            frame_system::Pallet::<T>::register_extra_weight_unchecked(
                total_weight,
                DispatchClass::Mandatory,
            );

            Ok(())
        }

        /// Send an upward message as root (e.g. via governance).
        #[pallet::call_index(1)]
        #[pallet::weight((1_000, DispatchClass::Operational))]
        pub fn sudo_send_upward_message(
            origin: OriginFor<T>,
            message: UpwardMessage,
        ) -> DispatchResult {
            ensure_root(origin)?;
            // Best-effort: the send result is deliberately ignored.
            let _ = Self::send_upward_message(message);
            Ok(())
        }

    }
763
    #[pallet::event]
    #[pallet::generate_deposit(pub(super) fn deposit_event)]
    pub enum Event<T: Config> {
        /// The validation function has been scheduled to apply.
        ValidationFunctionStored,
        /// The validation function was applied as of the contained relay chain
        /// block number.
        ValidationFunctionApplied { relay_chain_block_num: RelayChainBlockNumber },
        /// The relay chain aborted the upgrade process.
        ValidationFunctionDiscarded,
        /// Some downward messages have been received and will be processed.
        DownwardMessagesReceived { count: u32 },
        /// Downward messages were processed using the given weight; `dmq_head`
        /// is the resulting message-queue-chain head.
        DownwardMessagesProcessed { weight_used: Weight, dmq_head: relay_chain::Hash },
        /// An upward message was sent to the relay chain.
        UpwardMessageSent { message_hash: Option<XcmHash> },
    }
780
    #[pallet::error]
    pub enum Error<T> {
        /// Attempt to upgrade validation function while existing upgrade pending.
        OverlappingUpgrades,
        /// Polkadot currently prohibits this parachain from upgrading its
        /// validation function.
        ProhibitedByPolkadot,
        /// The supplied validation function has compiled into a blob larger
        /// than Polkadot is willing to run.
        TooBig,
        /// The inherent which supplies the validation data did not run this block.
        ValidationDataNotAvailable,
        /// The inherent which supplies the host configuration did not run this block.
        HostConfigurationNotAvailable,
        /// No validation function upgrade is currently scheduled.
        NotScheduled,
    }
797
    /// Ancestors of the current block that have not yet been observed as
    /// included by the relay chain (oldest first; the newest entry's para head
    /// hash is filled in during `on_initialize`).
    #[pallet::storage]
    pub type UnincludedSegment<T: Config> = StorageValue<_, Vec<Ancestor<T::Hash>>, ValueQuery>;

    /// Aggregated bandwidth usage and consumed signals over the whole
    /// [`UnincludedSegment`].
    #[pallet::storage]
    pub type AggregatedUnincludedSegment<T: Config> =
        StorageValue<_, SegmentTracker<T::Hash>, OptionQuery>;

    /// Validation code awaiting the relay chain's go-ahead signal before it is
    /// applied (taken in `set_validation_data` on `GoAhead`, killed on `Abort`).
    #[pallet::storage]
    pub type PendingValidationCode<T: Config> = StorageValue<_, Vec<u8>, ValueQuery>;

    /// Validation code set in the current block, if any; discarded in the next
    /// block's `on_initialize` when `DidSetValidationCode` was not set.
    #[pallet::storage]
    pub type NewValidationCode<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;

    /// The [`PersistedValidationData`] supplied by this block's inherent;
    /// killed again in the next block's `on_initialize`.
    #[pallet::storage]
    pub type ValidationData<T: Config> = StorageValue<_, PersistedValidationData>;

    /// Whether the validation code was set in this block (transient flag,
    /// killed in `on_finalize`).
    #[pallet::storage]
    pub type DidSetValidationCode<T: Config> = StorageValue<_, bool, ValueQuery>;

    /// The relay parent block number of the previously executed parachain block.
    #[pallet::storage]
    pub type LastRelayChainBlockNumber<T: Config> =
        StorageValue<_, RelayChainBlockNumber, ValueQuery>;

    /// The upgrade restriction signal read from the relay state proof
    /// (refreshed every block by `set_validation_data`).
    #[pallet::storage]
    pub type UpgradeRestrictionSignal<T: Config> =
        StorageValue<_, Option<relay_chain::UpgradeRestriction>, ValueQuery>;

    /// The upgrade go-ahead signal read from the relay state proof; consumed
    /// (`take`n) in `on_finalize` and recorded into the unincluded segment.
    #[pallet::storage]
    pub type UpgradeGoAhead<T: Config> =
        StorageValue<_, Option<relay_chain::UpgradeGoAhead>, ValueQuery>;

    /// The relay chain state proof supplied by this block's inherent.
    #[pallet::storage]
    pub type RelayStateProof<T: Config> = StorageValue<_, sp_trie::StorageProof>;

    /// Snapshot of the relay chain messaging state relevant to this parachain,
    /// read from the state proof and adjusted for the unincluded segment.
    #[pallet::storage]
    pub type RelevantMessagingState<T: Config> = StorageValue<_, MessagingStateSnapshot>;

    /// The abridged relay chain host configuration read from the state proof.
    #[pallet::storage]
    #[pallet::disable_try_decode_storage]
    pub type HostConfiguration<T: Config> = StorageValue<_, AbridgedHostConfiguration>;

    /// Message-queue-chain head of the downward message queue after the last
    /// processed message.
    #[pallet::storage]
    pub type LastDmqMqcHead<T: Config> = StorageValue<_, MessageQueueChain, ValueQuery>;

    /// Per-sender message-queue-chain heads for the HRMP ingress channels
    /// after the last fully processed messages.
    #[pallet::storage]
    pub type LastHrmpMqcHeads<T: Config> =
        StorageValue<_, BTreeMap<ParaId, MessageQueueChain>, ValueQuery>;

    /// Number of downward messages processed in this block (killed in the next
    /// block's `on_initialize`).
    #[pallet::storage]
    pub type ProcessedDownwardMessages<T: Config> = StorageValue<_, u32, ValueQuery>;

    /// Identifier of the last fully processed downward message; used by
    /// `do_create_inherent` to drop already-processed messages.
    #[pallet::storage]
    pub type LastProcessedDownwardMessage<T: Config> = StorageValue<_, InboundMessageId>;

    /// HRMP watermark: the relay block number up to which all inbound HRMP
    /// messages have been processed.
    #[pallet::storage]
    pub type HrmpWatermark<T: Config> = StorageValue<_, relay_chain::BlockNumber, ValueQuery>;

    /// Identifier of the last fully processed HRMP message; used by
    /// `do_create_inherent` to drop already-processed messages.
    #[pallet::storage]
    pub type LastProcessedHrmpMessage<T: Config> = StorageValue<_, InboundMessageId>;

    /// Outbound HRMP messages produced in this block (killed in the next
    /// block's `on_initialize`).
    #[pallet::storage]
    pub type HrmpOutboundMessages<T: Config> =
        StorageValue<_, Vec<OutboundHrmpMessage>, ValueQuery>;

    /// Upward messages selected for dispatch in this block (killed in the next
    /// block's `on_initialize`).
    #[pallet::storage]
    pub type UpwardMessages<T: Config> = StorageValue<_, Vec<UpwardMessage>, ValueQuery>;

    /// Upward messages waiting for relay dispatch-queue capacity; drained in
    /// `on_finalize`.
    #[pallet::storage]
    pub type PendingUpwardMessages<T: Config> = StorageValue<_, Vec<UpwardMessage>, ValueQuery>;

    /// Multiplicative fee factor for UMP delivery, exposed via [`FeeTracker`];
    /// decreases again once the pending queue drains below the threshold.
    #[pallet::storage]
    pub type UpwardDeliveryFeeFactor<T: Config> =
        StorageValue<_, FixedU128, ValueQuery, GetMinFeeFactor<Pallet<T>>>;

    /// Maximum number of HRMP messages announced for this candidate in
    /// `on_initialize`; `take`n again in `on_finalize`.
    #[pallet::storage]
    pub type AnnouncedHrmpMessagesPerCandidate<T: Config> = StorageValue<_, u32, ValueQuery>;

    /// Optional override for [`Config::ReservedXcmpWeight`].
    #[pallet::storage]
    pub type ReservedXcmpWeightOverride<T: Config> = StorageValue<_, Weight>;

    /// Optional override for [`Config::ReservedDmpWeight`].
    #[pallet::storage]
    pub type ReservedDmpWeightOverride<T: Config> = StorageValue<_, Weight>;

    /// Custom head data to report instead of the default one (killed in the
    /// next block's `on_initialize`).
    #[pallet::storage]
    pub type CustomValidationHeadData<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;
976
    #[pallet::inherent]
    impl<T: Config> ProvideInherent for Pallet<T> {
        type Call = Call<T>;
        type Error = sp_inherents::MakeFatalError<()>;
        const INHERENT_IDENTIFIER: InherentIdentifier =
            cumulus_primitives_parachain_inherent::INHERENT_IDENTIFIER;

        /// Build the `set_validation_data` inherent from the provided data,
        /// falling back to the legacy v0 format under its own identifier.
        fn create_inherent(data: &InherentData) -> Option<Self::Call> {
            let data = match data
                .get_data::<ParachainInherentData>(&Self::INHERENT_IDENTIFIER)
                .ok()
                .flatten()
            {
                None => {
                    // Current format missing — try the v0 format and convert.
                    let data = data
                        .get_data::<v0::ParachainInherentData>(
                            &cumulus_primitives_parachain_inherent::PARACHAIN_INHERENT_IDENTIFIER_V0,
                        )
                        .ok()
                        .flatten()?;
                    data.into()
                },
                Some(data) => data,
            };

            Some(Self::do_create_inherent(data))
        }

        fn is_inherent(call: &Self::Call) -> bool {
            matches!(call, Call::set_validation_data { .. })
        }
    }
1013
    /// Genesis configuration — carries no user-settable values.
    #[pallet::genesis_config]
    #[derive(frame_support::DefaultNoBound)]
    pub struct GenesisConfig<T: Config> {
        #[serde(skip)]
        pub _config: core::marker::PhantomData<T>,
    }

    #[pallet::genesis_build]
    impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
        fn build(&self) {
            // NOTE(review): writes an empty value under the raw `:c` storage
            // key at genesis — presumably a marker consumed elsewhere; confirm
            // its purpose before changing.
            sp_io::storage::set(b":c", &[]);
        }
    }
1028}
1029
1030impl<T: Config> Pallet<T> {
1031 pub fn unincluded_segment_size_after(included_hash: T::Hash) -> u32 {
1039 let segment = UnincludedSegment::<T>::get();
1040 crate::unincluded_segment::size_after_included(included_hash, &segment)
1041 }
1042}
1043
1044impl<T: Config> FeeTracker for Pallet<T> {
1045 type Id = ();
1046
1047 fn get_fee_factor(_id: Self::Id) -> FixedU128 {
1048 UpwardDeliveryFeeFactor::<T>::get()
1049 }
1050
1051 fn set_fee_factor(_id: Self::Id, val: FixedU128) {
1052 UpwardDeliveryFeeFactor::<T>::set(val);
1053 }
1054}
1055
1056impl<T: Config> ListChannelInfos for Pallet<T> {
1057 fn outgoing_channels() -> Vec<ParaId> {
1058 let Some(state) = RelevantMessagingState::<T>::get() else { return Vec::new() };
1059 state.egress_channels.into_iter().map(|(id, _)| id).collect()
1060 }
1061}
1062
1063impl<T: Config> GetChannelInfo for Pallet<T> {
1064 fn get_channel_status(id: ParaId) -> ChannelStatus {
1065 let channels = match RelevantMessagingState::<T>::get() {
1080 None => {
1081 log::warn!("calling `get_channel_status` with no RelevantMessagingState?!");
1082 return ChannelStatus::Closed
1083 },
1084 Some(d) => d.egress_channels,
1085 };
1086 let index = match channels.binary_search_by_key(&id, |item| item.0) {
1093 Err(_) => return ChannelStatus::Closed,
1094 Ok(i) => i,
1095 };
1096 let meta = &channels[index].1;
1097 if meta.msg_count + 1 > meta.max_capacity {
1098 return ChannelStatus::Full
1100 }
1101 let max_size_now = meta.max_total_size - meta.total_size;
1102 let max_size_ever = meta.max_message_size;
1103 ChannelStatus::Ready(max_size_now as usize, max_size_ever as usize)
1104 }
1105
1106 fn get_channel_info(id: ParaId) -> Option<ChannelInfo> {
1107 let channels = RelevantMessagingState::<T>::get()?.egress_channels;
1108 let index = channels.binary_search_by_key(&id, |item| item.0).ok()?;
1109 let info = ChannelInfo {
1110 max_capacity: channels[index].1.max_capacity,
1111 max_total_size: channels[index].1.max_total_size,
1112 max_message_size: channels[index].1.max_message_size,
1113 msg_count: channels[index].1.msg_count,
1114 total_size: channels[index].1.total_size,
1115 };
1116 Some(info)
1117 }
1118}
1119
1120impl<T: Config> Pallet<T> {
1121 fn messages_collection_size_limit() -> usize {
1131 let max_block_weight = <T as frame_system::Config>::BlockWeights::get().max_block;
1132 let max_block_pov = max_block_weight.proof_size();
1133 (max_block_pov / 6).saturated_into()
1134 }
1135
    /// Build the `set_validation_data` call from the raw parachain inherent
    /// data: drops inbound messages already processed in previous blocks
    /// (based on the `LastProcessed*` storage values) and abridges the rest to
    /// fit the per-kind size budget from `messages_collection_size_limit`.
    fn do_create_inherent(data: ParachainInherentData) -> Call<T> {
        let (data, mut downward_messages, mut horizontal_messages) =
            deconstruct_parachain_inherent_data(data);
        let last_relay_block_number = LastRelayChainBlockNumber::<T>::get();

        let messages_collection_size_limit = Self::messages_collection_size_limit();
        // Skip DMP messages handled in earlier blocks, then abridge the rest;
        // `into_abridged` decrements `size_limit` by what it consumes.
        let last_processed_msg = LastProcessedDownwardMessage::<T>::get()
            .unwrap_or(InboundMessageId { sent_at: last_relay_block_number, reverse_idx: 0 });
        downward_messages.drop_processed_messages(&last_processed_msg);
        let mut size_limit = messages_collection_size_limit;
        let downward_messages = downward_messages.into_abridged(&mut size_limit);

        // Same for HRMP; note the budget left over from the DMP messages is
        // added on top of the HRMP budget.
        let last_processed_msg = LastProcessedHrmpMessage::<T>::get()
            .unwrap_or(InboundMessageId { sent_at: last_relay_block_number, reverse_idx: 0 });
        horizontal_messages.drop_processed_messages(&last_processed_msg);
        size_limit = size_limit.saturating_add(messages_collection_size_limit);
        let horizontal_messages = horizontal_messages.into_abridged(&mut size_limit);

        let inbound_messages_data =
            InboundMessagesData::new(downward_messages, horizontal_messages);

        Call::set_validation_data { data, inbound_messages_data }
    }
1166
    /// Enqueue all inbound downward messages, extending the message queue
    /// chain (MQC) and asserting that its head matches
    /// `expected_dmq_mqc_head` advertised by the relay chain.
    ///
    /// Records resume-progress in `LastProcessedDownwardMessage`, emits the
    /// `DownwardMessagesReceived`/`DownwardMessagesProcessed` events, and
    /// returns the weight used.
    fn enqueue_inbound_downward_messages(
        expected_dmq_mqc_head: relay_chain::Hash,
        downward_messages: AbridgedInboundDownwardMessages,
    ) -> Weight {
        downward_messages.check_enough_messages_included("DMQ");

        let mut dmq_head = <LastDmqMqcHead<T>>::get();

        let (messages, hashed_messages) = downward_messages.messages();
        let message_count = messages.len() as u32;
        let weight_used = T::WeightInfo::enqueue_inbound_downward_messages(message_count);
        if let Some(last_msg) = messages.last() {
            Self::deposit_event(Event::DownwardMessagesReceived { count: message_count });

            // Extend the MQC with every fully included message.
            for msg in messages {
                dmq_head.extend_downward(msg);
            }
            <LastDmqMqcHead<T>>::put(&dmq_head);
            Self::deposit_event(Event::DownwardMessagesProcessed {
                weight_used,
                dmq_head: dmq_head.head(),
            });

            // Hash-only messages also advance the MQC; count how many of them
            // share the last full message's `sent_at` so processing can resume
            // at the right position next block.
            let mut last_processed_msg =
                InboundMessageId { sent_at: last_msg.sent_at, reverse_idx: 0 };
            for msg in hashed_messages {
                dmq_head.extend_with_hashed_msg(msg);

                if msg.sent_at == last_processed_msg.sent_at {
                    last_processed_msg.reverse_idx += 1;
                }
            }
            LastProcessedDownwardMessage::<T>::put(last_processed_msg);

            // Hand the fully included messages over to the DMP queue.
            T::DmpQueue::handle_messages(downward_messages.bounded_msgs_iter());
        }

        // Consensus-critical: our computed head must match the relay chain's.
        assert_eq!(dmq_head.head(), expected_dmq_mqc_head, "DMQ head mismatch");

        ProcessedDownwardMessages::<T>::put(message_count);

        weight_used
    }
1225
1226 fn check_hrmp_mcq_heads(
1227 ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)],
1228 mqc_heads: &mut BTreeMap<ParaId, MessageQueueChain>,
1229 ) {
1230 for (sender, channel) in ingress_channels {
1238 let cur_head = mqc_heads.entry(*sender).or_default().head();
1239 let target_head = channel.mqc_head.unwrap_or_default();
1240 assert_eq!(cur_head, target_head, "HRMP head mismatch");
1241 }
1242 }
1243
1244 fn check_hrmp_message_metadata(
1249 ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)],
1250 maybe_prev_msg_metadata: &mut Option<(u32, ParaId)>,
1251 msg_metadata: (u32, ParaId),
1252 ) {
1253 if let Some(prev_msg) = maybe_prev_msg_metadata {
1255 assert!(&msg_metadata >= prev_msg, "[HRMP] Messages order violation");
1256 }
1257
1258 let sender = msg_metadata.1;
1261 let maybe_channel_idx =
1262 ingress_channels.binary_search_by_key(&sender, |&(channel_sender, _)| channel_sender);
1263 assert!(
1264 maybe_channel_idx.is_ok(),
1265 "One of the messages submitted by the collator was sent from a sender ({}) \
1266 that doesn't have a channel opened to this parachain",
1267 <ParaId as Into<u32>>::into(sender)
1268 );
1269 }
1270
    /// Process all inbound horizontal (HRMP) messages: extend the per-sender
    /// MQCs and verify them against the ingress channels' advertised heads
    /// (panicking on mismatch), record resume-progress in
    /// `LastProcessedHrmpMessage`, advance the `HrmpWatermark`, and hand the
    /// messages to the XCMP handler. Returns the weight used.
    fn enqueue_inbound_horizontal_messages(
        ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)],
        horizontal_messages: AbridgedInboundHrmpMessages,
        relay_parent_number: relay_chain::BlockNumber,
    ) -> Weight {
        horizontal_messages.check_enough_messages_included("HRMP");

        let (messages, hashed_messages) = horizontal_messages.messages();
        let mut mqc_heads = <LastHrmpMqcHeads<T>>::get();

        // No messages: just validate the (unchanged) MQC heads and advance the
        // watermark straight to the relay parent.
        if messages.is_empty() {
            Self::check_hrmp_mcq_heads(ingress_channels, &mut mqc_heads);
            let last_processed_msg =
                InboundMessageId { sent_at: relay_parent_number, reverse_idx: 0 };
            LastProcessedHrmpMessage::<T>::put(last_processed_msg);
            HrmpWatermark::<T>::put(relay_parent_number);
            return T::DbWeight::get().reads_writes(1, 2);
        }

        let mut prev_msg_metadata = None;
        let mut last_processed_block = HrmpWatermark::<T>::get();
        let mut last_processed_msg = InboundMessageId { sent_at: 0, reverse_idx: 0 };
        for (sender, msg) in messages {
            Self::check_hrmp_message_metadata(
                ingress_channels,
                &mut prev_msg_metadata,
                (msg.sent_at, *sender),
            );
            mqc_heads.entry(*sender).or_default().extend_hrmp(msg);

            // Only relay blocks strictly before the current message's
            // `sent_at` are fully processed; track the previous one as the
            // watermark candidate.
            if msg.sent_at > last_processed_msg.sent_at && last_processed_msg.sent_at > 0 {
                last_processed_block = last_processed_msg.sent_at;
            }
            last_processed_msg.sent_at = msg.sent_at;
        }
        // Persist the heads *before* extending with hash-only messages: those
        // will be delivered in full (and re-extended) in a later block.
        <LastHrmpMqcHeads<T>>::put(&mqc_heads);
        for (sender, msg) in hashed_messages {
            Self::check_hrmp_message_metadata(
                ingress_channels,
                &mut prev_msg_metadata,
                (msg.sent_at, *sender),
            );
            mqc_heads.entry(*sender).or_default().extend_with_hashed_msg(msg);

            // Count hash-only messages sharing the last full message's
            // `sent_at` so processing resumes at the right offset.
            if msg.sent_at == last_processed_msg.sent_at {
                last_processed_msg.reverse_idx += 1;
            }
        }
        // If the last relay block was fully processed (no trailing hash-only
        // messages), the watermark may advance up to it.
        if last_processed_msg.sent_at > 0 && last_processed_msg.reverse_idx == 0 {
            last_processed_block = last_processed_msg.sent_at;
        }
        LastProcessedHrmpMessage::<T>::put(&last_processed_msg);
        Self::check_hrmp_mcq_heads(ingress_channels, &mut mqc_heads);

        let max_weight =
            <ReservedXcmpWeightOverride<T>>::get().unwrap_or_else(T::ReservedXcmpWeight::get);
        let weight_used = T::XcmpMessageHandler::handle_xcmp_messages(
            horizontal_messages.flat_msgs_iter(),
            max_weight,
        );

        HrmpWatermark::<T>::put(last_processed_block);

        weight_used.saturating_add(T::DbWeight::get().reads_writes(2, 3))
    }
1348
    /// Drop from the unincluded segment all blocks up to (and including) the
    /// parachain head most recently included by the relay chain, subtracting
    /// their bandwidth from the aggregate. Returns the weight used.
    ///
    /// Panics if the included head is absent from the proof when required, or
    /// if no capacity remains for the current block afterwards.
    fn maybe_drop_included_ancestors(
        relay_state_proof: &RelayChainStateProof,
        capacity: consensus_hook::UnincludedSegmentCapacity,
    ) -> Weight {
        let mut weight_used = Weight::zero();
        let para_head =
            relay_state_proof.read_included_para_head().ok().map(|h| T::Hashing::hash(&h.0));

        let unincluded_segment_len = <UnincludedSegment<T>>::decode_len().unwrap_or(0);
        weight_used += T::DbWeight::get().reads(1);

        // Resolve the head we must drop up to, depending on whether the
        // consensus hook expects the parent block to already be included.
        let included_head = match (para_head, capacity.is_expecting_included_parent()) {
            (Some(h), true) => {
                assert_eq!(
                    h,
                    frame_system::Pallet::<T>::parent_hash(),
                    "expected parent to be included"
                );

                h
            },
            (Some(h), false) => h,
            (None, true) => {
                // NOTE(review): no head in the proof but the parent is
                // expected to be included — falls back to the parent hash;
                // confirm this matches the consensus-hook contract.
                frame_system::Pallet::<T>::parent_hash()
            },
            (None, false) => panic!("included head not present in relay storage proof"),
        };

        let new_len = {
            let para_head_hash = included_head;
            let dropped: Vec<Ancestor<T::Hash>> = <UnincludedSegment<T>>::mutate(|chain| {
                // Drop everything up to and including the included head.
                let idx = chain
                    .iter()
                    .position(|block| {
                        let head_hash = block
                            .para_head_hash()
                            .expect("para head hash is updated during block initialization; qed");
                        head_hash == &para_head_hash
                    })
                    .map_or(0, |idx| idx + 1);
                chain.drain(..idx).collect()
            });
            weight_used += T::DbWeight::get().reads_writes(1, 1);

            let new_len = unincluded_segment_len - dropped.len();
            if !dropped.is_empty() {
                // Subtract the dropped ancestors' usage from the aggregate.
                <AggregatedUnincludedSegment<T>>::mutate(|agg| {
                    let agg = agg.as_mut().expect(
                        "dropped part of the segment wasn't empty, hence value exists; qed",
                    );
                    for block in dropped {
                        agg.subtract(&block);
                    }
                });
                weight_used += T::DbWeight::get().reads_writes(1, 1);
            }

            new_len as u32
        };

        assert!(new_len < capacity.get(), "no space left for the block in the unincluded segment");
        weight_used
    }
1424
1425 fn adjust_egress_bandwidth_limits() {
1431 let unincluded_segment = match AggregatedUnincludedSegment::<T>::get() {
1432 None => return,
1433 Some(s) => s,
1434 };
1435
1436 <RelevantMessagingState<T>>::mutate(|messaging_state| {
1437 let messaging_state = match messaging_state {
1438 None => return,
1439 Some(s) => s,
1440 };
1441
1442 let used_bandwidth = unincluded_segment.used_bandwidth();
1443
1444 let channels = &mut messaging_state.egress_channels;
1445 for (para_id, used) in used_bandwidth.hrmp_outgoing.iter() {
1446 let i = match channels.binary_search_by_key(para_id, |item| item.0) {
1447 Ok(i) => i,
1448 Err(_) => continue, };
1450
1451 let c = &mut channels[i].1;
1452
1453 c.total_size = (c.total_size + used.total_bytes).min(c.max_total_size);
1454 c.msg_count = (c.msg_count + used.msg_count).min(c.max_capacity);
1455 }
1456
1457 let upward_capacity = &mut messaging_state.relay_dispatch_queue_remaining_capacity;
1458 upward_capacity.remaining_count =
1459 upward_capacity.remaining_count.saturating_sub(used_bandwidth.ump_msg_count);
1460 upward_capacity.remaining_size =
1461 upward_capacity.remaining_size.saturating_sub(used_bandwidth.ump_total_bytes);
1462 });
1463 }
1464
	/// Record a pending validation-code upgrade for the relay chain to pick up.
	///
	/// Stores `code` into `NewValidationCode` and marks `DidSetValidationCode` for this
	/// block.
	fn notify_polkadot_of_pending_upgrade(code: &[u8]) {
		NewValidationCode::<T>::put(code);
		<DidSetValidationCode<T>>::put(true);
	}
1472
	/// The maximum validation-code size allowed by the relay chain host configuration,
	/// or `None` when the configuration is not (yet) available in storage.
	pub fn max_code_size() -> Option<u32> {
		<HostConfiguration<T>>::get().map(|cfg| cfg.max_code_size)
	}
1479
	/// Validate and stage a new validation function (runtime code) upgrade.
	///
	/// Checks, in order (each mapping to the corresponding `Error` variant):
	/// - validation data is available (`ValidationDataNotAvailable`),
	/// - no relay-chain upgrade restriction signal is set (`ProhibitedByPolkadot`),
	/// - no other upgrade is already pending (`OverlappingUpgrades`),
	/// - the host configuration is known (`HostConfigurationNotAvailable`),
	/// - the code fits within `max_code_size` (`TooBig`).
	///
	/// On success the code is announced via `NewValidationCode`, stored in
	/// `PendingValidationCode`, and a `ValidationFunctionStored` event is emitted.
	pub fn schedule_code_upgrade(validation_function: Vec<u8>) -> DispatchResult {
		ensure!(<ValidationData<T>>::exists(), Error::<T>::ValidationDataNotAvailable,);
		ensure!(<UpgradeRestrictionSignal<T>>::get().is_none(), Error::<T>::ProhibitedByPolkadot);

		ensure!(!<PendingValidationCode<T>>::exists(), Error::<T>::OverlappingUpgrades);
		let cfg = HostConfiguration::<T>::get().ok_or(Error::<T>::HostConfigurationNotAvailable)?;
		ensure!(validation_function.len() <= cfg.max_code_size as usize, Error::<T>::TooBig);

		// Announce to the relay chain, then persist locally as the pending code.
		Self::notify_polkadot_of_pending_upgrade(&validation_function);
		<PendingValidationCode<T>>::put(validation_function);
		Self::deposit_event(Event::ValidationFunctionStored);

		Ok(())
	}
1505
	/// Assemble the `CollationInfo` for the just-built block with the given `header`.
	///
	/// Gathers the outputs recorded in storage during block execution: HRMP watermark,
	/// outbound horizontal and upward messages, the processed-downward-message count and
	/// any newly scheduled validation code. The head data defaults to the SCALE-encoded
	/// `header` unless overridden via `CustomValidationHeadData`.
	pub fn collect_collation_info(header: &HeaderFor<T>) -> CollationInfo {
		CollationInfo {
			hrmp_watermark: HrmpWatermark::<T>::get(),
			horizontal_messages: HrmpOutboundMessages::<T>::get(),
			upward_messages: UpwardMessages::<T>::get(),
			processed_downward_messages: ProcessedDownwardMessages::<T>::get(),
			new_validation_code: NewValidationCode::<T>::get().map(Into::into),
			head_data: CustomValidationHeadData::<T>::get()
				.map_or_else(|| header.encode(), |v| v)
				.into(),
		}
	}
1527
	/// The core selector and claim-queue offset for the next block, as chosen by the
	/// configured `T::SelectCore` implementation.
	pub fn core_selector() -> (CoreSelector, ClaimQueueOffset) {
		T::SelectCore::select_next_core()
	}
1532
	/// Override the head data sent to the relay chain for this block.
	///
	/// The stored value replaces the encoded block header in `collect_collation_info`.
	pub fn set_custom_validation_head_data(head_data: Vec<u8>) {
		CustomValidationHeadData::<T>::put(head_data);
	}
1548
	/// Append the experimental UMP signals to this block's upward messages.
	///
	/// Pushes the `UMP_SEPARATOR` followed by a `SelectCore` signal carrying the core
	/// selector and claim-queue offset chosen for this block.
	#[cfg(feature = "experimental-ump-signals")]
	fn send_ump_signal() {
		use cumulus_primitives_core::relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR};

		UpwardMessages::<T>::mutate(|up| {
			// The separator marks the boundary between regular messages and signals.
			up.push(UMP_SEPARATOR);

			let core_selector = T::SelectCore::selected_core();
			up.push(UMPSignal::SelectCore(core_selector.0, core_selector.1).encode());
		});
	}
1562
1563 #[cfg(any(feature = "runtime-benchmarks", feature = "std"))]
1568 pub fn open_outbound_hrmp_channel_for_benchmarks_or_tests(target_parachain: ParaId) {
1569 RelevantMessagingState::<T>::put(MessagingStateSnapshot {
1570 dmq_mqc_head: Default::default(),
1571 relay_dispatch_queue_remaining_capacity: Default::default(),
1572 ingress_channels: Default::default(),
1573 egress_channels: vec![(
1574 target_parachain,
1575 cumulus_primitives_core::AbridgedHrmpChannel {
1576 max_capacity: 10,
1577 max_total_size: 10_000_000_u32,
1578 max_message_size: 10_000_000_u32,
1579 msg_count: 5,
1580 total_size: 5_000_000_u32,
1581 mqc_head: None,
1582 },
1583 )],
1584 })
1585 }
1586
	/// Open an outbound HRMP channel to `target_parachain` with the given `channel`
	/// parameters, for use in benchmarks and tests only.
	///
	/// Overwrites `RelevantMessagingState` with a snapshot containing exactly this one
	/// egress channel and default values for everything else.
	#[cfg(any(feature = "runtime-benchmarks", feature = "std"))]
	pub fn open_custom_outbound_hrmp_channel_for_benchmarks_or_tests(
		target_parachain: ParaId,
		channel: cumulus_primitives_core::AbridgedHrmpChannel,
	) {
		RelevantMessagingState::<T>::put(MessagingStateSnapshot {
			dmq_mqc_head: Default::default(),
			relay_dispatch_queue_remaining_capacity: Default::default(),
			ingress_channels: Default::default(),
			egress_channels: vec![(target_parachain, channel)],
		})
	}
1603
	/// Prepare storage preconditions for benchmarking a code upgrade: installs a minimal
	/// `PersistedValidationData` and an `AbridgedHostConfiguration` with the given
	/// `max_code_size`.
	#[cfg(feature = "runtime-benchmarks")]
	pub fn initialize_for_set_code_benchmark(max_code_size: u32) {
		// Pretend validation data was received, with benchmark-friendly dummy values.
		let vfp = PersistedValidationData {
			parent_head: polkadot_parachain_primitives::primitives::HeadData(Default::default()),
			relay_parent_number: 1,
			relay_parent_storage_root: Default::default(),
			max_pov_size: 1_000,
		};
		<ValidationData<T>>::put(&vfp);

		// A host configuration that permits the upgrade up to `max_code_size`.
		let host_config = AbridgedHostConfiguration {
			max_code_size,
			max_head_data_size: 32 * 1024,
			max_upward_queue_count: 8,
			max_upward_queue_size: 1024 * 1024,
			max_upward_message_size: 4 * 1024,
			max_upward_message_num_per_candidate: 2,
			hrmp_max_message_num_per_candidate: 2,
			validation_upgrade_cooldown: 2,
			validation_upgrade_delay: 2,
			async_backing_params: relay_chain::AsyncBackingParams {
				allowed_ancestry_len: 0,
				max_candidate_depth: 0,
			},
		};
		<HostConfiguration<T>>::put(host_config);
	}
1634}
1635
/// Implements `frame_system::SetCode` by routing runtime-code changes through this
/// pallet's validation-function upgrade flow (`schedule_code_upgrade`).
pub struct ParachainSetCode<T>(core::marker::PhantomData<T>);
impl<T: Config> frame_system::SetCode<T> for ParachainSetCode<T> {
	fn set_code(code: Vec<u8>) -> DispatchResult {
		Pallet::<T>::schedule_code_upgrade(code)
	}
}
1643
1644impl<T: Config> Pallet<T> {
1645 pub fn send_upward_message(message: UpwardMessage) -> Result<(u32, XcmHash), MessageSendError> {
1651 let message_len = message.len();
1652 if let Some(cfg) = HostConfiguration::<T>::get() {
1665 if message_len > cfg.max_upward_message_size as usize {
1666 return Err(MessageSendError::TooBig);
1667 }
1668 let threshold =
1669 cfg.max_upward_queue_size.saturating_div(ump_constants::THRESHOLD_FACTOR);
1670 <PendingUpwardMessages<T>>::append(message.clone());
1673 let pending_messages = PendingUpwardMessages::<T>::get();
1674 let total_size: usize = pending_messages.iter().map(UpwardMessage::len).sum();
1675 if total_size > threshold as usize {
1676 Self::increase_fee_factor((), message_len as u128);
1678 }
1679 } else {
1680 <PendingUpwardMessages<T>>::append(message.clone());
1690 };
1691
1692 let hash = sp_io::hashing::blake2_256(&message);
1695 Self::deposit_event(Event::UpwardMessageSent { message_hash: Some(hash) });
1696 Ok((0, hash))
1697 }
1698
1699 pub fn last_relay_block_number() -> RelayChainBlockNumber {
1702 LastRelayChainBlockNumber::<T>::get()
1703 }
1704}
1705
1706impl<T: Config> UpwardMessageSender for Pallet<T> {
1707 fn send_upward_message(message: UpwardMessage) -> Result<(u32, XcmHash), MessageSendError> {
1708 Self::send_upward_message(message)
1709 }
1710
1711 fn can_send_upward_message(message: &UpwardMessage) -> Result<(), MessageSendError> {
1712 let max_upward_message_size = HostConfiguration::<T>::get()
1713 .map(|cfg| cfg.max_upward_message_size)
1714 .ok_or(MessageSendError::Other)?;
1715 if message.len() > max_upward_message_size as usize {
1716 Err(MessageSendError::TooBig)
1717 } else {
1718 Ok(())
1719 }
1720 }
1721
1722 #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
1723 fn ensure_successful_delivery() {
1724 const MAX_UPWARD_MESSAGE_SIZE: u32 = 65_531 * 3;
1725 const MAX_CODE_SIZE: u32 = 3 * 1024 * 1024;
1726 HostConfiguration::<T>::mutate(|cfg| match cfg {
1727 Some(cfg) => cfg.max_upward_message_size = MAX_UPWARD_MESSAGE_SIZE,
1728 None =>
1729 *cfg = Some(AbridgedHostConfiguration {
1730 max_code_size: MAX_CODE_SIZE,
1731 max_head_data_size: 32 * 1024,
1732 max_upward_queue_count: 8,
1733 max_upward_queue_size: 1024 * 1024,
1734 max_upward_message_size: MAX_UPWARD_MESSAGE_SIZE,
1735 max_upward_message_num_per_candidate: 2,
1736 hrmp_max_message_num_per_candidate: 2,
1737 validation_upgrade_cooldown: 2,
1738 validation_upgrade_delay: 2,
1739 async_backing_params: relay_chain::AsyncBackingParams {
1740 allowed_ancestry_len: 0,
1741 max_candidate_depth: 0,
1742 },
1743 }),
1744 })
1745 }
1746}
1747
1748impl<T: Config> InspectMessageQueues for Pallet<T> {
1749 fn clear_messages() {
1750 PendingUpwardMessages::<T>::kill();
1751 }
1752
1753 fn get_messages() -> Vec<(VersionedLocation, Vec<VersionedXcm<()>>)> {
1754 use xcm::prelude::*;
1755
1756 let messages: Vec<VersionedXcm<()>> = PendingUpwardMessages::<T>::get()
1757 .iter()
1758 .map(|encoded_message| {
1759 VersionedXcm::<()>::decode_all_with_depth_limit(
1760 MAX_XCM_DECODE_DEPTH,
1761 &mut &encoded_message[..],
1762 )
1763 .unwrap()
1764 })
1765 .collect();
1766
1767 if messages.is_empty() {
1768 vec![]
1769 } else {
1770 vec![(VersionedLocation::from(Location::parent()), messages)]
1771 }
1772 }
1773}
1774
#[cfg(feature = "runtime-benchmarks")]
impl<T: Config> polkadot_runtime_parachains::EnsureForParachain for Pallet<T> {
	/// Lazily open an outbound HRMP channel towards `para_id` when none is open yet.
	fn ensure(para_id: ParaId) {
		if matches!(Self::get_channel_status(para_id), ChannelStatus::Closed) {
			Self::open_outbound_hrmp_channel_for_benchmarks_or_tests(para_id)
		}
	}
}
1783
/// Something that can check the inherents of a block against relay chain data.
#[deprecated(note = "This trait is deprecated and will be removed by September 2024. \
			Consider switching to `cumulus-pallet-parachain-system::ConsensusHook`")]
pub trait CheckInherents<Block: BlockT> {
	/// Check the inherents of `block`, given the relay-chain state proof the block was
	/// built against.
	fn check_inherents(
		block: &Block,
		validation_data: &RelayChainStateProof,
	) -> frame_support::inherent::CheckInherentsResult;
}
1797
/// A no-op `CheckInherents` implementation that reports every block as valid.
#[doc(hidden)]
pub struct DummyCheckInherents<Block>(core::marker::PhantomData<Block>);

#[allow(deprecated)]
impl<Block: BlockT> CheckInherents<Block> for DummyCheckInherents<Block> {
	fn check_inherents(
		_: &Block,
		_: &RelayChainStateProof,
	) -> frame_support::inherent::CheckInherentsResult {
		// A fresh result contains no errors, i.e. the check always passes.
		sp_inherents::CheckInherentsResult::new()
	}
}
1811
/// Hooks for system-related events of this pallet. Implemented for tuples of up to 30
/// elements, each element being notified in turn.
#[impl_trait_for_tuples::impl_for_tuples(30)]
pub trait OnSystemEvent {
	/// Called with the `PersistedValidationData` for the block.
	/// NOTE(review): presumably invoked once per block when the data is set — confirm
	/// against the inherent handling.
	fn on_validation_data(data: &PersistedValidationData);
	/// Called when a scheduled validation code upgrade has been applied.
	fn on_validation_code_applied();
}
1827
/// A snapshot of the relay chain state relevant to the current parachain block.
#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Default, RuntimeDebug)]
pub struct RelayChainState {
	/// The relay-parent block number.
	pub number: relay_chain::BlockNumber,
	/// The relay-parent storage root.
	pub state_root: relay_chain::Hash,
}
1836
/// Provides access to the current [`RelayChainState`] for other runtime modules.
pub trait RelaychainStateProvider {
	/// The relay chain state (block number and storage root) this block builds upon.
	fn current_relay_chain_state() -> RelayChainState;

	/// Benchmark-only helper to override the reported relay chain state.
	/// Default implementation is a no-op.
	#[cfg(feature = "runtime-benchmarks")]
	fn set_current_relay_chain_state(_state: RelayChainState) {}
}
1853
/// Deprecated alias kept for backwards compatibility.
#[deprecated = "Use `RelaychainDataProvider` instead"]
pub type RelaychainBlockNumberProvider<T> = RelaychainDataProvider<T>;

/// Provides relay-chain data (block number and state) to the runtime, sourced from the
/// current `ValidationData`, with a fallback to the last recorded relay block number.
pub struct RelaychainDataProvider<T>(core::marker::PhantomData<T>);
1874
1875impl<T: Config> BlockNumberProvider for RelaychainDataProvider<T> {
1876 type BlockNumber = relay_chain::BlockNumber;
1877
1878 fn current_block_number() -> relay_chain::BlockNumber {
1879 ValidationData::<T>::get()
1880 .map(|d| d.relay_parent_number)
1881 .unwrap_or_else(|| Pallet::<T>::last_relay_block_number())
1882 }
1883
1884 #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
1885 fn set_block_number(block: Self::BlockNumber) {
1886 let mut validation_data = ValidationData::<T>::get().unwrap_or_else(||
1887 PersistedValidationData {
1889 parent_head: vec![].into(),
1890 relay_parent_number: Default::default(),
1891 max_pov_size: Default::default(),
1892 relay_parent_storage_root: Default::default(),
1893 });
1894 validation_data.relay_parent_number = block;
1895 ValidationData::<T>::put(validation_data)
1896 }
1897}
1898
1899impl<T: Config> RelaychainStateProvider for RelaychainDataProvider<T> {
1900 fn current_relay_chain_state() -> RelayChainState {
1901 ValidationData::<T>::get()
1902 .map(|d| RelayChainState {
1903 number: d.relay_parent_number,
1904 state_root: d.relay_parent_storage_root,
1905 })
1906 .unwrap_or_default()
1907 }
1908
1909 #[cfg(feature = "runtime-benchmarks")]
1910 fn set_current_relay_chain_state(state: RelayChainState) {
1911 let mut validation_data = ValidationData::<T>::get().unwrap_or_else(||
1912 PersistedValidationData {
1914 parent_head: vec![].into(),
1915 relay_parent_number: Default::default(),
1916 max_pov_size: Default::default(),
1917 relay_parent_storage_root: Default::default(),
1918 });
1919 validation_data.relay_parent_number = state.number;
1920 validation_data.relay_parent_storage_root = state.state_root;
1921 ValidationData::<T>::put(validation_data)
1922 }
1923}