// cumulus_pallet_parachain_system/lib.rs

1// Copyright (C) Parity Technologies (UK) Ltd.
2// This file is part of Cumulus.
3// SPDX-License-Identifier: Apache-2.0
4
5// Licensed under the Apache License, Version 2.0 (the "License");
6// you may not use this file except in compliance with the License.
7// You may obtain a copy of the License at
8//
9// 	http://www.apache.org/licenses/LICENSE-2.0
10//
11// Unless required by applicable law or agreed to in writing, software
12// distributed under the License is distributed on an "AS IS" BASIS,
13// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14// See the License for the specific language governing permissions and
15// limitations under the License.
16
17#![cfg_attr(not(feature = "std"), no_std)]
18
19//! `cumulus-pallet-parachain-system` is a base pallet for Cumulus-based parachains.
20//!
21//! This pallet handles low-level details of being a parachain. Its responsibilities include:
22//!
23//! - ingestion of the parachain validation data;
24//! - ingestion and dispatch of incoming downward and lateral messages;
25//! - coordinating upgrades with the Relay Chain; and
26//! - communication of parachain outputs, such as sent messages, signaling an upgrade, etc.
27//!
28//! Users must ensure that they register this pallet as an inherent provider.
29
30extern crate alloc;
31
32use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec};
33use codec::{Decode, DecodeLimit, Encode};
34use core::{cmp, marker::PhantomData};
35use cumulus_primitives_core::{
36	relay_chain::{
37		self,
38		vstaging::{ClaimQueueOffset, CoreSelector, DEFAULT_CLAIM_QUEUE_OFFSET},
39	},
40	AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, GetChannelInfo,
41	ListChannelInfos, MessageSendError, OutboundHrmpMessage, ParaId, PersistedValidationData,
42	UpwardMessage, UpwardMessageSender, XcmpMessageHandler, XcmpMessageSource,
43};
44use cumulus_primitives_parachain_inherent::{v0, MessageQueueChain, ParachainInherentData};
45use frame_support::{
46	dispatch::{DispatchClass, DispatchResult},
47	ensure,
48	inherent::{InherentData, InherentIdentifier, ProvideInherent},
49	traits::{Get, HandleMessage},
50	weights::Weight,
51};
52use frame_system::{ensure_none, ensure_root, pallet_prelude::HeaderFor};
53use parachain_inherent::{
54	deconstruct_parachain_inherent_data, AbridgedInboundDownwardMessages,
55	AbridgedInboundHrmpMessages, BasicParachainInherentData, InboundMessageId, InboundMessagesData,
56};
57use polkadot_parachain_primitives::primitives::RelayChainBlockNumber;
58use polkadot_runtime_parachains::{FeeTracker, GetMinFeeFactor};
59use scale_info::TypeInfo;
60use sp_runtime::{
61	traits::{Block as BlockT, BlockNumberProvider, Hash, One},
62	FixedU128, RuntimeDebug, SaturatedConversion,
63};
64use xcm::{latest::XcmHash, VersionedLocation, VersionedXcm, MAX_XCM_DECODE_DEPTH};
65use xcm_builder::InspectMessageQueues;
66
67mod benchmarking;
68pub mod migration;
69mod mock;
70#[cfg(test)]
71mod tests;
72pub mod weights;
73
74pub use weights::WeightInfo;
75
76mod unincluded_segment;
77
78pub mod consensus_hook;
79pub mod relay_state_snapshot;
80#[macro_use]
81pub mod validate_block;
82mod descendant_validation;
83pub mod parachain_inherent;
84
85use unincluded_segment::{
86	HrmpChannelUpdate, HrmpWatermarkUpdate, OutboundBandwidthLimits, SegmentTracker,
87};
88
89pub use consensus_hook::{ConsensusHook, ExpectParentIncluded};
90/// Register the `validate_block` function that is used by parachains to validate blocks on a
91/// validator.
92///
93/// Does *nothing* when `std` feature is enabled.
94///
95/// Expects as parameters the runtime, a block executor and an inherent checker.
96///
97/// # Example
98///
99/// ```
100///     struct BlockExecutor;
101///     struct Runtime;
102///     struct CheckInherents;
103///
104///     cumulus_pallet_parachain_system::register_validate_block! {
105///         Runtime = Runtime,
///         BlockExecutor = BlockExecutor,
107///         CheckInherents = CheckInherents,
108///     }
109///
110/// # fn main() {}
111/// ```
112pub use cumulus_pallet_parachain_system_proc_macro::register_validate_block;
113pub use relay_state_snapshot::{MessagingStateSnapshot, RelayChainStateProof};
114pub use unincluded_segment::{Ancestor, UsedBandwidth};
115
116pub use pallet::*;
117
118const LOG_TARGET: &str = "parachain-system";
119
/// Something that can check the associated relay block number.
///
/// Each Parachain block is built in the context of a relay chain block, this trait allows us
/// to validate the given relay chain block number. With async backing it is legal to build
/// multiple Parachain blocks per relay chain parent. With this trait it is possible for the
/// Parachain to ensure that still only one Parachain block is built per relay chain parent.
///
/// By default [`RelayNumberStrictlyIncreases`] and [`AnyRelayNumber`] are provided.
pub trait CheckAssociatedRelayNumber {
	/// Check the current relay number versus the previous relay number.
	///
	/// The implementation should panic when there is something wrong.
	fn check_associated_relay_number(
		current: RelayChainBlockNumber,
		previous: RelayChainBlockNumber,
	);
}
137
138/// Provides an implementation of [`CheckAssociatedRelayNumber`].
139///
140/// It will ensure that the associated relay block number strictly increases between Parachain
141/// blocks. This should be used by production Parachains when in doubt.
142pub struct RelayNumberStrictlyIncreases;
143
144impl CheckAssociatedRelayNumber for RelayNumberStrictlyIncreases {
145	fn check_associated_relay_number(
146		current: RelayChainBlockNumber,
147		previous: RelayChainBlockNumber,
148	) {
149		if current <= previous {
150			panic!("Relay chain block number needs to strictly increase between Parachain blocks!")
151		}
152	}
153}
154
/// Provides an implementation of [`CheckAssociatedRelayNumber`].
///
/// This will accept any relay chain block number combination. This is mainly useful for
/// test parachains.
pub struct AnyRelayNumber;

impl CheckAssociatedRelayNumber for AnyRelayNumber {
	// Deliberately a no-op: every combination of relay block numbers is accepted.
	fn check_associated_relay_number(_: RelayChainBlockNumber, _: RelayChainBlockNumber) {}
}
164
165/// Provides an implementation of [`CheckAssociatedRelayNumber`].
166///
167/// It will ensure that the associated relay block number monotonically increases between Parachain
168/// blocks. This should be used when asynchronous backing is enabled.
169pub struct RelayNumberMonotonicallyIncreases;
170
171impl CheckAssociatedRelayNumber for RelayNumberMonotonicallyIncreases {
172	fn check_associated_relay_number(
173		current: RelayChainBlockNumber,
174		previous: RelayChainBlockNumber,
175	) {
176		if current < previous {
177			panic!("Relay chain block number needs to monotonically increase between Parachain blocks!")
178		}
179	}
180}
181
/// The max length of a DMP message.
///
/// Derived from the [`HandleMessage`] implementation configured as [`Config::DmpQueue`].
pub type MaxDmpMessageLenOf<T> = <<T as Config>::DmpQueue as HandleMessage>::MaxMessageLen;
184
/// Constants used when computing the UMP (upward message passing) fee threshold.
pub mod ump_constants {
	/// `host_config.max_upward_queue_size / THRESHOLD_FACTOR` is the threshold after which delivery
	/// starts getting exponentially more expensive.
	/// `2` means the price starts to increase when queue is half full.
	pub const THRESHOLD_FACTOR: u32 = 2;
}
191
/// Trait for selecting the next core to build the candidate for.
///
/// Two implementations are provided in this crate: [`DefaultCoreSelector`] and
/// [`LookaheadCoreSelector`].
pub trait SelectCore {
	/// Core selector information for the current block.
	fn selected_core() -> (CoreSelector, ClaimQueueOffset);
	/// Core selector information for the next block.
	fn select_next_core() -> (CoreSelector, ClaimQueueOffset);
}
199
200/// The default core selection policy.
201pub struct DefaultCoreSelector<T>(PhantomData<T>);
202
203impl<T: frame_system::Config> SelectCore for DefaultCoreSelector<T> {
204	fn selected_core() -> (CoreSelector, ClaimQueueOffset) {
205		let core_selector = frame_system::Pallet::<T>::block_number().using_encoded(|b| b[0]);
206
207		(CoreSelector(core_selector), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET))
208	}
209
210	fn select_next_core() -> (CoreSelector, ClaimQueueOffset) {
211		let core_selector =
212			(frame_system::Pallet::<T>::block_number() + One::one()).using_encoded(|b| b[0]);
213
214		(CoreSelector(core_selector), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET))
215	}
216}
217
218/// Core selection policy that builds on claim queue offset 1.
219pub struct LookaheadCoreSelector<T>(PhantomData<T>);
220
221impl<T: frame_system::Config> SelectCore for LookaheadCoreSelector<T> {
222	fn selected_core() -> (CoreSelector, ClaimQueueOffset) {
223		let core_selector = frame_system::Pallet::<T>::block_number().using_encoded(|b| b[0]);
224
225		(CoreSelector(core_selector), ClaimQueueOffset(1))
226	}
227
228	fn select_next_core() -> (CoreSelector, ClaimQueueOffset) {
229		let core_selector =
230			(frame_system::Pallet::<T>::block_number() + One::one()).using_encoded(|b| b[0]);
231
232		(CoreSelector(core_selector), ClaimQueueOffset(1))
233	}
234}
235
236#[frame_support::pallet]
237pub mod pallet {
238	use super::*;
239	use frame_support::pallet_prelude::*;
240	use frame_system::pallet_prelude::*;
241
	/// The pallet type. Storage info is intentionally opted out of
	/// (`without_storage_info`) because several storage items use unbounded `Vec`s.
	#[pallet::pallet]
	#[pallet::storage_version(migration::STORAGE_VERSION)]
	#[pallet::without_storage_info]
	pub struct Pallet<T>(_);
246
	#[pallet::config]
	pub trait Config: frame_system::Config<OnSetCode = ParachainSetCode<Self>> {
		/// The overarching event type.
		// NOTE(review): the `allow(deprecated)` presumably silences FRAME's deprecation of
		// explicitly declared `RuntimeEvent` associated types — confirm against the FRAME
		// version in use before removing.
		#[allow(deprecated)]
		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;

		/// Something which can be notified when the validation data is set.
		type OnSystemEvent: OnSystemEvent;

		/// Returns the parachain ID we are running with.
		#[pallet::constant]
		type SelfParaId: Get<ParaId>;

		/// The place where outbound XCMP messages come from. This is queried in `finalize_block`.
		type OutboundXcmpMessageSource: XcmpMessageSource;

		/// Queues inbound downward messages for delayed processing.
		///
		/// All inbound DMP messages from the relay are pushed into this. The handler is expected to
		/// eventually process all the messages that are pushed to it.
		type DmpQueue: HandleMessage;

		/// The weight we reserve at the beginning of the block for processing DMP messages.
		type ReservedDmpWeight: Get<Weight>;

		/// The message handler that will be invoked when messages are received via XCMP.
		///
		/// This should normally link to the XCMP Queue pallet.
		type XcmpMessageHandler: XcmpMessageHandler;

		/// The weight we reserve at the beginning of the block for processing XCMP messages.
		type ReservedXcmpWeight: Get<Weight>;

		/// Something that can check the associated relay parent block number.
		type CheckAssociatedRelayNumber: CheckAssociatedRelayNumber;

		/// Weight info for functions and calls.
		type WeightInfo: WeightInfo;

		/// An entry-point for higher-level logic to manage the backlog of unincluded parachain
		/// blocks and authorship rights for those blocks.
		///
		/// Typically, this should be a hook tailored to the collator-selection/consensus mechanism
		/// that is used for this chain.
		///
		/// However, to maintain the same behavior as prior to asynchronous backing, provide the
		/// [`consensus_hook::ExpectParentIncluded`] here. This is only necessary in the case
		/// that collators aren't expected to have node versions that supply the included block
		/// in the relay-chain state proof.
		type ConsensusHook: ConsensusHook;

		/// Select core.
		type SelectCore: SelectCore;

		/// The offset between the tip of the relay chain and the parent relay block used as parent
		/// when authoring a parachain block.
		///
		/// This setting directly impacts the number of descendant headers that are expected in the
		/// `set_validation_data` inherent.
		///
		/// For any setting `N` larger than zero, the inherent expects that the inherent includes
		/// the relay parent plus `N` descendants. These headers are required to validate that new
		/// parachain blocks are authored at the correct offset.
		///
		/// While this helps to reduce forks on the parachain side, it increases the delay for
		/// processing XCM messages. So, the value should be chosen wisely.
		///
		/// If set to 0, this config has no impact.
		type RelayParentOffset: Get<u32>;
	}
317
	#[pallet::hooks]
	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
		/// Handles actually sending upward messages by moving them from `PendingUpwardMessages` to
		/// `UpwardMessages`. Decreases the delivery fee factor if after sending messages, the queue
		/// total size is less than the threshold (see [`ump_constants::THRESHOLD_FACTOR`]).
		/// Also does the sending for HRMP messages it takes from `OutboundXcmpMessageSource`.
		fn on_finalize(_: BlockNumberFor<T>) {
			// Clear per-block values. `UpgradeRestrictionSignal` and `UpgradeGoAhead` are
			// written by `set_validation_data` earlier in this same block.
			<DidSetValidationCode<T>>::kill();
			<UpgradeRestrictionSignal<T>>::kill();
			let relay_upgrade_go_ahead = <UpgradeGoAhead<T>>::take();

			let vfp = <ValidationData<T>>::get().expect(
				r"Missing required set_validation_data inherent. This inherent must be
				present in every block. This error typically occurs when the set_validation_data
				execution failed and was rejected by the block builder. Check earlier log entries
				for the specific cause of the failure.",
			);

			// Remember the relay parent this block was built on; read by
			// `T::CheckAssociatedRelayNumber` in the next block's `set_validation_data`.
			LastRelayChainBlockNumber::<T>::put(vfp.relay_parent_number);

			let host_config = match HostConfiguration::<T>::get() {
				Some(ok) => ok,
				None => {
					debug_assert!(
						false,
						"host configuration is promised to set until `on_finalize`; qed",
					);
					return
				},
			};

			// Before updating the relevant messaging state, we need to extract
			// the total bandwidth limits for the purpose of updating the unincluded
			// segment.
			let total_bandwidth_out = match RelevantMessagingState::<T>::get() {
				Some(s) => OutboundBandwidthLimits::from_relay_chain_state(&s),
				None => {
					debug_assert!(
						false,
						"relevant messaging state is promised to be set until `on_finalize`; \
							qed",
					);
					return
				},
			};

			// After this point, the `RelevantMessagingState` in storage reflects the
			// unincluded segment.
			Self::adjust_egress_bandwidth_limits();

			let (ump_msg_count, ump_total_bytes) = <PendingUpwardMessages<T>>::mutate(|up| {
				let (available_capacity, available_size) = match RelevantMessagingState::<T>::get()
				{
					Some(limits) => (
						limits.relay_dispatch_queue_remaining_capacity.remaining_count,
						limits.relay_dispatch_queue_remaining_capacity.remaining_size,
					),
					None => {
						debug_assert!(
							false,
							"relevant messaging state is promised to be set until `on_finalize`; \
								qed",
						);
						return (0, 0)
					},
				};

				let available_capacity =
					cmp::min(available_capacity, host_config.max_upward_message_num_per_candidate);

				// Count the number of messages we can possibly fit in the given constraints, i.e.
				// available_capacity and available_size. `checked_sub` returning `None` ends the
				// scan at the first message that would exceed either limit.
				let (num, total_size) = up
					.iter()
					.scan((0u32, 0u32), |state, msg| {
						let (cap_used, size_used) = *state;
						let new_cap = cap_used.saturating_add(1);
						let new_size = size_used.saturating_add(msg.len() as u32);
						match available_capacity
							.checked_sub(new_cap)
							.and(available_size.checked_sub(new_size))
						{
							Some(_) => {
								*state = (new_cap, new_size);
								Some(*state)
							},
							_ => None,
						}
					})
					.last()
					.unwrap_or_default();

				// TODO: #274 Return messages that no longer fit into the queue back to the sender.

				UpwardMessages::<T>::put(&up[..num as usize]);
				*up = up.split_off(num as usize);

				// Send the core selector UMP signal. This is experimental until relay chain
				// validators are upgraded to handle ump signals.
				#[cfg(feature = "experimental-ump-signals")]
				Self::send_ump_signal();

				// If the total size of the pending messages is less than the threshold,
				// we decrease the fee factor, since the queue is less congested.
				// This makes delivery of new messages cheaper.
				let threshold = host_config
					.max_upward_queue_size
					.saturating_div(ump_constants::THRESHOLD_FACTOR);
				let remaining_total_size: usize = up.iter().map(UpwardMessage::len).sum();
				if remaining_total_size <= threshold as usize {
					Self::decrease_fee_factor(());
				}

				(num, total_size)
			});

			// Sending HRMP messages is a little bit more involved. There are the following
			// constraints:
			//
			// - a channel should exist (and it can be closed while a message is buffered),
			// - at most one message can be sent in a channel,
			// - the sent out messages should be ordered by ascension of recipient para id.
			// - the capacity and total size of the channel is limited,
			// - the maximum size of a message is limited (and can potentially be changed),

			let maximum_channels = host_config
				.hrmp_max_message_num_per_candidate
				.min(<AnnouncedHrmpMessagesPerCandidate<T>>::take())
				as usize;

			// Note: this internally calls the `GetChannelInfo` implementation for this
			// pallet, which draws on the `RelevantMessagingState`. That in turn has
			// been adjusted above to reflect the correct limits in all channels.
			let outbound_messages =
				T::OutboundXcmpMessageSource::take_outbound_messages(maximum_channels)
					.into_iter()
					.map(|(recipient, data)| OutboundHrmpMessage { recipient, data })
					.collect::<Vec<_>>();

			// Update the unincluded segment length; capacity checks were done previously in
			// `set_validation_data`, so this can be done unconditionally.
			{
				let hrmp_outgoing = outbound_messages
					.iter()
					.map(|msg| {
						(
							msg.recipient,
							HrmpChannelUpdate { msg_count: 1, total_bytes: msg.data.len() as u32 },
						)
					})
					.collect();
				let used_bandwidth =
					UsedBandwidth { ump_msg_count, ump_total_bytes, hrmp_outgoing };

				let mut aggregated_segment =
					AggregatedUnincludedSegment::<T>::get().unwrap_or_default();
				let consumed_go_ahead_signal =
					if aggregated_segment.consumed_go_ahead_signal().is_some() {
						// Some ancestor within the segment already processed this signal --
						// validated during inherent creation.
						None
					} else {
						relay_upgrade_go_ahead
					};
				// The bandwidth constructed was ensured to satisfy relay chain constraints.
				let ancestor = Ancestor::new_unchecked(used_bandwidth, consumed_go_ahead_signal);

				let watermark = HrmpWatermark::<T>::get();
				let watermark_update = HrmpWatermarkUpdate::new(watermark, vfp.relay_parent_number);

				aggregated_segment
					.append(&ancestor, watermark_update, &total_bandwidth_out)
					.expect("unincluded segment limits exceeded");
				AggregatedUnincludedSegment::<T>::put(aggregated_segment);
				// Check in `on_initialize` guarantees there's space for this block.
				UnincludedSegment::<T>::append(ancestor);
			}
			HrmpOutboundMessages::<T>::put(outbound_messages);
		}

		fn on_initialize(_n: BlockNumberFor<T>) -> Weight {
			let mut weight = Weight::zero();

			// To prevent removing `NewValidationCode` that was set by another `on_initialize`
			// like for example from scheduler, we only kill the storage entry if it was not yet
			// updated in the current block.
			if !<DidSetValidationCode<T>>::get() {
				// NOTE: Killing here is required to at least include the trie nodes down to the key
				// in the proof. Because this value will be read in `validate_block` and thus,
				// needs to be reachable by the proof.
				NewValidationCode::<T>::kill();
				weight += T::DbWeight::get().writes(1);
			}

			// The parent hash was unknown during block finalization. Update it here.
			{
				<UnincludedSegment<T>>::mutate(|chain| {
					if let Some(ancestor) = chain.last_mut() {
						let parent = frame_system::Pallet::<T>::parent_hash();
						// Ancestor is the latest finalized block, thus current parent is
						// its output head.
						ancestor.replace_para_head_hash(parent);
					}
				});
				weight += T::DbWeight::get().reads_writes(1, 1);

				// Weight used during finalization.
				weight += T::DbWeight::get().reads_writes(3, 2);
			}

			// Remove the validation from the old block.
			ValidationData::<T>::kill();
			// NOTE: Killing here is required to at least include the trie nodes down to the keys
			// in the proof. Because these values will be read in `validate_block` and thus,
			// need to be reachable by the proof.
			ProcessedDownwardMessages::<T>::kill();
			UpwardMessages::<T>::kill();
			HrmpOutboundMessages::<T>::kill();
			CustomValidationHeadData::<T>::kill();
			// The same as above. Reading here to make sure that the key is included in the proof.
			HrmpWatermark::<T>::get();
			weight += T::DbWeight::get().reads_writes(1, 5);

			// Here, in `on_initialize` we must report the weight for both `on_initialize` and
			// `on_finalize`.
			//
			// One complication here, is that the `host_configuration` is updated by an inherent
			// and those are processed after the block initialization phase. Therefore, we have to
			// be content only with the configuration as per the previous block. That means that
			// the configuration can be either stale (or be absent altogether in case of the
			// beginning of the chain).
			//
			// In order to mitigate this, we do the following. At the time, we are only concerned
			// about `hrmp_max_message_num_per_candidate`. We reserve the amount of weight to
			// process the number of HRMP messages according to the potentially stale
			// configuration. In `on_finalize` we will process only the maximum between the
			// announced number of messages and the actual received in the fresh configuration.
			//
			// In the common case, they will be the same. In the case the actual value is smaller
			// than the announced, we would waste some of weight. In the case the actual value is
			// greater than the announced, we will miss opportunity to send a couple of messages.
			weight += T::DbWeight::get().reads_writes(1, 1);
			let hrmp_max_message_num_per_candidate = HostConfiguration::<T>::get()
				.map(|cfg| cfg.hrmp_max_message_num_per_candidate)
				.unwrap_or(0);
			<AnnouncedHrmpMessagesPerCandidate<T>>::put(hrmp_max_message_num_per_candidate);

			// NOTE that the actual weight consumed by `on_finalize` may turn out lower.
			weight += T::DbWeight::get().reads_writes(
				3 + hrmp_max_message_num_per_candidate as u64,
				4 + hrmp_max_message_num_per_candidate as u64,
			);

			// Weight for updating the last relay chain block number in `on_finalize`.
			weight += T::DbWeight::get().reads_writes(1, 1);

			// Weight for adjusting the unincluded segment in `on_finalize`.
			weight += T::DbWeight::get().reads_writes(6, 3);

			// Always try to read `UpgradeGoAhead` in `on_finalize`.
			weight += T::DbWeight::get().reads(1);

			weight
		}
	}
583
	#[pallet::call]
	impl<T: Config> Pallet<T> {
		/// Set the current validation data.
		///
		/// This should be invoked exactly once per block. It will panic at the finalization
		/// phase if the call was not invoked.
		///
		/// The dispatch origin for this call must be `Inherent`
		///
		/// As a side effect, this function upgrades the current validation function
		/// if the appropriate time has come.
		#[pallet::call_index(0)]
		#[pallet::weight((0, DispatchClass::Mandatory))]
		// TODO: This weight should be corrected. Currently the weight is registered manually in the
		// call with `register_extra_weight_unchecked`.
		pub fn set_validation_data(
			origin: OriginFor<T>,
			data: BasicParachainInherentData,
			inbound_messages_data: InboundMessagesData,
		) -> DispatchResult {
			ensure_none(origin)?;
			assert!(
				!<ValidationData<T>>::exists(),
				"ValidationData must be updated only once in a block",
			);

			// TODO: This is more than zero, but will need benchmarking to figure out what.
			let mut total_weight = Weight::zero();

			// NOTE: the inherent data is expected to be unique, even if this block is built
			// in the context of the same relay parent as the previous one. In particular,
			// the inherent shouldn't contain messages that were already processed by any of the
			// ancestors.
			//
			// This invariant should be upheld by the `ProvideInherent` implementation.
			let BasicParachainInherentData {
				validation_data: vfp,
				relay_chain_state,
				relay_parent_descendants,
				collator_peer_id: _,
			} = data;

			// Check that the associated relay chain block number is as expected.
			T::CheckAssociatedRelayNumber::check_associated_relay_number(
				vfp.relay_parent_number,
				LastRelayChainBlockNumber::<T>::get(),
			);

			// Verify the storage proof against the relay parent storage root; a malformed
			// proof panics and thereby rejects the whole block.
			let relay_state_proof = RelayChainStateProof::new(
				T::SelfParaId::get(),
				vfp.relay_parent_storage_root,
				relay_chain_state.clone(),
			)
			.expect("Invalid relay chain state proof");

			let expected_rp_descendants_num = T::RelayParentOffset::get();

			// When building with a relay parent offset, the inherent must carry the chain of
			// descendant headers proving the offset (see `Config::RelayParentOffset`).
			if expected_rp_descendants_num > 0 {
				if let Err(err) = descendant_validation::verify_relay_parent_descendants(
					&relay_state_proof,
					relay_parent_descendants,
					vfp.relay_parent_storage_root,
					expected_rp_descendants_num,
				) {
					panic!(
						"Unable to verify provided relay parent descendants. \
						expected_rp_descendants_num: {expected_rp_descendants_num} \
						error: {err:?}"
					);
				};
			}

			// Update the desired maximum capacity according to the consensus hook.
			let (consensus_hook_weight, capacity) =
				T::ConsensusHook::on_state_proof(&relay_state_proof);
			total_weight += consensus_hook_weight;
			total_weight += Self::maybe_drop_included_ancestors(&relay_state_proof, capacity);
			// Deposit a log indicating the relay-parent storage root.
			// TODO: remove this in favor of the relay-parent's hash after
			// https://github.com/paritytech/cumulus/issues/303
			frame_system::Pallet::<T>::deposit_log(
				cumulus_primitives_core::rpsr_digest::relay_parent_storage_root_item(
					vfp.relay_parent_storage_root,
					vfp.relay_parent_number,
				),
			);

			// initialization logic: we know that this runs exactly once every block,
			// which means we can put the initialization logic here to remove the
			// sequencing problem.
			let upgrade_go_ahead_signal = relay_state_proof
				.read_upgrade_go_ahead_signal()
				.expect("Invalid upgrade go ahead signal");

			let upgrade_signal_in_segment = AggregatedUnincludedSegment::<T>::get()
				.as_ref()
				.and_then(SegmentTracker::consumed_go_ahead_signal);
			if let Some(signal_in_segment) = upgrade_signal_in_segment.as_ref() {
				// Unincluded ancestor consuming upgrade signal is still within the segment,
				// sanity check that it matches with the signal from relay chain.
				assert_eq!(upgrade_go_ahead_signal, Some(*signal_in_segment));
			}
			match upgrade_go_ahead_signal {
				Some(_signal) if upgrade_signal_in_segment.is_some() => {
					// Do nothing, processing logic was executed by unincluded ancestor.
				},
				Some(relay_chain::UpgradeGoAhead::GoAhead) => {
					assert!(
						<PendingValidationCode<T>>::exists(),
						"No new validation function found in storage, GoAhead signal is not expected",
					);
					let validation_code = <PendingValidationCode<T>>::take();

					frame_system::Pallet::<T>::update_code_in_storage(&validation_code);
					<T::OnSystemEvent as OnSystemEvent>::on_validation_code_applied();
					Self::deposit_event(Event::ValidationFunctionApplied {
						relay_chain_block_num: vfp.relay_parent_number,
					});
				},
				Some(relay_chain::UpgradeGoAhead::Abort) => {
					<PendingValidationCode<T>>::kill();
					Self::deposit_event(Event::ValidationFunctionDiscarded);
				},
				None => {},
			}
			<UpgradeRestrictionSignal<T>>::put(
				relay_state_proof
					.read_upgrade_restriction_signal()
					.expect("Invalid upgrade restriction signal"),
			);
			// Taken again in `on_finalize` of this same block.
			<UpgradeGoAhead<T>>::put(upgrade_go_ahead_signal);

			let host_config = relay_state_proof
				.read_abridged_host_configuration()
				.expect("Invalid host configuration in relay chain state proof");

			let relevant_messaging_state = relay_state_proof
				.read_messaging_state_snapshot(&host_config)
				.expect("Invalid messaging state in relay chain state proof");

			<ValidationData<T>>::put(&vfp);
			<RelayStateProof<T>>::put(relay_chain_state);
			<RelevantMessagingState<T>>::put(relevant_messaging_state.clone());
			<HostConfiguration<T>>::put(host_config);

			<T::OnSystemEvent as OnSystemEvent>::on_validation_data(&vfp);

			total_weight.saturating_accrue(Self::enqueue_inbound_downward_messages(
				relevant_messaging_state.dmq_mqc_head,
				inbound_messages_data.downward_messages,
			));
			total_weight.saturating_accrue(Self::enqueue_inbound_horizontal_messages(
				&relevant_messaging_state.ingress_channels,
				inbound_messages_data.horizontal_messages,
				vfp.relay_parent_number,
			));

			frame_system::Pallet::<T>::register_extra_weight_unchecked(
				total_weight,
				DispatchClass::Mandatory,
			);

			Ok(())
		}

		/// Send `message` to the relay chain as an upward message.
		///
		/// The dispatch origin for this call must be Root. The result of
		/// `send_upward_message` is deliberately discarded (best effort).
		#[pallet::call_index(1)]
		#[pallet::weight((1_000, DispatchClass::Operational))]
		pub fn sudo_send_upward_message(
			origin: OriginFor<T>,
			message: UpwardMessage,
		) -> DispatchResult {
			ensure_root(origin)?;
			let _ = Self::send_upward_message(message);
			Ok(())
		}

		// WARNING: call indices 2 and 3 were used in a former version of this pallet. Using them
		// again will require to bump the transaction version of runtimes using this pallet.
	}
763
	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config> {
		/// The validation function has been scheduled to apply.
		ValidationFunctionStored,
		/// The validation function was applied as of the contained relay chain block number.
		ValidationFunctionApplied { relay_chain_block_num: RelayChainBlockNumber },
		/// The relay-chain aborted the upgrade process.
		ValidationFunctionDiscarded,
		/// Some downward messages have been received and will be processed.
		/// `count` is the number of messages received in this batch.
		DownwardMessagesReceived { count: u32 },
		/// Downward messages were processed using the given weight.
		DownwardMessagesProcessed { weight_used: Weight, dmq_head: relay_chain::Hash },
		/// An upward message was sent to the relay chain.
		UpwardMessageSent { message_hash: Option<XcmHash> },
	}
780
	/// Errors that can be returned by the dispatchables and helpers of this pallet.
	#[pallet::error]
	pub enum Error<T> {
		/// Attempt to upgrade validation function while existing upgrade pending.
		OverlappingUpgrades,
		/// Polkadot currently prohibits this parachain from upgrading its validation function.
		ProhibitedByPolkadot,
		/// The supplied validation function has compiled into a blob larger than Polkadot is
		/// willing to run.
		TooBig,
		/// The inherent which supplies the validation data did not run this block.
		ValidationDataNotAvailable,
		/// The inherent which supplies the host configuration did not run this block.
		HostConfigurationNotAvailable,
		/// No validation function upgrade is currently scheduled.
		NotScheduled,
	}
797
	/// Latest included block descendants the runtime accepted. In other words, these are
	/// ancestors of the currently executing block which have not been included in the observed
	/// relay-chain state.
	///
	/// The segment length is limited by the capacity returned from the [`ConsensusHook`] configured
	/// in the pallet.
	#[pallet::storage]
	pub type UnincludedSegment<T: Config> = StorageValue<_, Vec<Ancestor<T::Hash>>, ValueQuery>;

	/// Storage field that keeps track of bandwidth used by the unincluded segment along with the
	/// latest HRMP watermark. Used for limiting the acceptance of new blocks with
	/// respect to relay chain constraints.
	///
	/// Expected to be `Some` whenever [`UnincludedSegment`] is non-empty (see
	/// `maybe_drop_included_ancestors`).
	#[pallet::storage]
	pub type AggregatedUnincludedSegment<T: Config> =
		StorageValue<_, SegmentTracker<T::Hash>, OptionQuery>;
813
	/// In case of a scheduled upgrade, this storage field contains the validation code to be
	/// applied.
	///
	/// As soon as the relay chain gives us the go-ahead signal, we will overwrite the
	/// [`:code`][sp_core::storage::well_known_keys::CODE] which will result in the next block
	/// being processed with the new validation code. This concludes the upgrade process.
	#[pallet::storage]
	pub type PendingValidationCode<T: Config> = StorageValue<_, Vec<u8>, ValueQuery>;

	/// Validation code that is set by the parachain and is to be communicated to collator and
	/// consequently the relay-chain.
	///
	/// This will be cleared in `on_initialize` of each new block if no other pallet already set
	/// the value.
	#[pallet::storage]
	pub type NewValidationCode<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;
830
	/// The [`PersistedValidationData`] set for this block.
	/// This value is expected to be set only once per block and it's never stored
	/// in the trie.
	#[pallet::storage]
	pub type ValidationData<T: Config> = StorageValue<_, PersistedValidationData>;

	/// Was a new validation code set in this block, to be communicated to the relay chain?
	///
	/// Set to `true` by `notify_polkadot_of_pending_upgrade` whenever a new code blob is
	/// written to [`NewValidationCode`].
	#[pallet::storage]
	pub type DidSetValidationCode<T: Config> = StorageValue<_, bool, ValueQuery>;

	/// The relay chain block number associated with the last parachain block.
	///
	/// This is updated in `on_finalize`.
	#[pallet::storage]
	pub type LastRelayChainBlockNumber<T: Config> =
		StorageValue<_, RelayChainBlockNumber, ValueQuery>;
847
	/// An option which indicates if the relay-chain restricts signalling a validation code upgrade.
	/// In other words, if this is `Some` and [`NewValidationCode`] is `Some` then the produced
	/// candidate will be invalid.
	///
	/// This storage item is a mirror of the corresponding value for the current parachain from the
	/// relay-chain. This value is ephemeral which means it doesn't hit the storage. This value is
	/// set after the inherent.
	///
	/// Stored as an `Option` with `ValueQuery` so that the default is `None`.
	#[pallet::storage]
	pub type UpgradeRestrictionSignal<T: Config> =
		StorageValue<_, Option<relay_chain::UpgradeRestriction>, ValueQuery>;

	/// Optional upgrade go-ahead signal from the relay-chain.
	///
	/// This storage item is a mirror of the corresponding value for the current parachain from the
	/// relay-chain. This value is ephemeral which means it doesn't hit the storage. This value is
	/// set after the inherent.
	#[pallet::storage]
	pub type UpgradeGoAhead<T: Config> =
		StorageValue<_, Option<relay_chain::UpgradeGoAhead>, ValueQuery>;
867
	/// The state proof for the last relay parent block.
	///
	/// This field is meant to be updated each block with the validation data inherent. Therefore,
	/// before processing of the inherent, e.g. in `on_initialize` this data may be stale.
	///
	/// This data is also absent from the genesis.
	#[pallet::storage]
	pub type RelayStateProof<T: Config> = StorageValue<_, sp_trie::StorageProof>;

	/// The snapshot of some state related to messaging relevant to the current parachain as per
	/// the relay parent.
	///
	/// This field is meant to be updated each block with the validation data inherent. Therefore,
	/// before processing of the inherent, e.g. in `on_initialize` this data may be stale.
	///
	/// This data is also absent from the genesis.
	#[pallet::storage]
	pub type RelevantMessagingState<T: Config> = StorageValue<_, MessagingStateSnapshot>;

	/// The parachain host configuration that was obtained from the relay parent.
	///
	/// This field is meant to be updated each block with the validation data inherent. Therefore,
	/// before processing of the inherent, e.g. in `on_initialize` this data may be stale.
	///
	/// This data is also absent from the genesis.
	#[pallet::storage]
	#[pallet::disable_try_decode_storage]
	pub type HostConfiguration<T: Config> = StorageValue<_, AbridgedHostConfiguration>;
896
	/// The last downward message queue chain head we have observed.
	///
	/// This value is loaded before and saved after processing inbound downward messages carried
	/// by the system inherent.
	#[pallet::storage]
	pub type LastDmqMqcHead<T: Config> = StorageValue<_, MessageQueueChain, ValueQuery>;

	/// The message queue chain heads we have observed for each incoming HRMP channel.
	///
	/// This value is loaded before and saved after processing inbound HRMP messages carried
	/// by the system inherent.
	#[pallet::storage]
	pub type LastHrmpMqcHeads<T: Config> =
		StorageValue<_, BTreeMap<ParaId, MessageQueueChain>, ValueQuery>;

	/// Number of downward messages processed in a block.
	///
	/// This will be cleared in `on_initialize` of each new block.
	#[pallet::storage]
	pub type ProcessedDownwardMessages<T: Config> = StorageValue<_, u32, ValueQuery>;

	/// The last processed downward message.
	///
	/// We need to keep track of this to filter the messages that have been already processed.
	#[pallet::storage]
	pub type LastProcessedDownwardMessage<T: Config> = StorageValue<_, InboundMessageId>;

	/// HRMP watermark that was set in a block.
	#[pallet::storage]
	pub type HrmpWatermark<T: Config> = StorageValue<_, relay_chain::BlockNumber, ValueQuery>;

	/// The last processed HRMP message.
	///
	/// We need to keep track of this to filter the messages that have been already processed.
	#[pallet::storage]
	pub type LastProcessedHrmpMessage<T: Config> = StorageValue<_, InboundMessageId>;
933
	/// HRMP messages that were sent in a block.
	///
	/// This will be cleared in `on_initialize` of each new block.
	#[pallet::storage]
	pub type HrmpOutboundMessages<T: Config> =
		StorageValue<_, Vec<OutboundHrmpMessage>, ValueQuery>;

	/// Upward messages that were sent in a block.
	///
	/// This will be cleared in `on_initialize` of each new block.
	#[pallet::storage]
	pub type UpwardMessages<T: Config> = StorageValue<_, Vec<UpwardMessage>, ValueQuery>;

	/// Upward messages that are still pending and not yet sent to the relay chain.
	#[pallet::storage]
	pub type PendingUpwardMessages<T: Config> = StorageValue<_, Vec<UpwardMessage>, ValueQuery>;

	/// The factor to multiply the base delivery fee by for UMP.
	#[pallet::storage]
	pub type UpwardDeliveryFeeFactor<T: Config> =
		StorageValue<_, FixedU128, ValueQuery, GetMinFeeFactor<Pallet<T>>>;

	/// The number of HRMP messages we observed in `on_initialize` and thus used that number for
	/// announcing the weight of `on_initialize` and `on_finalize`.
	#[pallet::storage]
	pub type AnnouncedHrmpMessagesPerCandidate<T: Config> = StorageValue<_, u32, ValueQuery>;

	/// The weight we reserve at the beginning of the block for processing XCMP messages. This
	/// overrides the amount set in the Config trait.
	#[pallet::storage]
	pub type ReservedXcmpWeightOverride<T: Config> = StorageValue<_, Weight>;

	/// The weight we reserve at the beginning of the block for processing DMP messages. This
	/// overrides the amount set in the Config trait.
	#[pallet::storage]
	pub type ReservedDmpWeightOverride<T: Config> = StorageValue<_, Weight>;

	/// A custom head data that should be returned as result of `validate_block`.
	///
	/// See `Pallet::set_custom_validation_head_data` for more information.
	#[pallet::storage]
	pub type CustomValidationHeadData<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;
976
977	#[pallet::inherent]
978	impl<T: Config> ProvideInherent for Pallet<T> {
979		type Call = Call<T>;
980		type Error = sp_inherents::MakeFatalError<()>;
981		const INHERENT_IDENTIFIER: InherentIdentifier =
982			cumulus_primitives_parachain_inherent::INHERENT_IDENTIFIER;
983
984		fn create_inherent(data: &InherentData) -> Option<Self::Call> {
985			let data = match data
986				.get_data::<ParachainInherentData>(&Self::INHERENT_IDENTIFIER)
987				.ok()
988				.flatten()
989			{
990				None => {
991					// Key Self::INHERENT_IDENTIFIER is expected to contain versioned inherent
992					// data. Older nodes are unaware of the new format and might provide the
993					// legacy data format. We try to load it and transform it into the current
994					// version.
995					let data = data
996						.get_data::<v0::ParachainInherentData>(
997							&cumulus_primitives_parachain_inherent::PARACHAIN_INHERENT_IDENTIFIER_V0,
998						)
999						.ok()
1000						.flatten()?;
1001					data.into()
1002				},
1003				Some(data) => data,
1004			};
1005
1006			Some(Self::do_create_inherent(data))
1007		}
1008
1009		fn is_inherent(call: &Self::Call) -> bool {
1010			matches!(call, Call::set_validation_data { .. })
1011		}
1012	}
1013
	/// Genesis configuration for this pallet.
	///
	/// Carries no user-configurable values; it exists only so the genesis build hook runs.
	#[pallet::genesis_config]
	#[derive(frame_support::DefaultNoBound)]
	pub struct GenesisConfig<T: Config> {
		#[serde(skip)]
		pub _config: core::marker::PhantomData<T>,
	}
1020
	#[pallet::genesis_build]
	impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
		fn build(&self) {
			// Write an empty value under the well-known `:c` key at genesis; presumably a
			// marker consumed elsewhere — see the linked issue for its removal.
			// TODO: Remove after https://github.com/paritytech/cumulus/issues/479
			sp_io::storage::set(b":c", &[]);
		}
	}
1028}
1029
1030impl<T: Config> Pallet<T> {
1031	/// Get the unincluded segment size after the given hash.
1032	///
1033	/// If the unincluded segment doesn't contain the given hash, this returns the
1034	/// length of the entire unincluded segment.
1035	///
1036	/// This is intended to be used for determining how long the unincluded segment _would be_
1037	/// in runtime APIs related to authoring.
1038	pub fn unincluded_segment_size_after(included_hash: T::Hash) -> u32 {
1039		let segment = UnincludedSegment::<T>::get();
1040		crate::unincluded_segment::size_after_included(included_hash, &segment)
1041	}
1042}
1043
impl<T: Config> FeeTracker for Pallet<T> {
	// There is a single upward message queue, hence no per-channel id is needed.
	type Id = ();

	/// Returns the current UMP delivery fee factor.
	fn get_fee_factor(_id: Self::Id) -> FixedU128 {
		UpwardDeliveryFeeFactor::<T>::get()
	}

	/// Overwrites the UMP delivery fee factor.
	fn set_fee_factor(_id: Self::Id, val: FixedU128) {
		UpwardDeliveryFeeFactor::<T>::set(val);
	}
}
1055
1056impl<T: Config> ListChannelInfos for Pallet<T> {
1057	fn outgoing_channels() -> Vec<ParaId> {
1058		let Some(state) = RelevantMessagingState::<T>::get() else { return Vec::new() };
1059		state.egress_channels.into_iter().map(|(id, _)| id).collect()
1060	}
1061}
1062
1063impl<T: Config> GetChannelInfo for Pallet<T> {
1064	fn get_channel_status(id: ParaId) -> ChannelStatus {
1065		// Note, that we are using `relevant_messaging_state` which may be from the previous
1066		// block, in case this is called from `on_initialize`, i.e. before the inherent with
1067		// fresh data is submitted.
1068		//
1069		// That shouldn't be a problem though because this is anticipated and already can
1070		// happen. This is because sending implies that a message is buffered until there is
1071		// space to send a message in the candidate. After a while waiting in a buffer, it may
1072		// be discovered that the channel to which a message were addressed is now closed.
1073		// Another possibility, is that the maximum message size was decreased so that a
1074		// message in the buffer doesn't fit. Should any of that happen the sender should be
1075		// notified about the message was discarded.
1076		//
1077		// Here it a similar case, with the difference that the realization that the channel is
1078		// closed came the same block.
1079		let channels = match RelevantMessagingState::<T>::get() {
1080			None => {
1081				log::warn!("calling `get_channel_status` with no RelevantMessagingState?!");
1082				return ChannelStatus::Closed
1083			},
1084			Some(d) => d.egress_channels,
1085		};
1086		// ^^^ NOTE: This storage field should carry over from the previous block. So if it's
1087		// None then it must be that this is an edge-case where a message is attempted to be
1088		// sent at the first block. It should be safe to assume that there are no channels
1089		// opened at all so early. At least, relying on this assumption seems to be a better
1090		// trade-off, compared to introducing an error variant that the clients should be
1091		// prepared to handle.
1092		let index = match channels.binary_search_by_key(&id, |item| item.0) {
1093			Err(_) => return ChannelStatus::Closed,
1094			Ok(i) => i,
1095		};
1096		let meta = &channels[index].1;
1097		if meta.msg_count + 1 > meta.max_capacity {
1098			// The channel is at its capacity. Skip it for now.
1099			return ChannelStatus::Full
1100		}
1101		let max_size_now = meta.max_total_size - meta.total_size;
1102		let max_size_ever = meta.max_message_size;
1103		ChannelStatus::Ready(max_size_now as usize, max_size_ever as usize)
1104	}
1105
1106	fn get_channel_info(id: ParaId) -> Option<ChannelInfo> {
1107		let channels = RelevantMessagingState::<T>::get()?.egress_channels;
1108		let index = channels.binary_search_by_key(&id, |item| item.0).ok()?;
1109		let info = ChannelInfo {
1110			max_capacity: channels[index].1.max_capacity,
1111			max_total_size: channels[index].1.max_total_size,
1112			max_message_size: channels[index].1.max_message_size,
1113			msg_count: channels[index].1.msg_count,
1114			total_size: channels[index].1.total_size,
1115		};
1116		Some(info)
1117	}
1118}
1119
1120impl<T: Config> Pallet<T> {
1121	/// The bandwidth limit per block that applies when receiving messages from the relay chain via
1122	/// DMP or XCMP.
1123	///
1124	/// The limit is per message passing mechanism (e.g. 1 MiB for DMP, 1 MiB for XCMP).
1125	///
1126	/// The purpose of this limit is to make sure that the total size of the messages received by
1127	/// the parachain from the relay chain doesn't exceed the block size. Currently each message
1128	/// passing mechanism can use 1/6 of the total block PoV which means that in total 1/3
1129	/// of the block PoV can be used for message passing.
1130	fn messages_collection_size_limit() -> usize {
1131		let max_block_weight = <T as frame_system::Config>::BlockWeights::get().max_block;
1132		let max_block_pov = max_block_weight.proof_size();
1133		(max_block_pov / 6).saturated_into()
1134	}
1135
1136	/// Updates inherent data to only include the messages that weren't already processed
1137	/// by the runtime and to compress (hash) the messages that exceed the allocated size.
1138	///
1139	/// This method doesn't check for mqc heads mismatch. If the MQC doesn't match after
1140	/// dropping messages, the runtime will panic when executing the inherent.
1141	fn do_create_inherent(data: ParachainInherentData) -> Call<T> {
1142		let (data, mut downward_messages, mut horizontal_messages) =
1143			deconstruct_parachain_inherent_data(data);
1144		let last_relay_block_number = LastRelayChainBlockNumber::<T>::get();
1145
1146		let messages_collection_size_limit = Self::messages_collection_size_limit();
1147		// DMQ.
1148		let last_processed_msg = LastProcessedDownwardMessage::<T>::get()
1149			.unwrap_or(InboundMessageId { sent_at: last_relay_block_number, reverse_idx: 0 });
1150		downward_messages.drop_processed_messages(&last_processed_msg);
1151		let mut size_limit = messages_collection_size_limit;
1152		let downward_messages = downward_messages.into_abridged(&mut size_limit);
1153
1154		// HRMP.
1155		let last_processed_msg = LastProcessedHrmpMessage::<T>::get()
1156			.unwrap_or(InboundMessageId { sent_at: last_relay_block_number, reverse_idx: 0 });
1157		horizontal_messages.drop_processed_messages(&last_processed_msg);
1158		size_limit = size_limit.saturating_add(messages_collection_size_limit);
1159		let horizontal_messages = horizontal_messages.into_abridged(&mut size_limit);
1160
1161		let inbound_messages_data =
1162			InboundMessagesData::new(downward_messages, horizontal_messages);
1163
1164		Call::set_validation_data { data, inbound_messages_data }
1165	}
1166
	/// Enqueue all inbound downward messages relayed by the collator into the MQ pallet.
	///
	/// Checks if the sequence of the messages is valid, dispatches them and communicates the
	/// number of processed messages to the collator via a storage update.
	///
	/// Returns the weight used, derived from the number of fully-included messages.
	///
	/// # Panics
	///
	/// If it turns out that after processing all messages the Message Queue Chain
	/// hash doesn't match the expected.
	fn enqueue_inbound_downward_messages(
		expected_dmq_mqc_head: relay_chain::Hash,
		downward_messages: AbridgedInboundDownwardMessages,
	) -> Weight {
		// Panics if the collator omitted messages it should have included; the exact rule
		// is implemented by `AbridgedInboundDownwardMessages`.
		downward_messages.check_enough_messages_included("DMQ");

		// MQC head as of the previous block; extended below with this block's messages.
		let mut dmq_head = <LastDmqMqcHead<T>>::get();

		// `messages` are included in full; `hashed_messages` contribute only their hash
		// (abridged in `do_create_inherent` to respect the size budget).
		let (messages, hashed_messages) = downward_messages.messages();
		let message_count = messages.len() as u32;
		let weight_used = T::WeightInfo::enqueue_inbound_downward_messages(message_count);
		if let Some(last_msg) = messages.last() {
			Self::deposit_event(Event::DownwardMessagesReceived { count: message_count });

			// Eagerly update the MQC head hash:
			for msg in messages {
				dmq_head.extend_downward(msg);
			}
			<LastDmqMqcHead<T>>::put(&dmq_head);
			Self::deposit_event(Event::DownwardMessagesProcessed {
				weight_used,
				dmq_head: dmq_head.head(),
			});

			// Track the newest fully-included message; `reverse_idx` counts the hashed
			// messages that share its `sent_at` block (see `InboundMessageId`).
			let mut last_processed_msg =
				InboundMessageId { sent_at: last_msg.sent_at, reverse_idx: 0 };
			for msg in hashed_messages {
				// Hashed messages still advance the MQC even though they are not dispatched.
				dmq_head.extend_with_hashed_msg(msg);

				if msg.sent_at == last_processed_msg.sent_at {
					last_processed_msg.reverse_idx += 1;
				}
			}
			LastProcessedDownwardMessage::<T>::put(last_processed_msg);

			// Hand the fully-included messages over to the configured queue for dispatch.
			T::DmpQueue::handle_messages(downward_messages.bounded_msgs_iter());
		}

		// After hashing each message in the message queue chain submitted by the collator, we
		// should arrive to the MQC head provided by the relay chain.
		//
		// A mismatch means that at least some of the submitted messages were altered, omitted or
		// added improperly.
		assert_eq!(dmq_head.head(), expected_dmq_mqc_head, "DMQ head mismatch");

		ProcessedDownwardMessages::<T>::put(message_count);

		weight_used
	}
1225
1226	fn check_hrmp_mcq_heads(
1227		ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)],
1228		mqc_heads: &mut BTreeMap<ParaId, MessageQueueChain>,
1229	) {
1230		// Check that the MQC heads for each channel provided by the relay chain match the MQC
1231		// heads we have after processing all incoming messages.
1232		//
1233		// Along the way we also carry over the relevant entries from the `last_mqc_heads` to
1234		// `running_mqc_heads`. Otherwise, in a block where no messages were sent in a channel
1235		// it won't get into next block's `last_mqc_heads` and thus will be all zeros, which
1236		// would corrupt the message queue chain.
1237		for (sender, channel) in ingress_channels {
1238			let cur_head = mqc_heads.entry(*sender).or_default().head();
1239			let target_head = channel.mqc_head.unwrap_or_default();
1240			assert_eq!(cur_head, target_head, "HRMP head mismatch");
1241		}
1242	}
1243
1244	/// Performs some checks related to the sender and the `sent_at` field of an HRMP message.
1245	///
1246	/// **Panics** if the message submitted by the collator doesn't respect the expected order or if
1247	///            it was sent from a para which has no open channel to this parachain.
1248	fn check_hrmp_message_metadata(
1249		ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)],
1250		maybe_prev_msg_metadata: &mut Option<(u32, ParaId)>,
1251		msg_metadata: (u32, ParaId),
1252	) {
1253		// Check that the message is properly ordered.
1254		if let Some(prev_msg) = maybe_prev_msg_metadata {
1255			assert!(&msg_metadata >= prev_msg, "[HRMP] Messages order violation");
1256		}
1257
1258		// Check that the message is sent from an existing channel. The channel exists
1259		// if its MQC head is present in `vfp.hrmp_mqc_heads`.
1260		let sender = msg_metadata.1;
1261		let maybe_channel_idx =
1262			ingress_channels.binary_search_by_key(&sender, |&(channel_sender, _)| channel_sender);
1263		assert!(
1264			maybe_channel_idx.is_ok(),
1265			"One of the messages submitted by the collator was sent from a sender ({}) \
1266					that doesn't have a channel opened to this parachain",
1267			<ParaId as Into<u32>>::into(sender)
1268		);
1269	}
1270
	/// Process all inbound horizontal messages relayed by the collator.
	///
	/// This is similar to [`enqueue_inbound_downward_messages`], but works with multiple inbound
	/// channels. It immediately dispatches signals and queues all other XCMs. Blob messages are
	/// ignored.
	///
	/// **Panics** if either any of horizontal messages submitted by the collator was sent from
	///            a para which has no open channel to this parachain or if after processing
	///            messages across all inbound channels MQCs were obtained which do not
	///            correspond to the ones found on the relay-chain.
	fn enqueue_inbound_horizontal_messages(
		ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)],
		horizontal_messages: AbridgedInboundHrmpMessages,
		relay_parent_number: relay_chain::BlockNumber,
	) -> Weight {
		// First, check the HRMP advancement rule.
		horizontal_messages.check_enough_messages_included("HRMP");

		// `messages` are fully included; `hashed_messages` contribute only their hash.
		let (messages, hashed_messages) = horizontal_messages.messages();
		let mut mqc_heads = <LastHrmpMqcHeads<T>>::get();

		// Fast path: nothing fully included. Verify the heads and advance the watermark and
		// processed-message bookkeeping straight to the relay parent.
		if messages.is_empty() {
			Self::check_hrmp_mcq_heads(ingress_channels, &mut mqc_heads);
			let last_processed_msg =
				InboundMessageId { sent_at: relay_parent_number, reverse_idx: 0 };
			LastProcessedHrmpMessage::<T>::put(last_processed_msg);
			HrmpWatermark::<T>::put(relay_parent_number);
			return T::DbWeight::get().reads_writes(1, 2);
		}

		// Walk the fully-included messages (expected in `(sent_at, sender)` order),
		// extending each sender's MQC along the way.
		let mut prev_msg_metadata = None;
		let mut last_processed_block = HrmpWatermark::<T>::get();
		let mut last_processed_msg = InboundMessageId { sent_at: 0, reverse_idx: 0 };
		for (sender, msg) in messages {
			Self::check_hrmp_message_metadata(
				ingress_channels,
				&mut prev_msg_metadata,
				(msg.sent_at, *sender),
			);
			mqc_heads.entry(*sender).or_default().extend_hrmp(msg);

			// Once we move past a relay block, every message of that block was processed,
			// so the watermark may advance to it.
			if msg.sent_at > last_processed_msg.sent_at && last_processed_msg.sent_at > 0 {
				last_processed_block = last_processed_msg.sent_at;
			}
			last_processed_msg.sent_at = msg.sent_at;
		}
		<LastHrmpMqcHeads<T>>::put(&mqc_heads);
		// Hashed (size-abridged) messages still advance the MQCs, and `reverse_idx` counts
		// those sharing the last fully-processed `sent_at` block.
		for (sender, msg) in hashed_messages {
			Self::check_hrmp_message_metadata(
				ingress_channels,
				&mut prev_msg_metadata,
				(msg.sent_at, *sender),
			);
			mqc_heads.entry(*sender).or_default().extend_with_hashed_msg(msg);

			if msg.sent_at == last_processed_msg.sent_at {
				last_processed_msg.reverse_idx += 1;
			}
		}
		// With no hashed leftovers for that block (`reverse_idx == 0`), the block itself is
		// fully processed and the watermark may advance to it.
		if last_processed_msg.sent_at > 0 && last_processed_msg.reverse_idx == 0 {
			last_processed_block = last_processed_msg.sent_at;
		}
		LastProcessedHrmpMessage::<T>::put(&last_processed_msg);
		Self::check_hrmp_mcq_heads(ingress_channels, &mut mqc_heads);

		// Dispatch the fully-included messages within the reserved weight budget.
		let max_weight =
			<ReservedXcmpWeightOverride<T>>::get().unwrap_or_else(T::ReservedXcmpWeight::get);
		let weight_used = T::XcmpMessageHandler::handle_xcmp_messages(
			horizontal_messages.flat_msgs_iter(),
			max_weight,
		);

		// Update watermark
		HrmpWatermark::<T>::put(last_processed_block);

		weight_used.saturating_add(T::DbWeight::get().reads_writes(2, 3))
	}
1348
	/// Drop blocks from the unincluded segment with respect to the latest parachain head.
	///
	/// Reads the included para head from the relay state proof, drains every ancestor up to
	/// (and including) it from [`UnincludedSegment`], subtracts the dropped bandwidth from
	/// [`AggregatedUnincludedSegment`] and asserts that the remaining segment leaves room
	/// for the current block. Returns the weight consumed.
	fn maybe_drop_included_ancestors(
		relay_state_proof: &RelayChainStateProof,
		capacity: consensus_hook::UnincludedSegmentCapacity,
	) -> Weight {
		let mut weight_used = Weight::zero();
		// If the unincluded segment length is nonzero, then the parachain head must be present.
		let para_head =
			relay_state_proof.read_included_para_head().ok().map(|h| T::Hashing::hash(&h.0));

		let unincluded_segment_len = <UnincludedSegment<T>>::decode_len().unwrap_or(0);
		weight_used += T::DbWeight::get().reads(1);

		// Clean up unincluded segment if nonempty.
		let included_head = match (para_head, capacity.is_expecting_included_parent()) {
			(Some(h), true) => {
				// The consensus hook expects the parent block to be the included one,
				// so the proof must agree.
				assert_eq!(
					h,
					frame_system::Pallet::<T>::parent_hash(),
					"expected parent to be included"
				);

				h
			},
			(Some(h), false) => h,
			(None, true) => {
				// All this logic is essentially a workaround to support collators which
				// might still not provide the included block with the state proof.
				frame_system::Pallet::<T>::parent_hash()
			},
			(None, false) => panic!("included head not present in relay storage proof"),
		};

		let new_len = {
			let para_head_hash = included_head;
			let dropped: Vec<Ancestor<T::Hash>> = <UnincludedSegment<T>>::mutate(|chain| {
				// Drop everything up to (inclusive) the block with an included para head, if
				// present.
				let idx = chain
					.iter()
					.position(|block| {
						let head_hash = block
							.para_head_hash()
							.expect("para head hash is updated during block initialization; qed");
						head_hash == &para_head_hash
					})
					.map_or(0, |idx| idx + 1); // inclusive.

				chain.drain(..idx).collect()
			});
			weight_used += T::DbWeight::get().reads_writes(1, 1);

			let new_len = unincluded_segment_len - dropped.len();
			if !dropped.is_empty() {
				// Give the bandwidth consumed by the dropped ancestors back to the
				// aggregated tracker.
				<AggregatedUnincludedSegment<T>>::mutate(|agg| {
					let agg = agg.as_mut().expect(
						"dropped part of the segment wasn't empty, hence value exists; qed",
					);
					for block in dropped {
						agg.subtract(&block);
					}
				});
				weight_used += T::DbWeight::get().reads_writes(1, 1);
			}

			new_len as u32
		};

		// Current block validity check: ensure there is space in the unincluded segment.
		//
		// If this fails, the parachain needs to wait for ancestors to be included before
		// a new block is allowed.
		assert!(new_len < capacity.get(), "no space left for the block in the unincluded segment");
		weight_used
	}
1424
1425	/// This adjusts the `RelevantMessagingState` according to the bandwidth limits in the
1426	/// unincluded segment.
1427	//
1428	// Reads: 2
1429	// Writes: 1
1430	fn adjust_egress_bandwidth_limits() {
1431		let unincluded_segment = match AggregatedUnincludedSegment::<T>::get() {
1432			None => return,
1433			Some(s) => s,
1434		};
1435
1436		<RelevantMessagingState<T>>::mutate(|messaging_state| {
1437			let messaging_state = match messaging_state {
1438				None => return,
1439				Some(s) => s,
1440			};
1441
1442			let used_bandwidth = unincluded_segment.used_bandwidth();
1443
1444			let channels = &mut messaging_state.egress_channels;
1445			for (para_id, used) in used_bandwidth.hrmp_outgoing.iter() {
1446				let i = match channels.binary_search_by_key(para_id, |item| item.0) {
1447					Ok(i) => i,
1448					Err(_) => continue, // indicates channel closed.
1449				};
1450
1451				let c = &mut channels[i].1;
1452
1453				c.total_size = (c.total_size + used.total_bytes).min(c.max_total_size);
1454				c.msg_count = (c.msg_count + used.msg_count).min(c.max_capacity);
1455			}
1456
1457			let upward_capacity = &mut messaging_state.relay_dispatch_queue_remaining_capacity;
1458			upward_capacity.remaining_count =
1459				upward_capacity.remaining_count.saturating_sub(used_bandwidth.ump_msg_count);
1460			upward_capacity.remaining_size =
1461				upward_capacity.remaining_size.saturating_sub(used_bandwidth.ump_total_bytes);
1462		});
1463	}
1464
	/// Put a new validation function into a particular location where polkadot
	/// monitors for updates. Calling this function notifies polkadot that a new
	/// upgrade has been scheduled.
	fn notify_polkadot_of_pending_upgrade(code: &[u8]) {
		// The new code blob itself, to be communicated to the collator/relay chain.
		NewValidationCode::<T>::put(code);
		// Flag that the validation code was set in this block.
		<DidSetValidationCode<T>>::put(true);
	}
1472
1473	/// The maximum code size permitted, in bytes.
1474	///
1475	/// Returns `None` if the relay chain parachain host configuration hasn't been submitted yet.
1476	pub fn max_code_size() -> Option<u32> {
1477		<HostConfiguration<T>>::get().map(|cfg| cfg.max_code_size)
1478	}
1479
1480	/// The implementation of the runtime upgrade functionality for parachains.
1481	pub fn schedule_code_upgrade(validation_function: Vec<u8>) -> DispatchResult {
1482		// Ensure that `ValidationData` exists. We do not care about the validation data per se,
1483		// but we do care about the [`UpgradeRestrictionSignal`] which arrives with the same
1484		// inherent.
1485		ensure!(<ValidationData<T>>::exists(), Error::<T>::ValidationDataNotAvailable,);
1486		ensure!(<UpgradeRestrictionSignal<T>>::get().is_none(), Error::<T>::ProhibitedByPolkadot);
1487
1488		ensure!(!<PendingValidationCode<T>>::exists(), Error::<T>::OverlappingUpgrades);
1489		let cfg = HostConfiguration::<T>::get().ok_or(Error::<T>::HostConfigurationNotAvailable)?;
1490		ensure!(validation_function.len() <= cfg.max_code_size as usize, Error::<T>::TooBig);
1491
1492		// When a code upgrade is scheduled, it has to be applied in two
1493		// places, synchronized: both polkadot and the individual parachain
1494		// have to upgrade on the same relay chain block.
1495		//
1496		// `notify_polkadot_of_pending_upgrade` notifies polkadot; the `PendingValidationCode`
1497		// storage keeps track locally for the parachain upgrade, which will
1498		// be applied later: when the relay-chain communicates go-ahead signal to us.
1499		Self::notify_polkadot_of_pending_upgrade(&validation_function);
1500		<PendingValidationCode<T>>::put(validation_function);
1501		Self::deposit_event(Event::ValidationFunctionStored);
1502
1503		Ok(())
1504	}
1505
1506	/// Returns the [`CollationInfo`] of the current active block.
1507	///
1508	/// The given `header` is the header of the built block we are collecting the collation info
1509	/// for.
1510	///
1511	/// This is expected to be used by the
1512	/// [`CollectCollationInfo`](cumulus_primitives_core::CollectCollationInfo) runtime api.
1513	pub fn collect_collation_info(header: &HeaderFor<T>) -> CollationInfo {
1514		CollationInfo {
1515			hrmp_watermark: HrmpWatermark::<T>::get(),
1516			horizontal_messages: HrmpOutboundMessages::<T>::get(),
1517			upward_messages: UpwardMessages::<T>::get(),
1518			processed_downward_messages: ProcessedDownwardMessages::<T>::get(),
1519			new_validation_code: NewValidationCode::<T>::get().map(Into::into),
1520			// Check if there is a custom header that will also be returned by the validation phase.
1521			// If so, we need to also return it here.
1522			head_data: CustomValidationHeadData::<T>::get()
1523				.map_or_else(|| header.encode(), |v| v)
1524				.into(),
1525		}
1526	}
1527
1528	/// Returns the core selector for the next block.
1529	pub fn core_selector() -> (CoreSelector, ClaimQueueOffset) {
1530		T::SelectCore::select_next_core()
1531	}
1532
1533	/// Set a custom head data that should be returned as result of `validate_block`.
1534	///
1535	/// This will overwrite the head data that is returned as result of `validate_block` while
1536	/// validating a `PoV` on the relay chain. Normally the head data that is being returned
1537	/// by `validate_block` is the header of the block that is validated, thus it can be
1538	/// enacted as the new best block. However, for features like forking it can be useful
1539	/// to overwrite the head data with a custom header.
1540	///
1541	/// # Attention
1542	///
1543	/// This should only be used when you are sure what you are doing as this can brick
1544	/// your Parachain.
1545	pub fn set_custom_validation_head_data(head_data: Vec<u8>) {
1546		CustomValidationHeadData::<T>::put(head_data);
1547	}
1548
1549	/// Send the ump signals
1550	#[cfg(feature = "experimental-ump-signals")]
1551	fn send_ump_signal() {
1552		use cumulus_primitives_core::relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR};
1553
1554		UpwardMessages::<T>::mutate(|up| {
1555			up.push(UMP_SEPARATOR);
1556
1557			// Send the core selector signal.
1558			let core_selector = T::SelectCore::selected_core();
1559			up.push(UMPSignal::SelectCore(core_selector.0, core_selector.1).encode());
1560		});
1561	}
1562
1563	/// Open HRMP channel for using it in benchmarks or tests.
1564	///
1565	/// The caller assumes that the pallet will accept regular outbound message to the sibling
1566	/// `target_parachain` after this call. No other assumptions are made.
1567	#[cfg(any(feature = "runtime-benchmarks", feature = "std"))]
1568	pub fn open_outbound_hrmp_channel_for_benchmarks_or_tests(target_parachain: ParaId) {
1569		RelevantMessagingState::<T>::put(MessagingStateSnapshot {
1570			dmq_mqc_head: Default::default(),
1571			relay_dispatch_queue_remaining_capacity: Default::default(),
1572			ingress_channels: Default::default(),
1573			egress_channels: vec![(
1574				target_parachain,
1575				cumulus_primitives_core::AbridgedHrmpChannel {
1576					max_capacity: 10,
1577					max_total_size: 10_000_000_u32,
1578					max_message_size: 10_000_000_u32,
1579					msg_count: 5,
1580					total_size: 5_000_000_u32,
1581					mqc_head: None,
1582				},
1583			)],
1584		})
1585	}
1586
1587	/// Open HRMP channel for using it in benchmarks or tests.
1588	///
1589	/// The caller assumes that the pallet will accept regular outbound message to the sibling
1590	/// `target_parachain` after this call. No other assumptions are made.
1591	#[cfg(any(feature = "runtime-benchmarks", feature = "std"))]
1592	pub fn open_custom_outbound_hrmp_channel_for_benchmarks_or_tests(
1593		target_parachain: ParaId,
1594		channel: cumulus_primitives_core::AbridgedHrmpChannel,
1595	) {
1596		RelevantMessagingState::<T>::put(MessagingStateSnapshot {
1597			dmq_mqc_head: Default::default(),
1598			relay_dispatch_queue_remaining_capacity: Default::default(),
1599			ingress_channels: Default::default(),
1600			egress_channels: vec![(target_parachain, channel)],
1601		})
1602	}
1603
1604	/// Prepare/insert relevant data for `schedule_code_upgrade` for benchmarks.
1605	#[cfg(feature = "runtime-benchmarks")]
1606	pub fn initialize_for_set_code_benchmark(max_code_size: u32) {
1607		// insert dummy ValidationData
1608		let vfp = PersistedValidationData {
1609			parent_head: polkadot_parachain_primitives::primitives::HeadData(Default::default()),
1610			relay_parent_number: 1,
1611			relay_parent_storage_root: Default::default(),
1612			max_pov_size: 1_000,
1613		};
1614		<ValidationData<T>>::put(&vfp);
1615
1616		// insert dummy HostConfiguration with
1617		let host_config = AbridgedHostConfiguration {
1618			max_code_size,
1619			max_head_data_size: 32 * 1024,
1620			max_upward_queue_count: 8,
1621			max_upward_queue_size: 1024 * 1024,
1622			max_upward_message_size: 4 * 1024,
1623			max_upward_message_num_per_candidate: 2,
1624			hrmp_max_message_num_per_candidate: 2,
1625			validation_upgrade_cooldown: 2,
1626			validation_upgrade_delay: 2,
1627			async_backing_params: relay_chain::AsyncBackingParams {
1628				allowed_ancestry_len: 0,
1629				max_candidate_depth: 0,
1630			},
1631		};
1632		<HostConfiguration<T>>::put(host_config);
1633	}
1634}
1635
1636/// Type that implements `SetCode`.
1637pub struct ParachainSetCode<T>(core::marker::PhantomData<T>);
1638impl<T: Config> frame_system::SetCode<T> for ParachainSetCode<T> {
1639	fn set_code(code: Vec<u8>) -> DispatchResult {
1640		Pallet::<T>::schedule_code_upgrade(code)
1641	}
1642}
1643
1644impl<T: Config> Pallet<T> {
1645	/// Puts a message in the `PendingUpwardMessages` storage item.
1646	/// The message will be later sent in `on_finalize`.
1647	/// Checks host configuration to see if message is too big.
1648	/// Increases the delivery fee factor if the queue is sufficiently (see
1649	/// [`ump_constants::THRESHOLD_FACTOR`]) congested.
1650	pub fn send_upward_message(message: UpwardMessage) -> Result<(u32, XcmHash), MessageSendError> {
1651		let message_len = message.len();
1652		// Check if the message fits into the relay-chain constraints.
1653		//
1654		// Note, that we are using `host_configuration` here which may be from the previous
1655		// block, in case this is called from `on_initialize`, i.e. before the inherent with fresh
1656		// data is submitted.
1657		//
1658		// That shouldn't be a problem since this is a preliminary check and the actual check would
1659		// be performed just before submitting the message from the candidate, and it already can
1660		// happen that during the time the message is buffered for sending the relay-chain setting
1661		// may change so that the message is no longer valid.
1662		//
1663		// However, changing this setting is expected to be rare.
1664		if let Some(cfg) = HostConfiguration::<T>::get() {
1665			if message_len > cfg.max_upward_message_size as usize {
1666				return Err(MessageSendError::TooBig);
1667			}
1668			let threshold =
1669				cfg.max_upward_queue_size.saturating_div(ump_constants::THRESHOLD_FACTOR);
1670			// We check the threshold against total size and not number of messages since messages
1671			// could be big or small.
1672			<PendingUpwardMessages<T>>::append(message.clone());
1673			let pending_messages = PendingUpwardMessages::<T>::get();
1674			let total_size: usize = pending_messages.iter().map(UpwardMessage::len).sum();
1675			if total_size > threshold as usize {
1676				// We increase the fee factor by a factor based on the new message's size in KB
1677				Self::increase_fee_factor((), message_len as u128);
1678			}
1679		} else {
1680			// This storage field should carry over from the previous block. So if it's None
1681			// then it must be that this is an edge-case where a message is attempted to be
1682			// sent at the first block.
1683			//
1684			// Let's pass this message through. I think it's not unreasonable to expect that
1685			// the message is not huge and it comes through, but if it doesn't it can be
1686			// returned back to the sender.
1687			//
1688			// Thus fall through here.
1689			<PendingUpwardMessages<T>>::append(message.clone());
1690		};
1691
1692		// The relay ump does not use using_encoded
1693		// We apply the same this to use the same hash
1694		let hash = sp_io::hashing::blake2_256(&message);
1695		Self::deposit_event(Event::UpwardMessageSent { message_hash: Some(hash) });
1696		Ok((0, hash))
1697	}
1698
1699	/// Get the relay chain block number which was used as an anchor for the last block in this
1700	/// chain.
1701	pub fn last_relay_block_number() -> RelayChainBlockNumber {
1702		LastRelayChainBlockNumber::<T>::get()
1703	}
1704}
1705
1706impl<T: Config> UpwardMessageSender for Pallet<T> {
1707	fn send_upward_message(message: UpwardMessage) -> Result<(u32, XcmHash), MessageSendError> {
1708		Self::send_upward_message(message)
1709	}
1710
1711	fn can_send_upward_message(message: &UpwardMessage) -> Result<(), MessageSendError> {
1712		let max_upward_message_size = HostConfiguration::<T>::get()
1713			.map(|cfg| cfg.max_upward_message_size)
1714			.ok_or(MessageSendError::Other)?;
1715		if message.len() > max_upward_message_size as usize {
1716			Err(MessageSendError::TooBig)
1717		} else {
1718			Ok(())
1719		}
1720	}
1721
1722	#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
1723	fn ensure_successful_delivery() {
1724		const MAX_UPWARD_MESSAGE_SIZE: u32 = 65_531 * 3;
1725		const MAX_CODE_SIZE: u32 = 3 * 1024 * 1024;
1726		HostConfiguration::<T>::mutate(|cfg| match cfg {
1727			Some(cfg) => cfg.max_upward_message_size = MAX_UPWARD_MESSAGE_SIZE,
1728			None =>
1729				*cfg = Some(AbridgedHostConfiguration {
1730					max_code_size: MAX_CODE_SIZE,
1731					max_head_data_size: 32 * 1024,
1732					max_upward_queue_count: 8,
1733					max_upward_queue_size: 1024 * 1024,
1734					max_upward_message_size: MAX_UPWARD_MESSAGE_SIZE,
1735					max_upward_message_num_per_candidate: 2,
1736					hrmp_max_message_num_per_candidate: 2,
1737					validation_upgrade_cooldown: 2,
1738					validation_upgrade_delay: 2,
1739					async_backing_params: relay_chain::AsyncBackingParams {
1740						allowed_ancestry_len: 0,
1741						max_candidate_depth: 0,
1742					},
1743				}),
1744		})
1745	}
1746}
1747
1748impl<T: Config> InspectMessageQueues for Pallet<T> {
1749	fn clear_messages() {
1750		PendingUpwardMessages::<T>::kill();
1751	}
1752
1753	fn get_messages() -> Vec<(VersionedLocation, Vec<VersionedXcm<()>>)> {
1754		use xcm::prelude::*;
1755
1756		let messages: Vec<VersionedXcm<()>> = PendingUpwardMessages::<T>::get()
1757			.iter()
1758			.map(|encoded_message| {
1759				VersionedXcm::<()>::decode_all_with_depth_limit(
1760					MAX_XCM_DECODE_DEPTH,
1761					&mut &encoded_message[..],
1762				)
1763				.unwrap()
1764			})
1765			.collect();
1766
1767		if messages.is_empty() {
1768			vec![]
1769		} else {
1770			vec![(VersionedLocation::from(Location::parent()), messages)]
1771		}
1772	}
1773}
1774
#[cfg(feature = "runtime-benchmarks")]
impl<T: Config> polkadot_runtime_parachains::EnsureForParachain for Pallet<T> {
	fn ensure(para_id: ParaId) {
		// Only open a fresh benchmark channel when none is currently usable.
		if matches!(Self::get_channel_status(para_id), ChannelStatus::Closed) {
			Self::open_outbound_hrmp_channel_for_benchmarks_or_tests(para_id)
		}
	}
}
1783
/// Something that can check the inherents of a block.
#[deprecated(note = "This trait is deprecated and will be removed by September 2024. \
		Consider switching to `cumulus-pallet-parachain-system::ConsensusHook`")]
pub trait CheckInherents<Block: BlockT> {
	/// Check all inherents of the block.
	///
	/// This function gets passed all the extrinsics of the block, so it is up to the callee to
	/// identify the inherents. The `validation_data` can be used to access the relay chain
	/// state proof the block was built against.
	fn check_inherents(
		block: &Block,
		validation_data: &RelayChainStateProof,
	) -> frame_support::inherent::CheckInherentsResult;
}
1797
1798/// Struct that always returns `Ok` on inherents check, needed for backwards-compatibility.
1799#[doc(hidden)]
1800pub struct DummyCheckInherents<Block>(core::marker::PhantomData<Block>);
1801
1802#[allow(deprecated)]
1803impl<Block: BlockT> CheckInherents<Block> for DummyCheckInherents<Block> {
1804	fn check_inherents(
1805		_: &Block,
1806		_: &RelayChainStateProof,
1807	) -> frame_support::inherent::CheckInherentsResult {
1808		sp_inherents::CheckInherentsResult::new()
1809	}
1810}
1811
/// Something that should be informed about system related events.
///
/// This includes events like [`on_validation_data`](Self::on_validation_data), which is called
/// when the parachain inherent containing the validation data is executed, and
/// [`on_validation_code_applied`](Self::on_validation_code_applied), which is called when new
/// validation code is written to the state — meaning the runtime will be using the new code
/// from the next block onwards.
#[impl_trait_for_tuples::impl_for_tuples(30)]
pub trait OnSystemEvent {
	/// Called once in each block, when the validation data is set by the inherent.
	fn on_validation_data(data: &PersistedValidationData);
	/// Called when new validation code is being applied, i.e. from the next block on this is the
	/// new runtime.
	fn on_validation_code_applied();
}
1827
/// Holds the most recent relay-parent state root and block number of the current parachain block.
#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Default, RuntimeDebug)]
pub struct RelayChainState {
	/// Block number of the current relay-parent.
	pub number: relay_chain::BlockNumber,
	/// State root of the relay chain at the current relay-parent height.
	pub state_root: relay_chain::Hash,
}
1836
/// This exposes the [`RelayChainState`] to other runtime modules.
///
/// Enables parachains to read relay chain state via state proofs.
pub trait RelaychainStateProvider {
	/// May be called by any runtime module to obtain the current state of the relay chain.
	///
	/// **NOTE**: This is not guaranteed to return monotonically increasing relay parents.
	fn current_relay_chain_state() -> RelayChainState;

	/// Utility function intended only for benchmarking scenarios. Implementing it is optional;
	/// the default implementation is a no-op.
	///
	/// It allows for setting a custom [`RelayChainState`].
	#[cfg(feature = "runtime-benchmarks")]
	fn set_current_relay_chain_state(_state: RelayChainState) {}
}
1853
/// Implements [`BlockNumberProvider`] that returns the relay chain block number fetched from
/// validation data.
///
/// When validation data is not available (e.g. within `on_initialize`), it falls back to
/// [`Pallet::last_relay_block_number()`].
///
/// **NOTE**: This has been deprecated, please use [`RelaychainDataProvider`]
#[deprecated = "Use `RelaychainDataProvider` instead"]
pub type RelaychainBlockNumberProvider<T> = RelaychainDataProvider<T>;
1863
1864/// Implements [`BlockNumberProvider`] and [`RelaychainStateProvider`] that returns relevant relay
1865/// data fetched from validation data.
1866///
1867/// NOTE: When validation data is not available (e.g. within `on_initialize`):
1868///
1869/// - [`current_relay_chain_state`](Self::current_relay_chain_state): Will return the default value
1870///   of [`RelayChainState`].
1871/// - [`current_block_number`](Self::current_block_number): Will return
1872///   [`Pallet::last_relay_block_number()`].
1873pub struct RelaychainDataProvider<T>(core::marker::PhantomData<T>);
1874
1875impl<T: Config> BlockNumberProvider for RelaychainDataProvider<T> {
1876	type BlockNumber = relay_chain::BlockNumber;
1877
1878	fn current_block_number() -> relay_chain::BlockNumber {
1879		ValidationData::<T>::get()
1880			.map(|d| d.relay_parent_number)
1881			.unwrap_or_else(|| Pallet::<T>::last_relay_block_number())
1882	}
1883
1884	#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
1885	fn set_block_number(block: Self::BlockNumber) {
1886		let mut validation_data = ValidationData::<T>::get().unwrap_or_else(||
1887			// PersistedValidationData does not impl default in non-std
1888			PersistedValidationData {
1889				parent_head: vec![].into(),
1890				relay_parent_number: Default::default(),
1891				max_pov_size: Default::default(),
1892				relay_parent_storage_root: Default::default(),
1893			});
1894		validation_data.relay_parent_number = block;
1895		ValidationData::<T>::put(validation_data)
1896	}
1897}
1898
1899impl<T: Config> RelaychainStateProvider for RelaychainDataProvider<T> {
1900	fn current_relay_chain_state() -> RelayChainState {
1901		ValidationData::<T>::get()
1902			.map(|d| RelayChainState {
1903				number: d.relay_parent_number,
1904				state_root: d.relay_parent_storage_root,
1905			})
1906			.unwrap_or_default()
1907	}
1908
1909	#[cfg(feature = "runtime-benchmarks")]
1910	fn set_current_relay_chain_state(state: RelayChainState) {
1911		let mut validation_data = ValidationData::<T>::get().unwrap_or_else(||
1912			// PersistedValidationData does not impl default in non-std
1913			PersistedValidationData {
1914				parent_head: vec![].into(),
1915				relay_parent_number: Default::default(),
1916				max_pov_size: Default::default(),
1917				relay_parent_storage_root: Default::default(),
1918			});
1919		validation_data.relay_parent_number = state.number;
1920		validation_data.relay_parent_storage_root = state.state_root;
1921		ValidationData::<T>::put(validation_data)
1922	}
1923}