cumulus_pallet_parachain_system/
lib.rs

1// Copyright (C) Parity Technologies (UK) Ltd.
2// This file is part of Cumulus.
3
4// Cumulus is free software: you can redistribute it and/or modify
5// it under the terms of the GNU General Public License as published by
6// the Free Software Foundation, either version 3 of the License, or
7// (at your option) any later version.
8
9// Cumulus is distributed in the hope that it will be useful,
10// but WITHOUT ANY WARRANTY; without even the implied warranty of
11// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12// GNU General Public License for more details.
13
14// You should have received a copy of the GNU General Public License
15// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
16
17#![cfg_attr(not(feature = "std"), no_std)]
18
19//! `cumulus-pallet-parachain-system` is a base pallet for Cumulus-based parachains.
20//!
21//! This pallet handles low-level details of being a parachain. Its responsibilities include:
22//!
23//! - ingestion of the parachain validation data;
24//! - ingestion and dispatch of incoming downward and lateral messages;
25//! - coordinating upgrades with the Relay Chain; and
26//! - communication of parachain outputs, such as sent messages, signaling an upgrade, etc.
27//!
28//! Users must ensure that they register this pallet as an inherent provider.
29
30extern crate alloc;
31
32use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec};
33use codec::{Decode, Encode};
34use core::{cmp, marker::PhantomData};
35use cumulus_primitives_core::{
36	relay_chain::{
37		self,
38		vstaging::{ClaimQueueOffset, CoreSelector, DEFAULT_CLAIM_QUEUE_OFFSET},
39	},
40	AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, GetChannelInfo,
41	InboundDownwardMessage, InboundHrmpMessage, ListChannelInfos, MessageSendError,
42	OutboundHrmpMessage, ParaId, PersistedValidationData, UpwardMessage, UpwardMessageSender,
43	XcmpMessageHandler, XcmpMessageSource,
44};
45use cumulus_primitives_parachain_inherent::{MessageQueueChain, ParachainInherentData};
46use frame_support::{
47	defensive,
48	dispatch::{DispatchResult, Pays, PostDispatchInfo},
49	ensure,
50	inherent::{InherentData, InherentIdentifier, ProvideInherent},
51	traits::{Get, HandleMessage},
52	weights::Weight,
53};
54use frame_system::{ensure_none, ensure_root, pallet_prelude::HeaderFor};
55use polkadot_parachain_primitives::primitives::RelayChainBlockNumber;
56use polkadot_runtime_parachains::FeeTracker;
57use scale_info::TypeInfo;
58use sp_core::U256;
59use sp_runtime::{
60	traits::{Block as BlockT, BlockNumberProvider, Hash, One},
61	BoundedSlice, FixedU128, RuntimeDebug, Saturating,
62};
63use xcm::{latest::XcmHash, VersionedLocation, VersionedXcm};
64use xcm_builder::InspectMessageQueues;
65
66mod benchmarking;
67pub mod migration;
68mod mock;
69#[cfg(test)]
70mod tests;
71pub mod weights;
72
73pub use weights::WeightInfo;
74
75mod unincluded_segment;
76
77pub mod consensus_hook;
78pub mod relay_state_snapshot;
79#[macro_use]
80pub mod validate_block;
81
82use unincluded_segment::{
83	Ancestor, HrmpChannelUpdate, HrmpWatermarkUpdate, OutboundBandwidthLimits, SegmentTracker,
84	UsedBandwidth,
85};
86
87pub use consensus_hook::{ConsensusHook, ExpectParentIncluded};
88/// Register the `validate_block` function that is used by parachains to validate blocks on a
89/// validator.
90///
91/// Does *nothing* when `std` feature is enabled.
92///
93/// Expects as parameters the runtime, a block executor and an inherent checker.
94///
95/// # Example
96///
97/// ```
98///     struct BlockExecutor;
99///     struct Runtime;
100///     struct CheckInherents;
101///
102///     cumulus_pallet_parachain_system::register_validate_block! {
103///         Runtime = Runtime,
///         BlockExecutor = BlockExecutor,
105///         CheckInherents = CheckInherents,
106///     }
107///
108/// # fn main() {}
109/// ```
110pub use cumulus_pallet_parachain_system_proc_macro::register_validate_block;
111pub use relay_state_snapshot::{MessagingStateSnapshot, RelayChainStateProof};
112
113pub use pallet::*;
114
/// Something that can check the associated relay block number.
///
/// Each Parachain block is built in the context of a relay chain block, this trait allows us
/// to validate the given relay chain block number. With async backing it is legal to build
/// multiple Parachain blocks per relay chain parent. With this trait it is possible for the
/// Parachain to ensure that still only one Parachain block is built per relay chain parent.
///
/// By default [`RelayNumberStrictlyIncreases`] and [`AnyRelayNumber`] are provided.
pub trait CheckAssociatedRelayNumber {
	/// Check the current relay number versus the previous relay number.
	///
	/// The implementation should panic when there is something wrong.
	fn check_associated_relay_number(
		current: RelayChainBlockNumber,
		previous: RelayChainBlockNumber,
	);
}
132
133/// Provides an implementation of [`CheckAssociatedRelayNumber`].
134///
135/// It will ensure that the associated relay block number strictly increases between Parachain
136/// blocks. This should be used by production Parachains when in doubt.
137pub struct RelayNumberStrictlyIncreases;
138
139impl CheckAssociatedRelayNumber for RelayNumberStrictlyIncreases {
140	fn check_associated_relay_number(
141		current: RelayChainBlockNumber,
142		previous: RelayChainBlockNumber,
143	) {
144		if current <= previous {
145			panic!("Relay chain block number needs to strictly increase between Parachain blocks!")
146		}
147	}
148}
149
150/// Provides an implementation of [`CheckAssociatedRelayNumber`].
151///
152/// This will accept any relay chain block number combination. This is mainly useful for
153/// test parachains.
154pub struct AnyRelayNumber;
155
156impl CheckAssociatedRelayNumber for AnyRelayNumber {
157	fn check_associated_relay_number(_: RelayChainBlockNumber, _: RelayChainBlockNumber) {}
158}
159
160/// Provides an implementation of [`CheckAssociatedRelayNumber`].
161///
162/// It will ensure that the associated relay block number monotonically increases between Parachain
163/// blocks. This should be used when asynchronous backing is enabled.
164pub struct RelayNumberMonotonicallyIncreases;
165
166impl CheckAssociatedRelayNumber for RelayNumberMonotonicallyIncreases {
167	fn check_associated_relay_number(
168		current: RelayChainBlockNumber,
169		previous: RelayChainBlockNumber,
170	) {
171		if current < previous {
172			panic!("Relay chain block number needs to monotonically increase between Parachain blocks!")
173		}
174	}
175}
176
/// The max length of a DMP message.
///
/// Derived from the [`HandleMessage`] implementation configured as [`Config::DmpQueue`].
pub type MaxDmpMessageLenOf<T> = <<T as Config>::DmpQueue as HandleMessage>::MaxMessageLen;
179
/// Constants that parametrize the UMP (upward message passing) delivery fee mechanism.
pub mod ump_constants {
	use super::FixedU128;

	/// `host_config.max_upward_queue_size / THRESHOLD_FACTOR` is the threshold after which delivery
	/// starts getting exponentially more expensive.
	/// `2` means the price starts to increase when queue is half full.
	pub const THRESHOLD_FACTOR: u32 = 2;
	/// The base number the delivery fee factor gets multiplied by every time it is increased.
	/// Also the number it gets divided by when decreased.
	pub const EXPONENTIAL_FEE_BASE: FixedU128 = FixedU128::from_rational(105, 100); // 1.05
	/// The base number message size in KB is multiplied by before increasing the fee factor.
	pub const MESSAGE_SIZE_FEE_BASE: FixedU128 = FixedU128::from_rational(1, 1000); // 0.001
}
193
/// Trait for selecting the next core to build the candidate for.
pub trait SelectCore {
	/// Core selector information for the current block.
	fn selected_core() -> (CoreSelector, ClaimQueueOffset);
	/// Core selector information for the next block.
	///
	/// NOTE(review): the implementations in this file derive this from `block_number() + 1`,
	/// i.e. it is expected to match what `selected_core` reports in the next block.
	fn select_next_core() -> (CoreSelector, ClaimQueueOffset);
}
201
202/// The default core selection policy.
203pub struct DefaultCoreSelector<T>(PhantomData<T>);
204
205impl<T: frame_system::Config> SelectCore for DefaultCoreSelector<T> {
206	fn selected_core() -> (CoreSelector, ClaimQueueOffset) {
207		let core_selector: U256 = frame_system::Pallet::<T>::block_number().into();
208
209		(CoreSelector(core_selector.byte(0)), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET))
210	}
211
212	fn select_next_core() -> (CoreSelector, ClaimQueueOffset) {
213		let core_selector: U256 = (frame_system::Pallet::<T>::block_number() + One::one()).into();
214
215		(CoreSelector(core_selector.byte(0)), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET))
216	}
217}
218
219/// Core selection policy that builds on claim queue offset 1.
220pub struct LookaheadCoreSelector<T>(PhantomData<T>);
221
222impl<T: frame_system::Config> SelectCore for LookaheadCoreSelector<T> {
223	fn selected_core() -> (CoreSelector, ClaimQueueOffset) {
224		let core_selector: U256 = frame_system::Pallet::<T>::block_number().into();
225
226		(CoreSelector(core_selector.byte(0)), ClaimQueueOffset(1))
227	}
228
229	fn select_next_core() -> (CoreSelector, ClaimQueueOffset) {
230		let core_selector: U256 = (frame_system::Pallet::<T>::block_number() + One::one()).into();
231
232		(CoreSelector(core_selector.byte(0)), ClaimQueueOffset(1))
233	}
234}
235
236#[frame_support::pallet]
237pub mod pallet {
238	use super::*;
239	use frame_support::pallet_prelude::*;
240	use frame_system::pallet_prelude::*;
241
	/// The pallet struct, generated by the `frame_support::pallet` macro.
	#[pallet::pallet]
	#[pallet::storage_version(migration::STORAGE_VERSION)]
	#[pallet::without_storage_info]
	pub struct Pallet<T>(_);
246
	/// The pallet's configuration trait.
	///
	/// NOTE(review): the `OnSetCode = ParachainSetCode<Self>` bound forces `frame_system`'s
	/// code-change handling through `ParachainSetCode` — presumably so code upgrades go via
	/// the relay-chain signalling implemented here; confirm with the `ParachainSetCode`
	/// definition.
	#[pallet::config]
	pub trait Config: frame_system::Config<OnSetCode = ParachainSetCode<Self>> {
		/// The overarching event type.
		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;

		/// Something which can be notified when the validation data is set.
		type OnSystemEvent: OnSystemEvent;

		/// Returns the parachain ID we are running with.
		#[pallet::constant]
		type SelfParaId: Get<ParaId>;

		/// The place where outbound XCMP messages come from. This is queried in `finalize_block`.
		type OutboundXcmpMessageSource: XcmpMessageSource;

		/// Queues inbound downward messages for delayed processing.
		///
		/// All inbound DMP messages from the relay are pushed into this. The handler is expected to
		/// eventually process all the messages that are pushed to it.
		type DmpQueue: HandleMessage;

		/// The weight we reserve at the beginning of the block for processing DMP messages.
		type ReservedDmpWeight: Get<Weight>;

		/// The message handler that will be invoked when messages are received via XCMP.
		///
		/// This should normally link to the XCMP Queue pallet.
		type XcmpMessageHandler: XcmpMessageHandler;

		/// The weight we reserve at the beginning of the block for processing XCMP messages.
		type ReservedXcmpWeight: Get<Weight>;

		/// Something that can check the associated relay parent block number.
		type CheckAssociatedRelayNumber: CheckAssociatedRelayNumber;

		/// Weight info for functions and calls.
		type WeightInfo: WeightInfo;

		/// An entry-point for higher-level logic to manage the backlog of unincluded parachain
		/// blocks and authorship rights for those blocks.
		///
		/// Typically, this should be a hook tailored to the collator-selection/consensus mechanism
		/// that is used for this chain.
		///
		/// However, to maintain the same behavior as prior to asynchronous backing, provide the
		/// [`consensus_hook::ExpectParentIncluded`] here. This is only necessary in the case
		/// that collators aren't expected to have node versions that supply the included block
		/// in the relay-chain state proof.
		type ConsensusHook: ConsensusHook;

		/// The core selection policy (see [`SelectCore`]).
		type SelectCore: SelectCore;
	}
300
	#[pallet::hooks]
	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
		/// Handles actually sending upward messages by moving them from `PendingUpwardMessages` to
		/// `UpwardMessages`. Decreases the delivery fee factor if after sending messages, the queue
		/// total size is less than the threshold (see [`ump_constants::THRESHOLD_FACTOR`]).
		/// Also does the sending for HRMP messages it takes from `OutboundXcmpMessageSource`.
		fn on_finalize(_: BlockNumberFor<T>) {
			// Clear the per-block mirrors of relay-chain state; they are re-populated by the
			// `set_validation_data` inherent of the next block.
			<DidSetValidationCode<T>>::kill();
			<UpgradeRestrictionSignal<T>>::kill();
			let relay_upgrade_go_ahead = <UpgradeGoAhead<T>>::take();

			let vfp = <ValidationData<T>>::get()
				.expect("set_validation_data inherent needs to be present in every block!");

			LastRelayChainBlockNumber::<T>::put(vfp.relay_parent_number);

			let host_config = match HostConfiguration::<T>::get() {
				Some(ok) => ok,
				None => {
					debug_assert!(
						false,
						"host configuration is promised to set until `on_finalize`; qed",
					);
					return
				},
			};

			// Before updating the relevant messaging state, we need to extract
			// the total bandwidth limits for the purpose of updating the unincluded
			// segment.
			let total_bandwidth_out = match RelevantMessagingState::<T>::get() {
				Some(s) => OutboundBandwidthLimits::from_relay_chain_state(&s),
				None => {
					debug_assert!(
						false,
						"relevant messaging state is promised to be set until `on_finalize`; \
							qed",
					);
					return
				},
			};

			// After this point, the `RelevantMessagingState` in storage reflects the
			// unincluded segment.
			Self::adjust_egress_bandwidth_limits();

			let (ump_msg_count, ump_total_bytes) = <PendingUpwardMessages<T>>::mutate(|up| {
				let (available_capacity, available_size) = match RelevantMessagingState::<T>::get()
				{
					Some(limits) => (
						limits.relay_dispatch_queue_remaining_capacity.remaining_count,
						limits.relay_dispatch_queue_remaining_capacity.remaining_size,
					),
					None => {
						debug_assert!(
							false,
							"relevant messaging state is promised to be set until `on_finalize`; \
								qed",
						);
						return (0, 0)
					},
				};

				// The per-candidate message-count limit from the host configuration also applies.
				let available_capacity =
					cmp::min(available_capacity, host_config.max_upward_message_num_per_candidate);

				// Count the number of messages we can possibly fit in the given constraints, i.e.
				// available_capacity and available_size.
				let (num, total_size) = up
					.iter()
					.scan((0u32, 0u32), |state, msg| {
						let (cap_used, size_used) = *state;
						let new_cap = cap_used.saturating_add(1);
						let new_size = size_used.saturating_add(msg.len() as u32);
						match available_capacity
							.checked_sub(new_cap)
							.and(available_size.checked_sub(new_size))
						{
							Some(_) => {
								*state = (new_cap, new_size);
								Some(*state)
							},
							_ => None,
						}
					})
					.last()
					.unwrap_or_default();

				// TODO: #274 Return back messages that do not longer fit into the queue.

				// Publish the first `num` messages; keep the rest pending for future blocks.
				UpwardMessages::<T>::put(&up[..num as usize]);
				*up = up.split_off(num as usize);

				// Send the core selector UMP signal. This is experimental until relay chain
				// validators are upgraded to handle ump signals.
				#[cfg(feature = "experimental-ump-signals")]
				Self::send_ump_signal();

				// If the total size of the pending messages is less than the threshold,
				// we decrease the fee factor, since the queue is less congested.
				// This makes delivery of new messages cheaper.
				let threshold = host_config
					.max_upward_queue_size
					.saturating_div(ump_constants::THRESHOLD_FACTOR);
				let remaining_total_size: usize = up.iter().map(UpwardMessage::len).sum();
				if remaining_total_size <= threshold as usize {
					Self::decrease_fee_factor(());
				}

				(num, total_size)
			});

			// Sending HRMP messages is a little bit more involved. There are the following
			// constraints:
			//
			// - a channel should exist (and it can be closed while a message is buffered),
			// - at most one message can be sent in a channel,
			// - the sent out messages should be ordered by ascension of recipient para id.
			// - the capacity and total size of the channel is limited,
			// - the maximum size of a message is limited (and can potentially be changed),

			let maximum_channels = host_config
				.hrmp_max_message_num_per_candidate
				.min(<AnnouncedHrmpMessagesPerCandidate<T>>::take())
				as usize;

			// Note: this internally calls the `GetChannelInfo` implementation for this
			// pallet, which draws on the `RelevantMessagingState`. That in turn has
			// been adjusted above to reflect the correct limits in all channels.
			let outbound_messages =
				T::OutboundXcmpMessageSource::take_outbound_messages(maximum_channels)
					.into_iter()
					.map(|(recipient, data)| OutboundHrmpMessage { recipient, data })
					.collect::<Vec<_>>();

			// Update the unincluded segment length; capacity checks were done previously in
			// `set_validation_data`, so this can be done unconditionally.
			{
				let hrmp_outgoing = outbound_messages
					.iter()
					.map(|msg| {
						(
							msg.recipient,
							HrmpChannelUpdate { msg_count: 1, total_bytes: msg.data.len() as u32 },
						)
					})
					.collect();
				let used_bandwidth =
					UsedBandwidth { ump_msg_count, ump_total_bytes, hrmp_outgoing };

				let mut aggregated_segment =
					AggregatedUnincludedSegment::<T>::get().unwrap_or_default();
				let consumed_go_ahead_signal =
					if aggregated_segment.consumed_go_ahead_signal().is_some() {
						// Some ancestor within the segment already processed this signal --
						// validated during inherent creation.
						None
					} else {
						relay_upgrade_go_ahead
					};
				// The bandwidth constructed was ensured to satisfy relay chain constraints.
				let ancestor = Ancestor::new_unchecked(used_bandwidth, consumed_go_ahead_signal);

				let watermark = HrmpWatermark::<T>::get();
				let watermark_update = HrmpWatermarkUpdate::new(watermark, vfp.relay_parent_number);

				aggregated_segment
					.append(&ancestor, watermark_update, &total_bandwidth_out)
					.expect("unincluded segment limits exceeded");
				AggregatedUnincludedSegment::<T>::put(aggregated_segment);
				// Check in `on_initialize` guarantees there's space for this block.
				UnincludedSegment::<T>::append(ancestor);
			}
			HrmpOutboundMessages::<T>::put(outbound_messages);
		}

		/// Cleans up the per-block ephemeral values of the previous block and pre-registers the
		/// weight that `on_finalize` will consume (the configuration available here may be stale,
		/// see the inline commentary below).
		fn on_initialize(_n: BlockNumberFor<T>) -> Weight {
			let mut weight = Weight::zero();

			// To prevent removing `NewValidationCode` that was set by another `on_initialize`
			// like for example from scheduler, we only kill the storage entry if it was not yet
			// updated in the current block.
			if !<DidSetValidationCode<T>>::get() {
				NewValidationCode::<T>::kill();
				weight += T::DbWeight::get().writes(1);
			}

			// The parent hash was unknown during block finalization. Update it here.
			{
				<UnincludedSegment<T>>::mutate(|chain| {
					if let Some(ancestor) = chain.last_mut() {
						let parent = frame_system::Pallet::<T>::parent_hash();
						// Ancestor is the latest finalized block, thus current parent is
						// its output head.
						ancestor.replace_para_head_hash(parent);
					}
				});
				weight += T::DbWeight::get().reads_writes(1, 1);

				// Weight used during finalization.
				weight += T::DbWeight::get().reads_writes(3, 2);
			}

			// Remove the validation from the old block.
			ValidationData::<T>::kill();
			ProcessedDownwardMessages::<T>::kill();
			HrmpWatermark::<T>::kill();
			UpwardMessages::<T>::kill();
			HrmpOutboundMessages::<T>::kill();
			CustomValidationHeadData::<T>::kill();

			weight += T::DbWeight::get().writes(6);

			// Here, in `on_initialize` we must report the weight for both `on_initialize` and
			// `on_finalize`.
			//
			// One complication here, is that the `host_configuration` is updated by an inherent
			// and those are processed after the block initialization phase. Therefore, we have to
			// be content only with the configuration as per the previous block. That means that
			// the configuration can be either stale (or be absent altogether in case of the
			// beginning of the chain).
			//
			// In order to mitigate this, we do the following. At the time, we are only concerned
			// about `hrmp_max_message_num_per_candidate`. We reserve the amount of weight to
			// process the number of HRMP messages according to the potentially stale
			// configuration. In `on_finalize` we will process only the maximum between the
			// announced number of messages and the actual received in the fresh configuration.
			//
			// In the common case, they will be the same. In the case the actual value is smaller
			// than the announced, we would waste some of weight. In the case the actual value is
			// greater than the announced, we will miss opportunity to send a couple of messages.
			weight += T::DbWeight::get().reads_writes(1, 1);
			let hrmp_max_message_num_per_candidate = HostConfiguration::<T>::get()
				.map(|cfg| cfg.hrmp_max_message_num_per_candidate)
				.unwrap_or(0);
			<AnnouncedHrmpMessagesPerCandidate<T>>::put(hrmp_max_message_num_per_candidate);

			// NOTE that the actual weight consumed by `on_finalize` may turn out lower.
			weight += T::DbWeight::get().reads_writes(
				3 + hrmp_max_message_num_per_candidate as u64,
				4 + hrmp_max_message_num_per_candidate as u64,
			);

			// Weight for updating the last relay chain block number in `on_finalize`.
			weight += T::DbWeight::get().reads_writes(1, 1);

			// Weight for adjusting the unincluded segment in `on_finalize`.
			weight += T::DbWeight::get().reads_writes(6, 3);

			// Always try to read `UpgradeGoAhead` in `on_finalize`.
			weight += T::DbWeight::get().reads(1);

			weight
		}
	}
556
	#[pallet::call]
	impl<T: Config> Pallet<T> {
		/// Set the current validation data.
		///
		/// This should be invoked exactly once per block. It will panic at the finalization
		/// phase if the call was not invoked.
		///
		/// The dispatch origin for this call must be `Inherent`
		///
		/// As a side effect, this function upgrades the current validation function
		/// if the appropriate time has come.
		#[pallet::call_index(0)]
		#[pallet::weight((0, DispatchClass::Mandatory))]
		// TODO: This weight should be corrected.
		pub fn set_validation_data(
			origin: OriginFor<T>,
			data: ParachainInherentData,
		) -> DispatchResultWithPostInfo {
			ensure_none(origin)?;
			assert!(
				!<ValidationData<T>>::exists(),
				"ValidationData must be updated only once in a block",
			);

			// TODO: This is more than zero, but will need benchmarking to figure out what.
			let mut total_weight = Weight::zero();

			// NOTE: the inherent data is expected to be unique, even if this block is built
			// in the context of the same relay parent as the previous one. In particular,
			// the inherent shouldn't contain messages that were already processed by any of the
			// ancestors.
			//
			// This invariant should be upheld by the `ProvideInherent` implementation.
			let ParachainInherentData {
				validation_data: vfp,
				relay_chain_state,
				downward_messages,
				horizontal_messages,
			} = data;

			// Check that the associated relay chain block number is as expected.
			T::CheckAssociatedRelayNumber::check_associated_relay_number(
				vfp.relay_parent_number,
				LastRelayChainBlockNumber::<T>::get(),
			);

			// Build (and thereby validate) the relay-chain state proof against the relay parent
			// storage root supplied in the validation data.
			let relay_state_proof = RelayChainStateProof::new(
				T::SelfParaId::get(),
				vfp.relay_parent_storage_root,
				relay_chain_state.clone(),
			)
			.expect("Invalid relay chain state proof");

			// Update the desired maximum capacity according to the consensus hook.
			let (consensus_hook_weight, capacity) =
				T::ConsensusHook::on_state_proof(&relay_state_proof);
			total_weight += consensus_hook_weight;
			total_weight += Self::maybe_drop_included_ancestors(&relay_state_proof, capacity);
			// Deposit a log indicating the relay-parent storage root.
			// TODO: remove this in favor of the relay-parent's hash after
			// https://github.com/paritytech/cumulus/issues/303
			frame_system::Pallet::<T>::deposit_log(
				cumulus_primitives_core::rpsr_digest::relay_parent_storage_root_item(
					vfp.relay_parent_storage_root,
					vfp.relay_parent_number,
				),
			);

			// initialization logic: we know that this runs exactly once every block,
			// which means we can put the initialization logic here to remove the
			// sequencing problem.
			let upgrade_go_ahead_signal = relay_state_proof
				.read_upgrade_go_ahead_signal()
				.expect("Invalid upgrade go ahead signal");

			let upgrade_signal_in_segment = AggregatedUnincludedSegment::<T>::get()
				.as_ref()
				.and_then(SegmentTracker::consumed_go_ahead_signal);
			if let Some(signal_in_segment) = upgrade_signal_in_segment.as_ref() {
				// Unincluded ancestor consuming upgrade signal is still within the segment,
				// sanity check that it matches with the signal from relay chain.
				assert_eq!(upgrade_go_ahead_signal, Some(*signal_in_segment));
			}
			match upgrade_go_ahead_signal {
				Some(_signal) if upgrade_signal_in_segment.is_some() => {
					// Do nothing, processing logic was executed by unincluded ancestor.
				},
				Some(relay_chain::UpgradeGoAhead::GoAhead) => {
					assert!(
						<PendingValidationCode<T>>::exists(),
						"No new validation function found in storage, GoAhead signal is not expected",
					);
					let validation_code = <PendingValidationCode<T>>::take();

					frame_system::Pallet::<T>::update_code_in_storage(&validation_code);
					<T::OnSystemEvent as OnSystemEvent>::on_validation_code_applied();
					Self::deposit_event(Event::ValidationFunctionApplied {
						relay_chain_block_num: vfp.relay_parent_number,
					});
				},
				Some(relay_chain::UpgradeGoAhead::Abort) => {
					// The relay chain rejected the upgrade; discard the pending code.
					<PendingValidationCode<T>>::kill();
					Self::deposit_event(Event::ValidationFunctionDiscarded);
				},
				None => {},
			}
			<UpgradeRestrictionSignal<T>>::put(
				relay_state_proof
					.read_upgrade_restriction_signal()
					.expect("Invalid upgrade restriction signal"),
			);
			<UpgradeGoAhead<T>>::put(upgrade_go_ahead_signal);

			let host_config = relay_state_proof
				.read_abridged_host_configuration()
				.expect("Invalid host configuration in relay chain state proof");

			let relevant_messaging_state = relay_state_proof
				.read_messaging_state_snapshot(&host_config)
				.expect("Invalid messaging state in relay chain state proof");

			// Mirror the relay-chain state into storage for the rest of the block to consume.
			<ValidationData<T>>::put(&vfp);
			<RelayStateProof<T>>::put(relay_chain_state);
			<RelevantMessagingState<T>>::put(relevant_messaging_state.clone());
			<HostConfiguration<T>>::put(host_config);

			<T::OnSystemEvent as OnSystemEvent>::on_validation_data(&vfp);

			// Enqueue the incoming DMP and HRMP messages for later processing.
			total_weight.saturating_accrue(Self::enqueue_inbound_downward_messages(
				relevant_messaging_state.dmq_mqc_head,
				downward_messages,
			));
			total_weight.saturating_accrue(Self::enqueue_inbound_horizontal_messages(
				&relevant_messaging_state.ingress_channels,
				horizontal_messages,
				vfp.relay_parent_number,
			));

			Ok(PostDispatchInfo { actual_weight: Some(total_weight), pays_fee: Pays::No })
		}

		/// Send the given message upward to the relay chain via `Self::send_upward_message`.
		///
		/// The dispatch origin for this call must be `Root`. Any error returned by
		/// `send_upward_message` is deliberately discarded (`let _ = …`), making this a
		/// best-effort call.
		#[pallet::call_index(1)]
		#[pallet::weight((1_000, DispatchClass::Operational))]
		pub fn sudo_send_upward_message(
			origin: OriginFor<T>,
			message: UpwardMessage,
		) -> DispatchResult {
			ensure_root(origin)?;
			let _ = Self::send_upward_message(message);
			Ok(())
		}

		// WARNING: call indices 2 and 3 were used in a former version of this pallet. Using them
		// again will require to bump the transaction version of runtimes using this pallet.
	}
712
	/// Events deposited by this pallet.
	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config> {
		/// The validation function has been scheduled to apply.
		ValidationFunctionStored,
		/// The validation function was applied as of the contained relay chain block number.
		ValidationFunctionApplied { relay_chain_block_num: RelayChainBlockNumber },
		/// The relay-chain aborted the upgrade process.
		ValidationFunctionDiscarded,
		/// Some downward messages have been received and will be processed.
		DownwardMessagesReceived { count: u32 },
		/// Downward messages were processed using the given weight.
		DownwardMessagesProcessed { weight_used: Weight, dmq_head: relay_chain::Hash },
		/// An upward message was sent to the relay chain.
		UpwardMessageSent { message_hash: Option<XcmHash> },
	}
729
	/// Errors that can be returned by this pallet.
	#[pallet::error]
	pub enum Error<T> {
		/// Attempt to upgrade validation function while existing upgrade pending.
		OverlappingUpgrades,
		/// Polkadot currently prohibits this parachain from upgrading its validation function.
		ProhibitedByPolkadot,
		/// The supplied validation function has compiled into a blob larger than Polkadot is
		/// willing to run.
		TooBig,
		/// The inherent which supplies the validation data did not run this block.
		ValidationDataNotAvailable,
		/// The inherent which supplies the host configuration did not run this block.
		HostConfigurationNotAvailable,
		/// No validation function upgrade is currently scheduled.
		NotScheduled,
		/// No code upgrade has been authorized.
		NothingAuthorized,
		/// The given code upgrade has not been authorized.
		Unauthorized,
	}
750
	/// Latest included block descendants the runtime accepted. In other words, these are
	/// ancestors of the currently executing block which have not been included in the observed
	/// relay-chain state.
	///
	/// The segment length is limited by the capacity returned from the [`ConsensusHook`] configured
	/// in the pallet.
	#[pallet::storage]
	pub type UnincludedSegment<T: Config> = StorageValue<_, Vec<Ancestor<T::Hash>>, ValueQuery>;

	/// Storage field that keeps track of bandwidth used by the unincluded segment along with the
	/// latest HRMP watermark. Used for limiting the acceptance of new blocks with
	/// respect to relay chain constraints.
	#[pallet::storage]
	pub type AggregatedUnincludedSegment<T: Config> =
		StorageValue<_, SegmentTracker<T::Hash>, OptionQuery>;

	/// In case of a scheduled upgrade, this storage field contains the validation code to be
	/// applied.
	///
	/// As soon as the relay chain gives us the go-ahead signal, we will overwrite the
	/// [`:code`][sp_core::storage::well_known_keys::CODE] which will result in the next block
	/// being processed with the new validation code. This concludes the upgrade process.
	#[pallet::storage]
	pub type PendingValidationCode<T: Config> = StorageValue<_, Vec<u8>, ValueQuery>;

	/// Validation code that is set by the parachain and is to be communicated to collator and
	/// consequently the relay-chain.
	///
	/// This will be cleared in `on_initialize` of each new block if no other pallet already set
	/// the value.
	#[pallet::storage]
	pub type NewValidationCode<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;
783
	/// The [`PersistedValidationData`] set for this block.
	/// This value is expected to be set only once per block and it's never stored
	/// in the trie.
	#[pallet::storage]
	pub type ValidationData<T: Config> = StorageValue<_, PersistedValidationData>;

	/// Whether new validation code was set in this block (see [`NewValidationCode`]) in order to
	/// notify the relay chain.
	#[pallet::storage]
	pub type DidSetValidationCode<T: Config> = StorageValue<_, bool, ValueQuery>;

	/// The relay chain block number associated with the last parachain block.
	///
	/// This is updated in `on_finalize`.
	#[pallet::storage]
	pub type LastRelayChainBlockNumber<T: Config> =
		StorageValue<_, RelayChainBlockNumber, ValueQuery>;

	/// An option which indicates if the relay-chain restricts signalling a validation code upgrade.
	/// In other words, if this is `Some` and [`NewValidationCode`] is `Some` then the produced
	/// candidate will be invalid.
	///
	/// This storage item is a mirror of the corresponding value for the current parachain from the
	/// relay-chain. This value is ephemeral which means it doesn't hit the storage. This value is
	/// set after the inherent.
	#[pallet::storage]
	pub type UpgradeRestrictionSignal<T: Config> =
		StorageValue<_, Option<relay_chain::UpgradeRestriction>, ValueQuery>;

	/// Optional upgrade go-ahead signal from the relay-chain.
	///
	/// This storage item is a mirror of the corresponding value for the current parachain from the
	/// relay-chain. This value is ephemeral which means it doesn't hit the storage. This value is
	/// set after the inherent.
	#[pallet::storage]
	pub type UpgradeGoAhead<T: Config> =
		StorageValue<_, Option<relay_chain::UpgradeGoAhead>, ValueQuery>;
820
	/// The state proof for the last relay parent block.
	///
	/// This field is meant to be updated each block with the validation data inherent. Therefore,
	/// before processing of the inherent, e.g. in `on_initialize` this data may be stale.
	///
	/// This data is also absent from the genesis.
	#[pallet::storage]
	pub type RelayStateProof<T: Config> = StorageValue<_, sp_trie::StorageProof>;

	/// The snapshot of some state related to messaging relevant to the current parachain as per
	/// the relay parent.
	///
	/// This field is meant to be updated each block with the validation data inherent. Therefore,
	/// before processing of the inherent, e.g. in `on_initialize` this data may be stale.
	///
	/// Note: the egress channel limits recorded here are additionally tightened by
	/// `adjust_egress_bandwidth_limits` to account for bandwidth already used by the
	/// unincluded segment.
	///
	/// This data is also absent from the genesis.
	#[pallet::storage]
	pub type RelevantMessagingState<T: Config> = StorageValue<_, MessagingStateSnapshot>;

	/// The parachain host configuration that was obtained from the relay parent.
	///
	/// This field is meant to be updated each block with the validation data inherent. Therefore,
	/// before processing of the inherent, e.g. in `on_initialize` this data may be stale.
	///
	/// This data is also absent from the genesis.
	#[pallet::storage]
	#[pallet::disable_try_decode_storage]
	pub type HostConfiguration<T: Config> = StorageValue<_, AbridgedHostConfiguration>;
849
	/// The last downward message queue chain head we have observed.
	///
	/// This value is loaded before and saved after processing inbound downward messages carried
	/// by the system inherent.
	#[pallet::storage]
	pub type LastDmqMqcHead<T: Config> = StorageValue<_, MessageQueueChain, ValueQuery>;

	/// The message queue chain heads we have observed per incoming channel.
	///
	/// This value is loaded before and saved after processing inbound horizontal (HRMP) messages
	/// carried by the system inherent.
	#[pallet::storage]
	pub type LastHrmpMqcHeads<T: Config> =
		StorageValue<_, BTreeMap<ParaId, MessageQueueChain>, ValueQuery>;

	/// Number of downward messages processed in a block.
	///
	/// This will be cleared in `on_initialize` of each new block.
	#[pallet::storage]
	pub type ProcessedDownwardMessages<T: Config> = StorageValue<_, u32, ValueQuery>;

	/// HRMP watermark that was set in a block.
	///
	/// This will be cleared in `on_initialize` of each new block.
	#[pallet::storage]
	pub type HrmpWatermark<T: Config> = StorageValue<_, relay_chain::BlockNumber, ValueQuery>;

	/// HRMP messages that were sent in a block.
	///
	/// This will be cleared in `on_initialize` of each new block.
	#[pallet::storage]
	pub type HrmpOutboundMessages<T: Config> =
		StorageValue<_, Vec<OutboundHrmpMessage>, ValueQuery>;

	/// Upward messages that were sent in a block.
	///
	/// This will be cleared in `on_initialize` of each new block.
	#[pallet::storage]
	pub type UpwardMessages<T: Config> = StorageValue<_, Vec<UpwardMessage>, ValueQuery>;

	/// Upward messages that are still pending and not yet sent to the relay chain.
	#[pallet::storage]
	pub type PendingUpwardMessages<T: Config> = StorageValue<_, Vec<UpwardMessage>, ValueQuery>;
893
	/// Initialization value for the delivery fee factor for UMP.
	#[pallet::type_value]
	pub fn UpwardInitialDeliveryFeeFactor() -> FixedU128 {
		FixedU128::from_u32(1)
	}

	/// The factor to multiply the base delivery fee by for UMP.
	///
	/// Raised and lowered through this pallet's `FeeTracker` implementation; never drops below
	/// [`UpwardInitialDeliveryFeeFactor`].
	#[pallet::storage]
	pub type UpwardDeliveryFeeFactor<T: Config> =
		StorageValue<_, FixedU128, ValueQuery, UpwardInitialDeliveryFeeFactor>;

	/// The number of HRMP messages we observed in `on_initialize` and thus used that number for
	/// announcing the weight of `on_initialize` and `on_finalize`.
	#[pallet::storage]
	pub type AnnouncedHrmpMessagesPerCandidate<T: Config> = StorageValue<_, u32, ValueQuery>;

	/// The weight we reserve at the beginning of the block for processing XCMP messages. This
	/// overrides the amount set in the Config trait.
	#[pallet::storage]
	pub type ReservedXcmpWeightOverride<T: Config> = StorageValue<_, Weight>;

	/// The weight we reserve at the beginning of the block for processing DMP messages. This
	/// overrides the amount set in the Config trait.
	#[pallet::storage]
	pub type ReservedDmpWeightOverride<T: Config> = StorageValue<_, Weight>;

	/// A custom head data that should be returned as result of `validate_block`.
	///
	/// See `Pallet::set_custom_validation_head_data` for more information.
	#[pallet::storage]
	pub type CustomValidationHeadData<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;
925
	#[pallet::inherent]
	impl<T: Config> ProvideInherent for Pallet<T> {
		type Call = Call<T>;
		type Error = sp_inherents::MakeFatalError<()>;
		const INHERENT_IDENTIFIER: InherentIdentifier =
			cumulus_primitives_parachain_inherent::INHERENT_IDENTIFIER;

		// Builds the `set_validation_data` inherent from the collator-supplied
		// `ParachainInherentData`.
		fn create_inherent(data: &InherentData) -> Option<Self::Call> {
			let mut data: ParachainInherentData =
				data.get_data(&Self::INHERENT_IDENTIFIER).ok().flatten().expect(
					"validation function params are always injected into inherent data; qed",
				);

			// Strip DMP/HRMP messages that a previous parachain block already processed
			// (those sent at or before `LastRelayChainBlockNumber`).
			Self::drop_processed_messages_from_inherent(&mut data);

			Some(Call::set_validation_data { data })
		}

		fn is_inherent(call: &Self::Call) -> bool {
			matches!(call, Call::set_validation_data { .. })
		}
	}
948
	/// Genesis configuration for this pallet. Carries no user-configurable data; it exists so
	/// that the `BuildGenesisConfig` logic below runs at genesis.
	#[pallet::genesis_config]
	#[derive(frame_support::DefaultNoBound)]
	pub struct GenesisConfig<T: Config> {
		// Marker only; skipped during (de)serialization of the genesis JSON.
		#[serde(skip)]
		pub _config: core::marker::PhantomData<T>,
	}
955
	#[pallet::genesis_build]
	impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
		fn build(&self) {
			// TODO: Remove after https://github.com/paritytech/cumulus/issues/479
			//
			// NOTE(review): writes an empty value under the well-known `:c` key at genesis —
			// presumably a marker consumed by the client; verify against the linked issue.
			sp_io::storage::set(b":c", &[]);
		}
	}
963}
964
965impl<T: Config> Pallet<T> {
966	/// Get the unincluded segment size after the given hash.
967	///
968	/// If the unincluded segment doesn't contain the given hash, this returns the
969	/// length of the entire unincluded segment.
970	///
971	/// This is intended to be used for determining how long the unincluded segment _would be_
972	/// in runtime APIs related to authoring.
973	pub fn unincluded_segment_size_after(included_hash: T::Hash) -> u32 {
974		let segment = UnincludedSegment::<T>::get();
975		crate::unincluded_segment::size_after_included(included_hash, &segment)
976	}
977}
978
979impl<T: Config> FeeTracker for Pallet<T> {
980	type Id = ();
981
982	fn get_fee_factor(_: Self::Id) -> FixedU128 {
983		UpwardDeliveryFeeFactor::<T>::get()
984	}
985
986	fn increase_fee_factor(_: Self::Id, message_size_factor: FixedU128) -> FixedU128 {
987		<UpwardDeliveryFeeFactor<T>>::mutate(|f| {
988			*f = f.saturating_mul(
989				ump_constants::EXPONENTIAL_FEE_BASE.saturating_add(message_size_factor),
990			);
991			*f
992		})
993	}
994
995	fn decrease_fee_factor(_: Self::Id) -> FixedU128 {
996		<UpwardDeliveryFeeFactor<T>>::mutate(|f| {
997			*f =
998				UpwardInitialDeliveryFeeFactor::get().max(*f / ump_constants::EXPONENTIAL_FEE_BASE);
999			*f
1000		})
1001	}
1002}
1003
1004impl<T: Config> ListChannelInfos for Pallet<T> {
1005	fn outgoing_channels() -> Vec<ParaId> {
1006		let Some(state) = RelevantMessagingState::<T>::get() else { return Vec::new() };
1007		state.egress_channels.into_iter().map(|(id, _)| id).collect()
1008	}
1009}
1010
1011impl<T: Config> GetChannelInfo for Pallet<T> {
1012	fn get_channel_status(id: ParaId) -> ChannelStatus {
1013		// Note, that we are using `relevant_messaging_state` which may be from the previous
1014		// block, in case this is called from `on_initialize`, i.e. before the inherent with
1015		// fresh data is submitted.
1016		//
1017		// That shouldn't be a problem though because this is anticipated and already can
1018		// happen. This is because sending implies that a message is buffered until there is
1019		// space to send a message in the candidate. After a while waiting in a buffer, it may
1020		// be discovered that the channel to which a message were addressed is now closed.
1021		// Another possibility, is that the maximum message size was decreased so that a
1022		// message in the buffer doesn't fit. Should any of that happen the sender should be
1023		// notified about the message was discarded.
1024		//
1025		// Here it a similar case, with the difference that the realization that the channel is
1026		// closed came the same block.
1027		let channels = match RelevantMessagingState::<T>::get() {
1028			None => {
1029				log::warn!("calling `get_channel_status` with no RelevantMessagingState?!");
1030				return ChannelStatus::Closed
1031			},
1032			Some(d) => d.egress_channels,
1033		};
1034		// ^^^ NOTE: This storage field should carry over from the previous block. So if it's
1035		// None then it must be that this is an edge-case where a message is attempted to be
1036		// sent at the first block. It should be safe to assume that there are no channels
1037		// opened at all so early. At least, relying on this assumption seems to be a better
1038		// trade-off, compared to introducing an error variant that the clients should be
1039		// prepared to handle.
1040		let index = match channels.binary_search_by_key(&id, |item| item.0) {
1041			Err(_) => return ChannelStatus::Closed,
1042			Ok(i) => i,
1043		};
1044		let meta = &channels[index].1;
1045		if meta.msg_count + 1 > meta.max_capacity {
1046			// The channel is at its capacity. Skip it for now.
1047			return ChannelStatus::Full
1048		}
1049		let max_size_now = meta.max_total_size - meta.total_size;
1050		let max_size_ever = meta.max_message_size;
1051		ChannelStatus::Ready(max_size_now as usize, max_size_ever as usize)
1052	}
1053
1054	fn get_channel_info(id: ParaId) -> Option<ChannelInfo> {
1055		let channels = RelevantMessagingState::<T>::get()?.egress_channels;
1056		let index = channels.binary_search_by_key(&id, |item| item.0).ok()?;
1057		let info = ChannelInfo {
1058			max_capacity: channels[index].1.max_capacity,
1059			max_total_size: channels[index].1.max_total_size,
1060			max_message_size: channels[index].1.max_message_size,
1061			msg_count: channels[index].1.msg_count,
1062			total_size: channels[index].1.total_size,
1063		};
1064		Some(info)
1065	}
1066}
1067
1068impl<T: Config> Pallet<T> {
1069	/// Updates inherent data to only contain messages that weren't already processed
1070	/// by the runtime based on last relay chain block number.
1071	///
1072	/// This method doesn't check for mqc heads mismatch.
1073	fn drop_processed_messages_from_inherent(para_inherent: &mut ParachainInherentData) {
1074		let ParachainInherentData { downward_messages, horizontal_messages, .. } = para_inherent;
1075
1076		// Last relay chain block number. Any message with sent-at block number less
1077		// than or equal to this value is assumed to be processed previously.
1078		let last_relay_block_number = LastRelayChainBlockNumber::<T>::get();
1079
1080		// DMQ.
1081		let dmq_processed_num = downward_messages
1082			.iter()
1083			.take_while(|message| message.sent_at <= last_relay_block_number)
1084			.count();
1085		downward_messages.drain(..dmq_processed_num);
1086
1087		// HRMP.
1088		for horizontal in horizontal_messages.values_mut() {
1089			let horizontal_processed_num = horizontal
1090				.iter()
1091				.take_while(|message| message.sent_at <= last_relay_block_number)
1092				.count();
1093			horizontal.drain(..horizontal_processed_num);
1094		}
1095
1096		// If MQC doesn't match after dropping messages, the runtime will panic when creating
1097		// inherent.
1098	}
1099
	/// Enqueue all inbound downward messages relayed by the collator into the MQ pallet.
	///
	/// Checks if the sequence of the messages is valid, dispatches them and communicates the
	/// number of processed messages to the collator via a storage update.
	///
	/// # Panics
	///
	/// If it turns out that after processing all messages the Message Queue Chain
	/// hash doesn't match the expected.
	fn enqueue_inbound_downward_messages(
		expected_dmq_mqc_head: relay_chain::Hash,
		downward_messages: Vec<InboundDownwardMessage>,
	) -> Weight {
		let dm_count = downward_messages.len() as u32;
		let mut dmq_head = <LastDmqMqcHead<T>>::get();

		let weight_used = T::WeightInfo::enqueue_inbound_downward_messages(dm_count);
		if dm_count != 0 {
			Self::deposit_event(Event::DownwardMessagesReceived { count: dm_count });

			// Eagerly update the MQC head hash:
			for m in &downward_messages {
				dmq_head.extend_downward(m);
			}
			// Over-long messages are dropped (with a defensive log) rather than panicking;
			// note that they were still hashed into `dmq_head` above, so the head check
			// below remains valid.
			let bounded = downward_messages
				.iter()
				// Note: we are not using `.defensive()` here since that prints the whole value to
				// console. In case that the message is too long, this clogs up the log quite badly.
				.filter_map(|m| match BoundedSlice::try_from(&m.msg[..]) {
					Ok(bounded) => Some(bounded),
					Err(_) => {
						defensive!("Inbound Downward message was too long; dropping");
						None
					},
				});
			T::DmpQueue::handle_messages(bounded);
			<LastDmqMqcHead<T>>::put(&dmq_head);

			Self::deposit_event(Event::DownwardMessagesProcessed {
				weight_used,
				dmq_head: dmq_head.head(),
			});
		}

		// After hashing each message in the message queue chain submitted by the collator, we
		// should arrive to the MQC head provided by the relay chain.
		//
		// A mismatch means that at least some of the submitted messages were altered, omitted or
		// added improperly.
		assert_eq!(dmq_head.head(), expected_dmq_mqc_head);

		// Tell the collator (via `collect_collation_info`) how many messages were consumed.
		ProcessedDownwardMessages::<T>::put(dm_count);

		weight_used
	}
1155
	/// Process all inbound horizontal messages relayed by the collator.
	///
	/// This is similar to [`enqueue_inbound_downward_messages`], but works with multiple inbound
	/// channels. It immediately dispatches signals and queues all other XCMs. Blob messages are
	/// ignored.
	///
	/// **Panics** if either any of horizontal messages submitted by the collator was sent from
	///            a para which has no open channel to this parachain or if after processing
	///            messages across all inbound channels MQCs were obtained which do not
	///            correspond to the ones found on the relay-chain.
	fn enqueue_inbound_horizontal_messages(
		ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)],
		horizontal_messages: BTreeMap<ParaId, Vec<InboundHrmpMessage>>,
		relay_parent_number: relay_chain::BlockNumber,
	) -> Weight {
		// First, check that all submitted messages are sent from channels that exist. The
		// channel exists if its MQC head is present in `vfp.hrmp_mqc_heads`.
		for sender in horizontal_messages.keys() {
			// A violation of the assertion below indicates that one of the messages submitted
			// by the collator was sent from a sender that doesn't have a channel opened to
			// this parachain, according to the relay-parent state.
			assert!(ingress_channels.binary_search_by_key(sender, |&(s, _)| s).is_ok(),);
		}

		// Second, prepare horizontal messages for a more convenient processing:
		//
		// instead of a mapping from a para to a list of inbound HRMP messages, we will have a
		// list of tuples `(sender, message)` first ordered by `sent_at` (the relay chain block
		// number in which the message hit the relay-chain) and second ordered by para id
		// ascending.
		//
		// The messages will be dispatched in this order.
		let mut horizontal_messages = horizontal_messages
			.into_iter()
			.flat_map(|(sender, channel_contents)| {
				channel_contents.into_iter().map(move |message| (sender, message))
			})
			.collect::<Vec<_>>();
		horizontal_messages.sort_by(|a, b| {
			// first sort by sent-at and then by the para id
			match a.1.sent_at.cmp(&b.1.sent_at) {
				cmp::Ordering::Equal => a.0.cmp(&b.0),
				ord => ord,
			}
		});

		let last_mqc_heads = <LastHrmpMqcHeads<T>>::get();
		let mut running_mqc_heads = BTreeMap::new();
		let mut hrmp_watermark = None;

		// Advance each sender's running MQC head over every submitted message and track the
		// highest `sent_at` seen — that becomes the HRMP watermark candidate.
		{
			for (sender, ref horizontal_message) in &horizontal_messages {
				if hrmp_watermark.map(|w| w < horizontal_message.sent_at).unwrap_or(true) {
					hrmp_watermark = Some(horizontal_message.sent_at);
				}

				running_mqc_heads
					.entry(sender)
					.or_insert_with(|| last_mqc_heads.get(sender).cloned().unwrap_or_default())
					.extend_hrmp(horizontal_message);
			}
		}
		let message_iter = horizontal_messages
			.iter()
			.map(|&(sender, ref message)| (sender, message.sent_at, &message.data[..]));

		let max_weight =
			<ReservedXcmpWeightOverride<T>>::get().unwrap_or_else(T::ReservedXcmpWeight::get);
		let weight_used = T::XcmpMessageHandler::handle_xcmp_messages(message_iter, max_weight);

		// Check that the MQC heads for each channel provided by the relay chain match the MQC
		// heads we have after processing all incoming messages.
		//
		// Along the way we also carry over the relevant entries from the `last_mqc_heads` to
		// `running_mqc_heads`. Otherwise, in a block where no messages were sent in a channel
		// it won't get into next block's `last_mqc_heads` and thus will be all zeros, which
		// would corrupt the message queue chain.
		for (sender, channel) in ingress_channels {
			let cur_head = running_mqc_heads
				.entry(sender)
				.or_insert_with(|| last_mqc_heads.get(sender).cloned().unwrap_or_default())
				.head();
			let target_head = channel.mqc_head.unwrap_or_default();

			assert!(cur_head == target_head);
		}

		<LastHrmpMqcHeads<T>>::put(running_mqc_heads);

		// If we processed at least one message, then advance watermark to that location or if there
		// were no messages, set it to the block number of the relay parent.
		HrmpWatermark::<T>::put(hrmp_watermark.unwrap_or(relay_parent_number));

		weight_used
	}
1251
	/// Drop blocks from the unincluded segment with respect to the latest parachain head.
	///
	/// Returns the weight consumed. Panics when, after pruning, the segment still has no room
	/// for the current block (see the assertion at the end).
	fn maybe_drop_included_ancestors(
		relay_state_proof: &RelayChainStateProof,
		capacity: consensus_hook::UnincludedSegmentCapacity,
	) -> Weight {
		let mut weight_used = Weight::zero();
		// If the unincluded segment length is nonzero, then the parachain head must be present.
		let para_head =
			relay_state_proof.read_included_para_head().ok().map(|h| T::Hashing::hash(&h.0));

		let unincluded_segment_len = <UnincludedSegment<T>>::decode_len().unwrap_or(0);
		weight_used += T::DbWeight::get().reads(1);

		// Clean up unincluded segment if nonempty.
		let included_head = match (para_head, capacity.is_expecting_included_parent()) {
			(Some(h), true) => {
				assert_eq!(
					h,
					frame_system::Pallet::<T>::parent_hash(),
					"expected parent to be included"
				);

				h
			},
			(Some(h), false) => h,
			(None, true) => {
				// All this logic is essentially a workaround to support collators which
				// might still not provide the included block with the state proof.
				frame_system::Pallet::<T>::parent_hash()
			},
			(None, false) => panic!("included head not present in relay storage proof"),
		};

		let new_len = {
			let para_head_hash = included_head;
			let dropped: Vec<Ancestor<T::Hash>> = <UnincludedSegment<T>>::mutate(|chain| {
				// Drop everything up to (inclusive) the block with an included para head, if
				// present.
				let idx = chain
					.iter()
					.position(|block| {
						let head_hash = block
							.para_head_hash()
							.expect("para head hash is updated during block initialization; qed");
						head_hash == &para_head_hash
					})
					.map_or(0, |idx| idx + 1); // inclusive.

				chain.drain(..idx).collect()
			});
			weight_used += T::DbWeight::get().reads_writes(1, 1);

			// `dropped` was drained from the same segment whose length was read above, so this
			// subtraction cannot underflow.
			let new_len = unincluded_segment_len - dropped.len();
			if !dropped.is_empty() {
				// Subtract the pruned blocks' bandwidth from the aggregated tracker so it only
				// reflects blocks that are still unincluded.
				<AggregatedUnincludedSegment<T>>::mutate(|agg| {
					let agg = agg.as_mut().expect(
						"dropped part of the segment wasn't empty, hence value exists; qed",
					);
					for block in dropped {
						agg.subtract(&block);
					}
				});
				weight_used += T::DbWeight::get().reads_writes(1, 1);
			}

			new_len as u32
		};

		// Current block validity check: ensure there is space in the unincluded segment.
		//
		// If this fails, the parachain needs to wait for ancestors to be included before
		// a new block is allowed.
		assert!(new_len < capacity.get(), "no space left for the block in the unincluded segment");
		weight_used
	}
1327
1328	/// This adjusts the `RelevantMessagingState` according to the bandwidth limits in the
1329	/// unincluded segment.
1330	//
1331	// Reads: 2
1332	// Writes: 1
1333	fn adjust_egress_bandwidth_limits() {
1334		let unincluded_segment = match AggregatedUnincludedSegment::<T>::get() {
1335			None => return,
1336			Some(s) => s,
1337		};
1338
1339		<RelevantMessagingState<T>>::mutate(|messaging_state| {
1340			let messaging_state = match messaging_state {
1341				None => return,
1342				Some(s) => s,
1343			};
1344
1345			let used_bandwidth = unincluded_segment.used_bandwidth();
1346
1347			let channels = &mut messaging_state.egress_channels;
1348			for (para_id, used) in used_bandwidth.hrmp_outgoing.iter() {
1349				let i = match channels.binary_search_by_key(para_id, |item| item.0) {
1350					Ok(i) => i,
1351					Err(_) => continue, // indicates channel closed.
1352				};
1353
1354				let c = &mut channels[i].1;
1355
1356				c.total_size = (c.total_size + used.total_bytes).min(c.max_total_size);
1357				c.msg_count = (c.msg_count + used.msg_count).min(c.max_capacity);
1358			}
1359
1360			let upward_capacity = &mut messaging_state.relay_dispatch_queue_remaining_capacity;
1361			upward_capacity.remaining_count =
1362				upward_capacity.remaining_count.saturating_sub(used_bandwidth.ump_msg_count);
1363			upward_capacity.remaining_size =
1364				upward_capacity.remaining_size.saturating_sub(used_bandwidth.ump_total_bytes);
1365		});
1366	}
1367
1368	/// Put a new validation function into a particular location where polkadot
1369	/// monitors for updates. Calling this function notifies polkadot that a new
1370	/// upgrade has been scheduled.
1371	fn notify_polkadot_of_pending_upgrade(code: &[u8]) {
1372		NewValidationCode::<T>::put(code);
1373		<DidSetValidationCode<T>>::put(true);
1374	}
1375
1376	/// The maximum code size permitted, in bytes.
1377	///
1378	/// Returns `None` if the relay chain parachain host configuration hasn't been submitted yet.
1379	pub fn max_code_size() -> Option<u32> {
1380		<HostConfiguration<T>>::get().map(|cfg| cfg.max_code_size)
1381	}
1382
1383	/// The implementation of the runtime upgrade functionality for parachains.
1384	pub fn schedule_code_upgrade(validation_function: Vec<u8>) -> DispatchResult {
1385		// Ensure that `ValidationData` exists. We do not care about the validation data per se,
1386		// but we do care about the [`UpgradeRestrictionSignal`] which arrives with the same
1387		// inherent.
1388		ensure!(<ValidationData<T>>::exists(), Error::<T>::ValidationDataNotAvailable,);
1389		ensure!(<UpgradeRestrictionSignal<T>>::get().is_none(), Error::<T>::ProhibitedByPolkadot);
1390
1391		ensure!(!<PendingValidationCode<T>>::exists(), Error::<T>::OverlappingUpgrades);
1392		let cfg = HostConfiguration::<T>::get().ok_or(Error::<T>::HostConfigurationNotAvailable)?;
1393		ensure!(validation_function.len() <= cfg.max_code_size as usize, Error::<T>::TooBig);
1394
1395		// When a code upgrade is scheduled, it has to be applied in two
1396		// places, synchronized: both polkadot and the individual parachain
1397		// have to upgrade on the same relay chain block.
1398		//
1399		// `notify_polkadot_of_pending_upgrade` notifies polkadot; the `PendingValidationCode`
1400		// storage keeps track locally for the parachain upgrade, which will
1401		// be applied later: when the relay-chain communicates go-ahead signal to us.
1402		Self::notify_polkadot_of_pending_upgrade(&validation_function);
1403		<PendingValidationCode<T>>::put(validation_function);
1404		Self::deposit_event(Event::ValidationFunctionStored);
1405
1406		Ok(())
1407	}
1408
1409	/// Returns the [`CollationInfo`] of the current active block.
1410	///
1411	/// The given `header` is the header of the built block we are collecting the collation info
1412	/// for.
1413	///
1414	/// This is expected to be used by the
1415	/// [`CollectCollationInfo`](cumulus_primitives_core::CollectCollationInfo) runtime api.
1416	pub fn collect_collation_info(header: &HeaderFor<T>) -> CollationInfo {
1417		CollationInfo {
1418			hrmp_watermark: HrmpWatermark::<T>::get(),
1419			horizontal_messages: HrmpOutboundMessages::<T>::get(),
1420			upward_messages: UpwardMessages::<T>::get(),
1421			processed_downward_messages: ProcessedDownwardMessages::<T>::get(),
1422			new_validation_code: NewValidationCode::<T>::get().map(Into::into),
1423			// Check if there is a custom header that will also be returned by the validation phase.
1424			// If so, we need to also return it here.
1425			head_data: CustomValidationHeadData::<T>::get()
1426				.map_or_else(|| header.encode(), |v| v)
1427				.into(),
1428		}
1429	}
1430
	/// Returns the core selector for the next block.
	///
	/// Delegates to the configured `SelectCore` implementation.
	pub fn core_selector() -> (CoreSelector, ClaimQueueOffset) {
		T::SelectCore::select_next_core()
	}
1435
	/// Set a custom head data that should be returned as result of `validate_block`.
	///
	/// This will overwrite the head data that is returned as result of `validate_block` while
	/// validating a `PoV` on the relay chain. Normally the head data that is being returned
	/// by `validate_block` is the header of the block that is validated, thus it can be
	/// enacted as the new best block. However, for features like forking it can be useful
	/// to overwrite the head data with a custom header.
	///
	/// The stored value is also picked up by [`Self::collect_collation_info`], so the collator
	/// reports the same head data as the validation phase.
	///
	/// # Attention
	///
	/// This should only be used when you are sure what you are doing as this can brick
	/// your Parachain.
	pub fn set_custom_validation_head_data(head_data: Vec<u8>) {
		CustomValidationHeadData::<T>::put(head_data);
	}
1451
	/// Send the ump signals.
	///
	/// Appends the `UMP_SEPARATOR` followed by a `SelectCore` signal (carrying the selected
	/// core selector and claim queue offset) to this block's upward messages.
	#[cfg(feature = "experimental-ump-signals")]
	fn send_ump_signal() {
		use cumulus_primitives_core::relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR};

		UpwardMessages::<T>::mutate(|up| {
			// The separator marks the boundary between regular messages and signals.
			up.push(UMP_SEPARATOR);

			// Send the core selector signal.
			let core_selector = T::SelectCore::selected_core();
			up.push(UMPSignal::SelectCore(core_selector.0, core_selector.1).encode());
		});
	}
1465
1466	/// Open HRMP channel for using it in benchmarks or tests.
1467	///
1468	/// The caller assumes that the pallet will accept regular outbound message to the sibling
1469	/// `target_parachain` after this call. No other assumptions are made.
1470	#[cfg(any(feature = "runtime-benchmarks", feature = "std"))]
1471	pub fn open_outbound_hrmp_channel_for_benchmarks_or_tests(target_parachain: ParaId) {
1472		RelevantMessagingState::<T>::put(MessagingStateSnapshot {
1473			dmq_mqc_head: Default::default(),
1474			relay_dispatch_queue_remaining_capacity: Default::default(),
1475			ingress_channels: Default::default(),
1476			egress_channels: vec![(
1477				target_parachain,
1478				cumulus_primitives_core::AbridgedHrmpChannel {
1479					max_capacity: 10,
1480					max_total_size: 10_000_000_u32,
1481					max_message_size: 10_000_000_u32,
1482					msg_count: 5,
1483					total_size: 5_000_000_u32,
1484					mqc_head: None,
1485				},
1486			)],
1487		})
1488	}
1489
	/// Open HRMP channel with the given `channel` parameters for using it in benchmarks or
	/// tests.
	///
	/// The caller assumes that the pallet will accept regular outbound message to the sibling
	/// `target_parachain` after this call. No other assumptions are made.
	#[cfg(any(feature = "runtime-benchmarks", feature = "std"))]
	pub fn open_custom_outbound_hrmp_channel_for_benchmarks_or_tests(
		target_parachain: ParaId,
		channel: cumulus_primitives_core::AbridgedHrmpChannel,
	) {
		// Overwrites the messaging state with a minimal snapshot that contains only the
		// requested egress channel.
		RelevantMessagingState::<T>::put(MessagingStateSnapshot {
			dmq_mqc_head: Default::default(),
			relay_dispatch_queue_remaining_capacity: Default::default(),
			ingress_channels: Default::default(),
			egress_channels: vec![(target_parachain, channel)],
		})
	}
1506
	/// Prepare/insert relevant data for `schedule_code_upgrade` for benchmarks.
	#[cfg(feature = "runtime-benchmarks")]
	pub fn initialize_for_set_code_benchmark(max_code_size: u32) {
		// Insert dummy `ValidationData` so the `ValidationDataNotAvailable` check passes.
		let vfp = PersistedValidationData {
			parent_head: polkadot_parachain_primitives::primitives::HeadData(Default::default()),
			relay_parent_number: 1,
			relay_parent_storage_root: Default::default(),
			max_pov_size: 1_000,
		};
		<ValidationData<T>>::put(&vfp);

		// Insert a dummy `HostConfiguration` carrying the requested `max_code_size`, so the
		// `TooBig` check operates on a controlled limit.
		let host_config = AbridgedHostConfiguration {
			max_code_size,
			max_head_data_size: 32 * 1024,
			max_upward_queue_count: 8,
			max_upward_queue_size: 1024 * 1024,
			max_upward_message_size: 4 * 1024,
			max_upward_message_num_per_candidate: 2,
			hrmp_max_message_num_per_candidate: 2,
			validation_upgrade_cooldown: 2,
			validation_upgrade_delay: 2,
			async_backing_params: relay_chain::AsyncBackingParams {
				allowed_ancestry_len: 0,
				max_candidate_depth: 0,
			},
		};
		<HostConfiguration<T>>::put(host_config);
	}
1537}
1538
1539/// Type that implements `SetCode`.
1540pub struct ParachainSetCode<T>(core::marker::PhantomData<T>);
1541impl<T: Config> frame_system::SetCode<T> for ParachainSetCode<T> {
1542	fn set_code(code: Vec<u8>) -> DispatchResult {
1543		Pallet::<T>::schedule_code_upgrade(code)
1544	}
1545}
1546
1547impl<T: Config> Pallet<T> {
1548	/// Puts a message in the `PendingUpwardMessages` storage item.
1549	/// The message will be later sent in `on_finalize`.
1550	/// Checks host configuration to see if message is too big.
1551	/// Increases the delivery fee factor if the queue is sufficiently (see
1552	/// [`ump_constants::THRESHOLD_FACTOR`]) congested.
1553	pub fn send_upward_message(message: UpwardMessage) -> Result<(u32, XcmHash), MessageSendError> {
1554		let message_len = message.len();
1555		// Check if the message fits into the relay-chain constraints.
1556		//
1557		// Note, that we are using `host_configuration` here which may be from the previous
1558		// block, in case this is called from `on_initialize`, i.e. before the inherent with fresh
1559		// data is submitted.
1560		//
1561		// That shouldn't be a problem since this is a preliminary check and the actual check would
1562		// be performed just before submitting the message from the candidate, and it already can
1563		// happen that during the time the message is buffered for sending the relay-chain setting
1564		// may change so that the message is no longer valid.
1565		//
1566		// However, changing this setting is expected to be rare.
1567		if let Some(cfg) = HostConfiguration::<T>::get() {
1568			if message_len > cfg.max_upward_message_size as usize {
1569				return Err(MessageSendError::TooBig)
1570			}
1571			let threshold =
1572				cfg.max_upward_queue_size.saturating_div(ump_constants::THRESHOLD_FACTOR);
1573			// We check the threshold against total size and not number of messages since messages
1574			// could be big or small.
1575			<PendingUpwardMessages<T>>::append(message.clone());
1576			let pending_messages = PendingUpwardMessages::<T>::get();
1577			let total_size: usize = pending_messages.iter().map(UpwardMessage::len).sum();
1578			if total_size > threshold as usize {
1579				// We increase the fee factor by a factor based on the new message's size in KB
1580				let message_size_factor = FixedU128::from((message_len / 1024) as u128)
1581					.saturating_mul(ump_constants::MESSAGE_SIZE_FEE_BASE);
1582				Self::increase_fee_factor((), message_size_factor);
1583			}
1584		} else {
1585			// This storage field should carry over from the previous block. So if it's None
1586			// then it must be that this is an edge-case where a message is attempted to be
1587			// sent at the first block.
1588			//
1589			// Let's pass this message through. I think it's not unreasonable to expect that
1590			// the message is not huge and it comes through, but if it doesn't it can be
1591			// returned back to the sender.
1592			//
1593			// Thus fall through here.
1594			<PendingUpwardMessages<T>>::append(message.clone());
1595		};
1596
1597		// The relay ump does not use using_encoded
1598		// We apply the same this to use the same hash
1599		let hash = sp_io::hashing::blake2_256(&message);
1600		Self::deposit_event(Event::UpwardMessageSent { message_hash: Some(hash) });
1601		Ok((0, hash))
1602	}
1603
1604	/// Get the relay chain block number which was used as an anchor for the last block in this
1605	/// chain.
1606	pub fn last_relay_block_number() -> RelayChainBlockNumber {
1607		LastRelayChainBlockNumber::<T>::get()
1608	}
1609}
1610
1611impl<T: Config> UpwardMessageSender for Pallet<T> {
1612	fn send_upward_message(message: UpwardMessage) -> Result<(u32, XcmHash), MessageSendError> {
1613		Self::send_upward_message(message)
1614	}
1615}
1616
1617impl<T: Config> InspectMessageQueues for Pallet<T> {
1618	fn clear_messages() {
1619		PendingUpwardMessages::<T>::kill();
1620	}
1621
1622	fn get_messages() -> Vec<(VersionedLocation, Vec<VersionedXcm<()>>)> {
1623		use xcm::prelude::*;
1624
1625		let messages: Vec<VersionedXcm<()>> = PendingUpwardMessages::<T>::get()
1626			.iter()
1627			.map(|encoded_message| VersionedXcm::<()>::decode(&mut &encoded_message[..]).unwrap())
1628			.collect();
1629
1630		if messages.is_empty() {
1631			vec![]
1632		} else {
1633			vec![(VersionedLocation::from(Location::parent()), messages)]
1634		}
1635	}
1636}
1637
#[cfg(feature = "runtime-benchmarks")]
impl<T: Config> polkadot_runtime_common::xcm_sender::EnsureForParachain for Pallet<T> {
	fn ensure(para_id: ParaId) {
		// Only open a fresh outbound channel when none is currently open towards `para_id`.
		if matches!(Self::get_channel_status(para_id), ChannelStatus::Closed) {
			Self::open_outbound_hrmp_channel_for_benchmarks_or_tests(para_id)
		}
	}
}
1646
/// Something that can check the inherents of a block.
#[deprecated(note = "This trait is deprecated and will be removed by September 2024. \
		Consider switching to `cumulus-pallet-parachain-system::ConsensusHook`")]
pub trait CheckInherents<Block: BlockT> {
	/// Check all inherents of the block.
	///
	/// This function gets passed all the extrinsics of the block, so it is up to the callee to
	/// identify the inherents. The `validation_data` can be used to access the relay chain
	/// state proof the block was built against.
	fn check_inherents(
		block: &Block,
		validation_data: &RelayChainStateProof,
	) -> frame_support::inherent::CheckInherentsResult;
}
1660
1661/// Struct that always returns `Ok` on inherents check, needed for backwards-compatibility.
1662#[doc(hidden)]
1663pub struct DummyCheckInherents<Block>(core::marker::PhantomData<Block>);
1664
1665#[allow(deprecated)]
1666impl<Block: BlockT> CheckInherents<Block> for DummyCheckInherents<Block> {
1667	fn check_inherents(
1668		_: &Block,
1669		_: &RelayChainStateProof,
1670	) -> frame_support::inherent::CheckInherentsResult {
1671		sp_inherents::CheckInherentsResult::new()
1672	}
1673}
1674
/// Something that should be informed about system related events.
///
/// This includes events like [`on_validation_data`](Self::on_validation_data) that is being
/// called when the parachain inherent is executed that contains the validation data.
/// Or like [`on_validation_code_applied`](Self::on_validation_code_applied) that is called
/// when the new validation code is written to the state. This means that
/// from the next block on the runtime is using this new code.
#[impl_trait_for_tuples::impl_for_tuples(30)]
pub trait OnSystemEvent {
	/// Called once in each block when the validation data is set by the inherent.
	fn on_validation_data(data: &PersistedValidationData);
	/// Called when the validation code is being applied, i.e. from the next block on this is the
	/// new runtime.
	fn on_validation_code_applied();
}
1690
/// Holds the most recent relay-parent state root and block number of the current parachain block.
#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Default, RuntimeDebug)]
pub struct RelayChainState {
	/// Current relay chain height (the relay parent's block number).
	pub number: relay_chain::BlockNumber,
	/// State root for the current relay chain height, i.e. at block `number`.
	pub state_root: relay_chain::Hash,
}
1699
/// This exposes the [`RelayChainState`] to other runtime modules.
///
/// Enables parachains to read relay chain state via state proofs.
pub trait RelaychainStateProvider {
	/// May be called by any runtime module to obtain the current state of the relay chain.
	///
	/// **NOTE**: This is not guaranteed to return monotonically increasing relay parents.
	fn current_relay_chain_state() -> RelayChainState;

	/// Utility function only to be used in benchmarking scenarios. To be implemented optionally,
	/// else a noop.
	///
	/// It allows for setting a custom [`RelayChainState`].
	#[cfg(feature = "runtime-benchmarks")]
	fn set_current_relay_chain_state(_state: RelayChainState) {}
}
1716
/// Implements [`BlockNumberProvider`] that returns the relay chain block number fetched from
/// validation data.
///
/// When validation data is not available (e.g. within `on_initialize`), it will fall back to
/// [`Pallet::last_relay_block_number()`].
///
/// **NOTE**: This has been deprecated, please use [`RelaychainDataProvider`]
#[deprecated = "Use `RelaychainDataProvider` instead"]
pub type RelaychainBlockNumberProvider<T> = RelaychainDataProvider<T>;
1726
/// Implements [`BlockNumberProvider`] and [`RelaychainStateProvider`], returning relevant relay
/// data fetched from the validation data.
///
/// NOTE: When validation data is not available (e.g. within `on_initialize`):
///
/// - [`current_relay_chain_state`](Self::current_relay_chain_state): Will return the default value
///   of [`RelayChainState`].
/// - [`current_block_number`](Self::current_block_number): Will return
///   [`Pallet::last_relay_block_number()`].
pub struct RelaychainDataProvider<T>(core::marker::PhantomData<T>);
1737
1738impl<T: Config> BlockNumberProvider for RelaychainDataProvider<T> {
1739	type BlockNumber = relay_chain::BlockNumber;
1740
1741	fn current_block_number() -> relay_chain::BlockNumber {
1742		ValidationData::<T>::get()
1743			.map(|d| d.relay_parent_number)
1744			.unwrap_or_else(|| Pallet::<T>::last_relay_block_number())
1745	}
1746
1747	#[cfg(feature = "runtime-benchmarks")]
1748	fn set_block_number(block: Self::BlockNumber) {
1749		let mut validation_data = ValidationData::<T>::get().unwrap_or_else(||
1750			// PersistedValidationData does not impl default in non-std
1751			PersistedValidationData {
1752				parent_head: vec![].into(),
1753				relay_parent_number: Default::default(),
1754				max_pov_size: Default::default(),
1755				relay_parent_storage_root: Default::default(),
1756			});
1757		validation_data.relay_parent_number = block;
1758		ValidationData::<T>::put(validation_data)
1759	}
1760}
1761
1762impl<T: Config> RelaychainStateProvider for RelaychainDataProvider<T> {
1763	fn current_relay_chain_state() -> RelayChainState {
1764		ValidationData::<T>::get()
1765			.map(|d| RelayChainState {
1766				number: d.relay_parent_number,
1767				state_root: d.relay_parent_storage_root,
1768			})
1769			.unwrap_or_default()
1770	}
1771
1772	#[cfg(feature = "runtime-benchmarks")]
1773	fn set_current_relay_chain_state(state: RelayChainState) {
1774		let mut validation_data = ValidationData::<T>::get().unwrap_or_else(||
1775			// PersistedValidationData does not impl default in non-std
1776			PersistedValidationData {
1777				parent_head: vec![].into(),
1778				relay_parent_number: Default::default(),
1779				max_pov_size: Default::default(),
1780				relay_parent_storage_root: Default::default(),
1781			});
1782		validation_data.relay_parent_number = state.number;
1783		validation_data.relay_parent_storage_root = state.state_root;
1784		ValidationData::<T>::put(validation_data)
1785	}
1786}