cumulus_pallet_parachain_system/
lib.rs

1// Copyright (C) Parity Technologies (UK) Ltd.
2// This file is part of Cumulus.
3// SPDX-License-Identifier: Apache-2.0
4
5// Licensed under the Apache License, Version 2.0 (the "License");
6// you may not use this file except in compliance with the License.
7// You may obtain a copy of the License at
8//
9// 	http://www.apache.org/licenses/LICENSE-2.0
10//
11// Unless required by applicable law or agreed to in writing, software
12// distributed under the License is distributed on an "AS IS" BASIS,
13// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14// See the License for the specific language governing permissions and
15// limitations under the License.
16
17#![cfg_attr(not(feature = "std"), no_std)]
18
19//! `cumulus-pallet-parachain-system` is a base pallet for Cumulus-based parachains.
20//!
21//! This pallet handles low-level details of being a parachain. Its responsibilities include:
22//!
23//! - ingestion of the parachain validation data;
24//! - ingestion and dispatch of incoming downward and lateral messages;
25//! - coordinating upgrades with the Relay Chain; and
26//! - communication of parachain outputs, such as sent messages, signaling an upgrade, etc.
27//!
28//! Users must ensure that they register this pallet as an inherent provider.
29
30extern crate alloc;
31
32use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec};
33use codec::{Decode, DecodeLimit, Encode};
34use core::{cmp, marker::PhantomData};
35use cumulus_primitives_core::{
36	relay_chain::{
37		self,
38		vstaging::{ClaimQueueOffset, CoreSelector, DEFAULT_CLAIM_QUEUE_OFFSET},
39	},
40	AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, GetChannelInfo,
41	InboundDownwardMessage, InboundHrmpMessage, ListChannelInfos, MessageSendError,
42	OutboundHrmpMessage, ParaId, PersistedValidationData, UpwardMessage, UpwardMessageSender,
43	XcmpMessageHandler, XcmpMessageSource,
44};
45use cumulus_primitives_parachain_inherent::{MessageQueueChain, ParachainInherentData};
46use frame_support::{
47	defensive,
48	dispatch::DispatchResult,
49	ensure,
50	inherent::{InherentData, InherentIdentifier, ProvideInherent},
51	traits::{Get, HandleMessage},
52	weights::Weight,
53};
54use frame_system::{ensure_none, ensure_root, pallet_prelude::HeaderFor};
55use polkadot_parachain_primitives::primitives::RelayChainBlockNumber;
56use polkadot_runtime_parachains::FeeTracker;
57use scale_info::TypeInfo;
58use sp_core::U256;
59use sp_runtime::{
60	traits::{Block as BlockT, BlockNumberProvider, Hash, One},
61	BoundedSlice, FixedU128, RuntimeDebug, Saturating,
62};
63use xcm::{latest::XcmHash, VersionedLocation, VersionedXcm, MAX_XCM_DECODE_DEPTH};
64use xcm_builder::InspectMessageQueues;
65
66mod benchmarking;
67pub mod migration;
68mod mock;
69#[cfg(test)]
70mod tests;
71pub mod weights;
72
73pub use weights::WeightInfo;
74
75mod unincluded_segment;
76
77pub mod consensus_hook;
78pub mod relay_state_snapshot;
79#[macro_use]
80pub mod validate_block;
81
82use unincluded_segment::{
83	HrmpChannelUpdate, HrmpWatermarkUpdate, OutboundBandwidthLimits, SegmentTracker,
84};
85
86pub use consensus_hook::{ConsensusHook, ExpectParentIncluded};
87/// Register the `validate_block` function that is used by parachains to validate blocks on a
88/// validator.
89///
90/// Does *nothing* when `std` feature is enabled.
91///
92/// Expects as parameters the runtime, a block executor and an inherent checker.
93///
94/// # Example
95///
96/// ```
97///     struct BlockExecutor;
98///     struct Runtime;
99///     struct CheckInherents;
100///
101///     cumulus_pallet_parachain_system::register_validate_block! {
102///         Runtime = Runtime,
///         BlockExecutor = BlockExecutor,
104///         CheckInherents = CheckInherents,
105///     }
106///
107/// # fn main() {}
108/// ```
109pub use cumulus_pallet_parachain_system_proc_macro::register_validate_block;
110pub use relay_state_snapshot::{MessagingStateSnapshot, RelayChainStateProof};
111pub use unincluded_segment::{Ancestor, UsedBandwidth};
112
113pub use pallet::*;
114
/// Something that can check the associated relay block number.
///
/// Each Parachain block is built in the context of a relay chain block, this trait allows us
/// to validate the given relay chain block number. With async backing it is legal to build
/// multiple Parachain blocks per relay chain parent. With this trait it is possible for the
/// Parachain to ensure that still only one Parachain block is built per relay chain parent.
///
/// By default [`RelayNumberStrictlyIncreases`] and [`AnyRelayNumber`] are provided.
pub trait CheckAssociatedRelayNumber {
	/// Check the current relay number versus the previous relay number.
	///
	/// The implementation should panic when there is something wrong.
	fn check_associated_relay_number(
		current: RelayChainBlockNumber,
		previous: RelayChainBlockNumber,
	);
}
132
133/// Provides an implementation of [`CheckAssociatedRelayNumber`].
134///
135/// It will ensure that the associated relay block number strictly increases between Parachain
136/// blocks. This should be used by production Parachains when in doubt.
137pub struct RelayNumberStrictlyIncreases;
138
139impl CheckAssociatedRelayNumber for RelayNumberStrictlyIncreases {
140	fn check_associated_relay_number(
141		current: RelayChainBlockNumber,
142		previous: RelayChainBlockNumber,
143	) {
144		if current <= previous {
145			panic!("Relay chain block number needs to strictly increase between Parachain blocks!")
146		}
147	}
148}
149
150/// Provides an implementation of [`CheckAssociatedRelayNumber`].
151///
152/// This will accept any relay chain block number combination. This is mainly useful for
153/// test parachains.
154pub struct AnyRelayNumber;
155
156impl CheckAssociatedRelayNumber for AnyRelayNumber {
157	fn check_associated_relay_number(_: RelayChainBlockNumber, _: RelayChainBlockNumber) {}
158}
159
160/// Provides an implementation of [`CheckAssociatedRelayNumber`].
161///
162/// It will ensure that the associated relay block number monotonically increases between Parachain
163/// blocks. This should be used when asynchronous backing is enabled.
164pub struct RelayNumberMonotonicallyIncreases;
165
166impl CheckAssociatedRelayNumber for RelayNumberMonotonicallyIncreases {
167	fn check_associated_relay_number(
168		current: RelayChainBlockNumber,
169		previous: RelayChainBlockNumber,
170	) {
171		if current < previous {
172			panic!("Relay chain block number needs to monotonically increase between Parachain blocks!")
173		}
174	}
175}
176
/// The max length of a DMP message.
///
/// Derived from the `MaxMessageLen` of the [`Config::DmpQueue`] handler.
pub type MaxDmpMessageLenOf<T> = <<T as Config>::DmpQueue as HandleMessage>::MaxMessageLen;
179
/// Constants used to compute the UMP (upward message passing) delivery fee factor.
pub mod ump_constants {
	use super::FixedU128;

	/// `host_config.max_upward_queue_size / THRESHOLD_FACTOR` is the threshold after which delivery
	/// starts getting exponentially more expensive.
	/// `2` means the price starts to increase when queue is half full.
	pub const THRESHOLD_FACTOR: u32 = 2;
	/// The base number the delivery fee factor gets multiplied by every time it is increased.
	/// Also the number it gets divided by when decreased.
	pub const EXPONENTIAL_FEE_BASE: FixedU128 = FixedU128::from_rational(105, 100); // 1.05
	/// The base number message size in KB is multiplied by before increasing the fee factor.
	pub const MESSAGE_SIZE_FEE_BASE: FixedU128 = FixedU128::from_rational(1, 1000); // 0.001
}
193
/// Trait for selecting the next core to build the candidate for.
pub trait SelectCore {
	/// Core selector information for the current block.
	fn selected_core() -> (CoreSelector, ClaimQueueOffset);
	/// Core selector information for the next block.
	///
	/// Implementations are expected to return what [`Self::selected_core`] will report once
	/// the next block is being built.
	fn select_next_core() -> (CoreSelector, ClaimQueueOffset);
}
201
202/// The default core selection policy.
203pub struct DefaultCoreSelector<T>(PhantomData<T>);
204
205impl<T: frame_system::Config> SelectCore for DefaultCoreSelector<T> {
206	fn selected_core() -> (CoreSelector, ClaimQueueOffset) {
207		let core_selector: U256 = frame_system::Pallet::<T>::block_number().into();
208
209		(CoreSelector(core_selector.byte(0)), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET))
210	}
211
212	fn select_next_core() -> (CoreSelector, ClaimQueueOffset) {
213		let core_selector: U256 = (frame_system::Pallet::<T>::block_number() + One::one()).into();
214
215		(CoreSelector(core_selector.byte(0)), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET))
216	}
217}
218
219/// Core selection policy that builds on claim queue offset 1.
220pub struct LookaheadCoreSelector<T>(PhantomData<T>);
221
222impl<T: frame_system::Config> SelectCore for LookaheadCoreSelector<T> {
223	fn selected_core() -> (CoreSelector, ClaimQueueOffset) {
224		let core_selector: U256 = frame_system::Pallet::<T>::block_number().into();
225
226		(CoreSelector(core_selector.byte(0)), ClaimQueueOffset(1))
227	}
228
229	fn select_next_core() -> (CoreSelector, ClaimQueueOffset) {
230		let core_selector: U256 = (frame_system::Pallet::<T>::block_number() + One::one()).into();
231
232		(CoreSelector(core_selector.byte(0)), ClaimQueueOffset(1))
233	}
234}
235
236#[frame_support::pallet]
237pub mod pallet {
238	use super::*;
239	use frame_support::pallet_prelude::*;
240	use frame_system::pallet_prelude::*;
241
	/// The parachain-system pallet type.
	#[pallet::pallet]
	#[pallet::storage_version(migration::STORAGE_VERSION)]
	#[pallet::without_storage_info]
	pub struct Pallet<T>(_);
246
	#[pallet::config]
	pub trait Config: frame_system::Config<OnSetCode = ParachainSetCode<Self>> {
		/// The overarching event type.
		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;

		/// Something which can be notified when the validation data is set.
		type OnSystemEvent: OnSystemEvent;

		/// Returns the parachain ID we are running with.
		#[pallet::constant]
		type SelfParaId: Get<ParaId>;

		/// The place where outbound XCMP messages come from. This is queried in `finalize_block`.
		type OutboundXcmpMessageSource: XcmpMessageSource;

		/// Queues inbound downward messages for delayed processing.
		///
		/// All inbound DMP messages from the relay are pushed into this. The handler is expected to
		/// eventually process all the messages that are pushed to it.
		type DmpQueue: HandleMessage;

		/// The weight we reserve at the beginning of the block for processing DMP messages.
		type ReservedDmpWeight: Get<Weight>;

		/// The message handler that will be invoked when messages are received via XCMP.
		///
		/// This should normally link to the XCMP Queue pallet.
		type XcmpMessageHandler: XcmpMessageHandler;

		/// The weight we reserve at the beginning of the block for processing XCMP messages.
		type ReservedXcmpWeight: Get<Weight>;

		/// Something that can check the associated relay parent block number.
		type CheckAssociatedRelayNumber: CheckAssociatedRelayNumber;

		/// Weight info for functions and calls.
		type WeightInfo: WeightInfo;

		/// An entry-point for higher-level logic to manage the backlog of unincluded parachain
		/// blocks and authorship rights for those blocks.
		///
		/// Typically, this should be a hook tailored to the collator-selection/consensus mechanism
		/// that is used for this chain.
		///
		/// However, to maintain the same behavior as prior to asynchronous backing, provide the
		/// [`consensus_hook::ExpectParentIncluded`] here. This is only necessary in the case
		/// that collators aren't expected to have node versions that supply the included block
		/// in the relay-chain state proof.
		type ConsensusHook: ConsensusHook;

		/// The [`SelectCore`] policy used to pick the core to build candidates for.
		type SelectCore: SelectCore;
	}
300
	#[pallet::hooks]
	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
		/// Handles actually sending upward messages by moving them from `PendingUpwardMessages` to
		/// `UpwardMessages`. Decreases the delivery fee factor if after sending messages, the queue
		/// total size is less than the threshold (see [`ump_constants::THRESHOLD_FACTOR`]).
		/// Also does the sending for HRMP messages it takes from `OutboundXcmpMessageSource`.
		fn on_finalize(_: BlockNumberFor<T>) {
			// Clear per-block ephemeral values. The `UpgradeGoAhead` signal is consumed here
			// and fed into the unincluded segment bookkeeping below.
			<DidSetValidationCode<T>>::kill();
			<UpgradeRestrictionSignal<T>>::kill();
			let relay_upgrade_go_ahead = <UpgradeGoAhead<T>>::take();

			let vfp = <ValidationData<T>>::get().expect(
				r"Missing required set_validation_data inherent. This inherent must be
				present in every block. This error typically occurs when the set_validation_data
				execution failed and was rejected by the block builder. Check earlier log entries
				for the specific cause of the failure.",
			);

			// Recorded so the next block can validate its relay parent against this one
			// (see `CheckAssociatedRelayNumber` usage in `set_validation_data`).
			LastRelayChainBlockNumber::<T>::put(vfp.relay_parent_number);

			let host_config = match HostConfiguration::<T>::get() {
				Some(ok) => ok,
				None => {
					debug_assert!(
						false,
						"host configuration is promised to set until `on_finalize`; qed",
					);
					return
				},
			};

			// Before updating the relevant messaging state, we need to extract
			// the total bandwidth limits for the purpose of updating the unincluded
			// segment.
			let total_bandwidth_out = match RelevantMessagingState::<T>::get() {
				Some(s) => OutboundBandwidthLimits::from_relay_chain_state(&s),
				None => {
					debug_assert!(
						false,
						"relevant messaging state is promised to be set until `on_finalize`; \
							qed",
					);
					return
				},
			};

			// After this point, the `RelevantMessagingState` in storage reflects the
			// unincluded segment.
			Self::adjust_egress_bandwidth_limits();

			let (ump_msg_count, ump_total_bytes) = <PendingUpwardMessages<T>>::mutate(|up| {
				let (available_capacity, available_size) = match RelevantMessagingState::<T>::get()
				{
					Some(limits) => (
						limits.relay_dispatch_queue_remaining_capacity.remaining_count,
						limits.relay_dispatch_queue_remaining_capacity.remaining_size,
					),
					None => {
						debug_assert!(
							false,
							"relevant messaging state is promised to be set until `on_finalize`; \
								qed",
						);
						return (0, 0)
					},
				};

				let available_capacity =
					cmp::min(available_capacity, host_config.max_upward_message_num_per_candidate);

				// Count the number of messages we can possibly fit in the given constraints, i.e.
				// available_capacity and available_size.
				let (num, total_size) = up
					.iter()
					.scan((0u32, 0u32), |state, msg| {
						let (cap_used, size_used) = *state;
						let new_cap = cap_used.saturating_add(1);
						let new_size = size_used.saturating_add(msg.len() as u32);
						match available_capacity
							.checked_sub(new_cap)
							.and(available_size.checked_sub(new_size))
						{
							Some(_) => {
								*state = (new_cap, new_size);
								Some(*state)
							},
							_ => None,
						}
					})
					.last()
					.unwrap_or_default();

				// TODO: #274 Return back messages that do not longer fit into the queue.

				// Publish the first `num` messages and keep the rest pending.
				UpwardMessages::<T>::put(&up[..num as usize]);
				*up = up.split_off(num as usize);

				// Send the core selector UMP signal. This is experimental until relay chain
				// validators are upgraded to handle ump signals.
				#[cfg(feature = "experimental-ump-signals")]
				Self::send_ump_signal();

				// If the total size of the pending messages is less than the threshold,
				// we decrease the fee factor, since the queue is less congested.
				// This makes delivery of new messages cheaper.
				let threshold = host_config
					.max_upward_queue_size
					.saturating_div(ump_constants::THRESHOLD_FACTOR);
				let remaining_total_size: usize = up.iter().map(UpwardMessage::len).sum();
				if remaining_total_size <= threshold as usize {
					Self::decrease_fee_factor(());
				}

				(num, total_size)
			});

			// Sending HRMP messages is a little bit more involved. There are the following
			// constraints:
			//
			// - a channel should exist (and it can be closed while a message is buffered),
			// - at most one message can be sent in a channel,
			// - the sent out messages should be ordered by ascension of recipient para id.
			// - the capacity and total size of the channel is limited,
			// - the maximum size of a message is limited (and can potentially be changed),

			let maximum_channels = host_config
				.hrmp_max_message_num_per_candidate
				.min(<AnnouncedHrmpMessagesPerCandidate<T>>::take())
				as usize;

			// Note: this internally calls the `GetChannelInfo` implementation for this
			// pallet, which draws on the `RelevantMessagingState`. That in turn has
			// been adjusted above to reflect the correct limits in all channels.
			let outbound_messages =
				T::OutboundXcmpMessageSource::take_outbound_messages(maximum_channels)
					.into_iter()
					.map(|(recipient, data)| OutboundHrmpMessage { recipient, data })
					.collect::<Vec<_>>();

			// Update the unincluded segment length; capacity checks were done previously in
			// `set_validation_data`, so this can be done unconditionally.
			{
				let hrmp_outgoing = outbound_messages
					.iter()
					.map(|msg| {
						(
							msg.recipient,
							HrmpChannelUpdate { msg_count: 1, total_bytes: msg.data.len() as u32 },
						)
					})
					.collect();
				let used_bandwidth =
					UsedBandwidth { ump_msg_count, ump_total_bytes, hrmp_outgoing };

				let mut aggregated_segment =
					AggregatedUnincludedSegment::<T>::get().unwrap_or_default();
				let consumed_go_ahead_signal =
					if aggregated_segment.consumed_go_ahead_signal().is_some() {
						// Some ancestor within the segment already processed this signal --
						// validated during inherent creation.
						None
					} else {
						relay_upgrade_go_ahead
					};
				// The bandwidth constructed was ensured to satisfy relay chain constraints.
				let ancestor = Ancestor::new_unchecked(used_bandwidth, consumed_go_ahead_signal);

				let watermark = HrmpWatermark::<T>::get();
				let watermark_update = HrmpWatermarkUpdate::new(watermark, vfp.relay_parent_number);

				aggregated_segment
					.append(&ancestor, watermark_update, &total_bandwidth_out)
					.expect("unincluded segment limits exceeded");
				AggregatedUnincludedSegment::<T>::put(aggregated_segment);
				// Check in `on_initialize` guarantees there's space for this block.
				UnincludedSegment::<T>::append(ancestor);
			}
			HrmpOutboundMessages::<T>::put(outbound_messages);
		}

		fn on_initialize(_n: BlockNumberFor<T>) -> Weight {
			let mut weight = Weight::zero();

			// To prevent removing `NewValidationCode` that was set by another `on_initialize`
			// like for example from scheduler, we only kill the storage entry if it was not yet
			// updated in the current block.
			if !<DidSetValidationCode<T>>::get() {
				NewValidationCode::<T>::kill();
				weight += T::DbWeight::get().writes(1);
			}

			// The parent hash was unknown during block finalization. Update it here.
			{
				<UnincludedSegment<T>>::mutate(|chain| {
					if let Some(ancestor) = chain.last_mut() {
						let parent = frame_system::Pallet::<T>::parent_hash();
						// Ancestor is the latest finalized block, thus current parent is
						// its output head.
						ancestor.replace_para_head_hash(parent);
					}
				});
				weight += T::DbWeight::get().reads_writes(1, 1);

				// Weight used during finalization.
				weight += T::DbWeight::get().reads_writes(3, 2);
			}

			// Remove the validation from the old block.
			ValidationData::<T>::kill();
			ProcessedDownwardMessages::<T>::kill();
			HrmpWatermark::<T>::kill();
			UpwardMessages::<T>::kill();
			HrmpOutboundMessages::<T>::kill();
			CustomValidationHeadData::<T>::kill();

			// One write per storage item killed above.
			weight += T::DbWeight::get().writes(6);

			// Here, in `on_initialize` we must report the weight for both `on_initialize` and
			// `on_finalize`.
			//
			// One complication here, is that the `host_configuration` is updated by an inherent
			// and those are processed after the block initialization phase. Therefore, we have to
			// be content only with the configuration as per the previous block. That means that
			// the configuration can be either stale (or be absent altogether in case of the
			// beginning of the chain).
			//
			// In order to mitigate this, we do the following. At the time, we are only concerned
			// about `hrmp_max_message_num_per_candidate`. We reserve the amount of weight to
			// process the number of HRMP messages according to the potentially stale
			// configuration. In `on_finalize` we will process only the maximum between the
			// announced number of messages and the actual received in the fresh configuration.
			//
			// In the common case, they will be the same. In the case the actual value is smaller
			// than the announced, we would waste some of weight. In the case the actual value is
			// greater than the announced, we will miss opportunity to send a couple of messages.
			weight += T::DbWeight::get().reads_writes(1, 1);
			let hrmp_max_message_num_per_candidate = HostConfiguration::<T>::get()
				.map(|cfg| cfg.hrmp_max_message_num_per_candidate)
				.unwrap_or(0);
			<AnnouncedHrmpMessagesPerCandidate<T>>::put(hrmp_max_message_num_per_candidate);

			// NOTE that the actual weight consumed by `on_finalize` may turn out lower.
			weight += T::DbWeight::get().reads_writes(
				3 + hrmp_max_message_num_per_candidate as u64,
				4 + hrmp_max_message_num_per_candidate as u64,
			);

			// Weight for updating the last relay chain block number in `on_finalize`.
			weight += T::DbWeight::get().reads_writes(1, 1);

			// Weight for adjusting the unincluded segment in `on_finalize`.
			weight += T::DbWeight::get().reads_writes(6, 3);

			// Always try to read `UpgradeGoAhead` in `on_finalize`.
			weight += T::DbWeight::get().reads(1);

			weight
		}
	}
560
	#[pallet::call]
	impl<T: Config> Pallet<T> {
		/// Set the current validation data.
		///
		/// This should be invoked exactly once per block. It will panic at the finalization
		/// phase if the call was not invoked.
		///
		/// The dispatch origin for this call must be `Inherent`
		///
		/// As a side effect, this function upgrades the current validation function
		/// if the appropriate time has come.
		#[pallet::call_index(0)]
		#[pallet::weight((0, DispatchClass::Mandatory))]
		// TODO: This weight should be corrected. Currently the weight is registered manually in the
		// call with `register_extra_weight_unchecked`.
		pub fn set_validation_data(
			origin: OriginFor<T>,
			data: ParachainInherentData,
		) -> DispatchResult {
			ensure_none(origin)?;
			assert!(
				!<ValidationData<T>>::exists(),
				"ValidationData must be updated only once in a block",
			);

			// TODO: This is more than zero, but will need benchmarking to figure out what.
			let mut total_weight = Weight::zero();

			// NOTE: the inherent data is expected to be unique, even if this block is built
			// in the context of the same relay parent as the previous one. In particular,
			// the inherent shouldn't contain messages that were already processed by any of the
			// ancestors.
			//
			// This invariant should be upheld by the `ProvideInherent` implementation.
			let ParachainInherentData {
				validation_data: vfp,
				relay_chain_state,
				downward_messages,
				horizontal_messages,
			} = data;

			// Check that the associated relay chain block number is as expected.
			T::CheckAssociatedRelayNumber::check_associated_relay_number(
				vfp.relay_parent_number,
				LastRelayChainBlockNumber::<T>::get(),
			);

			let relay_state_proof = RelayChainStateProof::new(
				T::SelfParaId::get(),
				vfp.relay_parent_storage_root,
				relay_chain_state.clone(),
			)
			.expect("Invalid relay chain state proof");

			// Update the desired maximum capacity according to the consensus hook.
			let (consensus_hook_weight, capacity) =
				T::ConsensusHook::on_state_proof(&relay_state_proof);
			total_weight += consensus_hook_weight;
			total_weight += Self::maybe_drop_included_ancestors(&relay_state_proof, capacity);
			// Deposit a log indicating the relay-parent storage root.
			// TODO: remove this in favor of the relay-parent's hash after
			// https://github.com/paritytech/cumulus/issues/303
			frame_system::Pallet::<T>::deposit_log(
				cumulus_primitives_core::rpsr_digest::relay_parent_storage_root_item(
					vfp.relay_parent_storage_root,
					vfp.relay_parent_number,
				),
			);

			// initialization logic: we know that this runs exactly once every block,
			// which means we can put the initialization logic here to remove the
			// sequencing problem.
			let upgrade_go_ahead_signal = relay_state_proof
				.read_upgrade_go_ahead_signal()
				.expect("Invalid upgrade go ahead signal");

			let upgrade_signal_in_segment = AggregatedUnincludedSegment::<T>::get()
				.as_ref()
				.and_then(SegmentTracker::consumed_go_ahead_signal);
			if let Some(signal_in_segment) = upgrade_signal_in_segment.as_ref() {
				// Unincluded ancestor consuming upgrade signal is still within the segment,
				// sanity check that it matches with the signal from relay chain.
				assert_eq!(upgrade_go_ahead_signal, Some(*signal_in_segment));
			}
			match upgrade_go_ahead_signal {
				Some(_signal) if upgrade_signal_in_segment.is_some() => {
					// Do nothing, processing logic was executed by unincluded ancestor.
				},
				Some(relay_chain::UpgradeGoAhead::GoAhead) => {
					assert!(
						<PendingValidationCode<T>>::exists(),
						"No new validation function found in storage, GoAhead signal is not expected",
					);
					let validation_code = <PendingValidationCode<T>>::take();

					frame_system::Pallet::<T>::update_code_in_storage(&validation_code);
					<T::OnSystemEvent as OnSystemEvent>::on_validation_code_applied();
					Self::deposit_event(Event::ValidationFunctionApplied {
						relay_chain_block_num: vfp.relay_parent_number,
					});
				},
				Some(relay_chain::UpgradeGoAhead::Abort) => {
					<PendingValidationCode<T>>::kill();
					Self::deposit_event(Event::ValidationFunctionDiscarded);
				},
				None => {},
			}
			<UpgradeRestrictionSignal<T>>::put(
				relay_state_proof
					.read_upgrade_restriction_signal()
					.expect("Invalid upgrade restriction signal"),
			);
			<UpgradeGoAhead<T>>::put(upgrade_go_ahead_signal);

			let host_config = relay_state_proof
				.read_abridged_host_configuration()
				.expect("Invalid host configuration in relay chain state proof");

			let relevant_messaging_state = relay_state_proof
				.read_messaging_state_snapshot(&host_config)
				.expect("Invalid messaging state in relay chain state proof");

			<ValidationData<T>>::put(&vfp);
			<RelayStateProof<T>>::put(relay_chain_state);
			<RelevantMessagingState<T>>::put(relevant_messaging_state.clone());
			<HostConfiguration<T>>::put(host_config);

			<T::OnSystemEvent as OnSystemEvent>::on_validation_data(&vfp);

			total_weight.saturating_accrue(Self::enqueue_inbound_downward_messages(
				relevant_messaging_state.dmq_mqc_head,
				downward_messages,
			));
			total_weight.saturating_accrue(Self::enqueue_inbound_horizontal_messages(
				&relevant_messaging_state.ingress_channels,
				horizontal_messages,
				vfp.relay_parent_number,
			));

			frame_system::Pallet::<T>::register_extra_weight_unchecked(
				total_weight,
				DispatchClass::Mandatory,
			);

			Ok(())
		}

		/// Send an upward message to the relay chain.
		///
		/// The dispatch origin for this call must be Root.
		#[pallet::call_index(1)]
		#[pallet::weight((1_000, DispatchClass::Operational))]
		pub fn sudo_send_upward_message(
			origin: OriginFor<T>,
			message: UpwardMessage,
		) -> DispatchResult {
			ensure_root(origin)?;
			// Best-effort: an error from `send_upward_message` is deliberately ignored.
			let _ = Self::send_upward_message(message);
			Ok(())
		}

		// WARNING: call indices 2 and 3 were used in a former version of this pallet. Using them
		// again will require to bump the transaction version of runtimes using this pallet.
	}
722
	/// Events emitted by this pallet.
	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config> {
		/// The validation function has been scheduled to apply.
		ValidationFunctionStored,
		/// The validation function was applied as of the contained relay chain block number.
		ValidationFunctionApplied { relay_chain_block_num: RelayChainBlockNumber },
		/// The relay-chain aborted the upgrade process.
		ValidationFunctionDiscarded,
		/// Some downward messages have been received and will be processed.
		DownwardMessagesReceived { count: u32 },
		/// Downward messages were processed using the given weight.
		DownwardMessagesProcessed { weight_used: Weight, dmq_head: relay_chain::Hash },
		/// An upward message was sent to the relay chain.
		UpwardMessageSent { message_hash: Option<XcmHash> },
	}
739
	/// Possible errors of this pallet.
	#[pallet::error]
	pub enum Error<T> {
		/// Attempt to upgrade validation function while existing upgrade pending.
		OverlappingUpgrades,
		/// Polkadot currently prohibits this parachain from upgrading its validation function.
		ProhibitedByPolkadot,
		/// The supplied validation function has compiled into a blob larger than Polkadot is
		/// willing to run.
		TooBig,
		/// The inherent which supplies the validation data did not run this block.
		ValidationDataNotAvailable,
		/// The inherent which supplies the host configuration did not run this block.
		HostConfigurationNotAvailable,
		/// No validation function upgrade is currently scheduled.
		NotScheduled,
	}
756
	/// Latest included block descendants the runtime accepted. In other words, these are
	/// ancestors of the currently executing block which have not been included in the observed
	/// relay-chain state.
	///
	/// The segment length is limited by the capacity returned from the [`ConsensusHook`] configured
	/// in the pallet.
	///
	/// A new ancestor is appended in `on_finalize`; the latest entry's para head hash is
	/// back-filled in `on_initialize` of the following block.
	#[pallet::storage]
	pub type UnincludedSegment<T: Config> = StorageValue<_, Vec<Ancestor<T::Hash>>, ValueQuery>;
765
	/// Storage field that keeps track of bandwidth used by the unincluded segment along with the
	/// latest HRMP watermark. Used for limiting the acceptance of new blocks with
	/// respect to relay chain constraints.
	///
	/// Aggregate counterpart of [`UnincludedSegment`], which stores the per-block ancestors.
	#[pallet::storage]
	pub type AggregatedUnincludedSegment<T: Config> =
		StorageValue<_, SegmentTracker<T::Hash>, OptionQuery>;
772
	/// In case of a scheduled upgrade, this storage field contains the validation code to be
	/// applied.
	///
	/// As soon as the relay chain gives us the go-ahead signal, we will overwrite the
	/// [`:code`][sp_core::storage::well_known_keys::CODE] which will result the next block process
	/// with the new validation code. This concludes the upgrade process.
	#[pallet::storage]
	pub type PendingValidationCode<T: Config> = StorageValue<_, Vec<u8>, ValueQuery>;
781
782	/// Validation code that is set by the parachain and is to be communicated to collator and
783	/// consequently the relay-chain.
784	///
785	/// This will be cleared in `on_initialize` of each new block if no other pallet already set
786	/// the value.
787	#[pallet::storage]
788	pub type NewValidationCode<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;
789
790	/// The [`PersistedValidationData`] set for this block.
791	/// This value is expected to be set only once per block and it's never stored
792	/// in the trie.
793	#[pallet::storage]
794	pub type ValidationData<T: Config> = StorageValue<_, PersistedValidationData>;
795
796	/// Were the validation data set to notify the relay chain?
797	#[pallet::storage]
798	pub type DidSetValidationCode<T: Config> = StorageValue<_, bool, ValueQuery>;
799
	/// The relay chain block number associated with the last parachain block.
	///
	/// This is updated in `on_finalize`.
	///
	/// Inbound messages with a `sent_at` at or below this number are considered already
	/// processed and are stripped from the inherent (see
	/// `Pallet::drop_processed_messages_from_inherent`).
	#[pallet::storage]
	pub type LastRelayChainBlockNumber<T: Config> =
		StorageValue<_, RelayChainBlockNumber, ValueQuery>;

	/// An option which indicates if the relay-chain restricts signalling a validation code upgrade.
	/// In other words, if this is `Some` and [`NewValidationCode`] is `Some` then the produced
	/// candidate will be invalid.
	///
	/// This storage item is a mirror of the corresponding value for the current parachain from the
	/// relay-chain. This value is ephemeral which means it doesn't hit the storage. This value is
	/// set after the inherent.
	#[pallet::storage]
	pub type UpgradeRestrictionSignal<T: Config> =
		StorageValue<_, Option<relay_chain::UpgradeRestriction>, ValueQuery>;

	/// Optional upgrade go-ahead signal from the relay-chain.
	///
	/// This storage item is a mirror of the corresponding value for the current parachain from the
	/// relay-chain. This value is ephemeral which means it doesn't hit the storage. This value is
	/// set after the inherent.
	#[pallet::storage]
	pub type UpgradeGoAhead<T: Config> =
		StorageValue<_, Option<relay_chain::UpgradeGoAhead>, ValueQuery>;

	/// The state proof for the last relay parent block.
	///
	/// This field is meant to be updated each block with the validation data inherent. Therefore,
	/// before processing of the inherent, e.g. in `on_initialize` this data may be stale.
	///
	/// This data is also absent from the genesis.
	#[pallet::storage]
	pub type RelayStateProof<T: Config> = StorageValue<_, sp_trie::StorageProof>;

	/// The snapshot of some state related to messaging relevant to the current parachain as per
	/// the relay parent.
	///
	/// This field is meant to be updated each block with the validation data inherent. Therefore,
	/// before processing of the inherent, e.g. in `on_initialize` this data may be stale.
	///
	/// This data is also absent from the genesis.
	#[pallet::storage]
	pub type RelevantMessagingState<T: Config> = StorageValue<_, MessagingStateSnapshot>;

	/// The parachain host configuration that was obtained from the relay parent.
	///
	/// This field is meant to be updated each block with the validation data inherent. Therefore,
	/// before processing of the inherent, e.g. in `on_initialize` this data may be stale.
	///
	/// This data is also absent from the genesis.
	#[pallet::storage]
	#[pallet::disable_try_decode_storage]
	pub type HostConfiguration<T: Config> = StorageValue<_, AbridgedHostConfiguration>;
855
	/// The last downward message queue chain head we have observed.
	///
	/// This value is loaded before and saved after processing inbound downward messages carried
	/// by the system inherent.
	#[pallet::storage]
	pub type LastDmqMqcHead<T: Config> = StorageValue<_, MessageQueueChain, ValueQuery>;

	/// The message queue chain heads we have observed for each incoming channel.
	///
	/// This value is loaded before and saved after processing inbound horizontal (HRMP) messages
	/// carried by the system inherent.
	#[pallet::storage]
	pub type LastHrmpMqcHeads<T: Config> =
		StorageValue<_, BTreeMap<ParaId, MessageQueueChain>, ValueQuery>;

	/// Number of downward messages processed in a block.
	///
	/// This will be cleared in `on_initialize` of each new block.
	#[pallet::storage]
	pub type ProcessedDownwardMessages<T: Config> = StorageValue<_, u32, ValueQuery>;

	/// HRMP watermark that was set in a block.
	///
	/// This will be cleared in `on_initialize` of each new block.
	#[pallet::storage]
	pub type HrmpWatermark<T: Config> = StorageValue<_, relay_chain::BlockNumber, ValueQuery>;

	/// HRMP messages that were sent in a block.
	///
	/// This will be cleared in `on_initialize` of each new block.
	#[pallet::storage]
	pub type HrmpOutboundMessages<T: Config> =
		StorageValue<_, Vec<OutboundHrmpMessage>, ValueQuery>;

	/// Upward messages that were sent in a block.
	///
	/// This will be cleared in `on_initialize` of each new block.
	#[pallet::storage]
	pub type UpwardMessages<T: Config> = StorageValue<_, Vec<UpwardMessage>, ValueQuery>;

	/// Upward messages that are still pending and not yet sent to the relay chain.
	#[pallet::storage]
	pub type PendingUpwardMessages<T: Config> = StorageValue<_, Vec<UpwardMessage>, ValueQuery>;
899
	/// Initialization value for the delivery fee factor for UMP.
	///
	/// Also acts as a lower bound: `FeeTracker::decrease_fee_factor` never decays the factor
	/// below this value.
	#[pallet::type_value]
	pub fn UpwardInitialDeliveryFeeFactor() -> FixedU128 {
		FixedU128::from_u32(1)
	}

	/// The factor to multiply the base delivery fee by for UMP.
	#[pallet::storage]
	pub type UpwardDeliveryFeeFactor<T: Config> =
		StorageValue<_, FixedU128, ValueQuery, UpwardInitialDeliveryFeeFactor>;

	/// The number of HRMP messages we observed in `on_initialize` and thus used that number for
	/// announcing the weight of `on_initialize` and `on_finalize`.
	#[pallet::storage]
	pub type AnnouncedHrmpMessagesPerCandidate<T: Config> = StorageValue<_, u32, ValueQuery>;

	/// The weight we reserve at the beginning of the block for processing XCMP messages. This
	/// overrides the amount set in the Config trait.
	#[pallet::storage]
	pub type ReservedXcmpWeightOverride<T: Config> = StorageValue<_, Weight>;

	/// The weight we reserve at the beginning of the block for processing DMP messages. This
	/// overrides the amount set in the Config trait.
	#[pallet::storage]
	pub type ReservedDmpWeightOverride<T: Config> = StorageValue<_, Weight>;

	/// A custom head data that should be returned as result of `validate_block`.
	///
	/// See `Pallet::set_custom_validation_head_data` for more information.
	#[pallet::storage]
	pub type CustomValidationHeadData<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;
931
	/// Provides the `set_validation_data` inherent that feeds relay-chain state and inbound
	/// messages into this pallet each block.
	#[pallet::inherent]
	impl<T: Config> ProvideInherent for Pallet<T> {
		type Call = Call<T>;
		type Error = sp_inherents::MakeFatalError<()>;
		const INHERENT_IDENTIFIER: InherentIdentifier =
			cumulus_primitives_parachain_inherent::INHERENT_IDENTIFIER;

		fn create_inherent(data: &InherentData) -> Option<Self::Call> {
			let mut data: ParachainInherentData =
				data.get_data(&Self::INHERENT_IDENTIFIER).ok().flatten().expect(
					"validation function params are always injected into inherent data; qed",
				);

			// Messages with a `sent_at` at or before `LastRelayChainBlockNumber` were already
			// handled by an earlier parachain block; strip them before dispatch.
			Self::drop_processed_messages_from_inherent(&mut data);

			Some(Call::set_validation_data { data })
		}

		fn is_inherent(call: &Self::Call) -> bool {
			matches!(call, Call::set_validation_data { .. })
		}
	}
954
	/// Genesis configuration for this pallet. Carries no data of its own; it only exists so
	/// genesis-build logic below can run.
	#[pallet::genesis_config]
	#[derive(frame_support::DefaultNoBound)]
	pub struct GenesisConfig<T: Config> {
		#[serde(skip)]
		pub _config: core::marker::PhantomData<T>,
	}

	#[pallet::genesis_build]
	impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
		fn build(&self) {
			// TODO: Remove after https://github.com/paritytech/cumulus/issues/479
			// NOTE(review): writes an empty value under the raw `:c` key at genesis; presumably a
			// marker consumed elsewhere — see the linked issue for context.
			sp_io::storage::set(b":c", &[]);
		}
	}
969}
970
971impl<T: Config> Pallet<T> {
972	/// Get the unincluded segment size after the given hash.
973	///
974	/// If the unincluded segment doesn't contain the given hash, this returns the
975	/// length of the entire unincluded segment.
976	///
977	/// This is intended to be used for determining how long the unincluded segment _would be_
978	/// in runtime APIs related to authoring.
979	pub fn unincluded_segment_size_after(included_hash: T::Hash) -> u32 {
980		let segment = UnincludedSegment::<T>::get();
981		crate::unincluded_segment::size_after_included(included_hash, &segment)
982	}
983}
984
985impl<T: Config> FeeTracker for Pallet<T> {
986	type Id = ();
987
988	fn get_fee_factor(_: Self::Id) -> FixedU128 {
989		UpwardDeliveryFeeFactor::<T>::get()
990	}
991
992	fn increase_fee_factor(_: Self::Id, message_size_factor: FixedU128) -> FixedU128 {
993		<UpwardDeliveryFeeFactor<T>>::mutate(|f| {
994			*f = f.saturating_mul(
995				ump_constants::EXPONENTIAL_FEE_BASE.saturating_add(message_size_factor),
996			);
997			*f
998		})
999	}
1000
1001	fn decrease_fee_factor(_: Self::Id) -> FixedU128 {
1002		<UpwardDeliveryFeeFactor<T>>::mutate(|f| {
1003			*f =
1004				UpwardInitialDeliveryFeeFactor::get().max(*f / ump_constants::EXPONENTIAL_FEE_BASE);
1005			*f
1006		})
1007	}
1008}
1009
1010impl<T: Config> ListChannelInfos for Pallet<T> {
1011	fn outgoing_channels() -> Vec<ParaId> {
1012		let Some(state) = RelevantMessagingState::<T>::get() else { return Vec::new() };
1013		state.egress_channels.into_iter().map(|(id, _)| id).collect()
1014	}
1015}
1016
1017impl<T: Config> GetChannelInfo for Pallet<T> {
1018	fn get_channel_status(id: ParaId) -> ChannelStatus {
1019		// Note, that we are using `relevant_messaging_state` which may be from the previous
1020		// block, in case this is called from `on_initialize`, i.e. before the inherent with
1021		// fresh data is submitted.
1022		//
1023		// That shouldn't be a problem though because this is anticipated and already can
1024		// happen. This is because sending implies that a message is buffered until there is
1025		// space to send a message in the candidate. After a while waiting in a buffer, it may
1026		// be discovered that the channel to which a message were addressed is now closed.
1027		// Another possibility, is that the maximum message size was decreased so that a
1028		// message in the buffer doesn't fit. Should any of that happen the sender should be
1029		// notified about the message was discarded.
1030		//
1031		// Here it a similar case, with the difference that the realization that the channel is
1032		// closed came the same block.
1033		let channels = match RelevantMessagingState::<T>::get() {
1034			None => {
1035				log::warn!("calling `get_channel_status` with no RelevantMessagingState?!");
1036				return ChannelStatus::Closed
1037			},
1038			Some(d) => d.egress_channels,
1039		};
1040		// ^^^ NOTE: This storage field should carry over from the previous block. So if it's
1041		// None then it must be that this is an edge-case where a message is attempted to be
1042		// sent at the first block. It should be safe to assume that there are no channels
1043		// opened at all so early. At least, relying on this assumption seems to be a better
1044		// trade-off, compared to introducing an error variant that the clients should be
1045		// prepared to handle.
1046		let index = match channels.binary_search_by_key(&id, |item| item.0) {
1047			Err(_) => return ChannelStatus::Closed,
1048			Ok(i) => i,
1049		};
1050		let meta = &channels[index].1;
1051		if meta.msg_count + 1 > meta.max_capacity {
1052			// The channel is at its capacity. Skip it for now.
1053			return ChannelStatus::Full
1054		}
1055		let max_size_now = meta.max_total_size - meta.total_size;
1056		let max_size_ever = meta.max_message_size;
1057		ChannelStatus::Ready(max_size_now as usize, max_size_ever as usize)
1058	}
1059
1060	fn get_channel_info(id: ParaId) -> Option<ChannelInfo> {
1061		let channels = RelevantMessagingState::<T>::get()?.egress_channels;
1062		let index = channels.binary_search_by_key(&id, |item| item.0).ok()?;
1063		let info = ChannelInfo {
1064			max_capacity: channels[index].1.max_capacity,
1065			max_total_size: channels[index].1.max_total_size,
1066			max_message_size: channels[index].1.max_message_size,
1067			msg_count: channels[index].1.msg_count,
1068			total_size: channels[index].1.total_size,
1069		};
1070		Some(info)
1071	}
1072}
1073
1074impl<T: Config> Pallet<T> {
1075	/// Updates inherent data to only contain messages that weren't already processed
1076	/// by the runtime based on last relay chain block number.
1077	///
1078	/// This method doesn't check for mqc heads mismatch.
1079	fn drop_processed_messages_from_inherent(para_inherent: &mut ParachainInherentData) {
1080		let ParachainInherentData { downward_messages, horizontal_messages, .. } = para_inherent;
1081
1082		// Last relay chain block number. Any message with sent-at block number less
1083		// than or equal to this value is assumed to be processed previously.
1084		let last_relay_block_number = LastRelayChainBlockNumber::<T>::get();
1085
1086		// DMQ.
1087		let dmq_processed_num = downward_messages
1088			.iter()
1089			.take_while(|message| message.sent_at <= last_relay_block_number)
1090			.count();
1091		downward_messages.drain(..dmq_processed_num);
1092
1093		// HRMP.
1094		for horizontal in horizontal_messages.values_mut() {
1095			let horizontal_processed_num = horizontal
1096				.iter()
1097				.take_while(|message| message.sent_at <= last_relay_block_number)
1098				.count();
1099			horizontal.drain(..horizontal_processed_num);
1100		}
1101
1102		// If MQC doesn't match after dropping messages, the runtime will panic when creating
1103		// inherent.
1104	}
1105
	/// Enqueue all inbound downward messages relayed by the collator into the MQ pallet.
	///
	/// Checks if the sequence of the messages is valid, dispatches them and communicates the
	/// number of processed messages to the collator via a storage update.
	///
	/// # Panics
	///
	/// If it turns out that after processing all messages the Message Queue Chain
	/// hash doesn't match the expected.
	fn enqueue_inbound_downward_messages(
		expected_dmq_mqc_head: relay_chain::Hash,
		downward_messages: Vec<InboundDownwardMessage>,
	) -> Weight {
		let dm_count = downward_messages.len() as u32;
		let mut dmq_head = <LastDmqMqcHead<T>>::get();

		let weight_used = T::WeightInfo::enqueue_inbound_downward_messages(dm_count);
		if dm_count != 0 {
			Self::deposit_event(Event::DownwardMessagesReceived { count: dm_count });

			// Eagerly update the MQC head hash:
			for m in &downward_messages {
				dmq_head.extend_downward(m);
			}
			let bounded = downward_messages
				.iter()
				// Note: we are not using `.defensive()` here since that prints the whole value to
				// console. In case that the message is too long, this clogs up the log quite badly.
				.filter_map(|m| match BoundedSlice::try_from(&m.msg[..]) {
					Ok(bounded) => Some(bounded),
					Err(_) => {
						defensive!("Inbound Downward message was too long; dropping");
						None
					},
				});
			// Note: the MQC head computed above covers every relayed message, including any
			// oversized ones dropped here, so the chain check below stays consistent.
			T::DmpQueue::handle_messages(bounded);
			<LastDmqMqcHead<T>>::put(&dmq_head);

			Self::deposit_event(Event::DownwardMessagesProcessed {
				weight_used,
				dmq_head: dmq_head.head(),
			});
		}

		// After hashing each message in the message queue chain submitted by the collator, we
		// should arrive to the MQC head provided by the relay chain.
		//
		// A mismatch means that at least some of the submitted messages were altered, omitted or
		// added improperly.
		assert_eq!(dmq_head.head(), expected_dmq_mqc_head);

		// Communicate the number of consumed DMQ messages back to the collator (read in
		// `collect_collation_info`).
		ProcessedDownwardMessages::<T>::put(dm_count);

		weight_used
	}
1161
	/// Process all inbound horizontal messages relayed by the collator.
	///
	/// This is similar to [`enqueue_inbound_downward_messages`], but works with multiple inbound
	/// channels. It immediately dispatches signals and queues all other XCMs. Blob messages are
	/// ignored.
	///
	/// **Panics** if either any of horizontal messages submitted by the collator was sent from
	///            a para which has no open channel to this parachain or if after processing
	///            messages across all inbound channels MQCs were obtained which do not
	///            correspond to the ones found on the relay-chain.
	fn enqueue_inbound_horizontal_messages(
		ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)],
		horizontal_messages: BTreeMap<ParaId, Vec<InboundHrmpMessage>>,
		relay_parent_number: relay_chain::BlockNumber,
	) -> Weight {
		// First, check that all submitted messages are sent from channels that exist. The
		// channel exists if its MQC head is present in `vfp.hrmp_mqc_heads`.
		for sender in horizontal_messages.keys() {
			// A violation of the assertion below indicates that one of the messages submitted
			// by the collator was sent from a sender that doesn't have a channel opened to
			// this parachain, according to the relay-parent state.
			assert!(ingress_channels.binary_search_by_key(sender, |&(s, _)| s).is_ok(),);
		}

		// Second, prepare horizontal messages for a more convenient processing:
		//
		// instead of a mapping from a para to a list of inbound HRMP messages, we will have a
		// list of tuples `(sender, message)` first ordered by `sent_at` (the relay chain block
		// number in which the message hit the relay-chain) and second ordered by para id
		// ascending.
		//
		// The messages will be dispatched in this order.
		let mut horizontal_messages = horizontal_messages
			.into_iter()
			.flat_map(|(sender, channel_contents)| {
				channel_contents.into_iter().map(move |message| (sender, message))
			})
			.collect::<Vec<_>>();
		horizontal_messages.sort_by(|a, b| {
			// first sort by sent-at and then by the para id
			match a.1.sent_at.cmp(&b.1.sent_at) {
				cmp::Ordering::Equal => a.0.cmp(&b.0),
				ord => ord,
			}
		});

		let last_mqc_heads = <LastHrmpMqcHeads<T>>::get();
		let mut running_mqc_heads = BTreeMap::new();
		// Tracks the highest `sent_at` among processed messages; `None` when no messages were
		// processed in this block.
		let mut hrmp_watermark = None;

		{
			for (sender, ref horizontal_message) in &horizontal_messages {
				if hrmp_watermark.map(|w| w < horizontal_message.sent_at).unwrap_or(true) {
					hrmp_watermark = Some(horizontal_message.sent_at);
				}

				// Extend the per-channel MQC, starting from the head recorded last block (or
				// the default for a previously unseen channel).
				running_mqc_heads
					.entry(sender)
					.or_insert_with(|| last_mqc_heads.get(sender).cloned().unwrap_or_default())
					.extend_hrmp(horizontal_message);
			}
		}
		let message_iter = horizontal_messages
			.iter()
			.map(|&(sender, ref message)| (sender, message.sent_at, &message.data[..]));

		let max_weight =
			<ReservedXcmpWeightOverride<T>>::get().unwrap_or_else(T::ReservedXcmpWeight::get);
		let weight_used = T::XcmpMessageHandler::handle_xcmp_messages(message_iter, max_weight);

		// Check that the MQC heads for each channel provided by the relay chain match the MQC
		// heads we have after processing all incoming messages.
		//
		// Along the way we also carry over the relevant entries from the `last_mqc_heads` to
		// `running_mqc_heads`. Otherwise, in a block where no messages were sent in a channel
		// it won't get into next block's `last_mqc_heads` and thus will be all zeros, which
		// would corrupt the message queue chain.
		for (sender, channel) in ingress_channels {
			let cur_head = running_mqc_heads
				.entry(sender)
				.or_insert_with(|| last_mqc_heads.get(sender).cloned().unwrap_or_default())
				.head();
			let target_head = channel.mqc_head.unwrap_or_default();

			assert!(cur_head == target_head);
		}

		<LastHrmpMqcHeads<T>>::put(running_mqc_heads);

		// If we processed at least one message, then advance watermark to that location or if there
		// were no messages, set it to the block number of the relay parent.
		HrmpWatermark::<T>::put(hrmp_watermark.unwrap_or(relay_parent_number));

		weight_used
	}
1257
	/// Drop blocks from the unincluded segment with respect to the latest parachain head.
	///
	/// Returns the weight consumed. Panics if the included para head is absent from the relay
	/// storage proof (unless the parent itself is expected to be the included block) or if,
	/// after pruning, no capacity is left in the segment for the current block.
	fn maybe_drop_included_ancestors(
		relay_state_proof: &RelayChainStateProof,
		capacity: consensus_hook::UnincludedSegmentCapacity,
	) -> Weight {
		let mut weight_used = Weight::zero();
		// If the unincluded segment length is nonzero, then the parachain head must be present.
		let para_head =
			relay_state_proof.read_included_para_head().ok().map(|h| T::Hashing::hash(&h.0));

		let unincluded_segment_len = <UnincludedSegment<T>>::decode_len().unwrap_or(0);
		weight_used += T::DbWeight::get().reads(1);

		// Clean up unincluded segment if nonempty.
		let included_head = match (para_head, capacity.is_expecting_included_parent()) {
			(Some(h), true) => {
				assert_eq!(
					h,
					frame_system::Pallet::<T>::parent_hash(),
					"expected parent to be included"
				);

				h
			},
			(Some(h), false) => h,
			(None, true) => {
				// All this logic is essentially a workaround to support collators which
				// might still not provide the included block with the state proof.
				frame_system::Pallet::<T>::parent_hash()
			},
			(None, false) => panic!("included head not present in relay storage proof"),
		};

		let new_len = {
			let para_head_hash = included_head;
			let dropped: Vec<Ancestor<T::Hash>> = <UnincludedSegment<T>>::mutate(|chain| {
				// Drop everything up to (inclusive) the block with an included para head, if
				// present.
				let idx = chain
					.iter()
					.position(|block| {
						let head_hash = block
							.para_head_hash()
							.expect("para head hash is updated during block initialization; qed");
						head_hash == &para_head_hash
					})
					.map_or(0, |idx| idx + 1); // inclusive.

				chain.drain(..idx).collect()
			});
			weight_used += T::DbWeight::get().reads_writes(1, 1);

			let new_len = unincluded_segment_len - dropped.len();
			if !dropped.is_empty() {
				// Release the bandwidth the dropped blocks consumed from the aggregated tracker.
				<AggregatedUnincludedSegment<T>>::mutate(|agg| {
					let agg = agg.as_mut().expect(
						"dropped part of the segment wasn't empty, hence value exists; qed",
					);
					for block in dropped {
						agg.subtract(&block);
					}
				});
				weight_used += T::DbWeight::get().reads_writes(1, 1);
			}

			new_len as u32
		};

		// Current block validity check: ensure there is space in the unincluded segment.
		//
		// If this fails, the parachain needs to wait for ancestors to be included before
		// a new block is allowed.
		assert!(new_len < capacity.get(), "no space left for the block in the unincluded segment");
		weight_used
	}
1333
	/// This adjusts the `RelevantMessagingState` according to the bandwidth limits in the
	/// unincluded segment.
	//
	// Reads: 2
	// Writes: 1
	fn adjust_egress_bandwidth_limits() {
		// Nothing to adjust if the unincluded segment is empty (no aggregated tracker).
		let unincluded_segment = match AggregatedUnincludedSegment::<T>::get() {
			None => return,
			Some(s) => s,
		};

		<RelevantMessagingState<T>>::mutate(|messaging_state| {
			let messaging_state = match messaging_state {
				None => return,
				Some(s) => s,
			};

			let used_bandwidth = unincluded_segment.used_bandwidth();

			let channels = &mut messaging_state.egress_channels;
			for (para_id, used) in used_bandwidth.hrmp_outgoing.iter() {
				let i = match channels.binary_search_by_key(para_id, |item| item.0) {
					Ok(i) => i,
					Err(_) => continue, // indicates channel closed.
				};

				let c = &mut channels[i].1;

				// Account for bytes/messages already consumed by the unincluded segment,
				// clamped at the channel limits.
				c.total_size = (c.total_size + used.total_bytes).min(c.max_total_size);
				c.msg_count = (c.msg_count + used.msg_count).min(c.max_capacity);
			}

			// Likewise, shrink the remaining UMP capacity by what the segment already used.
			let upward_capacity = &mut messaging_state.relay_dispatch_queue_remaining_capacity;
			upward_capacity.remaining_count =
				upward_capacity.remaining_count.saturating_sub(used_bandwidth.ump_msg_count);
			upward_capacity.remaining_size =
				upward_capacity.remaining_size.saturating_sub(used_bandwidth.ump_total_bytes);
		});
	}
1373
1374	/// Put a new validation function into a particular location where polkadot
1375	/// monitors for updates. Calling this function notifies polkadot that a new
1376	/// upgrade has been scheduled.
1377	fn notify_polkadot_of_pending_upgrade(code: &[u8]) {
1378		NewValidationCode::<T>::put(code);
1379		<DidSetValidationCode<T>>::put(true);
1380	}
1381
1382	/// The maximum code size permitted, in bytes.
1383	///
1384	/// Returns `None` if the relay chain parachain host configuration hasn't been submitted yet.
1385	pub fn max_code_size() -> Option<u32> {
1386		<HostConfiguration<T>>::get().map(|cfg| cfg.max_code_size)
1387	}
1388
1389	/// The implementation of the runtime upgrade functionality for parachains.
1390	pub fn schedule_code_upgrade(validation_function: Vec<u8>) -> DispatchResult {
1391		// Ensure that `ValidationData` exists. We do not care about the validation data per se,
1392		// but we do care about the [`UpgradeRestrictionSignal`] which arrives with the same
1393		// inherent.
1394		ensure!(<ValidationData<T>>::exists(), Error::<T>::ValidationDataNotAvailable,);
1395		ensure!(<UpgradeRestrictionSignal<T>>::get().is_none(), Error::<T>::ProhibitedByPolkadot);
1396
1397		ensure!(!<PendingValidationCode<T>>::exists(), Error::<T>::OverlappingUpgrades);
1398		let cfg = HostConfiguration::<T>::get().ok_or(Error::<T>::HostConfigurationNotAvailable)?;
1399		ensure!(validation_function.len() <= cfg.max_code_size as usize, Error::<T>::TooBig);
1400
1401		// When a code upgrade is scheduled, it has to be applied in two
1402		// places, synchronized: both polkadot and the individual parachain
1403		// have to upgrade on the same relay chain block.
1404		//
1405		// `notify_polkadot_of_pending_upgrade` notifies polkadot; the `PendingValidationCode`
1406		// storage keeps track locally for the parachain upgrade, which will
1407		// be applied later: when the relay-chain communicates go-ahead signal to us.
1408		Self::notify_polkadot_of_pending_upgrade(&validation_function);
1409		<PendingValidationCode<T>>::put(validation_function);
1410		Self::deposit_event(Event::ValidationFunctionStored);
1411
1412		Ok(())
1413	}
1414
1415	/// Returns the [`CollationInfo`] of the current active block.
1416	///
1417	/// The given `header` is the header of the built block we are collecting the collation info
1418	/// for.
1419	///
1420	/// This is expected to be used by the
1421	/// [`CollectCollationInfo`](cumulus_primitives_core::CollectCollationInfo) runtime api.
1422	pub fn collect_collation_info(header: &HeaderFor<T>) -> CollationInfo {
1423		CollationInfo {
1424			hrmp_watermark: HrmpWatermark::<T>::get(),
1425			horizontal_messages: HrmpOutboundMessages::<T>::get(),
1426			upward_messages: UpwardMessages::<T>::get(),
1427			processed_downward_messages: ProcessedDownwardMessages::<T>::get(),
1428			new_validation_code: NewValidationCode::<T>::get().map(Into::into),
1429			// Check if there is a custom header that will also be returned by the validation phase.
1430			// If so, we need to also return it here.
1431			head_data: CustomValidationHeadData::<T>::get()
1432				.map_or_else(|| header.encode(), |v| v)
1433				.into(),
1434		}
1435	}
1436
	/// Returns the core selector for the next block.
	///
	/// Simply forwards to the configured `SelectCore` implementation.
	pub fn core_selector() -> (CoreSelector, ClaimQueueOffset) {
		T::SelectCore::select_next_core()
	}
1441
	/// Set a custom head data that should be returned as result of `validate_block`.
	///
	/// This will overwrite the head data that is returned as result of `validate_block` while
	/// validating a `PoV` on the relay chain. Normally the head data that is being returned
	/// by `validate_block` is the header of the block that is validated, thus it can be
	/// enacted as the new best block. However, for features like forking it can be useful
	/// to overwrite the head data with a custom header.
	///
	/// The value is stored in [`CustomValidationHeadData`] and picked up by
	/// [`Self::collect_collation_info`].
	///
	/// # Attention
	///
	/// This should only be used when you are sure what you are doing as this can brick
	/// your Parachain.
	pub fn set_custom_validation_head_data(head_data: Vec<u8>) {
		CustomValidationHeadData::<T>::put(head_data);
	}
1457
	/// Send the ump signals.
	///
	/// Appends the `UMP_SEPARATOR` followed by a `SelectCore` signal (carrying the selected
	/// core and claim queue offset) to this block's upward messages.
	#[cfg(feature = "experimental-ump-signals")]
	fn send_ump_signal() {
		use cumulus_primitives_core::relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR};

		UpwardMessages::<T>::mutate(|up| {
			up.push(UMP_SEPARATOR);

			// Send the core selector signal.
			let core_selector = T::SelectCore::selected_core();
			up.push(UMPSignal::SelectCore(core_selector.0, core_selector.1).encode());
		});
	}
1471
1472	/// Open HRMP channel for using it in benchmarks or tests.
1473	///
1474	/// The caller assumes that the pallet will accept regular outbound message to the sibling
1475	/// `target_parachain` after this call. No other assumptions are made.
1476	#[cfg(any(feature = "runtime-benchmarks", feature = "std"))]
1477	pub fn open_outbound_hrmp_channel_for_benchmarks_or_tests(target_parachain: ParaId) {
1478		RelevantMessagingState::<T>::put(MessagingStateSnapshot {
1479			dmq_mqc_head: Default::default(),
1480			relay_dispatch_queue_remaining_capacity: Default::default(),
1481			ingress_channels: Default::default(),
1482			egress_channels: vec![(
1483				target_parachain,
1484				cumulus_primitives_core::AbridgedHrmpChannel {
1485					max_capacity: 10,
1486					max_total_size: 10_000_000_u32,
1487					max_message_size: 10_000_000_u32,
1488					msg_count: 5,
1489					total_size: 5_000_000_u32,
1490					mqc_head: None,
1491				},
1492			)],
1493		})
1494	}
1495
1496	/// Open HRMP channel for using it in benchmarks or tests.
1497	///
1498	/// The caller assumes that the pallet will accept regular outbound message to the sibling
1499	/// `target_parachain` after this call. No other assumptions are made.
1500	#[cfg(any(feature = "runtime-benchmarks", feature = "std"))]
1501	pub fn open_custom_outbound_hrmp_channel_for_benchmarks_or_tests(
1502		target_parachain: ParaId,
1503		channel: cumulus_primitives_core::AbridgedHrmpChannel,
1504	) {
1505		RelevantMessagingState::<T>::put(MessagingStateSnapshot {
1506			dmq_mqc_head: Default::default(),
1507			relay_dispatch_queue_remaining_capacity: Default::default(),
1508			ingress_channels: Default::default(),
1509			egress_channels: vec![(target_parachain, channel)],
1510		})
1511	}
1512
	/// Prepare/insert relevant data for `schedule_code_upgrade` for benchmarks.
	#[cfg(feature = "runtime-benchmarks")]
	pub fn initialize_for_set_code_benchmark(max_code_size: u32) {
		// Insert a dummy `ValidationData` so the `ValidationDataNotAvailable` check passes.
		let vfp = PersistedValidationData {
			parent_head: polkadot_parachain_primitives::primitives::HeadData(Default::default()),
			relay_parent_number: 1,
			relay_parent_storage_root: Default::default(),
			max_pov_size: 1_000,
		};
		<ValidationData<T>>::put(&vfp);

		// Insert a dummy `HostConfiguration` carrying the caller-provided `max_code_size`.
		let host_config = AbridgedHostConfiguration {
			max_code_size,
			max_head_data_size: 32 * 1024,
			max_upward_queue_count: 8,
			max_upward_queue_size: 1024 * 1024,
			max_upward_message_size: 4 * 1024,
			max_upward_message_num_per_candidate: 2,
			hrmp_max_message_num_per_candidate: 2,
			validation_upgrade_cooldown: 2,
			validation_upgrade_delay: 2,
			async_backing_params: relay_chain::AsyncBackingParams {
				allowed_ancestry_len: 0,
				max_candidate_depth: 0,
			},
		};
		<HostConfiguration<T>>::put(host_config);
	}
1543}
1544
1545/// Type that implements `SetCode`.
1546pub struct ParachainSetCode<T>(core::marker::PhantomData<T>);
1547impl<T: Config> frame_system::SetCode<T> for ParachainSetCode<T> {
1548	fn set_code(code: Vec<u8>) -> DispatchResult {
1549		Pallet::<T>::schedule_code_upgrade(code)
1550	}
1551}
1552
impl<T: Config> Pallet<T> {
	/// Puts a message in the `PendingUpwardMessages` storage item.
	/// The message will be later sent in `on_finalize`.
	/// Checks host configuration to see if message is too big.
	/// Increases the delivery fee factor if the queue is sufficiently (see
	/// [`ump_constants::THRESHOLD_FACTOR`]) congested.
	///
	/// Returns the blake2-256 hash of the encoded message (the `u32` in the tuple is
	/// always `0`), or [`MessageSendError::TooBig`] when the message exceeds the relay
	/// chain's `max_upward_message_size`.
	pub fn send_upward_message(message: UpwardMessage) -> Result<(u32, XcmHash), MessageSendError> {
		let message_len = message.len();
		// Check if the message fits into the relay-chain constraints.
		//
		// Note, that we are using `host_configuration` here which may be from the previous
		// block, in case this is called from `on_initialize`, i.e. before the inherent with fresh
		// data is submitted.
		//
		// That shouldn't be a problem since this is a preliminary check and the actual check would
		// be performed just before submitting the message from the candidate, and it already can
		// happen that during the time the message is buffered for sending the relay-chain setting
		// may change so that the message is no longer valid.
		//
		// However, changing this setting is expected to be rare.
		if let Some(cfg) = HostConfiguration::<T>::get() {
			if message_len > cfg.max_upward_message_size as usize {
				return Err(MessageSendError::TooBig)
			}
			// Congestion threshold: a fixed fraction of the maximum queue size in bytes.
			let threshold =
				cfg.max_upward_queue_size.saturating_div(ump_constants::THRESHOLD_FACTOR);
			// We check the threshold against total size and not number of messages since messages
			// could be big or small.
			<PendingUpwardMessages<T>>::append(message.clone());
			let pending_messages = PendingUpwardMessages::<T>::get();
			let total_size: usize = pending_messages.iter().map(UpwardMessage::len).sum();
			if total_size > threshold as usize {
				// We increase the fee factor by a factor based on the new message's size in KB
				let message_size_factor = FixedU128::from((message_len / 1024) as u128)
					.saturating_mul(ump_constants::MESSAGE_SIZE_FEE_BASE);
				Self::increase_fee_factor((), message_size_factor);
			}
		} else {
			// This storage field should carry over from the previous block. So if it's None
			// then it must be that this is an edge-case where a message is attempted to be
			// sent at the first block.
			//
			// Let's pass this message through. I think it's not unreasonable to expect that
			// the message is not huge and it comes through, but if it doesn't it can be
			// returned back to the sender.
			//
			// Thus fall through here.
			<PendingUpwardMessages<T>>::append(message.clone());
		};

		// The relay chain's UMP does not hash the message via `using_encoded`; hash the raw
		// encoded bytes here so the same hash is computed on both sides.
		let hash = sp_io::hashing::blake2_256(&message);
		Self::deposit_event(Event::UpwardMessageSent { message_hash: Some(hash) });
		Ok((0, hash))
	}

	/// Get the relay chain block number which was used as an anchor for the last block in this
	/// chain.
	pub fn last_relay_block_number() -> RelayChainBlockNumber {
		LastRelayChainBlockNumber::<T>::get()
	}
}
1616
1617impl<T: Config> UpwardMessageSender for Pallet<T> {
1618	fn send_upward_message(message: UpwardMessage) -> Result<(u32, XcmHash), MessageSendError> {
1619		Self::send_upward_message(message)
1620	}
1621}
1622
1623impl<T: Config> InspectMessageQueues for Pallet<T> {
1624	fn clear_messages() {
1625		PendingUpwardMessages::<T>::kill();
1626	}
1627
1628	fn get_messages() -> Vec<(VersionedLocation, Vec<VersionedXcm<()>>)> {
1629		use xcm::prelude::*;
1630
1631		let messages: Vec<VersionedXcm<()>> = PendingUpwardMessages::<T>::get()
1632			.iter()
1633			.map(|encoded_message| {
1634				VersionedXcm::<()>::decode_all_with_depth_limit(
1635					MAX_XCM_DECODE_DEPTH,
1636					&mut &encoded_message[..],
1637				)
1638				.unwrap()
1639			})
1640			.collect();
1641
1642		if messages.is_empty() {
1643			vec![]
1644		} else {
1645			vec![(VersionedLocation::from(Location::parent()), messages)]
1646		}
1647	}
1648}
1649
#[cfg(feature = "runtime-benchmarks")]
impl<T: Config> polkadot_runtime_parachains::EnsureForParachain for Pallet<T> {
	fn ensure(para_id: ParaId) {
		// Only open a fresh dummy channel when none is currently available.
		if matches!(Self::get_channel_status(para_id), ChannelStatus::Closed) {
			Self::open_outbound_hrmp_channel_for_benchmarks_or_tests(para_id)
		}
	}
}
1658
/// Something that can check the inherents of a block.
#[deprecated(note = "This trait is deprecated and will be removed by September 2024. \
		Consider switching to `cumulus-pallet-parachain-system::ConsensusHook`")]
pub trait CheckInherents<Block: BlockT> {
	/// Check all inherents of the block.
	///
	/// This function gets passed all the extrinsics of the block, so it is up to the callee to
	/// identify the inherents. The `validation_data` can be used to access the relay chain
	/// state proof while checking the inherents.
	fn check_inherents(
		block: &Block,
		validation_data: &RelayChainStateProof,
	) -> frame_support::inherent::CheckInherentsResult;
}
1672
1673/// Struct that always returns `Ok` on inherents check, needed for backwards-compatibility.
1674#[doc(hidden)]
1675pub struct DummyCheckInherents<Block>(core::marker::PhantomData<Block>);
1676
1677#[allow(deprecated)]
1678impl<Block: BlockT> CheckInherents<Block> for DummyCheckInherents<Block> {
1679	fn check_inherents(
1680		_: &Block,
1681		_: &RelayChainStateProof,
1682	) -> frame_support::inherent::CheckInherentsResult {
1683		sp_inherents::CheckInherentsResult::new()
1684	}
1685}
1686
/// Something that should be informed about system related events.
///
/// This includes events like [`on_validation_data`](Self::on_validation_data) that is being
/// called when the parachain inherent is executed that contains the validation data.
/// Or like [`on_validation_code_applied`](Self::on_validation_code_applied) that is called
/// when the new validation code is written to the state. This means that
/// from the next block on, the runtime is using this new code.
#[impl_trait_for_tuples::impl_for_tuples(30)]
pub trait OnSystemEvent {
	/// Called once in each block when the validation data is set by the inherent.
	fn on_validation_data(data: &PersistedValidationData);
	/// Called when the validation code is being applied, i.e. from the next block on this is
	/// the new runtime.
	fn on_validation_code_applied();
}
1702
/// Holds the most recent relay-parent state root and block number of the current parachain block.
#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Default, RuntimeDebug)]
pub struct RelayChainState {
	/// Block number of the relay parent.
	pub number: relay_chain::BlockNumber,
	/// State root at that relay chain block.
	pub state_root: relay_chain::Hash,
}
1711
/// This exposes the [`RelayChainState`] to other runtime modules.
///
/// Enables parachains to read relay chain state via state proofs.
pub trait RelaychainStateProvider {
	/// May be called by any runtime module to obtain the current state of the relay chain.
	///
	/// **NOTE**: This is not guaranteed to return monotonically increasing relay parents.
	fn current_relay_chain_state() -> RelayChainState;

	/// Utility function only to be used in benchmarking scenarios, to be implemented optionally,
	/// else a noop.
	///
	/// It allows for setting a custom `RelayChainState`. The default implementation discards
	/// the given state.
	#[cfg(feature = "runtime-benchmarks")]
	fn set_current_relay_chain_state(_state: RelayChainState) {}
}
1728
/// Implements [`BlockNumberProvider`] that returns relay chain block number fetched from
/// validation data.
///
/// When validation data is not available (e.g. within `on_initialize`), it will fallback to use
/// [`Pallet::last_relay_block_number()`].
///
/// **NOTE**: This has been deprecated, please use [`RelaychainDataProvider`]
#[deprecated = "Use `RelaychainDataProvider` instead"]
pub type RelaychainBlockNumberProvider<T> = RelaychainDataProvider<T>;
1738
/// Implements [`BlockNumberProvider`] and [`RelaychainStateProvider`] that returns relevant relay
/// data fetched from validation data.
///
/// NOTE: When validation data is not available (e.g. within `on_initialize`):
///
/// - [`current_relay_chain_state`](Self::current_relay_chain_state): Will return the default value
///   of [`RelayChainState`].
/// - [`current_block_number`](Self::current_block_number): Will return
///   [`Pallet::last_relay_block_number()`].
pub struct RelaychainDataProvider<T>(core::marker::PhantomData<T>);
1749
1750impl<T: Config> BlockNumberProvider for RelaychainDataProvider<T> {
1751	type BlockNumber = relay_chain::BlockNumber;
1752
1753	fn current_block_number() -> relay_chain::BlockNumber {
1754		ValidationData::<T>::get()
1755			.map(|d| d.relay_parent_number)
1756			.unwrap_or_else(|| Pallet::<T>::last_relay_block_number())
1757	}
1758
1759	#[cfg(feature = "runtime-benchmarks")]
1760	fn set_block_number(block: Self::BlockNumber) {
1761		let mut validation_data = ValidationData::<T>::get().unwrap_or_else(||
1762			// PersistedValidationData does not impl default in non-std
1763			PersistedValidationData {
1764				parent_head: vec![].into(),
1765				relay_parent_number: Default::default(),
1766				max_pov_size: Default::default(),
1767				relay_parent_storage_root: Default::default(),
1768			});
1769		validation_data.relay_parent_number = block;
1770		ValidationData::<T>::put(validation_data)
1771	}
1772}
1773
1774impl<T: Config> RelaychainStateProvider for RelaychainDataProvider<T> {
1775	fn current_relay_chain_state() -> RelayChainState {
1776		ValidationData::<T>::get()
1777			.map(|d| RelayChainState {
1778				number: d.relay_parent_number,
1779				state_root: d.relay_parent_storage_root,
1780			})
1781			.unwrap_or_default()
1782	}
1783
1784	#[cfg(feature = "runtime-benchmarks")]
1785	fn set_current_relay_chain_state(state: RelayChainState) {
1786		let mut validation_data = ValidationData::<T>::get().unwrap_or_else(||
1787			// PersistedValidationData does not impl default in non-std
1788			PersistedValidationData {
1789				parent_head: vec![].into(),
1790				relay_parent_number: Default::default(),
1791				max_pov_size: Default::default(),
1792				relay_parent_storage_root: Default::default(),
1793			});
1794		validation_data.relay_parent_number = state.number;
1795		validation_data.relay_parent_storage_root = state.state_root;
1796		ValidationData::<T>::put(validation_data)
1797	}
1798}