// substrate/frame/scheduler/src/lib.rs

// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// 	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! > Made with *Substrate*, for *Polkadot*.
//!
//! [![github]](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/frame/scheduler) -
//! [![polkadot]](https://polkadot.com)
//!
//! [polkadot]: https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white
//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
//!
//! # Scheduler Pallet
//!
//! A Pallet for scheduling runtime calls.
//!
//! ## Overview
//!
//! This Pallet exposes capabilities for scheduling runtime calls to occur at a specified block
//! number or at a specified period. These scheduled runtime calls may be named or anonymous and may
//! be canceled.
//!
//! __NOTE:__ Instead of using the filter contained in the origin to call `fn schedule`, scheduled
//! runtime calls will be dispatched with the default filter for the origin: namely
//! `frame_system::Config::BaseCallFilter` for all origin types (except root which will get no
//! filter).
//!
//! If a call is scheduled using a proxy or any other mechanism which adds a filter, then that
//! filter will not be used when dispatching the scheduled runtime call.
//!
//! ### Examples
//!
//! 1. Scheduling a runtime call at a specific block.
#![doc = docify::embed!("src/tests.rs", basic_scheduling_works)]
//! 2. Scheduling a preimage hash of a runtime call at a specific block
#![doc = docify::embed!("src/tests.rs", scheduling_with_preimages_works)]

//! ## Pallet API
//!
//! See the [`pallet`] module for more information about the interfaces this pallet exposes,
//! including its configuration trait, dispatchables, storage items, events and errors.
//!
//! ## Warning
//!
//! This Pallet executes all scheduled runtime calls in the [`on_initialize`] hook. Do not execute
//! any runtime calls which should not be considered mandatory.
//!
//! Please be aware that any scheduled runtime calls executed in a future block may __fail__ or may
//! result in __undefined behavior__ since the runtime could have upgraded between the time of
//! scheduling and execution. For example, the runtime upgrade could have:
//!
//! * Modified the implementation of the runtime call (runtime specification upgrade).
//!     * Could lead to undefined behavior.
//! * Removed or changed the ordering/index of the runtime call.
//!     * Could fail due to the runtime call index not being part of the `Call`.
//!     * Could lead to undefined behavior, such as executing another runtime call with the same
//!       index.
//!
//! [`on_initialize`]: frame_support::traits::Hooks::on_initialize

// Ensure we're `no_std` when compiling for Wasm.
#![cfg_attr(not(feature = "std"), no_std)]

#[cfg(feature = "runtime-benchmarks")]
mod benchmarking;
pub mod migration;
#[cfg(test)]
mod mock;
#[cfg(test)]
mod tests;
pub mod weights;

extern crate alloc;

use alloc::{boxed::Box, vec::Vec};
use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
use core::{borrow::Borrow, cmp::Ordering, marker::PhantomData};
use frame_support::{
	dispatch::{DispatchResult, GetDispatchInfo, Parameter, RawOrigin},
	ensure,
	traits::{
		schedule::{self, DispatchTime, MaybeHashed},
		Bounded, CallerTrait, EnsureOrigin, Get, IsType, OriginTrait, PalletInfoAccess,
		PrivilegeCmp, QueryPreimage, StorageVersion, StorePreimage,
	},
	weights::{Weight, WeightMeter},
};
use frame_system::{self as system};
use scale_info::TypeInfo;
use sp_io::hashing::blake2_256;
use sp_runtime::{
	traits::{BadOrigin, BlockNumberProvider, Dispatchable, One, Saturating, Zero},
	BoundedVec, Debug, DispatchError,
};

// Re-export all macro-generated pallet items (Config, Call, Event, ...) at the crate root.
pub use pallet::*;
pub use weights::WeightInfo;

/// Just a simple index for naming period tasks.
pub type PeriodicIndex = u32;
/// The location of a scheduled task that can be used to remove it.
pub type TaskAddress<BlockNumber> = (BlockNumber, u32);

/// A runtime call given either literally or by its hash (legacy V3 representation).
pub type CallOrHashOf<T> =
	MaybeHashed<<T as Config>::RuntimeCall, <T as frame_system::Config>::Hash>;

/// A bounded runtime call, hashed with the system's configured `Hashing`.
pub type BoundedCallOf<T> =
	Bounded<<T as Config>::RuntimeCall, <T as frame_system::Config>::Hashing>;

/// The block number type of the configured `Config::BlockNumberProvider`.
///
/// NOTE(review): this intentionally shadows the usual `frame_system` alias of the same
/// name — the scheduler clock may be remote (e.g. relay-chain driven).
pub type BlockNumberFor<T> =
	<<T as Config>::BlockNumberProvider as BlockNumberProvider>::BlockNumber;

126/// The configuration of the retry mechanism for a given task along with its current state.
127#[derive(
128	Clone,
129	Copy,
130	Debug,
131	PartialEq,
132	Eq,
133	Encode,
134	Decode,
135	DecodeWithMemTracking,
136	MaxEncodedLen,
137	TypeInfo,
138)]
139pub struct RetryConfig<Period> {
140	/// Initial amount of retries allowed.
141	pub total_retries: u8,
142	/// Amount of retries left.
143	pub remaining: u8,
144	/// Period of time between retry attempts.
145	pub period: Period,
146}
147
/// V1 (unbounded) storage layout of a scheduled task; retained only so that
/// `migrate_v1_to_v4` can decode old agendas.
#[cfg_attr(any(feature = "std", test), derive(PartialEq, Eq))]
#[derive(Clone, Debug, Encode, Decode)]
struct ScheduledV1<Call, BlockNumber> {
	// Unbounded identity of the task, if it was named.
	maybe_id: Option<Vec<u8>>,
	// The task's priority.
	priority: schedule::Priority,
	// The call to be dispatched, stored literally (unbounded).
	call: Call,
	// If the call is periodic, the information concerning that.
	maybe_periodic: Option<schedule::Period<BlockNumber>>,
}

/// Information regarding an item to be executed in the future.
#[derive(
	Clone, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo, DecodeWithMemTracking,
)]
pub struct Scheduled<Name, Call, BlockNumber, PalletsOrigin, AccountId> {
	/// The unique identity for this task, if there is one.
	pub maybe_id: Option<Name>,
	/// This task's priority.
	pub priority: schedule::Priority,
	/// The call to be dispatched.
	pub call: Call,
	/// If the call is periodic, then this points to the information concerning that.
	pub maybe_periodic: Option<schedule::Period<BlockNumber>>,
	/// The origin with which to dispatch the call.
	pub origin: PalletsOrigin,
	// Ties the otherwise-unused `AccountId` type parameter to the struct.
	#[doc(hidden)]
	pub _phantom: PhantomData<AccountId>,
}

176impl<Name, Call, BlockNumber, PalletsOrigin, AccountId>
177	Scheduled<Name, Call, BlockNumber, PalletsOrigin, AccountId>
178where
179	Call: Clone,
180	PalletsOrigin: Clone,
181{
182	/// Create a new task to be used for retry attempts of the original one. The cloned task will
183	/// have the same `priority`, `call` and `origin`, but will always be non-periodic and unnamed.
184	pub fn as_retry(&self) -> Self {
185		Self {
186			maybe_id: None,
187			priority: self.priority,
188			call: self.call.clone(),
189			maybe_periodic: None,
190			origin: self.origin.clone(),
191			_phantom: Default::default(),
192		}
193	}
194}
195
// The task layout is structurally identical across V2, V3 and V4 — only the types it
// is instantiated with differ — so one struct backs all three version aliases.
use crate::{Scheduled as ScheduledV3, Scheduled as ScheduledV2};

/// V2 storage layout of a task: unbounded identity, literal call.
pub type ScheduledV2Of<T> = ScheduledV2<
	Vec<u8>,
	<T as Config>::RuntimeCall,
	BlockNumberFor<T>,
	<T as Config>::PalletsOrigin,
	<T as frame_system::Config>::AccountId,
>;

/// V3 storage layout of a task: unbounded identity, call-or-hash.
pub type ScheduledV3Of<T> = ScheduledV3<
	Vec<u8>,
	CallOrHashOf<T>,
	BlockNumberFor<T>,
	<T as Config>::PalletsOrigin,
	<T as frame_system::Config>::AccountId,
>;

/// Current (V4) layout of a task: hashed `TaskName` identity, bounded call.
pub type ScheduledOf<T> = Scheduled<
	TaskName,
	BoundedCallOf<T>,
	BlockNumberFor<T>,
	<T as Config>::PalletsOrigin,
	<T as frame_system::Config>::AccountId,
>;

222pub(crate) trait MarginalWeightInfo: WeightInfo {
223	fn service_task(maybe_lookup_len: Option<usize>, named: bool, periodic: bool) -> Weight {
224		let base = Self::service_task_base();
225		let mut total = match maybe_lookup_len {
226			None => base,
227			Some(l) => Self::service_task_fetched(l as u32),
228		};
229		if named {
230			total.saturating_accrue(Self::service_task_named().saturating_sub(base));
231		}
232		if periodic {
233			total.saturating_accrue(Self::service_task_periodic().saturating_sub(base));
234		}
235		total
236	}
237}
238impl<T: WeightInfo> MarginalWeightInfo for T {}
239
240#[frame_support::pallet]
241pub mod pallet {
242	use super::*;
243	use frame_support::{dispatch::PostDispatchInfo, pallet_prelude::*};
244	use frame_system::pallet_prelude::{BlockNumberFor as SystemBlockNumberFor, OriginFor};
245
246	/// The in-code storage version.
247	const STORAGE_VERSION: StorageVersion = StorageVersion::new(4);
248
249	#[pallet::pallet]
250	#[pallet::storage_version(STORAGE_VERSION)]
251	pub struct Pallet<T>(_);
252
253	/// `system::Config` should always be included in our implied traits.
254	#[pallet::config]
255	pub trait Config: frame_system::Config {
256		/// The overarching event type.
257		#[allow(deprecated)]
258		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
259
260		/// The aggregated origin which the dispatch will take.
261		type RuntimeOrigin: OriginTrait<PalletsOrigin = Self::PalletsOrigin>
262			+ From<Self::PalletsOrigin>
263			+ IsType<<Self as system::Config>::RuntimeOrigin>;
264
265		/// The caller origin, overarching type of all pallets origins.
266		type PalletsOrigin: From<system::RawOrigin<Self::AccountId>>
267			+ CallerTrait<Self::AccountId>
268			+ MaxEncodedLen;
269
270		/// The aggregated call type.
271		type RuntimeCall: Parameter
272			+ Dispatchable<
273				RuntimeOrigin = <Self as Config>::RuntimeOrigin,
274				PostInfo = PostDispatchInfo,
275			> + GetDispatchInfo
276			+ From<system::Call<Self>>;
277
278		/// The maximum weight that may be scheduled per block for any dispatchables.
279		#[pallet::constant]
280		type MaximumWeight: Get<Weight>;
281
282		/// Required origin to schedule or cancel calls.
283		type ScheduleOrigin: EnsureOrigin<<Self as system::Config>::RuntimeOrigin>;
284
285		/// Compare the privileges of origins.
286		///
287		/// This will be used when canceling a task, to ensure that the origin that tries
288		/// to cancel has greater or equal privileges as the origin that created the scheduled task.
289		///
290		/// For simplicity the [`EqualPrivilegeOnly`](frame_support::traits::EqualPrivilegeOnly) can
291		/// be used. This will only check if two given origins are equal.
292		type OriginPrivilegeCmp: PrivilegeCmp<Self::PalletsOrigin>;
293
294		/// The maximum number of scheduled calls in the queue for a single block.
295		///
296		/// NOTE:
297		/// + Dependent pallets' benchmarks might require a higher limit for the setting. Set a
298		/// higher limit under `runtime-benchmarks` feature.
299		#[pallet::constant]
300		type MaxScheduledPerBlock: Get<u32>;
301
302		/// Weight information for extrinsics in this pallet.
303		type WeightInfo: WeightInfo;
304
305		/// The preimage provider with which we look up call hashes to get the call.
306		type Preimages: QueryPreimage<H = Self::Hashing> + StorePreimage;
307
308		/// Query the current block number.
309		///
310		/// Must return monotonically increasing values when called from consecutive blocks. It is
311		/// generally expected that the values also do not differ "too much" between consecutive
312		/// blocks. A future addition to this pallet will allow bigger difference between
313		/// consecutive blocks to make it possible to be utilized by parachains with *Agile
314		/// Coretime*. *Agile Coretime* parachains are currently not supported and must continue to
315		/// use their local block number provider.
316		///
317		/// Can be configured to return either:
318		/// - the local block number of the runtime via `frame_system::Pallet`
319		/// - a remote block number, eg from the relay chain through `RelaychainDataProvider`
320		/// - an arbitrary value through a custom implementation of the trait
321		///
322		/// Suggested values:
323		/// - Solo- and Relay-chains should use `frame_system::Pallet`. There are no concerns with
324		///   this configuration.
325		/// - Parachains should also use `frame_system::Pallet` for the time being. The scheduler
326		///   pallet is not yet ready for the case that big numbers of blocks are skipped. In an
327		///   *Agile Coretime* chain with relay chain number provider configured, it could otherwise
328		///   happen that the scheduler will not be able to catch up to its agendas, since too many
329		///   relay blocks are missing if the parachain only produces blocks rarely.
330		///
331		/// There is currently no migration provided to "hot-swap" block number providers and it is
332		/// therefore highly advised to stay with the default (local) values. If you still want to
333		/// swap block number providers on the fly, then please at least ensure that you do not run
334		/// any pallet migration in the same runtime upgrade.
335		type BlockNumberProvider: BlockNumberProvider;
336	}
337
338	/// Block number at which the agenda began incomplete execution.
339	#[pallet::storage]
340	pub type IncompleteSince<T: Config> = StorageValue<_, BlockNumberFor<T>>;
341
342	/// Items to be executed, indexed by the block number that they should be executed on.
343	#[pallet::storage]
344	pub type Agenda<T: Config> = StorageMap<
345		_,
346		Twox64Concat,
347		BlockNumberFor<T>,
348		BoundedVec<Option<ScheduledOf<T>>, T::MaxScheduledPerBlock>,
349		ValueQuery,
350	>;
351
352	/// Retry configurations for items to be executed, indexed by task address.
353	#[pallet::storage]
354	pub type Retries<T: Config> = StorageMap<
355		_,
356		Blake2_128Concat,
357		TaskAddress<BlockNumberFor<T>>,
358		RetryConfig<BlockNumberFor<T>>,
359		OptionQuery,
360	>;
361
362	/// Lookup from a name to the block number and index of the task.
363	///
364	/// For v3 -> v4 the previously unbounded identities are Blake2-256 hashed to form the v4
365	/// identities.
366	#[pallet::storage]
367	pub type Lookup<T: Config> =
368		StorageMap<_, Twox64Concat, TaskName, TaskAddress<BlockNumberFor<T>>>;
369
370	/// Events type.
371	#[pallet::event]
372	#[pallet::generate_deposit(pub(super) fn deposit_event)]
373	pub enum Event<T: Config> {
374		/// Scheduled some task.
375		Scheduled { when: BlockNumberFor<T>, index: u32 },
376		/// Canceled some task.
377		Canceled { when: BlockNumberFor<T>, index: u32 },
378		/// Dispatched some task.
379		Dispatched {
380			task: TaskAddress<BlockNumberFor<T>>,
381			id: Option<TaskName>,
382			result: DispatchResult,
383		},
384		/// Set a retry configuration for some task.
385		RetrySet {
386			task: TaskAddress<BlockNumberFor<T>>,
387			id: Option<TaskName>,
388			period: BlockNumberFor<T>,
389			retries: u8,
390		},
391		/// Cancel a retry configuration for some task.
392		RetryCancelled { task: TaskAddress<BlockNumberFor<T>>, id: Option<TaskName> },
393		/// The call for the provided hash was not found so the task has been aborted.
394		CallUnavailable { task: TaskAddress<BlockNumberFor<T>>, id: Option<TaskName> },
395		/// The given task was unable to be renewed since the agenda is full at that block.
396		PeriodicFailed { task: TaskAddress<BlockNumberFor<T>>, id: Option<TaskName> },
397		/// The given task was unable to be retried since the agenda is full at that block or there
398		/// was not enough weight to reschedule it.
399		RetryFailed { task: TaskAddress<BlockNumberFor<T>>, id: Option<TaskName> },
400		/// The given task can never be executed since it is overweight.
401		PermanentlyOverweight { task: TaskAddress<BlockNumberFor<T>>, id: Option<TaskName> },
402		/// Agenda is incomplete from `when`.
403		AgendaIncomplete { when: BlockNumberFor<T> },
404	}
405
406	#[pallet::error]
407	pub enum Error<T> {
408		/// Failed to schedule a call
409		FailedToSchedule,
410		/// Cannot find the scheduled call.
411		NotFound,
412		/// Given target block number is in the past.
413		TargetBlockNumberInPast,
414		/// Reschedule failed because it does not change scheduled time.
415		RescheduleNoChange,
416		/// Attempt to use a non-named function on a named task.
417		Named,
418	}
419
420	#[pallet::hooks]
421	impl<T: Config> Hooks<SystemBlockNumberFor<T>> for Pallet<T> {
422		/// Execute the scheduled calls
423		fn on_initialize(_now: SystemBlockNumberFor<T>) -> Weight {
424			let now = T::BlockNumberProvider::current_block_number();
425			let mut weight_counter = frame_system::Pallet::<T>::remaining_block_weight()
426				.limit_to(T::MaximumWeight::get());
427			Self::service_agendas(&mut weight_counter, now, u32::MAX);
428			weight_counter.consumed()
429		}
430
431		#[cfg(feature = "std")]
432		fn integrity_test() {
433			/// Calculate the maximum weight that a lookup of a given size can take.
434			fn lookup_weight<T: Config>(s: usize) -> Weight {
435				T::WeightInfo::service_agendas_base() +
436					T::WeightInfo::service_agenda_base(T::MaxScheduledPerBlock::get()) +
437					T::WeightInfo::service_task(Some(s), true, true)
438			}
439
440			let limit = sp_runtime::Perbill::from_percent(90) * T::MaximumWeight::get();
441
442			let small_lookup = lookup_weight::<T>(128);
443			assert!(small_lookup.all_lte(limit), "Must be possible to submit a small lookup");
444
445			let medium_lookup = lookup_weight::<T>(1024);
446			assert!(medium_lookup.all_lte(limit), "Must be possible to submit a medium lookup");
447
448			let large_lookup = lookup_weight::<T>(1024 * 1024);
449			assert!(large_lookup.all_lte(limit), "Must be possible to submit a large lookup");
450		}
451	}
452
453	#[pallet::call]
454	impl<T: Config> Pallet<T> {
455		/// Anonymously schedule a task.
456		#[pallet::call_index(0)]
457		#[pallet::weight(<T as Config>::WeightInfo::schedule(T::MaxScheduledPerBlock::get()))]
458		pub fn schedule(
459			origin: OriginFor<T>,
460			when: BlockNumberFor<T>,
461			maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
462			priority: schedule::Priority,
463			call: Box<<T as Config>::RuntimeCall>,
464		) -> DispatchResult {
465			T::ScheduleOrigin::ensure_origin(origin.clone())?;
466			let origin = <T as Config>::RuntimeOrigin::from(origin);
467			Self::do_schedule(
468				DispatchTime::At(when),
469				maybe_periodic,
470				priority,
471				origin.caller().clone(),
472				T::Preimages::bound(*call)?,
473			)?;
474			Ok(())
475		}
476
477		/// Cancel a scheduled task (named or anonymous), by providing the block it is scheduled for
478		/// execution in, as well as the index of the task in that block's agenda.
479		///
480		/// In the case of a named task, it will remove it from the lookup table as well.
481		#[pallet::call_index(1)]
482		#[pallet::weight(<T as Config>::WeightInfo::cancel(T::MaxScheduledPerBlock::get()))]
483		pub fn cancel(origin: OriginFor<T>, when: BlockNumberFor<T>, index: u32) -> DispatchResult {
484			T::ScheduleOrigin::ensure_origin(origin.clone())?;
485			let origin = <T as Config>::RuntimeOrigin::from(origin);
486			Self::do_cancel(Some(origin.caller().clone()), (when, index))?;
487			Ok(())
488		}
489
490		/// Schedule a named task.
491		#[pallet::call_index(2)]
492		#[pallet::weight(<T as Config>::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get()))]
493		pub fn schedule_named(
494			origin: OriginFor<T>,
495			id: TaskName,
496			when: BlockNumberFor<T>,
497			maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
498			priority: schedule::Priority,
499			call: Box<<T as Config>::RuntimeCall>,
500		) -> DispatchResult {
501			T::ScheduleOrigin::ensure_origin(origin.clone())?;
502			let origin = <T as Config>::RuntimeOrigin::from(origin);
503			Self::do_schedule_named(
504				id,
505				DispatchTime::At(when),
506				maybe_periodic,
507				priority,
508				origin.caller().clone(),
509				T::Preimages::bound(*call)?,
510			)?;
511			Ok(())
512		}
513
514		/// Cancel a named scheduled task.
515		#[pallet::call_index(3)]
516		#[pallet::weight(<T as Config>::WeightInfo::cancel_named(T::MaxScheduledPerBlock::get()))]
517		pub fn cancel_named(origin: OriginFor<T>, id: TaskName) -> DispatchResult {
518			T::ScheduleOrigin::ensure_origin(origin.clone())?;
519			let origin = <T as Config>::RuntimeOrigin::from(origin);
520			Self::do_cancel_named(Some(origin.caller().clone()), id)?;
521			Ok(())
522		}
523
524		/// Anonymously schedule a task after a delay.
525		#[pallet::call_index(4)]
526		#[pallet::weight(<T as Config>::WeightInfo::schedule(T::MaxScheduledPerBlock::get()))]
527		pub fn schedule_after(
528			origin: OriginFor<T>,
529			after: BlockNumberFor<T>,
530			maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
531			priority: schedule::Priority,
532			call: Box<<T as Config>::RuntimeCall>,
533		) -> DispatchResult {
534			T::ScheduleOrigin::ensure_origin(origin.clone())?;
535			let origin = <T as Config>::RuntimeOrigin::from(origin);
536			Self::do_schedule(
537				DispatchTime::After(after),
538				maybe_periodic,
539				priority,
540				origin.caller().clone(),
541				T::Preimages::bound(*call)?,
542			)?;
543			Ok(())
544		}
545
546		/// Schedule a named task after a delay.
547		#[pallet::call_index(5)]
548		#[pallet::weight(<T as Config>::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get()))]
549		pub fn schedule_named_after(
550			origin: OriginFor<T>,
551			id: TaskName,
552			after: BlockNumberFor<T>,
553			maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
554			priority: schedule::Priority,
555			call: Box<<T as Config>::RuntimeCall>,
556		) -> DispatchResult {
557			T::ScheduleOrigin::ensure_origin(origin.clone())?;
558			let origin = <T as Config>::RuntimeOrigin::from(origin);
559			Self::do_schedule_named(
560				id,
561				DispatchTime::After(after),
562				maybe_periodic,
563				priority,
564				origin.caller().clone(),
565				T::Preimages::bound(*call)?,
566			)?;
567			Ok(())
568		}
569
570		/// Set a retry configuration for a task so that, in case its scheduled run fails, it will
571		/// be retried after `period` blocks, for a total amount of `retries` retries or until it
572		/// succeeds.
573		///
574		/// Tasks which need to be scheduled for a retry are still subject to weight metering and
575		/// agenda space, same as a regular task. If a periodic task fails, it will be scheduled
576		/// normally while the task is retrying.
577		///
578		/// Tasks scheduled as a result of a retry for a periodic task are unnamed, non-periodic
579		/// clones of the original task. Their retry configuration will be derived from the
580		/// original task's configuration, but will have a lower value for `remaining` than the
581		/// original `total_retries`.
582		///
583		/// This call **cannot** be used to set a retry configuration for a named task.
584		#[pallet::call_index(6)]
585		#[pallet::weight(<T as Config>::WeightInfo::set_retry())]
586		pub fn set_retry(
587			origin: OriginFor<T>,
588			task: TaskAddress<BlockNumberFor<T>>,
589			retries: u8,
590			period: BlockNumberFor<T>,
591		) -> DispatchResult {
592			T::ScheduleOrigin::ensure_origin(origin.clone())?;
593			let origin = <T as Config>::RuntimeOrigin::from(origin);
594			let (when, index) = task;
595			let agenda = Agenda::<T>::get(when);
596			let scheduled = agenda
597				.get(index as usize)
598				.and_then(Option::as_ref)
599				.ok_or(Error::<T>::NotFound)?;
600			Self::ensure_privilege(origin.caller(), &scheduled.origin)?;
601			Retries::<T>::insert(
602				(when, index),
603				RetryConfig { total_retries: retries, remaining: retries, period },
604			);
605			Self::deposit_event(Event::RetrySet { task, id: None, period, retries });
606			Ok(())
607		}
608
609		/// Set a retry configuration for a named task so that, in case its scheduled run fails, it
610		/// will be retried after `period` blocks, for a total amount of `retries` retries or until
611		/// it succeeds.
612		///
613		/// Tasks which need to be scheduled for a retry are still subject to weight metering and
614		/// agenda space, same as a regular task. If a periodic task fails, it will be scheduled
615		/// normally while the task is retrying.
616		///
617		/// Tasks scheduled as a result of a retry for a periodic task are unnamed, non-periodic
618		/// clones of the original task. Their retry configuration will be derived from the
619		/// original task's configuration, but will have a lower value for `remaining` than the
620		/// original `total_retries`.
621		///
622		/// This is the only way to set a retry configuration for a named task.
623		#[pallet::call_index(7)]
624		#[pallet::weight(<T as Config>::WeightInfo::set_retry_named())]
625		pub fn set_retry_named(
626			origin: OriginFor<T>,
627			id: TaskName,
628			retries: u8,
629			period: BlockNumberFor<T>,
630		) -> DispatchResult {
631			T::ScheduleOrigin::ensure_origin(origin.clone())?;
632			let origin = <T as Config>::RuntimeOrigin::from(origin);
633			let (when, agenda_index) = Lookup::<T>::get(&id).ok_or(Error::<T>::NotFound)?;
634			let agenda = Agenda::<T>::get(when);
635			let scheduled = agenda
636				.get(agenda_index as usize)
637				.and_then(Option::as_ref)
638				.ok_or(Error::<T>::NotFound)?;
639			Self::ensure_privilege(origin.caller(), &scheduled.origin)?;
640			Retries::<T>::insert(
641				(when, agenda_index),
642				RetryConfig { total_retries: retries, remaining: retries, period },
643			);
644			Self::deposit_event(Event::RetrySet {
645				task: (when, agenda_index),
646				id: Some(id),
647				period,
648				retries,
649			});
650			Ok(())
651		}
652
653		/// Removes the retry configuration of a task.
654		#[pallet::call_index(8)]
655		#[pallet::weight(<T as Config>::WeightInfo::cancel_retry())]
656		pub fn cancel_retry(
657			origin: OriginFor<T>,
658			task: TaskAddress<BlockNumberFor<T>>,
659		) -> DispatchResult {
660			T::ScheduleOrigin::ensure_origin(origin.clone())?;
661			let origin = <T as Config>::RuntimeOrigin::from(origin);
662			Self::do_cancel_retry(origin.caller(), task)?;
663			Self::deposit_event(Event::RetryCancelled { task, id: None });
664			Ok(())
665		}
666
667		/// Cancel the retry configuration of a named task.
668		#[pallet::call_index(9)]
669		#[pallet::weight(<T as Config>::WeightInfo::cancel_retry_named())]
670		pub fn cancel_retry_named(origin: OriginFor<T>, id: TaskName) -> DispatchResult {
671			T::ScheduleOrigin::ensure_origin(origin.clone())?;
672			let origin = <T as Config>::RuntimeOrigin::from(origin);
673			let task = Lookup::<T>::get(&id).ok_or(Error::<T>::NotFound)?;
674			Self::do_cancel_retry(origin.caller(), task)?;
675			Self::deposit_event(Event::RetryCancelled { task, id: Some(id) });
676			Ok(())
677		}
678	}
679}
680
681impl<T: Config> Pallet<T> {
682	/// Migrate storage format from V1 to V4.
683	///
684	/// Returns the weight consumed by this migration.
685	pub fn migrate_v1_to_v4() -> Weight {
686		use migration::v1 as old;
687		let mut weight = T::DbWeight::get().reads_writes(1, 1);
688
689		// Delete all undecodable values.
690		// `StorageMap::translate` is not enough since it just skips them and leaves the keys in.
691		let keys = old::Agenda::<T>::iter_keys().collect::<Vec<_>>();
692		for key in keys {
693			weight.saturating_accrue(T::DbWeight::get().reads(1));
694			if let Err(_) = old::Agenda::<T>::try_get(&key) {
695				weight.saturating_accrue(T::DbWeight::get().writes(1));
696				old::Agenda::<T>::remove(&key);
697				log::warn!("Deleted undecodable agenda");
698			}
699		}
700
701		Agenda::<T>::translate::<
702			Vec<Option<ScheduledV1<<T as Config>::RuntimeCall, BlockNumberFor<T>>>>,
703			_,
704		>(|_, agenda| {
705			Some(BoundedVec::truncate_from(
706				agenda
707					.into_iter()
708					.map(|schedule| {
709						weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1));
710
711						schedule.and_then(|schedule| {
712							if let Some(id) = schedule.maybe_id.as_ref() {
713								let name = blake2_256(id);
714								if let Some(item) = old::Lookup::<T>::take(id) {
715									Lookup::<T>::insert(name, item);
716								}
717								weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2));
718							}
719
720							let call = T::Preimages::bound(schedule.call).ok()?;
721
722							if call.lookup_needed() {
723								weight.saturating_accrue(T::DbWeight::get().reads_writes(0, 1));
724							}
725
726							Some(Scheduled {
727								maybe_id: schedule.maybe_id.map(|x| blake2_256(&x[..])),
728								priority: schedule.priority,
729								call,
730								maybe_periodic: schedule.maybe_periodic,
731								origin: system::RawOrigin::Root.into(),
732								_phantom: Default::default(),
733							})
734						})
735					})
736					.collect::<Vec<_>>(),
737			))
738		});
739
740		#[allow(deprecated)]
741		frame_support::storage::migration::remove_storage_prefix(
742			Self::name().as_bytes(),
743			b"StorageVersion",
744			&[],
745		);
746
747		StorageVersion::new(4).put::<Self>();
748
749		weight + T::DbWeight::get().writes(2)
750	}
751
752	/// Migrate storage format from V2 to V4.
753	///
754	/// Returns the weight consumed by this migration.
755	pub fn migrate_v2_to_v4() -> Weight {
756		use migration::v2 as old;
757		let mut weight = T::DbWeight::get().reads_writes(1, 1);
758
759		// Delete all undecodable values.
760		// `StorageMap::translate` is not enough since it just skips them and leaves the keys in.
761		let keys = old::Agenda::<T>::iter_keys().collect::<Vec<_>>();
762		for key in keys {
763			weight.saturating_accrue(T::DbWeight::get().reads(1));
764			if let Err(_) = old::Agenda::<T>::try_get(&key) {
765				weight.saturating_accrue(T::DbWeight::get().writes(1));
766				old::Agenda::<T>::remove(&key);
767				log::warn!("Deleted undecodable agenda");
768			}
769		}
770
771		Agenda::<T>::translate::<Vec<Option<ScheduledV2Of<T>>>, _>(|_, agenda| {
772			Some(BoundedVec::truncate_from(
773				agenda
774					.into_iter()
775					.map(|schedule| {
776						weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1));
777						schedule.and_then(|schedule| {
778							if let Some(id) = schedule.maybe_id.as_ref() {
779								let name = blake2_256(id);
780								if let Some(item) = old::Lookup::<T>::take(id) {
781									Lookup::<T>::insert(name, item);
782								}
783								weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2));
784							}
785
786							let call = T::Preimages::bound(schedule.call).ok()?;
787							if call.lookup_needed() {
788								weight.saturating_accrue(T::DbWeight::get().reads_writes(0, 1));
789							}
790
791							Some(Scheduled {
792								maybe_id: schedule.maybe_id.map(|x| blake2_256(&x[..])),
793								priority: schedule.priority,
794								call,
795								maybe_periodic: schedule.maybe_periodic,
796								origin: schedule.origin,
797								_phantom: Default::default(),
798							})
799						})
800					})
801					.collect::<Vec<_>>(),
802			))
803		});
804
805		#[allow(deprecated)]
806		frame_support::storage::migration::remove_storage_prefix(
807			Self::name().as_bytes(),
808			b"StorageVersion",
809			&[],
810		);
811
812		StorageVersion::new(4).put::<Self>();
813
814		weight + T::DbWeight::get().writes(2)
815	}
816
	/// Migrate storage format from V3 to V4.
	///
	/// Steps performed:
	/// 1. Remove every agenda that can no longer be decoded under the old `v3` layout
	///    (`translate` alone would skip such values but leave the keys behind).
	/// 2. Re-key named tasks: the old free-form id is hashed with `blake2_256` into the
	///    fixed-size `TaskName`, and the corresponding `Lookup` entry is moved over.
	/// 3. Re-bound every call through `T::Preimages` (legacy hashes become bounded lookups;
	///    by-value calls are bounded, possibly storing a new preimage).
	/// 4. Remove the deprecated per-pallet `StorageVersion` storage item and set the
	///    in-code storage version to 4.
	///
	/// NOTE(review): the rebuilt agendas use `BoundedVec::truncate_from`, which silently drops
	/// items beyond `MaxScheduledPerBlock` — confirm this is acceptable for all live chains.
	///
	/// Returns the weight consumed by this migration.
	#[allow(deprecated)]
	pub fn migrate_v3_to_v4() -> Weight {
		use migration::v3 as old;
		// Base cost: version read/write plus the initial storage-version bookkeeping.
		let mut weight = T::DbWeight::get().reads_writes(2, 1);

		// Delete all undecodable values.
		// `StorageMap::translate` is not enough since it just skips them and leaves the keys in.
		let blocks = old::Agenda::<T>::iter_keys().collect::<Vec<_>>();
		for block in blocks {
			weight.saturating_accrue(T::DbWeight::get().reads(1));
			if let Err(_) = old::Agenda::<T>::try_get(&block) {
				weight.saturating_accrue(T::DbWeight::get().writes(1));
				old::Agenda::<T>::remove(&block);
				log::warn!("Deleted undecodable agenda of block: {:?}", block);
			}
		}

		Agenda::<T>::translate::<Vec<Option<ScheduledV3Of<T>>>, _>(|block, agenda| {
			log::info!("Migrating agenda of block: {:?}", &block);
			Some(BoundedVec::truncate_from(
				agenda
					.into_iter()
					.map(|schedule| {
						weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1));
						schedule
							.and_then(|schedule| {
								// Move the `Lookup` entry from the old raw id to its hash.
								if let Some(id) = schedule.maybe_id.as_ref() {
									let name = blake2_256(id);
									if let Some(item) = old::Lookup::<T>::take(id) {
										Lookup::<T>::insert(name, item);
										log::info!("Migrated name for id: {:?}", id);
									} else {
										log::error!("No name in Lookup for id: {:?}", &id);
									}
									weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2));
								} else {
									log::info!("Schedule is unnamed");
								}

								let call = match schedule.call {
									MaybeHashed::Hash(h) => {
										let bounded = Bounded::from_legacy_hash(h);
										// Check that the call can be decoded in the new runtime.
										if let Err(err) = T::Preimages::peek::<
											<T as Config>::RuntimeCall,
										>(&bounded)
										{
											log::error!(
												"Dropping undecodable call {:?}: {:?}",
												&h,
												&err
											);
											return None;
										}
										weight.saturating_accrue(T::DbWeight::get().reads(1));
										log::info!("Migrated call by hash, hash: {:?}", h);
										bounded
									},
									MaybeHashed::Value(v) => {
										let call = T::Preimages::bound(v)
											.map_err(|e| {
												log::error!("Could not bound Call: {:?}", e)
											})
											.ok()?;
										// `lookup_needed` means a preimage was stored: one write.
										if call.lookup_needed() {
											weight.saturating_accrue(
												T::DbWeight::get().reads_writes(0, 1),
											);
										}
										log::info!(
											"Migrated call by value, hash: {:?}",
											call.hash()
										);
										call
									},
								};

								Some(Scheduled {
									maybe_id: schedule.maybe_id.map(|x| blake2_256(&x[..])),
									priority: schedule.priority,
									call,
									maybe_periodic: schedule.maybe_periodic,
									origin: schedule.origin,
									_phantom: Default::default(),
								})
							})
							.or_else(|| {
								log::info!("Schedule in agenda for block {:?} is empty - nothing to do here.", &block);
								None
							})
					})
					.collect::<Vec<_>>(),
			))
		});

		// The old per-pallet `StorageVersion` key is superseded by the in-code storage version.
		#[allow(deprecated)]
		frame_support::storage::migration::remove_storage_prefix(
			Self::name().as_bytes(),
			b"StorageVersion",
			&[],
		);

		StorageVersion::new(4).put::<Self>();

		weight + T::DbWeight::get().writes(2)
	}
926}
927
impl<T: Config> Pallet<T> {
	/// Helper to migrate scheduler when the pallet origin type has changed.
	///
	/// Re-decodes every agenda item with the `OldOrigin` type and re-encodes it after
	/// converting the origin via `Into<T::PalletsOrigin>`. Items that fail to decode are
	/// dropped by `translate`; the rebuilt vector is re-bounded with `truncate_from`.
	pub fn migrate_origin<OldOrigin: Into<T::PalletsOrigin> + codec::Decode>() {
		Agenda::<T>::translate::<
			Vec<
				Option<
					Scheduled<
						TaskName,
						BoundedCallOf<T>,
						BlockNumberFor<T>,
						OldOrigin,
						T::AccountId,
					>,
				>,
			>,
			_,
		>(|_, agenda| {
			Some(BoundedVec::truncate_from(
				agenda
					.into_iter()
					.map(|schedule| {
						schedule.map(|schedule| Scheduled {
							maybe_id: schedule.maybe_id,
							priority: schedule.priority,
							call: schedule.call,
							maybe_periodic: schedule.maybe_periodic,
							// The origin is the only field whose type changes.
							origin: schedule.origin.into(),
							_phantom: Default::default(),
						})
					})
					.collect::<Vec<_>>(),
			))
		});
	}

	/// Resolve a `DispatchTime` into a concrete block number strictly in the future.
	///
	/// `At(x)` is taken as-is; `After(x)` resolves to `now + x + 1`. Returns
	/// `Error::TargetBlockNumberInPast` if the resolved block is not later than `now`.
	fn resolve_time(
		when: DispatchTime<BlockNumberFor<T>>,
	) -> Result<BlockNumberFor<T>, DispatchError> {
		let now = T::BlockNumberProvider::current_block_number();
		let when = match when {
			DispatchTime::At(x) => x,
			// The current block has already completed its scheduled tasks, so
			// schedule the task at least one block after this current block.
			DispatchTime::After(x) => now.saturating_add(x).saturating_add(One::one()),
		};

		if when <= now {
			return Err(Error::<T>::TargetBlockNumberInPast.into());
		}

		Ok(when)
	}

	/// Insert `what` into the agenda for block `when`, register its name in `Lookup` (if
	/// named) and emit `Event::Scheduled`. On failure the task is handed back to the caller
	/// together with the error so it is not lost.
	fn place_task(
		when: BlockNumberFor<T>,
		what: ScheduledOf<T>,
	) -> Result<TaskAddress<BlockNumberFor<T>>, (DispatchError, ScheduledOf<T>)> {
		let maybe_name = what.maybe_id;
		let index = Self::push_to_agenda(when, what)?;
		let address = (when, index);
		if let Some(name) = maybe_name {
			Lookup::<T>::insert(name, address)
		}
		Self::deposit_event(Event::Scheduled { when: address.0, index: address.1 });
		Ok(address)
	}

	/// Append `what` to the agenda at `when`, or reuse the first `None` hole if the agenda is
	/// already at `MaxScheduledPerBlock`. Returns the index used, or `DispatchError::Exhausted`
	/// (with the task returned) when the agenda is completely full.
	fn push_to_agenda(
		when: BlockNumberFor<T>,
		what: ScheduledOf<T>,
	) -> Result<u32, (DispatchError, ScheduledOf<T>)> {
		let mut agenda = Agenda::<T>::get(when);
		let index = if (agenda.len() as u32) < T::MaxScheduledPerBlock::get() {
			// will always succeed due to the above check.
			let _ = agenda.try_push(Some(what));
			agenda.len() as u32 - 1
		} else {
			if let Some(hole_index) = agenda.iter().position(|i| i.is_none()) {
				agenda[hole_index] = Some(what);
				hole_index as u32
			} else {
				return Err((DispatchError::Exhausted, what));
			}
		};
		Agenda::<T>::insert(when, agenda);
		Ok(index)
	}

	/// Remove trailing `None` items of an agenda at `when`. If all items are `None` remove the
	/// agenda record entirely.
	fn cleanup_agenda(when: BlockNumberFor<T>) {
		let mut agenda = Agenda::<T>::get(when);
		match agenda.iter().rposition(|i| i.is_some()) {
			// Note that `agenda.len() > i + 1` implies that the agenda ends on a sequence of at
			// least one `None` item(s).
			Some(i) if agenda.len() > i + 1 => {
				agenda.truncate(i + 1);
				Agenda::<T>::insert(when, agenda);
			},
			// This branch is taken if `agenda.len() <= i + 1 ==> agenda.len() == i + 1 <==>
			// agenda.len() - 1 == i` i.e. the agenda's last item is `Some`.
			Some(_) => {},
			// All items in the agenda are `None`.
			None => {
				Agenda::<T>::remove(when);
			},
		}
	}

	/// Schedule an anonymous task at `when`.
	///
	/// A periodic spec `(period, count)` is dropped unless `count > 1` and `period > 0`; one
	/// repetition is subtracted up-front since the first occurrence is scheduled here. If the
	/// call is stored by hash, its preimage is requested after placement succeeds.
	fn do_schedule(
		when: DispatchTime<BlockNumberFor<T>>,
		maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
		priority: schedule::Priority,
		origin: T::PalletsOrigin,
		call: BoundedCallOf<T>,
	) -> Result<TaskAddress<BlockNumberFor<T>>, DispatchError> {
		let when = Self::resolve_time(when)?;

		let lookup_hash = call.lookup_hash();

		// sanitize maybe_periodic
		let maybe_periodic = maybe_periodic
			.filter(|p| p.1 > 1 && !p.0.is_zero())
			// Remove one from the number of repetitions since we will schedule one now.
			.map(|(p, c)| (p, c - 1));
		let task = Scheduled {
			maybe_id: None,
			priority,
			call,
			maybe_periodic,
			origin,
			_phantom: PhantomData,
		};
		let res = Self::place_task(when, task).map_err(|x| x.0)?;

		if let Some(hash) = lookup_hash {
			// Request the call to be made available.
			T::Preimages::request(&hash);
		}

		Ok(res)
	}

	/// Cancel the task at `(when, index)`.
	///
	/// With `origin == Some(o)`, `o` must have at least the task origin's privilege level.
	/// `origin == None` skips the privilege check (trusted/internal callers). Drops the call's
	/// preimage reference, removes any `Lookup` and `Retries` entries, compacts the agenda and
	/// emits `Event::Canceled`. Errors with `NotFound` if no task occupies that slot.
	fn do_cancel(
		origin: Option<T::PalletsOrigin>,
		(when, index): TaskAddress<BlockNumberFor<T>>,
	) -> Result<(), DispatchError> {
		let scheduled = Agenda::<T>::try_mutate(when, |agenda| {
			agenda.get_mut(index as usize).map_or(
				Ok(None),
				|s| -> Result<Option<Scheduled<_, _, _, _, _>>, DispatchError> {
					if let (Some(ref o), Some(ref s)) = (origin, s.borrow()) {
						Self::ensure_privilege(o, &s.origin)?;
					};
					Ok(s.take())
				},
			)
		})?;
		if let Some(s) = scheduled {
			T::Preimages::drop(&s.call);
			if let Some(id) = s.maybe_id {
				Lookup::<T>::remove(id);
			}
			Retries::<T>::remove((when, index));
			Self::cleanup_agenda(when);
			Self::deposit_event(Event::Canceled { when, index });
			Ok(())
		} else {
			return Err(Error::<T>::NotFound.into());
		}
	}

	/// Move the (unnamed) task at `(when, index)` to `new_time`.
	///
	/// Named tasks must use `do_reschedule_named` so `Lookup` stays consistent. Emits
	/// `Canceled` for the old slot and `Scheduled` (via `place_task`) for the new one.
	fn do_reschedule(
		(when, index): TaskAddress<BlockNumberFor<T>>,
		new_time: DispatchTime<BlockNumberFor<T>>,
	) -> Result<TaskAddress<BlockNumberFor<T>>, DispatchError> {
		let new_time = Self::resolve_time(new_time)?;

		if new_time == when {
			return Err(Error::<T>::RescheduleNoChange.into());
		}

		let task = Agenda::<T>::try_mutate(when, |agenda| {
			let task = agenda.get_mut(index as usize).ok_or(Error::<T>::NotFound)?;
			ensure!(!matches!(task, Some(Scheduled { maybe_id: Some(_), .. })), Error::<T>::Named);
			task.take().ok_or(Error::<T>::NotFound)
		})?;
		Self::cleanup_agenda(when);
		Self::deposit_event(Event::Canceled { when, index });

		Self::place_task(new_time, task).map_err(|x| x.0)
	}

	/// Schedule a named task; like `do_schedule` but registers `id` in `Lookup`.
	///
	/// Fails with `FailedToSchedule` if `id` is already in use.
	fn do_schedule_named(
		id: TaskName,
		when: DispatchTime<BlockNumberFor<T>>,
		maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
		priority: schedule::Priority,
		origin: T::PalletsOrigin,
		call: BoundedCallOf<T>,
	) -> Result<TaskAddress<BlockNumberFor<T>>, DispatchError> {
		// Ensure the id is unique.
		if Lookup::<T>::contains_key(&id) {
			return Err(Error::<T>::FailedToSchedule.into());
		}

		let when = Self::resolve_time(when)?;

		let lookup_hash = call.lookup_hash();

		// sanitize maybe_periodic
		let maybe_periodic = maybe_periodic
			.filter(|p| p.1 > 1 && !p.0.is_zero())
			// Remove one from the number of repetitions since we will schedule one now.
			.map(|(p, c)| (p, c - 1));

		let task = Scheduled {
			maybe_id: Some(id),
			priority,
			call,
			maybe_periodic,
			origin,
			_phantom: Default::default(),
		};
		let res = Self::place_task(when, task).map_err(|x| x.0)?;

		if let Some(hash) = lookup_hash {
			// Request the call to be made available.
			T::Preimages::request(&hash);
		}

		Ok(res)
	}

	/// Cancel a named task by its `Lookup` entry.
	///
	/// NOTE(review): when `origin` is `Some`, the privilege check runs and the call's preimage
	/// is dropped and `Retries` removed; when `origin` is `None` the slot is still cleared but
	/// the preimage is NOT dropped and `Retries` is NOT removed — asymmetric with `do_cancel`.
	/// Confirm this is intentional.
	fn do_cancel_named(origin: Option<T::PalletsOrigin>, id: TaskName) -> DispatchResult {
		Lookup::<T>::try_mutate_exists(id, |lookup| -> DispatchResult {
			if let Some((when, index)) = lookup.take() {
				let i = index as usize;
				Agenda::<T>::try_mutate(when, |agenda| -> DispatchResult {
					if let Some(s) = agenda.get_mut(i) {
						if let (Some(ref o), Some(ref s)) = (origin, s.borrow()) {
							Self::ensure_privilege(o, &s.origin)?;
							Retries::<T>::remove((when, index));
							T::Preimages::drop(&s.call);
						}
						*s = None;
					}
					Ok(())
				})?;
				Self::cleanup_agenda(when);
				Self::deposit_event(Event::Canceled { when, index });
				Ok(())
			} else {
				return Err(Error::<T>::NotFound.into());
			}
		})
	}

	/// Move the named task `id` to `new_time`; `Lookup` is updated by `place_task`.
	fn do_reschedule_named(
		id: TaskName,
		new_time: DispatchTime<BlockNumberFor<T>>,
	) -> Result<TaskAddress<BlockNumberFor<T>>, DispatchError> {
		let new_time = Self::resolve_time(new_time)?;

		let lookup = Lookup::<T>::get(id);
		let (when, index) = lookup.ok_or(Error::<T>::NotFound)?;

		if new_time == when {
			return Err(Error::<T>::RescheduleNoChange.into());
		}

		let task = Agenda::<T>::try_mutate(when, |agenda| {
			let task = agenda.get_mut(index as usize).ok_or(Error::<T>::NotFound)?;
			task.take().ok_or(Error::<T>::NotFound)
		})?;
		Self::cleanup_agenda(when);
		Self::deposit_event(Event::Canceled { when, index });
		Self::place_task(new_time, task).map_err(|x| x.0)
	}

	/// Remove the retry configuration of the task at `(when, index)`, after verifying that
	/// `origin` has sufficient privilege over the task's origin.
	fn do_cancel_retry(
		origin: &T::PalletsOrigin,
		(when, index): TaskAddress<BlockNumberFor<T>>,
	) -> Result<(), DispatchError> {
		let agenda = Agenda::<T>::get(when);
		let scheduled = agenda
			.get(index as usize)
			.and_then(Option::as_ref)
			.ok_or(Error::<T>::NotFound)?;
		Self::ensure_privilege(origin, &scheduled.origin)?;
		Retries::<T>::remove((when, index));
		Ok(())
	}
}
1222
/// Why a task could not be executed by `service_task`.
enum ServiceTaskError {
	/// Could not be executed due to missing preimage.
	Unavailable,
	/// Could not be executed due to weight limitations.
	Overweight,
}
// Bring the variants into scope so the servicing code can name them directly.
use ServiceTaskError::*;
1230
impl<T: Config> Pallet<T> {
	/// Service up to `max` agendas queue starting from earliest incompletely executed agenda.
	///
	/// Walks block-by-block from `IncompleteSince` (defaulting to `now`) up to `now`, servicing
	/// each agenda within the remaining `weight`. If any agenda could not be fully completed,
	/// `IncompleteSince` records the earliest such block so it is revisited next time.
	fn service_agendas(weight: &mut WeightMeter, now: BlockNumberFor<T>, max: u32) {
		if weight.try_consume(T::WeightInfo::service_agendas_base()).is_err() {
			return;
		}

		let mut incomplete_since = now + One::one();
		let mut when = IncompleteSince::<T>::take().unwrap_or(now);
		let mut is_first = true; // first task from the first agenda.

		let max_items = T::MaxScheduledPerBlock::get();
		let mut count_down = max;
		// Worst-case cost of one agenda pass, used as the loop-continuation budget check.
		let service_agenda_base_weight = T::WeightInfo::service_agenda_base(max_items);
		while count_down > 0 && when <= now && weight.can_consume(service_agenda_base_weight) {
			if !Self::service_agenda(weight, is_first, now, when, u32::MAX) {
				incomplete_since = incomplete_since.min(when);
			}
			is_first = false;
			when.saturating_inc();
			count_down.saturating_dec();
		}
		incomplete_since = incomplete_since.min(when);
		if incomplete_since <= now {
			Self::deposit_event(Event::AgendaIncomplete { when: incomplete_since });
			IncompleteSince::<T>::put(incomplete_since);
		} else {
			// The next scheduler iteration should typically start from `now + 1` (`next_iter_now`).
			// However, if the [`Config::BlockNumberProvider`] is not a local block number provider,
			// then `next_iter_now` could be `now + n` where `n > 1`. In this case, we want to start
			// from `now + 1` to ensure we don't miss any agendas.
			IncompleteSince::<T>::put(now + One::one());
		}
	}

	/// Returns `true` if the agenda was fully completed, `false` if it should be revisited at a
	/// later block.
	///
	/// Tasks are executed in ascending priority order, up to `max` of them. Tasks that cannot
	/// fit in the remaining weight are postponed (agenda kept); tasks whose preimage is
	/// unavailable are counted as dropped but their slot is kept as returned by `service_task`.
	fn service_agenda(
		weight: &mut WeightMeter,
		mut is_first: bool,
		now: BlockNumberFor<T>,
		when: BlockNumberFor<T>,
		max: u32,
	) -> bool {
		let mut agenda = Agenda::<T>::get(when);
		// Collect occupied slots as (index, priority) and sort by priority (lower runs first).
		let mut ordered = agenda
			.iter()
			.enumerate()
			.filter_map(|(index, maybe_item)| {
				maybe_item.as_ref().map(|item| (index as u32, item.priority))
			})
			.collect::<Vec<_>>();
		ordered.sort_by_key(|k| k.1);
		let within_limit = weight
			.try_consume(T::WeightInfo::service_agenda_base(ordered.len() as u32))
			.is_ok();
		debug_assert!(within_limit, "weight limit should have been checked in advance");

		// Items which we know can be executed and have postponed for execution in a later block.
		let mut postponed = (ordered.len() as u32).saturating_sub(max);
		// Items which we don't know can ever be executed.
		let mut dropped = 0;

		for (agenda_index, _) in ordered.into_iter().take(max as usize) {
			let Some(task) = agenda[agenda_index as usize].take() else { continue };
			let base_weight = T::WeightInfo::service_task(
				task.call.lookup_len().map(|x| x as usize),
				task.maybe_id.is_some(),
				task.maybe_periodic.is_some(),
			);
			if !weight.can_consume(base_weight) {
				postponed += 1;
				// Put the task back untouched; nothing after this can fit either.
				agenda[agenda_index as usize] = Some(task);
				break;
			}
			let result = Self::service_task(weight, now, when, agenda_index, is_first, task);
			agenda[agenda_index as usize] = match result {
				Err((Unavailable, slot)) => {
					dropped += 1;
					slot
				},
				Err((Overweight, slot)) => {
					postponed += 1;
					slot
				},
				Ok(()) => {
					is_first = false;
					None
				},
			};
		}
		if postponed > 0 || dropped > 0 {
			Agenda::<T>::insert(when, agenda);
		} else {
			Agenda::<T>::remove(when);
		}

		postponed == 0
	}

	/// Service (i.e. execute) the given task, being careful not to overflow the `weight` counter.
	///
	/// This involves:
	/// - removing and potentially replacing the `Lookup` entry for the task.
	/// - realizing the task's call which can include a preimage lookup.
	/// - Rescheduling the task for execution in a later agenda if periodic.
	fn service_task(
		weight: &mut WeightMeter,
		now: BlockNumberFor<T>,
		when: BlockNumberFor<T>,
		agenda_index: u32,
		is_first: bool,
		mut task: ScheduledOf<T>,
	) -> Result<(), (ServiceTaskError, Option<ScheduledOf<T>>)> {
		if let Some(ref id) = task.maybe_id {
			Lookup::<T>::remove(id);
		}

		let (call, lookup_len) = match T::Preimages::peek(&task.call) {
			Ok(c) => c,
			Err(_) => {
				Self::deposit_event(Event::CallUnavailable {
					task: (when, agenda_index),
					id: task.maybe_id,
				});

				// It was not available when we needed it, so we don't need to have requested it
				// anymore.
				T::Preimages::drop(&task.call);

				// We don't know why `peek` failed, thus we must account here for the "full weight".
				let _ = weight.try_consume(T::WeightInfo::service_task(
					task.call.lookup_len().map(|x| x as usize),
					task.maybe_id.is_some(),
					task.maybe_periodic.is_some(),
				));

				return Err((Unavailable, Some(task)));
			},
		};

		let _ = weight.try_consume(T::WeightInfo::service_task(
			lookup_len.map(|x| x as usize),
			task.maybe_id.is_some(),
			task.maybe_periodic.is_some(),
		));

		match Self::execute_dispatch(weight, task.origin.clone(), call) {
			// Overweight even as the very first task of the pass: it can never run — drop it.
			Err(()) if is_first => {
				T::Preimages::drop(&task.call);
				Self::deposit_event(Event::PermanentlyOverweight {
					task: (when, agenda_index),
					id: task.maybe_id,
				});
				Err((Unavailable, Some(task)))
			},
			// Overweight now, but a later (emptier) pass may have room — postpone.
			Err(()) => Err((Overweight, Some(task))),
			Ok(result) => {
				let failed = result.is_err();
				let maybe_retry_config = Retries::<T>::take((when, agenda_index));
				Self::deposit_event(Event::Dispatched {
					task: (when, agenda_index),
					id: task.maybe_id,
					result,
				});

				match maybe_retry_config {
					Some(retry_config) if failed => {
						Self::schedule_retry(weight, now, when, agenda_index, &task, retry_config);
					},
					_ => {},
				}

				if let &Some((period, count)) = &task.maybe_periodic {
					// Decrement the remaining-repetitions counter; `None` means last run done.
					if count > 1 {
						task.maybe_periodic = Some((period, count - 1));
					} else {
						task.maybe_periodic = None;
					}
					let wake = now.saturating_add(period);
					match Self::place_task(wake, task) {
						Ok(new_address) => {
							if let Some(retry_config) = maybe_retry_config {
								Retries::<T>::insert(new_address, retry_config);
							}
						},
						Err((_, task)) => {
							// TODO: Leave task in storage somewhere for it to be rescheduled
							// manually.
							T::Preimages::drop(&task.call);
							Self::deposit_event(Event::PeriodicFailed {
								task: (when, agenda_index),
								id: task.maybe_id,
							});
						},
					}
				} else {
					T::Preimages::drop(&task.call);
				}
				Ok(())
			},
		}
	}

	/// Make a dispatch to the given `call` from the given `origin`, ensuring that the `weight`
	/// counter does not exceed its limit and that it is counted accurately (e.g. accounted using
	/// post info if available).
	///
	/// NOTE: Only the weight for this function will be counted (origin lookup, dispatch and the
	/// call itself).
	///
	/// Returns an error if the call is overweight.
	fn execute_dispatch(
		weight: &mut WeightMeter,
		origin: T::PalletsOrigin,
		call: <T as Config>::RuntimeCall,
	) -> Result<DispatchResult, ()> {
		let base_weight = match origin.as_system_ref() {
			Some(&RawOrigin::Signed(_)) => T::WeightInfo::execute_dispatch_signed(),
			_ => T::WeightInfo::execute_dispatch_unsigned(),
		};
		let call_weight = call.get_dispatch_info().call_weight;
		// We only allow a scheduled call if it cannot push the weight past the limit.
		let max_weight = base_weight.saturating_add(call_weight);

		if !weight.can_consume(max_weight) {
			return Err(());
		}

		let dispatch_origin = origin.into();
		let (maybe_actual_call_weight, result) = match call.dispatch(dispatch_origin) {
			Ok(post_info) => (post_info.actual_weight, Ok(())),
			Err(error_and_info) => {
				(error_and_info.post_info.actual_weight, Err(error_and_info.error))
			},
		};
		// Prefer the post-dispatch actual weight when the call reported one.
		let call_weight = maybe_actual_call_weight.unwrap_or(call_weight);
		let _ = weight.try_consume(base_weight);
		let _ = weight.try_consume(call_weight);
		Ok(result)
	}

	/// Check if a task has a retry configuration in place and, if so, try to reschedule it.
	///
	/// Possible causes for failure to schedule a retry for a task:
	/// - there wasn't enough weight to run the task reschedule logic
	/// - there was no retry configuration in place
	/// - there were no more retry attempts left
	/// - the agenda was full.
	fn schedule_retry(
		weight: &mut WeightMeter,
		now: BlockNumberFor<T>,
		when: BlockNumberFor<T>,
		agenda_index: u32,
		task: &ScheduledOf<T>,
		retry_config: RetryConfig<BlockNumberFor<T>>,
	) {
		if weight
			.try_consume(T::WeightInfo::schedule_retry(T::MaxScheduledPerBlock::get()))
			.is_err()
		{
			Self::deposit_event(Event::RetryFailed {
				task: (when, agenda_index),
				id: task.maybe_id,
			});
			return;
		}

		let RetryConfig { total_retries, mut remaining, period } = retry_config;
		// No attempts left: silently give up (the task already emitted its `Dispatched` event).
		remaining = match remaining.checked_sub(1) {
			Some(n) => n,
			None => return,
		};
		let wake = now.saturating_add(period);
		match Self::place_task(wake, task.as_retry()) {
			Ok(address) => {
				// Reinsert the retry config to the new address of the task after it was
				// placed.
				Retries::<T>::insert(address, RetryConfig { total_retries, remaining, period });
			},
			Err((_, task)) => {
				// TODO: Leave task in storage somewhere for it to be
				// rescheduled manually.
				T::Preimages::drop(&task.call);
				Self::deposit_event(Event::RetryFailed {
					task: (when, agenda_index),
					id: task.maybe_id,
				});
			},
		}
	}

	/// Ensure that `left` has at least the same level of privilege or higher than `right`.
	///
	/// Returns an error if `left` has a lower level of privilege or the two cannot be compared.
	fn ensure_privilege(
		left: &<T as Config>::PalletsOrigin,
		right: &<T as Config>::PalletsOrigin,
	) -> Result<(), DispatchError> {
		if matches!(T::OriginPrivilegeCmp::cmp_privilege(left, right), Some(Ordering::Less) | None)
		{
			return Err(BadOrigin.into());
		}
		Ok(())
	}
}
1537
1538#[allow(deprecated)]
1539impl<T: Config> schedule::v2::Anon<BlockNumberFor<T>, <T as Config>::RuntimeCall, T::PalletsOrigin>
1540	for Pallet<T>
1541{
1542	type Address = TaskAddress<BlockNumberFor<T>>;
1543	type Hash = T::Hash;
1544
1545	fn schedule(
1546		when: DispatchTime<BlockNumberFor<T>>,
1547		maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
1548		priority: schedule::Priority,
1549		origin: T::PalletsOrigin,
1550		call: CallOrHashOf<T>,
1551	) -> Result<Self::Address, DispatchError> {
1552		let call = call.as_value().ok_or(DispatchError::CannotLookup)?;
1553		let call = T::Preimages::bound(call)?.transmute();
1554		Self::do_schedule(when, maybe_periodic, priority, origin, call)
1555	}
1556
1557	fn cancel((when, index): Self::Address) -> Result<(), ()> {
1558		Self::do_cancel(None, (when, index)).map_err(|_| ())
1559	}
1560
1561	fn reschedule(
1562		address: Self::Address,
1563		when: DispatchTime<BlockNumberFor<T>>,
1564	) -> Result<Self::Address, DispatchError> {
1565		Self::do_reschedule(address, when)
1566	}
1567
1568	fn next_dispatch_time((when, index): Self::Address) -> Result<BlockNumberFor<T>, ()> {
1569		Agenda::<T>::get(when).get(index as usize).ok_or(()).map(|_| when)
1570	}
1571}
1572
1573// TODO: migrate `schedule::v2::Anon` to `v3`
1574#[allow(deprecated)]
1575impl<T: Config> schedule::v2::Named<BlockNumberFor<T>, <T as Config>::RuntimeCall, T::PalletsOrigin>
1576	for Pallet<T>
1577{
1578	type Address = TaskAddress<BlockNumberFor<T>>;
1579	type Hash = T::Hash;
1580
1581	fn schedule_named(
1582		id: Vec<u8>,
1583		when: DispatchTime<BlockNumberFor<T>>,
1584		maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
1585		priority: schedule::Priority,
1586		origin: T::PalletsOrigin,
1587		call: CallOrHashOf<T>,
1588	) -> Result<Self::Address, ()> {
1589		let call = call.as_value().ok_or(())?;
1590		let call = T::Preimages::bound(call).map_err(|_| ())?.transmute();
1591		let name = blake2_256(&id[..]);
1592		Self::do_schedule_named(name, when, maybe_periodic, priority, origin, call).map_err(|_| ())
1593	}
1594
1595	fn cancel_named(id: Vec<u8>) -> Result<(), ()> {
1596		let name = blake2_256(&id[..]);
1597		Self::do_cancel_named(None, name).map_err(|_| ())
1598	}
1599
1600	fn reschedule_named(
1601		id: Vec<u8>,
1602		when: DispatchTime<BlockNumberFor<T>>,
1603	) -> Result<Self::Address, DispatchError> {
1604		let name = blake2_256(&id[..]);
1605		Self::do_reschedule_named(name, when)
1606	}
1607
1608	fn next_dispatch_time(id: Vec<u8>) -> Result<BlockNumberFor<T>, ()> {
1609		let name = blake2_256(&id[..]);
1610		Lookup::<T>::get(name)
1611			.and_then(|(when, index)| Agenda::<T>::get(when).get(index as usize).map(|_| when))
1612			.ok_or(())
1613	}
1614}
1615
1616impl<T: Config> schedule::v3::Anon<BlockNumberFor<T>, <T as Config>::RuntimeCall, T::PalletsOrigin>
1617	for Pallet<T>
1618{
1619	type Address = TaskAddress<BlockNumberFor<T>>;
1620	type Hasher = T::Hashing;
1621
1622	fn schedule(
1623		when: DispatchTime<BlockNumberFor<T>>,
1624		maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
1625		priority: schedule::Priority,
1626		origin: T::PalletsOrigin,
1627		call: BoundedCallOf<T>,
1628	) -> Result<Self::Address, DispatchError> {
1629		Self::do_schedule(when, maybe_periodic, priority, origin, call)
1630	}
1631
1632	fn cancel((when, index): Self::Address) -> Result<(), DispatchError> {
1633		Self::do_cancel(None, (when, index)).map_err(map_err_to_v3_err::<T>)
1634	}
1635
1636	fn reschedule(
1637		address: Self::Address,
1638		when: DispatchTime<BlockNumberFor<T>>,
1639	) -> Result<Self::Address, DispatchError> {
1640		Self::do_reschedule(address, when).map_err(map_err_to_v3_err::<T>)
1641	}
1642
1643	fn next_dispatch_time(
1644		(when, index): Self::Address,
1645	) -> Result<BlockNumberFor<T>, DispatchError> {
1646		Agenda::<T>::get(when)
1647			.get(index as usize)
1648			.ok_or(DispatchError::Unavailable)
1649			.map(|_| when)
1650	}
1651}
1652
1653use schedule::v3::TaskName;
1654
1655impl<T: Config> schedule::v3::Named<BlockNumberFor<T>, <T as Config>::RuntimeCall, T::PalletsOrigin>
1656	for Pallet<T>
1657{
1658	type Address = TaskAddress<BlockNumberFor<T>>;
1659	type Hasher = T::Hashing;
1660
1661	fn schedule_named(
1662		id: TaskName,
1663		when: DispatchTime<BlockNumberFor<T>>,
1664		maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
1665		priority: schedule::Priority,
1666		origin: T::PalletsOrigin,
1667		call: BoundedCallOf<T>,
1668	) -> Result<Self::Address, DispatchError> {
1669		Self::do_schedule_named(id, when, maybe_periodic, priority, origin, call)
1670	}
1671
1672	fn cancel_named(id: TaskName) -> Result<(), DispatchError> {
1673		Self::do_cancel_named(None, id).map_err(map_err_to_v3_err::<T>)
1674	}
1675
1676	fn reschedule_named(
1677		id: TaskName,
1678		when: DispatchTime<BlockNumberFor<T>>,
1679	) -> Result<Self::Address, DispatchError> {
1680		Self::do_reschedule_named(id, when).map_err(map_err_to_v3_err::<T>)
1681	}
1682
1683	fn next_dispatch_time(id: TaskName) -> Result<BlockNumberFor<T>, DispatchError> {
1684		Lookup::<T>::get(id)
1685			.and_then(|(when, index)| Agenda::<T>::get(when).get(index as usize).map(|_| when))
1686			.ok_or(DispatchError::Unavailable)
1687	}
1688}
1689
1690/// Maps a pallet error to an `schedule::v3` error.
1691fn map_err_to_v3_err<T: Config>(err: DispatchError) -> DispatchError {
1692	if err == DispatchError::from(Error::<T>::NotFound) {
1693		DispatchError::Unavailable
1694	} else {
1695		err
1696	}
1697}