// pallet_broker/lib.rs
1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: Apache-2.0
5
6// Licensed under the Apache License, Version 2.0 (the "License");
7// you may not use this file except in compliance with the License.
8// You may obtain a copy of the License at
9//
10// http://www.apache.org/licenses/LICENSE-2.0
11//
12// Unless required by applicable law or agreed to in writing, software
13// distributed under the License is distributed on an "AS IS" BASIS,
14// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15// See the License for the specific language governing permissions and
16// limitations under the License.
17
18#![cfg_attr(not(feature = "std"), no_std)]
19#![doc = include_str!("../README.md")]
20
21pub use pallet::*;
22
23mod adapt_price;
24mod benchmarking;
25mod core_mask;
26mod coretime_interface;
27mod dispatchable_impls;
28#[cfg(test)]
29mod mock;
30mod nonfungible_impl;
31#[cfg(test)]
32mod test_fungibles;
33#[cfg(test)]
34mod tests;
35mod tick_impls;
36mod types;
37mod utility_impls;
38
39pub mod migration;
40pub mod runtime_api;
41
42pub mod weights;
43pub use weights::WeightInfo;
44
45pub use adapt_price::*;
46pub use core_mask::*;
47pub use coretime_interface::*;
48pub use types::*;
49
50extern crate alloc;
51
52/// The log target for this pallet.
53const LOG_TARGET: &str = "runtime::broker";
54
55#[frame_support::pallet]
56pub mod pallet {
57 use super::*;
58 use alloc::vec::Vec;
59 use frame_support::{
60 pallet_prelude::{DispatchResult, DispatchResultWithPostInfo, *},
61 traits::{
62 fungible::{Balanced, Credit, Mutate},
63 BuildGenesisConfig, EnsureOrigin, OnUnbalanced,
64 },
65 PalletId,
66 };
67 use frame_system::pallet_prelude::*;
68 use sp_runtime::traits::{Convert, ConvertBack, MaybeConvert};
69
70 const STORAGE_VERSION: StorageVersion = StorageVersion::new(4);
71
72 #[pallet::pallet]
73 #[pallet::storage_version(STORAGE_VERSION)]
74 pub struct Pallet<T>(_);
75
	#[pallet::config]
	pub trait Config: frame_system::Config {
		/// The overarching runtime event type.
		#[allow(deprecated)]
		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;

		/// Weight information for all calls of this pallet.
		type WeightInfo: WeightInfo;

		/// Currency used to pay for Coretime.
		type Currency: Mutate<Self::AccountId> + Balanced<Self::AccountId>;

		/// The origin check needed for administering this pallet.
		type AdminOrigin: EnsureOrigin<Self::RuntimeOrigin>;

		/// What to do with any revenues collected from the sale of Coretime.
		type OnRevenue: OnUnbalanced<Credit<Self::AccountId, Self::Currency>>;

		/// Relay chain's Coretime API used to interact with and instruct the low-level scheduling
		/// system.
		type Coretime: CoretimeInterface;

		/// The algorithm to determine the next price on the basis of market performance.
		type PriceAdapter: AdaptPrice<BalanceOf<Self>>;

		/// Reversible conversion from local balance to Relay-chain balance. This will typically be
		/// the `Identity`, but provided just in case the chains use different representations.
		type ConvertBalance: Convert<BalanceOf<Self>, RelayBalanceOf<Self>>
			+ ConvertBack<BalanceOf<Self>, RelayBalanceOf<Self>>;

		/// Type used for getting the associated account of a task. This account is controlled by
		/// the task itself.
		type SovereignAccountOf: MaybeConvert<TaskId, Self::AccountId>;

		/// Identifier from which the internal Pot is generated.
		#[pallet::constant]
		type PalletId: Get<PalletId>;

		/// Number of Relay-chain blocks per timeslice.
		#[pallet::constant]
		type TimeslicePeriod: Get<RelayBlockNumberOf<Self>>;

		/// Maximum number of legacy leases.
		#[pallet::constant]
		type MaxLeasedCores: Get<u32>;

		/// Maximum number of system cores.
		#[pallet::constant]
		type MaxReservedCores: Get<u32>;

		/// Given that we are performing all auto-renewals in a single block, it has to be limited.
		#[pallet::constant]
		type MaxAutoRenewals: Get<u32>;

		/// The smallest amount of credits a user can purchase.
		///
		/// Needed to prevent spam attacks.
		#[pallet::constant]
		type MinimumCreditPurchase: Get<BalanceOf<Self>>;
	}
135
	/// The current configuration of this pallet.
	#[pallet::storage]
	pub type Configuration<T> = StorageValue<_, ConfigRecordOf<T>, OptionQuery>;

	/// The Polkadot Core reservations (generally tasked with the maintenance of System Chains).
	#[pallet::storage]
	pub type Reservations<T> = StorageValue<_, ReservationsRecordOf<T>, ValueQuery>;

	/// Force reservations that need to be inserted into the workplan at the next sale rotation.
	///
	/// They are automatically freed at the next sale rotation.
	#[pallet::storage]
	pub type ForceReservations<T> = StorageValue<_, ReservationsRecordOf<T>, ValueQuery>;

	/// The Polkadot Core legacy leases.
	#[pallet::storage]
	pub type Leases<T> = StorageValue<_, LeasesRecordOf<T>, ValueQuery>;

	/// The current status of miscellaneous subsystems of this pallet.
	#[pallet::storage]
	pub type Status<T> = StorageValue<_, StatusRecord, OptionQuery>;

	/// The details of the current sale, including its properties and status.
	#[pallet::storage]
	pub type SaleInfo<T> = StorageValue<_, SaleInfoRecordOf<T>, OptionQuery>;

	/// Records of potential renewals.
	///
	/// Renewals will only actually be allowed if `CompletionStatus` is actually `Complete`.
	#[pallet::storage]
	pub type PotentialRenewals<T> =
		StorageMap<_, Twox64Concat, PotentialRenewalId, PotentialRenewalRecordOf<T>, OptionQuery>;

	/// The current (unassigned or provisionally assigned) Regions.
	#[pallet::storage]
	pub type Regions<T> = StorageMap<_, Blake2_128Concat, RegionId, RegionRecordOf<T>, OptionQuery>;

	/// The work we plan on having each core do at a particular time in the future.
	#[pallet::storage]
	pub type Workplan<T> =
		StorageMap<_, Twox64Concat, (Timeslice, CoreIndex), Schedule, OptionQuery>;

	/// The current workload of each core. This gets updated with workplan as timeslices pass.
	#[pallet::storage]
	pub type Workload<T> = StorageMap<_, Twox64Concat, CoreIndex, Schedule, ValueQuery>;

	/// Record of a single contribution to the Instantaneous Coretime Pool.
	#[pallet::storage]
	pub type InstaPoolContribution<T> =
		StorageMap<_, Blake2_128Concat, RegionId, ContributionRecordOf<T>, OptionQuery>;

	/// Record of Coretime entering or leaving the Instantaneous Coretime Pool.
	#[pallet::storage]
	pub type InstaPoolIo<T> = StorageMap<_, Blake2_128Concat, Timeslice, PoolIoRecord, ValueQuery>;

	/// Total InstaPool rewards for each Timeslice and the number of core parts which contributed.
	#[pallet::storage]
	pub type InstaPoolHistory<T> =
		StorageMap<_, Blake2_128Concat, Timeslice, InstaPoolHistoryRecordOf<T>>;

	/// Received core count change from the relay chain.
	#[pallet::storage]
	pub type CoreCountInbox<T> = StorageValue<_, CoreIndex, OptionQuery>;

	/// Keeping track of cores which have auto-renewal enabled.
	///
	/// Sorted by `CoreIndex` to make the removal of cores from auto-renewal more efficient.
	#[pallet::storage]
	pub type AutoRenewals<T: Config> =
		StorageValue<_, BoundedVec<AutoRenewalRecord, T::MaxAutoRenewals>, ValueQuery>;

	/// Received revenue info from the relay chain.
	#[pallet::storage]
	pub type RevenueInbox<T> = StorageValue<_, OnDemandRevenueRecordOf<T>, OptionQuery>;
210
	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config> {
		/// A Region of Bulk Coretime has been purchased.
		Purchased {
			/// The identity of the purchaser.
			who: T::AccountId,
			/// The identity of the Region.
			region_id: RegionId,
			/// The price paid for this Region.
			price: BalanceOf<T>,
			/// The duration of the Region.
			duration: Timeslice,
		},
		/// The workload of a core has become renewable.
		Renewable {
			/// The core whose workload can be renewed.
			core: CoreIndex,
			/// The price at which the workload can be renewed.
			price: BalanceOf<T>,
			/// The time at which the workload would recommence of this renewal. The call to renew
			/// cannot happen before the beginning of the interlude prior to the sale for regions
			/// which begin at this time.
			begin: Timeslice,
			/// The actual workload which can be renewed.
			workload: Schedule,
		},
		/// A workload has been renewed.
		Renewed {
			/// The identity of the renewer.
			who: T::AccountId,
			/// The price paid for this renewal.
			price: BalanceOf<T>,
			/// The index of the core on which the `workload` was previously scheduled.
			old_core: CoreIndex,
			/// The index of the core on which the renewed `workload` has been scheduled.
			core: CoreIndex,
			/// The time at which the `workload` will begin on the `core`.
			begin: Timeslice,
			/// The number of timeslices for which this `workload` is newly scheduled.
			duration: Timeslice,
			/// The workload which was renewed.
			workload: Schedule,
		},
		/// Ownership of a Region has been transferred.
		Transferred {
			/// The Region which has been transferred.
			region_id: RegionId,
			/// The duration of the Region.
			duration: Timeslice,
			/// The old owner of the Region.
			old_owner: Option<T::AccountId>,
			/// The new owner of the Region.
			owner: Option<T::AccountId>,
		},
		/// A Region has been split into two non-overlapping Regions.
		Partitioned {
			/// The Region which was split.
			old_region_id: RegionId,
			/// The new Regions into which it became.
			new_region_ids: (RegionId, RegionId),
		},
		/// A Region has been converted into two overlapping Regions each of lesser regularity.
		Interlaced {
			/// The Region which was interlaced.
			old_region_id: RegionId,
			/// The new Regions into which it became.
			new_region_ids: (RegionId, RegionId),
		},
		/// A Region has been assigned to a particular task.
		Assigned {
			/// The Region which was assigned.
			region_id: RegionId,
			/// The duration of the assignment.
			duration: Timeslice,
			/// The task to which the Region was assigned.
			task: TaskId,
		},
		/// An assignment has been removed from the workplan.
		AssignmentRemoved {
			/// The Region which was removed from the workplan.
			region_id: RegionId,
		},
		/// A Region has been added to the Instantaneous Coretime Pool.
		Pooled {
			/// The Region which was added to the Instantaneous Coretime Pool.
			region_id: RegionId,
			/// The duration of the Region.
			duration: Timeslice,
		},
		/// A new number of cores has been requested.
		CoreCountRequested {
			/// The number of cores requested.
			core_count: CoreIndex,
		},
		/// The number of cores available for scheduling has changed.
		CoreCountChanged {
			/// The new number of cores available for scheduling.
			core_count: CoreIndex,
		},
		/// There is a new reservation for a workload.
		ReservationMade {
			/// The index of the reservation.
			index: u32,
			/// The workload of the reservation.
			workload: Schedule,
		},
		/// A reservation for a workload has been cancelled.
		ReservationCancelled {
			/// The index of the reservation which was cancelled.
			index: u32,
			/// The workload of the now cancelled reservation.
			workload: Schedule,
		},
		/// A new sale has been initialized.
		SaleInitialized {
			/// The relay block number at which the sale will/did start.
			sale_start: RelayBlockNumberOf<T>,
			/// The length in relay chain blocks of the Leadin Period (where the price is
			/// decreasing).
			leadin_length: RelayBlockNumberOf<T>,
			/// The price of Bulk Coretime at the beginning of the Leadin Period.
			start_price: BalanceOf<T>,
			/// The price of Bulk Coretime after the Leadin Period.
			end_price: BalanceOf<T>,
			/// The first timeslice of the Regions which are being sold in this sale.
			region_begin: Timeslice,
			/// The timeslice on which the Regions which are being sold in the sale terminate.
			/// (i.e. One after the last timeslice which the Regions control.)
			region_end: Timeslice,
			/// The number of cores we want to sell, ideally.
			ideal_cores_sold: CoreIndex,
			/// Number of cores which are/have been offered for sale.
			cores_offered: CoreIndex,
		},
		/// A new lease has been created.
		Leased {
			/// The task to which a core will be assigned.
			task: TaskId,
			/// The timeslice contained in the sale period after which this lease will
			/// self-terminate (and therefore the earliest timeslice at which the lease may no
			/// longer apply).
			until: Timeslice,
		},
		/// A lease has been removed.
		LeaseRemoved {
			/// The task to which a core was assigned.
			task: TaskId,
		},
		/// A lease is about to end.
		LeaseEnding {
			/// The task to which a core was assigned.
			task: TaskId,
			/// The timeslice at which the task will no longer be scheduled.
			when: Timeslice,
		},
		/// The sale rotation has been started and a new sale is imminent.
		SalesStarted {
			/// The nominal price of a Region of Bulk Coretime.
			price: BalanceOf<T>,
			/// The maximum number of cores which this pallet will attempt to assign.
			core_count: CoreIndex,
		},
		/// The act of claiming revenue has begun.
		RevenueClaimBegun {
			/// The region to be claimed for.
			region: RegionId,
			/// The maximum number of timeslices which should be searched for claims.
			max_timeslices: Timeslice,
		},
		/// A particular timeslice has a non-zero claim.
		RevenueClaimItem {
			/// The timeslice whose claim is being processed.
			when: Timeslice,
			/// The amount which was claimed at this timeslice.
			amount: BalanceOf<T>,
		},
		/// A revenue claim has (possibly only in part) been paid.
		RevenueClaimPaid {
			/// The account to whom revenue has been paid.
			who: T::AccountId,
			/// The total amount of revenue claimed and paid.
			amount: BalanceOf<T>,
			/// The next region which should be claimed for the continuation of this contribution.
			next: Option<RegionId>,
		},
		/// Some Instantaneous Coretime Pool credit has been purchased.
		CreditPurchased {
			/// The account which purchased the credit.
			who: T::AccountId,
			/// The Relay-chain account to which the credit will be made.
			beneficiary: RelayAccountIdOf<T>,
			/// The amount of credit purchased.
			amount: BalanceOf<T>,
		},
		/// A Region has been dropped due to being out of date.
		RegionDropped {
			/// The Region which no longer exists.
			region_id: RegionId,
			/// The duration of the Region.
			duration: Timeslice,
		},
		/// Some historical Instantaneous Core Pool contribution record has been dropped.
		ContributionDropped {
			/// The Region whose contribution no longer exists.
			region_id: RegionId,
		},
		/// A region has been force-removed from the pool. This is usually due to a provisionally
		/// pooled region being redeployed.
		RegionUnpooled {
			/// The Region which has been force-removed from the pool.
			region_id: RegionId,
			/// The timeslice at which the region was force-removed.
			when: Timeslice,
		},
		/// Some historical Instantaneous Core Pool payment record has been initialized.
		HistoryInitialized {
			/// The timeslice whose history has been initialized.
			when: Timeslice,
			/// The amount of privately contributed Coretime to the Instantaneous Coretime Pool.
			private_pool_size: CoreMaskBitCount,
			/// The amount of Coretime contributed to the Instantaneous Coretime Pool by the
			/// Polkadot System.
			system_pool_size: CoreMaskBitCount,
		},
		/// Some historical Instantaneous Core Pool payment record has been dropped.
		HistoryDropped {
			/// The timeslice whose history is no longer available.
			when: Timeslice,
			/// The amount of revenue the system has taken.
			revenue: BalanceOf<T>,
		},
		/// Some historical Instantaneous Core Pool payment record has been ignored because the
		/// timeslice was already known. Governance may need to intervene.
		HistoryIgnored {
			/// The timeslice whose history was ignored.
			when: Timeslice,
			/// The amount of revenue which was ignored.
			revenue: BalanceOf<T>,
		},
		/// Some historical Instantaneous Core Pool Revenue is ready for payout claims.
		ClaimsReady {
			/// The timeslice whose history is available.
			when: Timeslice,
			/// The amount of revenue the Polkadot System has already taken.
			system_payout: BalanceOf<T>,
			/// The total amount of revenue remaining to be claimed.
			private_payout: BalanceOf<T>,
		},
		/// A Core has been assigned to one or more tasks and/or the Pool on the Relay-chain.
		CoreAssigned {
			/// The index of the Core which has been assigned.
			core: CoreIndex,
			/// The Relay-chain block at which this assignment should take effect.
			when: RelayBlockNumberOf<T>,
			/// The workload to be done on the Core.
			assignment: Vec<(CoreAssignment, PartsOf57600)>,
		},
		/// Some historical Instantaneous Core Pool payment record has been dropped.
		PotentialRenewalDropped {
			/// The timeslice whose renewal is no longer available.
			when: Timeslice,
			/// The core whose workload is no longer available to be renewed for `when`.
			core: CoreIndex,
		},
		/// Auto-renewal has been enabled for a core.
		AutoRenewalEnabled {
			/// The core for which the renewal was enabled.
			core: CoreIndex,
			/// The task for which the renewal was enabled.
			task: TaskId,
		},
		/// Auto-renewal has been disabled for a core.
		AutoRenewalDisabled {
			/// The core for which the renewal was disabled.
			core: CoreIndex,
			/// The task for which the renewal was disabled.
			task: TaskId,
		},
		/// Failed to auto-renew a core, likely due to the payer account not being sufficiently
		/// funded.
		AutoRenewalFailed {
			/// The core for which the renewal failed.
			core: CoreIndex,
			/// The account which was supposed to pay for renewal.
			///
			/// If `None` it indicates that we failed to get the sovereign account of a task.
			payer: Option<T::AccountId>,
		},
		/// The auto-renewal limit has been reached upon renewing cores.
		///
		/// This should never happen, given that enable_auto_renew checks for this before enabling
		/// auto-renewal.
		AutoRenewalLimitReached,
		/// Failed to assign a force reservation due to no free cores available.
		ForceReservationFailed {
			/// The schedule that could not be assigned.
			schedule: Schedule,
		},
		/// Potential renewal was forcefully removed.
		PotentialRenewalRemoved {
			/// The core associated with the potential renewal that was removed.
			core: CoreIndex,
			/// The timeslice associated with the potential renewal that was removed.
			timeslice: Timeslice,
		},
	}
516
	#[pallet::error]
	#[derive(PartialEq)]
	pub enum Error<T> {
		/// The given region identity is not known.
		UnknownRegion,
		/// The owner of the region is not the origin.
		NotOwner,
		/// The pivot point of the partition at or after the end of the region.
		PivotTooLate,
		/// The pivot point of the partition at the beginning of the region.
		PivotTooEarly,
		/// The pivot mask for the interlacing is not contained within the region's interlace mask.
		ExteriorPivot,
		/// The pivot mask for the interlacing is void (and therefore unschedulable).
		VoidPivot,
		/// The pivot mask for the interlacing is complete (and therefore not a strict subset).
		CompletePivot,
		/// The workplan of the pallet's state is invalid. This indicates a state corruption.
		CorruptWorkplan,
		/// There is no sale happening currently.
		NoSales,
		/// The price limit is exceeded.
		Overpriced,
		/// There are no cores available.
		Unavailable,
		/// The sale limit has been reached.
		SoldOut,
		/// The renewal operation is not valid at the current time (it may become valid in the next
		/// sale).
		WrongTime,
		/// Invalid attempt to renew.
		NotAllowed,
		/// This pallet has not yet been initialized.
		Uninitialized,
		/// The purchase cannot happen yet as the sale period is yet to begin.
		TooEarly,
		/// There is no work to be done.
		NothingToDo,
		/// The maximum amount of reservations has already been reached.
		TooManyReservations,
		/// The maximum amount of leases has already been reached.
		TooManyLeases,
		/// The lease does not exist.
		LeaseNotFound,
		/// The revenue for the Instantaneous Core Sales of this period is not (yet) known and thus
		/// this operation cannot proceed.
		UnknownRevenue,
		/// The identified contribution to the Instantaneous Core Pool is unknown.
		UnknownContribution,
		/// The workload assigned for renewal is incomplete. This is unexpected and indicates a
		/// logic error.
		IncompleteAssignment,
		/// An item cannot be dropped because it is still valid.
		StillValid,
		/// The history item does not exist.
		NoHistory,
		/// No reservation of the given index exists.
		UnknownReservation,
		/// The renewal record cannot be found.
		UnknownRenewal,
		/// The lease expiry time has already passed.
		AlreadyExpired,
		/// The configuration could not be applied because it is invalid.
		InvalidConfig,
		/// The revenue must be claimed for 1 or more timeslices.
		NoClaimTimeslices,
		/// The caller doesn't have the permission to enable or disable auto-renewal.
		NoPermission,
		/// We reached the limit for auto-renewals.
		TooManyAutoRenewals,
		/// Only cores which are assigned to a task can be auto-renewed.
		NonTaskAutoRenewal,
		/// Failed to get the sovereign account of a task.
		SovereignAccountNotFound,
		/// Attempted to disable auto-renewal for a core that didn't have it enabled.
		AutoRenewalNotEnabled,
		/// Attempted to force remove an assignment that doesn't exist.
		AssignmentNotFound,
		/// The amount of credits the user attempted to purchase is below
		/// `T::MinimumCreditPurchase`. Needed to prevent spam attacks.
		CreditPurchaseTooSmall,
	}
599
	#[derive(frame_support::DefaultNoBound)]
	#[pallet::genesis_config]
	pub struct GenesisConfig<T: Config> {
		/// Unused marker so the genesis config is generic over `T`; skipped during
		/// (de)serialization.
		#[serde(skip)]
		pub _config: core::marker::PhantomData<T>,
	}
606
	#[pallet::genesis_build]
	impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
		fn build(&self) {
			// Give the pallet's own account a provider reference at genesis so the
			// account is treated as existing by the system pallet.
			frame_system::Pallet::<T>::inc_providers(&Pallet::<T>::account_id());
		}
	}
613
	#[pallet::hooks]
	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
		// Run the pallet's periodic bookkeeping (`do_tick`, defined in `tick_impls`)
		// at the start of every block; the weight consumed is returned by `do_tick`.
		fn on_initialize(_now: BlockNumberFor<T>) -> Weight {
			Self::do_tick()
		}
	}
620
621 #[pallet::call(weight(<T as Config>::WeightInfo))]
622 impl<T: Config> Pallet<T> {
623 /// Configure the pallet.
624 ///
625 /// - `origin`: Must be Root or pass `AdminOrigin`.
626 /// - `config`: The configuration for this pallet.
627 #[pallet::call_index(0)]
628 pub fn configure(
629 origin: OriginFor<T>,
630 config: ConfigRecordOf<T>,
631 ) -> DispatchResultWithPostInfo {
632 T::AdminOrigin::ensure_origin_or_root(origin)?;
633 Self::do_configure(config)?;
634 Ok(Pays::No.into())
635 }
636
637 /// Reserve a core for a workload.
638 ///
639 /// The workload will be given a reservation, but two sale period boundaries must pass
640 /// before the core is actually assigned.
641 ///
642 /// - `origin`: Must be Root or pass `AdminOrigin`.
643 /// - `workload`: The workload which should be permanently placed on a core.
644 #[pallet::call_index(1)]
645 pub fn reserve(origin: OriginFor<T>, workload: Schedule) -> DispatchResultWithPostInfo {
646 T::AdminOrigin::ensure_origin_or_root(origin)?;
647 Self::do_reserve(workload)?;
648 Ok(Pays::No.into())
649 }
650
651 /// Cancel a reservation for a workload.
652 ///
653 /// - `origin`: Must be Root or pass `AdminOrigin`.
654 /// - `item_index`: The index of the reservation. Usually this will also be the index of the
655 /// core on which the reservation has been scheduled. However, it is possible that if
656 /// other cores are reserved or unreserved in the same sale rotation that they won't
657 /// correspond, so it's better to look up the core properly in the `Reservations` storage.
658 #[pallet::call_index(2)]
659 pub fn unreserve(origin: OriginFor<T>, item_index: u32) -> DispatchResultWithPostInfo {
660 T::AdminOrigin::ensure_origin_or_root(origin)?;
661 Self::do_unreserve(item_index)?;
662 Ok(Pays::No.into())
663 }
664
665 /// Reserve a core for a single task workload for a limited period.
666 ///
667 /// In the interlude and sale period where Bulk Coretime is sold for the period immediately
668 /// after `until`, then the same workload may be renewed.
669 ///
670 /// - `origin`: Must be Root or pass `AdminOrigin`.
671 /// - `task`: The workload which should be placed on a core.
672 /// - `until`: The timeslice now earlier than which `task` should be placed as a workload on
673 /// a core.
674 #[pallet::call_index(3)]
675 pub fn set_lease(
676 origin: OriginFor<T>,
677 task: TaskId,
678 until: Timeslice,
679 ) -> DispatchResultWithPostInfo {
680 T::AdminOrigin::ensure_origin_or_root(origin)?;
681 Self::do_set_lease(task, until)?;
682 Ok(Pays::No.into())
683 }
684
685 /// Begin the Bulk Coretime sales rotation.
686 ///
687 /// - `origin`: Must be Root or pass `AdminOrigin`.
688 /// - `end_price`: The price after the leadin period of Bulk Coretime in the first sale.
689 /// - `extra_cores`: Number of extra cores that should be requested on top of the cores
690 /// required for `Reservations` and `Leases`.
691 ///
692 /// This will call [`Self::request_core_count`] internally to set the correct core count on
693 /// the relay chain.
694 #[pallet::call_index(4)]
695 #[pallet::weight(T::WeightInfo::start_sales(
696 T::MaxLeasedCores::get() + T::MaxReservedCores::get() + *extra_cores as u32
697 ))]
698 pub fn start_sales(
699 origin: OriginFor<T>,
700 end_price: BalanceOf<T>,
701 extra_cores: CoreIndex,
702 ) -> DispatchResultWithPostInfo {
703 T::AdminOrigin::ensure_origin_or_root(origin)?;
704 Self::do_start_sales(end_price, extra_cores)?;
705 Ok(Pays::No.into())
706 }
707
708 /// Purchase Bulk Coretime in the ongoing Sale.
709 ///
710 /// - `origin`: Must be a Signed origin with at least enough funds to pay the current price
711 /// of Bulk Coretime.
712 /// - `price_limit`: An amount no more than which should be paid.
713 #[pallet::call_index(5)]
714 pub fn purchase(
715 origin: OriginFor<T>,
716 price_limit: BalanceOf<T>,
717 ) -> DispatchResultWithPostInfo {
718 let who = ensure_signed(origin)?;
719 Self::do_purchase(who, price_limit)?;
720 Ok(Pays::No.into())
721 }
722
723 /// Renew Bulk Coretime in the ongoing Sale or its prior Interlude Period.
724 ///
725 /// - `origin`: Must be a Signed origin with at least enough funds to pay the renewal price
726 /// of the core.
727 /// - `core`: The core which should be renewed.
728 #[pallet::call_index(6)]
729 pub fn renew(origin: OriginFor<T>, core: CoreIndex) -> DispatchResultWithPostInfo {
730 let who = ensure_signed(origin)?;
731 Self::do_renew(who, core)?;
732 Ok(Pays::No.into())
733 }
734
735 /// Transfer a Bulk Coretime Region to a new owner.
736 ///
737 /// - `origin`: Must be a Signed origin of the account which owns the Region `region_id`.
738 /// - `region_id`: The Region whose ownership should change.
739 /// - `new_owner`: The new owner for the Region.
740 #[pallet::call_index(7)]
741 pub fn transfer(
742 origin: OriginFor<T>,
743 region_id: RegionId,
744 new_owner: T::AccountId,
745 ) -> DispatchResult {
746 let who = ensure_signed(origin)?;
747 Self::do_transfer(region_id, Some(who), new_owner)?;
748 Ok(())
749 }
750
751 /// Split a Bulk Coretime Region into two non-overlapping Regions at a particular time into
752 /// the region.
753 ///
754 /// - `origin`: Must be a Signed origin of the account which owns the Region `region_id`.
755 /// - `region_id`: The Region which should be partitioned into two non-overlapping Regions.
756 /// - `pivot`: The offset in time into the Region at which to make the split.
757 #[pallet::call_index(8)]
758 pub fn partition(
759 origin: OriginFor<T>,
760 region_id: RegionId,
761 pivot: Timeslice,
762 ) -> DispatchResult {
763 let who = ensure_signed(origin)?;
764 Self::do_partition(region_id, Some(who), pivot)?;
765 Ok(())
766 }
767
768 /// Split a Bulk Coretime Region into two wholly-overlapping Regions with complementary
769 /// interlace masks which together make up the original Region's interlace mask.
770 ///
771 /// - `origin`: Must be a Signed origin of the account which owns the Region `region_id`.
772 /// - `region_id`: The Region which should become two interlaced Regions of incomplete
773 /// regularity.
774 /// - `pivot`: The interlace mask of one of the two new regions (the other is its partial
775 /// complement).
776 #[pallet::call_index(9)]
777 pub fn interlace(
778 origin: OriginFor<T>,
779 region_id: RegionId,
780 pivot: CoreMask,
781 ) -> DispatchResult {
782 let who = ensure_signed(origin)?;
783 Self::do_interlace(region_id, Some(who), pivot)?;
784 Ok(())
785 }
786
787 /// Assign a Bulk Coretime Region to a task.
788 ///
789 /// - `origin`: Must be a Signed origin of the account which owns the Region `region_id`.
790 /// - `region_id`: The Region which should be assigned to the task.
791 /// - `task`: The task to assign.
792 /// - `finality`: Indication of whether this assignment is final (in which case it may be
793 /// eligible for renewal) or provisional (in which case it may be manipulated and/or
794 /// reassigned at a later stage).
795 #[pallet::call_index(10)]
796 pub fn assign(
797 origin: OriginFor<T>,
798 region_id: RegionId,
799 task: TaskId,
800 finality: Finality,
801 ) -> DispatchResultWithPostInfo {
802 let who = ensure_signed(origin)?;
803 Self::do_assign(region_id, Some(who), task, finality)?;
804 Ok(if finality == Finality::Final { Pays::No } else { Pays::Yes }.into())
805 }
806
807 /// Place a Bulk Coretime Region into the Instantaneous Coretime Pool.
808 ///
809 /// - `origin`: Must be a Signed origin of the account which owns the Region `region_id`.
810 /// - `region_id`: The Region which should be assigned to the Pool.
811 /// - `payee`: The account which is able to collect any revenue due for the usage of this
812 /// Coretime.
813 #[pallet::call_index(11)]
814 pub fn pool(
815 origin: OriginFor<T>,
816 region_id: RegionId,
817 payee: T::AccountId,
818 finality: Finality,
819 ) -> DispatchResultWithPostInfo {
820 let who = ensure_signed(origin)?;
821 Self::do_pool(region_id, Some(who), payee, finality)?;
822 Ok(if finality == Finality::Final { Pays::No } else { Pays::Yes }.into())
823 }
824
825 /// Claim the revenue owed from inclusion in the Instantaneous Coretime Pool.
826 ///
827 /// - `origin`: Must be a Signed origin.
828 /// - `region_id`: The Region which was assigned to the Pool.
829 /// - `max_timeslices`: The maximum number of timeslices which should be processed. This
830 /// must be greater than 0. This may affect the weight of the call but should be ideally
831 /// made equivalent to the length of the Region `region_id`. If less, further dispatches
832 /// will be required with the same `region_id` to claim revenue for the remainder.
833 #[pallet::call_index(12)]
834 #[pallet::weight(T::WeightInfo::claim_revenue(*max_timeslices))]
835 pub fn claim_revenue(
836 origin: OriginFor<T>,
837 region_id: RegionId,
838 max_timeslices: Timeslice,
839 ) -> DispatchResultWithPostInfo {
840 ensure_signed(origin)?;
841 Self::do_claim_revenue(region_id, max_timeslices)?;
842 Ok(Pays::No.into())
843 }
844
845 /// Purchase credit for use in the Instantaneous Coretime Pool.
846 ///
847 /// - `origin`: Must be a Signed origin able to pay at least `amount`.
848 /// - `amount`: The amount of credit to purchase.
849 /// - `beneficiary`: The account on the Relay-chain which controls the credit (generally
850 /// this will be the collator's hot wallet).
851 #[pallet::call_index(13)]
852 pub fn purchase_credit(
853 origin: OriginFor<T>,
854 amount: BalanceOf<T>,
855 beneficiary: RelayAccountIdOf<T>,
856 ) -> DispatchResult {
857 let who = ensure_signed(origin)?;
858 Self::do_purchase_credit(who, amount, beneficiary)?;
859 Ok(())
860 }
861
		/// Drop an expired Region from the chain.
		///
		/// - `origin`: Can be any kind of origin.
		/// - `region_id`: The Region which has expired.
		#[pallet::call_index(14)]
		pub fn drop_region(
			_origin: OriginFor<T>,
			region_id: RegionId,
		) -> DispatchResultWithPostInfo {
			// Permissionless: the origin is deliberately ignored, as removing expired
			// state benefits all chain users.
			Self::do_drop_region(region_id)?;
			// Successful cleanup is fee-less.
			Ok(Pays::No.into())
		}
874
		/// Drop an expired Instantaneous Pool Contribution record from the chain.
		///
		/// - `origin`: Can be any kind of origin.
		/// - `region_id`: The Region identifying the Pool Contribution which has expired.
		#[pallet::call_index(15)]
		pub fn drop_contribution(
			_origin: OriginFor<T>,
			region_id: RegionId,
		) -> DispatchResultWithPostInfo {
			// Permissionless cleanup; the origin is deliberately ignored.
			Self::do_drop_contribution(region_id)?;
			// Successful cleanup is fee-less.
			Ok(Pays::No.into())
		}
887
		/// Drop an expired Instantaneous Pool History record from the chain.
		///
		/// - `origin`: Can be any kind of origin.
		/// - `when`: The timeslice of the Pool History record which has expired.
		#[pallet::call_index(16)]
		pub fn drop_history(_origin: OriginFor<T>, when: Timeslice) -> DispatchResultWithPostInfo {
			// Permissionless cleanup; fee-less on success.
			Self::do_drop_history(when)?;
			Ok(Pays::No.into())
		}
897
		/// Drop an expired Allowed Renewal record from the chain.
		///
		/// - `origin`: Can be any kind of origin.
		/// - `core`: The core to which the expired renewal refers.
		/// - `when`: The timeslice to which the expired renewal refers. This must have passed.
		#[pallet::call_index(17)]
		pub fn drop_renewal(
			_origin: OriginFor<T>,
			core: CoreIndex,
			when: Timeslice,
		) -> DispatchResultWithPostInfo {
			// Permissionless cleanup; the origin is deliberately ignored.
			Self::do_drop_renewal(core, when)?;
			// Successful cleanup is fee-less.
			Ok(Pays::No.into())
		}
912
		/// Request a change to the number of cores available for scheduling work.
		///
		/// - `origin`: Must be Root or pass `AdminOrigin`.
		/// - `core_count`: The desired number of cores to be made available.
		#[pallet::call_index(18)]
		#[pallet::weight(T::WeightInfo::request_core_count((*core_count).into()))]
		pub fn request_core_count(origin: OriginFor<T>, core_count: CoreIndex) -> DispatchResult {
			T::AdminOrigin::ensure_origin_or_root(origin)?;
			// NOTE(review): delegated to `do_request_core_count`; presumably relayed to the
			// coretime provider asynchronously — confirm in `dispatchable_impls`.
			Self::do_request_core_count(core_count)?;
			Ok(())
		}
924
		/// Notify the pallet of a change in the number of cores which can be scheduled.
		///
		/// - `origin`: Must be Root or pass `AdminOrigin`.
		/// - `core_count`: The new number of cores.
		#[pallet::call_index(19)]
		#[pallet::weight(T::WeightInfo::notify_core_count())]
		pub fn notify_core_count(origin: OriginFor<T>, core_count: CoreIndex) -> DispatchResult {
			T::AdminOrigin::ensure_origin_or_root(origin)?;
			Self::do_notify_core_count(core_count)?;
			Ok(())
		}
932
		/// Deliver an on-demand revenue record to the pallet for processing.
		///
		/// - `origin`: Must be Root or pass `AdminOrigin`.
		/// - `revenue`: The on-demand revenue record to be processed.
		#[pallet::call_index(20)]
		#[pallet::weight(T::WeightInfo::notify_revenue())]
		pub fn notify_revenue(
			origin: OriginFor<T>,
			revenue: OnDemandRevenueRecordOf<T>,
		) -> DispatchResult {
			T::AdminOrigin::ensure_origin_or_root(origin)?;
			Self::do_notify_revenue(revenue)?;
			Ok(())
		}
943
944 /// Extrinsic for enabling auto renewal.
945 ///
946 /// Callable by the sovereign account of the task on the specified core. This account
947 /// will be charged at the start of every bulk period for renewing core time.
948 ///
949 /// - `origin`: Must be the sovereign account of the task
950 /// - `core`: The core to which the task to be renewed is currently assigned.
951 /// - `task`: The task for which we want to enable auto renewal.
952 /// - `workload_end_hint`: should be used when enabling auto-renewal for a core that is not
953 /// expiring in the upcoming bulk period (e.g., due to holding a lease) since it would be
954 /// inefficient to look up when the core expires to schedule the next renewal.
955 #[pallet::call_index(21)]
956 #[pallet::weight(T::WeightInfo::enable_auto_renew())]
957 pub fn enable_auto_renew(
958 origin: OriginFor<T>,
959 core: CoreIndex,
960 task: TaskId,
961 workload_end_hint: Option<Timeslice>,
962 ) -> DispatchResult {
963 let who = ensure_signed(origin)?;
964
965 let sovereign_account = T::SovereignAccountOf::maybe_convert(task)
966 .ok_or(Error::<T>::SovereignAccountNotFound)?;
967 // Only the sovereign account of a task can enable auto renewal for its own core.
968 ensure!(who == sovereign_account, Error::<T>::NoPermission);
969
970 Self::do_enable_auto_renew(sovereign_account, core, task, workload_end_hint)?;
971 Ok(())
972 }
973
974 /// Extrinsic for disabling auto renewal.
975 ///
976 /// Callable by the sovereign account of the task on the specified core.
977 ///
978 /// - `origin`: Must be the sovereign account of the task.
979 /// - `core`: The core for which we want to disable auto renewal.
980 /// - `task`: The task for which we want to disable auto renewal.
981 #[pallet::call_index(22)]
982 #[pallet::weight(T::WeightInfo::disable_auto_renew())]
983 pub fn disable_auto_renew(
984 origin: OriginFor<T>,
985 core: CoreIndex,
986 task: TaskId,
987 ) -> DispatchResult {
988 let who = ensure_signed(origin)?;
989
990 let sovereign_account = T::SovereignAccountOf::maybe_convert(task)
991 .ok_or(Error::<T>::SovereignAccountNotFound)?;
992 // Only the sovereign account of the task can disable auto-renewal.
993 ensure!(who == sovereign_account, Error::<T>::NoPermission);
994
995 Self::do_disable_auto_renew(core, task)?;
996
997 Ok(())
998 }
999
		/// Reserve a core for a workload immediately.
		///
		/// - `origin`: Must be Root or pass `AdminOrigin`.
		/// - `workload`: The workload which should be permanently placed on a core starting
		///   immediately.
		/// - `core`: The core to which the assignment should be made until the reservation takes
		///   effect. It is left to the caller to either add this new core or reassign any other
		///   tasks to this existing core.
		///
		/// This reserves the workload and then injects the workload into the Workplan for the next
		/// two sale periods. This overwrites any existing assignments for this core at the start of
		/// the next sale period.
		#[pallet::call_index(23)]
		pub fn force_reserve(
			origin: OriginFor<T>,
			workload: Schedule,
			core: CoreIndex,
		) -> DispatchResultWithPostInfo {
			T::AdminOrigin::ensure_origin_or_root(origin)?;
			Self::do_force_reserve(workload, core)?;
			// Privileged operation: no fee is charged on success.
			Ok(Pays::No.into())
		}
1022
1023 /// Remove a lease.
1024 ///
1025 /// - `origin`: Must be Root or pass `AdminOrigin`.
1026 /// - `task`: The task id of the lease which should be removed.
1027 #[pallet::call_index(24)]
1028 pub fn remove_lease(origin: OriginFor<T>, task: TaskId) -> DispatchResult {
1029 T::AdminOrigin::ensure_origin_or_root(origin)?;
1030 Self::do_remove_lease(task)
1031 }
1032
1033 /// Remove an assignment from the Workplan.
1034 ///
1035 /// - `origin`: Must be Root or pass `AdminOrigin`.
1036 /// - `region_id`: The Region to be removed from the workplan.
1037 #[pallet::call_index(26)]
1038 pub fn remove_assignment(origin: OriginFor<T>, region_id: RegionId) -> DispatchResult {
1039 T::AdminOrigin::ensure_origin_or_root(origin)?;
1040 Self::do_remove_assignment(region_id)
1041 }
1042
1043 /// Forcefully remove a potential renewal record from chain.
1044 ///
1045 /// Note that only the specified potential renewal will be removed while any related auto
1046 /// renewals will stay intact and will fail.
1047 ///
1048 /// - `origin`: Must be Root or pass `AdminOrigin`.
1049 /// - `core`: Core which the target potential renewal record refers to.
1050 /// - `when`: Timeslice which the target potential renewal record refers to.
1051 #[pallet::call_index(27)]
1052 pub fn remove_potential_renewal(
1053 origin: OriginFor<T>,
1054 core: CoreIndex,
1055 when: Timeslice,
1056 ) -> DispatchResult {
1057 T::AdminOrigin::ensure_origin_or_root(origin)?;
1058 Self::do_remove_potential_renewal(core, when)
1059 }
1060
		/// Transfer a Bulk Coretime Region to a new owner, ignoring the previous owner.
		///
		/// This can also be used to recover regions that have been "burned" (e.g., from an
		/// XCM reserve transfer).
		///
		/// - `origin`: Must be Root or pass `AdminOrigin`.
		/// - `region_id`: The Region whose ownership should change.
		/// - `new_owner`: The new owner for the Region.
		#[pallet::call_index(28)]
		pub fn force_transfer(
			origin: OriginFor<T>,
			region_id: RegionId,
			new_owner: T::AccountId,
		) -> DispatchResult {
			T::AdminOrigin::ensure_origin_or_root(origin)?;
			// `None` is passed as the current owner — presumably this bypasses the
			// ownership check inside `do_transfer`; confirm in `dispatchable_impls`.
			Self::do_transfer(region_id, None, new_owner)?;
			Ok(())
		}
1079
		/// Swap the leases of two tasks.
		///
		/// - `origin`: Must be Root or pass `AdminOrigin`.
		/// - `id`: The task id of the first lease to swap.
		/// - `other`: The task id of the second lease to swap.
		#[pallet::call_index(99)]
		#[pallet::weight(T::WeightInfo::swap_leases())]
		pub fn swap_leases(origin: OriginFor<T>, id: TaskId, other: TaskId) -> DispatchResult {
			T::AdminOrigin::ensure_origin_or_root(origin)?;
			Self::do_swap_leases(id, other)?;
			Ok(())
		}
1087 }
1088}