pallet_broker/lib.rs
1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: Apache-2.0
5
6// Licensed under the Apache License, Version 2.0 (the "License");
7// you may not use this file except in compliance with the License.
8// You may obtain a copy of the License at
9//
10// http://www.apache.org/licenses/LICENSE-2.0
11//
12// Unless required by applicable law or agreed to in writing, software
13// distributed under the License is distributed on an "AS IS" BASIS,
14// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15// See the License for the specific language governing permissions and
16// limitations under the License.
17
18#![cfg_attr(not(feature = "std"), no_std)]
19#![doc = include_str!("../README.md")]
20
21pub use pallet::*;
22
23mod adapt_price;
24mod benchmarking;
25mod core_mask;
26mod coretime_interface;
27mod dispatchable_impls;
28#[cfg(test)]
29mod mock;
30mod nonfungible_impl;
31#[cfg(test)]
32mod test_fungibles;
33#[cfg(test)]
34mod tests;
35mod tick_impls;
36mod types;
37mod utility_impls;
38
39pub mod migration;
40pub mod runtime_api;
41
42pub mod weights;
43pub use weights::WeightInfo;
44
45pub use adapt_price::*;
46pub use core_mask::*;
47pub use coretime_interface::*;
48pub use types::*;
49
50extern crate alloc;
51
/// The log target for this pallet.
///
/// Use as `log::debug!(target: LOG_TARGET, ...)` so operators can filter broker output.
const LOG_TARGET: &str = "runtime::broker";
54
55#[frame_support::pallet]
56pub mod pallet {
57 use super::*;
58 use alloc::vec::Vec;
59 use frame_support::{
60 pallet_prelude::{DispatchResult, DispatchResultWithPostInfo, *},
61 traits::{
62 fungible::{Balanced, Credit, Mutate},
63 BuildGenesisConfig, EnsureOrigin, OnUnbalanced,
64 },
65 PalletId,
66 };
67 use frame_system::pallet_prelude::*;
68 use sp_runtime::traits::{Convert, ConvertBack, MaybeConvert};
69
	/// The in-code storage version of this pallet.
	///
	/// Bumped whenever the storage layout changes; see the `migration` module for the
	/// corresponding migrations.
	const STORAGE_VERSION: StorageVersion = StorageVersion::new(4);

	/// The pallet struct; all pallet items (storage, calls, hooks) hang off this type.
	#[pallet::pallet]
	#[pallet::storage_version(STORAGE_VERSION)]
	pub struct Pallet<T>(_);
75
	#[pallet::config]
	pub trait Config: frame_system::Config {
		/// The overarching runtime event type.
		// NOTE(review): `allow(deprecated)` presumably silences the deprecation of the
		// explicit `RuntimeEvent` bound in newer FRAME — confirm against the FRAME version.
		#[allow(deprecated)]
		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;

		/// Weight information for all calls of this pallet.
		type WeightInfo: WeightInfo;

		/// Currency used to pay for Coretime.
		type Currency: Mutate<Self::AccountId> + Balanced<Self::AccountId>;

		/// The origin check needed for administrating this pallet.
		type AdminOrigin: EnsureOrigin<Self::RuntimeOrigin>;

		/// What to do with any revenues collected from the sale of Coretime.
		type OnRevenue: OnUnbalanced<Credit<Self::AccountId, Self::Currency>>;

		/// Relay chain's Coretime API used to interact with and instruct the low-level scheduling
		/// system.
		type Coretime: CoretimeInterface;

		/// The algorithm to determine the next price on the basis of market performance.
		type PriceAdapter: AdaptPrice<BalanceOf<Self>>;

		/// Reversible conversion from local balance to Relay-chain balance. This will typically be
		/// the `Identity`, but provided just in case the chains use different representations.
		type ConvertBalance: Convert<BalanceOf<Self>, RelayBalanceOf<Self>>
			+ ConvertBack<BalanceOf<Self>, RelayBalanceOf<Self>>;

		/// Type used for getting the associated account of a task. This account is controlled by
		/// the task itself.
		type SovereignAccountOf: MaybeConvert<TaskId, Self::AccountId>;

		/// Identifier from which the internal Pot is generated.
		#[pallet::constant]
		type PalletId: Get<PalletId>;

		/// Number of Relay-chain blocks per timeslice.
		#[pallet::constant]
		type TimeslicePeriod: Get<RelayBlockNumberOf<Self>>;

		/// Maximum number of legacy leases.
		#[pallet::constant]
		type MaxLeasedCores: Get<u32>;

		/// Maximum number of system cores.
		#[pallet::constant]
		type MaxReservedCores: Get<u32>;

		/// Given that we are performing all auto-renewals in a single block, it has to be limited.
		#[pallet::constant]
		type MaxAutoRenewals: Get<u32>;

		/// The smallest amount of credits a user can purchase.
		///
		/// Needed to prevent spam attacks.
		#[pallet::constant]
		type MinimumCreditPurchase: Get<BalanceOf<Self>>;
	}
135
	/// The current configuration of this pallet.
	#[pallet::storage]
	pub type Configuration<T> = StorageValue<_, ConfigRecordOf<T>, OptionQuery>;

	/// The Polkadot Core reservations (generally tasked with the maintenance of System Chains).
	#[pallet::storage]
	pub type Reservations<T> = StorageValue<_, ReservationsRecordOf<T>, ValueQuery>;

	/// Force reservations that need to be inserted into the workplan at the next sale rotation.
	///
	/// They are automatically freed at the next sale rotation.
	#[pallet::storage]
	pub type ForceReservations<T> = StorageValue<_, ReservationsRecordOf<T>, ValueQuery>;

	/// The Polkadot Core legacy leases.
	#[pallet::storage]
	pub type Leases<T> = StorageValue<_, LeasesRecordOf<T>, ValueQuery>;

	/// The current status of miscellaneous subsystems of this pallet.
	#[pallet::storage]
	pub type Status<T> = StorageValue<_, StatusRecord, OptionQuery>;

	/// The details of the current sale, including its properties and status.
	#[pallet::storage]
	pub type SaleInfo<T> = StorageValue<_, SaleInfoRecordOf<T>, OptionQuery>;

	/// Records of potential renewals.
	///
	/// Renewals will only actually be allowed if `CompletionStatus` is actually `Complete`.
	#[pallet::storage]
	pub type PotentialRenewals<T> =
		StorageMap<_, Twox64Concat, PotentialRenewalId, PotentialRenewalRecordOf<T>, OptionQuery>;

	/// The current (unassigned or provisionally assigned) Regions.
	#[pallet::storage]
	pub type Regions<T> = StorageMap<_, Blake2_128Concat, RegionId, RegionRecordOf<T>, OptionQuery>;

	/// The work we plan on having each core do at a particular time in the future.
	#[pallet::storage]
	pub type Workplan<T> =
		StorageMap<_, Twox64Concat, (Timeslice, CoreIndex), Schedule, OptionQuery>;

	/// The current workload of each core. This gets updated with workplan as timeslices pass.
	#[pallet::storage]
	pub type Workload<T> = StorageMap<_, Twox64Concat, CoreIndex, Schedule, ValueQuery>;

	/// Record of a single contribution to the Instantaneous Coretime Pool.
	#[pallet::storage]
	pub type InstaPoolContribution<T> =
		StorageMap<_, Blake2_128Concat, RegionId, ContributionRecordOf<T>, OptionQuery>;

	/// Record of Coretime entering or leaving the Instantaneous Coretime Pool.
	#[pallet::storage]
	pub type InstaPoolIo<T> = StorageMap<_, Blake2_128Concat, Timeslice, PoolIoRecord, ValueQuery>;

	/// Total InstaPool rewards for each Timeslice and the number of core parts which contributed.
	// Query kind defaults to `OptionQuery`.
	#[pallet::storage]
	pub type InstaPoolHistory<T> =
		StorageMap<_, Blake2_128Concat, Timeslice, InstaPoolHistoryRecordOf<T>>;

	/// Received core count change from the relay chain.
	#[pallet::storage]
	pub type CoreCountInbox<T> = StorageValue<_, CoreIndex, OptionQuery>;

	/// Keeping track of cores which have auto-renewal enabled.
	///
	/// Sorted by `CoreIndex` to make the removal of cores from auto-renewal more efficient.
	#[pallet::storage]
	pub type AutoRenewals<T: Config> =
		StorageValue<_, BoundedVec<AutoRenewalRecord, T::MaxAutoRenewals>, ValueQuery>;

	/// Received revenue info from the relay chain.
	#[pallet::storage]
	pub type RevenueInbox<T> = StorageValue<_, OnDemandRevenueRecordOf<T>, OptionQuery>;
210
	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config> {
		/// A Region of Bulk Coretime has been purchased.
		Purchased {
			/// The identity of the purchaser.
			who: T::AccountId,
			/// The identity of the Region.
			region_id: RegionId,
			/// The price paid for this Region.
			price: BalanceOf<T>,
			/// The duration of the Region.
			duration: Timeslice,
		},
		/// The workload of a core has become renewable.
		Renewable {
			/// The core whose workload can be renewed.
			core: CoreIndex,
			/// The price at which the workload can be renewed.
			price: BalanceOf<T>,
			/// The time at which the workload would recommence of this renewal. The call to renew
			/// cannot happen before the beginning of the interlude prior to the sale for regions
			/// which begin at this time.
			begin: Timeslice,
			/// The actual workload which can be renewed.
			workload: Schedule,
		},
		/// A workload has been renewed.
		Renewed {
			/// The identity of the renewer.
			who: T::AccountId,
			/// The price paid for this renewal.
			price: BalanceOf<T>,
			/// The index of the core on which the `workload` was previously scheduled.
			old_core: CoreIndex,
			/// The index of the core on which the renewed `workload` has been scheduled.
			core: CoreIndex,
			/// The time at which the `workload` will begin on the `core`.
			begin: Timeslice,
			/// The number of timeslices for which this `workload` is newly scheduled.
			duration: Timeslice,
			/// The workload which was renewed.
			workload: Schedule,
		},
		/// Ownership of a Region has been transferred.
		Transferred {
			/// The Region which has been transferred.
			region_id: RegionId,
			/// The duration of the Region.
			duration: Timeslice,
			/// The old owner of the Region.
			old_owner: Option<T::AccountId>,
			/// The new owner of the Region.
			owner: Option<T::AccountId>,
		},
		/// A Region has been split into two non-overlapping Regions.
		Partitioned {
			/// The Region which was split.
			old_region_id: RegionId,
			/// The new Regions into which it became.
			new_region_ids: (RegionId, RegionId),
		},
		/// A Region has been converted into two overlapping Regions each of lesser regularity.
		Interlaced {
			/// The Region which was interlaced.
			old_region_id: RegionId,
			/// The new Regions into which it became.
			new_region_ids: (RegionId, RegionId),
		},
		/// A Region has been assigned to a particular task.
		Assigned {
			/// The Region which was assigned.
			region_id: RegionId,
			/// The duration of the assignment.
			duration: Timeslice,
			/// The task to which the Region was assigned.
			task: TaskId,
		},
		/// An assignment has been removed from the workplan.
		AssignmentRemoved {
			/// The Region which was removed from the workplan.
			region_id: RegionId,
		},
		/// A Region has been added to the Instantaneous Coretime Pool.
		Pooled {
			/// The Region which was added to the Instantaneous Coretime Pool.
			region_id: RegionId,
			/// The duration of the Region.
			duration: Timeslice,
		},
		/// A new number of cores has been requested.
		CoreCountRequested {
			/// The number of cores requested.
			core_count: CoreIndex,
		},
		/// The number of cores available for scheduling has changed.
		CoreCountChanged {
			/// The new number of cores available for scheduling.
			core_count: CoreIndex,
		},
		/// There is a new reservation for a workload.
		ReservationMade {
			/// The index of the reservation.
			index: u32,
			/// The workload of the reservation.
			workload: Schedule,
		},
		/// A reservation for a workload has been cancelled.
		ReservationCancelled {
			/// The index of the reservation which was cancelled.
			index: u32,
			/// The workload of the now cancelled reservation.
			workload: Schedule,
		},
		/// A new sale has been initialized.
		SaleInitialized {
			/// The relay block number at which the sale will/did start.
			sale_start: RelayBlockNumberOf<T>,
			/// The length in relay chain blocks of the Leadin Period (where the price is
			/// decreasing).
			leadin_length: RelayBlockNumberOf<T>,
			/// The price of Bulk Coretime at the beginning of the Leadin Period.
			start_price: BalanceOf<T>,
			/// The price of Bulk Coretime after the Leadin Period.
			end_price: BalanceOf<T>,
			/// The first timeslice of the Regions which are being sold in this sale.
			region_begin: Timeslice,
			/// The timeslice on which the Regions which are being sold in the sale terminate.
			/// (i.e. One after the last timeslice which the Regions control.)
			region_end: Timeslice,
			/// The number of cores we want to sell, ideally.
			ideal_cores_sold: CoreIndex,
			/// Number of cores which are/have been offered for sale.
			cores_offered: CoreIndex,
		},
		/// A new lease has been created.
		Leased {
			/// The task to which a core will be assigned.
			task: TaskId,
			/// The timeslice contained in the sale period after which this lease will
			/// self-terminate (and therefore the earliest timeslice at which the lease may no
			/// longer apply).
			until: Timeslice,
		},
		/// A lease has been removed.
		LeaseRemoved {
			/// The task to which a core was assigned.
			task: TaskId,
		},
		/// A lease is about to end.
		LeaseEnding {
			/// The task to which a core was assigned.
			task: TaskId,
			/// The timeslice at which the task will no longer be scheduled.
			when: Timeslice,
		},
		/// The sale rotation has been started and a new sale is imminent.
		SalesStarted {
			/// The nominal price of a Region of Bulk Coretime.
			price: BalanceOf<T>,
			/// The maximum number of cores which this pallet will attempt to assign.
			core_count: CoreIndex,
		},
		/// The act of claiming revenue has begun.
		RevenueClaimBegun {
			/// The region to be claimed for.
			region: RegionId,
			/// The maximum number of timeslices which should be searched for claimed.
			max_timeslices: Timeslice,
		},
		/// A particular timeslice has a non-zero claim.
		RevenueClaimItem {
			/// The timeslice whose claim is being processed.
			when: Timeslice,
			/// The amount which was claimed at this timeslice.
			amount: BalanceOf<T>,
		},
		/// A revenue claim has (possibly only in part) been paid.
		RevenueClaimPaid {
			/// The account to whom revenue has been paid.
			who: T::AccountId,
			/// The total amount of revenue claimed and paid.
			amount: BalanceOf<T>,
			/// The next region which should be claimed for the continuation of this contribution.
			next: Option<RegionId>,
		},
		/// Some Instantaneous Coretime Pool credit has been purchased.
		CreditPurchased {
			/// The account which purchased the credit.
			who: T::AccountId,
			/// The Relay-chain account to which the credit will be made.
			beneficiary: RelayAccountIdOf<T>,
			/// The amount of credit purchased.
			amount: BalanceOf<T>,
		},
		/// A Region has been dropped due to being out of date.
		RegionDropped {
			/// The Region which no longer exists.
			region_id: RegionId,
			/// The duration of the Region.
			duration: Timeslice,
		},
		/// Some historical Instantaneous Core Pool contribution record has been dropped.
		ContributionDropped {
			/// The Region whose contribution no longer exists.
			region_id: RegionId,
		},
		/// A region has been force-removed from the pool. This is usually due to a provisionally
		/// pooled region being redeployed.
		RegionUnpooled {
			/// The Region which has been force-removed from the pool.
			region_id: RegionId,
			/// The timeslice at which the region was force-removed.
			when: Timeslice,
		},
		/// Some historical Instantaneous Core Pool payment record has been initialized.
		HistoryInitialized {
			/// The timeslice whose history has been initialized.
			when: Timeslice,
			/// The amount of privately contributed Coretime to the Instantaneous Coretime Pool.
			private_pool_size: CoreMaskBitCount,
			/// The amount of Coretime contributed to the Instantaneous Coretime Pool by the
			/// Polkadot System.
			system_pool_size: CoreMaskBitCount,
		},
		/// Some historical Instantaneous Core Pool payment record has been dropped.
		HistoryDropped {
			/// The timeslice whose history is no longer available.
			when: Timeslice,
			/// The amount of revenue the system has taken.
			revenue: BalanceOf<T>,
		},
		/// Some historical Instantaneous Core Pool payment record has been ignored because the
		/// timeslice was already known. Governance may need to intervene.
		HistoryIgnored {
			/// The timeslice whose history was ignored.
			when: Timeslice,
			/// The amount of revenue which was ignored.
			revenue: BalanceOf<T>,
		},
		/// Some historical Instantaneous Core Pool Revenue is ready for payout claims.
		ClaimsReady {
			/// The timeslice whose history is available.
			when: Timeslice,
			/// The amount of revenue the Polkadot System has already taken.
			system_payout: BalanceOf<T>,
			/// The total amount of revenue remaining to be claimed.
			private_payout: BalanceOf<T>,
		},
		/// A Core has been assigned to one or more tasks and/or the Pool on the Relay-chain.
		CoreAssigned {
			/// The index of the Core which has been assigned.
			core: CoreIndex,
			/// The Relay-chain block at which this assignment should take effect.
			when: RelayBlockNumberOf<T>,
			/// The workload to be done on the Core.
			assignment: Vec<(CoreAssignment, PartsOf57600)>,
		},
		/// A potential renewal record has been dropped due to being out of date.
		PotentialRenewalDropped {
			/// The timeslice whose renewal is no longer available.
			when: Timeslice,
			/// The core whose workload is no longer available to be renewed for `when`.
			core: CoreIndex,
		},
		/// Auto-renewal has been enabled for a core.
		AutoRenewalEnabled {
			/// The core for which the renewal was enabled.
			core: CoreIndex,
			/// The task for which the renewal was enabled.
			task: TaskId,
		},
		/// Auto-renewal has been disabled for a core.
		AutoRenewalDisabled {
			/// The core for which the renewal was disabled.
			core: CoreIndex,
			/// The task for which the renewal was disabled.
			task: TaskId,
		},
		/// Failed to auto-renew a core, likely due to the payer account not being sufficiently
		/// funded.
		AutoRenewalFailed {
			/// The core for which the renewal failed.
			core: CoreIndex,
			/// The account which was supposed to pay for renewal.
			///
			/// If `None` it indicates that we failed to get the sovereign account of a task.
			payer: Option<T::AccountId>,
		},
		/// The auto-renewal limit has been reached upon renewing cores.
		///
		/// This should never happen, given that `enable_auto_renew` checks for this before
		/// enabling auto-renewal.
		AutoRenewalLimitReached,
		/// Failed to assign a force reservation due to no free cores available.
		ForceReservationFailed {
			/// The schedule that could not be assigned.
			schedule: Schedule,
		},
	}
509
	#[pallet::error]
	#[derive(PartialEq)]
	pub enum Error<T> {
		/// The given region identity is not known.
		UnknownRegion,
		/// The owner of the region is not the origin.
		NotOwner,
		/// The pivot point of the partition is at or after the end of the region.
		PivotTooLate,
		/// The pivot point of the partition is at the beginning of the region.
		PivotTooEarly,
		/// The pivot mask for the interlacing is not contained within the region's interlace mask.
		ExteriorPivot,
		/// The pivot mask for the interlacing is void (and therefore unschedulable).
		VoidPivot,
		/// The pivot mask for the interlacing is complete (and therefore not a strict subset).
		CompletePivot,
		/// The workplan of the pallet's state is invalid. This indicates a state corruption.
		CorruptWorkplan,
		/// There is no sale happening currently.
		NoSales,
		/// The price limit is exceeded.
		Overpriced,
		/// There are no cores available.
		Unavailable,
		/// The sale limit has been reached.
		SoldOut,
		/// The renewal operation is not valid at the current time (it may become valid in the next
		/// sale).
		WrongTime,
		/// Invalid attempt to renew.
		NotAllowed,
		/// This pallet has not yet been initialized.
		Uninitialized,
		/// The purchase cannot happen yet as the sale period is yet to begin.
		TooEarly,
		/// There is no work to be done.
		NothingToDo,
		/// The maximum amount of reservations has already been reached.
		TooManyReservations,
		/// The maximum amount of leases has already been reached.
		TooManyLeases,
		/// The lease does not exist.
		LeaseNotFound,
		/// The revenue for the Instantaneous Core Sales of this period is not (yet) known and thus
		/// this operation cannot proceed.
		UnknownRevenue,
		/// The identified contribution to the Instantaneous Core Pool is unknown.
		UnknownContribution,
		/// The workload assigned for renewal is incomplete. This is unexpected and indicates a
		/// logic error.
		IncompleteAssignment,
		/// An item cannot be dropped because it is still valid.
		StillValid,
		/// The history item does not exist.
		NoHistory,
		/// No reservation of the given index exists.
		UnknownReservation,
		/// The renewal record cannot be found.
		UnknownRenewal,
		/// The lease expiry time has already passed.
		AlreadyExpired,
		/// The configuration could not be applied because it is invalid.
		InvalidConfig,
		/// The revenue must be claimed for 1 or more timeslices.
		NoClaimTimeslices,
		/// The caller doesn't have the permission to enable or disable auto-renewal.
		NoPermission,
		/// We reached the limit for auto-renewals.
		TooManyAutoRenewals,
		/// Only cores which are assigned to a task can be auto-renewed.
		NonTaskAutoRenewal,
		/// Failed to get the sovereign account of a task.
		SovereignAccountNotFound,
		/// Attempted to disable auto-renewal for a core that didn't have it enabled.
		AutoRenewalNotEnabled,
		/// Attempted to force remove an assignment that doesn't exist.
		AssignmentNotFound,
		/// The amount of credits the user attempted to purchase is below
		/// `T::MinimumCreditPurchase`. The minimum is needed to prevent spam attacks.
		CreditPurchaseTooSmall,
	}
592
	/// Genesis configuration of this pallet.
	///
	/// Carries no tunable settings; it exists so the genesis `build` hook can run.
	#[derive(frame_support::DefaultNoBound)]
	#[pallet::genesis_config]
	pub struct GenesisConfig<T: Config> {
		// Marker only — this pallet has no genesis parameters.
		#[serde(skip)]
		pub _config: core::marker::PhantomData<T>,
	}
599
	#[pallet::genesis_build]
	impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
		fn build(&self) {
			// Add a provider reference for the pallet's pot account so that `frame_system`
			// keeps the account alive even while its free balance is empty.
			frame_system::Pallet::<T>::inc_providers(&Pallet::<T>::account_id());
		}
	}
606
	#[pallet::hooks]
	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
		fn on_initialize(_now: BlockNumberFor<T>) -> Weight {
			// All per-block upkeep is delegated to `do_tick` (see the `tick_impls` module),
			// which returns the weight it consumed. The block number is unused: progress is
			// tracked in terms of Relay-chain timeslices, not local blocks.
			Self::do_tick()
		}
	}
613
614 #[pallet::call(weight(<T as Config>::WeightInfo))]
615 impl<T: Config> Pallet<T> {
		/// Configure the pallet.
		///
		/// - `origin`: Must be Root or pass `AdminOrigin`.
		/// - `config`: The configuration for this pallet.
		#[pallet::call_index(0)]
		pub fn configure(
			origin: OriginFor<T>,
			config: ConfigRecordOf<T>,
		) -> DispatchResultWithPostInfo {
			T::AdminOrigin::ensure_origin_or_root(origin)?;
			Self::do_configure(config)?;
			// Admin dispatches are fee-less on success.
			Ok(Pays::No.into())
		}
629
		/// Reserve a core for a workload.
		///
		/// The workload will be given a reservation, but two sale period boundaries must pass
		/// before the core is actually assigned.
		///
		/// - `origin`: Must be Root or pass `AdminOrigin`.
		/// - `workload`: The workload which should be permanently placed on a core.
		#[pallet::call_index(1)]
		pub fn reserve(origin: OriginFor<T>, workload: Schedule) -> DispatchResultWithPostInfo {
			T::AdminOrigin::ensure_origin_or_root(origin)?;
			Self::do_reserve(workload)?;
			// Admin dispatches are fee-less on success.
			Ok(Pays::No.into())
		}
643
		/// Cancel a reservation for a workload.
		///
		/// - `origin`: Must be Root or pass `AdminOrigin`.
		/// - `item_index`: The index of the reservation. Usually this will also be the index of the
		///   core on which the reservation has been scheduled. However, it is possible that if
		///   other cores are reserved or unreserved in the same sale rotation that they won't
		///   correspond, so it's better to look up the core properly in the `Reservations` storage.
		#[pallet::call_index(2)]
		pub fn unreserve(origin: OriginFor<T>, item_index: u32) -> DispatchResultWithPostInfo {
			T::AdminOrigin::ensure_origin_or_root(origin)?;
			Self::do_unreserve(item_index)?;
			// Admin dispatches are fee-less on success.
			Ok(Pays::No.into())
		}
657
		/// Reserve a core for a single task workload for a limited period.
		///
		/// In the interlude and sale period where Bulk Coretime is sold for the period immediately
		/// after `until`, then the same workload may be renewed.
		///
		/// - `origin`: Must be Root or pass `AdminOrigin`.
		/// - `task`: The workload which should be placed on a core.
		/// - `until`: The timeslice after which this lease self-terminates (and therefore the
		///   earliest timeslice at which the lease may no longer apply).
		#[pallet::call_index(3)]
		pub fn set_lease(
			origin: OriginFor<T>,
			task: TaskId,
			until: Timeslice,
		) -> DispatchResultWithPostInfo {
			T::AdminOrigin::ensure_origin_or_root(origin)?;
			Self::do_set_lease(task, until)?;
			// Admin dispatches are fee-less on success.
			Ok(Pays::No.into())
		}
677
		/// Begin the Bulk Coretime sales rotation.
		///
		/// - `origin`: Must be Root or pass `AdminOrigin`.
		/// - `end_price`: The price after the leadin period of Bulk Coretime in the first sale.
		/// - `extra_cores`: Number of extra cores that should be requested on top of the cores
		///   required for `Reservations` and `Leases`.
		///
		/// This will call [`Self::request_core_count`] internally to set the correct core count on
		/// the relay chain.
		#[pallet::call_index(4)]
		// Weight scales with the total number of cores the sale has to accommodate.
		#[pallet::weight(T::WeightInfo::start_sales(
			T::MaxLeasedCores::get() + T::MaxReservedCores::get() + *extra_cores as u32
		))]
		pub fn start_sales(
			origin: OriginFor<T>,
			end_price: BalanceOf<T>,
			extra_cores: CoreIndex,
		) -> DispatchResultWithPostInfo {
			T::AdminOrigin::ensure_origin_or_root(origin)?;
			Self::do_start_sales(end_price, extra_cores)?;
			// Admin dispatches are fee-less on success.
			Ok(Pays::No.into())
		}
700
		/// Purchase Bulk Coretime in the ongoing Sale.
		///
		/// - `origin`: Must be a Signed origin with at least enough funds to pay the current price
		///   of Bulk Coretime.
		/// - `price_limit`: An amount no more than which should be paid.
		#[pallet::call_index(5)]
		pub fn purchase(
			origin: OriginFor<T>,
			price_limit: BalanceOf<T>,
		) -> DispatchResultWithPostInfo {
			let who = ensure_signed(origin)?;
			Self::do_purchase(who, price_limit)?;
			// No dispatch fee on success; the purchase price itself is the payment.
			Ok(Pays::No.into())
		}
715
		/// Renew Bulk Coretime in the ongoing Sale or its prior Interlude Period.
		///
		/// - `origin`: Must be a Signed origin with at least enough funds to pay the renewal price
		///   of the core.
		/// - `core`: The core which should be renewed.
		#[pallet::call_index(6)]
		pub fn renew(origin: OriginFor<T>, core: CoreIndex) -> DispatchResultWithPostInfo {
			let who = ensure_signed(origin)?;
			Self::do_renew(who, core)?;
			// No dispatch fee on success; the renewal price itself is the payment.
			Ok(Pays::No.into())
		}
727
		/// Transfer a Bulk Coretime Region to a new owner.
		///
		/// - `origin`: Must be a Signed origin of the account which owns the Region `region_id`.
		/// - `region_id`: The Region whose ownership should change.
		/// - `new_owner`: The new owner for the Region.
		#[pallet::call_index(7)]
		pub fn transfer(
			origin: OriginFor<T>,
			region_id: RegionId,
			new_owner: T::AccountId,
		) -> DispatchResult {
			let who = ensure_signed(origin)?;
			// `Some(who)` enforces the ownership check inside `do_transfer`.
			Self::do_transfer(region_id, Some(who), new_owner)?;
			Ok(())
		}
743
		/// Split a Bulk Coretime Region into two non-overlapping Regions at a particular time into
		/// the region.
		///
		/// - `origin`: Must be a Signed origin of the account which owns the Region `region_id`.
		/// - `region_id`: The Region which should be partitioned into two non-overlapping Regions.
		/// - `pivot`: The offset in time into the Region at which to make the split.
		#[pallet::call_index(8)]
		pub fn partition(
			origin: OriginFor<T>,
			region_id: RegionId,
			pivot: Timeslice,
		) -> DispatchResult {
			let who = ensure_signed(origin)?;
			// `Some(who)` enforces the ownership check inside `do_partition`.
			Self::do_partition(region_id, Some(who), pivot)?;
			Ok(())
		}
760
		/// Split a Bulk Coretime Region into two wholly-overlapping Regions with complementary
		/// interlace masks which together make up the original Region's interlace mask.
		///
		/// - `origin`: Must be a Signed origin of the account which owns the Region `region_id`.
		/// - `region_id`: The Region which should become two interlaced Regions of incomplete
		///   regularity.
		/// - `pivot`: The interlace mask of one of the two new regions (the other is its partial
		///   complement).
		#[pallet::call_index(9)]
		pub fn interlace(
			origin: OriginFor<T>,
			region_id: RegionId,
			pivot: CoreMask,
		) -> DispatchResult {
			let who = ensure_signed(origin)?;
			// `Some(who)` enforces the ownership check inside `do_interlace`.
			Self::do_interlace(region_id, Some(who), pivot)?;
			Ok(())
		}
779
		/// Assign a Bulk Coretime Region to a task.
		///
		/// - `origin`: Must be a Signed origin of the account which owns the Region `region_id`.
		/// - `region_id`: The Region which should be assigned to the task.
		/// - `task`: The task to assign.
		/// - `finality`: Indication of whether this assignment is final (in which case it may be
		///   eligible for renewal) or provisional (in which case it may be manipulated and/or
		///   reassigned at a later stage).
		#[pallet::call_index(10)]
		pub fn assign(
			origin: OriginFor<T>,
			region_id: RegionId,
			task: TaskId,
			finality: Finality,
		) -> DispatchResultWithPostInfo {
			let who = ensure_signed(origin)?;
			Self::do_assign(region_id, Some(who), task, finality)?;
			// Final assignments are fee-less; provisional ones pay the regular fee.
			Ok(if finality == Finality::Final { Pays::No } else { Pays::Yes }.into())
		}
799
		/// Place a Bulk Coretime Region into the Instantaneous Coretime Pool.
		///
		/// - `origin`: Must be a Signed origin of the account which owns the Region `region_id`.
		/// - `region_id`: The Region which should be assigned to the Pool.
		/// - `payee`: The account which is able to collect any revenue due for the usage of this
		///   Coretime.
		/// - `finality`: Indication of whether this pooling is final or provisional; a final
		///   pooling is fee-less.
		#[pallet::call_index(11)]
		pub fn pool(
			origin: OriginFor<T>,
			region_id: RegionId,
			payee: T::AccountId,
			finality: Finality,
		) -> DispatchResultWithPostInfo {
			let who = ensure_signed(origin)?;
			Self::do_pool(region_id, Some(who), payee, finality)?;
			// Final poolings are fee-less; provisional ones pay the regular fee.
			Ok(if finality == Finality::Final { Pays::No } else { Pays::Yes }.into())
		}
817
		/// Claim the revenue owed from inclusion in the Instantaneous Coretime Pool.
		///
		/// - `origin`: Must be a Signed origin.
		/// - `region_id`: The Region which was assigned to the Pool.
		/// - `max_timeslices`: The maximum number of timeslices which should be processed. This
		///   must be greater than 0. This may affect the weight of the call but should be ideally
		///   made equivalent to the length of the Region `region_id`. If less, further dispatches
		///   will be required with the same `region_id` to claim revenue for the remainder.
		#[pallet::call_index(12)]
		// Weight scales with the number of timeslices scanned for claims.
		#[pallet::weight(T::WeightInfo::claim_revenue(*max_timeslices))]
		pub fn claim_revenue(
			origin: OriginFor<T>,
			region_id: RegionId,
			max_timeslices: Timeslice,
		) -> DispatchResultWithPostInfo {
			// Any signed origin may trigger the claim; the payout goes to the recorded payee.
			ensure_signed(origin)?;
			Self::do_claim_revenue(region_id, max_timeslices)?;
			Ok(Pays::No.into())
		}
837
		/// Purchase credit for use in the Instantaneous Coretime Pool.
		///
		/// - `origin`: Must be a Signed origin able to pay at least `amount`.
		/// - `amount`: The amount of credit to purchase; must be at least
		///   `T::MinimumCreditPurchase`.
		/// - `beneficiary`: The account on the Relay-chain which controls the credit (generally
		///   this will be the collator's hot wallet).
		#[pallet::call_index(13)]
		pub fn purchase_credit(
			origin: OriginFor<T>,
			amount: BalanceOf<T>,
			beneficiary: RelayAccountIdOf<T>,
		) -> DispatchResult {
			let who = ensure_signed(origin)?;
			Self::do_purchase_credit(who, amount, beneficiary)?;
			Ok(())
		}
854
855 /// Drop an expired Region from the chain.
856 ///
857 /// - `origin`: Can be any kind of origin.
858 /// - `region_id`: The Region which has expired.
859 #[pallet::call_index(14)]
860 pub fn drop_region(
861 _origin: OriginFor<T>,
862 region_id: RegionId,
863 ) -> DispatchResultWithPostInfo {
864 Self::do_drop_region(region_id)?;
865 Ok(Pays::No.into())
866 }
867
868 /// Drop an expired Instantaneous Pool Contribution record from the chain.
869 ///
870 /// - `origin`: Can be any kind of origin.
871 /// - `region_id`: The Region identifying the Pool Contribution which has expired.
872 #[pallet::call_index(15)]
873 pub fn drop_contribution(
874 _origin: OriginFor<T>,
875 region_id: RegionId,
876 ) -> DispatchResultWithPostInfo {
877 Self::do_drop_contribution(region_id)?;
878 Ok(Pays::No.into())
879 }
880
881 /// Drop an expired Instantaneous Pool History record from the chain.
882 ///
883 /// - `origin`: Can be any kind of origin.
884 /// - `region_id`: The time of the Pool History record which has expired.
885 #[pallet::call_index(16)]
886 pub fn drop_history(_origin: OriginFor<T>, when: Timeslice) -> DispatchResultWithPostInfo {
887 Self::do_drop_history(when)?;
888 Ok(Pays::No.into())
889 }
890
891 /// Drop an expired Allowed Renewal record from the chain.
892 ///
893 /// - `origin`: Can be any kind of origin.
894 /// - `core`: The core to which the expired renewal refers.
895 /// - `when`: The timeslice to which the expired renewal refers. This must have passed.
896 #[pallet::call_index(17)]
897 pub fn drop_renewal(
898 _origin: OriginFor<T>,
899 core: CoreIndex,
900 when: Timeslice,
901 ) -> DispatchResultWithPostInfo {
902 Self::do_drop_renewal(core, when)?;
903 Ok(Pays::No.into())
904 }
905
		/// Request a change to the number of cores available for scheduling work.
		///
		/// - `origin`: Must be Root or pass `AdminOrigin`.
		/// - `core_count`: The desired number of cores to be made available.
		#[pallet::call_index(18)]
		// Weight scales with the requested core count.
		#[pallet::weight(T::WeightInfo::request_core_count((*core_count).into()))]
		pub fn request_core_count(origin: OriginFor<T>, core_count: CoreIndex) -> DispatchResult {
			T::AdminOrigin::ensure_origin_or_root(origin)?;
			Self::do_request_core_count(core_count)?;
			Ok(())
		}
917
		/// Notify the pallet of a change in the number of cores available.
		///
		/// - `origin`: Must be Root or pass `AdminOrigin`.
		/// - `core_count`: The number of cores now available.
		///
		// NOTE(review): presumably dispatched in response to a core-count change reported by
		// the coretime provider (contrast with `request_core_count` which asks for one) —
		// confirm against `do_notify_core_count`.
		#[pallet::call_index(19)]
		#[pallet::weight(T::WeightInfo::notify_core_count())]
		pub fn notify_core_count(origin: OriginFor<T>, core_count: CoreIndex) -> DispatchResult {
			T::AdminOrigin::ensure_origin_or_root(origin)?;
			Self::do_notify_core_count(core_count)?;
			Ok(())
		}
925
		/// Notify the pallet of revenue collected for usage of Instantaneous Coretime.
		///
		/// - `origin`: Must be Root or pass `AdminOrigin`.
		/// - `revenue`: The on-demand revenue record to be processed.
		///
		// NOTE(review): assumed to be reported by the coretime provider chain — confirm the
		// exact provenance against `do_notify_revenue` and `OnDemandRevenueRecordOf`.
		#[pallet::call_index(20)]
		#[pallet::weight(T::WeightInfo::notify_revenue())]
		pub fn notify_revenue(
			origin: OriginFor<T>,
			revenue: OnDemandRevenueRecordOf<T>,
		) -> DispatchResult {
			T::AdminOrigin::ensure_origin_or_root(origin)?;
			Self::do_notify_revenue(revenue)?;
			Ok(())
		}
936
		/// Extrinsic for enabling auto renewal.
		///
		/// Callable by the sovereign account of the task on the specified core. This account
		/// will be charged at the start of every bulk period for renewing core time.
		///
		/// - `origin`: Must be the sovereign account of the task
		/// - `core`: The core to which the task to be renewed is currently assigned.
		/// - `task`: The task for which we want to enable auto renewal.
		/// - `workload_end_hint`: should be used when enabling auto-renewal for a core that is not
		///   expiring in the upcoming bulk period (e.g., due to holding a lease) since it would be
		///   inefficient to look up when the core expires to schedule the next renewal.
		///
		/// Fails with `SovereignAccountNotFound` if no sovereign account can be derived for
		/// `task`, and with `NoPermission` if the caller is not that sovereign account.
		#[pallet::call_index(21)]
		#[pallet::weight(T::WeightInfo::enable_auto_renew())]
		pub fn enable_auto_renew(
			origin: OriginFor<T>,
			core: CoreIndex,
			task: TaskId,
			workload_end_hint: Option<Timeslice>,
		) -> DispatchResult {
			let who = ensure_signed(origin)?;

			let sovereign_account = T::SovereignAccountOf::maybe_convert(task)
				.ok_or(Error::<T>::SovereignAccountNotFound)?;
			// Only the sovereign account of a task can enable auto renewal for its own core.
			ensure!(who == sovereign_account, Error::<T>::NoPermission);

			Self::do_enable_auto_renew(sovereign_account, core, task, workload_end_hint)?;
			Ok(())
		}
966
		/// Extrinsic for disabling auto renewal.
		///
		/// Callable by the sovereign account of the task on the specified core.
		///
		/// - `origin`: Must be the sovereign account of the task.
		/// - `core`: The core for which we want to disable auto renewal.
		/// - `task`: The task for which we want to disable auto renewal.
		///
		/// Fails with `SovereignAccountNotFound` if no sovereign account can be derived for
		/// `task`, and with `NoPermission` if the caller is not that sovereign account.
		#[pallet::call_index(22)]
		#[pallet::weight(T::WeightInfo::disable_auto_renew())]
		pub fn disable_auto_renew(
			origin: OriginFor<T>,
			core: CoreIndex,
			task: TaskId,
		) -> DispatchResult {
			let who = ensure_signed(origin)?;

			let sovereign_account = T::SovereignAccountOf::maybe_convert(task)
				.ok_or(Error::<T>::SovereignAccountNotFound)?;
			// Only the sovereign account of the task can disable auto-renewal.
			ensure!(who == sovereign_account, Error::<T>::NoPermission);

			Self::do_disable_auto_renew(core, task)?;

			Ok(())
		}
992
993 /// Reserve a core for a workload immediately.
994 ///
995 /// - `origin`: Must be Root or pass `AdminOrigin`.
996 /// - `workload`: The workload which should be permanently placed on a core starting
997 /// immediately.
998 /// - `core`: The core to which the assignment should be made until the reservation takes
999 /// effect. It is left to the caller to either add this new core or reassign any other
1000 /// tasks to this existing core.
1001 ///
1002 /// This reserves the workload and then injects the workload into the Workplan for the next
1003 /// two sale periods. This overwrites any existing assignments for this core at the start of
1004 /// the next sale period.
1005 #[pallet::call_index(23)]
1006 pub fn force_reserve(
1007 origin: OriginFor<T>,
1008 workload: Schedule,
1009 core: CoreIndex,
1010 ) -> DispatchResultWithPostInfo {
1011 T::AdminOrigin::ensure_origin_or_root(origin)?;
1012 Self::do_force_reserve(workload, core)?;
1013 Ok(Pays::No.into())
1014 }
1015
1016 /// Remove a lease.
1017 ///
1018 /// - `origin`: Must be Root or pass `AdminOrigin`.
1019 /// - `task`: The task id of the lease which should be removed.
1020 #[pallet::call_index(24)]
1021 pub fn remove_lease(origin: OriginFor<T>, task: TaskId) -> DispatchResult {
1022 T::AdminOrigin::ensure_origin_or_root(origin)?;
1023 Self::do_remove_lease(task)
1024 }
1025
1026 /// Remove an assignment from the Workplan.
1027 ///
1028 /// - `origin`: Must be Root or pass `AdminOrigin`.
1029 /// - `region_id`: The Region to be removed from the workplan.
1030 #[pallet::call_index(26)]
1031 pub fn remove_assignment(origin: OriginFor<T>, region_id: RegionId) -> DispatchResult {
1032 T::AdminOrigin::ensure_origin_or_root(origin)?;
1033 Self::do_remove_assignment(region_id)
1034 }
1035
		/// Swap the leases of two tasks.
		///
		/// - `origin`: Must be Root or pass `AdminOrigin`.
		/// - `id`: The task id of one lease to swap.
		/// - `other`: The task id of the other lease to swap.
		///
		// NOTE(review): semantics inferred from `do_swap_leases(id, other)`; the out-of-band
		// call index (99) suggests a migration/admin utility — confirm.
		#[pallet::call_index(99)]
		#[pallet::weight(T::WeightInfo::swap_leases())]
		pub fn swap_leases(origin: OriginFor<T>, id: TaskId, other: TaskId) -> DispatchResult {
			T::AdminOrigin::ensure_origin_or_root(origin)?;
			Self::do_swap_leases(id, other)?;
			Ok(())
		}
1043 }
1044}