pallet_staking_async/
session_rotation.rs

1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: Apache-2.0
5
6// Licensed under the Apache License, Version 2.0 (the "License");
7// you may not use this file except in compliance with the License.
8// You may obtain a copy of the License at
9//
10// 	http://www.apache.org/licenses/LICENSE-2.0
11//
12// Unless required by applicable law or agreed to in writing, software
13// distributed under the License is distributed on an "AS IS" BASIS,
14// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15// See the License for the specific language governing permissions and
16// limitations under the License.
17
18//! Manages all era rotation logic based on session increments.
19//!
20//! # Lifecycle:
21//!
22//! When a session ends in RC, a session report is sent to AH with the ending session index. Given
23//! there are 6 sessions per Era, and we configure the PlanningEraOffset to be 1, the following
24//! happens.
25//!
26//! ## Idle Sessions
27//! In the happy path, first 3 sessions are idle. Nothing much happens in these sessions.
28//!
29//!
30//! ## Planning New Era Session
31//! In the happy path, `planning new era` session is initiated when 3rd session ends and the 4th
32//! starts in the active era.
33//!
34//! **Triggers**
35//! 1. `SessionProgress == SessionsPerEra - PlanningEraOffset`
36//! 2. Forcing is set to `ForceNew` or `ForceAlways`
37//!
38//! **Actions**
39//! 1. Triggers the election process,
40//! 2. Updates the CurrentEra.
41//!
42//! **SkipIf**
43//! CurrentEra = ActiveEra + 1 // this implies planning session has already been triggered.
44//!
45//! **FollowUp**
46//! When the election process is over, we send the new validator set, with the CurrentEra index
47//! as the id of the validator set.
48//!
49//!
50//! ## Era Rotation Session
51//! In the happy path, this happens when the 5th session ends and the 6th starts in the active era.
52//!
53//! **Triggers**
54//! When we receive an activation timestamp from RC.
55//!
56//! **Assertions**
57//! 1. CurrentEra must be ActiveEra + 1.
58//! 2. Id of the activation timestamp same as CurrentEra.
59//!
60//! **Actions**
61//! - Finalize the currently active era.
62//! - Increment ActiveEra by 1.
63//! - Cleanup the old era information.
64//!
65//! **Exceptional Scenarios**
66//! - Delay in exporting validator set: Triggered in a session later than 7th.
//! - Forcing Era: May be triggered in a session earlier than the 7th.
68//!
69//! ## Example Flow of a happy path
70//!
71//! * end 0, start 1, plan 2
72//! * end 1, start 2, plan 3
73//! * end 2, start 3, plan 4
74//! * end 3, start 4, plan 5 // `Plan new era` session. Current Era++. Trigger Election.
75//! * **** Somewhere here: Election set is sent to RC, keyed with Current Era
76//! * end 4, start 5, plan 6 // RC::session receives and queues this set.
77//! * end 5, start 6, plan 7 // Session report contains activation timestamp with Current Era.
78
79use crate::*;
80use alloc::vec::Vec;
81use frame_election_provider_support::{BoundedSupportsOf, ElectionProvider, PageIndex};
82use frame_support::{
83	pallet_prelude::*,
84	traits::{Defensive, DefensiveMax, DefensiveSaturating, OnUnbalanced, TryCollect},
85};
86use sp_runtime::{Perbill, Percent, Saturating};
87use sp_staking::{
88	currency_to_vote::CurrencyToVote, Exposure, Page, PagedExposureMetadata, SessionIndex,
89};
90
/// A handler for all era-based storage items.
///
/// All of the following storage items must be controlled by this type, and should not be
/// read or written directly from anywhere else in this pallet:
///
/// - [`ErasValidatorPrefs`]
/// - [`ClaimedRewards`]
/// - [`ErasStakersPaged`]
/// - [`ErasStakersOverview`]
/// - [`ErasValidatorReward`]
/// - [`ErasRewardPoints`]
/// - [`ErasTotalStake`]
pub struct Eras<T: Config>(core::marker::PhantomData<T>);
103
104impl<T: Config> Eras<T> {
105	pub(crate) fn set_validator_prefs(era: EraIndex, stash: &T::AccountId, prefs: ValidatorPrefs) {
106		debug_assert_eq!(era, Rotator::<T>::planned_era(), "we only set prefs for planning era");
107		<ErasValidatorPrefs<T>>::insert(era, stash, prefs);
108	}
109
110	pub(crate) fn get_validator_prefs(era: EraIndex, stash: &T::AccountId) -> ValidatorPrefs {
111		<ErasValidatorPrefs<T>>::get(era, stash)
112	}
113
114	/// Returns validator commission for this era and page.
115	pub(crate) fn get_validator_commission(era: EraIndex, stash: &T::AccountId) -> Perbill {
116		Self::get_validator_prefs(era, stash).commission
117	}
118
119	/// Returns true if validator has one or more page of era rewards not claimed yet.
120	pub(crate) fn pending_rewards(era: EraIndex, validator: &T::AccountId) -> bool {
121		<ErasStakersOverview<T>>::get(&era, validator)
122			.map(|overview| {
123				ClaimedRewards::<T>::get(era, validator).len() < overview.page_count as usize
124			})
125			.unwrap_or(false)
126	}
127
128	/// Get exposure for a validator at a given era and page.
129	///
130	/// This is mainly used for rewards and slashing. Validator's self-stake is only returned in
131	/// page 0.
132	///
133	/// This builds a paged exposure from `PagedExposureMetadata` and `ExposurePage` of the
134	/// validator.
135	pub(crate) fn get_paged_exposure(
136		era: EraIndex,
137		validator: &T::AccountId,
138		page: Page,
139	) -> Option<PagedExposure<T::AccountId, BalanceOf<T>>> {
140		let overview = <ErasStakersOverview<T>>::get(&era, validator)?;
141
142		// validator stake is added only in page zero.
143		let validator_stake = if page == 0 { overview.own } else { Zero::zero() };
144
145		// since overview is present, paged exposure will always be present except when a
146		// validator has only own stake and no nominator stake.
147		let exposure_page = <ErasStakersPaged<T>>::get((era, validator, page)).unwrap_or_default();
148
149		// build the exposure
150		Some(PagedExposure {
151			exposure_metadata: PagedExposureMetadata { own: validator_stake, ..overview },
152			exposure_page: exposure_page.into(),
153		})
154	}
155
156	/// Get full exposure of the validator at a given era.
157	pub(crate) fn get_full_exposure(
158		era: EraIndex,
159		validator: &T::AccountId,
160	) -> Exposure<T::AccountId, BalanceOf<T>> {
161		let Some(overview) = <ErasStakersOverview<T>>::get(&era, validator) else {
162			return Exposure::default();
163		};
164
165		let mut others = Vec::with_capacity(overview.nominator_count as usize);
166		for page in 0..overview.page_count {
167			let nominators = <ErasStakersPaged<T>>::get((era, validator, page));
168			others.append(&mut nominators.map(|n| n.others.clone()).defensive_unwrap_or_default());
169		}
170
171		Exposure { total: overview.total, own: overview.own, others }
172	}
173
174	/// Returns the number of pages of exposure a validator has for the given era.
175	///
176	/// For eras where paged exposure does not exist, this returns 1 to keep backward compatibility.
177	pub(crate) fn exposure_page_count(era: EraIndex, validator: &T::AccountId) -> Page {
178		<ErasStakersOverview<T>>::get(&era, validator)
179			.map(|overview| {
180				if overview.page_count == 0 && overview.own > Zero::zero() {
181					// Even though there are no nominator pages, there is still validator's own
182					// stake exposed which needs to be paid out in a page.
183					1
184				} else {
185					overview.page_count
186				}
187			})
188			// Always returns 1 page for older non-paged exposure.
189			// FIXME: Can be cleaned up with issue #13034.
190			.unwrap_or(1)
191	}
192
193	/// Returns the next page that can be claimed or `None` if nothing to claim.
194	pub(crate) fn get_next_claimable_page(era: EraIndex, validator: &T::AccountId) -> Option<Page> {
195		// Find next claimable page of paged exposure.
196		let page_count = Self::exposure_page_count(era, validator);
197		let all_claimable_pages: Vec<Page> = (0..page_count).collect();
198		let claimed_pages = ClaimedRewards::<T>::get(era, validator);
199
200		all_claimable_pages.into_iter().find(|p| !claimed_pages.contains(p))
201	}
202
203	/// Creates an entry to track validator reward has been claimed for a given era and page.
204	/// Noop if already claimed.
205	pub(crate) fn set_rewards_as_claimed(era: EraIndex, validator: &T::AccountId, page: Page) {
206		let mut claimed_pages = ClaimedRewards::<T>::get(era, validator).into_inner();
207
208		// this should never be called if the reward has already been claimed
209		if claimed_pages.contains(&page) {
210			defensive!("Trying to set an already claimed reward");
211			// nevertheless don't do anything since the page already exist in claimed rewards.
212			return
213		}
214
215		// add page to claimed entries
216		claimed_pages.push(page);
217		ClaimedRewards::<T>::insert(
218			era,
219			validator,
220			WeakBoundedVec::<_, _>::force_from(claimed_pages, Some("set_rewards_as_claimed")),
221		);
222	}
223
224	/// Store exposure for elected validators at start of an era.
225	///
226	/// If the exposure does not exist yet for the tuple (era, validator), it sets it. Otherwise,
227	/// it updates the existing record by ensuring *intermediate* exposure pages are filled up with
228	/// `T::MaxExposurePageSize` number of backers per page and the remaining exposures are added
229	/// to new exposure pages.
230	pub fn upsert_exposure(
231		era: EraIndex,
232		validator: &T::AccountId,
233		mut exposure: Exposure<T::AccountId, BalanceOf<T>>,
234	) {
235		let page_size = T::MaxExposurePageSize::get().defensive_max(1);
236		if cfg!(debug_assertions) && cfg!(not(feature = "runtime-benchmarks")) {
237			// sanitize the exposure in case some test data from this pallet is wrong.
238			// ignore benchmarks as other pallets might do weird things.
239			let expected_total = exposure
240				.others
241				.iter()
242				.map(|ie| ie.value)
243				.fold::<BalanceOf<T>, _>(Default::default(), |acc, x| acc + x)
244				.saturating_add(exposure.own);
245			debug_assert_eq!(expected_total, exposure.total, "exposure total must equal own + sum(others) for (era: {:?}, validator: {:?}, exposure: {:?})", era, validator, exposure);
246		}
247
248		if let Some(overview) = ErasStakersOverview::<T>::get(era, &validator) {
249			// collect some info from the un-touched overview for later use.
250			let last_page_idx = overview.page_count.saturating_sub(1);
251			let mut last_page =
252				ErasStakersPaged::<T>::get((era, validator, last_page_idx)).unwrap_or_default();
253			let last_page_empty_slots =
254				T::MaxExposurePageSize::get().saturating_sub(last_page.others.len() as u32);
255
256			// update nominator-count, page-count, and total stake in overview (done in
257			// `update_with`).
258			let new_stake_added = exposure.total;
259			let new_nominators_added = exposure.others.len() as u32;
260			let mut updated_overview = overview
261				.update_with::<T::MaxExposurePageSize>(new_stake_added, new_nominators_added);
262
263			// update own stake, if applicable.
264			match (updated_overview.own.is_zero(), exposure.own.is_zero()) {
265				(true, false) => {
266					// first time we see own exposure -- good.
267					// note: `total` is already updated above.
268					updated_overview.own = exposure.own;
269				},
270				(true, true) | (false, true) => {
271					// no new own exposure is added, nothing to do
272				},
273				(false, false) => {
274					debug_assert!(
275						false,
276						"validator own stake already set in overview for (era: {:?}, validator: {:?}, current overview: {:?}, new exposure: {:?})",
277						era,
278						validator,
279						updated_overview,
280						exposure,
281					);
282					defensive!("duplicate validator self stake in election");
283				},
284			};
285
286			ErasStakersOverview::<T>::insert(era, &validator, updated_overview);
287			// we are done updating the overview now, `updated_overview` should not be used anymore.
288			// We've updated:
289			// * nominator count
290			// * total stake
291			// * own stake (if applicable)
292			// * page count
293			//
294			// next step:
295			// * new-keys or updates in `ErasStakersPaged`
296			//
297			// we don't need the information about own stake anymore -- drop it.
298			exposure.total = exposure.total.saturating_sub(exposure.own);
299			exposure.own = Zero::zero();
300
301			// splits the exposure so that `append_to_last_page` will fit within the last exposure
302			// page, up to the max exposure page size. The remaining individual exposures in
303			// `put_in_new_pages` will be added to new pages.
304			let append_to_last_page = exposure.split_others(last_page_empty_slots);
305			let put_in_new_pages = exposure;
306
307			// handle last page first.
308
309			// fill up last page with exposures.
310			last_page.page_total = last_page.page_total.saturating_add(append_to_last_page.total);
311			last_page.others.extend(append_to_last_page.others);
312			ErasStakersPaged::<T>::insert((era, &validator, last_page_idx), last_page);
313
314			// now handle the remaining exposures and append the exposure pages. The metadata update
315			// has been already handled above.
316			let (_unused_metadata, put_in_new_pages_chunks) =
317				put_in_new_pages.into_pages(page_size);
318
319			put_in_new_pages_chunks
320				.into_iter()
321				.enumerate()
322				.for_each(|(idx, paged_exposure)| {
323					let append_at =
324						(last_page_idx.saturating_add(1).saturating_add(idx as u32)) as Page;
325					<ErasStakersPaged<T>>::insert((era, &validator, append_at), paged_exposure);
326				});
327		} else {
328			// expected page count is the number of nominators divided by the page size, rounded up.
329			let expected_page_count = exposure
330				.others
331				.len()
332				.defensive_saturating_add((page_size as usize).defensive_saturating_sub(1))
333				.saturating_div(page_size as usize);
334
335			// no exposures yet for this (era, validator) tuple, calculate paged exposure pages and
336			// metadata from a blank slate.
337			let (exposure_metadata, exposure_pages) = exposure.into_pages(page_size);
338			defensive_assert!(exposure_pages.len() == expected_page_count, "unexpected page count");
339
340			// insert metadata.
341			ErasStakersOverview::<T>::insert(era, &validator, exposure_metadata);
342
343			// insert validator's overview.
344			exposure_pages.into_iter().enumerate().for_each(|(idx, paged_exposure)| {
345				let append_at = idx as Page;
346				<ErasStakersPaged<T>>::insert((era, &validator, append_at), paged_exposure);
347			});
348		};
349	}
350
351	pub(crate) fn set_validators_reward(era: EraIndex, amount: BalanceOf<T>) {
352		ErasValidatorReward::<T>::insert(era, amount);
353	}
354
355	pub(crate) fn get_validators_reward(era: EraIndex) -> Option<BalanceOf<T>> {
356		ErasValidatorReward::<T>::get(era)
357	}
358
359	/// Update the total exposure for all the elected validators in the era.
360	pub(crate) fn add_total_stake(era: EraIndex, stake: BalanceOf<T>) {
361		<ErasTotalStake<T>>::mutate(era, |total_stake| {
362			*total_stake += stake;
363		});
364	}
365
366	/// Check if the rewards for the given era and page index have been claimed.
367	pub(crate) fn is_rewards_claimed(era: EraIndex, validator: &T::AccountId, page: Page) -> bool {
368		ClaimedRewards::<T>::get(era, validator).contains(&page)
369	}
370
371	/// Add reward points to validators using their stash account ID.
372	pub(crate) fn reward_active_era(
373		validators_points: impl IntoIterator<Item = (T::AccountId, u32)>,
374	) {
375		if let Some(active_era) = ActiveEra::<T>::get() {
376			<ErasRewardPoints<T>>::mutate(active_era.index, |era_rewards| {
377				for (validator, points) in validators_points.into_iter() {
378					match era_rewards.individual.get_mut(&validator) {
379						Some(individual) => individual.saturating_accrue(points),
380						None => {
381							// not much we can do -- validators should always be less than
382							// `MaxValidatorSet`.
383							let _ =
384								era_rewards.individual.try_insert(validator, points).defensive();
385						},
386					}
387					era_rewards.total.saturating_accrue(points);
388				}
389			});
390		}
391	}
392
393	pub(crate) fn get_reward_points(era: EraIndex) -> EraRewardPoints<T> {
394		ErasRewardPoints::<T>::get(era)
395	}
396}
397
#[cfg(any(feature = "try-runtime", test, feature = "runtime-benchmarks"))]
#[allow(unused)]
impl<T: Config> Eras<T> {
	/// Ensure the given era's data is fully present (all storage intact and not being pruned).
	pub(crate) fn era_fully_present(era: EraIndex) -> Result<(), sp_runtime::TryRuntimeError> {
		// these two are only set if we have some validators in an era.
		let e0 = ErasValidatorPrefs::<T>::iter_prefix_values(era).count() != 0;
		// note: we don't check `ErasStakersPaged` as a validator can have no backers.
		let e1 = ErasStakersOverview::<T>::iter_prefix_values(era).count() != 0;
		ensure!(e0 == e1, "ErasValidatorPrefs and ErasStakersOverview should be consistent");

		// this one must always be set for a present era.
		let e2 = ErasTotalStake::<T>::contains_key(era);

		let active_era = Rotator::<T>::active_era();
		let e4 = if era.saturating_sub(1) > 0 &&
			era.saturating_sub(1) > active_era.saturating_sub(T::HistoryDepth::get() + 1)
		{
			// `ErasValidatorReward` is set at active era n for era n-1, and is not set for era 0 in
			// our tests. Moreover, it cannot be checked for presence in the oldest present era
			// (`active_era.saturating_sub(1)`)
			ErasValidatorReward::<T>::contains_key(era.saturating_sub(1))
		} else {
			// ignore
			e2
		};

		ensure!(e2 == e4, "era info presence not consistent");

		if e2 {
			Ok(())
		} else {
			Err("era presence mismatch".into())
		}
	}

	/// Check if the given era is currently being pruned.
	pub(crate) fn era_pruning_in_progress(era: EraIndex) -> bool {
		EraPruningState::<T>::contains_key(era)
	}

	/// Ensure the given era is either absent or currently being pruned.
	pub(crate) fn era_absent_or_pruning(era: EraIndex) -> Result<(), sp_runtime::TryRuntimeError> {
		if Self::era_pruning_in_progress(era) {
			Ok(())
		} else {
			Self::era_absent(era)
		}
	}

	/// Ensure the given era has indeed been already pruned. This is called by the main pallet in
	/// do_prune_era_step.
	pub(crate) fn era_absent(era: EraIndex) -> Result<(), sp_runtime::TryRuntimeError> {
		// check double+ maps
		let e0 = ErasValidatorPrefs::<T>::iter_prefix_values(era).count() != 0;
		let e1 = ErasStakersPaged::<T>::iter_prefix_values((era,)).count() != 0;
		let e2 = ErasStakersOverview::<T>::iter_prefix_values(era).count() != 0;

		// check maps
		// `ErasValidatorReward` is set at active era n for era n-1
		let e3 = ErasValidatorReward::<T>::contains_key(era);
		let e4 = ErasTotalStake::<T>::contains_key(era);

		// these two are only populated conditionally, so we only check them for lack of existence
		let e6 = ClaimedRewards::<T>::iter_prefix_values(era).count() != 0;
		let e7 = ErasRewardPoints::<T>::contains_key(era);

		// Check if era info is consistent - if not, era is in partial pruning state.
		// note: an array literal suffices here (`windows` comes from slice deref) and avoids a
		// needless heap allocation via `vec!`.
		if ![e0, e1, e2, e3, e4, e6, e7].windows(2).all(|w| w[0] == w[1]) {
			return Err("era info absence not consistent - partial pruning state".into());
		}

		if !e0 {
			Ok(())
		} else {
			Err("era absence mismatch".into())
		}
	}

	/// Verify the whole pruning window: all eras in the history-depth window are fully present,
	/// and everything older is either fully pruned or marked for pruning.
	pub(crate) fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> {
		// pruning window works.
		let active_era = Rotator::<T>::active_era();
		// we max with 1 as in active era 0 we don't do an election and therefore we don't have some
		// of the maps populated.
		let oldest_present_era = active_era.saturating_sub(T::HistoryDepth::get()).max(1);

		for e in oldest_present_era..=active_era {
			Self::era_fully_present(e)?
		}

		// Ensure all eras older than oldest_present_era are either fully pruned or marked for
		// pruning
		ensure!(
			(1..oldest_present_era).all(|e| Self::era_absent_or_pruning(e).is_ok()),
			"All old eras must be either fully pruned or marked for pruning"
		);

		Ok(())
	}
}
498
/// Manages session rotation logic.
///
/// This controls the following storage items in FULL, meaning that they should not be accessed
/// directly from anywhere else in this pallet:
///
/// * `CurrentEra`: The current planning era
/// * `ActiveEra`: The current active era
/// * `BondedEras`: the list of ACTIVE eras and their session index
pub struct Rotator<T: Config>(core::marker::PhantomData<T>);
508
impl<T: Config> Rotator<T> {
	/// Benchmarking-only: plan a new era and immediately run a full paged election, returning the
	/// elected stashes. Assumes an instant, onchain election provider.
	#[cfg(feature = "runtime-benchmarks")]
	pub(crate) fn legacy_insta_plan_era() -> Vec<T::AccountId> {
		// Plan the era,
		Self::plan_new_era();
		// signal that we are about to call into elect asap.
		<<T as Config>::ElectionProvider as ElectionProvider>::asap();
		// immediately call into the election provider to fetch and process the results. We assume
		// we are using an instant, onchain election here.
		let msp = <T::ElectionProvider as ElectionProvider>::msp();
		let lsp = 0;
		for p in (lsp..=msp).rev() {
			EraElectionPlanner::<T>::do_elect_paged(p);
		}

		crate::ElectableStashes::<T>::take().into_iter().collect()
	}

	/// Try-state checks for the era/session bookkeeping controlled by this type.
	#[cfg(any(feature = "try-runtime", test))]
	pub(crate) fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> {
		// Check planned era vs active era relationship
		let active_era = ActiveEra::<T>::get();
		let planned_era = CurrentEra::<T>::get();

		let bonded = BondedEras::<T>::get();

		match (&active_era, &planned_era) {
			(None, None) => {
				// Uninitialized state - both should be None
				ensure!(bonded.is_empty(), "BondedEras must be empty when ActiveEra is None");
			},
			(Some(active), Some(planned)) => {
				// Normal state - planned can be at most one more than active
				ensure!(
					*planned == active.index || *planned == active.index + 1,
					"planned era is always equal or one more than active"
				);

				// If we have an active era, bonded eras must always be the range
				// [active - bonding_duration .. active_era]
				ensure!(
					bonded.into_iter().map(|(era, _sess)| era).collect::<Vec<_>>() ==
						(active.index.saturating_sub(T::BondingDuration::get())..=active.index)
							.collect::<Vec<_>>(),
					"BondedEras range incorrect"
				);
			},
			_ => {
				ensure!(false, "ActiveEra and CurrentEra must both be None or both be Some");
			},
		}

		Ok(())
	}

	/// Assert (panicking) that an era is being planned and the election provider is healthy.
	/// Test/try-runtime/benchmark helper only.
	#[cfg(any(feature = "try-runtime", feature = "std", feature = "runtime-benchmarks", test))]
	pub fn assert_election_ongoing() {
		assert!(Self::is_planning().is_some(), "planning era must exist");
		assert!(
			T::ElectionProvider::status().is_ok(),
			"Election provider must be in a good state during election"
		);
	}

	/// Latest era that was planned.
	///
	/// The returned value does not necessarily indicate that planning for the era with this index
	/// is underway, but rather the last era that was planned. If `Self::active_era()` is equal to
	/// this value, it means that the era is currently active and no new era is planned.
	///
	/// See [`Self::is_planning()`] to only get the next index if planning in progress.
	pub fn planned_era() -> EraIndex {
		CurrentEra::<T>::get().unwrap_or(0)
	}

	/// Index of the currently active era; defensively 0 if `ActiveEra` is unset.
	pub fn active_era() -> EraIndex {
		ActiveEra::<T>::get().map(|a| a.index).defensive_unwrap_or(0)
	}

	/// Next era that is planned to be started.
	///
	/// Returns None if no era is planned.
	pub fn is_planning() -> Option<EraIndex> {
		let (active, planned) = (Self::active_era(), Self::planned_era());
		if planned.defensive_saturating_sub(active) > 1 {
			defensive!("planned era must always be equal or one more than active");
		}

		(planned > active).then_some(planned)
	}

	/// End the session and start the next one.
	///
	/// Called upon receiving a session report from the relay chain. May rotate the active era (if
	/// an activation timestamp with the expected id is present) and/or plan a new era (if the
	/// planning deadline is reached or an era is being forced).
	pub(crate) fn end_session(
		end_index: SessionIndex,
		activation_timestamp: Option<(u64, u32)>,
	) -> Weight {
		// baseline weight for processing the relay chain session report
		let weight = T::WeightInfo::rc_on_session_report();

		let Some(active_era) = ActiveEra::<T>::get() else {
			defensive!("Active era must always be available.");
			return weight;
		};
		let current_planned_era = Self::is_planning();
		let starting = end_index + 1;
		// the session after the starting session.
		let planning = starting + 1;

		log!(
			info,
			"Session: end {:?}, start {:?} (ts: {:?}), planning {:?}",
			end_index,
			starting,
			activation_timestamp,
			planning
		);
		log!(info, "Era: active {:?}, planned {:?}", active_era.index, current_planned_era);

		match activation_timestamp {
			Some((time, id)) if Some(id) == current_planned_era => {
				// We rotate the era if we have the activation timestamp.
				Self::start_era(active_era, starting, time);
			},
			Some((_time, id)) => {
				// RC has done something wrong -- we received the wrong ID. Don't start a new era.
				crate::log!(
					warn,
					"received wrong ID with activation timestamp. Got {}, expected {:?}",
					id,
					current_planned_era
				);
				Pallet::<T>::deposit_event(Event::Unexpected(
					UnexpectedKind::UnknownValidatorActivation,
				));
			},
			None => (),
		}

		// check if we should plan new era.
		let should_plan_era = match ForceEra::<T>::get() {
			// see if it's good time to plan a new era.
			Forcing::NotForcing => Self::is_plan_era_deadline(starting),
			// Force plan new era only once.
			Forcing::ForceNew => {
				ForceEra::<T>::put(Forcing::NotForcing);
				true
			},
			// always plan the new era.
			Forcing::ForceAlways => true,
			// never force.
			Forcing::ForceNone => false,
		};

		// Note: we call `is_planning` again, as a new era might have started since we checked
		// it last.
		let has_pending_era = Self::is_planning().is_some();
		match (should_plan_era, has_pending_era) {
			(false, _) => {
				// nothing to consider
			},
			(true, false) => {
				// happy path
				Self::plan_new_era();
			},
			(true, true) => {
				// we are still waiting for the previously planned era to start; we cannot plan a
				// new era now.
				crate::log!(
					debug,
					"time to plan a new era {:?}, but waiting for the activation of the previous.",
					current_planned_era
				);
			},
		}

		Pallet::<T>::deposit_event(Event::SessionRotated {
			starting_session: starting,
			active_era: Self::active_era(),
			planned_era: Self::planned_era(),
		});

		weight
	}

	/// Rotate into the next era: finalize `ending_era`, increment the active era, update
	/// `BondedEras`, clean up election state, and mark any era falling out of the history window
	/// for lazy pruning.
	pub(crate) fn start_era(
		ending_era: ActiveEraInfo,
		starting_session: SessionIndex,
		new_era_start_timestamp: u64,
	) {
		// verify that a new era was planned
		debug_assert!(CurrentEra::<T>::get().unwrap_or(0) == ending_era.index + 1);

		let starting_era = ending_era.index + 1;

		// finalize the ending era.
		Self::end_era(&ending_era, new_era_start_timestamp);

		// start the next era.
		Self::start_era_inc_active_era(new_era_start_timestamp);
		Self::start_era_update_bonded_eras(starting_era, starting_session);

		// cleanup election state
		EraElectionPlanner::<T>::cleanup();

		// Mark ancient era for lazy pruning instead of immediately pruning it.
		if let Some(old_era) = starting_era.checked_sub(T::HistoryDepth::get() + 1) {
			log!(debug, "Marking era {:?} for lazy pruning", old_era);
			EraPruningState::<T>::insert(old_era, PruningStep::ErasStakersPaged);
		}
	}

	/// Increment `ActiveEra` (or initialize it to 0), recording the RC-provided start timestamp.
	fn start_era_inc_active_era(start_timestamp: u64) {
		ActiveEra::<T>::mutate(|active_era| {
			let new_index = active_era.as_ref().map(|info| info.index + 1).unwrap_or(0);
			log!(
				debug,
				"starting active era {:?} with RC-provided timestamp {:?}",
				new_index,
				start_timestamp
			);
			*active_era = Some(ActiveEraInfo { index: new_index, start: Some(start_timestamp) });
		});
	}

	/// The session index of the current active era.
	///
	/// This must always exist in the `BondedEras` storage item, ergo the function is infallible.
	pub fn active_era_start_session_index() -> SessionIndex {
		Self::era_start_session_index(Self::active_era()).defensive_unwrap_or(0)
	}

	/// The session index of a given era.
	pub fn era_start_session_index(era: EraIndex) -> Option<SessionIndex> {
		BondedEras::<T>::get()
			.into_iter()
			.rev()
			.find_map(|(e, s)| if e == era { Some(s) } else { None })
	}

	/// Append `(starting_era, start_session)` to `BondedEras`, evicting (and clearing slashing
	/// metadata for) the oldest entry if the bounded vec is full.
	fn start_era_update_bonded_eras(starting_era: EraIndex, start_session: SessionIndex) {
		let bonding_duration = T::BondingDuration::get();

		BondedEras::<T>::mutate(|bonded| {
			if bonded.is_full() {
				// remove oldest
				let (era_removed, _) = bonded.remove(0);
				debug_assert!(
					era_removed <= (starting_era.saturating_sub(bonding_duration)),
					"should not delete an era that is not older than bonding duration"
				);
				slashing::clear_era_metadata::<T>(era_removed);
			}

			// must work -- we were not full, or just removed the oldest era.
			let _ = bonded.try_push((starting_era, start_session)).defensive();
		});
	}

	/// Finalize `ending_era`: compute its (possibly capped) duration and trigger the payout
	/// computation.
	fn end_era(ending_era: &ActiveEraInfo, new_era_start: u64) {
		// if the era start is missing, fall back to zero duration rather than failing.
		let previous_era_start = ending_era.start.defensive_unwrap_or(new_era_start);
		let uncapped_era_duration = new_era_start.saturating_sub(previous_era_start);

		// maybe cap the era duration to the maximum allowed by the runtime.
		let cap = T::MaxEraDuration::get();
		let era_duration = if cap == 0 {
			// if the cap is zero (not set), we don't cap the era duration.
			uncapped_era_duration
		} else if uncapped_era_duration > cap {
			Pallet::<T>::deposit_event(Event::Unexpected(UnexpectedKind::EraDurationBoundExceeded));

			// if the cap is set, and era duration exceeds the cap, we cap the era duration to the
			// maximum allowed.
			log!(
				warn,
				"capping era duration for era {:?} from {:?} to max allowed {:?}",
				ending_era.index,
				uncapped_era_duration,
				cap
			);
			cap
		} else {
			uncapped_era_duration
		};

		Self::end_era_compute_payout(ending_era, era_duration);
	}

	/// Compute the era payout via `T::EraPayout`, cap the validators' share with
	/// `MaxStakedRewards`, record the validator reward, and route the remainder to
	/// `T::RewardRemainder`.
	fn end_era_compute_payout(ending_era: &ActiveEraInfo, era_duration: u64) {
		let staked = ErasTotalStake::<T>::get(ending_era.index);
		let issuance = asset::total_issuance::<T>();

		log!(
			debug,
			"computing inflation for era {:?} with duration {:?}",
			ending_era.index,
			era_duration
		);
		let (validator_payout, remainder) =
			T::EraPayout::era_payout(staked, issuance, era_duration);

		let total_payout = validator_payout.saturating_add(remainder);
		let max_staked_rewards = MaxStakedRewards::<T>::get().unwrap_or(Percent::from_percent(100));

		// apply cap to validators payout and add difference to remainder.
		let validator_payout = validator_payout.min(max_staked_rewards * total_payout);
		let remainder = total_payout.saturating_sub(validator_payout);

		Pallet::<T>::deposit_event(Event::<T>::EraPaid {
			era_index: ending_era.index,
			validator_payout,
			remainder,
		});

		// Set ending era reward.
		Eras::<T>::set_validators_reward(ending_era.index, validator_payout);
		T::RewardRemainder::on_unbalanced(asset::issue::<T>(remainder));
	}

	/// Plans a new era by kicking off the election process.
	///
	/// The newly planned era is targeted to activate in the next session.
	fn plan_new_era() {
		let _ = CurrentEra::<T>::try_mutate(|x| {
			log!(info, "Planning new era: {:?}, sending election start signal", x.unwrap_or(0));
			let could_start_election = EraElectionPlanner::<T>::plan_new_election();
			*x = Some(x.unwrap_or(0) + 1);
			could_start_election
		});
	}

	/// Returns whether we are at the session where we should plan the new era.
	fn is_plan_era_deadline(start_session: SessionIndex) -> bool {
		// offset is clamped so the target session never underflows below the era start.
		let planning_era_offset = T::PlanningEraOffset::get().min(T::SessionsPerEra::get());
		// session at which we should plan the new era.
		let target_plan_era_session = T::SessionsPerEra::get().saturating_sub(planning_era_offset);
		let era_start_session = Self::active_era_start_session_index();

		// progress of the active era in sessions.
		let session_progress = start_session.defensive_saturating_sub(era_start_session);

		log!(
			debug,
			"Session progress within era: {:?}, target_plan_era_session: {:?}",
			session_progress,
			target_plan_era_session
		);
		session_progress >= target_plan_era_session
	}
}
858
/// Manager type which collects the election results from [`Config::ElectionProvider`] and
/// finalizes the planning of a new era.
///
/// This type manages 3 storage items:
///
/// * [`crate::VoterSnapshotStatus`]
/// * [`crate::NextElectionPage`]
/// * [`crate::ElectableStashes`]
///
/// A new election is fetched over multiple pages, and finalized upon fetching the last page.
///
/// * The intermediate state of fetching the election result is kept in [`NextElectionPage`]. If
///   `Some(_)` something is ongoing, otherwise not.
/// * We fully trust [`Config::ElectionProvider`] to give us a full set of validators, with enough
///   backing after all calls to `maybe_fetch_election_results` are done. Note that older versions
///   of this pallet had a `MinimumValidatorCount` to double-check this, but we don't check it
///   anymore.
/// * `maybe_fetch_election_results` returns no weight. Its weight should be taken into account in
///   the e2e benchmarking of the [`Config::ElectionProvider`].
///
/// TODOs:
///
/// * Add a try-state check based on the 3 storage items
/// * Move snapshot creation functions here as well.
pub(crate) struct EraElectionPlanner<T: Config>(PhantomData<T>);
884impl<T: Config> EraElectionPlanner<T> {
	/// Cleanup all associated storage items.
	///
	/// Kills all three intermediate election-state items and registers the weight of the three
	/// storage writes with the pallet.
	pub(crate) fn cleanup() {
		VoterSnapshotStatus::<T>::kill();
		NextElectionPage::<T>::kill();
		ElectableStashes::<T>::kill();
		Pallet::<T>::register_weight(T::DbWeight::get().writes(3));
	}
892
893	/// Fetches the number of pages configured by the election provider.
894	pub(crate) fn election_pages() -> u32 {
895		<<T as Config>::ElectionProvider as ElectionProvider>::Pages::get()
896	}
897
898	/// Plan a new election
899	pub(crate) fn plan_new_election() -> Result<(), <T::ElectionProvider as ElectionProvider>::Error>
900	{
901		T::ElectionProvider::start()
902			.inspect_err(|e| log!(warn, "Election provider failed to start: {:?}", e))
903	}
904
	/// Hook to be used in the pallet's on-initialize.
	///
	/// If the election provider reports that results are ready, fetches exactly one page per
	/// call, in descending page order. Once the last page (page 0) has been fetched, the
	/// accumulated [`ElectableStashes`] are drained and sent to the relay chain as the new
	/// validator set, identified by [`CurrentEra`].
	pub(crate) fn maybe_fetch_election_results() {
		if let Ok(true) = T::ElectionProvider::status() {
			crate::log!(
				debug,
				"Election provider is ready, our status is {:?}",
				NextElectionPage::<T>::get()
			);

			// by the time results are ready, `plan_new_era` must have bumped `CurrentEra`.
			debug_assert!(
				CurrentEra::<T>::get().unwrap_or(0) ==
					ActiveEra::<T>::get().map_or(0, |a| a.index) + 1,
				"Next era must be already planned."
			);

			// `None` means no fetch is ongoing yet: start from the highest page index.
			let current_page = NextElectionPage::<T>::get()
				.unwrap_or(Self::election_pages().defensive_saturating_sub(1));
			let maybe_next_page = current_page.checked_sub(1);
			crate::log!(debug, "fetching page {:?}, next {:?}", current_page, maybe_next_page);

			Self::do_elect_paged(current_page);
			NextElectionPage::<T>::set(maybe_next_page);

			// if current page was `Some`, and next is `None`, we have finished an election and
			// we can report it now.
			if maybe_next_page.is_none() {
				use pallet_staking_async_rc_client::RcClientInterface;
				let id = CurrentEra::<T>::get().defensive_unwrap_or(0);
				let prune_up_to = Self::get_prune_up_to();
				let rc_validators = ElectableStashes::<T>::take().into_iter().collect::<Vec<_>>();

				crate::log!(
					info,
					"Sending new validator set of size {:?} to RC. ID: {:?}, prune_up_to: {:?}",
					rc_validators.len(),
					id,
					prune_up_to
				);

				T::RcClientInterface::validator_set(rc_validators, id, prune_up_to);
			}
		}
	}
948
949	/// Get the right value of the first session that needs to be pruned on the RC's historical
950	/// session pallet.
951	fn get_prune_up_to() -> Option<SessionIndex> {
952		let bonded_eras = BondedEras::<T>::get();
953
954		// get the first session of the oldest era in the bonded eras.
955		if bonded_eras.is_full() {
956			bonded_eras.first().map(|(_, first_session)| first_session.saturating_sub(1))
957		} else {
958			None
959		}
960	}
961
962	/// Paginated elect.
963	///
964	/// Fetches the election page with index `page` from the election provider.
965	///
966	/// The results from the elect call should be stored in the `ElectableStashes` storage. In
967	/// addition, it stores stakers' information for next planned era based on the paged
968	/// solution data returned.
969	///
970	/// If any new election winner does not fit in the electable stashes storage, it truncates
971	/// the result of the election. We ensure that only the winners that are part of the
972	/// electable stashes have exposures collected for the next era.
973	pub(crate) fn do_elect_paged(page: PageIndex) {
974		let election_result = T::ElectionProvider::elect(page);
975		match election_result {
976			Ok(supports) => {
977				let inner_processing_results = Self::do_elect_paged_inner(supports);
978				if let Err(not_included) = inner_processing_results {
979					defensive!(
980						"electable stashes exceeded limit, unexpected but election proceeds.\
981                		{} stashes from election result discarded",
982						not_included
983					);
984				};
985
986				Pallet::<T>::deposit_event(Event::PagedElectionProceeded {
987					page,
988					result: inner_processing_results.map(|x| x as u32).map_err(|x| x as u32),
989				});
990			},
991			Err(e) => {
992				log!(warn, "election provider page failed due to {:?} (page: {})", e, page);
993				Pallet::<T>::deposit_event(Event::PagedElectionProceeded { page, result: Err(0) });
994			},
995		}
996	}
997
	/// Inner implementation of [`Self::do_elect_paged`].
	///
	/// Adds the page's winners to the electable stashes and stores their exposures for the
	/// planned era.
	///
	/// Returns `Ok(newly_added)` with the number of stashes newly inserted on success. Returns
	/// an error if adding election winners to the electable stashes storage fails due to
	/// exceeded bounds; in that case the error carries the *number* of stashes that could not
	/// be included (not the index of the first failing stash), and only the winners that did
	/// fit have their exposures collected.
	pub(crate) fn do_elect_paged_inner(
		mut supports: BoundedSupportsOf<T::ElectionProvider>,
	) -> Result<usize, usize> {
		let planning_era = Rotator::<T>::planned_era();

		match Self::add_electables(supports.iter().map(|(s, _)| s.clone())) {
			Ok(added) => {
				let exposures = Self::collect_exposures(supports);
				let _ = Self::store_stakers_info(exposures, planning_era);
				Ok(added)
			},
			Err(not_included_idx) => {
				// `not_included_idx` is the position of the first stash that did not fit; all
				// stashes from that position onwards were excluded.
				let not_included = supports.len().saturating_sub(not_included_idx);

				log!(
					warn,
					"not all winners fit within the electable stashes, excluding {:?} accounts from solution.",
					not_included,
				);

				// filter out supports of stashes that do not fit within the electable stashes
				// storage bounds to prevent collecting their exposures.
				supports.truncate(not_included_idx);
				let exposures = Self::collect_exposures(supports);
				let _ = Self::store_stakers_info(exposures, planning_era);

				Err(not_included)
			},
		}
	}
1033
1034	/// Process the output of a paged election.
1035	///
1036	/// Store staking information for the new planned era of a single election page.
1037	pub(crate) fn store_stakers_info(
1038		exposures: BoundedExposuresOf<T>,
1039		new_planned_era: EraIndex,
1040	) -> BoundedVec<T::AccountId, MaxWinnersPerPageOf<T::ElectionProvider>> {
1041		// populate elected stash, stakers, exposures, and the snapshot of validator prefs.
1042		let mut total_stake_page: BalanceOf<T> = Zero::zero();
1043		let mut elected_stashes_page = Vec::with_capacity(exposures.len());
1044		let mut total_backers = 0u32;
1045
1046		exposures.into_iter().for_each(|(stash, exposure)| {
1047			log!(
1048				trace,
1049				"storing exposure for stash {:?} with {:?} own-stake and {:?} backers",
1050				stash,
1051				exposure.own,
1052				exposure.others.len()
1053			);
1054			// build elected stash.
1055			elected_stashes_page.push(stash.clone());
1056			// accumulate total stake and backer count for bookkeeping.
1057			total_stake_page = total_stake_page.saturating_add(exposure.total);
1058			total_backers += exposure.others.len() as u32;
1059			// set or update staker exposure for this era.
1060			Eras::<T>::upsert_exposure(new_planned_era, &stash, exposure);
1061		});
1062
1063		let elected_stashes: BoundedVec<_, MaxWinnersPerPageOf<T::ElectionProvider>> =
1064			elected_stashes_page
1065				.try_into()
1066				.expect("both types are bounded by MaxWinnersPerPageOf; qed");
1067
1068		// adds to total stake in this era.
1069		Eras::<T>::add_total_stake(new_planned_era, total_stake_page);
1070
1071		// collect or update the pref of all winners.
1072		// TODO: rather inefficient, we can do this once at the last page across all entries in
1073		// `ElectableStashes`.
1074		for stash in &elected_stashes {
1075			let pref = Validators::<T>::get(stash);
1076			Eras::<T>::set_validator_prefs(new_planned_era, stash, pref);
1077		}
1078
1079		log!(
1080			debug,
1081			"stored a page of stakers with {:?} validators and {:?} total backers for era {:?}",
1082			elected_stashes.len(),
1083			total_backers,
1084			new_planned_era,
1085		);
1086
1087		elected_stashes
1088	}
1089
1090	/// Consume a set of [`BoundedSupports`] from [`sp_npos_elections`] and collect them into a
1091	/// [`Exposure`].
1092	///
1093	/// Returns vec of all the exposures of a validator in `paged_supports`, bounded by the
1094	/// number of max winners per page returned by the election provider.
1095	fn collect_exposures(
1096		supports: BoundedSupportsOf<T::ElectionProvider>,
1097	) -> BoundedExposuresOf<T> {
1098		let total_issuance = asset::total_issuance::<T>();
1099		let to_currency = |e: frame_election_provider_support::ExtendedBalance| {
1100			T::CurrencyToVote::to_currency(e, total_issuance)
1101		};
1102
1103		supports
1104			.into_iter()
1105			.map(|(validator, support)| {
1106				// Build `struct exposure` from `support`.
1107				let mut others = Vec::with_capacity(support.voters.len());
1108				let mut own: BalanceOf<T> = Zero::zero();
1109				let mut total: BalanceOf<T> = Zero::zero();
1110				support
1111					.voters
1112					.into_iter()
1113					.map(|(nominator, weight)| (nominator, to_currency(weight)))
1114					.for_each(|(nominator, stake)| {
1115						if nominator == validator {
1116							defensive_assert!(own == Zero::zero(), "own stake should be unique");
1117							own = own.saturating_add(stake);
1118						} else {
1119							others.push(IndividualExposure { who: nominator, value: stake });
1120						}
1121						total = total.saturating_add(stake);
1122					});
1123
1124				let exposure = Exposure { own, others, total };
1125				(validator, exposure)
1126			})
1127			.try_collect()
1128			.expect("we only map through support vector which cannot change the size; qed")
1129	}
1130
1131	/// Adds a new set of stashes to the electable stashes.
1132	///
1133	/// Returns:
1134	///
1135	/// `Ok(newly_added)` if all stashes were added successfully.
1136	/// `Err(first_un_included)` if some stashes cannot be added due to bounds.
1137	pub(crate) fn add_electables(
1138		new_stashes: impl Iterator<Item = T::AccountId>,
1139	) -> Result<usize, usize> {
1140		ElectableStashes::<T>::mutate(|electable| {
1141			let pre_size = electable.len();
1142
1143			for (idx, stash) in new_stashes.enumerate() {
1144				if electable.try_insert(stash).is_err() {
1145					return Err(idx);
1146				}
1147			}
1148
1149			Ok(electable.len() - pre_size)
1150		})
1151	}
1152}