//! jam-std-common 0.1.26
//!
//! Common datatypes and utilities for the JAM nodes and tooling.
use crate::{
	hash_encoded, header::OffendersMark, AvailabilityAssurance, EpochIndex, ErasureRoot,
	TicketEnvelope,
};
use bounded_collections::ConstU32;
use codec::{Compact, CompactLen, ConstEncodedLen, Decode, Encode, MaxEncodedLen};
use jam_types::{
	max_report_elective_data, max_work_items, val_count, AuthTrace, AuthorizerHash, BoundedVec,
	CoreCount, CoreIndex, ExtrinsicHash, FixedVec, MaxDependencies, MaxTicketsPerBlock,
	MaxWorkItems, RefineContext, SegmentTreeRoot, ServiceId, Slot, UnsignedGas, ValIndex,
	ValSuperMajority, VecMap, WorkDigest, WorkPackageHash, WorkReportHash, VALS_PER_CORE,
};

/// Maximum number of verdicts admissible in a single disputes extrinsic.
pub const MAX_VERDICTS_COUNT: usize = 16;
/// Maximum number of culprit proofs (and, separately, fault proofs)
/// admissible in a single disputes extrinsic.
pub const MAX_OFFENSES_COUNT: usize = 16;

/// A bunch of preimages.
pub type PreimagesXt = Vec<Preimage>;
/// A collection of ticket envelopes, bounded by the per-block ticket limit.
pub type TicketsXt = BoundedVec<TicketEnvelope, MaxTicketsPerBlock>;
/// A bunch of assurances, bounded by the validator count.
pub type AssurancesXt = BoundedVec<AvailabilityAssurance, jam_types::ValCount>;
/// A bunch of guarantees, bounded by the core count.
pub type GuaranteesXt = BoundedVec<ReportGuarantee, CoreCount>;
/// The disputes extrinsic.
///
/// Collections are bounded by [`MAX_VERDICTS_COUNT`] and [`MAX_OFFENSES_COUNT`]
/// respectively.
#[derive(Clone, Encode, Decode, Debug, Default)]
pub struct DisputesXt {
	/// Disputed work reports together with their judgements.
	pub verdicts: BoundedVec<Verdict, ConstU32<{ MAX_VERDICTS_COUNT as u32 }>>,
	/// Validators who guaranteed a work report subsequently determined to be invalid.
	pub culprits: BoundedVec<CulpritProof, ConstU32<{ MAX_OFFENSES_COUNT as u32 }>>,
	/// Validators who audited a work report in conflict with the final dispute resolution.
	pub faults: BoundedVec<FaultProof, ConstU32<{ MAX_OFFENSES_COUNT as u32 }>>,
}

impl DisputesXt {
	/// Build the offenders mark: culprit keys followed by fault keys.
	///
	/// # Panics
	///
	/// Panics if the combined number of culprits and faults exceeds the
	/// `OffendersMark` bound; the bounds are expected to line up by construction.
	pub fn offenders_mark(&self) -> OffendersMark {
		let offenders: Vec<_> = self
			.culprits
			.iter()
			.map(|v| v.key)
			.chain(self.faults.iter().map(|v| v.key))
			.collect();
		offenders.try_into().expect("OffendersMark bounds equal culprits + faults")
	}

	/// Check whether this extrinsic implies exactly the given offenders mark:
	/// the culprit keys followed by the fault keys, with no missing or surplus
	/// entries.
	pub fn implies_offenders_mark(&self, offenders_mark: &OffendersMark) -> bool {
		// `Iterator::eq` also compares lengths. The previous
		// `zip(..).all(..)` formulation silently truncated to the shorter
		// sequence, accepting a mark with extra or missing entries.
		self.culprits
			.iter()
			.map(|v| v.key)
			.chain(self.faults.iter().map(|v| v.key))
			.eq(offenders_mark.iter().copied())
	}
}

/// Judgements coming from a supermajority of either the active validators
/// set (κ) or the previous epoch's validator set (λ).
pub type VerdictVotes = FixedVec<Judgement, ValSuperMajority>;

/// Collection of judgements for a given target work report.
#[derive(Clone, Encode, Decode, Debug)]
pub struct Verdict {
	/// Target work report.
	pub target: WorkReportHash,
	/// Epoch index of the prior state or one less depending on the key set
	/// used to sign the votes (κ vs λ above).
	pub age: EpochIndex,
	/// Verdict votes collection; fixed-length at the supermajority size.
	pub votes: VerdictVotes,
}

/// Classification of a verdict by its vote split (see [`Verdict::kind`]).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum VerdictKind {
	/// Every vote deems the report valid.
	Good,
	/// No vote deems the report valid.
	Bad,
	/// Exactly one third of the validator count deems the report valid.
	Wonky,
}

impl Verdict {
	/// Classify the verdict from its vote split: all votes positive is good,
	/// none positive is bad, exactly one third of the validator count positive
	/// is wonky. Any other split is invalid and yields `Err`.
	pub fn kind(&self) -> Result<VerdictKind, ()> {
		let positives = self.votes.iter().filter(|j| j.vote).count();
		match positives {
			n if n == self.votes.len() => Ok(VerdictKind::Good),
			0 => Ok(VerdictKind::Bad),
			n if n as u16 == val_count() / 3 => Ok(VerdictKind::Wonky),
			_ => Err(()),
		}
	}
}

/// A single validator's signed vote on a disputed work report.
#[derive(Clone, Encode, Decode, Debug)]
pub struct Judgement {
	/// `true` if the report is deemed valid.
	pub vote: bool,
	/// Index of the voting validator within the relevant validator set.
	pub index: ValIndex,
	/// Ed25519 signature for the vote.
	// NOTE(review): the signed payload isn't visible in this file — see the
	// signing/verification sites for its exact construction.
	pub signature: super::ed25519::Signature,
}

/// Proof that a validator guaranteed a work report later judged invalid.
#[derive(Clone, Encode, Decode, Debug)]
pub struct CulpritProof {
	/// Hash of the offending work report.
	pub report_hash: WorkReportHash,
	/// Ed25519 public key of the offending guarantor.
	pub key: super::ed25519::Public,
	/// The offender's signature.
	// NOTE(review): signed payload defined at the verification site.
	pub signature: super::ed25519::Signature,
}

/// Proof that a validator audited a work report in conflict with the final
/// dispute resolution.
#[derive(Clone, Encode, Decode, Debug)]
pub struct FaultProof {
	/// Hash of the work report in question.
	pub report_hash: WorkReportHash,
	/// The vote the validator cast.
	pub vote: bool,
	/// Ed25519 public key of the offending auditor.
	pub key: super::ed25519::Public,
	/// The offender's signature.
	// NOTE(review): signed payload defined at the verification site.
	pub signature: super::ed25519::Signature,
}

/// Aggregate of all the per-kind extrinsics carried by a block.
#[derive(Clone, Encode, Decode, Debug, Default)]
pub struct Extrinsic {
	/// Ticket envelopes ([`TicketsXt`]).
	pub tickets: TicketsXt,
	/// Preimages ([`PreimagesXt`]).
	pub preimages: PreimagesXt,
	/// Report guarantees ([`GuaranteesXt`]).
	pub guarantees: GuaranteesXt,
	/// Availability assurances ([`AssurancesXt`]).
	pub assurances: AssurancesXt,
	/// Disputes ([`DisputesXt`]).
	pub disputes: DisputesXt,
}

impl Extrinsic {
	/// The guarantees with each report replaced by its hash, keeping the slot
	/// and borrowing the signatures.
	pub fn guarantees_prehashed(&self) -> Vec<(WorkReportHash, Slot, &GuaranteeSignatures)> {
		self.guarantees
			.iter()
			.map(|guarantee| (guarantee.report.hash(), guarantee.slot, &guarantee.signatures))
			.collect()
	}

	/// Extrinsic hash: the hash of the encoded tuple of per-component hashes,
	/// with the guarantees hashed in their pre-hashed form.
	pub fn hash(&self) -> ExtrinsicHash {
		let roots = (
			hash_encoded(&self.tickets),
			hash_encoded(&self.preimages),
			hash_encoded(&self.guarantees_prehashed()),
			hash_encoded(&self.assurances),
			hash_encoded(&self.disputes),
		);
		hash_encoded(&roots).into()
	}
}

/// A preimage blob together with the service requesting it.
#[derive(Clone, Encode, Decode, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Preimage {
	/// The service on whose behalf the preimage is provided.
	pub requester: ServiceId,
	/// The raw preimage bytes.
	pub blob: Vec<u8>,
}
impl Preimage {
	/// Build a preimage for `requester` from anything convertible to a byte vector.
	pub fn new(requester: ServiceId, blob: impl Into<Vec<u8>>) -> Self {
		let blob = blob.into();
		Self { requester, blob }
	}

	/// Encoded length of a `Preimage` carrying `blob_len` bytes: the
	/// fixed-size requester id, the compact length prefix, and the blob itself.
	pub fn encoded_len(blob_len: usize) -> usize {
		let prefix_len = Compact::<u64>::compact_len(&(blob_len as u64));
		core::mem::size_of::<ServiceId>() + prefix_len + blob_len
	}
}

/// A Work Report along with a guarantee of its correctness from staked sources.
#[derive(Clone, Encode, Decode, Debug, MaxEncodedLen)]
#[cfg_attr(test, derive(PartialEq, Eq))]
pub struct ReportGuarantee {
	/// The Work Report which is being attested.
	pub report: WorkReport,
	/// The slot following production of the report. The validator set and hence the meaning of the
	/// validator indices in `signatures` is inferred from this. The indices must be a subset of
	/// those assigned to `report.core_index` during the slot.
	pub slot: Slot,
	/// The signatures from the guarantors whose message is the hash of the `report`.
	/// The order of the signatures is the same order as the validators appear in
	/// the epochal validator set. At most [`VALS_PER_CORE`] entries.
	pub signatures: GuaranteeSignatures,
}

/// The guarantor signatures for one report, bounded by the number of
/// validators assigned to a core ([`VALS_PER_CORE`]).
pub type GuaranteeSignatures = BoundedVec<ValSignature, ConstU32<{ VALS_PER_CORE as u32 }>>;

/// A guarantor signature tagged with the signer's validator index.
#[derive(Clone, Encode, Decode, Debug, Eq, PartialEq, MaxEncodedLen)]
pub struct ValSignature {
	/// Index of the signing validator.
	pub val_index: ValIndex,
	/// The guarantor's Ed25519 signature.
	pub signature: super::ed25519::Signature,
}

// The SCALE encoding of `ValSignature` has a constant length.
impl ConstEncodedLen for ValSignature {}

/// Secure reference to a Work Package.
#[derive(Clone, Encode, Decode, Debug, Eq, PartialEq, MaxEncodedLen)]
pub struct WorkPackageSpec {
	/// The hash of the Work Package.
	pub hash: WorkPackageHash,
	/// The length in bytes of the Work Bundle.
	pub len: u32,
	/// The erasure root of the Work Bundle and export-segment pieces.
	pub erasure_root: ErasureRoot,
	/// The segment root of the Work Package.
	pub exports_root: SegmentTreeRoot,
	/// The number of segments exported by the Work Package.
	pub exports_count: u16,
}

impl WorkPackageSpec {
	/// The (work-package hash, segment-tree root) pair for this spec.
	pub fn wp_srl(&self) -> (WorkPackageHash, SegmentTreeRoot) {
		let Self { hash, exports_root, .. } = self;
		(*hash, *exports_root)
	}
}

/// Execution report of a Work Package, mainly comprising the Results from the Refinement
/// of its Work Items.
#[derive(Clone, Encode, Decode, Debug, Eq, PartialEq)]
pub struct WorkReport {
	/// The specification of the underlying Work Package.
	pub package_spec: WorkPackageSpec,
	/// The context of the underlying Work Package.
	pub context: RefineContext,
	/// The Core index under which the Work Package was Refined to generate the Report.
	#[codec(compact)]
	pub core_index: CoreIndex,
	/// The authorizer under which this Work Package got executed. For the Work Package to be
	/// validly reported, this must appear in the Core's authorizer queue at the time of reporting.
	pub authorizer_hash: AuthorizerHash,
	/// The amount of gas actually used by the IsAuthorized call.
	#[codec(compact)]
	pub auth_gas_used: UnsignedGas,
	/// The output of the authorizer under which this Work Package got executed.
	pub auth_output: AuthTrace,
	/// The segment-root lookup dictionary, mapping work-package hashes to
	/// their segment-tree roots.
	pub sr_lookup: VecMap<WorkPackageHash, SegmentTreeRoot>,
	/// The results of the evaluation of the Items in the underlying Work Package.
	pub results: BoundedVec<WorkDigest, MaxWorkItems>,
}

impl WorkReport {
	/// Hash of the encoded report.
	pub fn hash(&self) -> WorkReportHash {
		hash_encoded(self).into()
	}

	/// Total accumulate-gas requirement over all work results.
	pub fn gas(&self) -> UnsignedGas {
		self.results.iter().map(|digest| digest.accumulate_gas).sum()
	}

	/// Report dependencies: the segment-root lookup keys followed by the
	/// context prerequisites.
	pub fn deps(&self) -> impl Iterator<Item = &WorkPackageHash> {
		self.sr_lookup.keys().chain(self.context.prerequisites.iter())
	}

	/// Number of report dependencies (segment-root lookup entries plus
	/// context prerequisites).
	pub fn dep_count(&self) -> usize {
		self.sr_lookup.len() + self.context.prerequisites.len()
	}

	/// Whether the elective data — the successful item outputs plus the
	/// authorizer output — fits within the configured limit.
	pub fn check_size(&self) -> bool {
		let outputs: usize = self
			.results
			.iter()
			.flat_map(|digest| digest.result.as_ref().ok())
			.map(|output| output.len())
			.sum();
		outputs + self.auth_output.len() <= max_report_elective_data() as usize
	}

	/// Hash of the underlying work package.
	pub fn package_hash(&self) -> WorkPackageHash {
		self.package_spec.hash
	}
}

// Hand-written rather than derived: `auth_output` and the item results share
// the max_report_elective_data() budget, which a derive would count once per
// field.
impl MaxEncodedLen for WorkReport {
	fn max_encoded_len() -> usize {
		// Sum the per-field maxima, matching the field order of the struct
		// (auth_gas_used accounted for last, which is fine for a sum).
		let mut max = WorkPackageSpec::max_encoded_len() +
			RefineContext::max_encoded_len() +
			codec::Compact::<CoreIndex>::max_encoded_len() +
			AuthorizerHash::max_encoded_len() +
			AuthTrace::max_encoded_len() +
			BoundedVec::<(WorkPackageHash, SegmentTreeRoot), MaxDependencies>::max_encoded_len() + // sr_lookup
			BoundedVec::<WorkDigest, MaxWorkItems>::max_encoded_len() +
			codec::Compact::<UnsignedGas>::max_encoded_len(); // auth_gas_used

		// In the max expression above, the max_report_elective_data() bound is effectively applied
		// separately to auth_output and item results. It actually applies to the combined size of
		// these -- they cannot _all_ have maximum length. Account for this.
		max -= max_work_items() * (max_report_elective_data() as usize);

		max
	}
}

#[cfg(test)]
mod tests {
	use super::*;
	use bounded_collections::TryCollect;
	use jam_types::{max_dependencies, RefineLoad, MAX_PREIMAGE_BLOB_LEN, MAX_PREIMAGE_LEN};

	// Encoding a maximal blob must yield exactly MAX_PREIMAGE_LEN bytes.
	#[test]
	fn max_preimage_blob_len_is_correct() {
		let preimage =
			Preimage { requester: ServiceId::MAX, blob: vec![123_u8; MAX_PREIMAGE_BLOB_LEN] };
		let encoded_len = preimage.encode().len();
		assert_eq!(MAX_PREIMAGE_LEN, encoded_len);
	}

	// Preimage::encoded_len must agree with the actual SCALE encoding across
	// a range of blob sizes (step 997 to cross compact-length boundaries).
	#[test]
	fn preimage_encoded_len_works() {
		for blob_len in (0..MAX_PREIMAGE_BLOB_LEN).step_by(997) {
			let preimage = Preimage { requester: ServiceId::MAX, blob: vec![123_u8; blob_len] };
			let expected_len = Preimage::encoded_len(blob_len);
			let actual_len = preimage.encode().len();
			assert_eq!(expected_len, actual_len);
		}
	}

	// WorkReport::max_encoded_len() must bound (and stay close to) the size
	// of a maximal concrete report.
	#[test]
	fn report_max_encoded_len() {
		let largest_spec = WorkPackageSpec {
			hash: Default::default(),
			len: u32::MAX,
			erasure_root: Default::default(),
			exports_root: Default::default(),
			exports_count: u16::MAX,
		};
		assert_eq!(largest_spec.encoded_size(), WorkPackageSpec::max_encoded_len());

		let max_outputs = 1 + max_work_items(); // AuthTrace + WorkOutputs
		// Split the elective-data budget evenly across outputs, giving the
		// remainder to output 0.
		let output = |i| {
			let mut size = (max_report_elective_data() as usize) / max_outputs;
			if i == 0 {
				size += (max_report_elective_data() as usize) % max_outputs;
			}
			vec![0; size]
		};

		// The largest possible work-report, or at least very close -- it might be possible to get
		// a few bytes more by eg rebalancing the output Vec lengths, as Vec lengths use compact
		// encoding.
		//
		// This is actually a bit larger than the largest valid report as both the prerequisites
		// set and the sr_lookup map have max_dependencies() entries; the max_dependencies() limit
		// is supposed to apply to the combined count. WorkReport::max_encoded_len() doesn't
		// account for this though, so we don't bother accounting for it here either.
		let largest_report = WorkReport {
			package_spec: largest_spec,
			context: RefineContext::largest(),
			core_index: CoreIndex::MAX,
			authorizer_hash: Default::default(),
			auth_output: output(0).into(),
			sr_lookup: (0..max_dependencies())
				.map(|i| ([i as u8; 32].into(), Default::default()))
				.collect(),
			results: (0..max_work_items())
				.map(|i| WorkDigest {
					service: ServiceId::MAX,
					code_hash: Default::default(),
					payload_hash: Default::default(),
					accumulate_gas: UnsignedGas::MAX,
					result: Ok(output(1 + i).into()),
					refine_load: RefineLoad {
						gas_used: UnsignedGas::MAX,
						imports: u16::MAX,
						extrinsic_count: u16::MAX,
						extrinsic_size: u32::MAX,
						exports: u16::MAX,
					},
				})
				.try_collect()
				.unwrap(),
			auth_gas_used: UnsignedGas::MAX,
		};
		let largest_report_size = largest_report.encoded_size();

		// WorkReport::max_encoded_len() assumes each "output" Vec can be the maximum length when
		// calculating how many bytes are needed for encoding Vec lengths in the worst case. The
		// Vecs cannot all be the maximum length though due to the shared nature of
		// max_report_elective_data(). To account for this discrepancy, we allow
		// WorkReport::max_encoded_len() to overshoot largest_report_size by 1 for each such Vec.
		let max = WorkReport::max_encoded_len();
		assert!(largest_report_size <= max);
		assert!((largest_report_size + max_outputs) >= max);
	}
}
}