// jam_std_common/telemetry.rs

1use crate::{
2	AvailabilityStatement, Block, BoundedString, EpochIndex, ErasureRoot, PeerAddr, PeerDetails,
3	PeerId, ReportGuarantee, TicketId, WorkReport,
4};
5use bounded_collections::{BoundedVec, ConstU32, TryCollect};
6use codec::{Decode, Encode, MaxEncodedLen};
7use jam_types::{
8	CoreCount, CoreIndex, FixedVec, Hash, HeaderHash, ImportSpec, MaxDependencies,
9	MaxImportSegments, MaxImports, MaxWorkItems, ProtocolParameters, SegmentTreeRoot, ServiceId,
10	Slot, TicketAttempt, TicketsAttemptsNumber, UnsignedGas, ValIndex, WorkItem, WorkPackageHash,
11	WorkReportHash, JAM_COMMON_ERA, VALS_PER_CORE,
12};
13use std::{
14	ops::AddAssign,
15	time::{Duration, SystemTime, UNIX_EPOCH},
16};
17
/// Telemetry protocol version, should be incremented when the protocol changes.
pub const PROTOCOL_VERSION: u8 = 0;

/// Human-readable explanation attached to failure/close/discard events.
/// Bounded (max 128) so every event has a statically-known maximum encoded size.
pub type Reason = BoundedString<128>;

/// Microseconds since the beginning of the Jam "Common Era".
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Encode, Decode, MaxEncodedLen)]
pub struct Timestamp(pub u64);
26
27impl Timestamp {
28	pub fn now() -> Self {
29		Self::from(SystemTime::now())
30	}
31}
32
33impl std::fmt::Debug for Timestamp {
34	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
35		self.0.fmt(f)
36	}
37}
38
39impl From<SystemTime> for Timestamp {
40	fn from(time: SystemTime) -> Self {
41		let jam_ce = UNIX_EPOCH + Duration::from_secs(JAM_COMMON_ERA);
42		let since_jam_ce = time.duration_since(jam_ce).unwrap_or(Duration::ZERO);
43		Self(since_jam_ce.as_micros() as u64)
44	}
45}
46
47impl From<Timestamp> for SystemTime {
48	fn from(timestamp: Timestamp) -> Self {
49		let jam_ce = UNIX_EPOCH + Duration::from_secs(JAM_COMMON_ERA);
50		jam_ce + Duration::from_micros(timestamp.0)
51	}
52}
53
/// Each event sent over a connection is implicitly given an ID:
///
/// - The first event is given ID 0.
/// - The event immediately following an event E is given the ID of event E plus N, where N is the
///   number of dropped events if E is a "dropped" event, or 1 otherwise.
///
/// See [`Event::next_id`] for the computation of the successor ID.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Encode, Decode, MaxEncodedLen)]
pub struct EventId(pub u64);
61
62impl std::fmt::Debug for EventId {
63	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
64		self.0.fmt(f)
65	}
66}
67
68impl std::fmt::Display for EventId {
69	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
70		self.0.fmt(f)
71	}
72}
73
/// Which end of a connection an event refers to.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen)]
pub enum ConnectionSide {
	/// The node reporting the event.
	Local = 0,
	/// The peer at the other end of the connection.
	Remote = 1,
}

/// Index of an erasure-coded shard.
pub type ShardIndex = u16;
82
/// Compact summary of a block for telemetry: sizes and extrinsic counts only.
#[derive(Clone, Debug, Encode, Decode, MaxEncodedLen)]
pub struct BlockOutline {
	/// Size in bytes.
	pub size: u32,
	/// Hash of the block's header.
	pub hash: HeaderHash,
	/// Number of tickets in the extrinsic.
	pub num_tickets: u32,
	/// Number of preimages in the extrinsic.
	pub num_preimages: u32,
	/// Total size of preimages in bytes.
	pub preimages_size: u32,
	/// Number of report guarantees in the extrinsic.
	pub num_guarantees: u32,
	/// Number of availability assurances in the extrinsic.
	pub num_assurances: u32,
	/// Number of dispute verdicts in the extrinsic.
	pub num_dispute_verdicts: u32,
}
96
97impl From<(&Block, HeaderHash)> for BlockOutline {
98	fn from((block, hash): (&Block, HeaderHash)) -> Self {
99		Self {
100			size: block.encoded_size() as u32,
101			hash,
102			num_tickets: block.extrinsic.tickets.len() as u32,
103			num_preimages: block.extrinsic.preimages.len() as u32,
104			preimages_size: block.extrinsic.preimages.iter().map(|p| p.blob.len() as u32).sum(),
105			num_guarantees: block.extrinsic.guarantees.len() as u32,
106			num_assurances: block.extrinsic.assurances.len() as u32,
107			num_dispute_verdicts: block.extrinsic.disputes.verdicts.len() as u32,
108		}
109	}
110}
111
112impl From<&Block> for BlockOutline {
113	fn from(block: &Block) -> Self {
114		(block, block.header.hash()).into()
115	}
116}
117
/// Direction of a block request, relative to the requested header hash.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen)]
pub enum BlockRequestDirection {
	AscendingExclusive = 0,
	DescendingInclusive = 1,
}

/// Cost of an execution, in both gas and wall-clock terms.
#[derive(Clone, Debug, Default, Encode, Decode, MaxEncodedLen)]
pub struct ExecCost {
	/// Gas used.
	pub gas: UnsignedGas,
	/// Elapsed wall-clock time in nanoseconds.
	pub ns: u64,
}
131
132impl AddAssign for ExecCost {
133	fn add_assign(&mut self, rhs: Self) {
134		self.gas += rhs.gas;
135		self.ns += rhs.ns;
136	}
137}
138
/// Cost breakdown of an `is_authorized` execution.
#[derive(Clone, Debug, Default, Encode, Decode, MaxEncodedLen)]
pub struct IsAuthorizedCost {
	pub total: ExecCost,
	/// Time taken to load and compile the code, in nanoseconds.
	///
	/// Note that this is included in `total.ns`.
	pub load_ns: u64,
	/// Total cost of host calls.
	pub host_call: ExecCost,
}

impl AddAssign for IsAuthorizedCost {
	// NOTE(review): `load_ns` is not accumulated here — presumably intentional
	// (the same pattern appears in the other cost types in this file), but
	// confirm before relying on summed `load_ns` values.
	fn add_assign(&mut self, rhs: Self) {
		self.total += rhs.total;
		self.host_call += rhs.host_call;
	}
}
156
/// Breakdown of host-call costs during refinement.
#[derive(Clone, Debug, Default, Encode, Decode, MaxEncodedLen)]
pub struct RefineHostCallCost {
	/// Total cost of `historical_lookup` calls.
	pub lookup: ExecCost,
	/// Total cost of `machine`/`expunge` calls.
	pub vm: ExecCost,
	/// Total cost of `peek`/`poke`/`pages` calls.
	pub mem: ExecCost,
	/// Total cost of `invoke` calls.
	pub invoke: ExecCost,
	/// Total cost of all other host calls.
	pub other: ExecCost,
}
170
171impl AddAssign for RefineHostCallCost {
172	fn add_assign(&mut self, rhs: Self) {
173		self.lookup += rhs.lookup;
174		self.vm += rhs.vm;
175		self.mem += rhs.mem;
176		self.invoke += rhs.invoke;
177		self.other += rhs.other;
178	}
179}
180
/// Cost breakdown of a `refine` execution for a single work item.
#[derive(Clone, Debug, Default, Encode, Decode, MaxEncodedLen)]
pub struct RefineCost {
	pub total: ExecCost,
	/// Time taken to load and compile the code, in nanoseconds.
	///
	/// Note that this is included in `total.ns`.
	pub load_ns: u64,
	/// Breakdown of host-call costs.
	pub host_call: RefineHostCallCost,
}

impl AddAssign for RefineCost {
	// NOTE(review): `load_ns` is not accumulated here — presumably intentional
	// (matches the other cost types in this file); confirm before relying on
	// summed `load_ns` values.
	fn add_assign(&mut self, rhs: Self) {
		self.total += rhs.total;
		self.host_call += rhs.host_call;
	}
}

/// Per-work-item refine costs, one entry per item in the package.
pub type RefineCosts = BoundedVec<RefineCost, MaxWorkItems>;
199
/// Breakdown of host-call costs during accumulation.
#[derive(Clone, Debug, Default, Encode, Decode, MaxEncodedLen)]
pub struct AccumulateHostCallCost {
	/// Total cost of `read`/`write` calls.
	pub state: ExecCost,
	/// Total cost of `lookup` calls.
	pub lookup: ExecCost,
	/// Total cost of `query`/`solicit`/`forget`/`provide` calls.
	pub preimage: ExecCost,
	/// Total cost of `info`/`new`/`upgrade`/`eject` calls.
	pub service: ExecCost,
	/// Total cost of `transfer` calls.
	pub transfer: ExecCost,
	/// Total gas charged for transfer processing by destination services.
	pub transfer_dest_gas: UnsignedGas,
	/// Total cost of all other host calls.
	pub other: ExecCost,
}
217
218impl AddAssign for AccumulateHostCallCost {
219	fn add_assign(&mut self, rhs: Self) {
220		self.state += rhs.state;
221		self.lookup += rhs.lookup;
222		self.preimage += rhs.preimage;
223		self.service += rhs.service;
224		self.transfer += rhs.transfer;
225		self.transfer_dest_gas += rhs.transfer_dest_gas;
226		self.other += rhs.other;
227	}
228}
229
/// Cost breakdown of accumulation for a single service.
#[derive(Clone, Debug, Default, Encode, Decode, MaxEncodedLen)]
pub struct AccumulateCost {
	/// Number of `accumulate` calls.
	pub num_calls: u32,
	/// Number of transfers processed.
	pub num_transfers: u32,
	/// Number of work items accumulated.
	pub num_items: u32,
	pub total: ExecCost,
	/// Time taken to load and compile the code, in nanoseconds.
	///
	/// Note that this is included in `total.ns`.
	pub load_ns: u64,
	pub host_call: AccumulateHostCallCost,
}

impl AddAssign for AccumulateCost {
	// NOTE(review): `load_ns` is not accumulated here — presumably intentional
	// (matches the other cost types in this file); confirm before relying on
	// summed `load_ns` values.
	fn add_assign(&mut self, rhs: Self) {
		self.num_calls += rhs.num_calls;
		self.num_transfers += rhs.num_transfers;
		self.num_items += rhs.num_items;
		self.total += rhs.total;
		self.host_call += rhs.host_call;
	}
}

/// Per-service accumulate costs for a block.
pub type AccumulateCosts = BoundedVec<(ServiceId, AccumulateCost), ConstU32<500>>;
257
/// Identifies a segment needed by a work-package.
///
/// On the wire this is packed into a single `u16` with the top bit as the
/// variant tag; see the manual `Encode`/`Decode` impls below.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ImportSegmentId {
	/// The segment with the given index in the overall list of imports.
	Import(u16),
	/// The proof page for the segment with the given index in the overall list of imports.
	ProofPage(u16),
}
266
267impl ImportSegmentId {
268	pub fn import(self) -> Option<u16> {
269		match self {
270			Self::Import(index) => Some(index),
271			Self::ProofPage(_) => None,
272		}
273	}
274}
275
276impl Encode for ImportSegmentId {
277	fn size_hint(&self) -> usize {
278		2
279	}
280
281	fn encode_to<O: codec::Output + ?Sized>(&self, output: &mut O) {
282		let index = match *self {
283			Self::Import(index) => {
284				debug_assert!(index < (1 << 15));
285				index
286			},
287			Self::ProofPage(index) => {
288				debug_assert!(index < (1 << 15));
289				index + (1 << 15)
290			},
291		};
292		index.encode_to(output);
293	}
294}
295
296impl Decode for ImportSegmentId {
297	fn decode<I: codec::Input>(input: &mut I) -> Result<Self, codec::Error> {
298		let index = u16::decode(input)?;
299		if (index & (1 << 15)) == 0 {
300			Ok(Self::Import(index))
301		} else {
302			Ok(Self::ProofPage(index & !(1 << 15)))
303		}
304	}
305
306	fn encoded_fixed_size() -> Option<usize> {
307		u16::encoded_fixed_size()
308	}
309}
310
311impl MaxEncodedLen for ImportSegmentId {
312	fn max_encoded_len() -> usize {
313		u16::max_encoded_len()
314	}
315}
316
/// Compact summary of a single work item: sizes, gas limits and segment references.
#[derive(Clone, Debug, Encode, Decode, MaxEncodedLen)]
pub struct WorkItemOutline {
	/// Service the item belongs to.
	pub service: ServiceId,
	/// Size of the payload in bytes.
	pub payload_size: u32,
	pub refine_gas_limit: UnsignedGas,
	pub accumulate_gas_limit: UnsignedGas,
	/// Sum of extrinsic lengths.
	pub extrinsic_size: u32,
	/// Import segment specifications.
	pub imports: BoundedVec<ImportSpec, MaxImports>,
	/// Number of exported segments.
	pub num_exports: u16,
}
328
329impl From<&WorkItem> for WorkItemOutline {
330	fn from(item: &WorkItem) -> Self {
331		Self {
332			service: item.service,
333			payload_size: item.payload.len() as u32,
334			refine_gas_limit: item.refine_gas_limit,
335			accumulate_gas_limit: item.accumulate_gas_limit,
336			extrinsic_size: item.extrinsic_size(),
337			imports: item.import_segments.clone(),
338			num_exports: item.export_count,
339		}
340	}
341}
342
/// Compact summary of a work-package for telemetry.
#[derive(Clone, Debug, Encode, Decode, MaxEncodedLen)]
pub struct WorkPackageOutline {
	/// Size in bytes, excluding extrinsic data.
	pub size: u32,
	/// Hash of the work-package.
	pub hash: WorkPackageHash,
	/// Anchor block header hash.
	pub anchor: HeaderHash,
	/// Slot of the lookup-anchor block.
	pub lookup_anchor_slot: Slot,
	/// Hashes of prerequisite work-packages.
	pub prerequisites: BoundedVec<WorkPackageHash, MaxDependencies>,
	/// Outline of each work item in the package.
	pub items: BoundedVec<WorkItemOutline, MaxWorkItems>,
}

/// Compact summary of a work report for telemetry.
#[derive(Clone, Debug, Encode, Decode, MaxEncodedLen)]
pub struct WorkReportOutline {
	/// Hash of the work report.
	pub hash: WorkReportHash,
	/// Size of bundle in bytes.
	pub bundle_size: u32,
	/// Erasure root from the package specification.
	pub erasure_root: ErasureRoot,
	/// Exported-segments tree root from the package specification.
	pub exports_root: SegmentTreeRoot,
}
362
363impl From<&WorkReport> for WorkReportOutline {
364	fn from(report: &WorkReport) -> Self {
365		Self {
366			hash: report.hash(),
367			bundle_size: report.package_spec.len,
368			erasure_root: report.package_spec.erasure_root,
369			exports_root: report.package_spec.exports_root,
370		}
371	}
372}
373
/// Compact summary of a report guarantee for telemetry.
#[derive(Clone, Debug, Encode, Decode, MaxEncodedLen)]
pub struct GuaranteeOutline {
	/// Hash of the guaranteed work report.
	pub report_hash: WorkReportHash,
	/// Slot the guarantee is for.
	pub slot: Slot,
	/// Validator indices of the guarantors (one per signature).
	pub guarantors: BoundedVec<ValIndex, ConstU32<{ VALS_PER_CORE as u32 }>>,
}
380
381impl From<&ReportGuarantee> for GuaranteeOutline {
382	fn from(guarantee: &ReportGuarantee) -> Self {
383		Self {
384			report_hash: guarantee.report.hash(),
385			slot: guarantee.slot,
386			guarantors: guarantee
387				.signatures
388				.iter()
389				.map(|signature| signature.val_index)
390				.try_collect()
391				.expect("Output and input bounds are the same"),
392		}
393	}
394}
395
/// Reason a guarantee was discarded (see the `GuaranteeDiscarded` event).
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen)]
pub enum GuaranteeDiscardReason {
	PackageReportedOnChain = 0,
	Superseded = 1,
	CannotReportOnChain = 2,
	TooMany = 3,
	Other = 4,
}
405
406#[repr(u8)]
407#[derive(Clone, Copy, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen)]
408pub enum ReconstructionKind {
409	/// A non-trivial reconstruction, involving at least some recovery shards.
410	NonTrivial,
411	/// A trivial reconstruction, using only original-data shards.
412	Trivial,
413}
414
/// Reason an announced preimage was forgotten (see the `AnnouncedPreimageForgotten` event).
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen)]
pub enum AnnouncedPreimageForgetReason {
	ProvidedOnChain = 0,
	NotRequestedOnChain = 1,
	FailedToAcquire = 2,
	TooMany = 3,
	BadLength = 4,
	Other = 5,
}

/// Reason a pooled preimage was discarded (see the `PreimageDiscarded` event).
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen)]
pub enum PreimageDiscardReason {
	ProvidedOnChain = 0,
	NotRequestedOnChain = 1,
	TooMany = 2,
	Other = 3,
}
434
/// `NodeInfo` flag indicating that the node uses a PVM recompiler rather than an interpreter.
pub const NODE_USES_PVM_RECOMPILER: u32 = 1 << 0;

/// Static information about the node, sent once per telemetry connection.
#[derive(Clone, Debug, Encode, Decode, MaxEncodedLen)]
pub struct NodeInfo {
	/// Protocol parameters the node is using.
	pub params: ProtocolParameters,
	/// Genesis header hash.
	pub genesis: HeaderHash,
	/// Peer ID and external address of the node.
	pub details: PeerDetails,
	/// Bit flags; see [`NODE_USES_PVM_RECOMPILER`].
	pub flags: u32,
	/// Name of the node implementation, eg "PolkaJam".
	pub impl_name: BoundedString<32>,
	/// Version of the node implementation, eg "1.0".
	pub impl_version: BoundedString<32>,
	/// Gray Paper version implemented by the node, eg "0.7.1".
	pub gp_version: BoundedString<16>,
	/// Freeform note with additional information about the node.
	pub note: BoundedString<512>,
}
456
/// Expands to a plain `pub struct` with all fields public, forwarding any
/// attributes (including doc comments) on the struct and its fields. Used by
/// `events!` below to generate one payload struct per event.
macro_rules! event_struct {
	($(#[$attr:meta])* $name:ident { $($(#[$field_attr:meta])* $field_name:ident: $field_type:ty,)+ }) => {
		$(#[$attr])*
		#[derive(Clone, Debug, Encode, Decode, MaxEncodedLen)]
		pub struct $name {
			$($(#[$field_attr])* pub $field_name: $field_type,)+
		}
	};
}

/// Declares the full telemetry event set: generates one payload struct per
/// event (inside the `event` module), the `Event` enum with explicit `u8`
/// discriminants, and a `Debug` impl that delegates to the payload struct.
macro_rules! events {
	($($(#[$attr:meta])* $name:ident = $discriminator:literal $fields:tt)+) => {
		pub mod event {
			use super::*;
			$(event_struct! { $(#[$attr])* $name $fields })+
		}

		#[repr(u8)]
		#[derive(Clone, Encode, Decode, MaxEncodedLen)]
		pub enum Event {
			$($name(event::$name) = $discriminator,)+
		}

		impl std::fmt::Debug for Event {
			fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
				match self {
					$(Self::$name(event) => event.fmt(f),)+
				}
			}
		}
	};
}
489
// The telemetry event set. Discriminants are grouped by subsystem (0: meta,
// 10+: node status, 20+: peer connections, 40+: block authoring/import,
// 60+: block sync, 80+: tickets, 90+: work-package guaranteeing,
// 120+: availability shards and assurances, 140+: bundle recovery,
// 160+: import segments, 190+: preimages); the gaps leave room for new
// events without renumbering.
events! {
	Dropped = 0 {
		/// Timestamp of the last dropped event.
		last_timestamp: Timestamp,
		/// Number of dropped events.
		num: u64,
	}

	// 10-13: periodic node status.
	Status = 10 {
		/// Total number of peers.
		num_peers: u32,
		/// Number of validator peers.
		num_val_peers: u32,
		/// Number of peers with a block announcement stream open.
		num_sync_peers: u32,
		/// Number of guarantees in pool, by core.
		num_guarantees: FixedVec<u8, CoreCount>,
		/// Number of shards in availability store.
		num_shards: u32,
		/// Total size of shards in availability store, in bytes.
		shards_size: u64,
		/// Number of preimages in the pool, ready to be included in a block.
		num_preimages: u32,
		/// Total size of preimages in the pool, in bytes.
		preimages_size: u32,
	}

	BestBlockChanged = 11 {
		slot: Slot,
		hash: HeaderHash,
	}

	FinalizedBlockChanged = 12 {
		slot: Slot,
		hash: HeaderHash,
	}

	SyncStatusChanged = 13 {
		synced: bool,
	}

	// 20-28: peer connections.
	ConnectionRefused = 20 {
		from: PeerAddr,
	}

	ConnectingIn = 21 {
		from: PeerAddr,
	}

	ConnectInFailed = 22 {
		connecting_id: EventId,
		reason: Reason,
	}

	ConnectedIn = 23 {
		connecting_id: EventId,
		peer_id: PeerId,
	}

	ConnectingOut = 24 {
		to: PeerDetails,
	}

	ConnectOutFailed = 25 {
		connecting_id: EventId,
		reason: Reason,
	}

	ConnectedOut = 26 {
		connecting_id: EventId,
	}

	Disconnected = 27 {
		peer: PeerId,
		/// Which side terminated the connection? May be `None` in case of eg timeout.
		terminator: Option<ConnectionSide>,
		reason: Reason,
	}

	PeerMisbehaved = 28 {
		peer: PeerId,
		reason: Reason,
	}

	// 40-47: block authoring and import.
	Authoring = 40 {
		slot: Slot,
		parent: HeaderHash,
	}

	AuthoringFailed = 41 {
		authoring_id: EventId,
		reason: Reason,
	}

	Authored = 42 {
		authoring_id: EventId,
		outline: BlockOutline,
	}

	Importing = 43 {
		slot: Slot,
		outline: BlockOutline,
	}

	BlockVerificationFailed = 44 {
		importing_id: EventId,
		reason: Reason,
	}

	BlockVerified = 45 {
		importing_id: EventId,
	}

	BlockExecutionFailed = 46 {
		authoring_or_importing_id: EventId,
		reason: Reason,
	}

	BlockExecuted = 47 {
		authoring_or_importing_id: EventId,
		accumulate_costs: AccumulateCosts,
	}

	// 60-68: block announcements and block requests.
	BlockAnnouncementStreamOpened = 60 {
		peer: PeerId,
		/// Which side opened the stream?
		opener: ConnectionSide,
	}

	BlockAnnouncementStreamClosed = 61 {
		peer: PeerId,
		/// Which side closed the stream?
		closer: ConnectionSide,
		reason: Reason,
	}

	BlockAnnounced = 62 {
		peer: PeerId,
		/// Which side announced the block?
		announcer: ConnectionSide,
		slot: Slot,
		hash: HeaderHash,
	}

	SendingBlockRequest = 63 {
		recipient: PeerId,
		hash: HeaderHash,
		direction: BlockRequestDirection,
		max_blocks: u32,
	}

	ReceivingBlockRequest = 64 {
		sender: PeerId,
	}

	BlockRequestFailed = 65 {
		/// ID of the corresponding `SendingBlockRequest` or `ReceivingBlockRequest` event.
		request_id: EventId,
		reason: Reason,
	}

	BlockRequestSent = 66 {
		/// ID of the corresponding `SendingBlockRequest`.
		request_id: EventId,
	}

	BlockRequestReceived = 67 {
		/// ID of the corresponding `ReceivingBlockRequest`.
		request_id: EventId,
		hash: HeaderHash,
		direction: BlockRequestDirection,
		max_blocks: u32,
	}

	BlockTransferred = 68 {
		/// ID of the corresponding `SendingBlockRequest` or `ReceivingBlockRequest` event.
		request_id: EventId,
		slot: Slot,
		outline: BlockOutline,
		/// Was this the last block for the request?
		last: bool,
	}

	// 80-84: ticket generation and distribution.
	GeneratingTickets = 80 {
		epoch: EpochIndex,
	}

	TicketGenerationFailed = 81 {
		/// ID of the corresponding `GeneratingTickets` event.
		generating_id: EventId,
		reason: Reason,
	}

	TicketsGenerated = 82 {
		/// ID of the corresponding `GeneratingTickets` event.
		generating_id: EventId,
		/// VRF outputs.
		ids: BoundedVec<TicketId, TicketsAttemptsNumber>,
	}

	TicketTransferFailed = 83 {
		peer: PeerId,
		/// Which side tried to send a ticket?
		sender: ConnectionSide,
		/// Was this a transfer from the proxy to a potential block author? If `false`, this was a
		/// transfer from the ticket creator to the proxy.
		from_proxy: bool,
		reason: Reason,
	}

	TicketTransferred = 84 {
		peer: PeerId,
		/// Which side sent a ticket?
		sender: ConnectionSide,
		/// Was this a transfer from the proxy to a potential block author? If `false`, this was a
		/// transfer from the ticket creator to the proxy.
		from_proxy: bool,
		/// The epoch the ticket is to be used in.
		epoch: EpochIndex,
		/// Ticket attempt number.
		attempt: TicketAttempt,
		/// VRF output.
		id: TicketId,
	}

	// 90-113: work-package submission, refinement and guaranteeing.
	WorkPackageSubmission = 90 {
		builder: PeerId,
		bundle: bool,
	}

	WorkPackageBeingShared = 91 {
		primary: PeerId,
	}

	WorkPackageFailed = 92 {
		/// ID of the corresponding `WorkPackageSubmission` or `WorkPackageBeingShared` event.
		submission_or_share_id: EventId,
		reason: Reason,
	}

	DuplicateWorkPackage = 93 {
		/// ID of the corresponding `WorkPackageSubmission` or `WorkPackageBeingShared` event.
		submission_or_share_id: EventId,
		core: CoreIndex,
		hash: WorkPackageHash,
	}

	WorkPackageReceived = 94 {
		/// ID of the corresponding `WorkPackageSubmission` or `WorkPackageBeingShared` event.
		submission_or_share_id: EventId,
		core: CoreIndex,
		outline: WorkPackageOutline,
	}

	Authorized = 95 {
		/// ID of the corresponding `WorkPackageSubmission` or `WorkPackageBeingShared` event.
		submission_or_share_id: EventId,
		cost: IsAuthorizedCost,
	}

	ExtrinsicDataReceived = 96 {
		/// ID of the corresponding `WorkPackageSubmission` or `WorkPackageBeingShared` event.
		submission_or_share_id: EventId,
	}

	ImportsReceived = 97 {
		/// ID of the corresponding `WorkPackageSubmission` or `WorkPackageBeingShared` event.
		submission_or_share_id: EventId,
	}

	SharingWorkPackage = 98 {
		/// ID of the corresponding `WorkPackageSubmission` event.
		submission_id: EventId,
		secondary: PeerId,
	}

	WorkPackageSharingFailed = 99 {
		/// ID of the corresponding `WorkPackageSubmission` event.
		submission_id: EventId,
		secondary: PeerId,
		reason: Reason,
	}

	BundleSent = 100 {
		/// ID of the corresponding `WorkPackageSubmission` event.
		submission_id: EventId,
		secondary: PeerId,
	}

	Refined = 101 {
		/// ID of the corresponding `WorkPackageSubmission` or `WorkPackageBeingShared` event.
		submission_or_share_id: EventId,
		/// Cost of `refine` call for each work item.
		costs: RefineCosts,
	}

	WorkReportBuilt = 102 {
		/// ID of the corresponding `WorkPackageSubmission` or `WorkPackageBeingShared` event.
		submission_or_share_id: EventId,
		outline: WorkReportOutline,
	}

	WorkReportSignatureSent = 103 {
		/// ID of the corresponding `WorkPackageBeingShared` event.
		share_id: EventId,
	}

	WorkReportSignatureReceived = 104 {
		/// ID of the corresponding `WorkPackageSubmission` event.
		submission_id: EventId,
		secondary: PeerId,
	}

	GuaranteeBuilt = 105 {
		/// ID of the corresponding `WorkPackageSubmission` event.
		submission_id: EventId,
		outline: GuaranteeOutline,
	}

	SendingGuarantee = 106 {
		/// ID of the corresponding `GuaranteeBuilt` event.
		built_id: EventId,
		recipient: PeerId,
	}

	GuaranteeSendFailed = 107 {
		sending_id: EventId,
		reason: Reason,
	}

	GuaranteeSent = 108 {
		sending_id: EventId,
	}

	GuaranteesDistributed = 109 {
		/// ID of the corresponding `WorkPackageSubmission` event.
		submission_id: EventId,
	}

	ReceivingGuarantee = 110 {
		sender: PeerId,
	}

	GuaranteeReceiveFailed = 111 {
		receiving_id: EventId,
		reason: Reason,
	}

	GuaranteeReceived = 112 {
		receiving_id: EventId,
		outline: GuaranteeOutline,
	}

	GuaranteeDiscarded = 113 {
		outline: GuaranteeOutline,
		reason: GuaranteeDiscardReason,
	}

	// 120-131: availability shards and assurances.
	SendingShardRequest = 120 {
		guarantor: PeerId,
		erasure_root: ErasureRoot,
		shard: ShardIndex,
	}

	ReceivingShardRequest = 121 {
		assurer: PeerId,
	}

	ShardRequestFailed = 122 {
		/// ID of the corresponding `SendingShardRequest` or `ReceivingShardRequest` event.
		request_id: EventId,
		reason: Reason,
	}

	ShardRequestSent = 123 {
		/// ID of the corresponding `SendingShardRequest`.
		request_id: EventId,
	}

	ShardRequestReceived = 124 {
		/// ID of the corresponding `ReceivingShardRequest`.
		request_id: EventId,
		erasure_root: ErasureRoot,
		shard: ShardIndex,
	}

	ShardsTransferred = 125 {
		/// ID of the corresponding `SendingShardRequest` or `ReceivingShardRequest` event.
		request_id: EventId,
	}

	DistributingAssurance = 126 {
		statement: AvailabilityStatement,
	}

	AssuranceSendFailed = 127 {
		/// ID of the corresponding `DistributingAssurance` event.
		distributing_id: EventId,
		recipient: PeerId,
		reason: Reason,
	}

	AssuranceSent = 128 {
		/// ID of the corresponding `DistributingAssurance` event.
		distributing_id: EventId,
		recipient: PeerId,
	}

	AssuranceDistributed = 129 {
		/// ID of the corresponding `DistributingAssurance` event.
		distributing_id: EventId,
	}

	AssuranceReceiveFailed = 130 {
		sender: PeerId,
		reason: Reason,
	}

	AssuranceReceived = 131 {
		sender: PeerId,
		/// Assurance anchor.
		anchor: HeaderHash,
	}

	// 140-153: bundle shard and bundle recovery (auditing).
	SendingBundleShardRequest = 140 {
		audit_id: EventId,
		assurer: PeerId,
		shard: ShardIndex,
	}

	ReceivingBundleShardRequest = 141 {
		auditor: PeerId,
	}

	BundleShardRequestFailed = 142 {
		/// ID of the corresponding `SendingBundleShardRequest` or `ReceivingBundleShardRequest`
		/// event.
		request_id: EventId,
		reason: Reason,
	}

	BundleShardRequestSent = 143 {
		/// ID of the corresponding `SendingBundleShardRequest`.
		request_id: EventId,
	}

	BundleShardRequestReceived = 144 {
		/// ID of the corresponding `ReceivingBundleShardRequest`.
		request_id: EventId,
		erasure_root: ErasureRoot,
		shard: ShardIndex,
	}

	BundleShardTransferred = 145 {
		/// ID of the corresponding `SendingBundleShardRequest` or `ReceivingBundleShardRequest`
		/// event.
		request_id: EventId,
	}

	ReconstructingBundle = 146 {
		audit_id: EventId,
		kind: ReconstructionKind,
	}

	BundleReconstructed = 147 {
		audit_id: EventId,
	}

	SendingBundleRequest = 148 {
		audit_id: EventId,
		guarantor: PeerId,
	}

	ReceivingBundleRequest = 149 {
		auditor: PeerId,
	}

	BundleRequestFailed = 150 {
		/// ID of the corresponding `SendingBundleRequest` or `ReceivingBundleRequest` event.
		request_id: EventId,
		reason: Reason,
	}

	BundleRequestSent = 151 {
		/// ID of the corresponding `SendingBundleRequest` event.
		request_id: EventId,
	}

	BundleRequestReceived = 152 {
		/// ID of the corresponding `ReceivingBundleRequest` event.
		request_id: EventId,
		erasure_root: ErasureRoot,
	}

	BundleTransferred = 153 {
		/// ID of the corresponding `SendingBundleRequest` or `ReceivingBundleRequest` event.
		request_id: EventId,
	}

	// 160-178: import-segment lookup, transfer and reconstruction.
	WorkPackageHashMapped = 160 {
		/// ID of the corresponding `WorkPackageSubmission` event.
		submission_id: EventId,
		work_package_hash: WorkPackageHash,
		segments_root: SegmentTreeRoot,
	}

	SegmentsRootMapped = 161 {
		/// ID of the corresponding `WorkPackageSubmission` event.
		submission_id: EventId,
		segments_root: SegmentTreeRoot,
		erasure_root: ErasureRoot,
	}

	SendingSegmentShardRequest = 162 {
		/// ID of the corresponding `WorkPackageSubmission` event.
		submission_id: EventId,
		assurer: PeerId,
		/// Are proofs of the segment shards being requested?
		proofs: bool,
		/// Segment shards being requested.
		shards: BoundedVec<(ImportSegmentId, ShardIndex), MaxImportSegments>,
	}

	ReceivingSegmentShardRequest = 163 {
		sender: PeerId,
		/// Have proofs of the segment shards been requested?
		proofs: bool,
	}

	SegmentShardRequestFailed = 164 {
		/// ID of the corresponding `SendingSegmentShardRequest` or `ReceivingSegmentShardRequest`
		/// event.
		request_id: EventId,
		reason: Reason,
	}

	SegmentShardRequestSent = 165 {
		/// ID of the corresponding `SendingSegmentShardRequest` event.
		request_id: EventId,
	}

	SegmentShardRequestReceived = 166 {
		/// ID of the corresponding `ReceivingSegmentShardRequest` event.
		request_id: EventId,
		/// Number of segment shards requested.
		num: u16,
	}

	SegmentShardsTransferred = 167 {
		/// ID of the corresponding `SendingSegmentShardRequest` or `ReceivingSegmentShardRequest`
		/// event.
		request_id: EventId,
	}

	ReconstructingSegments = 168 {
		/// ID of the corresponding `WorkPackageSubmission` event.
		submission_id: EventId,
		segments: BoundedVec<ImportSegmentId, MaxImportSegments>,
		kind: ReconstructionKind,
	}

	SegmentReconstructionFailed = 169 {
		/// ID of the corresponding `ReconstructingSegments` event.
		reconstructing_id: EventId,
		reason: Reason,
	}

	SegmentsReconstructed = 170 {
		/// ID of the corresponding `ReconstructingSegments` event.
		reconstructing_id: EventId,
	}

	SegmentVerificationFailed = 171 {
		/// ID of the corresponding `WorkPackageSubmission` event.
		submission_id: EventId,
		segments: BoundedVec<u16, MaxImports>,
		reason: Reason,
	}

	SegmentsVerified = 172 {
		/// ID of the corresponding `WorkPackageSubmission` event.
		submission_id: EventId,
		segments: BoundedVec<u16, MaxImports>,
	}

	SendingSegmentRequest = 173 {
		/// ID of the corresponding `WorkPackageSubmission` event.
		submission_id: EventId,
		prev_guarantor: PeerId,
		segments: BoundedVec<u16, MaxImportSegments>,
	}

	ReceivingSegmentRequest = 174 {
		guarantor: PeerId,
	}

	SegmentRequestFailed = 175 {
		/// ID of the corresponding `SendingSegmentRequest` or `ReceivingSegmentRequest` event.
		request_id: EventId,
		reason: Reason,
	}

	SegmentRequestSent = 176 {
		/// ID of the corresponding `SendingSegmentRequest` event.
		request_id: EventId,
	}

	SegmentRequestReceived = 177 {
		/// ID of the corresponding `ReceivingSegmentRequest` event.
		request_id: EventId,
		/// Number of segments requested.
		num: u16,
	}

	SegmentsTransferred = 178 {
		/// ID of the corresponding `SendingSegmentRequest` or `ReceivingSegmentRequest` event.
		request_id: EventId,
	}

	// 190-199: preimage announcement, transfer and discard.
	PreimageAnnouncementFailed = 190 {
		peer: PeerId,
		/// Which side tried to announce a preimage?
		announcer: ConnectionSide,
		reason: Reason,
	}

	PreimageAnnounced = 191 {
		peer: PeerId,
		/// Which side announced a preimage?
		announcer: ConnectionSide,
		/// ID of requesting service.
		service: ServiceId,
		hash: Hash,
		length: u32,
	}

	AnnouncedPreimageForgotten = 192 {
		service: ServiceId,
		hash: Hash,
		length: u32,
		reason: AnnouncedPreimageForgetReason,
	}

	SendingPreimageRequest = 193 {
		recipient: PeerId,
		hash: Hash,
	}

	ReceivingPreimageRequest = 194 {
		sender: PeerId,
	}

	PreimageRequestFailed = 195 {
		/// ID of the corresponding `SendingPreimageRequest` or `ReceivingPreimageRequest` event.
		request_id: EventId,
		reason: Reason,
	}

	PreimageRequestSent = 196 {
		/// ID of the corresponding `SendingPreimageRequest`.
		request_id: EventId,
	}

	PreimageRequestReceived = 197 {
		/// ID of the corresponding `ReceivingPreimageRequest`.
		request_id: EventId,
		hash: Hash,
	}

	PreimageTransferred = 198 {
		/// ID of the corresponding `SendingPreimageRequest` or `ReceivingPreimageRequest` event.
		request_id: EventId,
		length: u32,
	}

	PreimageDiscarded = 199 {
		hash: Hash,
		length: u32,
		reason: PreimageDiscardReason,
	}
}
1171
1172impl Event {
1173	/// Returns the ID of the following event.
1174	///
1175	/// `id` should be the ID of this event.
1176	pub fn next_id(&self, id: EventId) -> EventId {
1177		let inc = match self {
1178			Self::Dropped(dropped) => dropped.num,
1179			_ => 1,
1180		};
1181		EventId(id.0 + inc)
1182	}
1183}
1184
/// An event paired with the time at which it occurred.
#[derive(Clone, Debug, Encode, Decode, MaxEncodedLen)]
pub struct TimestampedEvent {
	/// When the event occurred, in microseconds since the Jam Common Era.
	pub timestamp: Timestamp,
	pub event: Event,
}