commonware_consensus/simplex/config.rs
use super::{
    elector::Config as Elector,
    types::{Activity, Context},
};
use crate::{
    types::{Epoch, ViewDelta},
    CertifiableAutomaton, Relay, Reporter,
};
use commonware_cryptography::{certificate::Scheme, Digest};
use commonware_p2p::Blocker;
use commonware_parallel::Strategy;
use commonware_runtime::buffer::PoolRef;
use std::{num::NonZeroUsize, time::Duration};

/// Configuration for the consensus engine.
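///
/// The timing fields are interrelated: `leader_timeout` must not exceed
/// `notarization_timeout`, and `skip_timeout` must not exceed `activity_timeout`
/// (both checked by [`Config::assert`]). A sketch of values that satisfy those
/// invariants (illustrative only, not defaults shipped by this crate):
///
/// ```text
/// leader_timeout:       1s    // wait for the leader's proposal in a view
/// notarization_timeout: 2s    // wait for a notarization quorum before skipping
/// nullify_retry:        10s   // rebroadcast nullify while stuck in a view
/// activity_timeout:     256   // views tracked below the finalized tip
/// skip_timeout:         32    // inactive-leader fast path (<= activity_timeout)
/// ```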
pub struct Config<
    S: Scheme,
    L: Elector<S>,
    B: Blocker<PublicKey = S::PublicKey>,
    D: Digest,
    A: CertifiableAutomaton<Context = Context<D, S::PublicKey>>,
    R: Relay,
    F: Reporter<Activity = Activity<S, D>>,
    T: Strategy,
> {
    /// Signing scheme for the consensus engine.
    ///
    /// Consensus messages may be signed with a cryptosystem that differs from the static
    /// participant identity keys exposed in `participants`. For example, peers can be
    /// authenticated on the network with [commonware_cryptography::ed25519] keys while
    /// votes are signed with shares distributed via [commonware_cryptography::bls12381::dkg]
    /// (which change each epoch). The scheme implementation is responsible for preserving
    /// the exact participant ordering carried by `participants` so that signer indices
    /// remain stable across both key spaces; if the orderings diverge, validators will
    /// reject votes as originating from the wrong validator.
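    ///
    /// For illustration (the key names here are hypothetical), the same index must
    /// refer to the same participant in both key spaces:
    ///
    /// ```text
    /// index:               0        1        2
    /// identity (ed25519):  pk_a     pk_b     pk_c
    /// voting (bls share):  share_0  share_1  share_2
    /// ```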
    pub scheme: S,

    /// Leader election configuration.
    ///
    /// Determines how leaders are selected for each view. Built-in options include
    /// [`RoundRobin`](super::elector::RoundRobin) for deterministic rotation and
    /// [`Random`](super::elector::Random) for unpredictable selection using BLS
    /// threshold signatures.
    pub elector: L,

    /// Blocker for the network.
    ///
    /// Blocking is handled by [commonware_p2p].
    pub blocker: B,

    /// Automaton for the consensus engine.
    pub automaton: A,

    /// Relay for the consensus engine.
    pub relay: R,

    /// Reporter for the consensus engine.
    ///
    /// All activity is exported for downstream applications that benefit from total
    /// observability. Consider wrapping with
    /// [`crate::simplex::scheme::reporter::AttributableReporter`] to automatically
    /// filter and verify activities based on scheme attributability.
    pub reporter: F,

    /// Strategy for parallel operations.
    pub strategy: T,

    /// Partition for the consensus engine.
    pub partition: String,

    /// Maximum number of messages to buffer on channels inside the consensus
    /// engine before blocking.
    pub mailbox_size: usize,

    /// Epoch for the consensus engine. Each running engine should have a unique epoch.
    pub epoch: Epoch,

    /// Number of bytes to buffer when replaying during startup.
    pub replay_buffer: NonZeroUsize,

    /// The size of the write buffer to use for each blob in the journal.
    pub write_buffer: NonZeroUsize,

    /// Buffer pool for the journal.
    pub buffer_pool: PoolRef,

    /// Amount of time to wait for a leader to propose a payload
    /// in a view.
    pub leader_timeout: Duration,

    /// Amount of time to wait for a quorum of notarizations in a view
    /// before attempting to skip the view.
    pub notarization_timeout: Duration,

    /// Amount of time to wait before retrying a nullify broadcast if
    /// stuck in a view.
    pub nullify_retry: Duration,

    /// Number of views behind the finalized tip to track
    /// and persist activity derived from validator messages.
    pub activity_timeout: ViewDelta,

    /// Move to nullify immediately if the selected leader has been inactive
    /// for this many recent known views (views we have no data for are ignored).
    ///
    /// This number must be less than or equal to `activity_timeout` (the number
    /// of views tracked below the finalized tip).
    pub skip_timeout: ViewDelta,

    /// Timeout to wait for a peer to respond to a request.
    pub fetch_timeout: Duration,

    /// Number of requests to make concurrently.
    pub fetch_concurrent: usize,
}

impl<
        S: Scheme,
        L: Elector<S>,
        B: Blocker<PublicKey = S::PublicKey>,
        D: Digest,
        A: CertifiableAutomaton<Context = Context<D, S::PublicKey>>,
        R: Relay,
        F: Reporter<Activity = Activity<S, D>>,
        T: Strategy,
    > Config<S, L, B, D, A, R, F, T>
{
    /// Asserts that all configuration values are valid.
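    ///
    /// A sketch of the intended call site (illustrative only: the concrete scheme,
    /// elector, automaton, relay, and reporter types, and all field values, are
    /// application-specific):
    ///
    /// ```ignore
    /// let config = Config {
    ///     leader_timeout: Duration::from_secs(1),
    ///     notarization_timeout: Duration::from_secs(2), // >= leader_timeout
    ///     nullify_retry: Duration::from_secs(10),
    ///     // ... remaining fields ...
    /// };
    /// config.assert(); // panics if any invariant is violated
    /// ```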
    pub fn assert(&self) {
        assert!(
            !self.scheme.participants().is_empty(),
            "there must be at least one participant"
        );
        assert!(
            self.leader_timeout > Duration::default(),
            "leader timeout must be greater than zero"
        );
        assert!(
            self.notarization_timeout > Duration::default(),
            "notarization timeout must be greater than zero"
        );
        assert!(
            self.leader_timeout <= self.notarization_timeout,
            "leader timeout must be less than or equal to notarization timeout"
        );
        assert!(
            self.nullify_retry > Duration::default(),
            "nullify retry must be greater than zero"
        );
        assert!(
            !self.activity_timeout.is_zero(),
            "activity timeout must be greater than zero"
        );
        assert!(
            !self.skip_timeout.is_zero(),
            "skip timeout must be greater than zero"
        );
        assert!(
            self.skip_timeout <= self.activity_timeout,
            "skip timeout must be less than or equal to activity timeout"
        );
        assert!(
            self.fetch_timeout > Duration::default(),
            "fetch timeout must be greater than zero"
        );
        assert!(
            self.fetch_concurrent > 0,
            "it must be possible to fetch from at least one peer at a time"
        );
    }
}
169}