commonware_consensus/simplex/config.rs

use super::{
    elector::Config as Elector,
    types::{Activity, Context},
};
use crate::{
    types::{Epoch, ViewDelta},
    CertifiableAutomaton, Relay, Reporter,
};
use commonware_cryptography::{certificate::Scheme, Digest};
use commonware_p2p::Blocker;
use commonware_runtime::buffer::PoolRef;
use std::{num::NonZeroUsize, time::Duration};

/// Configuration for the consensus engine.
pub struct Config<
    S: Scheme,
    L: Elector<S>,
    B: Blocker<PublicKey = S::PublicKey>,
    D: Digest,
    A: CertifiableAutomaton<Context = Context<D, S::PublicKey>>,
    R: Relay,
    F: Reporter<Activity = Activity<S, D>>,
> {
    /// Signing scheme for the consensus engine.
    ///
    /// Consensus messages can be signed with a cryptosystem that differs from the static
    /// participant identity keys exposed in `participants`. For example, we can authenticate peers
    /// on the network with [commonware_cryptography::ed25519] keys while signing votes with shares
    /// distributed via [commonware_cryptography::bls12381::dkg] (which change each epoch). The
    /// scheme implementation is responsible for reusing the exact participant ordering carried by
    /// `participants` so that signer indices remain stable across both key spaces; if the order
    /// diverges, validators will reject votes as coming from the wrong validator.
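    ///
    /// A minimal sketch of the ordering requirement (the types below are
    /// hypothetical, not this crate's confirmed API):
    ///
    /// ```ignore
    /// // Index `i` must identify the same validator in both key spaces.
    /// let identities: Vec<Ed25519PublicKey> = participants.clone(); // network identity, index i
    /// let vote_shares: Vec<Bls12381Share> = dkg_output.shares;      // per-epoch vote keys, index i
    /// assert_eq!(identities.len(), vote_shares.len());
    /// ```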
    pub scheme: S,

    /// Leader election configuration.
    ///
    /// Determines how leaders are selected for each view. Built-in options include
    /// [`RoundRobin`](super::elector::RoundRobin) for deterministic rotation and
    /// [`Random`](super::elector::Random) for unpredictable selection using BLS
    /// threshold signatures.
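    ///
    /// A minimal sketch (the constructors below are hypothetical, not this
    /// crate's confirmed API):
    ///
    /// ```ignore
    /// let elector = RoundRobin::default(); // rotate deterministically through participants
    /// // let elector = Random::new(seeder); // unpredictable, seeded by BLS threshold signatures
    /// ```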
    pub elector: L,

    /// Blocker for the network.
    ///
    /// Blocking is handled by [commonware_p2p].
    pub blocker: B,

    /// Automaton for the consensus engine.
    pub automaton: A,

    /// Relay for the consensus engine.
    pub relay: R,

    /// Reporter for the consensus engine.
    ///
    /// All activity is exported for downstream applications that benefit from total observability.
    /// Consider wrapping with [`crate::simplex::scheme::reporter::AttributableReporter`] to
    /// automatically filter and verify activities based on scheme attributability.
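    ///
    /// A minimal sketch (the constructor below is hypothetical, not this
    /// crate's confirmed API):
    ///
    /// ```ignore
    /// // Forward only attributable, verified activities to the inner reporter.
    /// let reporter = AttributableReporter::new(my_reporter);
    /// ```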
    pub reporter: F,

    /// Partition for the consensus engine.
    pub partition: String,

    /// Maximum number of messages to buffer on channels inside the consensus
    /// engine before blocking.
    pub mailbox_size: usize,

    /// Epoch for the consensus engine. Each running engine should have a unique epoch.
    pub epoch: Epoch,

    /// Prefix for all signed messages to prevent replay attacks.
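    ///
    /// A minimal sketch (the literal below is an arbitrary example, not a
    /// required value): any byte string unique to the application works.
    ///
    /// ```
    /// let namespace: Vec<u8> = b"_MY_APP_SIMPLEX".to_vec();
    /// ```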
    pub namespace: Vec<u8>,

    /// Number of bytes to buffer when replaying during startup.
    pub replay_buffer: NonZeroUsize,

    /// The size of the write buffer to use for each blob in the journal.
    pub write_buffer: NonZeroUsize,

    /// Buffer pool for the journal.
    pub buffer_pool: PoolRef,

    /// Amount of time to wait for a leader to propose a payload
    /// in a view.
    pub leader_timeout: Duration,

    /// Amount of time to wait for a quorum of notarizations in a view
    /// before attempting to skip the view.
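    ///
    /// A minimal sketch of the invariant enforced by [`Config::assert`]
    /// (the values below are arbitrary examples):
    ///
    /// ```
    /// use std::time::Duration;
    /// let leader_timeout = Duration::from_secs(1);
    /// let notarization_timeout = Duration::from_secs(2);
    /// assert!(leader_timeout <= notarization_timeout);
    /// ```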
    pub notarization_timeout: Duration,

    /// Amount of time to wait before retrying a nullify broadcast if
    /// stuck in a view.
    pub nullify_retry: Duration,

    /// Number of views behind finalized tip to track
    /// and persist activity derived from validator messages.
    pub activity_timeout: ViewDelta,

    /// Move to nullify immediately if the selected leader has been inactive
    /// for this many recent known views (we ignore views we don't have data for).
    ///
    /// This number should be less than or equal to `activity_timeout` (how
    /// many views we are tracking below the finalized tip).
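    ///
    /// A minimal sketch (the values and `From` conversions below are
    /// hypothetical): skip after a fraction of the tracked window.
    ///
    /// ```ignore
    /// cfg.activity_timeout = 256.into(); // views tracked behind the finalized tip
    /// cfg.skip_timeout = 64.into();      // must be <= activity_timeout
    /// ```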
    pub skip_timeout: ViewDelta,

    /// Timeout to wait for a peer to respond to a request.
    pub fetch_timeout: Duration,

    /// Number of concurrent requests to make at once.
    pub fetch_concurrent: usize,
}

impl<
        S: Scheme,
        L: Elector<S>,
        B: Blocker<PublicKey = S::PublicKey>,
        D: Digest,
        A: CertifiableAutomaton<Context = Context<D, S::PublicKey>>,
        R: Relay,
        F: Reporter<Activity = Activity<S, D>>,
    > Config<S, L, B, D, A, R, F>
{
    /// Asserts that all configuration values are valid, panicking on the first
    /// violated invariant.
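    ///
    /// A minimal sketch, assuming `cfg` is a fully constructed [`Config`]:
    ///
    /// ```ignore
    /// cfg.assert(); // panics if any invariant is violated
    /// ```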
    pub fn assert(&self) {
        assert!(
            !self.scheme.participants().is_empty(),
            "there must be at least one participant"
        );
        assert!(
            self.leader_timeout > Duration::default(),
            "leader timeout must be greater than zero"
        );
        assert!(
            self.notarization_timeout > Duration::default(),
            "notarization timeout must be greater than zero"
        );
        assert!(
            self.leader_timeout <= self.notarization_timeout,
            "leader timeout must be less than or equal to notarization timeout"
        );
        assert!(
            self.nullify_retry > Duration::default(),
            "nullify retry must be greater than zero"
        );
        assert!(
            !self.activity_timeout.is_zero(),
            "activity timeout must be greater than zero"
        );
        assert!(
            !self.skip_timeout.is_zero(),
            "skip timeout must be greater than zero"
        );
        assert!(
            self.skip_timeout <= self.activity_timeout,
            "skip timeout must be less than or equal to activity timeout"
        );
        assert!(
            self.fetch_timeout > Duration::default(),
            "fetch timeout must be greater than zero"
        );
        assert!(
            self.fetch_concurrent > 0,
            "it must be possible to fetch from at least one peer at a time"
        );
    }
}