commonware_consensus/simplex/

use super::types::{Activity, Context};
use crate::{
    simplex::signing_scheme::Scheme,
    types::{Epoch, View},
    Automaton, Relay, Reporter,
};
use commonware_cryptography::{Digest, PublicKey};
use commonware_p2p::Blocker;
use commonware_runtime::buffer::PoolRef;
use governor::Quota;
use std::{num::NonZeroUsize, time::Duration};

/// Configuration for the consensus engine.
pub struct Config<
    P: PublicKey,
    S: Scheme,
    B: Blocker<PublicKey = P>,
    D: Digest,
    A: Automaton<Context = Context<D, P>>,
    R: Relay,
    F: Reporter<Activity = Activity<S, D>>,
> {
    /// Signing scheme for the consensus engine.
    ///
    /// Consensus messages can be signed with a cryptosystem that differs from the static
    /// participant identity keys exposed in `participants`. For example, peers can be
    /// authenticated on the network with [commonware_cryptography::ed25519] keys while votes are
    /// signed with shares distributed via [commonware_cryptography::bls12381::dkg] (which change
    /// each epoch). The scheme implementation is responsible for reusing the exact participant
    /// ordering carried by `participants` so that signer indices remain stable across both key
    /// spaces; if the orderings diverge, votes will be attributed to the wrong validators and
    /// rejected.
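    ///
    /// A minimal sketch of the ordering invariant (`identity_of` and `network_identities` are
    /// hypothetical names used for illustration only):
    ///
    /// ```ignore
    /// // Index i must refer to the same validator in both key spaces.
    /// for (i, signer) in scheme.participants().iter().enumerate() {
    ///     assert_eq!(identity_of(signer), network_identities[i]);
    /// }
    /// ```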
    pub scheme: S,

    /// Blocker for the network.
    ///
    /// Blocking is handled by [commonware_p2p].
    pub blocker: B,

    /// Automaton for the consensus engine.
    pub automaton: A,

    /// Relay for the consensus engine.
    pub relay: R,

    /// Reporter for the consensus engine.
    ///
    /// All activity is exported for downstream applications that benefit from total observability.
    /// Consider wrapping with [`crate::simplex::signing_scheme::reporter::AttributableReporter`] to
    /// automatically filter and verify activities based on scheme attributability.
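    ///
    /// A sketch of such wrapping (the constructor shown is hypothetical; consult
    /// `AttributableReporter` for the actual API):
    ///
    /// ```ignore
    /// // Hypothetical constructor: forwards only verified, attributable activity.
    /// let reporter = AttributableReporter::new(app_reporter);
    /// ```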
    pub reporter: F,

    /// Partition for the consensus engine.
    pub partition: String,

    /// Maximum number of messages to buffer on channels inside the consensus
    /// engine before blocking.
    pub mailbox_size: usize,

    /// Epoch for the consensus engine. Each running engine should have a unique epoch.
    pub epoch: Epoch,

    /// Prefix for all signed messages to prevent replay attacks.
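    ///
    /// Any stable, application-unique byte string works; the value below is illustrative:
    ///
    /// ```ignore
    /// let namespace: Vec<u8> = b"my-chain/simplex".to_vec();
    /// ```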
    pub namespace: Vec<u8>,

    /// Number of bytes to buffer when replaying during startup.
    pub replay_buffer: NonZeroUsize,

    /// The size of the write buffer to use for each blob in the journal.
    pub write_buffer: NonZeroUsize,

    /// Buffer pool for the journal.
    pub buffer_pool: PoolRef,

    /// Amount of time to wait for a leader to propose a payload
    /// in a view.
    pub leader_timeout: Duration,

    /// Amount of time to wait for a quorum of notarizations in a view
    /// before attempting to skip the view.
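    ///
    /// Must be at least `leader_timeout` (enforced by `assert`). Illustrative values only:
    ///
    /// ```ignore
    /// let leader_timeout = Duration::from_secs(1);
    /// let notarization_timeout = Duration::from_secs(2); // >= leader_timeout
    /// ```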
    pub notarization_timeout: Duration,

    /// Amount of time to wait before retrying a nullify broadcast if
    /// stuck in a view.
    pub nullify_retry: Duration,

    /// Number of views behind the finalized tip to track
    /// and persist activity derived from validator messages.
    pub activity_timeout: View,

    /// Move to nullify immediately if the selected leader has been inactive
    /// for this many views.
    ///
    /// This number should be less than or equal to `activity_timeout` (how
    /// many views we are tracking).
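    ///
    /// Illustrative values only (the relationship is enforced by `assert`):
    ///
    /// ```ignore
    /// let activity_timeout: View = 256;
    /// let skip_timeout: View = 32; // <= activity_timeout
    /// ```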
    pub skip_timeout: View,

    /// Amount of time to wait for a peer to respond to a request.
    pub fetch_timeout: Duration,

    /// Maximum number of notarizations/nullifications to request/respond with at once.
    pub max_fetch_count: usize,

    /// Maximum rate of requests to send to a given peer.
    ///
    /// Inbound rate limiting is handled by [commonware_p2p].
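    ///
    /// Quotas come from the [governor] crate; the rate below is illustrative:
    ///
    /// ```ignore
    /// use governor::Quota;
    /// use std::num::NonZeroU32;
    ///
    /// // Allow at most 10 outbound fetch requests per second to each peer.
    /// let fetch_rate_per_peer = Quota::per_second(NonZeroU32::new(10).unwrap());
    /// ```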
    pub fetch_rate_per_peer: Quota,

    /// Maximum number of fetch requests to make concurrently.
    pub fetch_concurrent: usize,
}

impl<
        P: PublicKey,
        S: Scheme,
        B: Blocker<PublicKey = P>,
        D: Digest,
        A: Automaton<Context = Context<D, P>>,
        R: Relay,
        F: Reporter<Activity = Activity<S, D>>,
    > Config<P, S, B, D, A, R, F>
{
    /// Asserts that all configuration values are valid, panicking if any invariant is violated.
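    ///
    /// Usage sketch (construction of `cfg` elided; it depends on application types):
    ///
    /// ```ignore
    /// cfg.assert(); // Panics if any invariant below is violated.
    /// ```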
    pub fn assert(&self) {
        assert!(
            !self.scheme.participants().is_empty(),
            "there must be at least one participant"
        );
        assert!(
            self.leader_timeout > Duration::default(),
            "leader timeout must be greater than zero"
        );
        assert!(
            self.notarization_timeout > Duration::default(),
            "notarization timeout must be greater than zero"
        );
        assert!(
            self.leader_timeout <= self.notarization_timeout,
            "leader timeout must be less than or equal to notarization timeout"
        );
        assert!(
            self.nullify_retry > Duration::default(),
            "nullify retry must be greater than zero"
        );
        assert!(
            self.activity_timeout > 0,
            "activity timeout must be greater than zero"
        );
        assert!(
            self.skip_timeout > 0,
            "skip timeout must be greater than zero"
        );
        assert!(
            self.skip_timeout <= self.activity_timeout,
            "skip timeout must be less than or equal to activity timeout"
        );
        assert!(
            self.fetch_timeout > Duration::default(),
            "fetch timeout must be greater than zero"
        );
        assert!(
            self.max_fetch_count > 0,
            "it must be possible to fetch at least one container per request"
        );
        assert!(
            self.fetch_concurrent > 0,
            "it must be possible to fetch from at least one peer at a time"
        );
    }
}