kona_node_service/actors/sequencer/origin_selector.rs

1//! The [`L1OriginSelector`].
2
3use alloy_primitives::B256;
4use alloy_provider::{Provider, RootProvider};
5use alloy_transport::{RpcError, TransportErrorKind};
6use async_trait::async_trait;
7use kona_genesis::RollupConfig;
8use kona_protocol::{BlockInfo, L2BlockInfo};
9use std::sync::Arc;
10use tokio::sync::watch;
11
/// The [`L1OriginSelector`] is responsible for selecting the L1 origin block based on the
/// current L2 unsafe head's sequence epoch.
#[derive(Debug)]
pub struct L1OriginSelector<P: L1OriginSelectorProvider> {
    /// The [`RollupConfig`], consulted for the L2 block time and max sequencer drift.
    cfg: Arc<RollupConfig>,
    /// The [`L1OriginSelectorProvider`] used to fetch L1 [`BlockInfo`]s by hash or number.
    l1: P,
    /// The L1 origin of the current sequencing epoch, if known.
    current: Option<BlockInfo>,
    /// The next L1 origin (the child of `current`), if it is already available.
    next: Option<BlockInfo>,
}
25
26impl<P: L1OriginSelectorProvider> L1OriginSelector<P> {
27    /// Creates a new [`L1OriginSelector`].
28    pub const fn new(cfg: Arc<RollupConfig>, l1: P) -> Self {
29        Self { cfg, l1, current: None, next: None }
30    }
31
32    /// Returns the current L1 origin.
33    pub const fn current(&self) -> Option<&BlockInfo> {
34        self.current.as_ref()
35    }
36
37    /// Returns the next L1 origin.
38    pub const fn next(&self) -> Option<&BlockInfo> {
39        self.next.as_ref()
40    }
41
42    /// Determines what the next L1 origin block should be, based off of the [`L2BlockInfo`] unsafe
43    /// head.
44    ///
45    /// The L1 origin is selected based off of the sequencing epoch, determined by the next L2
46    /// block's timestamp in relation to the current L1 origin's timestamp. If the next L2
47    /// block's timestamp is greater than the L2 unsafe head's L1 origin timestamp, the L1
48    /// origin is the block following the current L1 origin.
49    pub async fn next_l1_origin(
50        &mut self,
51        unsafe_head: L2BlockInfo,
52        is_recovery_mode: bool,
53    ) -> Result<BlockInfo, L1OriginSelectorError> {
54        self.select_origins(&unsafe_head, is_recovery_mode).await?;
55
56        // Start building on the next L1 origin block if the next L2 block's timestamp is
57        // greater than or equal to the next L1 origin's timestamp.
58        if let Some(next) = self.next {
59            if unsafe_head.block_info.timestamp + self.cfg.block_time >= next.timestamp {
60                return Ok(next);
61            }
62        }
63
64        let Some(current) = self.current else {
65            unreachable!("Current L1 origin should always be set by `select_origins`");
66        };
67
68        let max_seq_drift = self.cfg.max_sequencer_drift(current.timestamp);
69        let past_seq_drift = unsafe_head.block_info.timestamp + self.cfg.block_time -
70            current.timestamp >
71            max_seq_drift;
72
73        // If the sequencer drift has not been exceeded, return the current L1 origin.
74        if !past_seq_drift {
75            return Ok(current);
76        }
77
78        warn!(
79            target: "l1_origin_selector",
80            current_origin_time = current.timestamp,
81            unsafe_head_time = unsafe_head.block_info.timestamp,
82            max_seq_drift,
83            "Next L2 block time is past the sequencer drift"
84        );
85
86        if self
87            .next
88            .map(|n| unsafe_head.block_info.timestamp + self.cfg.block_time < n.timestamp)
89            .unwrap_or(false)
90        {
91            // If the next L1 origin is ahead of the next L2 block's timestamp, return the current
92            // origin.
93            return Ok(current);
94        }
95
96        self.next.ok_or(L1OriginSelectorError::NotEnoughData(current))
97    }
98
99    /// Selects the current and next L1 origin blocks based on the unsafe head.
100    async fn select_origins(
101        &mut self,
102        unsafe_head: &L2BlockInfo,
103        in_recovery_mode: bool,
104    ) -> Result<(), L1OriginSelectorError> {
105        if in_recovery_mode {
106            self.current = self.l1.get_block_by_hash(unsafe_head.l1_origin.hash).await?;
107            self.next = self.l1.get_block_by_number(unsafe_head.l1_origin.number + 1).await?;
108            return Ok(());
109        }
110
111        if self.current.map(|c| c.hash == unsafe_head.l1_origin.hash).unwrap_or(false) {
112            // Do nothing; The next L2 block exists in the same epoch as the current L1 origin.
113        } else if self.next.map(|n| n.hash == unsafe_head.l1_origin.hash).unwrap_or(false) {
114            // Advance the origin.
115            self.current = self.next.take();
116            self.next = None;
117        } else {
118            // Find the current origin block, as it is missing.
119            let current = self.l1.get_block_by_hash(unsafe_head.l1_origin.hash).await?;
120
121            self.current = current;
122            self.next = None;
123        }
124
125        self.try_fetch_next_origin().await
126    }
127
128    /// Attempts to fetch the next L1 origin block.
129    async fn try_fetch_next_origin(&mut self) -> Result<(), L1OriginSelectorError> {
130        // If there is no next L1 origin set, attempt to find it. If it's not yet available, leave
131        // it unset.
132        if let Some(current) = self.current.as_ref() {
133            // If the next L1 origin is already set, do nothing.
134            if self.next.is_some() {
135                return Ok(());
136            }
137
138            // If the next L1 origin is a logical extension of the current L1 chain, set it.
139            //
140            // Ignore the eventuality that the block is not found, as the next L1 origin fetch is
141            // performed on a best-effort basis.
142            let next = self.l1.get_block_by_number(current.number + 1).await?;
143            if next.map(|n| n.parent_hash == current.hash).unwrap_or(false) {
144                self.next = next;
145            }
146        }
147
148        Ok(())
149    }
150}
151
/// An error produced by the [`L1OriginSelector`].
#[derive(Debug, thiserror::Error)]
pub enum L1OriginSelectorError {
    /// A transport-level error produced by the underlying [`RootProvider`] RPC calls.
    #[error(transparent)]
    Provider(#[from] RpcError<TransportErrorKind>),
    /// The L1 provider does not have enough data to select the next L1 origin block.
    /// Carries the current L1 origin at the time of failure.
    #[error(
        "Waiting for more L1 data to be available to select the next L1 origin block. Current L1 origin: {0:?}"
    )]
    NotEnoughData(BlockInfo),
}
164
/// L1 [`BlockInfo`] provider interface for the [`L1OriginSelector`].
#[async_trait]
pub trait L1OriginSelectorProvider {
    /// Returns a [`BlockInfo`] by its hash, or `None` if the block is unknown to the provider.
    async fn get_block_by_hash(
        &self,
        hash: B256,
    ) -> Result<Option<BlockInfo>, L1OriginSelectorError>;

    /// Returns a [`BlockInfo`] by its number, or `None` if the block is not (yet) available.
    async fn get_block_by_number(
        &self,
        number: u64,
    ) -> Result<Option<BlockInfo>, L1OriginSelectorError>;
}
180
/// A wrapper around the [`RootProvider`] that delays the view of the L1 chain by a configurable
/// amount of blocks.
#[derive(Debug)]
pub struct DelayedL1OriginSelectorProvider {
    /// The inner [`RootProvider`] performing the actual RPC lookups.
    inner: RootProvider,
    /// Watch channel holding the most recently observed L1 head, if any.
    l1_head: watch::Receiver<Option<BlockInfo>>,
    /// The confirmation depth to delay the view of the L1 chain: by-number lookups are only
    /// served once the block is at least this many blocks behind the observed L1 head.
    confirmation_depth: u64,
}
192
193impl DelayedL1OriginSelectorProvider {
194    /// Creates a new [`DelayedL1OriginSelectorProvider`].
195    pub const fn new(
196        inner: RootProvider,
197        l1_head: watch::Receiver<Option<BlockInfo>>,
198        confirmation_depth: u64,
199    ) -> Self {
200        Self { inner, l1_head, confirmation_depth }
201    }
202}
203
204#[async_trait]
205impl L1OriginSelectorProvider for DelayedL1OriginSelectorProvider {
206    async fn get_block_by_hash(
207        &self,
208        hash: B256,
209    ) -> Result<Option<BlockInfo>, L1OriginSelectorError> {
210        // By-hash lookups are not delayed, as they're direct indexes.
211        Ok(Provider::get_block_by_hash(&self.inner, hash).await?.map(Into::into))
212    }
213
214    async fn get_block_by_number(
215        &self,
216        number: u64,
217    ) -> Result<Option<BlockInfo>, L1OriginSelectorError> {
218        let Some(l1_head) = *self.l1_head.borrow() else {
219            // If the L1 head is not available, do not enforce a confirmation delay.
220            return Ok(Provider::get_block_by_number(&self.inner, number.into())
221                .await?
222                .map(Into::into));
223        };
224
225        if number == 0 ||
226            self.confirmation_depth == 0 ||
227            number + self.confirmation_depth <= l1_head.number
228        {
229            Ok(Provider::get_block_by_number(&self.inner, number.into()).await?.map(Into::into))
230        } else {
231            Ok(None)
232        }
233    }
234}
235
236#[cfg(test)]
237mod test {
238    use super::*;
239    use alloy_eips::NumHash;
240    use rstest::rstest;
241    use std::collections::HashSet;
242
    /// A mock [`L1OriginSelectorProvider`] with a local set of [`BlockInfo`]s available.
    #[derive(Default, Debug, Clone)]
    struct MockOriginSelectorProvider {
        // The blocks the mock can serve, searched linearly by hash or number.
        blocks: HashSet<BlockInfo>,
    }
248
    impl MockOriginSelectorProvider {
        /// Registers `block` so the mock provider can serve it by hash or number.
        ///
        /// NOTE(review): despite the `with_` prefix this mutates in place rather than
        /// consuming `self` builder-style; renaming would touch all call sites.
        pub(crate) fn with_block(&mut self, block: BlockInfo) {
            self.blocks.insert(block);
        }
    }
255
256    #[async_trait]
257    impl L1OriginSelectorProvider for MockOriginSelectorProvider {
258        async fn get_block_by_hash(
259            &self,
260            hash: B256,
261        ) -> Result<Option<BlockInfo>, L1OriginSelectorError> {
262            Ok(self.blocks.iter().find(|b| b.hash == hash).copied())
263        }
264
265        async fn get_block_by_number(
266            &self,
267            number: u64,
268        ) -> Result<Option<BlockInfo>, L1OriginSelectorError> {
269            Ok(self.blocks.iter().find(|b| b.number == number).copied())
270        }
271    }
272
    /// Exercises origin selection across one or many full sequencing epochs, verifying that
    /// the selector advances the L1 origin exactly when the next L2 block's timestamp crosses
    /// an epoch boundary.
    #[tokio::test]
    #[rstest]
    #[case::single_epoch(1)]
    #[case::many_epochs(12)]
    async fn test_next_l1_origin_several_epochs(#[case] num_epochs: usize) {
        // Assume an L1 slot time of 12 seconds.
        const L1_SLOT_TIME: u64 = 12;
        // Assume an L2 block time of 2 seconds.
        const L2_BLOCK_TIME: u64 = 2;

        // Initialize the rollup configuration with a block time of 2 seconds and a sequencer drift
        // of 600 seconds.
        let cfg = Arc::new(RollupConfig {
            block_time: L2_BLOCK_TIME,
            max_sequencer_drift: 600,
            ..Default::default()
        });

        // Initialize the provider with mock L1 blocks, equal to the number of epochs + 1
        // (such that the next logical origin is always available.)
        let mut provider = MockOriginSelectorProvider::default();
        for i in 0..num_epochs + 1 {
            // Block `i` hashes to `i` in its last byte; the parent hash links to block
            // `i - 1` (saturating, so genesis's parent points at itself).
            provider.with_block(BlockInfo {
                parent_hash: B256::with_last_byte(i.saturating_sub(1) as u8),
                hash: B256::with_last_byte(i as u8),
                number: i as u64,
                timestamp: i as u64 * L1_SLOT_TIME,
            });
        }

        let mut selector = L1OriginSelector::new(cfg.clone(), provider);

        // Ensure all L1 origin blocks are produced correctly for each L2 block within all available
        // epochs.
        for i in 0..(num_epochs as u64 * (L1_SLOT_TIME / cfg.block_time)) {
            // The epoch the unsafe head (L2 block `i`) belongs to.
            let current_epoch = (i * cfg.block_time) / L1_SLOT_TIME;
            let unsafe_head = L2BlockInfo {
                block_info: BlockInfo {
                    hash: B256::ZERO,
                    number: i,
                    timestamp: i * cfg.block_time,
                    ..Default::default()
                },
                l1_origin: NumHash {
                    number: current_epoch,
                    hash: B256::with_last_byte(current_epoch as u8),
                },
                seq_num: 0,
            };
            let next = selector.next_l1_origin(unsafe_head, false).await.unwrap();

            // The expected L1 origin block is the one corresponding to the epoch of the *next*
            // L2 block (`i + 1`), i.e. the block that will be built on top of the unsafe head.
            let expected_epoch = ((i + 1) * cfg.block_time) / L1_SLOT_TIME;
            assert_eq!(next.hash, B256::with_last_byte(expected_epoch as u8));
            assert_eq!(next.number, expected_epoch);
        }
    }
331
332    #[tokio::test]
333    #[rstest]
334    #[case::not_available(false)]
335    #[case::is_available(true)]
336    async fn test_next_l1_origin_next_maybe_available(#[case] next_l1_origin_available: bool) {
337        // Assume an L2 block time of 2 seconds.
338        const L2_BLOCK_TIME: u64 = 2;
339
340        // Initialize the rollup configuration with a block time of 2 seconds and a sequencer drift
341        // of 600 seconds.
342        let cfg = Arc::new(RollupConfig {
343            block_time: L2_BLOCK_TIME,
344            max_sequencer_drift: 600,
345            ..Default::default()
346        });
347
348        // Initialize the provider with a single L1 block.
349        let mut provider = MockOriginSelectorProvider::default();
350        provider.with_block(BlockInfo {
351            parent_hash: B256::ZERO,
352            hash: B256::ZERO,
353            number: 0,
354            timestamp: 0,
355        });
356
357        if next_l1_origin_available {
358            // If the next L1 origin is available, add it to the provider.
359            provider.with_block(BlockInfo {
360                parent_hash: B256::ZERO,
361                hash: B256::with_last_byte(1),
362                number: 1,
363                timestamp: cfg.block_time,
364            });
365        }
366
367        let mut selector = L1OriginSelector::new(cfg.clone(), provider);
368
369        let current_epoch = 0;
370        let unsafe_head = L2BlockInfo {
371            block_info: BlockInfo {
372                hash: B256::ZERO,
373                number: 5,
374                timestamp: 5 * cfg.block_time,
375                ..Default::default()
376            },
377            l1_origin: NumHash {
378                number: current_epoch,
379                hash: B256::with_last_byte(current_epoch as u8),
380            },
381            seq_num: 0,
382        };
383        let next = selector.next_l1_origin(unsafe_head, false).await.unwrap();
384
385        // The expected L1 origin block is the one corresponding to the epoch of the current L2
386        // block. Assuming the next L1 origin block is not available from the eyes of the
387        // provider (_and_ it is not past the sequencer drift), the current L1 origin block
388        // will be re-used.
389        let expected_epoch =
390            if next_l1_origin_available { current_epoch + 1 } else { current_epoch };
391        assert_eq!(next.hash, B256::with_last_byte(expected_epoch as u8));
392        assert_eq!(next.number, expected_epoch);
393    }
394
    /// Exercises the past-sequencer-drift paths: once the next L2 block's timestamp exceeds the
    /// drift, the origin must advance unless the next L1 block is ahead of the L2 chain (keep
    /// the current origin) or missing entirely (`NotEnoughData`).
    #[tokio::test]
    #[rstest]
    #[case::next_not_available(false, false)]
    #[case::next_available_but_behind(true, false)]
    #[case::next_available_and_ahead(true, true)]
    async fn test_next_l1_origin_next_past_seq_drift(
        #[case] next_available: bool,
        #[case] next_ahead_of_unsafe: bool,
    ) {
        // Assume an L2 block time of 2 seconds.
        const L2_BLOCK_TIME: u64 = 2;

        // Initialize the rollup configuration with a block time of 2 seconds and a sequencer drift
        // of 600 seconds.
        let cfg = Arc::new(RollupConfig {
            block_time: L2_BLOCK_TIME,
            max_sequencer_drift: 600,
            ..Default::default()
        });

        // Initialize the provider with a single L1 block (genesis, the current origin).
        let mut provider = MockOriginSelectorProvider::default();
        provider.with_block(BlockInfo {
            parent_hash: B256::ZERO,
            hash: B256::ZERO,
            number: 0,
            timestamp: 0,
        });

        if next_available {
            // If the next L1 origin is to be available, add it to the provider. For the "ahead"
            // case its timestamp (drift + 2 * block_time) exceeds the next L2 block's timestamp
            // (drift + block_time); otherwise it sits behind at `block_time`.
            provider.with_block(BlockInfo {
                parent_hash: B256::ZERO,
                hash: B256::with_last_byte(1),
                number: 1,
                timestamp: if next_ahead_of_unsafe {
                    cfg.max_sequencer_drift + cfg.block_time * 2
                } else {
                    cfg.block_time
                },
            });
        }

        let mut selector = L1OriginSelector::new(cfg.clone(), provider);

        // The unsafe head sits exactly at the drift bound, so the next L2 block
        // (timestamp + block_time) is past the max sequencer drift.
        let current_epoch = 0;
        let unsafe_head = L2BlockInfo {
            block_info: BlockInfo { timestamp: cfg.max_sequencer_drift, ..Default::default() },
            l1_origin: NumHash {
                number: current_epoch,
                hash: B256::with_last_byte(current_epoch as u8),
            },
            seq_num: 0,
        };

        if next_available {
            if next_ahead_of_unsafe {
                // If the next L1 origin is available and ahead of the unsafe head, the L1 origin
                // should not change.
                let next = selector.next_l1_origin(unsafe_head, false).await.unwrap();
                assert_eq!(next.hash, B256::ZERO);
                assert_eq!(next.number, 0);
            } else {
                // If the next L1 origin is available and behind the unsafe head, the L1 origin
                // should advance.
                let next = selector.next_l1_origin(unsafe_head, false).await.unwrap();
                assert_eq!(next.hash, B256::with_last_byte(1));
                assert_eq!(next.number, 1);
            }
        } else {
            // If we're past the sequencer drift, and the next L1 block is not available, a
            // `NotEnoughData` error should be returned signifying that we cannot
            // proceed with the next L1 origin until the block is present.
            let next_err = selector.next_l1_origin(unsafe_head, false).await.unwrap_err();
            assert!(matches!(next_err, L1OriginSelectorError::NotEnoughData(_)));
        }
    }
472}