//! evm-dex-pool 1.2.2
//!
//! Reusable EVM DEX pool implementations (UniswapV2, UniswapV3, ERC4626)
//! with traits and math.
use crate::collector::config::PoolFetchConfig;
use crate::collector::event_processor::{fetch_events_with_retry, PendingEvent};
use crate::collector::event_queue::EventQueue;
use crate::collector::metrics::CollectorMetrics;
use crate::collector::pool_fetcher::{fetch_pool, identify_pool_type};
use crate::collector::unified_pool_updater::{UnifiedPoolUpdater, UpdaterMode};
use crate::collector::websocket_listener::WebsocketListener;
use crate::collector::CollectorConfig;
use crate::{PoolInterface, PoolRegistry, PoolType, TokenInfo};
use alloy::eips::{BlockId, BlockNumberOrTag};
use alloy::primitives::Address;
use alloy::providers::Provider;
use alloy::rpc::types::Log;
use anyhow::Result;
use futures_util::future::join_all;
use log::{error, info, warn};
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use tokio::sync::{mpsc, oneshot};
use tokio::time::Duration;

/// Handle for a running collector. Returned by [`start_collector`](super::start_collector).
///
/// Provides [`stop`](CollectorHandle::stop) to halt the collector,
/// [`add_pools`](CollectorHandle::add_pools) to register new pool addresses at
/// runtime while keeping all pool states consistent, and
/// [`remove_pools`](CollectorHandle::remove_pools) to drop addresses from the
/// registry without restarting.
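///
/// # Examples
///
/// A minimal lifecycle sketch; the [`start_collector`](super::start_collector)
/// arguments are elided here:
///
/// ```ignore
/// let mut handle = start_collector(/* provider, registry, config, ... */).await?;
///
/// // ... react to factory events, add/remove pools as needed ...
///
/// // Shut down: stops WS listeners first, then awaits the updater task.
/// handle.stop().await;
/// ```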
pub struct CollectorHandle<P: Provider + Send + Sync + Clone + 'static> {
    cancel_tx: Option<oneshot::Sender<()>>,
    /// JoinHandle for the running updater task. Awaited in `stop_updater` to
    /// guarantee `last_processed_block` is fully committed before it is read.
    updater_handle: Option<tokio::task::JoinHandle<()>>,

    // WS-specific (empty in HTTP / PendingBlock mode)
    ws_listeners: Vec<Arc<WebsocketListener>>,
    ws_urls: Vec<String>,

    // Stored for collector restart
    provider: Arc<P>,
    pool_registry: Arc<PoolRegistry>,
    metrics: Option<Arc<dyn CollectorMetrics>>,
    swap_event_tx: Option<mpsc::Sender<PendingEvent>>,
    collector_config: CollectorConfig,
}

impl<P: Provider + Send + Sync + Clone + 'static> CollectorHandle<P> {
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn new(
        cancel_tx: oneshot::Sender<()>,
        updater_handle: tokio::task::JoinHandle<()>,
        ws_listeners: Vec<Arc<WebsocketListener>>,
        ws_urls: Vec<String>,
        provider: Arc<P>,
        pool_registry: Arc<PoolRegistry>,
        metrics: Option<Arc<dyn CollectorMetrics>>,
        swap_event_tx: Option<mpsc::Sender<PendingEvent>>,
        collector_config: CollectorConfig,
    ) -> Self {
        Self {
            cancel_tx: Some(cancel_tx),
            updater_handle: Some(updater_handle),
            ws_listeners,
            ws_urls,
            provider,
            pool_registry,
            metrics,
            swap_event_tx,
            collector_config,
        }
    }

    /// Stop the running collector (updater loop and all WS listeners).
    pub async fn stop(&mut self) {
        // Stop WS listeners first so the EventQueue channel stays open while
        // we wait for the updater task to drain and exit. Stopping the updater
        // first would drop the EventQueue receiver (closing the channel) before
        // the listeners are told to stop, causing "channel closed" error spam.
        for listener in &self.ws_listeners {
            if let Err(e) = listener.stop().await {
                error!("Error stopping WS listener: {}", e);
            }
        }
        self.ws_listeners.clear();
        self.stop_updater().await;
    }

    /// Dynamically add new pool addresses to the running collector.
    ///
    /// `block_number` is the block at which the pools were detected (e.g. the
    /// block where the pool factory emitted a creation event). If the collector's
    /// `last_processed_block` is behind `block_number`, existing registry pools
    /// are caught up to `block_number` first, then the new pools are fetched at
    /// that block.
    ///
    /// Pools already present in the registry are silently skipped.
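    ///
    /// # Examples
    ///
    /// A sketch of reacting to a factory creation event; `creation_log`,
    /// `fetch_config`, and `token_info` stand in for caller state:
    ///
    /// ```ignore
    /// let pool_addr: Address = /* decoded from the factory event */;
    /// let creation_block = creation_log.block_number.expect("mined log");
    /// handle
    ///     .add_pools(vec![pool_addr], creation_block, &fetch_config, &token_info)
    ///     .await?;
    /// ```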
    pub async fn add_pools<T: TokenInfo>(
        &mut self,
        new_addresses: Vec<Address>,
        block_number: u64,
        fetch_config: &PoolFetchConfig,
        token_info: &T,
    ) -> Result<()> {
        if new_addresses.is_empty() {
            return Ok(());
        }

        // Skip addresses already present in the registry
        let addresses: Vec<Address> = new_addresses
            .into_iter()
            .filter(|a| self.pool_registry.get_pool(a).is_none())
            .collect();

        if addresses.is_empty() {
            info!(
                "[Chain {}] add_pools: all requested pools already in registry",
                fetch_config.chain_id
            );
            return Ok(());
        }

        if self.collector_config.use_websocket {
            self.add_pools_ws(&addresses, block_number, fetch_config, token_info)
                .await
        } else {
            self.add_pools_http(&addresses, block_number, fetch_config, token_info)
                .await
        }
    }

    /// Remove pool addresses from the registry.
    ///
    /// The collector continues running — events for removed pools are
    /// silently skipped by the `EventProcessor`. In RPC modes the
    /// address filter narrows on the next batch; in WebSocket mode the
    /// subscription remains unchanged (events arrive but are ignored).
    ///
    /// Returns the number of pools actually removed (addresses not in
    /// the registry are silently skipped).
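    ///
    /// # Examples
    ///
    /// A sketch; `stale_pool_a` and `stale_pool_b` are placeholder addresses:
    ///
    /// ```ignore
    /// let removed = handle.remove_pools(&[stale_pool_a, stale_pool_b]);
    /// info!("dropped {} pools", removed);
    /// ```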
    pub fn remove_pools(&self, addresses: &[Address]) -> usize {
        let chain_id = self.pool_registry.get_network_id();
        let mut removed = 0usize;
        for addr in addresses {
            if self.pool_registry.remove_pool(addr).is_some() {
                removed += 1;
            }
        }
        if removed > 0 {
            info!(
                "[Chain {}] remove_pools: removed {} of {} requested pools ({} remaining)",
                chain_id,
                removed,
                addresses.len(),
                self.pool_registry.pool_count(),
            );
        }
        removed
    }

    // -------------------------------------------------------------------------
    // HTTP / PendingBlock mode
    // -------------------------------------------------------------------------

    async fn add_pools_http<T: TokenInfo>(
        &mut self,
        addresses: &[Address],
        block_number: u64,
        fetch_config: &PoolFetchConfig,
        token_info: &T,
    ) -> Result<()> {
        let chain_id = fetch_config.chain_id;

        // 1. Determine fetch block: use the caller's block_number if it is
        //    ahead of last_processed_block (the pool may not exist at the
        //    stale cursor on low-activity networks).
        let last_block = self.pool_registry.get_last_processed_block();
        let fetch_block = block_number.max(last_block);
        info!(
            "[Chain {}] add_pools: fetching {} pools in memory at block {} (last_processed={}, requested={})",
            chain_id,
            addresses.len(),
            fetch_block,
            last_block,
            block_number
        );

        // 2. Fetch pool state into memory — NOT into the registry yet.
        //    The running collector excludes these addresses, so no race on events.
        let mut pools = fetch_pools_in_memory(
            &self.provider,
            addresses,
            BlockId::Number(BlockNumberOrTag::Number(fetch_block)),
            token_info,
            fetch_config,
        )
        .await?;

        info!(
            "[Chain {}] add_pools: fetched {} pools, stopping collector",
            chain_id,
            pools.len()
        );

        // 3. Stop the updater and wait for it to exit. last_processed_block is now stable.
        self.stop_updater().await;
        let stop_block = self.pool_registry.get_last_processed_block();

        // 4. Catch up as needed so all pools share the same block cursor.
        //    - fetch_block > stop_block: existing registry pools need to advance
        //    - stop_block > fetch_block: new in-memory pools need to advance
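        //    E.g. fetch_block=120, stop_block=100: replay events 101..=120 into
        //    the registry pools; fetch_block=100, stop_block=120: replay events
        //    101..=120 into the in-memory pools. final_block = max of the two.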
        if fetch_block > stop_block {
            info!(
                "[Chain {}] add_pools: catching up existing pools blocks {}..={}",
                chain_id,
                stop_block + 1,
                fetch_block
            );
            catchup_registry_to_block(
                &self.provider,
                &self.pool_registry,
                stop_block + 1,
                fetch_block,
                chain_id,
            )
            .await?;
        }

        let final_block = fetch_block.max(stop_block);
        if final_block > fetch_block {
            info!(
                "[Chain {}] add_pools: catching up new pools blocks {}..={}",
                chain_id,
                fetch_block + 1,
                final_block
            );
            apply_catchup_events_in_memory(
                &self.provider,
                &mut pools,
                self.pool_registry.get_topics(),
                fetch_block + 1,
                final_block,
                chain_id,
            )
            .await?;
        }

        // 5. Register pools and any new event topics.
        register_pools_and_topics(&self.pool_registry, pools);
        self.pool_registry.set_last_processed_block(final_block);

        info!(
            "[Chain {}] add_pools: restarting collector from block {}",
            chain_id, final_block
        );

        // 6. Restart the updater from final_block.
        let mode = if self.collector_config.use_pending_blocks {
            UpdaterMode::PendingBlock
        } else {
            UpdaterMode::LatestBlock {
                wait_time_ms: self.collector_config.wait_time,
            }
        };
        self.spawn_updater(mode, final_block);

        info!(
            "[Chain {}] add_pools: done — {} new pools now active",
            chain_id,
            addresses.len()
        );
        Ok(())
    }

    // -------------------------------------------------------------------------
    // WebSocket mode
    // -------------------------------------------------------------------------

    async fn add_pools_ws<T: TokenInfo>(
        &mut self,
        addresses: &[Address],
        block_number: u64,
        fetch_config: &PoolFetchConfig,
        token_info: &T,
    ) -> Result<()> {
        let chain_id = fetch_config.chain_id;

        // 1. Stop old WS listeners first so the EventQueue channel stays open
        //    while we wait for the updater to drain and exit (step 2 below).
        //    Stopping the updater first would drop the EventQueue receiver,
        //    closing the channel and causing "channel closed" error spam from
        //    listeners that haven't been told to stop yet.
        for listener in &self.ws_listeners {
            if let Err(e) = listener.stop().await {
                error!("[Chain {}] Error stopping WS listener: {}", chain_id, e);
            }
        }
        self.ws_listeners.clear();

        // 2. Stop the updater and wait for it to exit. last_processed_block is now stable.
        self.stop_updater().await;
        let stop_block = self.pool_registry.get_last_processed_block();
        info!(
            "[Chain {}] add_pools(ws): updater stopped at block {} (requested block={})",
            chain_id, stop_block, block_number
        );

        // 3. Determine fetch block: use the caller's block_number if it is
        //    ahead of stop_block (the pool may not exist at the stale cursor
        //    on low-activity networks).
        let fetch_block = block_number.max(stop_block);

        // 4. If fetch_block > stop_block, catch up existing registry pools
        //    so all pool state is consistent at fetch_block.
        if fetch_block > stop_block {
            info!(
                "[Chain {}] add_pools(ws): catching up existing pools blocks {}..={}",
                chain_id,
                stop_block + 1,
                fetch_block
            );
            catchup_registry_to_block(
                &self.provider,
                &self.pool_registry,
                stop_block + 1,
                fetch_block,
                chain_id,
            )
            .await?;
        }

        // 5. Create a fresh EventQueue.
        let new_event_queue = EventQueue::new(1000, 1000, chain_id);
        let event_sender = new_event_queue.get_sender();

        // 6. Start new WS listeners for ALL addresses (existing + new) NOW,
        //    so they buffer events during the upcoming (slow) pool fetch phase.
        //    This mirrors the WebsocketBlockSource::bootstrap pattern: listeners
        //    start first so no events are missed between fetch and subscription.
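        //    Timeline: subscribe first (events buffer in the EventQueue), then
        //    fetch pool state at fetch_block, then the restarted updater's
        //    bootstrap drains the queue and RPC-catches up to the current head.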
        let all_current_addresses = self.pool_registry.get_all_addresses();
        let all_addresses: Vec<Address> = {
            let mut set: HashSet<Address> = all_current_addresses.iter().copied().collect();
            set.extend(addresses.iter().copied());
            set.into_iter().collect()
        };
        let topics = self.pool_registry.get_topics();
        let mut new_ws_listeners: Vec<Arc<WebsocketListener>> =
            Vec::with_capacity(self.ws_urls.len());
        for url in &self.ws_urls {
            let listener = Arc::new(WebsocketListener::new(
                url.clone(),
                all_addresses.clone(),
                Arc::clone(&event_sender),
                topics.clone(),
                chain_id,
            ));
            if let Err(e) = listener.start().await {
                error!(
                    "[Chain {}] Error starting WS listener for {}: {}",
                    chain_id, url, e
                );
            }
            new_ws_listeners.push(listener);
        }
        self.ws_listeners = new_ws_listeners;

        // 7. Fetch new pool state into memory at fetch_block.
        //    WS listeners are already buffering events in the background.
        info!(
            "[Chain {}] add_pools(ws): fetching {} pools at block {}",
            chain_id,
            addresses.len(),
            fetch_block
        );
        let pools = match fetch_pools_in_memory(
            &self.provider,
            addresses,
            BlockId::Number(BlockNumberOrTag::Number(fetch_block)),
            token_info,
            fetch_config,
        )
        .await
        {
            Ok(pools) => pools,
            Err(e) => {
                // Recovery: restart the updater with existing pools so the
                // collector keeps running even though we failed to add new pools.
                warn!(
                    "[Chain {}] add_pools(ws): fetch failed, restarting collector without new pools: {}",
                    chain_id, e
                );
                self.pool_registry.set_last_processed_block(fetch_block);
                self.spawn_updater(
                    UpdaterMode::Websocket {
                        event_queue: new_event_queue,
                    },
                    fetch_block,
                );
                return Err(e);
            }
        };

        // 8. Register pools and any new event topics.
        //    Pool state is at fetch_block — consistent with existing pools.
        register_pools_and_topics(&self.pool_registry, pools);
        self.pool_registry.set_last_processed_block(fetch_block);

        // 9. Restart the updater in Websocket mode with the new EventQueue.
        //    WebsocketBlockSource::bootstrap() will drain the EventQueue
        //    and RPC-catch up ALL pools from fetch_block to current.
        self.spawn_updater(
            UpdaterMode::Websocket {
                event_queue: new_event_queue,
            },
            fetch_block,
        );

        info!(
            "[Chain {}] add_pools(ws): done — {} new pools registered, updater restarted from block {}",
            chain_id,
            addresses.len(),
            fetch_block
        );
        Ok(())
    }

    // -------------------------------------------------------------------------
    // Helpers
    // -------------------------------------------------------------------------

    /// Send the cancel signal and wait for the updater task to fully exit.
    ///
    /// Awaiting the `JoinHandle` guarantees that the updater's final
    /// `set_last_processed_block` call has completed before we read the
    /// registry's block cursor, preventing a stale-read race condition.
    async fn stop_updater(&mut self) {
        if let Some(tx) = self.cancel_tx.take() {
            let _ = tx.send(());
        }
        if let Some(handle) = self.updater_handle.take() {
            let _ = handle.await;
        }
    }

    /// Create and spawn a new `UnifiedPoolUpdater` task, storing the cancel
    /// sender and the `JoinHandle` for later awaiting in `stop_updater`.
    fn spawn_updater(&mut self, mode: UpdaterMode, start_block: u64) {
        let chain_id = self.pool_registry.get_network_id();
        let (cancel_tx, cancel_rx) = oneshot::channel();
        let mut updater = UnifiedPoolUpdater::new(
            Arc::clone(&self.provider),
            Arc::clone(&self.pool_registry),
            self.metrics.clone(),
            self.swap_event_tx.clone(),
            start_block,
            self.collector_config.max_blocks_per_batch,
            mode,
            cancel_rx,
        );
        let handle = tokio::spawn(async move {
            if let Err(e) = updater.start().await {
                error!("[Chain {}] Collector error after restart: {}", chain_id, e);
            }
        });
        self.updater_handle = Some(handle);
        self.cancel_tx = Some(cancel_tx);
    }
}

// =============================================================================
// Free helper functions
// =============================================================================

/// Fetch pool objects into memory without registering them.
///
/// Mirrors the chunking / rate-limiting / retry behaviour of
/// `fetch_pools_into_registry` but keeps pools in a local `Vec` so callers
/// can apply in-memory catchup before adding them to the registry.
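///
/// Chunking and pacing come from `PoolFetchConfig`: `chunk_size` pools per
/// chunk, `wait_time_between_chunks` ms between chunks, and up to
/// `max_retries` exponential-backoff attempts per failed pool.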
async fn fetch_pools_in_memory<P: Provider + Send + Sync, T: TokenInfo>(
    provider: &Arc<P>,
    addresses: &[Address],
    block_number: BlockId,
    token_info: &T,
    config: &PoolFetchConfig,
) -> Result<Vec<Box<dyn PoolInterface>>> {
    let chain_id = config.chain_id;
    let multicall_address =
        crate::collector::resolve_multicall_address(config.chain_id, config.multicall_address);
    let chunk_size = config.chunk_size.max(1);
    let chunk_count = addresses.len().div_ceil(chunk_size);
    let mut pools: Vec<Box<dyn PoolInterface>> = Vec::with_capacity(addresses.len());

    for (chunk_idx, chunk) in addresses.chunks(chunk_size).enumerate() {
        info!(
            "[Chain {}] fetch_pools_in_memory: chunk {}/{} ({} pools)",
            chain_id,
            chunk_idx + 1,
            chunk_count,
            chunk.len()
        );

        let results: Vec<Result<Box<dyn PoolInterface>>> = if config.parallel_fetch {
            let futures: Vec<_> = chunk
                .iter()
                .map(|&address| {
                    let provider = Arc::clone(provider);
                    async move {
                        let pool_type =
                            identify_pool_type(&provider, address, multicall_address).await?;
                        fetch_pool(
                            &provider,
                            address,
                            block_number,
                            pool_type,
                            token_info,
                            config,
                        )
                        .await
                    }
                })
                .collect();
            join_all(futures).await
        } else {
            let mut seq = Vec::with_capacity(chunk.len());
            for (i, &address) in chunk.iter().enumerate() {
                // Capture identify_pool_type errors in the per-pool Result
                // (as the parallel path does) so a transient failure goes
                // through the retry loop below instead of aborting the fetch.
                let result = async {
                    let pool_type =
                        identify_pool_type(provider, address, multicall_address).await?;
                    fetch_pool(
                        provider,
                        address,
                        block_number,
                        pool_type,
                        token_info,
                        config,
                    )
                    .await
                }
                .await;
                seq.push(result);
                if i + 1 < chunk.len() && config.wait_time_between_chunks > 0 {
                    tokio::time::sleep(Duration::from_millis(
                        config.wait_time_between_chunks,
                    ))
                    .await;
                }
            }
            seq
        };

        // Retry failed pools with exponential backoff
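        // (delay doubles per attempt: 500 ms, 1 s, 2 s, ...)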
        let mut failed: Vec<Address> = Vec::new();
        for (i, result) in results.into_iter().enumerate() {
            match result {
                Ok(pool) => pools.push(pool),
                Err(e) => {
                    error!(
                        "[Chain {}] Failed to fetch pool {}: {}",
                        chain_id, chunk[i], e
                    );
                    failed.push(chunk[i]);
                }
            }
        }

        for address in failed {
            let mut success = false;
            for attempt in 1..=config.max_retries {
                let delay = Duration::from_millis(500 * 2u64.pow(attempt - 1));
                info!(
                    "[Chain {}] Retrying pool {} (attempt {}/{})",
                    chain_id, address, attempt, config.max_retries
                );
                tokio::time::sleep(delay).await;
                match async {
                    let pool_type =
                        identify_pool_type(provider, address, multicall_address).await?;
                    fetch_pool(
                        provider,
                        address,
                        block_number,
                        pool_type,
                        token_info,
                        config,
                    )
                    .await
                }
                .await
                {
                    Ok(pool) => {
                        pools.push(pool);
                        success = true;
                        break;
                    }
                    Err(e) => {
                        error!(
                            "[Chain {}] Retry {}/{} failed for pool {}: {}",
                            chain_id, attempt, config.max_retries, address, e
                        );
                    }
                }
            }
            if !success {
                return Err(anyhow::anyhow!(
                    "[Chain {}] Failed to fetch pool {} after {} retries",
                    chain_id,
                    address,
                    config.max_retries
                ));
            }
        }

        if chunk_idx + 1 < chunk_count && config.wait_time_between_chunks > 0 {
            tokio::time::sleep(Duration::from_millis(config.wait_time_between_chunks)).await;
        }
    }

    Ok(pools)
}

/// Fetch events for the given block range (filtered to the pool addresses) and
/// apply them in log order to the in-memory pool objects.
///
/// Only events whose `log.address()` matches one of the in-memory pools are
/// applied, so existing registry pools are never touched.
async fn apply_catchup_events_in_memory<P: Provider + Send + Sync>(
    provider: &Arc<P>,
    pools: &mut [Box<dyn PoolInterface>],
    topics: Vec<crate::Topic>,
    from_block: u64,
    to_block: u64,
    chain_id: u64,
) -> Result<()> {
    let addresses: Vec<Address> = pools.iter().map(|p| p.address()).collect();
    let addr_set: HashSet<Address> = addresses.iter().copied().collect();

    let events: Vec<Log> = fetch_events_with_retry(
        provider,
        addresses,
        topics,
        BlockNumberOrTag::Number(from_block),
        BlockNumberOrTag::Number(to_block),
        chain_id,
    )
    .await?;

    info!(
        "[Chain {}] apply_catchup_events_in_memory: {} events over {} pools",
        chain_id,
        events.len(),
        pools.len()
    );

    // Group events by address in log order so each pool gets a sequential slice.
    let mut events_by_addr: HashMap<Address, Vec<&Log>> = HashMap::new();
    for event in &events {
        let addr = event.address();
        if addr_set.contains(&addr) {
            events_by_addr.entry(addr).or_default().push(event);
        }
    }

    for pool in pools.iter_mut() {
        if let Some(pool_events) = events_by_addr.get(&pool.address()) {
            for event in pool_events {
                if let Err(e) = pool.apply_log(event) {
                    warn!(
                        "[Chain {}] Catchup event error for pool {}: {}",
                        chain_id,
                        pool.address(),
                        e
                    );
                }
            }
        }
    }

    Ok(())
}

/// Catch up existing registry pools from `from_block` to `to_block` by
/// fetching and applying on-chain events. Used when the caller's
/// `block_number` is ahead of `last_processed_block`.
async fn catchup_registry_to_block<P: Provider + Send + Sync>(
    provider: &Arc<P>,
    pool_registry: &Arc<PoolRegistry>,
    from_block: u64,
    to_block: u64,
    chain_id: u64,
) -> Result<()> {
    let addresses = pool_registry.get_all_addresses();
    let topics = pool_registry.get_topics();

    let events: Vec<Log> = fetch_events_with_retry(
        provider,
        addresses,
        topics,
        BlockNumberOrTag::Number(from_block),
        BlockNumberOrTag::Number(to_block),
        chain_id,
    )
    .await?;

    info!(
        "[Chain {}] catchup_registry_to_block: applying {} events over blocks {}..={}",
        chain_id,
        events.len(),
        from_block,
        to_block
    );

    for event in &events {
        if let Some(pool) = pool_registry.get_pool(&event.address()) {
            if let Err(e) = pool.write().await.apply_log(event) {
                warn!(
                    "[Chain {}] catchup_registry_to_block error for pool {}: {}",
                    chain_id,
                    event.address(),
                    e
                );
            }
        }
    }

    Ok(())
}

/// Insert pool objects into the registry and register any previously unseen
/// pool-type event topics.
fn register_pools_and_topics(registry: &Arc<PoolRegistry>, pools: Vec<Box<dyn PoolInterface>>) {
    let mut new_pool_types: HashSet<PoolType> = HashSet::new();
    for pool in pools {
        new_pool_types.insert(pool.pool_type());
        registry.add_pool(pool);
    }
    for pool_type in new_pool_types {
        registry.add_topics(pool_type.topics());
        registry.add_profitable_topics(pool_type.profitable_topics());
    }
}