saorsa-core 0.24.2

Saorsa - Core P2P networking library with DHT, QUIC transport, and post-quantum cryptography
// Copyright 2024 Saorsa Labs Limited
//
// This software is dual-licensed under:
// - GNU Affero General Public License v3.0 or later (AGPL-3.0-or-later)
// - Commercial License
//
// For AGPL-3.0 license, see LICENSE-AGPL-3.0
// For commercial licensing, contact: david@saorsalabs.com
//
// Unless required by applicable law or agreed to in writing, software
// distributed under these licenses is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

//! Relay acquisition driver.
//!
//! Owns every state transition for this node's MASQUE relay: the initial
//! acquisition at startup, the backoff retry when no candidate accepts,
//! the republish-then-reacquire sequence when an existing relay is lost,
//! and the K-closest-eviction watcher that forces a rebind when the
//! chosen relayer drops out of the close group.
//!
//! ## State machine
//!
//! The driver runs as a single tokio task and cycles through four states:
//!
//! 1. **Acquiring**: call [`run_relay_acquisition`]. On success, publish
//!    the full typed self-record (relay-allocated address tagged
//!    [`AddressType::Relay`] first, then one best non-relay address per
//!    IP family) to K-closest peers, store the relayer peer ID, and enter
//!    the **Holding** state. Relay serving stays permanently enabled. On
//!    failure, publish the direct-only address set so the node remains as
//!    reachable as possible, arm the exponential backoff timer, and enter
//!    the **Backoff** state.
//! 2. **Holding**: subscribe to `KClosestPeersChanged` events, republish
//!    when a pinned external address is promoted to
//!    [`AddressType::Direct`], and poll
//!    [`TransportHandle::is_relay_healthy`] every
//!    [`HEALTH_POLL_INTERVAL`]. When the relayer is evicted from the
//!    K-closest set, the transport reports `RelayLost`, or a health poll
//!    finds the tunnel unhealthy, transition to **Lost**; on shutdown,
//!    exit the driver.
//! 3. **Lost**: run the `republish-direct-only → reacquire` sequence.
//!    The republish MUST happen **before** the acquisition walk starts,
//!    so the network stops dialing the dead relay address during the
//!    1–10 s acquisition window. After republishing, loop back to
//!    **Acquiring**.
//! 4. **Backoff**: wait for the current backoff window or a
//!    `KClosestPeersChanged` event (whichever comes first), republishing
//!    if a pinned external is promoted to [`AddressType::Direct`] while
//!    waiting, then loop back to **Acquiring**. Successful acquisition
//!    resets the backoff.
//!
//! Clients ([`NodeMode::Client`](crate::network::NodeMode::Client)) do
//! not spawn the driver at all — they are outbound-only and do not need
//! a relay.
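//!
//! ## Usage sketch
//!
//! A minimal, illustrative sketch of how the owning node might wire up
//! the driver. The `dht` and `transport` handles are assumed to come
//! from node construction elsewhere; this is not a verbatim excerpt of
//! [`P2PNode`](crate::network::P2PNode):
//!
//! ```ignore
//! use std::sync::Arc;
//! use tokio::sync::RwLock;
//! use tokio_util::sync::CancellationToken;
//!
//! // Shared slots the driver writes into as the relay state changes.
//! let relayer_peer_id = Arc::new(RwLock::new(None));
//! let relay_address = Arc::new(RwLock::new(None));
//! let shutdown = CancellationToken::new();
//!
//! // Fire-and-forget: the spawned task exits when `shutdown` cancels.
//! spawn_acquisition_driver(
//!     Arc::clone(&dht),
//!     Arc::clone(&transport),
//!     Arc::clone(&relayer_peer_id),
//!     Arc::clone(&relay_address),
//!     shutdown.clone(),
//! );
//! ```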

use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;

use tokio::sync::RwLock;
use tokio::sync::broadcast::error::RecvError;
use tokio_util::sync::CancellationToken;
use tracing::{debug, info, trace, warn};

use crate::dht::AddressType;
use crate::dht_network_manager::{DhtNetworkEvent, DhtNetworkManager};
use crate::reachability::session::{RelayAcquisitionOutcome, run_relay_acquisition};
use crate::self_address::build_typed_self_address_set;
use crate::transport_handle::TransportHandle;
use crate::{MultiAddr, PeerId};

/// How often to poll the transport for tunnel health while holding a relay.
///
/// 5 seconds fits inside the 10–30 s failover-window budget and keeps the
/// wake rate low (the poll is non-blocking and only reads an atomic
/// counter inside saorsa-transport).
const HEALTH_POLL_INTERVAL: Duration = Duration::from_secs(5);

/// Initial delay before the first retry after a failed acquisition walk.
const BACKOFF_INITIAL: Duration = Duration::from_secs(30);

/// Upper bound on the backoff delay. Once the delay reaches this cap,
/// retries keep firing every [`BACKOFF_MAX`] until the routing table
/// expands or a retry succeeds.
const BACKOFF_MAX: Duration = Duration::from_secs(300);

/// Multiplicative factor applied after each failed retry.
const BACKOFF_FACTOR: u32 = 2;

/// Spawn the relay acquisition driver as a background task.
///
/// The task runs until `shutdown` is cancelled. On spawn, it performs the
/// initial acquisition attempt and then enters the state machine described
/// in the module docs.
///
/// `relayer_peer_id` and `relay_address` are shared with the owning
/// [`P2PNode`](crate::network::P2PNode); the driver writes to them to
/// reflect the current relay state.
pub(crate) fn spawn_acquisition_driver(
    dht: Arc<DhtNetworkManager>,
    transport: Arc<TransportHandle>,
    relayer_peer_id: Arc<RwLock<Option<PeerId>>>,
    relay_address: Arc<RwLock<Option<SocketAddr>>>,
    shutdown: CancellationToken,
) {
    tokio::spawn(async move {
        let mut driver = AcquisitionDriver {
            dht,
            transport,
            relayer_peer_id,
            relay_address,
            shutdown,
            current_backoff: BACKOFF_INITIAL,
            last_published_typed_set: None,
        };
        driver.run().await;
    });
}

/// The driver's owned state, factored out of `spawn_acquisition_driver`
/// so the state-transition methods can share it without threading
/// individual arguments through each step.
struct AcquisitionDriver {
    dht: Arc<DhtNetworkManager>,
    transport: Arc<TransportHandle>,
    relayer_peer_id: Arc<RwLock<Option<PeerId>>>,
    relay_address: Arc<RwLock<Option<SocketAddr>>>,
    shutdown: CancellationToken,
    current_backoff: Duration,
    last_published_typed_set: Option<PublishedTypedSet>,
}

#[derive(Clone, Debug, PartialEq)]
struct PublishedTypedSet {
    typed_addresses: Vec<(MultiAddr, AddressType)>,
    peers: Vec<PeerId>,
}

impl AcquisitionDriver {
    async fn run(&mut self) {
        info!("relay acquisition driver starting");
        loop {
            if self.shutdown.is_cancelled() {
                debug!("relay acquisition driver: shutdown, exiting");
                return;
            }

            let outcome = run_relay_acquisition(self.dht.as_ref(), &self.transport).await;
            match outcome {
                RelayAcquisitionOutcome::Acquired(relay) => {
                    self.current_backoff = BACKOFF_INITIAL;
                    *self.relayer_peer_id.write().await = Some(relay.relayer);
                    *self.relay_address.write().await = Some(relay.allocated_public_addr);
                    self.transport
                        .set_relay_address(relay.allocated_public_addr);
                    self.force_publish_typed_set(Some(relay.allocated_public_addr))
                        .await;
                    info!(
                        relayer = ?relay.relayer,
                        allocated = %relay.allocated_public_addr,
                        "driver: relay acquired and published"
                    );
                    // Hold the relay until an eviction or tunnel-death
                    // event forces us back into the acquisition loop.
                    if self.hold_until_lost().await {
                        // shutdown
                        return;
                    }
                    // Fall through: hold_until_lost() returned false, so
                    // the relay is considered lost and we must republish
                    // direct-only BEFORE retrying acquisition.
                    self.lose_relay_and_republish().await;
                }
                RelayAcquisitionOutcome::Failed(reason) => {
                    warn!(reason, "driver: acquisition failed, entering backoff");
                    *self.relayer_peer_id.write().await = None;
                    *self.relay_address.write().await = None;
                    self.transport.clear_relay_address();
                    self.publish_typed_set(None).await;
                    if self.wait_backoff_or_event().await {
                        return; // shutdown
                    }
                    self.advance_backoff();
                }
            }
        }
    }

    /// Publish this node's current typed address set to K-closest peers.
    ///
    /// Each address's [`AddressType`] is computed independently from the
    /// passive per-address reachability proof (see
    /// [`TransportHandle::is_external_proven`]): an address is tagged
    /// [`AddressType::Direct`] only after at least
    /// `MIN_DISTINCT_OBSERVERS_FOR_DIRECT` source-disjoint inbounds have
    /// been attributed to it; otherwise it is tagged
    /// [`AddressType::Unverified`] (so dialers know they may time out).
    ///
    /// When `relay` is `Some`, the relay-allocated socket is emitted
    /// first, tagged [`AddressType::Relay`].
    ///
    /// Per-address (not global) tagging matters for two cases the previous
    /// global-flag approach got wrong:
    ///
    /// 1. A v4 inbound proves nothing about a v6 external; the classifier
    ///    only credits same-family externals, and the tag is computed
    ///    per address from that per-address proof.
    /// 2. On a multi-NAT host, one external being proven Direct does not
    ///    promote unrelated externals.
    ///
    /// Quietly drops the publish when there are no dialable addresses to
    /// advertise — a fully wildcard-bound node cannot meaningfully tell
    /// peers how to reach it.
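    ///
    /// # Example (illustrative)
    ///
    /// A sketch of the tag shape this publish produces, assuming one
    /// relay allocation, a v4 external with enough source-disjoint
    /// inbound proof, and an unproven v6 external. The addresses and
    /// binding names are made up; only the ordering and tags mirror the
    /// rules above:
    ///
    /// ```ignore
    /// let typed = build_typed_self_address_set(observed, listen, Some(relay_addr), |sa| {
    ///     transport.is_external_proven(sa)
    /// });
    /// // typed ≈ [
    /// //     (relay_multiaddr, AddressType::Relay),      // relay first
    /// //     (v4_multiaddr,    AddressType::Direct),     // proven per address
    /// //     (v6_multiaddr,    AddressType::Unverified), // v4 proof never promotes v6
    /// // ];
    /// ```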
    async fn publish_typed_set(&mut self, relay: Option<SocketAddr>) {
        self.publish_typed_set_with_policy(relay, false).await;
    }

    async fn force_publish_typed_set(&mut self, relay: Option<SocketAddr>) {
        self.publish_typed_set_with_policy(relay, true).await;
    }

    async fn publish_typed_set_with_policy(&mut self, relay: Option<SocketAddr>, force: bool) {
        let listen = self.transport.listen_addrs().await;
        let observed = self.transport.non_relay_external_addresses();

        debug!(
            relay = ?relay,
            observed = ?observed,
            listen = ?listen,
            "driver: preparing typed self address set"
        );

        let typed = build_typed_self_address_set(observed, listen, relay, |sa| {
            self.transport.is_external_proven(sa)
        });

        if typed.is_empty() {
            debug!("driver: publish skipped, no dialable self addresses");
            return;
        }

        let own_key = *self.dht.peer_id().to_bytes();
        let all_peers = self
            .dht
            .find_closest_nodes_local(&own_key, self.dht.k_value())
            .await;
        let peers = all_peers.iter().map(|node| node.peer_id).collect();
        let publish_snapshot = PublishedTypedSet {
            typed_addresses: typed.clone(),
            peers,
        };
        if !force && self.last_published_typed_set.as_ref() == Some(&publish_snapshot) {
            debug!(
                peers = all_peers.len(),
                typed_addresses = ?typed,
                relay = ?relay,
                "driver: publish skipped, typed self address set unchanged"
            );
            return;
        }

        debug!(
            peers = all_peers.len(),
            typed_addresses = ?typed,
            relay = ?relay,
            "driver: publishing typed self address set"
        );
        trace!(
            peers = all_peers.len(),
            addrs = typed.len(),
            relay = ?relay,
            "driver: publishing typed address set to all routing table peers"
        );
        self.dht
            .publish_address_set_to_peers(typed, &all_peers)
            .await;
        self.last_published_typed_set = Some(publish_snapshot);
    }

    /// Hold the acquired relay until an eviction or death event forces a
    /// rebind. Returns `true` on shutdown (caller should exit), `false`
    /// when the relay is considered lost and a republish+reacquire is
    /// needed.
    async fn hold_until_lost(&mut self) -> bool {
        let mut events = self.dht.subscribe_events();
        let mut health = tokio::time::interval(HEALTH_POLL_INTERVAL);
        health.tick().await; // drop the immediate first tick

        loop {
            tokio::select! {
                biased;
                _ = self.shutdown.cancelled() => {
                    return true;
                }
                // Event-driven relay-death signal: the transport layer
                // emits `RelayLost` the moment its health monitor (or the
                // MASQUE tunnel reader task, via the graceful-close
                // watcher) observes the tunnel is gone.  Acting on it
                // immediately closes the staleness window that the 5 s
                // `health.tick()` path would otherwise leave open — the
                // window during which peers continue to dial the dead
                // relay address returned by DHT lookups.
                lost = self.transport.recv_relay_lost() => {
                    match lost {
                        Some(addr) => {
                            info!(
                                relay = %addr,
                                "driver: RelayLost event received, rebinding"
                            );
                            return false;
                        }
                        None => {
                            // Channel closed — transport is shutting
                            // down. Treat as shutdown.
                            return true;
                        }
                    }
                }
                promoted = self.transport.recv_direct_address_promoted() => {
                    match promoted {
                        Some(addr) => {
                            let relay = *self.relay_address.read().await;
                            info!(
                                address = %addr,
                                relay = ?relay,
                                "driver: direct address promoted, republishing typed self address set"
                            );
                            self.publish_typed_set(relay).await;
                        }
                        None => {
                            // Channel closed — transport is shutting down.
                            return true;
                        }
                    }
                }
                updated = self.transport.recv_self_address_updated() => {
                    match updated {
                        Some(addr) => {
                            let relay = *self.relay_address.read().await;
                            debug!(
                                address = %addr,
                                relay = ?relay,
                                "driver: self address updated, refreshing typed self address set"
                            );
                            self.publish_typed_set(relay).await;
                        }
                        None => {
                            // Channel closed — transport is shutting down.
                            return true;
                        }
                    }
                }
                event = events.recv() => {
                    match event {
                        Ok(DhtNetworkEvent::KClosestPeersChanged { ref new, .. }) => {
                            if self.relayer_evicted_from_k_closest(new).await {
                                info!("driver: relayer evicted from K-closest, rebinding");
                                return false;
                            }
                        }
                        Ok(_) => continue,
                        // `RecvError::Lagged` is recoverable — the broadcast
                        // channel dropped events we did not consume fast
                        // enough, but we are still subscribed. `Closed` is
                        // terminal (the DHT manager is dropping); treat it
                        // the same as shutdown.
                        Err(RecvError::Closed) => return true,
                        Err(_) => continue,
                    }
                }
                _ = health.tick() => {
                    if !self.transport.is_relay_healthy() {
                        info!("driver: relay tunnel unhealthy, rebinding");
                        return false;
                    }
                }
            }
        }
    }

    /// Returns `true` if the currently-chosen relayer is no longer in the
    /// new K-closest set.
    async fn relayer_evicted_from_k_closest(&self, new_k_closest: &[PeerId]) -> bool {
        let guard = self.relayer_peer_id.read().await;
        let Some(relayer) = guard.as_ref() else {
            return false;
        };
        !new_k_closest.contains(relayer)
    }

    /// Transition out of the Holding state: republish direct-only and
    /// clear relayer state, BEFORE the acquisition walk retries. The
    /// pre-retry publish is critical — without it, other peers would
    /// continue dialing the dead relay address during the 1–10 s
    /// acquisition walk.
    async fn lose_relay_and_republish(&mut self) {
        *self.relayer_peer_id.write().await = None;
        *self.relay_address.write().await = None;
        self.transport.clear_relay_address();
        self.force_publish_typed_set(None).await;
    }

    /// Wait out the current backoff window, or short-circuit on a
    /// `KClosestPeersChanged` event (new peers may offer fresh candidates).
    /// Returns `true` on shutdown.
    async fn wait_backoff_or_event(&mut self) -> bool {
        let mut events = self.dht.subscribe_events();
        let sleep = tokio::time::sleep(self.current_backoff);
        tokio::pin!(sleep);

        loop {
            tokio::select! {
                biased;
                _ = self.shutdown.cancelled() => return true,
                _ = &mut sleep => {
                    trace!(window = ?self.current_backoff, "driver: backoff window expired");
                    return false;
                }
                promoted = self.transport.recv_direct_address_promoted() => {
                    match promoted {
                        Some(addr) => {
                            info!(
                                address = %addr,
                                "driver: direct address promoted during relay backoff, republishing typed self address set"
                            );
                            self.publish_typed_set(None).await;
                        }
                        None => {
                            // Channel closed — transport is shutting down.
                            return true;
                        }
                    }
                }
                updated = self.transport.recv_self_address_updated() => {
                    match updated {
                        Some(addr) => {
                            debug!(
                                address = %addr,
                                "driver: self address updated during relay backoff, refreshing typed self address set"
                            );
                            self.publish_typed_set(None).await;
                        }
                        None => {
                            // Channel closed — transport is shutting down.
                            return true;
                        }
                    }
                }
                event = events.recv() => {
                    match event {
                        Ok(DhtNetworkEvent::KClosestPeersChanged { .. }) => {
                            debug!("driver: K-closest changed, retrying early");
                            return false;
                        }
                        Ok(_) => continue,
                        Err(RecvError::Closed) => return true,
                        Err(_) => continue,
                    }
                }
            }
        }
    }

    /// Move the backoff window one step closer to [`BACKOFF_MAX`].
    fn advance_backoff(&mut self) {
        let next = self.current_backoff.saturating_mul(BACKOFF_FACTOR);
        self.current_backoff = next.min(BACKOFF_MAX);
    }
}
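
#[cfg(test)]
mod tests {
    use super::*;

    /// Sanity-check sketch of the retry schedule implied by the backoff
    /// constants. It mirrors the arithmetic in
    /// [`AcquisitionDriver::advance_backoff`] without constructing a
    /// driver, whose DHT and transport handles are not available in a
    /// unit test.
    #[test]
    fn backoff_schedule_doubles_then_caps() {
        let mut window = BACKOFF_INITIAL;
        let mut schedule = Vec::new();
        for _ in 0..6 {
            schedule.push(window.as_secs());
            window = window.saturating_mul(BACKOFF_FACTOR).min(BACKOFF_MAX);
        }
        // 30 s doubles on each retry and clamps at the 300 s cap.
        assert_eq!(schedule, vec![30, 60, 120, 240, 300, 300]);
    }
}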