fips-core 0.3.6

Reusable FIPS mesh, endpoint, transport, and protocol library
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
//! Library-first endpoint API for embedding FIPS in applications.
//!
//! This module exposes a no-system-TUN runtime shape for apps that want to own
//! peer admission and local routing policy while reusing FIPS connectivity.

use crate::config::{NostrDiscoveryPolicy, TransportInstances, UdpConfig};
use crate::node::{NodeEndpointCommand, NodeEndpointEvent, NodeEndpointPeer};
use crate::{
    Config, FipsAddress, IdentityConfig, Node, NodeAddr, NodeDeliveredPacket, NodeError,
    PeerIdentity,
};
use std::sync::Arc;
use thiserror::Error;
use tokio::sync::{Mutex, mpsc, oneshot};
use tokio::task::JoinHandle;

/// Errors returned by the endpoint API.
#[derive(Debug, Error)]
pub enum FipsEndpointError {
    /// A wrapped error from the underlying FIPS [`Node`].
    #[error("node error: {0}")]
    Node(#[from] NodeError),

    /// The background node task panicked or was cancelled before joining.
    #[error("endpoint task failed: {0}")]
    TaskJoin(#[from] tokio::task::JoinError),

    /// The node task has stopped, so the command/data channels are closed.
    #[error("endpoint is closed")]
    Closed,

    /// The remote npub string could not be parsed into a [`PeerIdentity`].
    #[error("invalid remote npub '{npub}': {reason}")]
    InvalidRemoteNpub { npub: String, reason: String },
}

/// Source-attributed endpoint data delivered to an embedded application.
///
/// Produced by [`FipsEndpoint::recv`], [`FipsEndpoint::blocking_recv`], and
/// [`FipsEndpoint::try_recv`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FipsEndpointMessage {
    /// FIPS node address that originated the endpoint data.
    pub source_node_addr: NodeAddr,
    /// Source Nostr public key when the node has learned it.
    pub source_npub: Option<String>,
    /// Application-owned payload bytes.
    pub data: Vec<u8>,
}

/// Authenticated FIPS peer state visible to an embedded application.
///
/// A point-in-time snapshot returned by [`FipsEndpoint::peers`]; values are
/// not updated in place after the snapshot is taken.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FipsEndpointPeer {
    /// Peer Nostr public key.
    pub npub: String,
    /// Current underlay transport address, when a link has authenticated.
    pub transport_addr: Option<String>,
    /// Current underlay transport kind, when known.
    pub transport_type: Option<String>,
    /// Authenticated link id.
    pub link_id: u64,
    /// Smoothed RTT in milliseconds, once measured by FIPS MMP.
    pub srtt_ms: Option<u64>,
    /// Link packets sent.
    pub packets_sent: u64,
    /// Link packets received.
    pub packets_recv: u64,
    /// Link bytes sent.
    pub bytes_sent: u64,
    /// Link bytes received.
    pub bytes_recv: u64,
}

/// Builder for an embedded FIPS endpoint.
#[derive(Debug, Clone)]
pub struct FipsEndpointBuilder {
    // Base FIPS config; cloned and adjusted by `prepared_config`.
    config: Config,
    // Optional `nsec`/hex secret installed as a non-persistent identity.
    identity_nsec: Option<String>,
    // Application-level discovery scope; may enable default scoped discovery
    // when the connectivity config is otherwise untouched.
    discovery_scope: Option<String>,
    // When true, TUN/DNS/system-file integration is disabled in the config.
    disable_system_networking: bool,
    // Capacity used for both the IP-packet and endpoint-data channels.
    packet_channel_capacity: usize,
}

impl Default for FipsEndpointBuilder {
    /// Library-first defaults: empty config, ephemeral identity, no system
    /// TUN/DNS integration.
    fn default() -> Self {
        Self {
            config: Config::new(),
            identity_nsec: None,
            discovery_scope: None,
            // Off by default — this API targets apps that own routing policy.
            disable_system_networking: true,
            // Default channel capacity; override via `packet_channel_capacity`.
            packet_channel_capacity: 1024,
        }
    }
}

impl FipsEndpointBuilder {
    /// Start from an explicit FIPS config.
    pub fn config(mut self, config: Config) -> Self {
        self.config = config;
        self
    }

    /// Use an `nsec` or hex secret for the endpoint identity.
    pub fn identity_nsec(mut self, nsec: impl Into<String>) -> Self {
        self.identity_nsec = Some(nsec.into());
        self
    }

    /// Set an application-level discovery scope.
    ///
    /// When the builder owns the default empty connectivity config, this also
    /// enables scoped Nostr discovery, open same-scope peer discovery, local
    /// LAN candidates, and a UDP NAT advert. If an explicit transport or
    /// Nostr config was supplied, the explicit config is left in control and
    /// the scope is retained as endpoint metadata.
    pub fn discovery_scope(mut self, scope: impl Into<String>) -> Self {
        self.discovery_scope = Some(scope.into());
        self
    }

    /// Disable FIPS-owned TUN and DNS system integration.
    pub fn without_system_tun(mut self) -> Self {
        self.disable_system_networking = true;
        self
    }

    /// Set the app packet/data channel capacity.
    pub fn packet_channel_capacity(mut self, capacity: usize) -> Self {
        // Clamp to at least one slot so the channels stay usable.
        self.packet_channel_capacity = capacity.max(1);
        self
    }

    /// Materialize the effective config: identity override, system-networking
    /// knobs, then default scoped discovery (which defers to explicit config).
    fn prepared_config(&self) -> Config {
        let mut config = self.config.clone();

        if let Some(nsec) = self.identity_nsec.clone() {
            config.node.identity = IdentityConfig {
                nsec: Some(nsec),
                persistent: false,
            };
        }

        if self.disable_system_networking {
            config.tun.enabled = false;
            config.dns.enabled = false;
            config.node.system_files_enabled = false;
        }

        match self.discovery_scope.as_deref() {
            Some(scope) => apply_default_scoped_discovery(&mut config, scope),
            None => {}
        }

        config
    }

    /// Bind and start the embedded endpoint.
    pub async fn bind(self) -> Result<FipsEndpoint, FipsEndpointError> {
        let capacity = self.packet_channel_capacity;
        let mut node = Node::new(self.prepared_config())?;

        // Capture identity/address facts before the node task takes ownership.
        let npub = node.npub();
        let node_addr = *node.node_addr();
        let address = *node.identity().address();

        let packet_io = node.attach_external_packet_io(capacity)?;
        let data_io = node.attach_endpoint_data_io(capacity)?;
        node.start().await?;

        let (shutdown_tx, shutdown_rx) = oneshot::channel();
        let task = spawn_node_task(node, shutdown_rx);

        Ok(FipsEndpoint {
            npub,
            node_addr,
            address,
            discovery_scope: self.discovery_scope,
            outbound_packets: packet_io.outbound_tx,
            delivered_packets: Arc::new(Mutex::new(packet_io.inbound_rx)),
            endpoint_commands: data_io.command_tx,
            inbound_endpoint_tx: data_io.event_tx,
            inbound_endpoint_rx: Arc::new(Mutex::new(data_io.event_rx)),
            peer_identity_cache: std::sync::Mutex::new(std::collections::HashMap::new()),
            shutdown_tx: Some(shutdown_tx),
            task,
        })
    }
}

/// Install the default scoped-discovery connectivity config, unless the
/// caller already supplied explicit Nostr or transport settings.
fn apply_default_scoped_discovery(config: &mut Config, scope: &str) {
    // Explicit config wins: only a completely untouched connectivity
    // section gets the defaults.
    let explicitly_configured =
        config.node.discovery.nostr.enabled || !config.transports.is_empty();
    if explicitly_configured {
        return;
    }

    let nostr = &mut config.node.discovery.nostr;
    nostr.enabled = true;
    nostr.advertise = true;
    nostr.policy = NostrDiscoveryPolicy::Open;
    nostr.share_local_candidates = true;
    nostr.app = format!("fips-overlay-v1:{scope}");

    // One ephemeral-port UDP transport advertised on Nostr, private,
    // accepting inbound connections.
    config.transports.udp = TransportInstances::Single(UdpConfig {
        bind_addr: Some("0.0.0.0:0".to_string()),
        advertise_on_nostr: Some(true),
        public: Some(false),
        outbound_only: Some(false),
        accept_connections: Some(true),
        ..UdpConfig::default()
    });
}

/// Spawn the background task that drives the node's receive loop until the
/// loop exits on its own or the endpoint signals shutdown via the oneshot.
///
/// Returns the join handle the endpoint awaits in `shutdown()`; the task
/// resolves to the rx-loop error if one occurred, otherwise the stop result.
fn spawn_node_task(
    mut node: Node,
    shutdown_rx: oneshot::Receiver<()>,
) -> JoinHandle<Result<(), NodeError>> {
    tokio::spawn(async move {
        tokio::pin!(shutdown_rx);
        let loop_result = tokio::select! {
            result = node.run_rx_loop() => result,
            // Shutdown signal (or sender dropped): abandon the rx loop.
            _ = &mut shutdown_rx => Ok(()),
        };
        // Stop only if the node is in a stoppable state — the rx loop may
        // already have torn things down on error. NOTE(review): exact
        // `can_stop` semantics live in Node's state machine; confirm there.
        let stop_result = if node.state().can_stop() {
            node.stop().await
        } else {
            Ok(())
        };
        // Surface the rx-loop error first; it is the primary failure.
        loop_result?;
        stop_result
    })
}

/// A running embedded FIPS endpoint.
///
/// Created via [`FipsEndpoint::builder`]; owns the background node task and
/// the channels that move app data and IP packets in and out of FIPS.
pub struct FipsEndpoint {
    /// Local endpoint Nostr public key.
    npub: String,
    /// Local FIPS node address.
    node_addr: NodeAddr,
    /// Local FIPS IPv6-compatible address.
    address: FipsAddress,
    /// Application-level discovery scope, if configured at build time.
    discovery_scope: Option<String>,
    /// Outbound IPv6 packets handed to the FIPS session pipeline.
    outbound_packets: mpsc::Sender<Vec<u8>>,
    /// Inbound IPv6 packets delivered by FIPS; locked per `recv_ip_packet`.
    delivered_packets: Arc<Mutex<mpsc::Receiver<NodeDeliveredPacket>>>,
    /// Command channel into the node task (sends, peer snapshots).
    endpoint_commands: mpsc::Sender<NodeEndpointCommand>,
    /// In-process loopback sender — `send()` to our own npub injects an
    /// event into the same queue without going through the wire/encrypt
    /// path. The node's rx_loop also sends into this channel directly
    /// (it holds a clone of this sender) so there is no per-packet relay
    /// task between the node task and `recv()`.
    inbound_endpoint_tx: mpsc::UnboundedSender<NodeEndpointEvent>,
    /// Unbounded receiver. Was previously fed by a per-packet relay task
    /// that translated `NodeEndpointEvent::Data` into `FipsEndpointMessage`
    /// across an additional bounded mpsc; collapsed into a single channel
    /// — the translation happens inline in `recv()` and the second hop
    /// (with its scheduler wake per packet) is gone.
    inbound_endpoint_rx: Arc<Mutex<mpsc::UnboundedReceiver<NodeEndpointEvent>>>,
    /// Cache of resolved PeerIdentity by npub string. Avoids the per-packet
    /// secp256k1 EC point parse that `PeerIdentity::from_npub` performs;
    /// without this cache the bulk-data send hot path spends ~10–30% of CPU
    /// re-validating identity bytes the application has already configured.
    peer_identity_cache: std::sync::Mutex<std::collections::HashMap<String, PeerIdentity>>,
    /// Signals the node task to exit; consumed by `shutdown()`.
    shutdown_tx: Option<oneshot::Sender<()>>,
    /// Join handle for the spawned node task; awaited in `shutdown()`.
    task: JoinHandle<Result<(), NodeError>>,
}

impl FipsEndpoint {
    /// Create a builder for an embedded endpoint.
    pub fn builder() -> FipsEndpointBuilder {
        FipsEndpointBuilder::default()
    }

    /// Local endpoint npub.
    pub fn npub(&self) -> &str {
        &self.npub
    }

    /// Local FIPS node address.
    pub fn node_addr(&self) -> &NodeAddr {
        &self.node_addr
    }

    /// Local FIPS IPv6-compatible address.
    pub fn address(&self) -> FipsAddress {
        self.address
    }

    /// Application-level discovery scope, if configured.
    pub fn discovery_scope(&self) -> Option<&str> {
        self.discovery_scope.as_deref()
    }

    /// Send application-owned endpoint data to a remote npub.
    ///
    /// Fire-and-forget: enqueues the Send command on the node task and
    /// returns once the command channel accepts it. The node task's send
    /// result is discarded — TCP and the upper protocol handle loss
    /// recovery, and the per-packet oneshot round-trip the previous design
    /// used for error reporting added several hundred microseconds of
    /// queueing latency under load (measured: 456ms avg ping under iperf3
    /// saturation → 1ms after this change, 430× lower).
    ///
    /// PeerIdentity for `remote_npub` is cached after first resolution to
    /// avoid the secp256k1 EC point parse on every packet.
    pub async fn send(
        &self,
        remote_npub: impl Into<String>,
        data: impl Into<Vec<u8>>,
    ) -> Result<(), FipsEndpointError> {
        let remote_npub = remote_npub.into();
        let data = data.into();
        if remote_npub == self.npub {
            self.inbound_endpoint_tx
                .send(NodeEndpointEvent::Data {
                    source_node_addr: self.node_addr,
                    source_npub: Some(self.npub.clone()),
                    payload: data,
                    queued_at: crate::perf_profile::stamp(),
                })
                .map_err(|_| FipsEndpointError::Closed)?;
            return Ok(());
        }

        let remote = self.resolve_peer_identity(&remote_npub)?;

        // Fire-and-forget: caller already drops the result, so skip
        // the per-packet `oneshot::channel()` allocation entirely.
        // The node task's `SendOneway` arm runs the same code path as
        // `Send` but without writing the result into a oneshot.
        self.endpoint_commands
            .send(NodeEndpointCommand::SendOneway {
                remote,
                payload: data,
                queued_at: crate::perf_profile::stamp(),
            })
            .await
            .map_err(|_| FipsEndpointError::Closed)?;
        Ok(())
    }

    fn resolve_peer_identity(&self, remote_npub: &str) -> Result<PeerIdentity, FipsEndpointError> {
        // Fast path: cached identity (PeerIdentity is Copy after eager
        // pubkey_full precompute landed in b1e92af, so dereference is free).
        if let Ok(cache) = self.peer_identity_cache.lock()
            && let Some(remote) = cache.get(remote_npub)
        {
            return Ok(*remote);
        }

        let remote = PeerIdentity::from_npub(remote_npub).map_err(|error| {
            FipsEndpointError::InvalidRemoteNpub {
                npub: remote_npub.to_string(),
                reason: error.to_string(),
            }
        })?;

        if let Ok(mut cache) = self.peer_identity_cache.lock() {
            cache.entry(remote_npub.to_string()).or_insert(remote);
        }
        Ok(remote)
    }

    /// Receive the next source-attributed endpoint data message.
    ///
    /// Translation from the internal `NodeEndpointEvent::Data` shape to
    /// the public `FipsEndpointMessage` shape happens inline here — the
    /// rx_loop pushes directly onto this channel, no relay task in
    /// between, no extra cross-task hop per packet.
    pub async fn recv(&self) -> Option<FipsEndpointMessage> {
        let event = self.inbound_endpoint_rx.lock().await.recv().await?;
        let NodeEndpointEvent::Data {
            source_node_addr,
            source_npub,
            payload,
            queued_at,
        } = event;
        crate::perf_profile::record_since(crate::perf_profile::Stage::EndpointEventWait, queued_at);
        Some(FipsEndpointMessage {
            source_node_addr,
            source_npub,
            data: payload,
        })
    }

    /// Synchronous blocking send — parks the calling **OS thread** on
    /// the FIPS endpoint command channel until the runtime accepts
    /// the send. MUST be called only from a thread spawned via
    /// `std::thread::spawn`, not from inside a tokio runtime.
    ///
    /// Companion to [`Self::blocking_recv`] for control-frame replies
    /// (e.g. responding to a Ping with a Pong) issued from the
    /// dedicated TUN-write thread. Failures are returned via
    /// `FipsEndpointError::Closed` if the runtime has stopped.
    pub fn blocking_send(
        &self,
        remote_npub: impl Into<String>,
        data: impl Into<Vec<u8>>,
    ) -> Result<(), FipsEndpointError> {
        let remote_npub = remote_npub.into();
        let data = data.into();
        if remote_npub == self.npub {
            self.inbound_endpoint_tx
                .send(NodeEndpointEvent::Data {
                    source_node_addr: self.node_addr,
                    source_npub: Some(self.npub.clone()),
                    payload: data,
                    queued_at: crate::perf_profile::stamp(),
                })
                .map_err(|_| FipsEndpointError::Closed)?;
            return Ok(());
        }
        let remote = self.resolve_peer_identity(&remote_npub)?;
        let (response_tx, _response_rx) = oneshot::channel();
        self.endpoint_commands
            .blocking_send(NodeEndpointCommand::Send {
                remote,
                payload: data,
                queued_at: crate::perf_profile::stamp(),
                response_tx,
            })
            .map_err(|_| FipsEndpointError::Closed)?;
        Ok(())
    }

    /// Synchronous blocking receive — parks the calling **OS thread**
    /// on the channel until an event arrives or the channel closes.
    ///
    /// MUST NOT be called from inside a tokio runtime; use this only
    /// from a thread spawned via `std::thread::spawn` so the tokio
    /// scheduler doesn't deadlock.
    ///
    /// The motivation is the bench's CLI receive task: when run as a
    /// regular tokio task each `recv().await` is a full task-wake on
    /// the runtime (~1–3 µs scheduler bookkeeping), and at 113 kpps
    /// that's ~10–30% of one core spent in plumbing the wake-up
    /// rather than writing the packet to TUN. A dedicated OS thread
    /// blocked on the channel via `blocking_recv` parks on a futex
    /// directly — the wake is a single futex_wake() with no scheduler
    /// involvement, an order of magnitude cheaper.
    pub fn blocking_recv(&self) -> Option<FipsEndpointMessage> {
        let mut rx = self.inbound_endpoint_rx.blocking_lock();
        let event = rx.blocking_recv()?;
        let NodeEndpointEvent::Data {
            source_node_addr,
            source_npub,
            payload,
            queued_at,
        } = event;
        crate::perf_profile::record_since(crate::perf_profile::Stage::EndpointEventWait, queued_at);
        Some(FipsEndpointMessage {
            source_node_addr,
            source_npub,
            data: payload,
        })
    }

    /// Non-blocking receive — returns the next ready endpoint message
    /// if one is queued, otherwise `None`. Pair with `recv()` to drain
    /// follow-on packets without paying a scheduler wake per packet:
    ///
    /// ```ignore
    /// // wake on the first packet, then drain everything ready
    /// while let Some(msg) = endpoint.recv().await { process(msg); }
    /// while let Some(msg) = endpoint.try_recv() { process(msg); }
    /// ```
    ///
    /// On the bench's FIPS-tunnel receive path the kernel UDP socket
    /// delivers packets in `recvmmsg`-sized bursts, so after a `.recv()`
    /// await there are typically 5–30 packets queued waiting. Draining
    /// them inline with `try_recv` saves N-1 scheduler hops per burst
    /// at line rate, freeing the consumer task to spend its time on
    /// the TUN write syscall instead of cross-task plumbing.
    ///
    /// Returns `None` if the channel is empty, closed, or briefly
    /// contested by another consumer.
    pub fn try_recv(&self) -> Option<FipsEndpointMessage> {
        let mut rx = self.inbound_endpoint_rx.try_lock().ok()?;
        let event = rx.try_recv().ok()?;
        let NodeEndpointEvent::Data {
            source_node_addr,
            source_npub,
            payload,
            queued_at,
        } = event;
        crate::perf_profile::record_since(crate::perf_profile::Stage::EndpointEventWait, queued_at);
        Some(FipsEndpointMessage {
            source_node_addr,
            source_npub,
            data: payload,
        })
    }

    /// Snapshot authenticated peers known by the endpoint.
    pub async fn peers(&self) -> Result<Vec<FipsEndpointPeer>, FipsEndpointError> {
        let (response_tx, response_rx) = oneshot::channel();
        self.endpoint_commands
            .send(NodeEndpointCommand::PeerSnapshot { response_tx })
            .await
            .map_err(|_| FipsEndpointError::Closed)?;

        response_rx
            .await
            .map(|peers| peers.into_iter().map(FipsEndpointPeer::from).collect())
            .map_err(|_| FipsEndpointError::Closed)
    }

    /// Send an outbound IPv6 packet into the FIPS session pipeline.
    pub async fn send_ip_packet(
        &self,
        packet: impl Into<Vec<u8>>,
    ) -> Result<(), FipsEndpointError> {
        self.outbound_packets
            .send(packet.into())
            .await
            .map_err(|_| FipsEndpointError::Closed)
    }

    /// Receive the next source-attributed IPv6 packet delivered by FIPS.
    pub async fn recv_ip_packet(&self) -> Option<NodeDeliveredPacket> {
        self.delivered_packets.lock().await.recv().await
    }

    /// Shut down the endpoint and wait for the node task to stop.
    pub async fn shutdown(mut self) -> Result<(), FipsEndpointError> {
        if let Some(shutdown_tx) = self.shutdown_tx.take() {
            let _ = shutdown_tx.send(());
        }
        self.task.await??;
        Ok(())
    }
}

impl From<NodeEndpointPeer> for FipsEndpointPeer {
    fn from(peer: NodeEndpointPeer) -> Self {
        Self {
            npub: peer.npub,
            transport_addr: peer.transport_addr,
            transport_type: peer.transport_type,
            link_id: peer.link_id,
            srtt_ms: peer.srtt_ms,
            packets_sent: peer.packets_sent,
            packets_recv: peer.packets_recv,
            bytes_sent: peer.bytes_sent,
            bytes_recv: peer.bytes_recv,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::time::Duration;

    #[tokio::test]
    async fn endpoint_starts_without_system_tun() {
        let endpoint = FipsEndpoint::builder()
            .without_system_tun()
            .bind()
            .await
            .expect("endpoint should bind");

        assert!(!endpoint.npub().is_empty());
        assert!(endpoint.discovery_scope().is_none());
        endpoint.shutdown().await.expect("shutdown should succeed");
    }

    #[tokio::test]
    async fn loopback_endpoint_data_roundtrips() {
        let endpoint = FipsEndpoint::builder()
            .without_system_tun()
            .bind()
            .await
            .expect("endpoint should bind");

        endpoint
            .send(endpoint.npub().to_string(), b"ping".to_vec())
            .await
            .expect("loopback send should succeed");
        let message = tokio::time::timeout(Duration::from_secs(1), endpoint.recv())
            .await
            .expect("recv should not time out")
            .expect("message should arrive");
        assert_eq!(message.source_node_addr, *endpoint.node_addr());
        assert_eq!(message.source_npub, Some(endpoint.npub().to_string()));
        assert_eq!(message.data, b"ping");
        assert!(endpoint.discovery_scope().is_none());

        endpoint.shutdown().await.expect("shutdown should succeed");
    }

    #[test]
    fn discovery_scope_enables_default_scoped_udp_discovery() {
        let config = FipsEndpoint::builder()
            .discovery_scope("nostr-vpn:test")
            .prepared_config();

        assert!(!config.tun.enabled);
        assert!(!config.dns.enabled);
        assert!(!config.node.system_files_enabled);
        assert!(config.node.discovery.nostr.enabled);
        assert!(config.node.discovery.nostr.advertise);
        assert_eq!(
            config.node.discovery.nostr.policy,
            NostrDiscoveryPolicy::Open
        );
        assert!(config.node.discovery.nostr.share_local_candidates);
        assert_eq!(
            config.node.discovery.nostr.app,
            "fips-overlay-v1:nostr-vpn:test"
        );

        let udp = match config.transports.udp {
            TransportInstances::Single(udp) => udp,
            TransportInstances::Named(_) => panic!("expected a default UDP transport"),
        };
        assert_eq!(udp.bind_addr(), "0.0.0.0:0");
        assert!(udp.advertise_on_nostr());
        assert!(!udp.is_public());
        assert!(!udp.outbound_only());
        assert!(udp.accept_connections());
    }

    #[test]
    fn discovery_scope_preserves_explicit_connectivity_config() {
        let mut explicit = Config::new();
        explicit.node.discovery.nostr.enabled = true;
        explicit.node.discovery.nostr.app = "custom-app".to_string();
        explicit.node.discovery.nostr.policy = NostrDiscoveryPolicy::ConfiguredOnly;
        explicit.node.discovery.nostr.share_local_candidates = false;
        explicit.transports.udp = TransportInstances::Single(UdpConfig {
            bind_addr: Some("127.0.0.1:34567".to_string()),
            advertise_on_nostr: Some(false),
            outbound_only: Some(true),
            ..UdpConfig::default()
        });

        let config = FipsEndpoint::builder()
            .config(explicit)
            .discovery_scope("nostr-vpn:test")
            .prepared_config();

        assert_eq!(config.node.discovery.nostr.app, "custom-app");
        assert_eq!(
            config.node.discovery.nostr.policy,
            NostrDiscoveryPolicy::ConfiguredOnly
        );
        assert!(!config.node.discovery.nostr.share_local_candidates);
        let udp = match config.transports.udp {
            TransportInstances::Single(udp) => udp,
            TransportInstances::Named(_) => panic!("expected explicit UDP transport"),
        };
        // The explicitly configured bind address must survive untouched.
        // (A stray duplicated assertion expecting the default "0.0.0.0:0"
        // here contradicted this and was removed.)
        assert_eq!(udp.bind_addr.as_deref(), Some("127.0.0.1:34567"));
        assert!(!udp.advertise_on_nostr());
        assert!(udp.outbound_only());
    }

    #[tokio::test]
    async fn invalid_remote_npub_is_rejected() {
        let endpoint = FipsEndpoint::builder()
            .without_system_tun()
            .bind()
            .await
            .expect("endpoint should bind");

        let error = endpoint
            .send("not-an-npub", b"hello".to_vec())
            .await
            .expect_err("invalid npub should fail");
        assert!(matches!(error, FipsEndpointError::InvalidRemoteNpub { .. }));

        endpoint.shutdown().await.expect("shutdown should succeed");
    }

    #[tokio::test]
    async fn endpoint_peer_snapshot_starts_empty() {
        let endpoint = FipsEndpoint::builder()
            .without_system_tun()
            .bind()
            .await
            .expect("endpoint should bind");

        let peers = endpoint.peers().await.expect("peer snapshot");
        assert!(peers.is_empty());

        endpoint.shutdown().await.expect("shutdown should succeed");
    }
}