fips_core/endpoint.rs

//! Library-first endpoint API for embedding FIPS in applications.
//!
//! This module exposes a no-system-TUN runtime shape for apps that want to own
//! peer admission and local routing policy while reusing FIPS connectivity.
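//!
//! A minimal embedding sketch (error handling elided; `peer_npub` and
//! `handle_message` are application-side placeholders, not items this module
//! defines):
//!
//! ```ignore
//! let endpoint = FipsEndpoint::builder()
//!     .discovery_scope("my-app:prod")
//!     .without_system_tun()
//!     .bind()
//!     .await?;
//!
//! // Fire-and-forget datagram to a known peer npub.
//! endpoint.send(peer_npub, b"hello".to_vec()).await?;
//!
//! // Source-attributed receive.
//! if let Some(message) = endpoint.recv().await {
//!     handle_message(message.source_npub, message.data);
//! }
//!
//! endpoint.shutdown().await?;
//! ```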

use crate::config::{NostrDiscoveryPolicy, TransportInstances, UdpConfig};
use crate::node::{NodeEndpointCommand, NodeEndpointEvent, NodeEndpointPeer};
use crate::{
    Config, FipsAddress, IdentityConfig, Node, NodeAddr, NodeDeliveredPacket, NodeError,
    PeerIdentity,
};
use std::sync::Arc;
use thiserror::Error;
use tokio::sync::{Mutex, mpsc, oneshot};
use tokio::task::JoinHandle;

/// Errors returned by the endpoint API.
#[derive(Debug, Error)]
pub enum FipsEndpointError {
    #[error("node error: {0}")]
    Node(#[from] NodeError),

    #[error("endpoint task failed: {0}")]
    TaskJoin(#[from] tokio::task::JoinError),

    #[error("endpoint is closed")]
    Closed,

    #[error("invalid remote npub '{npub}': {reason}")]
    InvalidRemoteNpub { npub: String, reason: String },
}

/// Source-attributed endpoint data delivered to an embedded application.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FipsEndpointMessage {
    /// FIPS node address that originated the endpoint data.
    pub source_node_addr: NodeAddr,
    /// Source Nostr public key when the node has learned it.
    pub source_npub: Option<String>,
    /// Application-owned payload bytes.
    pub data: Vec<u8>,
}

/// Reports what changed in response to [`FipsEndpoint::update_peers`].
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct UpdatePeersOutcome {
    /// Number of npubs that were not previously in the runtime peer list
    /// and got an `initiate_peer_connection` call.
    pub added: usize,
    /// Number of npubs that were dropped from the runtime peer list. Their
    /// retry entries are gone; any active session stays up until the
    /// regular liveness timeout reaps it.
    pub removed: usize,
    /// Number of npubs that were already in the list but had a different
    /// `addresses`, `alias`, `connect_policy`, or `auto_reconnect` value.
    /// The new values are now in effect for retries and aliasing.
    pub updated: usize,
    /// Number of npubs that were in the list and identical to the new entry.
    pub unchanged: usize,
}

impl From<crate::node::UpdatePeersOutcome> for UpdatePeersOutcome {
    fn from(value: crate::node::UpdatePeersOutcome) -> Self {
        Self {
            added: value.added,
            removed: value.removed,
            updated: value.updated,
            unchanged: value.unchanged,
        }
    }
}

/// Authenticated FIPS peer state visible to an embedded application.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FipsEndpointPeer {
    /// Peer Nostr public key.
    pub npub: String,
    /// Current underlay transport address, when a link has authenticated.
    pub transport_addr: Option<String>,
    /// Current underlay transport kind, when known.
    pub transport_type: Option<String>,
    /// Authenticated link id.
    pub link_id: u64,
    /// Smoothed RTT in milliseconds, once measured by FIPS MMP.
    pub srtt_ms: Option<u64>,
    /// Link packets sent.
    pub packets_sent: u64,
    /// Link packets received.
    pub packets_recv: u64,
    /// Link bytes sent.
    pub bytes_sent: u64,
    /// Link bytes received.
    pub bytes_recv: u64,
}

/// Builder for an embedded FIPS endpoint.
#[derive(Debug, Clone)]
pub struct FipsEndpointBuilder {
    config: Config,
    identity_nsec: Option<String>,
    discovery_scope: Option<String>,
    disable_system_networking: bool,
    packet_channel_capacity: usize,
}

impl Default for FipsEndpointBuilder {
    fn default() -> Self {
        Self {
            config: Config::new(),
            identity_nsec: None,
            discovery_scope: None,
            disable_system_networking: true,
            packet_channel_capacity: 1024,
        }
    }
}

impl FipsEndpointBuilder {
    /// Start from an explicit FIPS config.
    pub fn config(mut self, config: Config) -> Self {
        self.config = config;
        self
    }

    /// Use an `nsec` or hex secret for the endpoint identity.
    pub fn identity_nsec(mut self, nsec: impl Into<String>) -> Self {
        self.identity_nsec = Some(nsec.into());
        self
    }

    /// Set an application-level discovery scope.
    ///
    /// When the builder owns the default empty connectivity config, this also
    /// enables scoped Nostr discovery, open same-scope peer discovery, local
    /// LAN candidates, and a UDP NAT advert. If an explicit transport or
    /// Nostr config was supplied, the explicit config is left in control and
    /// the scope is retained as endpoint metadata.
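    ///
    /// A sketch of the default path (the scope string is illustrative):
    ///
    /// ```ignore
    /// // With an otherwise-default connectivity config this enables scoped
    /// // Nostr discovery plus a single UDP transport, and the advert app tag
    /// // becomes "fips-overlay-v1:my-app:prod".
    /// let endpoint = FipsEndpoint::builder()
    ///     .discovery_scope("my-app:prod")
    ///     .bind()
    ///     .await?;
    /// ```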
    pub fn discovery_scope(mut self, scope: impl Into<String>) -> Self {
        self.discovery_scope = Some(scope.into());
        self
    }

    /// Disable FIPS-owned TUN and DNS system integration.
    pub fn without_system_tun(mut self) -> Self {
        self.disable_system_networking = true;
        self
    }

    /// Set the app packet/data channel capacity.
    pub fn packet_channel_capacity(mut self, capacity: usize) -> Self {
        self.packet_channel_capacity = capacity.max(1);
        self
    }

    fn prepared_config(&self) -> Config {
        let mut config = self.config.clone();
        if let Some(nsec) = &self.identity_nsec {
            config.node.identity = IdentityConfig {
                nsec: Some(nsec.clone()),
                persistent: false,
            };
        }
        if self.disable_system_networking {
            config.tun.enabled = false;
            config.dns.enabled = false;
            config.node.system_files_enabled = false;
        }
        if let Some(scope) = self.discovery_scope.as_deref() {
            apply_default_scoped_discovery(&mut config, scope);
        }
        config
    }

    /// Bind and start the embedded endpoint.
    pub async fn bind(self) -> Result<FipsEndpoint, FipsEndpointError> {
        let config = self.prepared_config();

        let mut node = Node::new(config)?;
        let npub = node.npub();
        let node_addr = *node.node_addr();
        let address = *node.identity().address();
        let packet_io = node.attach_external_packet_io(self.packet_channel_capacity)?;
        let endpoint_data_io = node.attach_endpoint_data_io(self.packet_channel_capacity)?;
        node.start().await?;

        let (shutdown_tx, shutdown_rx) = oneshot::channel();
        let task = spawn_node_task(node, shutdown_rx);
        let endpoint_commands = endpoint_data_io.command_tx;

        Ok(FipsEndpoint {
            npub,
            node_addr,
            address,
            discovery_scope: self.discovery_scope,
            outbound_packets: packet_io.outbound_tx,
            delivered_packets: Arc::new(Mutex::new(packet_io.inbound_rx)),
            endpoint_commands,
            inbound_endpoint_tx: endpoint_data_io.event_tx,
            inbound_endpoint_rx: Arc::new(Mutex::new(endpoint_data_io.event_rx)),
            peer_identity_cache: std::sync::Mutex::new(std::collections::HashMap::new()),
            shutdown_tx: Some(shutdown_tx),
            task,
        })
    }
}

fn apply_default_scoped_discovery(config: &mut Config, scope: &str) {
    if config.node.discovery.nostr.enabled || !config.transports.is_empty() {
        return;
    }

    config.node.discovery.nostr.enabled = true;
    config.node.discovery.nostr.advertise = true;
    config.node.discovery.nostr.policy = NostrDiscoveryPolicy::Open;
    config.node.discovery.nostr.share_local_candidates = true;
    config.node.discovery.nostr.app = format!("fips-overlay-v1:{scope}");
    config.transports.udp = TransportInstances::Single(UdpConfig {
        bind_addr: Some("0.0.0.0:0".to_string()),
        advertise_on_nostr: Some(true),
        public: Some(false),
        outbound_only: Some(false),
        accept_connections: Some(true),
        ..UdpConfig::default()
    });
}

fn spawn_node_task(
    mut node: Node,
    shutdown_rx: oneshot::Receiver<()>,
) -> JoinHandle<Result<(), NodeError>> {
    tokio::spawn(async move {
        tokio::pin!(shutdown_rx);
        let loop_result = tokio::select! {
            result = node.run_rx_loop() => result,
            _ = &mut shutdown_rx => Ok(()),
        };
        let stop_result = if node.state().can_stop() {
            node.stop().await
        } else {
            Ok(())
        };
        loop_result?;
        stop_result
    })
}

/// A running embedded FIPS endpoint.
pub struct FipsEndpoint {
    npub: String,
    node_addr: NodeAddr,
    address: FipsAddress,
    discovery_scope: Option<String>,
    outbound_packets: mpsc::Sender<Vec<u8>>,
    delivered_packets: Arc<Mutex<mpsc::Receiver<NodeDeliveredPacket>>>,
    endpoint_commands: mpsc::Sender<NodeEndpointCommand>,
    /// In-process loopback sender — `send()` to our own npub injects an
    /// event into the same queue without going through the wire/encrypt
    /// path. The node's rx_loop also sends into this channel directly
    /// (it holds a clone of this sender) so there is no per-packet relay
    /// task between the node task and `recv()`.
    inbound_endpoint_tx: mpsc::UnboundedSender<NodeEndpointEvent>,
    /// Unbounded receiver. Was previously fed by a per-packet relay task
    /// that translated `NodeEndpointEvent::Data` into `FipsEndpointMessage`
    /// across an additional bounded mpsc; collapsed into a single channel
    /// — the translation happens inline in `recv()` and the second hop
    /// (with its scheduler wake per packet) is gone.
    inbound_endpoint_rx: Arc<Mutex<mpsc::UnboundedReceiver<NodeEndpointEvent>>>,
    /// Cache of resolved PeerIdentity by npub string. Avoids the per-packet
    /// secp256k1 EC point parse that `PeerIdentity::from_npub` performs;
    /// without this cache the bulk-data send hot path spends ~10–30% of CPU
    /// re-validating identity bytes the application has already configured.
    peer_identity_cache: std::sync::Mutex<std::collections::HashMap<String, PeerIdentity>>,
    shutdown_tx: Option<oneshot::Sender<()>>,
    task: JoinHandle<Result<(), NodeError>>,
}

impl FipsEndpoint {
    /// Create a builder for an embedded endpoint.
    pub fn builder() -> FipsEndpointBuilder {
        FipsEndpointBuilder::default()
    }

    /// Local endpoint npub.
    pub fn npub(&self) -> &str {
        &self.npub
    }

    /// Local FIPS node address.
    pub fn node_addr(&self) -> &NodeAddr {
        &self.node_addr
    }

    /// Local FIPS IPv6-compatible address.
    pub fn address(&self) -> FipsAddress {
        self.address
    }

    /// Application-level discovery scope, if configured.
    pub fn discovery_scope(&self) -> Option<&str> {
        self.discovery_scope.as_deref()
    }

    /// Send application-owned endpoint data to a remote npub.
    ///
    /// Fire-and-forget: enqueues the Send command on the node task and
    /// returns once the command channel accepts it. The node task's send
    /// result is discarded — TCP and the upper protocol handle loss
    /// recovery, and the per-packet oneshot round-trip the previous design
    /// used for error reporting added several hundred microseconds of
    /// queueing latency under load (measured: 456ms avg ping under iperf3
    /// saturation → 1ms after this change, 430× lower).
    ///
    /// PeerIdentity for `remote_npub` is cached after first resolution to
    /// avoid the secp256k1 EC point parse on every packet.
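    ///
    /// A usage sketch (the peer npub and 1200-byte chunking are illustrative):
    ///
    /// ```ignore
    /// // Hot path: enqueue and continue; per-packet delivery results are not
    /// // reported, so loss recovery belongs to the upper protocol.
    /// for chunk in payload.chunks(1200) {
    ///     endpoint.send(peer_npub.clone(), chunk.to_vec()).await?;
    /// }
    /// ```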
    pub async fn send(
        &self,
        remote_npub: impl Into<String>,
        data: impl Into<Vec<u8>>,
    ) -> Result<(), FipsEndpointError> {
        let remote_npub = remote_npub.into();
        let data = data.into();
        if remote_npub == self.npub {
            self.inbound_endpoint_tx
                .send(NodeEndpointEvent::Data {
                    source_node_addr: self.node_addr,
                    source_npub: Some(self.npub.clone()),
                    payload: data,
                    queued_at: crate::perf_profile::stamp(),
                })
                .map_err(|_| FipsEndpointError::Closed)?;
            return Ok(());
        }

        let remote = self.resolve_peer_identity(&remote_npub)?;

        // Fire-and-forget: caller already drops the result, so skip
        // the per-packet `oneshot::channel()` allocation entirely.
        // The node task's `SendOneway` arm runs the same code path as
        // `Send` but without writing the result into a oneshot.
        self.endpoint_commands
            .send(NodeEndpointCommand::SendOneway {
                remote,
                payload: data,
                queued_at: crate::perf_profile::stamp(),
            })
            .await
            .map_err(|_| FipsEndpointError::Closed)?;
        Ok(())
    }

    fn resolve_peer_identity(&self, remote_npub: &str) -> Result<PeerIdentity, FipsEndpointError> {
        // Fast path: cached identity (PeerIdentity is Copy after eager
        // pubkey_full precompute landed in b1e92af, so dereference is free).
        if let Ok(cache) = self.peer_identity_cache.lock()
            && let Some(remote) = cache.get(remote_npub)
        {
            return Ok(*remote);
        }

        let remote = PeerIdentity::from_npub(remote_npub).map_err(|error| {
            FipsEndpointError::InvalidRemoteNpub {
                npub: remote_npub.to_string(),
                reason: error.to_string(),
            }
        })?;

        if let Ok(mut cache) = self.peer_identity_cache.lock() {
            cache.entry(remote_npub.to_string()).or_insert(remote);
        }
        Ok(remote)
    }

    /// Receive the next source-attributed endpoint data message.
    ///
    /// Translation from the internal `NodeEndpointEvent::Data` shape to
    /// the public `FipsEndpointMessage` shape happens inline here — the
    /// rx_loop pushes directly onto this channel, no relay task in
    /// between, no extra cross-task hop per packet.
    pub async fn recv(&self) -> Option<FipsEndpointMessage> {
        let event = self.inbound_endpoint_rx.lock().await.recv().await?;
        let NodeEndpointEvent::Data {
            source_node_addr,
            source_npub,
            payload,
            queued_at,
        } = event;
        crate::perf_profile::record_since(crate::perf_profile::Stage::EndpointEventWait, queued_at);
        Some(FipsEndpointMessage {
            source_node_addr,
            source_npub,
            data: payload,
        })
    }

    /// Synchronous blocking send — parks the calling **OS thread** on
    /// the FIPS endpoint command channel until the runtime accepts
    /// the send. MUST be called only from a thread spawned via
    /// `std::thread::spawn`, not from inside a tokio runtime.
    ///
    /// Companion to [`Self::blocking_recv`] for control-frame replies
    /// (e.g. responding to a Ping with a Pong) issued from the
    /// dedicated TUN-write thread. Failures are returned via
    /// `FipsEndpointError::Closed` if the runtime has stopped.
    pub fn blocking_send(
        &self,
        remote_npub: impl Into<String>,
        data: impl Into<Vec<u8>>,
    ) -> Result<(), FipsEndpointError> {
        let remote_npub = remote_npub.into();
        let data = data.into();
        if remote_npub == self.npub {
            self.inbound_endpoint_tx
                .send(NodeEndpointEvent::Data {
                    source_node_addr: self.node_addr,
                    source_npub: Some(self.npub.clone()),
                    payload: data,
                    queued_at: crate::perf_profile::stamp(),
                })
                .map_err(|_| FipsEndpointError::Closed)?;
            return Ok(());
        }
        let remote = self.resolve_peer_identity(&remote_npub)?;
        let (response_tx, _response_rx) = oneshot::channel();
        self.endpoint_commands
            .blocking_send(NodeEndpointCommand::Send {
                remote,
                payload: data,
                queued_at: crate::perf_profile::stamp(),
                response_tx,
            })
            .map_err(|_| FipsEndpointError::Closed)?;
        Ok(())
    }

    /// Synchronous blocking receive — parks the calling **OS thread**
    /// on the channel until an event arrives or the channel closes.
    ///
    /// MUST NOT be called from inside a tokio runtime; use this only
    /// from a thread spawned via `std::thread::spawn` so the tokio
    /// scheduler doesn't deadlock.
    ///
    /// The motivation is the bench's CLI receive task: when run as a
    /// regular tokio task each `recv().await` is a full task-wake on
    /// the runtime (~1–3 µs scheduler bookkeeping), and at 113 kpps
    /// that's ~10–30% of one core spent in plumbing the wake-up
    /// rather than writing the packet to TUN. A dedicated OS thread
    /// blocked on the channel via `blocking_recv` parks on a futex
    /// directly — the wake is a single futex_wake() with no scheduler
    /// involvement, an order of magnitude cheaper.
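    ///
    /// A sketch of the dedicated-thread pattern (the `Arc` wrapping and the
    /// pong reply are illustrative, not part of this API):
    ///
    /// ```ignore
    /// let rx_endpoint = std::sync::Arc::clone(&endpoint);
    /// std::thread::spawn(move || {
    ///     // Parks on the channel; never call this from inside a tokio task.
    ///     while let Some(message) = rx_endpoint.blocking_recv() {
    ///         if let Some(npub) = message.source_npub {
    ///             // Reply from the same OS thread via the blocking companion.
    ///             let _ = rx_endpoint.blocking_send(npub, b"pong".to_vec());
    ///         }
    ///     }
    /// });
    /// ```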
    pub fn blocking_recv(&self) -> Option<FipsEndpointMessage> {
        let mut rx = self.inbound_endpoint_rx.blocking_lock();
        let event = rx.blocking_recv()?;
        let NodeEndpointEvent::Data {
            source_node_addr,
            source_npub,
            payload,
            queued_at,
        } = event;
        crate::perf_profile::record_since(crate::perf_profile::Stage::EndpointEventWait, queued_at);
        Some(FipsEndpointMessage {
            source_node_addr,
            source_npub,
            data: payload,
        })
    }

    /// Non-blocking receive — returns the next ready endpoint message
    /// if one is queued, otherwise `None`. Pair with `recv()` to drain
    /// follow-on packets without paying a scheduler wake per packet:
    ///
    /// ```ignore
    /// loop {
    ///     // wake on the first packet of a burst...
    ///     let Some(msg) = endpoint.recv().await else { break };
    ///     process(msg);
    ///     // ...then drain everything already queued without another wake
    ///     while let Some(msg) = endpoint.try_recv() { process(msg); }
    /// }
    /// ```
    ///
    /// On the bench's FIPS-tunnel receive path the kernel UDP socket
    /// delivers packets in `recvmmsg`-sized bursts, so after a `.recv()`
    /// await there are typically 5–30 packets queued waiting. Draining
    /// them inline with `try_recv` saves N-1 scheduler hops per burst
    /// at line rate, freeing the consumer task to spend its time on
    /// the TUN write syscall instead of cross-task plumbing.
    ///
    /// Returns `None` if the channel is empty, closed, or briefly
    /// contested by another consumer.
    pub fn try_recv(&self) -> Option<FipsEndpointMessage> {
        let mut rx = self.inbound_endpoint_rx.try_lock().ok()?;
        let event = rx.try_recv().ok()?;
        let NodeEndpointEvent::Data {
            source_node_addr,
            source_npub,
            payload,
            queued_at,
        } = event;
        crate::perf_profile::record_since(crate::perf_profile::Stage::EndpointEventWait, queued_at);
        Some(FipsEndpointMessage {
            source_node_addr,
            source_npub,
            data: payload,
        })
    }

    /// Replace the runtime peer list. Newly added auto-connect peers get
    /// dialed immediately using every known address (overlay-fresh first,
    /// then operator/cache hints). Removed peers are dropped from the
    /// retry queue but stay connected if they currently are — the regular
    /// liveness timeout reaps idle sessions. Existing entries get their
    /// `addresses` field refreshed so the next retry sees the latest hints.
    ///
    /// Pass an empty `addresses` vector for a peer if you want FIPS to
    /// resolve them entirely from the Nostr advert at dial time.
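    ///
    /// A minimal sketch (clearing the runtime list; populated entries are
    /// built from the crate's `PeerConfig` type):
    ///
    /// ```ignore
    /// // Drop every runtime peer; active sessions stay up until the
    /// // liveness timeout reaps them.
    /// let outcome = endpoint.update_peers(Vec::new()).await?;
    /// println!("dropped {} peers from the retry list", outcome.removed);
    /// ```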
    pub async fn update_peers(
        &self,
        peers: Vec<crate::config::PeerConfig>,
    ) -> Result<UpdatePeersOutcome, FipsEndpointError> {
        let (response_tx, response_rx) = oneshot::channel();
        self.endpoint_commands
            .send(NodeEndpointCommand::UpdatePeers { peers, response_tx })
            .await
            .map_err(|_| FipsEndpointError::Closed)?;

        match response_rx.await.map_err(|_| FipsEndpointError::Closed)? {
            Ok(outcome) => Ok(UpdatePeersOutcome::from(outcome)),
            Err(error) => Err(FipsEndpointError::Node(error)),
        }
    }

    /// Snapshot authenticated peers known by the endpoint.
    pub async fn peers(&self) -> Result<Vec<FipsEndpointPeer>, FipsEndpointError> {
        let (response_tx, response_rx) = oneshot::channel();
        self.endpoint_commands
            .send(NodeEndpointCommand::PeerSnapshot { response_tx })
            .await
            .map_err(|_| FipsEndpointError::Closed)?;

        response_rx
            .await
            .map(|peers| peers.into_iter().map(FipsEndpointPeer::from).collect())
            .map_err(|_| FipsEndpointError::Closed)
    }

    /// Send an outbound IPv6 packet into the FIPS session pipeline.
    pub async fn send_ip_packet(
        &self,
        packet: impl Into<Vec<u8>>,
    ) -> Result<(), FipsEndpointError> {
        self.outbound_packets
            .send(packet.into())
            .await
            .map_err(|_| FipsEndpointError::Closed)
    }

    /// Receive the next source-attributed IPv6 packet delivered by FIPS.
    pub async fn recv_ip_packet(&self) -> Option<NodeDeliveredPacket> {
        self.delivered_packets.lock().await.recv().await
    }

    /// Shut down the endpoint and wait for the node task to stop.
    pub async fn shutdown(mut self) -> Result<(), FipsEndpointError> {
        if let Some(shutdown_tx) = self.shutdown_tx.take() {
            let _ = shutdown_tx.send(());
        }
        self.task.await??;
        Ok(())
    }
}

impl From<NodeEndpointPeer> for FipsEndpointPeer {
    fn from(peer: NodeEndpointPeer) -> Self {
        Self {
            npub: peer.npub,
            transport_addr: peer.transport_addr,
            transport_type: peer.transport_type,
            link_id: peer.link_id,
            srtt_ms: peer.srtt_ms,
            packets_sent: peer.packets_sent,
            packets_recv: peer.packets_recv,
            bytes_sent: peer.bytes_sent,
            bytes_recv: peer.bytes_recv,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::time::Duration;

    #[tokio::test]
    async fn endpoint_starts_without_system_tun() {
        let endpoint = FipsEndpoint::builder()
            .without_system_tun()
            .bind()
            .await
            .expect("endpoint should bind");

        assert!(!endpoint.npub().is_empty());
        assert!(endpoint.discovery_scope().is_none());
        endpoint.shutdown().await.expect("shutdown should succeed");
    }

    #[tokio::test]
    async fn loopback_endpoint_data_roundtrips() {
        let endpoint = FipsEndpoint::builder()
            .without_system_tun()
            .bind()
            .await
            .expect("endpoint should bind");

        endpoint
            .send(endpoint.npub().to_string(), b"ping".to_vec())
            .await
            .expect("loopback send should succeed");
        let message = tokio::time::timeout(Duration::from_secs(1), endpoint.recv())
            .await
            .expect("recv should not time out")
            .expect("message should arrive");
        assert_eq!(message.source_node_addr, *endpoint.node_addr());
        assert_eq!(message.source_npub, Some(endpoint.npub().to_string()));
        assert_eq!(message.data, b"ping");
        assert!(endpoint.discovery_scope().is_none());

        endpoint.shutdown().await.expect("shutdown should succeed");
    }

    #[test]
    fn discovery_scope_enables_default_scoped_udp_discovery() {
        let config = FipsEndpoint::builder()
            .discovery_scope("nostr-vpn:test")
            .prepared_config();

        assert!(!config.tun.enabled);
        assert!(!config.dns.enabled);
        assert!(!config.node.system_files_enabled);
        assert!(config.node.discovery.nostr.enabled);
        assert!(config.node.discovery.nostr.advertise);
        assert_eq!(
            config.node.discovery.nostr.policy,
            NostrDiscoveryPolicy::Open
        );
        assert!(config.node.discovery.nostr.share_local_candidates);
        assert_eq!(
            config.node.discovery.nostr.app,
            "fips-overlay-v1:nostr-vpn:test"
        );

        let udp = match config.transports.udp {
            TransportInstances::Single(udp) => udp,
            TransportInstances::Named(_) => panic!("expected a default UDP transport"),
        };
        assert_eq!(udp.bind_addr(), "0.0.0.0:0");
        assert!(udp.advertise_on_nostr());
        assert!(!udp.is_public());
        assert!(!udp.outbound_only());
        assert!(udp.accept_connections());
    }

    #[test]
    fn discovery_scope_preserves_explicit_connectivity_config() {
        let mut explicit = Config::new();
        explicit.node.discovery.nostr.enabled = true;
        explicit.node.discovery.nostr.app = "custom-app".to_string();
        explicit.node.discovery.nostr.policy = NostrDiscoveryPolicy::ConfiguredOnly;
        explicit.node.discovery.nostr.share_local_candidates = false;
        explicit.transports.udp = TransportInstances::Single(UdpConfig {
            bind_addr: Some("127.0.0.1:34567".to_string()),
            advertise_on_nostr: Some(false),
            outbound_only: Some(true),
            ..UdpConfig::default()
        });

        let config = FipsEndpoint::builder()
            .config(explicit)
            .discovery_scope("nostr-vpn:test")
            .prepared_config();

        assert_eq!(config.node.discovery.nostr.app, "custom-app");
        assert_eq!(
            config.node.discovery.nostr.policy,
            NostrDiscoveryPolicy::ConfiguredOnly
        );
        assert!(!config.node.discovery.nostr.share_local_candidates);
        let udp = match config.transports.udp {
            TransportInstances::Single(udp) => udp,
            TransportInstances::Named(_) => panic!("expected explicit UDP transport"),
        };
        assert_eq!(udp.bind_addr.as_deref(), Some("127.0.0.1:34567"));
        assert_eq!(udp.bind_addr(), "127.0.0.1:34567");
        assert!(!udp.advertise_on_nostr());
        assert!(udp.outbound_only());
    }

    #[tokio::test]
    async fn invalid_remote_npub_is_rejected() {
        let endpoint = FipsEndpoint::builder()
            .without_system_tun()
            .bind()
            .await
            .expect("endpoint should bind");

        let error = endpoint
            .send("not-an-npub", b"hello".to_vec())
            .await
            .expect_err("invalid npub should fail");
        assert!(matches!(error, FipsEndpointError::InvalidRemoteNpub { .. }));

        endpoint.shutdown().await.expect("shutdown should succeed");
    }

    #[tokio::test]
    async fn endpoint_peer_snapshot_starts_empty() {
        let endpoint = FipsEndpoint::builder()
            .without_system_tun()
            .bind()
            .await
            .expect("endpoint should bind");

        let peers = endpoint.peers().await.expect("peer snapshot");
        assert!(peers.is_empty());

        endpoint.shutdown().await.expect("shutdown should succeed");
    }
}