// fips_core/node/mod.rs
1//! FIPS Node Entity
2//!
3//! Top-level structure representing a running FIPS instance. The Node
4//! holds all state required for mesh routing: identity, tree state,
5//! Bloom filters, coordinate caches, transports, links, and peers.
6
7mod acl;
8mod bloom;
9mod decrypt_worker;
10mod discovery_rate_limit;
11mod encrypt_worker;
12mod handlers;
13mod lifecycle;
14mod rate_limit;
15mod retry;
16mod routing;
17mod routing_error_rate_limit;
18pub(crate) mod session;
19pub(crate) mod session_wire;
20pub(crate) mod stats;
21pub(crate) mod stats_history;
22#[cfg(test)]
23mod tests;
24mod tree;
25pub(crate) mod wire;
26
27use self::discovery_rate_limit::{DiscoveryBackoff, DiscoveryForwardRateLimiter};
28use self::rate_limit::HandshakeRateLimiter;
29use self::routing::{LearnedRouteTable, LearnedRouteTableSnapshot};
30use self::routing_error_rate_limit::RoutingErrorRateLimiter;
31use self::wire::{
32    ESTABLISHED_HEADER_SIZE, FLAG_CE, FLAG_KEY_EPOCH, FLAG_SP, build_encrypted,
33    build_established_header, prepend_inner_header,
34};
35use crate::bloom::BloomState;
36use crate::cache::CoordCache;
37use crate::config::RoutingMode;
38use crate::node::session::SessionEntry;
39use crate::peer::{ActivePeer, PeerConnection};
40#[cfg(any(target_os = "linux", target_os = "macos"))]
41use crate::transport::ethernet::EthernetTransport;
42use crate::transport::tcp::TcpTransport;
43use crate::transport::tor::TorTransport;
44use crate::transport::udp::UdpTransport;
45use crate::transport::{
46    Link, LinkId, PacketRx, PacketTx, TransportAddr, TransportError, TransportHandle, TransportId,
47};
48use crate::tree::TreeState;
49use crate::upper::hosts::HostMap;
50use crate::upper::icmp_rate_limit::IcmpRateLimiter;
51use crate::upper::tun::{TunError, TunOutboundRx, TunState, TunTx};
52use crate::utils::index::IndexAllocator;
53use crate::{
54    Config, ConfigError, FipsAddress, Identity, IdentityError, NodeAddr, PeerIdentity,
55    SessionMessageType, encode_npub,
56};
57use rand::Rng;
58use std::collections::{HashMap, HashSet, VecDeque};
59use std::fmt;
60use std::sync::Arc;
61use std::thread::JoinHandle;
62use thiserror::Error;
63use tracing::{debug, warn};
64
/// Half-range of the symmetric jitter applied to per-session rekey timers.
///
/// Each FMP/FSP session draws an offset uniformly from
/// `[-REKEY_JITTER_SECS, +REKEY_JITTER_SECS]` seconds at construction and
/// after each cutover. This preserves the configured mean interval while
/// reducing dual-initiation bursts in symmetric-start meshes.
///
/// Signed (`i64`) because the drawn offset may be negative.
pub(crate) const REKEY_JITTER_SECS: i64 = 15;
72
/// Errors related to node operations.
#[derive(Debug, Error)]
pub enum NodeError {
    // --- Lifecycle state ---
    #[error("node not started")]
    NotStarted,

    #[error("node already started")]
    AlreadyStarted,

    #[error("node already stopped")]
    AlreadyStopped,

    // --- Entity lookup failures ---
    #[error("transport not found: {0}")]
    TransportNotFound(TransportId),

    #[error("no transport available for type: {0}")]
    NoTransportForType(String),

    #[error("link not found: {0}")]
    LinkNotFound(LinkId),

    #[error("connection not found: {0}")]
    ConnectionNotFound(LinkId),

    #[error("peer not found: {0:?}")]
    PeerNotFound(NodeAddr),

    // --- Duplicate-entity conflicts ---
    #[error("peer already exists: {0:?}")]
    PeerAlreadyExists(NodeAddr),

    #[error("connection already exists for link: {0}")]
    ConnectionAlreadyExists(LinkId),

    // --- Validation / policy ---
    #[error("invalid peer npub '{npub}': {reason}")]
    InvalidPeerNpub { npub: String, reason: String },

    #[error("access denied: {0}")]
    AccessDenied(String),

    // --- Resource limits (configured via `node.limits`, 0 = unlimited) ---
    #[error("max connections exceeded: {max}")]
    MaxConnectionsExceeded { max: usize },

    #[error("max peers exceeded: {max}")]
    MaxPeersExceeded { max: usize },

    #[error("max links exceeded: {max}")]
    MaxLinksExceeded { max: usize },

    // --- Handshake / session ---
    #[error("handshake incomplete for link {0}")]
    HandshakeIncomplete(LinkId),

    #[error("no session available for link {0}")]
    NoSession(LinkId),

    #[error("promotion failed for link {link_id}: {reason}")]
    PromotionFailed { link_id: LinkId, reason: String },

    // --- Forwarding ---
    #[error("send failed to {node_addr}: {reason}")]
    SendFailed { node_addr: NodeAddr, reason: String },

    #[error("mtu exceeded forwarding to {node_addr}: packet {packet_size} > mtu {mtu}")]
    MtuExceeded {
        node_addr: NodeAddr,
        packet_size: usize,
        mtu: u16,
    },

    // --- Wrapped lower-layer errors ---
    #[error("config error: {0}")]
    Config(#[from] ConfigError),

    #[error("identity error: {0}")]
    Identity(#[from] IdentityError),

    #[error("TUN error: {0}")]
    Tun(#[from] TunError),

    // --- Stringly-reported operational failures ---
    #[error("index allocation failed: {0}")]
    IndexAllocationFailed(String),

    #[error("handshake failed: {0}")]
    HandshakeFailed(String),

    #[error("transport error: {0}")]
    TransportError(String),

    #[error("bootstrap handoff failed: {0}")]
    BootstrapHandoff(String),
}
161
/// Source-attributed packet delivered by a node running without a system TUN.
///
/// Emitted on the `ExternalPacketIo::inbound_rx` channel.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct NodeDeliveredPacket {
    /// FIPS node address that originated the packet.
    pub source_node_addr: NodeAddr,
    /// Source Nostr public key when the node has learned it.
    pub source_npub: Option<String>,
    /// Destination FIPS address from the IPv6 packet.
    pub destination: FipsAddress,
    /// Full IPv6 packet after FIPS session decapsulation.
    pub packet: Vec<u8>,
}
174
/// Cached identity data for a peer, keyed externally by the 15-byte
/// FipsAddress prefix (see `Node::identity_cache`).
#[derive(Debug, Clone)]
struct IdentityCacheEntry {
    /// Peer's mesh routing address.
    node_addr: NodeAddr,
    /// Peer's secp256k1 public key.
    pubkey: secp256k1::PublicKey,
    /// Peer's bech32-encoded Nostr public key.
    npub: String,
    /// Timestamp the identity was last observed — presumably Unix
    /// milliseconds, consistent with `RecentRequest::timestamp_ms`;
    /// confirm at the caller of `new`.
    last_seen_ms: u64,
}
182
183impl IdentityCacheEntry {
184    fn new(
185        node_addr: NodeAddr,
186        pubkey: secp256k1::PublicKey,
187        npub: String,
188        last_seen_ms: u64,
189    ) -> Self {
190        Self {
191            node_addr,
192            pubkey,
193            npub,
194            last_seen_ms,
195        }
196    }
197}
198
/// App-owned packet channels for embedding FIPS without a system TUN.
///
/// Stands in for the TUN device in embedded integrations: the app
/// writes outbound IPv6 packets via `outbound_tx` and reads inbound
/// ones (as [`NodeDeliveredPacket`]) from `inbound_rx`.
#[derive(Debug)]
pub struct ExternalPacketIo {
    /// Send outbound IPv6 packets into the node.
    pub outbound_tx: crate::upper::tun::TunOutboundTx,
    /// Receive inbound IPv6 packets delivered by FIPS sessions.
    pub inbound_rx: tokio::sync::mpsc::Receiver<NodeDeliveredPacket>,
}
207
/// App-owned endpoint data channels for embedding FIPS without a daemon.
#[derive(Debug)]
pub(crate) struct EndpointDataIo {
    /// Send endpoint data commands into the node RX loop.
    ///
    /// Bounded with a generous default so normal sender bursts do not
    /// stall on semaphore acquisition. macOS pacing happens at the UDP
    /// egress thread where the real Wi-Fi/interface bottleneck is visible;
    /// constraining this app queue instead caused the inner TCP flow to
    /// collapse under iperf. `FIPS_ENDPOINT_DATA_QUEUE_CAP` overrides the
    /// default for benches.
    // NOTE(review): the capacity presumably comes from
    // `endpoint_data_command_capacity` (same env override) — confirm at
    // the construction site.
    pub(crate) command_tx: tokio::sync::mpsc::Sender<NodeEndpointCommand>,
    /// Receive endpoint data delivered by FIPS sessions.
    ///
    /// Unbounded so the rx_loop's send on inbound packet delivery is a
    /// wait-free push (no semaphore acquire), and so we can drop the
    /// per-packet cross-task relay that previously sat between the node
    /// task and the `FipsEndpoint::recv()` consumer. Backpressure is
    /// naturally bounded — the rx_loop both produces here and runs the
    /// same runtime that schedules the consumer, so a stalled consumer
    /// stalls production too.
    pub(crate) event_rx: tokio::sync::mpsc::UnboundedReceiver<NodeEndpointEvent>,
    /// Clone of the event_tx exposed for in-process loopback (e.g.
    /// `FipsEndpoint::send` to self_npub). Lets the endpoint inject an
    /// event into the same queue without going through the encrypt /
    /// decrypt path, while keeping every consumer reading from a single
    /// channel.
    pub(crate) event_tx: tokio::sync::mpsc::UnboundedSender<NodeEndpointEvent>,
}
237
/// Resolve the bounded capacity of the endpoint data command channel.
///
/// Precedence: the `FIPS_ENDPOINT_DATA_QUEUE_CAP` environment variable
/// (benchmark override) wins when set to a positive integer; otherwise
/// the caller's `requested` size applies, floored at 32,768 so normal
/// sender bursts do not stall on channel backpressure.
///
/// The floor makes a separate `.max(1)` guard redundant — it was always
/// dominated by the 32,768 minimum — so it has been dropped.
fn endpoint_data_command_capacity(requested: usize) -> usize {
    std::env::var("FIPS_ENDPOINT_DATA_QUEUE_CAP")
        .ok()
        // Non-numeric or empty overrides fall back to the default.
        .and_then(|raw| raw.trim().parse::<usize>().ok())
        // A zero capacity would deadlock a bounded channel; reject it.
        .filter(|&cap| cap > 0)
        .unwrap_or_else(|| requested.max(32_768))
}
248
/// Commands accepted by the node endpoint data service.
#[derive(Debug)]
pub(crate) enum NodeEndpointCommand {
    /// Send with an explicit response channel — used by callers that
    /// care whether the local-stack handoff succeeded (e.g.
    /// `blocking_send` waits for the runtime to accept the send).
    Send {
        /// Destination peer identity.
        remote: PeerIdentity,
        /// Application payload bytes to deliver.
        payload: Vec<u8>,
        /// Optional enqueue timestamp — presumably carried through for
        /// latency accounting; confirm against the consumer.
        queued_at: Option<std::time::Instant>,
        /// Receives the outcome of the local-stack handoff.
        response_tx: tokio::sync::oneshot::Sender<Result<(), NodeError>>,
    },
    /// **Fire-and-forget** variant of `Send` — no oneshot allocation,
    /// no per-packet result channel. Used by the data-plane fast path
    /// (`FipsEndpoint::send`) where the caller already discards the
    /// result. Saves one oneshot::channel() allocation per outbound
    /// packet on the application's send hot path.
    SendOneway {
        /// Destination peer identity.
        remote: PeerIdentity,
        /// Application payload bytes to deliver.
        payload: Vec<u8>,
        /// Optional enqueue timestamp (see `Send::queued_at`).
        queued_at: Option<std::time::Instant>,
    },
    /// Request a snapshot of the currently authenticated peers; the
    /// list of [`NodeEndpointPeer`] is delivered on `response_tx`.
    PeerSnapshot {
        response_tx: tokio::sync::oneshot::Sender<Vec<NodeEndpointPeer>>,
    },
    /// Replace the runtime peer list. Newly added auto-connect peers get
    /// `initiate_peer_connection` immediately; removed peers are dropped
    /// from the retry queue (the regular liveness timeout reaps any active
    /// session). Existing entries are kept and their `addresses` field is
    /// refreshed so the next retry sees the latest hints.
    UpdatePeers {
        /// The complete new peer list.
        peers: Vec<crate::config::PeerConfig>,
        /// Receives a summary of what changed ([`UpdatePeersOutcome`]).
        response_tx: tokio::sync::oneshot::Sender<Result<UpdatePeersOutcome, NodeError>>,
    },
}
284
/// Reports what changed in response to `UpdatePeers`.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub(crate) struct UpdatePeersOutcome {
    /// Peers newly added by the update.
    pub(crate) added: usize,
    /// Peers removed by the update.
    pub(crate) removed: usize,
    /// Existing peers whose configuration (e.g. address hints) was refreshed.
    pub(crate) updated: usize,
    /// Peers carried over without modification — TODO confirm the exact
    /// updated/unchanged split against the `UpdatePeers` handler.
    pub(crate) unchanged: usize,
}
293
/// Endpoint data events emitted by the node session receive path.
#[derive(Debug)]
pub(crate) enum NodeEndpointEvent {
    /// An endpoint-data payload delivered from a remote session (or
    /// injected locally via the `EndpointDataIo::event_tx` loopback).
    Data {
        /// FIPS node address that originated the payload.
        source_node_addr: NodeAddr,
        /// Source Nostr public key when the node has learned it.
        source_npub: Option<String>,
        /// Application payload bytes.
        payload: Vec<u8>,
        /// Optional enqueue timestamp — presumably for latency
        /// accounting; confirm against the consumer.
        queued_at: Option<std::time::Instant>,
    },
}
304
/// Authenticated peer state exposed to embedded endpoint callers.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct NodeEndpointPeer {
    /// Peer's bech32-encoded Nostr public key.
    pub(crate) npub: String,
    /// Remote transport address, when known.
    pub(crate) transport_addr: Option<String>,
    /// Transport kind label, when known — TODO confirm the exact
    /// string values against the snapshot builder.
    pub(crate) transport_type: Option<String>,
    /// Link carrying the peer's session.
    pub(crate) link_id: u64,
    /// Smoothed round-trip time in milliseconds, when measured.
    pub(crate) srtt_ms: Option<u64>,
    /// Cumulative packets sent to this peer.
    pub(crate) packets_sent: u64,
    /// Cumulative packets received from this peer.
    pub(crate) packets_recv: u64,
    /// Cumulative bytes sent to this peer.
    pub(crate) bytes_sent: u64,
    /// Cumulative bytes received from this peer.
    pub(crate) bytes_recv: u64,
}
318
/// Node operational state.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum NodeState {
    /// Created but not started.
    Created,
    /// Starting up (initializing transports).
    Starting,
    /// Fully operational.
    Running,
    /// Shutting down.
    Stopping,
    /// Stopped.
    Stopped,
}

impl NodeState {
    /// True only while the node is fully operational (`Running`).
    pub fn is_operational(&self) -> bool {
        *self == NodeState::Running
    }

    /// True when starting is permitted: a freshly created node, or one
    /// that has been fully stopped.
    pub fn can_start(&self) -> bool {
        match self {
            NodeState::Created | NodeState::Stopped => true,
            NodeState::Starting | NodeState::Running | NodeState::Stopping => false,
        }
    }

    /// True when stopping is permitted: only a running node can stop.
    pub fn can_stop(&self) -> bool {
        *self == NodeState::Running
    }
}

impl fmt::Display for NodeState {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Lowercase labels, matching the variant names.
        f.write_str(match self {
            NodeState::Created => "created",
            NodeState::Starting => "starting",
            NodeState::Running => "running",
            NodeState::Stopping => "stopping",
            NodeState::Stopped => "stopped",
        })
    }
}
363
/// Recent request tracking for dedup and reverse-path forwarding.
///
/// When a LookupRequest is forwarded through a node, the node stores the
/// request_id and which peer sent it. When the corresponding LookupResponse
/// arrives, it's forwarded back to that peer (reverse-path forwarding).
/// The `response_forwarded` flag prevents response routing loops.
/// Stale entries are detected via [`RecentRequest::is_expired`].
#[derive(Clone, Debug)]
pub(crate) struct RecentRequest {
    /// The peer who sent this request to us.
    pub(crate) from_peer: NodeAddr,
    /// When we received this request (Unix milliseconds).
    pub(crate) timestamp_ms: u64,
    /// Whether we've already forwarded a response for this request.
    /// Prevents response routing loops when convergent request paths
    /// create bidirectional entries in recent_requests.
    pub(crate) response_forwarded: bool,
}
381
382impl RecentRequest {
383    pub(crate) fn new(from_peer: NodeAddr, timestamp_ms: u64) -> Self {
384        Self {
385            from_peer,
386            timestamp_ms,
387            response_forwarded: false,
388        }
389    }
390
391    /// Check if this entry has expired (older than expiry_ms).
392    pub(crate) fn is_expired(&self, current_time_ms: u64, expiry_ms: u64) -> bool {
393        current_time_ms.saturating_sub(self.timestamp_ms) > expiry_ms
394    }
395}
396
/// Key for addr_to_link reverse lookup.
///
/// Pairs the transport a packet arrived on with the remote address it
/// came from, uniquely identifying one side of a link.
type AddrKey = (TransportId, TransportAddr);

/// Per-transport kernel drop tracking for congestion detection.
///
/// Sampled every tick (1s). The `dropping` flag indicates whether new
/// kernel drops were observed since the previous sample.
#[derive(Debug, Default)]
struct TransportDropState {
    /// Previous `recv_drops` sample (cumulative counter).
    prev_drops: u64,
    /// True if drops increased since the last sample.
    dropping: bool,
}
411
/// State for a link waiting for transport-level connection establishment.
///
/// For connection-oriented transports (TCP, Tor), the transport connect runs
/// asynchronously. This struct holds the data needed to complete the handshake
/// once the connection is ready. Instances live in `Node::pending_connects`
/// and are polled by the tick handler.
struct PendingConnect {
    /// The link that was created for this connection.
    link_id: LinkId,
    /// Which transport is being used.
    transport_id: TransportId,
    /// The remote address being connected to.
    remote_addr: TransportAddr,
    /// The peer identity (for handshake initiation).
    peer_identity: PeerIdentity,
}
427
/// A running FIPS node instance.
///
/// This is the top-level container holding all node state.
///
/// ## Peer Lifecycle
///
/// Peers go through two phases:
/// 1. **Connection phase** (`connections`): Handshake in progress, indexed by LinkId
/// 2. **Active phase** (`peers`): Authenticated, indexed by NodeAddr
///
/// The `addr_to_link` map enables dispatching incoming packets to the right
/// connection before authentication completes.
// Discovery lookup constants moved to config: node.discovery.attempt_timeouts_secs, node.discovery.ttl
pub struct Node {
    // === Identity ===
    /// This node's cryptographic identity.
    identity: Identity,

    /// Random epoch generated at startup for peer restart detection.
    /// Exchanged inside Noise handshake messages so peers can detect restarts.
    startup_epoch: [u8; 8],

    /// Instant when the node was created, for uptime reporting.
    started_at: std::time::Instant,

    // === Configuration ===
    /// Loaded configuration.
    config: Config,

    // === State ===
    /// Node operational state.
    state: NodeState,

    /// Whether this is a leaf-only node.
    is_leaf_only: bool,

    // === Spanning Tree ===
    /// Local spanning tree state.
    tree_state: TreeState,

    // === Bloom Filter ===
    /// Local Bloom filter state.
    bloom_state: BloomState,

    // === Routing ===
    /// Address -> coordinates cache (from session setup and discovery).
    coord_cache: CoordCache,
    /// Locally learned reverse-path next-hop hints.
    learned_routes: LearnedRouteTable,
    /// Recent discovery requests (dedup + reverse-path forwarding).
    /// Maps request_id → RecentRequest.
    recent_requests: HashMap<u64, RecentRequest>,
    /// Per-destination path MTU lookup, keyed by FipsAddress (mirrors
    /// `coord_cache.entries[*].path_mtu`). Sync read-only access from
    /// the TUN reader/writer threads at TCP MSS clamp time so the
    /// SYN/SYN-ACK clamp can use the smaller of the local-egress floor
    /// and the learned per-destination path MTU.
    path_mtu_lookup: Arc<std::sync::RwLock<HashMap<crate::FipsAddress, u16>>>,

    // === Transports & Links ===
    /// Active transports (owned by Node).
    transports: HashMap<TransportId, TransportHandle>,
    /// Per-transport kernel drop tracking for congestion detection.
    transport_drops: HashMap<TransportId, TransportDropState>,
    /// Active links.
    links: HashMap<LinkId, Link>,
    /// Reverse lookup: (transport_id, remote_addr) -> link_id.
    addr_to_link: HashMap<AddrKey, LinkId>,

    // === Packet Channel ===
    /// Packet sender for transports.
    packet_tx: Option<PacketTx>,
    /// Packet receiver (for event loop).
    packet_rx: Option<PacketRx>,

    // === Connections (Handshake Phase) ===
    /// Pending connections (handshake in progress).
    /// Indexed by LinkId since we don't know the peer's identity yet.
    connections: HashMap<LinkId, PeerConnection>,

    // === Peers (Active Phase) ===
    /// Authenticated peers.
    /// Indexed by NodeAddr (verified identity).
    peers: HashMap<NodeAddr, ActivePeer>,

    // === End-to-End Sessions ===
    /// Session table for end-to-end encrypted sessions.
    /// Keyed by remote NodeAddr.
    sessions: HashMap<NodeAddr, SessionEntry>,

    // === Identity Cache ===
    /// Maps FipsAddress prefix bytes (bytes 1-15) to cached peer identity data.
    /// Enables reverse lookup from IPv6 destination to session/routing identity.
    identity_cache: HashMap<[u8; 15], IdentityCacheEntry>,

    // === Pending TUN Packets ===
    /// Packets queued while waiting for session establishment.
    /// Keyed by destination NodeAddr, bounded per-dest and total.
    pending_tun_packets: HashMap<NodeAddr, VecDeque<Vec<u8>>>,
    /// Endpoint data payloads queued while waiting for session establishment.
    pending_endpoint_data: HashMap<NodeAddr, VecDeque<Vec<u8>>>,
    // === Pending Discovery Lookups ===
    /// Tracks in-flight discovery lookups. Maps target NodeAddr to the
    /// initiation timestamp (Unix ms). Prevents duplicate flood queries.
    pending_lookups: HashMap<NodeAddr, handlers::discovery::PendingLookup>,

    // === Resource Limits ===
    /// Maximum connections (0 = unlimited).
    max_connections: usize,
    /// Maximum peers (0 = unlimited).
    max_peers: usize,
    /// Maximum links (0 = unlimited).
    max_links: usize,

    // === Counters ===
    /// Next link ID to allocate.
    next_link_id: u64,
    /// Next transport ID to allocate.
    next_transport_id: u32,

    // === Node Statistics ===
    /// Routing, forwarding, discovery, and error signal counters.
    stats: stats::NodeStats,

    /// Time-series history of node-level metrics (1s/1m rings).
    stats_history: stats_history::StatsHistory,

    // === TUN Interface ===
    /// TUN device state.
    tun_state: TunState,
    /// TUN interface name (for cleanup).
    tun_name: Option<String>,
    /// TUN packet sender channel.
    tun_tx: Option<TunTx>,
    /// Receiver for outbound packets from the TUN reader.
    tun_outbound_rx: Option<TunOutboundRx>,
    /// App-owned packet sink used by embedded/no-TUN integrations.
    external_packet_tx: Option<tokio::sync::mpsc::Sender<NodeDeliveredPacket>>,
    /// Endpoint data command receiver used by embedded/no-daemon integrations.
    endpoint_command_rx: Option<tokio::sync::mpsc::Receiver<NodeEndpointCommand>>,
    /// Endpoint data event sink used by embedded/no-daemon integrations.
    endpoint_event_tx: Option<tokio::sync::mpsc::UnboundedSender<NodeEndpointEvent>>,
    /// Off-task FMP-encrypt + UDP-send worker pool. `None` if not yet
    /// spawned (set up in `start()` once transports are running).
    /// `Some(pool)` once available; the pool internally holds
    /// per-worker mpsc senders and round-robins jobs across them.
    /// See `node::encrypt_worker` for the rationale and layout.
    encrypt_workers: Option<encrypt_worker::EncryptWorkerPool>,
    /// Off-task FMP + FSP decrypt + delivery worker pool. Mirror of
    /// `encrypt_workers` for the receive side.
    decrypt_workers: Option<decrypt_worker::DecryptWorkerPool>,
    /// Set of sessions that have been registered with the decrypt
    /// shard worker pool. Used by rx_loop to decide between fast-path
    /// dispatch (worker owns the session) and legacy in-place decrypt
    /// (worker doesn't have it yet). Per the data-plane restructure,
    /// the worker owns its session state directly — there's no shared
    /// `Arc<RwLock<HashMap>>` of cipher / replay state anymore, only
    /// this set tracks **whether** the worker has been told about a
    /// given session.
    decrypt_registered_sessions: std::collections::HashSet<(TransportId, u32)>,
    /// Fallback channel: decrypt worker bounces non-fast-path packets
    /// (anything that's not bulk EndpointData) back here for rx_loop
    /// to handle via the legacy path. Drained by a new rx_loop arm.
    decrypt_fallback_rx:
        Option<tokio::sync::mpsc::UnboundedReceiver<decrypt_worker::DecryptWorkerEvent>>,
    /// Sender half of the decrypt fallback channel — presumably cloned
    /// into the decrypt workers so they can bounce packets back.
    /// NOTE(review): confirm against the `decrypt_worker` spawn site.
    decrypt_fallback_tx: tokio::sync::mpsc::UnboundedSender<decrypt_worker::DecryptWorkerEvent>,
    /// TUN reader thread handle.
    tun_reader_handle: Option<JoinHandle<()>>,
    /// TUN writer thread handle.
    tun_writer_handle: Option<JoinHandle<()>>,
    /// Shutdown pipe: writing to this fd unblocks the TUN reader thread on macOS.
    /// On Linux, deleting the interface via netlink serves the same purpose.
    #[cfg(target_os = "macos")]
    tun_shutdown_fd: Option<std::os::unix::io::RawFd>,

    // === DNS Responder ===
    /// Receiver for resolved identities from the DNS responder.
    dns_identity_rx: Option<crate::upper::dns::DnsIdentityRx>,
    /// DNS responder task handle.
    dns_task: Option<tokio::task::JoinHandle<()>>,

    // === Index-Based Session Dispatch ===
    /// Allocator for session indices.
    index_allocator: IndexAllocator,
    /// O(1) lookup: (transport_id, our_index) → NodeAddr.
    /// This maps our session index to the peer that uses it.
    peers_by_index: HashMap<(TransportId, u32), NodeAddr>,
    /// Pending outbound handshakes by our sender_idx.
    /// Tracks which LinkId corresponds to which session index.
    pending_outbound: HashMap<(TransportId, u32), LinkId>,

    // === Rate Limiting ===
    /// Rate limiter for msg1 processing (DoS protection).
    msg1_rate_limiter: HandshakeRateLimiter,
    /// Rate limiter for ICMP Packet Too Big messages.
    icmp_rate_limiter: IcmpRateLimiter,
    /// Rate limiter for routing error signals (CoordsRequired / PathBroken).
    routing_error_rate_limiter: RoutingErrorRateLimiter,
    /// Rate limiter for source-side CoordsRequired/PathBroken responses.
    coords_response_rate_limiter: RoutingErrorRateLimiter,
    /// Backoff for failed discovery lookups (originator-side).
    discovery_backoff: DiscoveryBackoff,
    /// Rate limiter for forwarded discovery requests (transit-side).
    discovery_forward_limiter: DiscoveryForwardRateLimiter,

    // === Pending Transport Connects ===
    /// Links waiting for transport-level connection establishment before
    /// sending handshake msg1. For connection-oriented transports (TCP, Tor),
    /// the transport connect runs in the background; the tick handler polls
    /// connection_state() and initiates the handshake when connected.
    pending_connects: Vec<PendingConnect>,

    // === Connection Retry ===
    /// Retry state for peers whose outbound connections have failed.
    /// Keyed by NodeAddr. Entries are created when a handshake times out
    /// or fails, and removed on successful promotion or when max retries
    /// are exhausted.
    retry_pending: HashMap<NodeAddr, retry::RetryState>,

    /// Optional Nostr/STUN overlay discovery coordinator for `udp:nat` peers.
    nostr_discovery: Option<Arc<crate::discovery::nostr::NostrDiscovery>>,
    /// mDNS / DNS-SD responder + browser for local-link peer discovery.
    /// Identity is unverified at this layer — the Noise XX handshake
    /// initiated against an mDNS-observed endpoint is what proves the
    /// peer holds the matching private key.
    lan_discovery: Option<Arc<crate::discovery::lan::LanDiscovery>>,
    /// Wall-clock ms when Nostr discovery successfully started, used to
    /// schedule the one-shot startup advert sweep after a settle delay.
    /// `None` until discovery comes up; remains `None` if discovery is
    /// disabled or failed to start.
    nostr_discovery_started_at_ms: Option<u64>,
    /// Whether the one-shot startup advert sweep has run. Set to true
    /// after the first sweep fires (under `policy: open`); thereafter
    /// only the per-tick `queue_open_discovery_retries` continues.
    startup_open_discovery_sweep_done: bool,
    /// Per-peer UDP transports adopted from NAT traversal handoff.
    bootstrap_transports: HashSet<TransportId>,
    /// Originating peer npub (bech32) for each adopted bootstrap
    /// transport, captured at `adopt_established_traversal` time.
    /// Populated alongside `bootstrap_transports`; cleared in
    /// `cleanup_bootstrap_transport_if_unused`. Used by the rx loop to
    /// route fatal-protocol-mismatch observations back to the
    /// Nostr-discovery `failure_state` for long cooldown application.
    bootstrap_transport_npubs: HashMap<TransportId, String>,

    // === Periodic Parent Re-evaluation ===
    /// Timestamp of last periodic parent re-evaluation (for pacing).
    last_parent_reeval: Option<crate::time::Instant>,

    // === Congestion Logging ===
    /// Timestamp of last congestion detection log (rate-limited to 5s).
    last_congestion_log: Option<std::time::Instant>,

    // === Mesh Size Estimate ===
    /// Cached estimated mesh size (computed once per tick from bloom filters).
    estimated_mesh_size: Option<u64>,
    /// Timestamp of last mesh size log emission.
    last_mesh_size_log: Option<std::time::Instant>,

    // === Bloom Self-Plausibility ===
    /// Rate-limit state for the self-plausibility WARN. Fires at most
    /// once per 60s globally when our own outgoing FilterAnnounce has
    /// an FPR above `node.bloom.max_inbound_fpr`, signalling either
    /// aggregation drift or an ingress bypass.
    last_self_warn: Option<std::time::Instant>,

    // === Local Outbound Liveness ===
    /// Set when a `transport.send` returned a local-side io error
    /// (`NetworkUnreachable` / `HostUnreachable` / `AddrNotAvailable`),
    /// cleared on the next successful send. Used by
    /// `check_link_heartbeats` to compress the dead-timeout to
    /// `fast_link_dead_timeout_secs` while our outbound is observed
    /// broken — direct kernel evidence beats waiting on receive-silence.
    last_local_send_failure_at: Option<std::time::Instant>,

    // === Display Names ===
    /// Human-readable names for configured peers (alias or short npub).
    /// Populated at startup from peer config.
    peer_aliases: HashMap<NodeAddr, String>,

    /// Reloadable peer ACL state from standard allow/deny files.
    peer_acl: acl::PeerAclReloader,

    // === Host Map ===
    /// Static hostname → npub mapping for DNS resolution.
    /// Built at construction from peer aliases and /etc/fips/hosts.
    host_map: Arc<HostMap>,
}
716
717impl Node {
718    /// Create a new node from configuration.
719    pub fn new(config: Config) -> Result<Self, NodeError> {
720        config.validate()?;
721        let identity = config.create_identity()?;
722        let node_addr = *identity.node_addr();
723        let is_leaf_only = config.is_leaf_only();
724
725        let (decrypt_fallback_tx, decrypt_fallback_rx) = tokio::sync::mpsc::unbounded_channel();
726        let decrypt_fallback_rx = Some(decrypt_fallback_rx);
727
728        let mut startup_epoch = [0u8; 8];
729        rand::rng().fill_bytes(&mut startup_epoch);
730
731        let mut bloom_state = if is_leaf_only {
732            BloomState::leaf_only(node_addr)
733        } else {
734            BloomState::new(node_addr)
735        };
736        bloom_state.set_update_debounce_ms(config.node.bloom.update_debounce_ms);
737
738        let tun_state = if config.tun.enabled {
739            TunState::Configured
740        } else {
741            TunState::Disabled
742        };
743
744        // Initialize tree state with signed self-declaration
745        let mut tree_state = TreeState::new(node_addr);
746        tree_state.set_parent_hysteresis(config.node.tree.parent_hysteresis);
747        tree_state.set_hold_down(config.node.tree.hold_down_secs);
748        tree_state.set_flap_dampening(
749            config.node.tree.flap_threshold,
750            config.node.tree.flap_window_secs,
751            config.node.tree.flap_dampening_secs,
752        );
753        tree_state
754            .sign_declaration(&identity)
755            .expect("signing own declaration should never fail");
756
757        let coord_cache = CoordCache::new(
758            config.node.cache.coord_size,
759            config.node.cache.coord_ttl_secs * 1000,
760        );
761        let rl = &config.node.rate_limit;
762        let msg1_rate_limiter = HandshakeRateLimiter::with_params(
763            rate_limit::TokenBucket::with_params(rl.handshake_burst, rl.handshake_rate),
764            config.node.limits.max_pending_inbound,
765        );
766
767        let max_connections = config.node.limits.max_connections;
768        let max_peers = config.node.limits.max_peers;
769        let max_links = config.node.limits.max_links;
770        let coords_response_interval_ms = config.node.session.coords_response_interval_ms;
771        let backoff_base_secs = config.node.discovery.backoff_base_secs;
772        let backoff_max_secs = config.node.discovery.backoff_max_secs;
773        let forward_min_interval_secs = config.node.discovery.forward_min_interval_secs;
774
775        let (host_map, peer_acl) = Self::host_map_and_peer_acl(&config);
776
777        Ok(Self {
778            identity,
779            startup_epoch,
780            started_at: std::time::Instant::now(),
781            config,
782            state: NodeState::Created,
783            is_leaf_only,
784            tree_state,
785            bloom_state,
786            coord_cache,
787            learned_routes: LearnedRouteTable::default(),
788            recent_requests: HashMap::new(),
789            transports: HashMap::new(),
790            transport_drops: HashMap::new(),
791            links: HashMap::new(),
792            addr_to_link: HashMap::new(),
793            packet_tx: None,
794            packet_rx: None,
795            connections: HashMap::new(),
796            peers: HashMap::new(),
797            sessions: HashMap::new(),
798            identity_cache: HashMap::new(),
799            pending_tun_packets: HashMap::new(),
800            pending_endpoint_data: HashMap::new(),
801            pending_lookups: HashMap::new(),
802            max_connections,
803            max_peers,
804            max_links,
805            next_link_id: 1,
806            next_transport_id: 1,
807            stats: stats::NodeStats::new(),
808            stats_history: stats_history::StatsHistory::new(),
809            tun_state,
810            tun_name: None,
811            tun_tx: None,
812            tun_outbound_rx: None,
813            external_packet_tx: None,
814            endpoint_command_rx: None,
815            endpoint_event_tx: None,
816            encrypt_workers: None,
817            decrypt_workers: None,
818            decrypt_registered_sessions: std::collections::HashSet::new(),
819            decrypt_fallback_tx,
820            decrypt_fallback_rx,
821            tun_reader_handle: None,
822            tun_writer_handle: None,
823            #[cfg(target_os = "macos")]
824            tun_shutdown_fd: None,
825            dns_identity_rx: None,
826            dns_task: None,
827            index_allocator: IndexAllocator::new(),
828            peers_by_index: HashMap::new(),
829            pending_outbound: HashMap::new(),
830            msg1_rate_limiter,
831            icmp_rate_limiter: IcmpRateLimiter::new(),
832            routing_error_rate_limiter: RoutingErrorRateLimiter::new(),
833            coords_response_rate_limiter: RoutingErrorRateLimiter::with_interval(
834                std::time::Duration::from_millis(coords_response_interval_ms),
835            ),
836            discovery_backoff: DiscoveryBackoff::with_params(backoff_base_secs, backoff_max_secs),
837            discovery_forward_limiter: DiscoveryForwardRateLimiter::with_interval(
838                std::time::Duration::from_secs(forward_min_interval_secs),
839            ),
840            pending_connects: Vec::new(),
841            retry_pending: HashMap::new(),
842            nostr_discovery: None,
843            nostr_discovery_started_at_ms: None,
844            lan_discovery: None,
845            startup_open_discovery_sweep_done: false,
846            bootstrap_transports: HashSet::new(),
847            bootstrap_transport_npubs: HashMap::new(),
848            last_parent_reeval: None,
849            last_congestion_log: None,
850            estimated_mesh_size: None,
851            last_mesh_size_log: None,
852            last_self_warn: None,
853            last_local_send_failure_at: None,
854            peer_aliases: HashMap::new(),
855            peer_acl,
856            host_map,
857            path_mtu_lookup: Arc::new(std::sync::RwLock::new(HashMap::new())),
858        })
859    }
860
861    /// Create a node with a specific identity.
862    ///
863    /// This constructor validates cross-field config invariants before
864    /// constructing the node, same as [`Node::new`].
865    pub fn with_identity(identity: Identity, config: Config) -> Result<Self, NodeError> {
866        config.validate()?;
867        let node_addr = *identity.node_addr();
868
869        let (decrypt_fallback_tx, decrypt_fallback_rx) = tokio::sync::mpsc::unbounded_channel();
870        let decrypt_fallback_rx = Some(decrypt_fallback_rx);
871
872        let mut startup_epoch = [0u8; 8];
873        rand::rng().fill_bytes(&mut startup_epoch);
874
875        let tun_state = if config.tun.enabled {
876            TunState::Configured
877        } else {
878            TunState::Disabled
879        };
880
881        // Initialize tree state with signed self-declaration
882        let mut tree_state = TreeState::new(node_addr);
883        tree_state.set_parent_hysteresis(config.node.tree.parent_hysteresis);
884        tree_state.set_hold_down(config.node.tree.hold_down_secs);
885        tree_state.set_flap_dampening(
886            config.node.tree.flap_threshold,
887            config.node.tree.flap_window_secs,
888            config.node.tree.flap_dampening_secs,
889        );
890        tree_state
891            .sign_declaration(&identity)
892            .expect("signing own declaration should never fail");
893
894        let mut bloom_state = BloomState::new(node_addr);
895        bloom_state.set_update_debounce_ms(config.node.bloom.update_debounce_ms);
896
897        let coord_cache = CoordCache::new(
898            config.node.cache.coord_size,
899            config.node.cache.coord_ttl_secs * 1000,
900        );
901        let rl = &config.node.rate_limit;
902        let msg1_rate_limiter = HandshakeRateLimiter::with_params(
903            rate_limit::TokenBucket::with_params(rl.handshake_burst, rl.handshake_rate),
904            config.node.limits.max_pending_inbound,
905        );
906
907        let max_connections = config.node.limits.max_connections;
908        let max_peers = config.node.limits.max_peers;
909        let max_links = config.node.limits.max_links;
910        let coords_response_interval_ms = config.node.session.coords_response_interval_ms;
911
912        let (host_map, peer_acl) = Self::host_map_and_peer_acl(&config);
913
914        Ok(Self {
915            identity,
916            startup_epoch,
917            started_at: std::time::Instant::now(),
918            config,
919            state: NodeState::Created,
920            is_leaf_only: false,
921            tree_state,
922            bloom_state,
923            coord_cache,
924            learned_routes: LearnedRouteTable::default(),
925            recent_requests: HashMap::new(),
926            transports: HashMap::new(),
927            transport_drops: HashMap::new(),
928            links: HashMap::new(),
929            addr_to_link: HashMap::new(),
930            packet_tx: None,
931            packet_rx: None,
932            connections: HashMap::new(),
933            peers: HashMap::new(),
934            sessions: HashMap::new(),
935            identity_cache: HashMap::new(),
936            pending_tun_packets: HashMap::new(),
937            pending_endpoint_data: HashMap::new(),
938            pending_lookups: HashMap::new(),
939            max_connections,
940            max_peers,
941            max_links,
942            next_link_id: 1,
943            next_transport_id: 1,
944            stats: stats::NodeStats::new(),
945            stats_history: stats_history::StatsHistory::new(),
946            tun_state,
947            tun_name: None,
948            tun_tx: None,
949            tun_outbound_rx: None,
950            external_packet_tx: None,
951            endpoint_command_rx: None,
952            endpoint_event_tx: None,
953            encrypt_workers: None,
954            decrypt_workers: None,
955            decrypt_registered_sessions: std::collections::HashSet::new(),
956            decrypt_fallback_tx,
957            decrypt_fallback_rx,
958            tun_reader_handle: None,
959            tun_writer_handle: None,
960            #[cfg(target_os = "macos")]
961            tun_shutdown_fd: None,
962            dns_identity_rx: None,
963            dns_task: None,
964            index_allocator: IndexAllocator::new(),
965            peers_by_index: HashMap::new(),
966            pending_outbound: HashMap::new(),
967            msg1_rate_limiter,
968            icmp_rate_limiter: IcmpRateLimiter::new(),
969            routing_error_rate_limiter: RoutingErrorRateLimiter::new(),
970            coords_response_rate_limiter: RoutingErrorRateLimiter::with_interval(
971                std::time::Duration::from_millis(coords_response_interval_ms),
972            ),
973            discovery_backoff: DiscoveryBackoff::new(),
974            discovery_forward_limiter: DiscoveryForwardRateLimiter::new(),
975            pending_connects: Vec::new(),
976            retry_pending: HashMap::new(),
977            nostr_discovery: None,
978            nostr_discovery_started_at_ms: None,
979            lan_discovery: None,
980            startup_open_discovery_sweep_done: false,
981            bootstrap_transports: HashSet::new(),
982            bootstrap_transport_npubs: HashMap::new(),
983            last_parent_reeval: None,
984            last_congestion_log: None,
985            estimated_mesh_size: None,
986            last_mesh_size_log: None,
987            last_self_warn: None,
988            last_local_send_failure_at: None,
989            peer_aliases: HashMap::new(),
990            peer_acl,
991            host_map,
992            path_mtu_lookup: Arc::new(std::sync::RwLock::new(HashMap::new())),
993        })
994    }
995
996    /// Create a leaf-only node (simplified state).
997    pub fn leaf_only(config: Config) -> Result<Self, NodeError> {
998        let mut node = Self::new(config)?;
999        node.is_leaf_only = true;
1000        node.bloom_state = BloomState::leaf_only(*node.identity.node_addr());
1001        Ok(node)
1002    }
1003
1004    fn host_map_and_peer_acl(config: &Config) -> (Arc<HostMap>, acl::PeerAclReloader) {
1005        let base_host_map = HostMap::from_peer_configs(config.peers());
1006        if !config.node.system_files_enabled {
1007            return (
1008                Arc::new(base_host_map.clone()),
1009                acl::PeerAclReloader::memory_only(base_host_map),
1010            );
1011        }
1012
1013        let mut host_map = base_host_map.clone();
1014        let hosts_path = std::path::PathBuf::from(crate::upper::hosts::DEFAULT_HOSTS_PATH);
1015        let hosts_file = HostMap::load_hosts_file(std::path::Path::new(
1016            crate::upper::hosts::DEFAULT_HOSTS_PATH,
1017        ));
1018        host_map.merge(hosts_file);
1019        let peer_acl = acl::PeerAclReloader::with_alias_sources(
1020            std::path::PathBuf::from(acl::DEFAULT_PEERS_ALLOW_PATH),
1021            std::path::PathBuf::from(acl::DEFAULT_PEERS_DENY_PATH),
1022            base_host_map,
1023            hosts_path,
1024        );
1025        (Arc::new(host_map), peer_acl)
1026    }
1027
    /// Create transport instances from configuration.
    ///
    /// Instantiates one transport per configured entry — UDP, sim
    /// (feature-gated), Ethernet (Linux/macOS only), TCP, Tor, and BLE
    /// (BlueZ builds only) — each wired to the shared inbound `packet_tx`
    /// channel and assigned a fresh transport id.
    ///
    /// Returns a vector of TransportHandles for all configured transports.
    async fn create_transports(&mut self, packet_tx: &PacketTx) -> Vec<TransportHandle> {
        let mut transports = Vec::new();

        // Collect UDP configs with optional names to avoid borrow conflicts
        // (allocate_transport_id below takes &mut self).
        let udp_instances: Vec<_> = self
            .config
            .transports
            .udp
            .iter()
            .map(|(name, config)| (name.map(|s| s.to_string()), config.clone()))
            .collect();

        // Create UDP transport instances
        for (name, udp_config) in udp_instances {
            let transport_id = self.allocate_transport_id();
            let udp = UdpTransport::new(transport_id, name, udp_config, packet_tx.clone());
            transports.push(TransportHandle::Udp(udp));
        }

        // Simulated transport: only compiled in with the sim-transport feature.
        #[cfg(feature = "sim-transport")]
        {
            let sim_instances: Vec<_> = self
                .config
                .transports
                .sim
                .iter()
                .map(|(name, config)| (name.map(|s| s.to_string()), config.clone()))
                .collect();

            for (name, sim_config) in sim_instances {
                let transport_id = self.allocate_transport_id();
                let sim = crate::transport::sim::SimTransport::new(
                    transport_id,
                    name,
                    sim_config,
                    packet_tx.clone(),
                );
                transports.push(TransportHandle::Sim(sim));
            }
        }

        // Create Ethernet transport instances where raw-socket support exists.
        #[cfg(any(target_os = "linux", target_os = "macos"))]
        {
            let eth_instances: Vec<_> = self
                .config
                .transports
                .ethernet
                .iter()
                .map(|(name, config)| (name.map(|s| s.to_string()), config.clone()))
                .collect();
            // Each Ethernet instance carries our x-only pubkey for link-layer
            // identification.
            let xonly = self.identity.pubkey();
            for (name, eth_config) in eth_instances {
                let transport_id = self.allocate_transport_id();
                let mut eth =
                    EthernetTransport::new(transport_id, name, eth_config, packet_tx.clone());
                eth.set_local_pubkey(xonly);
                transports.push(TransportHandle::Ethernet(eth));
            }
        }

        // Create TCP transport instances
        let tcp_instances: Vec<_> = self
            .config
            .transports
            .tcp
            .iter()
            .map(|(name, config)| (name.map(|s| s.to_string()), config.clone()))
            .collect();

        for (name, tcp_config) in tcp_instances {
            let transport_id = self.allocate_transport_id();
            let tcp = TcpTransport::new(transport_id, name, tcp_config, packet_tx.clone());
            transports.push(TransportHandle::Tcp(tcp));
        }

        // Create Tor transport instances
        let tor_instances: Vec<_> = self
            .config
            .transports
            .tor
            .iter()
            .map(|(name, config)| (name.map(|s| s.to_string()), config.clone()))
            .collect();

        for (name, tor_config) in tor_instances {
            let transport_id = self.allocate_transport_id();
            let tor = TorTransport::new(transport_id, name, tor_config, packet_tx.clone());
            transports.push(TransportHandle::Tor(tor));
        }

        // Create BLE transport instances
        #[cfg(bluer_available)]
        {
            let ble_instances: Vec<_> = self
                .config
                .transports
                .ble
                .iter()
                .map(|(name, config)| (name.map(|s| s.to_string()), config.clone()))
                .collect();

            // BLE adapter init does real I/O, so it is skipped in test builds.
            #[cfg(all(bluer_available, not(test)))]
            for (name, ble_config) in ble_instances {
                let transport_id = self.allocate_transport_id();
                let adapter = ble_config.adapter().to_string();
                let mtu = ble_config.mtu();
                match crate::transport::ble::io::BluerIo::new(&adapter, mtu).await {
                    Ok(io) => {
                        let mut ble = crate::transport::ble::BleTransport::new(
                            transport_id,
                            name,
                            ble_config,
                            io,
                            packet_tx.clone(),
                        );
                        ble.set_local_pubkey(self.identity.pubkey().serialize());
                        transports.push(TransportHandle::Ble(ble));
                    }
                    // Adapter failure only skips this instance; other
                    // transports are still returned.
                    Err(e) => {
                        tracing::warn!(adapter = %adapter, error = %e, "failed to initialize BLE adapter");
                    }
                }
            }

            // NOTE(review): within the outer #[cfg(bluer_available)] block,
            // this branch reduces to cfg(test), where the inner
            // #[cfg(not(test))] compiles the warn away — leaving an empty
            // body that only consumes `ble_instances` in test builds.
            // Confirm whether the warn was meant to fire somewhere.
            #[cfg(any(not(bluer_available), test))]
            if !ble_instances.is_empty() {
                #[cfg(not(test))]
                tracing::warn!("BLE transport configured but this build lacks BlueZ support");
            }
        }

        transports
    }
1165
1166    /// Find an operational transport that matches the given transport type name.
1167    ///
1168    /// Adopted UDP bootstrap transports are point-to-point sockets handed off
1169    /// from Nostr/STUN traversal. They must not be reused for ordinary
1170    /// `udp host:port` dials discovered through static config, mDNS, or overlay
1171    /// adverts: on macOS a `send_to` through the wrong adopted socket can fail
1172    /// with `EINVAL`, and even on platforms that allow it the packet would use
1173    /// the wrong 5-tuple/NAT mapping. Prefer configured transports and make the
1174    /// choice deterministic by lowest transport id instead of HashMap order.
1175    fn find_transport_for_type(&self, transport_type: &str) -> Option<TransportId> {
1176        self.transports
1177            .iter()
1178            .filter(|(id, handle)| {
1179                handle.transport_type().name == transport_type
1180                    && handle.is_operational()
1181                    && !self.bootstrap_transports.contains(id)
1182            })
1183            .min_by_key(|(id, _)| id.as_u32())
1184            .map(|(id, _)| *id)
1185    }
1186
1187    /// Resolve an Ethernet peer address ("interface/mac") to a transport ID
1188    /// and binary TransportAddr.
1189    ///
1190    /// Finds the Ethernet transport instance bound to the named interface
1191    /// and parses the MAC portion into a 6-byte TransportAddr.
1192    #[allow(unused_variables)]
1193    fn resolve_ethernet_addr(
1194        &self,
1195        addr_str: &str,
1196    ) -> Result<(TransportId, TransportAddr), NodeError> {
1197        #[cfg(any(target_os = "linux", target_os = "macos"))]
1198        {
1199            let (iface, mac_str) = addr_str.split_once('/').ok_or_else(|| {
1200                NodeError::NoTransportForType(format!(
1201                    "invalid Ethernet address format '{}': expected 'interface/mac'",
1202                    addr_str
1203                ))
1204            })?;
1205
1206            // Find the Ethernet transport bound to this interface
1207            let transport_id = self
1208                .transports
1209                .iter()
1210                .find(|(_, handle)| {
1211                    handle.transport_type().name == "ethernet"
1212                        && handle.is_operational()
1213                        && handle.interface_name() == Some(iface)
1214                })
1215                .map(|(id, _)| *id)
1216                .ok_or_else(|| {
1217                    NodeError::NoTransportForType(format!(
1218                        "no operational Ethernet transport for interface '{}'",
1219                        iface
1220                    ))
1221                })?;
1222
1223            let mac = crate::transport::ethernet::parse_mac_string(mac_str).map_err(|e| {
1224                NodeError::NoTransportForType(format!("invalid MAC in '{}': {}", addr_str, e))
1225            })?;
1226
1227            Ok((transport_id, TransportAddr::from_bytes(&mac)))
1228        }
1229        #[cfg(not(any(target_os = "linux", target_os = "macos")))]
1230        {
1231            Err(NodeError::NoTransportForType(
1232                "Ethernet transport is not supported on this platform".to_string(),
1233            ))
1234        }
1235    }
1236
1237    /// Resolve a BLE address string (`"adapter/AA:BB:CC:DD:EE:FF"`) to a
1238    /// (TransportId, TransportAddr) pair by finding the BLE transport
1239    /// instance matching the adapter name.
1240    #[cfg(bluer_available)]
1241    fn resolve_ble_addr(&self, addr_str: &str) -> Result<(TransportId, TransportAddr), NodeError> {
1242        let ta = TransportAddr::from_string(addr_str);
1243        let adapter = crate::transport::ble::addr::adapter_from_addr(&ta).ok_or_else(|| {
1244            NodeError::NoTransportForType(format!(
1245                "invalid BLE address format '{}': expected 'adapter/mac'",
1246                addr_str
1247            ))
1248        })?;
1249
1250        // Find the BLE transport for this adapter
1251        let transport_id = self
1252            .transports
1253            .iter()
1254            .find(|(_, handle)| handle.transport_type().name == "ble" && handle.is_operational())
1255            .map(|(id, _)| *id)
1256            .ok_or_else(|| {
1257                NodeError::NoTransportForType(format!(
1258                    "no operational BLE transport for adapter '{}'",
1259                    adapter
1260                ))
1261            })?;
1262
1263        // Validate the address format
1264        crate::transport::ble::addr::BleAddr::parse(addr_str).map_err(|e| {
1265            NodeError::NoTransportForType(format!("invalid BLE address '{}': {}", addr_str, e))
1266        })?;
1267
1268        Ok((transport_id, TransportAddr::from_string(addr_str)))
1269    }
1270
1271    // === Identity Accessors ===
1272
    /// Get this node's identity.
    ///
    /// The identity holds the keypair used to sign this node's tree
    /// declaration and from which its `NodeAddr` and npub are derived.
    pub fn identity(&self) -> &Identity {
        &self.identity
    }
1277
    /// Get this node's NodeAddr (derived from its identity).
    pub fn node_addr(&self) -> &NodeAddr {
        self.identity.node_addr()
    }
1282
    /// Get this node's npub string, delegated to the identity.
    pub fn npub(&self) -> String {
        self.identity.npub()
    }
1287
1288    /// Return a human-readable display name for a NodeAddr.
1289    ///
1290    /// Lookup order:
1291    /// 1. Host map hostname (from peer aliases + /etc/fips/hosts)
1292    /// 2. Configured peer alias or short npub (from startup map)
1293    /// 3. Active peer's short npub (e.g., inbound peer not in config)
1294    /// 4. Session endpoint's short npub (end-to-end, may not be direct peer)
1295    /// 5. Truncated NodeAddr hex (unknown address)
1296    pub(crate) fn peer_display_name(&self, addr: &NodeAddr) -> String {
1297        if let Some(hostname) = self.host_map.lookup_hostname(addr) {
1298            return hostname.to_string();
1299        }
1300        if let Some(name) = self.peer_aliases.get(addr) {
1301            return name.clone();
1302        }
1303        if let Some(peer) = self.peers.get(addr) {
1304            return peer.identity().short_npub();
1305        }
1306        if let Some(entry) = self.sessions.get(addr) {
1307            let (xonly, _) = entry.remote_pubkey().x_only_public_key();
1308            return PeerIdentity::from_pubkey(xonly).short_npub();
1309        }
1310        addr.short_hex()
1311    }
1312
1313    /// Tear down a `peers_by_index` entry **and** keep the shard-owned
1314    /// decrypt-worker state coherent: removes the same `cache_key`
1315    /// from the registered-sessions tracking set and tells the
1316    /// assigned shard worker to drop its `OwnedSessionState` entry.
1317    ///
1318    /// Use this instead of a bare `self.peers_by_index.remove(&key)`
1319    /// at every session-lifecycle teardown site (rekey cross-connection
1320    /// swap, peer disconnect, dispatch session-rotation) so the worker
1321    /// doesn't keep stale ciphers / replay windows around. The
1322    /// follow-up `RegisterSession` for the NEW key (if any) will then
1323    /// install the fresh state on the same shard.
1324    pub(in crate::node) fn deregister_session_index(&mut self, cache_key: (TransportId, u32)) {
1325        // Find the peer that owns this index BEFORE removing it from
1326        // the index map, so we can decide whether the deregistration
1327        // also tears down the peer's connected UDP socket.
1328        let owning_peer = self.peers_by_index.get(&cache_key).copied();
1329        self.peers_by_index.remove(&cache_key);
1330        if self.decrypt_registered_sessions.remove(&cache_key)
1331            && let Some(workers) = self.decrypt_workers.as_ref()
1332        {
1333            workers.unregister_session(cache_key);
1334        }
1335        // Tear down the per-peer connected UDP socket *only* if no
1336        // other peers_by_index entry still resolves to this peer.
1337        // Rekey drain calls into this helper with the OLD session
1338        // index while the NEW index is already installed and points
1339        // at the same peer — there the connect()-ed 5-tuple is
1340        // still valid for the new session and we must not close it.
1341        // Peer-teardown sites (CrossConnection swap, stale-index
1342        // fall-through in encrypted.rs, disconnect handler) call
1343        // here when this is the peer's last index, so the connected
1344        // socket goes away with the peer.
1345        if let Some(peer_addr) = owning_peer {
1346            let peer_has_other_index = self
1347                .peers_by_index
1348                .values()
1349                .any(|other| *other == peer_addr);
1350            if !peer_has_other_index {
1351                self.clear_connected_udp_for_peer(&peer_addr);
1352            }
1353        }
1354    }
1355
    /// Ensure the current FMP receive index resolves to this peer.
    ///
    /// Rekey msg1/msg2 handlers pre-register the pending index before
    /// cutover, but losing that registration in a debug build used to
    /// panic in the cutover path. Repairing the map here is safe: the
    /// peer has already promoted the pending session, and the decrypt
    /// worker registration immediately after cutover depends on the
    /// same `(transport_id, our_index)` key.
    ///
    /// Returns `true` when the index is (now) registered to this peer,
    /// `false` when the peer is unknown or lacks the transport id /
    /// local index needed to form the key. `context` is a static tag
    /// naming the call site for the warn logs below.
    pub(in crate::node) fn ensure_current_session_index_registered(
        &mut self,
        node_addr: &NodeAddr,
        context: &'static str,
    ) -> bool {
        // Unknown peer: nothing to repair.
        let Some(peer) = self.peers.get(node_addr) else {
            return false;
        };
        let Some(transport_id) = peer.transport_id() else {
            warn!(
                peer = %self.peer_display_name(node_addr),
                context,
                "Cannot register current session index without transport id"
            );
            return false;
        };
        let Some(our_index) = peer.our_index() else {
            warn!(
                peer = %self.peer_display_name(node_addr),
                context,
                "Cannot register current session index without local index"
            );
            return false;
        };

        let cache_key = (transport_id, our_index.as_u32());
        match self.peers_by_index.get(&cache_key).copied() {
            // Already registered to this peer — nothing to do.
            Some(existing) if existing == *node_addr => true,
            // Registered to a different peer: overwrite the stale owner.
            Some(existing) => {
                warn!(
                    peer = %self.peer_display_name(node_addr),
                    previous_owner = %self.peer_display_name(&existing),
                    transport_id = %transport_id,
                    our_index = %our_index,
                    context,
                    "Repairing current session index with stale owner"
                );
                self.peers_by_index.insert(cache_key, *node_addr);
                true
            }
            // Registration lost entirely: re-install it.
            None => {
                warn!(
                    peer = %self.peer_display_name(node_addr),
                    transport_id = %transport_id,
                    our_index = %our_index,
                    context,
                    "Repairing missing current session index"
                );
                self.peers_by_index.insert(cache_key, *node_addr);
                true
            }
        }
    }
1417
1418    // === Configuration ===
1419
    /// Get the configuration (as captured at construction time).
    pub fn config(&self) -> &Config {
        &self.config
    }
1424
    /// Calculate the effective IPv6 MTU that can be sent over FIPS.
    ///
    /// Delegates to `upper::icmp::effective_ipv6_mtu()` with this node's
    /// transport MTU (the minimum across operational transports — see
    /// [`Node::transport_mtu`]). Returns the maximum IPv6 packet size
    /// (including IPv6 header) that can be transmitted through the FIPS
    /// mesh.
    pub fn effective_ipv6_mtu(&self) -> u16 {
        crate::upper::icmp::effective_ipv6_mtu(self.transport_mtu())
    }
1433
1434    /// Get the transport MTU governing the global TUN-boundary MSS clamp.
1435    ///
1436    /// Returns the **minimum** MTU across all operational transports, or
1437    /// 1280 (IPv6 minimum) as fallback. Used for initial TUN configuration
1438    /// where a specific egress transport isn't yet known: the resulting
1439    /// `effective_ipv6_mtu` (transport_mtu - 77) and `max_mss`
1440    /// (effective_mtu - 60) form a conservative ceiling that fits ANY
1441    /// configured-transport's egress, eliminating PMTU-D black holes that
1442    /// would otherwise occur when a flow's actual egress is smaller than
1443    /// the clamp ceiling assumed at TUN init.
1444    ///
1445    /// Returning the smallest (rather than the first-iterated, which used
1446    /// to vary across HashMap iteration order + async-startup race) makes
1447    /// the clamp deterministic across daemon restarts.
1448    ///
1449    /// See `ISSUE-2026-0011` for the empirical investigation.
1450    pub fn transport_mtu(&self) -> u16 {
1451        let min_operational = self
1452            .transports
1453            .values()
1454            .filter(|h| h.is_operational())
1455            .map(|h| h.mtu())
1456            .min();
1457        if let Some(mtu) = min_operational {
1458            return mtu;
1459        }
1460        // Fallback to config: try UDP first, then Ethernet
1461        if let Some((_, cfg)) = self.config.transports.udp.iter().next() {
1462            return cfg.mtu();
1463        }
1464        1280
1465    }
1466
1467    // === State ===
1468
    /// Get the node's current lifecycle state.
    pub fn state(&self) -> NodeState {
        self.state
    }
1473
    /// Get the node uptime (elapsed time since construction).
    pub fn uptime(&self) -> std::time::Duration {
        self.started_at.elapsed()
    }
1478
    /// Check if node is operational (delegates to `NodeState::is_operational`).
    pub fn is_running(&self) -> bool {
        self.state.is_operational()
    }
1483
    /// Check if this is a leaf-only node (see [`Node::leaf_only`]).
    pub fn is_leaf_only(&self) -> bool {
        self.is_leaf_only
    }
1488
1489    // === Tree State ===
1490
    /// Get the spanning-tree state (read-only).
    pub fn tree_state(&self) -> &TreeState {
        &self.tree_state
    }
1495
    /// Get mutable access to the spanning-tree state.
    pub fn tree_state_mut(&mut self) -> &mut TreeState {
        &mut self.tree_state
    }
1500
1501    // === Bloom State ===
1502
    /// Get the Bloom filter state (read-only).
    pub fn bloom_state(&self) -> &BloomState {
        &self.bloom_state
    }
1507
    /// Get mutable access to the Bloom filter state.
    pub fn bloom_state_mut(&mut self) -> &mut BloomState {
        &mut self.bloom_state
    }
1512
1513    // === Mesh Size Estimate ===
1514
    /// Get the cached estimated mesh size.
    ///
    /// `None` until an estimate has been computed, or when the estimate
    /// was refused because a contributing bloom filter exceeded the
    /// configured false-positive-rate cap (see `compute_mesh_size`).
    pub fn estimated_mesh_size(&self) -> Option<u64> {
        self.estimated_mesh_size
    }
1519
1520    /// Compute and cache the estimated mesh size from bloom filters.
1521    ///
1522    /// Uses the spanning tree partition: parent's filter covers nodes reachable
1523    /// upward, children's filters cover disjoint subtrees downward. The sum
1524    /// of estimated entry counts plus one (self) approximates total network size.
1525    pub(crate) fn compute_mesh_size(&mut self) {
1526        let my_addr = *self.tree_state.my_node_addr();
1527        let parent_id = *self.tree_state.my_declaration().parent_id();
1528        let is_root = self.tree_state.is_root();
1529
1530        let max_fpr = self.config.node.bloom.max_inbound_fpr;
1531        let mut total: f64 = 1.0; // count self
1532        let mut child_count: u32 = 0;
1533        let mut has_data = false;
1534
1535        // Parent's filter: nodes reachable upward through the tree.
1536        // If any contributing filter is above the FPR cap, we refuse to
1537        // estimate rather than substitute a partial/biased aggregate —
1538        // Node.estimated_mesh_size is already Option<u64> and consumers
1539        // (control socket, fipstop, periodic debug log) handle None.
1540        if !is_root
1541            && let Some(parent) = self.peers.get(&parent_id)
1542            && let Some(filter) = parent.inbound_filter()
1543        {
1544            match filter.estimated_count(max_fpr) {
1545                Some(n) => {
1546                    total += n;
1547                    has_data = true;
1548                }
1549                None => {
1550                    self.estimated_mesh_size = None;
1551                    return;
1552                }
1553            }
1554        }
1555
1556        // Children's filters: each child's subtree is disjoint
1557        for (peer_addr, peer) in &self.peers {
1558            if let Some(decl) = self.tree_state.peer_declaration(peer_addr)
1559                && *decl.parent_id() == my_addr
1560            {
1561                child_count += 1;
1562                if let Some(filter) = peer.inbound_filter() {
1563                    match filter.estimated_count(max_fpr) {
1564                        Some(n) => {
1565                            total += n;
1566                            has_data = true;
1567                        }
1568                        None => {
1569                            self.estimated_mesh_size = None;
1570                            return;
1571                        }
1572                    }
1573                }
1574            }
1575        }
1576
1577        if !has_data {
1578            self.estimated_mesh_size = None;
1579            return;
1580        }
1581
1582        let size = total.round() as u64;
1583        self.estimated_mesh_size = Some(size);
1584
1585        // Periodic logging (reuse MMP default interval: 30s)
1586        let now = std::time::Instant::now();
1587        let should_log = match self.last_mesh_size_log {
1588            None => true,
1589            Some(last) => {
1590                now.duration_since(last)
1591                    >= std::time::Duration::from_secs(self.config.node.mmp.log_interval_secs)
1592            }
1593        };
1594        if should_log {
1595            tracing::debug!(
1596                estimated_mesh_size = size,
1597                peers = self.peers.len(),
1598                children = child_count,
1599                "Mesh size estimate"
1600            );
1601            self.last_mesh_size_log = Some(now);
1602        }
1603    }
1604
1605    // === Coord Cache ===
1606
    /// Get the coordinate cache.
    ///
    /// Cache of destination tree coordinates used by routing decisions.
    pub fn coord_cache(&self) -> &CoordCache {
        &self.coord_cache
    }

    /// Get mutable coordinate cache.
    pub fn coord_cache_mut(&mut self) -> &mut CoordCache {
        &mut self.coord_cache
    }
1616
1617    // === Node Statistics ===
1618
    /// Get the node statistics.
    ///
    /// Live counters (forwarding, tree, …) accumulated since start.
    pub fn stats(&self) -> &stats::NodeStats {
        &self.stats
    }

    /// Get mutable node statistics.
    pub(crate) fn stats_mut(&mut self) -> &mut stats::NodeStats {
        &mut self.stats
    }

    /// Get the stats history collector.
    ///
    /// Ring of periodic snapshots filled by `record_stats_history`.
    pub fn stats_history(&self) -> &stats_history::StatsHistory {
        &self.stats_history
    }
1633
1634    /// Sample the current node state into the stats history ring.
1635    /// Called once per tick from the RX loop.
1636    pub(crate) fn record_stats_history(&mut self) {
1637        let fwd = &self.stats.forwarding;
1638        let peers_with_mmp: Vec<f64> = self
1639            .peers
1640            .values()
1641            .filter_map(|p| p.mmp().map(|m| m.metrics.loss_rate()))
1642            .collect();
1643        let loss_rate = if peers_with_mmp.is_empty() {
1644            0.0
1645        } else {
1646            peers_with_mmp.iter().sum::<f64>() / peers_with_mmp.len() as f64
1647        };
1648
1649        let snap = stats_history::Snapshot {
1650            mesh_size: self.estimated_mesh_size,
1651            tree_depth: self.tree_state.my_coords().depth() as u32,
1652            peer_count: self.peers.len() as u64,
1653            parent_switches_total: self.stats.tree.parent_switches,
1654            bytes_in_total: fwd.received_bytes,
1655            bytes_out_total: fwd.forwarded_bytes + fwd.originated_bytes,
1656            packets_in_total: fwd.received_packets,
1657            packets_out_total: fwd.forwarded_packets + fwd.originated_packets,
1658            loss_rate,
1659            active_sessions: self.sessions.len() as u64,
1660        };
1661
1662        let now = std::time::Instant::now();
1663        let peer_snaps: Vec<stats_history::PeerSnapshot> = self
1664            .peers
1665            .values()
1666            .map(|p| {
1667                let stats = p.link_stats();
1668                let (srtt_ms, loss_rate, ecn_ce) = match p.mmp() {
1669                    Some(m) => (
1670                        m.metrics.srtt_ms(),
1671                        Some(m.metrics.loss_rate()),
1672                        m.receiver.ecn_ce_count() as u64,
1673                    ),
1674                    None => (None, None, 0),
1675                };
1676                stats_history::PeerSnapshot {
1677                    node_addr: *p.node_addr(),
1678                    last_seen: now,
1679                    srtt_ms,
1680                    loss_rate,
1681                    bytes_in_total: stats.bytes_recv,
1682                    bytes_out_total: stats.bytes_sent,
1683                    packets_in_total: stats.packets_recv,
1684                    packets_out_total: stats.packets_sent,
1685                    ecn_ce_total: ecn_ce,
1686                }
1687            })
1688            .collect();
1689
1690        self.stats_history.tick(now, &snap, &peer_snaps);
1691    }
1692
1693    // === TUN Interface ===
1694
    /// Get the TUN state.
    ///
    /// Returns a copy of the current TUN interface state.
    pub fn tun_state(&self) -> TunState {
        self.tun_state
    }

    /// Get the TUN interface name, if active.
    ///
    /// `None` when no TUN interface name has been recorded.
    pub fn tun_name(&self) -> Option<&str> {
        self.tun_name.as_deref()
    }
1704
1705    // === Resource Limits ===
1706
    /// Set the maximum number of connections (handshake phase).
    ///
    /// A value of 0 disables the limit (see `add_connection`).
    pub fn set_max_connections(&mut self, max: usize) {
        self.max_connections = max;
    }

    /// Set the maximum number of peers (authenticated).
    ///
    /// NOTE(review): enforcement site is outside this view; presumably 0
    /// disables the limit like the other caps — confirm where it is checked.
    pub fn set_max_peers(&mut self, max: usize) {
        self.max_peers = max;
    }

    /// Set the maximum number of links.
    ///
    /// A value of 0 disables the limit (see `add_link`).
    pub fn set_max_links(&mut self, max: usize) {
        self.max_links = max;
    }
1721
1722    // === Counts ===
1723
    /// Number of pending connections (handshake in progress).
    pub fn connection_count(&self) -> usize {
        self.connections.len()
    }

    /// Number of authenticated peers.
    pub fn peer_count(&self) -> usize {
        self.peers.len()
    }

    /// Number of active links.
    pub fn link_count(&self) -> usize {
        self.links.len()
    }

    /// Number of active transports.
    pub fn transport_count(&self) -> usize {
        self.transports.len()
    }
1743
1744    // === Transport Management ===
1745
1746    /// Allocate a new transport ID.
1747    pub fn allocate_transport_id(&mut self) -> TransportId {
1748        let id = TransportId::new(self.next_transport_id);
1749        self.next_transport_id += 1;
1750        id
1751    }
1752
    /// Get a transport by ID.
    pub fn get_transport(&self, id: &TransportId) -> Option<&TransportHandle> {
        self.transports.get(id)
    }

    /// Get mutable transport by ID.
    pub fn get_transport_mut(&mut self, id: &TransportId) -> Option<&mut TransportHandle> {
        self.transports.get_mut(id)
    }

    /// Iterate over transport IDs.
    pub fn transport_ids(&self) -> impl Iterator<Item = &TransportId> {
        self.transports.keys()
    }

    /// Get the packet receiver for the event loop.
    ///
    /// `None` when no receiver is currently held (presumably before start
    /// or after it was taken — confirm against `lifecycle`).
    pub fn packet_rx(&mut self) -> Option<&mut PacketRx> {
        self.packet_rx.as_mut()
    }
1772
1773    // === Link Management ===
1774
1775    /// Allocate a new link ID.
1776    pub fn allocate_link_id(&mut self) -> LinkId {
1777        let id = LinkId::new(self.next_link_id);
1778        self.next_link_id += 1;
1779        id
1780    }
1781
1782    /// Add a link.
1783    pub fn add_link(&mut self, link: Link) -> Result<(), NodeError> {
1784        if self.max_links > 0 && self.links.len() >= self.max_links {
1785            return Err(NodeError::MaxLinksExceeded {
1786                max: self.max_links,
1787            });
1788        }
1789        let link_id = link.link_id();
1790        let transport_id = link.transport_id();
1791        let remote_addr = link.remote_addr().clone();
1792
1793        self.links.insert(link_id, link);
1794        self.addr_to_link
1795            .insert((transport_id, remote_addr), link_id);
1796        Ok(())
1797    }
1798
    /// Get a link by ID.
    pub fn get_link(&self, link_id: &LinkId) -> Option<&Link> {
        self.links.get(link_id)
    }

    /// Get a mutable link by ID.
    pub fn get_link_mut(&mut self, link_id: &LinkId) -> Option<&mut Link> {
        self.links.get_mut(link_id)
    }

    /// Find link ID by transport address.
    ///
    /// Reverse lookup over the `(transport, remote address)` map that
    /// `add_link` maintains.
    pub fn find_link_by_addr(
        &self,
        transport_id: TransportId,
        addr: &TransportAddr,
    ) -> Option<LinkId> {
        self.addr_to_link
            .get(&(transport_id, addr.clone()))
            .copied()
    }
1819
1820    /// Remove a link.
1821    ///
1822    /// Only removes the addr_to_link reverse lookup if it still points to this
1823    /// link. In cross-connection scenarios, a newer link may have replaced the
1824    /// entry for the same address.
1825    pub fn remove_link(&mut self, link_id: &LinkId) -> Option<Link> {
1826        if let Some(link) = self.links.remove(link_id) {
1827            // Clean up reverse lookup only if it still maps to this link
1828            let key = (link.transport_id(), link.remote_addr().clone());
1829            if self.addr_to_link.get(&key) == Some(link_id) {
1830                self.addr_to_link.remove(&key);
1831            }
1832            Some(link)
1833        } else {
1834            None
1835        }
1836    }
1837
1838    pub(crate) fn cleanup_bootstrap_transport_if_unused(&mut self, transport_id: TransportId) {
1839        if !self.bootstrap_transports.contains(&transport_id) {
1840            return;
1841        }
1842
1843        let transport_in_use = self
1844            .links
1845            .values()
1846            .any(|link| link.transport_id() == transport_id)
1847            || self
1848                .connections
1849                .values()
1850                .any(|conn| conn.transport_id() == Some(transport_id))
1851            || self
1852                .peers
1853                .values()
1854                .any(|peer| peer.transport_id() == Some(transport_id))
1855            || self
1856                .pending_connects
1857                .iter()
1858                .any(|pending| pending.transport_id == transport_id);
1859
1860        if transport_in_use {
1861            return;
1862        }
1863
1864        tracing::debug!(
1865            transport_id = %transport_id,
1866            "bootstrap transport has no remaining references; dropping"
1867        );
1868
1869        self.bootstrap_transports.remove(&transport_id);
1870        self.bootstrap_transport_npubs.remove(&transport_id);
1871        self.transport_drops.remove(&transport_id);
1872        self.transports.remove(&transport_id);
1873    }
1874
    /// Iterate over all links.
    ///
    /// Iteration order follows the underlying link map.
    pub fn links(&self) -> impl Iterator<Item = &Link> {
        self.links.values()
    }
1879
1880    // === Connection Management (Handshake Phase) ===
1881
1882    /// Add a pending connection.
1883    pub fn add_connection(&mut self, connection: PeerConnection) -> Result<(), NodeError> {
1884        let link_id = connection.link_id();
1885
1886        if self.connections.contains_key(&link_id) {
1887            return Err(NodeError::ConnectionAlreadyExists(link_id));
1888        }
1889
1890        if self.max_connections > 0 && self.connections.len() >= self.max_connections {
1891            return Err(NodeError::MaxConnectionsExceeded {
1892                max: self.max_connections,
1893            });
1894        }
1895
1896        self.connections.insert(link_id, connection);
1897        Ok(())
1898    }
1899
    /// Get a connection by LinkId.
    pub fn get_connection(&self, link_id: &LinkId) -> Option<&PeerConnection> {
        self.connections.get(link_id)
    }

    /// Get a mutable connection by LinkId.
    pub fn get_connection_mut(&mut self, link_id: &LinkId) -> Option<&mut PeerConnection> {
        self.connections.get_mut(link_id)
    }

    /// Remove a connection.
    ///
    /// Returns the removed connection, or `None` if it was not tracked.
    pub fn remove_connection(&mut self, link_id: &LinkId) -> Option<PeerConnection> {
        self.connections.remove(link_id)
    }

    /// Iterate over all connections.
    pub fn connections(&self) -> impl Iterator<Item = &PeerConnection> {
        self.connections.values()
    }
1919
1920    // === Peer Management (Active Phase) ===
1921
    /// Get a peer by NodeAddr.
    pub fn get_peer(&self, node_addr: &NodeAddr) -> Option<&ActivePeer> {
        self.peers.get(node_addr)
    }

    /// Get a mutable peer by NodeAddr.
    pub fn get_peer_mut(&mut self, node_addr: &NodeAddr) -> Option<&mut ActivePeer> {
        self.peers.get_mut(node_addr)
    }

    /// Remove a peer.
    ///
    /// Returns the removed peer, or `None` if it was not tracked.
    pub fn remove_peer(&mut self, node_addr: &NodeAddr) -> Option<ActivePeer> {
        self.peers.remove(node_addr)
    }

    /// Iterate over all peers.
    pub fn peers(&self) -> impl Iterator<Item = &ActivePeer> {
        self.peers.values()
    }

    /// Reference to the Nostr discovery handle if discovery is enabled.
    /// Used by control queries (`show_peers` per-peer Nostr-traversal
    /// state) to read failure-state without taking shared ownership.
    pub fn nostr_discovery_handle(&self) -> Option<&crate::discovery::nostr::NostrDiscovery> {
        self.nostr_discovery.as_deref()
    }

    /// Iterate over all peer node IDs.
    pub fn peer_ids(&self) -> impl Iterator<Item = &NodeAddr> {
        self.peers.keys()
    }

    /// Iterate over peers that can send traffic.
    ///
    /// Filters on `ActivePeer::can_send`.
    pub fn sendable_peers(&self) -> impl Iterator<Item = &ActivePeer> {
        self.peers.values().filter(|p| p.can_send())
    }
1958
1959    /// Number of peers that can send traffic.
1960    pub fn sendable_peer_count(&self) -> usize {
1961        self.peers.values().filter(|p| p.can_send()).count()
1962    }
1963
1964    // === End-to-End Sessions ===
1965
1966    /// Get a session by remote NodeAddr.
1967    /// Disable the discovery forward rate limiter (for tests).
1968    #[cfg(test)]
1969    pub(crate) fn disable_discovery_forward_rate_limit(&mut self) {
1970        self.discovery_forward_limiter
1971            .set_interval(std::time::Duration::ZERO);
1972    }
1973
1974    #[cfg(test)]
1975    pub(crate) fn get_session(&self, remote: &NodeAddr) -> Option<&SessionEntry> {
1976        self.sessions.get(remote)
1977    }
1978
    /// Get a mutable session by remote NodeAddr.
    #[cfg(test)]
    pub(crate) fn get_session_mut(&mut self, remote: &NodeAddr) -> Option<&mut SessionEntry> {
        self.sessions.get_mut(remote)
    }

    /// Remove a session.
    ///
    /// Returns the removed entry, or `None` if no session existed.
    #[cfg(test)]
    pub(crate) fn remove_session(&mut self, remote: &NodeAddr) -> Option<SessionEntry> {
        self.sessions.remove(remote)
    }
1990
1991    /// Read the path_mtu_lookup entry for a destination FipsAddress.
1992    #[cfg(test)]
1993    pub(crate) fn path_mtu_lookup_get(&self, fips_addr: &crate::FipsAddress) -> Option<u16> {
1994        self.path_mtu_lookup
1995            .read()
1996            .ok()
1997            .and_then(|map| map.get(fips_addr).copied())
1998    }
1999
2000    /// Write a path_mtu_lookup entry directly (for tests that pre-seed the map).
2001    #[cfg(test)]
2002    pub(crate) fn path_mtu_lookup_insert(&self, fips_addr: crate::FipsAddress, mtu: u16) {
2003        if let Ok(mut map) = self.path_mtu_lookup.write() {
2004            map.insert(fips_addr, mtu);
2005        }
2006    }
2007
    /// Number of end-to-end sessions.
    pub fn session_count(&self) -> usize {
        self.sessions.len()
    }

    /// Iterate over all session entries (for control queries).
    ///
    /// Yields `(remote NodeAddr, entry)` pairs in map iteration order.
    pub(crate) fn session_entries(&self) -> impl Iterator<Item = (&NodeAddr, &SessionEntry)> {
        self.sessions.iter()
    }
2017
2018    // === Identity Cache ===
2019
    /// Register a node in the identity cache for FipsAddress → NodeAddr lookup.
    ///
    /// Returns `true` when the identity is cached (fresh insert, refresh of an
    /// existing entry, or an exact hit), and `false` when the claimed
    /// `node_addr` does not match the address derived from `pubkey`.
    pub(crate) fn register_identity(
        &mut self,
        node_addr: NodeAddr,
        pubkey: secp256k1::PublicKey,
    ) -> bool {
        // Cache key is the first 15 bytes of the NodeAddr.
        let mut prefix = [0u8; 15];
        prefix.copy_from_slice(&node_addr.as_bytes()[0..15]);
        if let Some(entry) = self.identity_cache.get(&prefix)
            && entry.node_addr == node_addr
            && entry.pubkey == pubkey
        {
            // Endpoint sends pass the same PeerIdentity on every packet. Once
            // validated, avoid re-deriving NodeAddr from the public key in the
            // data path; that hash showed up in macOS sender profiles.
            return true;
        }

        // Verify the claimed address actually derives from the presented key
        // before caching anything.
        let (xonly, _) = pubkey.x_only_public_key();
        let derived_node_addr = NodeAddr::from_pubkey(&xonly);
        if derived_node_addr != node_addr {
            debug!(
                claimed_node_addr = %node_addr,
                derived_node_addr = %derived_node_addr,
                "Rejected identity cache entry with mismatched public key"
            );
            return false;
        }

        // Same address already cached (but with a different stored pubkey):
        // refresh the key and the LRU timestamp in place.
        let now_ms = Self::now_ms();
        if let Some(entry) = self.identity_cache.get_mut(&prefix)
            && entry.node_addr == node_addr
        {
            entry.pubkey = pubkey;
            entry.last_seen_ms = now_ms;
            return true;
        }

        let npub = encode_npub(&xonly);
        self.identity_cache.insert(
            prefix,
            IdentityCacheEntry::new(node_addr, pubkey, npub, now_ms),
        );
        // LRU eviction: when over capacity, drop the least-recently-seen
        // entry (the fresh insert has the newest timestamp, so it survives).
        let max = self.config.node.cache.identity_size;
        if self.identity_cache.len() > max
            && let Some(oldest_key) = self
                .identity_cache
                .iter()
                .min_by_key(|(_, entry)| entry.last_seen_ms)
                .map(|(k, _)| *k)
        {
            self.identity_cache.remove(&oldest_key);
        }
        true
    }
2076
2077    /// Look up a destination by FipsAddress prefix (bytes 1-15 of the IPv6 address).
2078    pub(crate) fn lookup_by_fips_prefix(
2079        &mut self,
2080        prefix: &[u8; 15],
2081    ) -> Option<(NodeAddr, secp256k1::PublicKey)> {
2082        if let Some(entry) = self.identity_cache.get_mut(prefix) {
2083            entry.last_seen_ms = Self::now_ms(); // LRU touch
2084            Some((entry.node_addr, entry.pubkey))
2085        } else {
2086            None
2087        }
2088    }
2089
    /// Check if a node's identity is in the cache (without LRU touch).
    pub(crate) fn has_cached_identity(&self, addr: &NodeAddr) -> bool {
        // Cache key is the first 15 bytes of the NodeAddr.
        let mut prefix = [0u8; 15];
        prefix.copy_from_slice(&addr.as_bytes()[0..15]);
        self.identity_cache.contains_key(&prefix)
    }

    /// Number of identity cache entries.
    pub fn identity_cache_len(&self) -> usize {
        self.identity_cache.len()
    }

    /// Iterate over identity cache entries.
    ///
    /// Returns `(NodeAddr, PublicKey, last_seen_ms)` for each cached identity.
    /// Used by the `show_identity_cache` control query.
    pub fn identity_cache_iter(
        &self,
    ) -> impl Iterator<Item = (&NodeAddr, &secp256k1::PublicKey, u64)> {
        self.identity_cache
            .values()
            .map(|entry| (&entry.node_addr, &entry.pubkey, entry.last_seen_ms))
    }

    /// Configured maximum identity cache size.
    pub fn identity_cache_max(&self) -> usize {
        self.config.node.cache.identity_size
    }
2118
    /// Number of pending discovery lookups.
    pub fn pending_lookup_count(&self) -> usize {
        self.pending_lookups.len()
    }

    /// Iterate over pending discovery lookups for diagnostics.
    pub fn pending_lookups_iter(
        &self,
    ) -> impl Iterator<Item = (&NodeAddr, &handlers::discovery::PendingLookup)> {
        self.pending_lookups.iter()
    }

    /// Number of recent discovery requests tracked.
    pub fn recent_request_count(&self) -> usize {
        self.recent_requests.len()
    }

    /// Count of destinations with queued TUN packets awaiting session setup.
    pub fn pending_tun_destinations(&self) -> usize {
        self.pending_tun_packets.len()
    }

    /// Total TUN packets queued across all destinations.
    ///
    /// Sums the per-destination queue lengths.
    pub fn pending_tun_total_packets(&self) -> usize {
        self.pending_tun_packets.values().map(|q| q.len()).sum()
    }

    /// Iterate over retry state for diagnostics.
    pub fn retry_state_iter(&self) -> impl Iterator<Item = (&NodeAddr, &retry::RetryState)> {
        self.retry_pending.iter()
    }
2150
2151    // === Routing ===
2152
2153    /// Check if a peer is a tree neighbor (parent or child in the spanning tree).
2154    ///
2155    /// Returns true if the peer is our current tree parent, or if the peer
2156    /// has declared us as their parent (making them our child).
2157    pub(crate) fn is_tree_peer(&self, peer_addr: &NodeAddr) -> bool {
2158        // Peer is our parent
2159        if !self.tree_state.is_root() && self.tree_state.my_declaration().parent_id() == peer_addr {
2160            return true;
2161        }
2162        // Peer is our child (their declaration names us as parent)
2163        if let Some(decl) = self.tree_state.peer_declaration(peer_addr)
2164            && decl.parent_id() == self.node_addr()
2165        {
2166            return true;
2167        }
2168        false
2169    }
2170
    /// Find next hop for a destination node address.
    ///
    /// Routing priority:
    /// 1. Destination is self → `None` (local delivery)
    /// 2. Destination is a direct peer → that peer
    /// 3. Reply-learned routes in `reply_learned` mode. These are locally
    ///    observed reverse paths, selected with weighted multipath plus
    ///    periodic coordinate/tree exploration.
    /// 4. Bloom filter candidates with cached dest coords → among peers whose
    ///    bloom filter contains the destination, pick the one that minimizes
    ///    tree distance to the destination, with
    ///    `(link_cost, tree_distance_to_dest, node_addr)` tie-breaking.
    ///    The self-distance check ensures only peers strictly closer to the
    ///    destination than us are considered (prevents routing loops).
    /// 5. Greedy tree routing fallback (requires cached dest coords)
    /// 6. No route → `None`
    ///
    /// Both the bloom filter and tree routing paths require cached destination
    /// coordinates (checked in `coord_cache`). Without coordinates, the node
    /// cannot make loop-free forwarding decisions. The caller should signal
    /// `CoordsRequired` back to the source when `None` is returned for a
    /// non-local destination.
    pub fn find_next_hop(&mut self, dest_node_addr: &NodeAddr) -> Option<&ActivePeer> {
        // 1. Local delivery
        if dest_node_addr == self.node_addr() {
            return None;
        }

        // 2. Direct peer
        if let Some(peer) = self.peers.get(dest_node_addr)
            && peer.can_send()
        {
            return Some(peer);
        }

        let now_ms = Self::now_ms();

        // Snapshot of peers currently able to send — only needed for
        // reply-learned selection, so it is None in other routing modes.
        let sendable_learned_peers = if self.config.node.routing.mode == RoutingMode::ReplyLearned {
            Some(
                self.peers
                    .iter()
                    .filter(|(_, peer)| peer.can_send())
                    .map(|(addr, _)| *addr)
                    .collect::<HashSet<_>>(),
            )
        } else {
            None
        };

        // 3. Optional reply-learned routing. These entries are not peer
        // claims; they are local observations of which peer carried traffic
        // or a verified lookup response back from the destination. Most
        // packets use weighted multipath over learned routes, but periodic
        // fallback exploration lets coord/bloom/tree routes discover better
        // candidates.
        let explore_fallback = sendable_learned_peers.as_ref().is_some_and(|sendable| {
            self.learned_routes.should_explore_fallback(
                dest_node_addr,
                now_ms,
                self.config.node.routing.learned_fallback_explore_interval,
                |addr| sendable.contains(addr),
            )
        });
        if let Some(sendable) = &sendable_learned_peers
            && !explore_fallback
            && let Some(next_hop_addr) =
                self.learned_routes
                    .select_next_hop(dest_node_addr, now_ms, |addr| sendable.contains(addr))
        {
            return self.peers.get(&next_hop_addr);
        }

        // Look up cached destination coordinates (required by both bloom and tree paths).
        let Some(dest_coords) = self
            .coord_cache
            .get_and_touch(dest_node_addr, now_ms)
            .cloned()
        else {
            // No cached coords: a learned route is the only remaining option.
            if let Some(sendable) = &sendable_learned_peers
                && let Some(next_hop_addr) =
                    self.learned_routes
                        .select_next_hop(dest_node_addr, now_ms, |addr| sendable.contains(addr))
            {
                return self.peers.get(&next_hop_addr);
            }
            return None;
        };

        // 4. Bloom filter candidates — requires dest_coords for loop-free selection.
        //    If no candidate is strictly closer, fall through to tree routing.
        let coordinate_route_addr = {
            let candidates: Vec<&ActivePeer> = self.destination_in_filters(dest_node_addr);
            if !candidates.is_empty() {
                self.select_best_candidate(&candidates, &dest_coords)
                    .map(|peer| *peer.node_addr())
            } else {
                None
            }
        };
        if let Some(next_hop_addr) = coordinate_route_addr {
            return self.peers.get(&next_hop_addr);
        }

        // 5. Greedy tree routing fallback
        let tree_route_addr = self
            .tree_state
            .find_next_hop(&dest_coords)
            .filter(|next_hop_id| {
                self.peers
                    .get(next_hop_id)
                    .is_some_and(|peer| peer.can_send())
            });
        if let Some(next_hop_addr) = tree_route_addr {
            return self.peers.get(&next_hop_addr);
        }
        // Exploration round found no coord/bloom/tree route — fall back to
        // the learned routes that were deliberately skipped in step 3.
        if explore_fallback {
            return sendable_learned_peers.as_ref().and_then(|sendable| {
                self.learned_routes
                    .select_next_hop(dest_node_addr, now_ms, |addr| sendable.contains(addr))
                    .and_then(|next_hop_addr| self.peers.get(&next_hop_addr))
            });
        }

        // Final learned-route attempt before giving up entirely.
        if let Some(sendable) = &sendable_learned_peers
            && let Some(next_hop_addr) =
                self.learned_routes
                    .select_next_hop(dest_node_addr, now_ms, |addr| sendable.contains(addr))
        {
            return self.peers.get(&next_hop_addr);
        }

        // 6. No route
        None
    }
2304
2305    pub(in crate::node) fn learn_reverse_route(
2306        &mut self,
2307        destination: NodeAddr,
2308        next_hop: NodeAddr,
2309    ) {
2310        if self.config.node.routing.mode != RoutingMode::ReplyLearned
2311            || destination == *self.node_addr()
2312        {
2313            return;
2314        }
2315        let now_ms = Self::now_ms();
2316        self.learned_routes.learn(
2317            destination,
2318            next_hop,
2319            now_ms,
2320            self.config.node.routing.learned_ttl_secs,
2321            self.config.node.routing.max_learned_routes_per_dest,
2322        );
2323    }
2324
2325    pub(in crate::node) fn record_route_failure(
2326        &mut self,
2327        destination: NodeAddr,
2328        next_hop: NodeAddr,
2329    ) {
2330        if self.config.node.routing.mode != RoutingMode::ReplyLearned {
2331            return;
2332        }
2333        self.learned_routes.record_failure(&destination, &next_hop);
2334    }
2335
    /// Snapshot the learned-route table as of `now_ms` (for control queries).
    pub(crate) fn learned_route_table_snapshot(&self, now_ms: u64) -> LearnedRouteTableSnapshot {
        self.learned_routes.snapshot(now_ms)
    }

    /// Drop learned routes that have expired as of `now_ms`.
    pub(in crate::node) fn purge_learned_routes(&mut self, now_ms: u64) {
        self.learned_routes.purge_expired(now_ms);
    }
2343
2344    /// Select the best peer from a set of bloom filter candidates.
2345    ///
2346    /// Uses distance from each candidate's tree coordinates to the destination
2347    /// as the primary metric (after link_cost). Only selects peers that are
2348    /// strictly closer to the destination than we are (self-distance check
2349    /// prevents routing loops).
2350    ///
2351    /// Ordering: `(link_cost, distance_to_dest, node_addr)`.
2352    fn select_best_candidate<'a>(
2353        &'a self,
2354        candidates: &[&'a ActivePeer],
2355        dest_coords: &crate::tree::TreeCoordinate,
2356    ) -> Option<&'a ActivePeer> {
2357        let my_distance = self.tree_state.my_coords().distance_to(dest_coords);
2358
2359        let mut best: Option<(&ActivePeer, f64, usize)> = None;
2360
2361        for &candidate in candidates {
2362            if !candidate.can_send() {
2363                continue;
2364            }
2365
2366            let cost = candidate.link_cost();
2367
2368            let dist = self
2369                .tree_state
2370                .peer_coords(candidate.node_addr())
2371                .map(|pc| pc.distance_to(dest_coords))
2372                .unwrap_or(usize::MAX);
2373
2374            // Self-distance check: only consider peers strictly closer
2375            // to the destination than we are (prevents routing loops)
2376            if dist >= my_distance {
2377                continue;
2378            }
2379
2380            let dominated = match &best {
2381                None => true,
2382                Some((_, best_cost, best_dist)) => {
2383                    cost < *best_cost
2384                        || (cost == *best_cost && dist < *best_dist)
2385                        || (cost == *best_cost
2386                            && dist == *best_dist
2387                            && candidate.node_addr() < best.as_ref().unwrap().0.node_addr())
2388                }
2389            };
2390
2391            if dominated {
2392                best = Some((candidate, cost, dist));
2393            }
2394        }
2395
2396        best.map(|(peer, _, _)| peer)
2397    }
2398
2399    /// Check if a destination is in any peer's bloom filter.
2400    pub fn destination_in_filters(&self, dest: &NodeAddr) -> Vec<&ActivePeer> {
2401        self.peers.values().filter(|p| p.may_reach(dest)).collect()
2402    }
2403
    /// Get the TUN packet sender channel.
    ///
    /// Returns `None` if TUN is not active or the node hasn't been started
    /// (the channel is created during [`Node::start`]).
    pub fn tun_tx(&self) -> Option<&TunTx> {
        self.tun_tx.as_ref()
    }
2410
2411    /// Attach app-owned packet I/O for embedded operation without a system TUN.
2412    ///
2413    /// This must be called before [`Node::start`] and requires `tun.enabled =
2414    /// false`. Outbound packets sent to the returned sender are processed by the
2415    /// normal session pipeline. Inbound packets delivered by FIPS sessions are
2416    /// sent to the returned receiver with source attribution.
2417    pub fn attach_external_packet_io(
2418        &mut self,
2419        capacity: usize,
2420    ) -> Result<ExternalPacketIo, NodeError> {
2421        if self.state != NodeState::Created {
2422            return Err(NodeError::Config(ConfigError::Validation(
2423                "external packet I/O must be attached before node start".to_string(),
2424            )));
2425        }
2426        if self.config.tun.enabled {
2427            return Err(NodeError::Config(ConfigError::Validation(
2428                "external packet I/O requires tun.enabled=false".to_string(),
2429            )));
2430        }
2431
2432        let capacity = capacity.max(1);
2433        let (outbound_tx, outbound_rx) = tokio::sync::mpsc::channel(capacity);
2434        let (inbound_tx, inbound_rx) = tokio::sync::mpsc::channel(capacity);
2435        self.tun_outbound_rx = Some(outbound_rx);
2436        self.external_packet_tx = Some(inbound_tx);
2437
2438        Ok(ExternalPacketIo {
2439            outbound_tx,
2440            inbound_rx,
2441        })
2442    }
2443
2444    /// Attach app-owned endpoint data I/O for embedded operation.
2445    ///
2446    /// Commands sent to the returned sender are processed by the node RX loop.
2447    /// Incoming endpoint data is emitted as source-attributed events.
2448    pub(crate) fn attach_endpoint_data_io(
2449        &mut self,
2450        capacity: usize,
2451    ) -> Result<EndpointDataIo, NodeError> {
2452        if self.state != NodeState::Created {
2453            return Err(NodeError::Config(ConfigError::Validation(
2454                "endpoint data I/O must be attached before node start".to_string(),
2455            )));
2456        }
2457
2458        let command_capacity = endpoint_data_command_capacity(capacity);
2459        let (command_tx, command_rx) = tokio::sync::mpsc::channel(command_capacity);
2460        // Inbound endpoint-data events use an unbounded channel — see
2461        // `EndpointDataIo::event_rx` docs for the rationale (kills the
2462        // per-packet semaphore + the cross-task relay task that used to
2463        // sit on top of this channel).
2464        let (event_tx, event_rx) = tokio::sync::mpsc::unbounded_channel();
2465        self.endpoint_command_rx = Some(command_rx);
2466        self.endpoint_event_tx = Some(event_tx.clone());
2467
2468        Ok(EndpointDataIo {
2469            command_tx,
2470            event_rx,
2471            event_tx,
2472        })
2473    }
2474
2475    pub(crate) fn pubkey_for_node_addr(&self, addr: &NodeAddr) -> Option<secp256k1::PublicKey> {
2476        let mut prefix = [0u8; 15];
2477        prefix.copy_from_slice(&addr.as_bytes()[0..15]);
2478        self.identity_cache
2479            .get(&prefix)
2480            .filter(|entry| &entry.node_addr == addr)
2481            .map(|entry| entry.pubkey)
2482    }
2483
2484    pub(crate) fn npub_for_node_addr(&self, addr: &NodeAddr) -> Option<String> {
2485        let mut prefix = [0u8; 15];
2486        prefix.copy_from_slice(&addr.as_bytes()[0..15]);
2487        self.identity_cache
2488            .get(&prefix)
2489            .filter(|entry| &entry.node_addr == addr)
2490            .map(|entry| entry.npub.clone())
2491    }
2492
2493    pub(in crate::node) fn deliver_external_ipv6_packet(
2494        &self,
2495        src_addr: &NodeAddr,
2496        packet: Vec<u8>,
2497    ) {
2498        let Some(external_packet_tx) = &self.external_packet_tx else {
2499            return;
2500        };
2501        if packet.len() < 40 {
2502            return;
2503        }
2504        let Ok(destination) = FipsAddress::from_slice(&packet[24..40]) else {
2505            return;
2506        };
2507        let delivered = NodeDeliveredPacket {
2508            source_node_addr: *src_addr,
2509            source_npub: self.npub_for_node_addr(src_addr),
2510            destination,
2511            packet,
2512        };
2513        if let Err(error) = external_packet_tx.try_send(delivered) {
2514            debug!(error = %error, "Failed to deliver packet to external app sink");
2515        }
2516    }
2517
2518    // === Sending ===
2519
    /// Encrypt and send a link-layer message to an authenticated peer.
    ///
    /// The plaintext should include the message type byte followed by the
    /// message-specific payload (e.g., `[0x50, reason]` for Disconnect).
    ///
    /// The send path prepends a 4-byte session-relative timestamp (inner
    /// header) before encryption. The full 16-byte outer header is used
    /// as AAD for the AEAD construction.
    ///
    /// This is the standard path for sending any link-layer control message
    /// to a peer over their encrypted Noise session.
    ///
    /// Thin wrapper over [`Self::send_encrypted_link_message_with_ce`] with
    /// the congestion-experienced flag cleared; see that method for the
    /// error conditions.
    pub(super) async fn send_encrypted_link_message(
        &mut self,
        node_addr: &NodeAddr,
        plaintext: &[u8],
    ) -> Result<(), NodeError> {
        self.send_encrypted_link_message_with_ce(node_addr, plaintext, false)
            .await
    }
2539
2540    /// Update the local-outbound-broken signal from a `transport.send`
2541    /// outcome. Sets `last_local_send_failure_at` on local-side io
2542    /// errors (NetworkUnreachable / HostUnreachable / AddrNotAvailable);
2543    /// clears it on success. The reaper consults this in
2544    /// `check_link_heartbeats` to switch to `fast_link_dead_timeout_secs`.
2545    pub(in crate::node) fn note_local_send_outcome(
2546        &mut self,
2547        result: &Result<usize, TransportError>,
2548    ) {
2549        match result {
2550            Ok(_) => {
2551                if self.last_local_send_failure_at.is_some() {
2552                    self.last_local_send_failure_at = None;
2553                }
2554            }
2555            Err(TransportError::Io(e))
2556                if matches!(
2557                    e.kind(),
2558                    std::io::ErrorKind::NetworkUnreachable
2559                        | std::io::ErrorKind::HostUnreachable
2560                        | std::io::ErrorKind::AddrNotAvailable
2561                ) =>
2562            {
2563                self.last_local_send_failure_at = Some(std::time::Instant::now());
2564            }
2565            Err(_) => {}
2566        }
2567    }
2568
    /// Returns the instant of the most recent observed local outbound
    /// failure, if any (cleared again by the next successful send).
    /// Used by the link-dead reaper.
    pub(in crate::node) fn last_local_send_failure_at(&self) -> Option<std::time::Instant> {
        self.last_local_send_failure_at
    }
2574
    /// Like `send_encrypted_link_message` but allows setting the FMP CE flag.
    ///
    /// Used by the forwarding path to relay congestion signals hop-by-hop.
    ///
    /// # Errors
    ///
    /// Returns `NodeError::PeerNotFound` for unknown peers;
    /// `NodeError::SendFailed` when the session is not fully established
    /// (missing remote index, transport, address, or Noise state) or when
    /// encryption / counter reservation / transport send fails; and
    /// `NodeError::MtuExceeded` when the transport rejects an oversized
    /// frame on the inline path.
    pub(super) async fn send_encrypted_link_message_with_ce(
        &mut self,
        node_addr: &NodeAddr,
        plaintext: &[u8],
        ce_flag: bool,
    ) -> Result<(), NodeError> {
        let peer = self
            .peers
            .get_mut(node_addr)
            .ok_or(NodeError::PeerNotFound(*node_addr))?;

        // Everything below requires a fully-established session: the
        // remote receiver index, the transport the session rides on, and
        // the peer's current remote address.
        let their_index = peer.their_index().ok_or_else(|| NodeError::SendFailed {
            node_addr: *node_addr,
            reason: "no their_index".into(),
        })?;
        let transport_id = peer.transport_id().ok_or_else(|| NodeError::SendFailed {
            node_addr: *node_addr,
            reason: "no transport_id".into(),
        })?;
        let remote_addr = peer
            .current_addr()
            .cloned()
            .ok_or_else(|| NodeError::SendFailed {
                node_addr: *node_addr,
                reason: "no current_addr".into(),
            })?;
        #[cfg(any(target_os = "linux", target_os = "macos"))]
        let connected_socket = peer.connected_udp();

        // Prepend 4-byte session-relative timestamp (inner header)
        let timestamp_ms = peer.session_elapsed_ms();

        // MMP: read spin bit value before entering session borrow
        let sp_flag = peer.mmp().map(|mmp| mmp.spin_bit.tx_bit()).unwrap_or(false);
        let mut flags = if sp_flag { FLAG_SP } else { 0 };
        if ce_flag {
            flags |= FLAG_CE;
        }
        if peer.current_k_bit() {
            flags |= FLAG_KEY_EPOCH;
        }

        let session = peer
            .noise_session_mut()
            .ok_or_else(|| NodeError::SendFailed {
                node_addr: *node_addr,
                reason: "no noise session".into(),
            })?;

        // Build 16-byte outer header upfront. The inner-plaintext
        // layout is `[ts:4 LE][plaintext...]`, so its length is exactly
        // `INNER_TS_LEN + plaintext.len()` — no need to build the Vec
        // just to measure it. The worker path uses this length to size
        // the wire buffer directly; the legacy path below still
        // materialises a separate `inner_plaintext` Vec for the inline
        // encrypt-and-send call.
        const INNER_TS_LEN: usize = 4;
        let counter = session.current_send_counter();
        let inner_len = INNER_TS_LEN + plaintext.len();
        let payload_len = inner_len as u16;
        let header = build_established_header(their_index, counter, flags, payload_len);

        // **UDP send fast path.** The encrypt-worker pool is always
        // spawned at lifecycle start (workers = num_cpus) in
        // production, so this branch is taken for every authentic
        // send on every UDP-transported established session. The
        // AEAD work + sendmsg syscall run on a dedicated OS thread;
        // the rx_loop only builds the wire buffer + reserves the
        // counter inline.
        //
        // Other transport kinds (BLE, TCP, sim, ethernet) fall
        // through to the inline encrypt + transport.send path
        // below — those don't have raw-fd / sendmmsg / UDP_GSO
        // benefits to expose through the worker pool, so the simpler
        // synchronous send is the right shape for them.
        //
        // The `encrypt_workers.is_some()` check below is true in
        // production (lifecycle::start spawns the pool); it stays
        // checked rather than `expect()`-ed because unit tests
        // construct `Node` without calling `start()`.
        let transport_for_send = self
            .transports
            .get(&transport_id)
            .ok_or(NodeError::TransportNotFound(transport_id))?;
        let is_udp = matches!(transport_for_send, TransportHandle::Udp(_));
        if let Some(workers) = self.encrypt_workers.as_ref().cloned()
            && is_udp
            && let Some(cipher_clone) = session.send_cipher_clone()
        {
            {
                // Reserve the counter on the session so subsequent
                // sends don't reuse it. `current_send_counter` only
                // peeks; we advance via `take_send_counter`.
                let reserved_counter =
                    session
                        .take_send_counter()
                        .map_err(|e| NodeError::SendFailed {
                            node_addr: *node_addr,
                            reason: format!("counter reservation failed: {}", e),
                        })?;
                debug_assert_eq!(reserved_counter, counter);
                // Re-derive the header with the now-locked-in counter
                // value (same value, but the call sequence is more
                // explicit).
                let header =
                    build_established_header(their_index, reserved_counter, flags, payload_len);
                let transport = transport_for_send;
                // Snapshot the per-peer connected UDP socket before
                // resolving the fallback address. On the established
                // steady-state path this socket already carries the
                // kernel peer address, so re-parsing the configured
                // transport address and touching the DNS cache on every
                // packet is pure overhead on the sender hot path.
                let send_target = {
                    if let TransportHandle::Udp(udp) = transport {
                        let socket_addr = {
                            #[cfg(any(target_os = "linux", target_os = "macos"))]
                            {
                                match connected_socket.as_ref() {
                                    Some(socket) => Some(socket.peer_addr()),
                                    None => udp.resolve_for_off_task(&remote_addr).await.ok(),
                                }
                            }
                            #[cfg(not(any(target_os = "linux", target_os = "macos")))]
                            {
                                udp.resolve_for_off_task(&remote_addr).await.ok()
                            }
                        };
                        match (udp.async_socket(), socket_addr) {
                            (Some(socket), Some(socket_addr)) => Some((socket, socket_addr)),
                            _ => None,
                        }
                    } else {
                        None
                    }
                };
                if let Some((socket, socket_addr)) = send_target {
                    // Build the wire buffer **directly** from
                    // `plaintext` with a single allocation:
                    //   `[16 header][4 ts][plaintext...]` with
                    // +16 trailing capacity for the AEAD tag.
                    // The worker seals `wire_buf[16..]` in
                    // place and appends the tag — no second
                    // alloc, no second memcpy.
                    //
                    // Previous design built `inner_plaintext`
                    // via `prepend_inner_header` (1 alloc + 1
                    // copy) and then let the worker memcpy
                    // header + plaintext into a fresh Vec
                    // (another alloc + copy). At ~100 kpps the
                    // saved alloc/copy is ~150 MB/sec of memory
                    // bandwidth on the hot rx_loop + worker.
                    let wire_capacity = ESTABLISHED_HEADER_SIZE + inner_len + 16;
                    let mut wire_buf = Vec::with_capacity(wire_capacity);
                    wire_buf.extend_from_slice(&header);
                    wire_buf.extend_from_slice(&timestamp_ms.to_le_bytes());
                    wire_buf.extend_from_slice(plaintext);
                    let predicted_bytes = wire_capacity;
                    // Stats / MMP update inline — predicted size
                    // is exact for ChaCha20-Poly1305 (tag is
                    // constant 16 bytes). When `connected_socket` is
                    // `Some`, the worker sends on it without a
                    // destination sockaddr — the kernel skips the
                    // per-packet sockaddr + route + neighbor resolve.
                    if let Some(peer) = self.peers.get_mut(node_addr) {
                        peer.link_stats_mut().record_sent(predicted_bytes);
                        if let Some(mmp) = peer.mmp_mut() {
                            mmp.sender
                                .record_sent(reserved_counter, timestamp_ms, predicted_bytes);
                        }
                    }
                    workers.dispatch(self::encrypt_worker::FmpSendJob {
                        cipher: cipher_clone,
                        counter: reserved_counter,
                        wire_buf,
                        fsp_seal: None,
                        socket,
                        dest_addr: socket_addr,
                        #[cfg(any(target_os = "linux", target_os = "macos"))]
                        connected_socket,
                        drop_on_backpressure: plaintext
                            .first()
                            .is_some_and(|ty| *ty == SessionMessageType::EndpointData.to_byte()),
                        queued_at: crate::perf_profile::stamp(),
                    });
                    return Ok(());
                }
            }
        }

        // Inline (legacy) path: encrypt + send on the rx_loop.
        // Build the inner plaintext lazily here — the worker path
        // above never reaches this point, so the prepend_inner_header
        // alloc is avoided in the fast path.
        let inner_plaintext = prepend_inner_header(timestamp_ms, plaintext);
        // Encrypt with AAD binding to the outer header
        let ciphertext = {
            let _t = crate::perf_profile::Timer::start(crate::perf_profile::Stage::FmpEncrypt);
            session
                .encrypt_with_aad(&inner_plaintext, &header)
                .map_err(|e| NodeError::SendFailed {
                    node_addr: *node_addr,
                    reason: format!("encryption failed: {}", e),
                })?
        };

        let wire_packet = build_encrypted(&header, &ciphertext);

        // Re-borrow peer for stats update after sending
        let send_result = {
            let _t = crate::perf_profile::Timer::start(crate::perf_profile::Stage::UdpSend);
            let transport = self
                .transports
                .get(&transport_id)
                .ok_or(NodeError::TransportNotFound(transport_id))?;
            transport.send(&remote_addr, &wire_packet).await
        };
        self.note_local_send_outcome(&send_result);
        let bytes_sent = send_result.map_err(|e| match e {
            TransportError::MtuExceeded { packet_size, mtu } => NodeError::MtuExceeded {
                node_addr: *node_addr,
                packet_size,
                mtu,
            },
            other => NodeError::SendFailed {
                node_addr: *node_addr,
                reason: format!("transport send: {}", other),
            },
        })?;

        // Update send statistics
        if let Some(peer) = self.peers.get_mut(node_addr) {
            peer.link_stats_mut().record_sent(bytes_sent);
            // MMP: record sent frame for sender report generation
            if let Some(mmp) = peer.mmp_mut() {
                mmp.sender.record_sent(counter, timestamp_ms, bytes_sent);
            }
        }

        Ok(())
    }
2819}
2820
2821impl fmt::Debug for Node {
2822    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2823        f.debug_struct("Node")
2824            .field("node_addr", self.node_addr())
2825            .field("state", &self.state)
2826            .field("is_leaf_only", &self.is_leaf_only)
2827            .field("connections", &self.connection_count())
2828            .field("peers", &self.peer_count())
2829            .field("links", &self.link_count())
2830            .field("transports", &self.transport_count())
2831            .finish()
2832    }
2833}