ant_quic/connection/mod.rs

1// Copyright 2024 Saorsa Labs Ltd.
2//
3// This Saorsa Network Software is licensed under the General Public License (GPL), version 3.
4// Please see the file LICENSE-GPL, or visit <http://www.gnu.org/licenses/> for the full text.
5//
6// Full details available at https://saorsalabs.com/licenses
7
8#![allow(clippy::unwrap_used, clippy::expect_used, clippy::panic)]
9use std::{
10    cmp,
11    collections::VecDeque,
12    convert::TryFrom,
13    fmt, io, mem,
14    net::{IpAddr, SocketAddr},
15    sync::Arc,
16};
17
18use bytes::{Bytes, BytesMut};
19use frame::StreamMetaVec;
20// Removed qlog feature
21
22use rand::{Rng, SeedableRng, rngs::StdRng};
23use thiserror::Error;
24use tracing::{debug, error, info, trace, trace_span, warn};
25
26use crate::{
27    Dir, Duration, EndpointConfig, Frame, INITIAL_MTU, Instant, MAX_CID_SIZE, MAX_STREAM_COUNT,
28    MIN_INITIAL_SIZE, MtuDiscoveryConfig, Side, StreamId, TIMER_GRANULARITY, TokenStore, Transmit,
29    TransportError, TransportErrorCode, VarInt,
30    cid_generator::ConnectionIdGenerator,
31    cid_queue::CidQueue,
32    coding::BufMutExt,
33    config::{ServerConfig, TransportConfig},
34    crypto::{self, KeyPair, Keys, PacketKey},
35    endpoint::AddressDiscoveryStats,
36    frame::{self, Close, Datagram, FrameStruct, NewToken},
37    nat_traversal_api::PeerId,
38    packet::{
39        FixedLengthConnectionIdParser, Header, InitialHeader, InitialPacket, LongType, Packet,
40        PacketNumber, PartialDecode, SpaceId,
41    },
42    range_set::ArrayRangeSet,
43    shared::{
44        ConnectionEvent, ConnectionEventInner, ConnectionId, DatagramConnectionEvent, EcnCodepoint,
45        EndpointEvent, EndpointEventInner,
46    },
47    token::{ResetToken, Token, TokenPayload},
48    transport_parameters::TransportParameters,
49};
50
51mod ack_frequency;
52use ack_frequency::AckFrequencyState;
53
54pub(crate) mod nat_traversal;
55use nat_traversal::NatTraversalState;
56// v0.13.0: NatTraversalRole removed - all nodes are symmetric P2P nodes
57pub(crate) use nat_traversal::{CoordinationPhase, NatTraversalError};
58
59mod assembler;
60pub use assembler::Chunk;
61
62mod cid_state;
63use cid_state::CidState;
64
65mod datagrams;
66use datagrams::DatagramState;
67pub use datagrams::{Datagrams, SendDatagramError};
68
69mod mtud;
70use mtud::MtuDiscovery;
71
72mod pacing;
73
74mod packet_builder;
75use packet_builder::PacketBuilder;
76
77mod packet_crypto;
78use packet_crypto::{PrevCrypto, ZeroRttCrypto};
79
80mod paths;
81pub use paths::RttEstimator;
82use paths::{NatTraversalChallenges, PathData, PathResponses};
83
84mod send_buffer;
85
86mod spaces;
87#[cfg(fuzzing)]
88pub use spaces::Retransmits;
89#[cfg(not(fuzzing))]
90use spaces::Retransmits;
91use spaces::{PacketNumberFilter, PacketSpace, SendableFrames, SentPacket, ThinRetransmits};
92
93mod stats;
94pub use stats::{ConnectionStats, FrameStats, PathStats, UdpStats};
95
96mod streams;
97#[cfg(fuzzing)]
98pub use streams::StreamsState;
99#[cfg(not(fuzzing))]
100use streams::StreamsState;
101pub use streams::{
102    Chunks, ClosedStream, FinishError, ReadError, ReadableError, RecvStream, SendStream,
103    ShouldTransmit, StreamEvent, Streams, WriteError, Written,
104};
105
106mod timer;
107use crate::congestion::Controller;
108use timer::{Timer, TimerTable};
109
/// Protocol state and logic for a single QUIC connection
///
/// Objects of this type receive [`ConnectionEvent`]s and emit [`EndpointEvent`]s and application
/// [`Event`]s to make progress. To handle timeouts, a `Connection` returns timer updates and
/// expects timeouts through various methods. A number of simple getter methods are exposed
/// to allow callers to inspect some of the connection state.
///
/// `Connection` has roughly four types of methods:
///
/// - A. Simple getters, taking `&self`
/// - B. Handlers for incoming events from the network or system, named `handle_*`.
/// - C. State machine mutators, for incoming commands from the application. For convenience we
///   refer to these as "performing I/O" below; however, by design none of these functions
///   performs system-level I/O. Examples include [`read`](RecvStream::read),
///   [`write`](SendStream::write), and [`reset`](SendStream::reset).
/// - D. Polling functions for outgoing events or actions for the caller to
///   take, named `poll_*`.
///
/// The simplest way to use this API correctly is to call (B) and (C) whenever
/// appropriate, then after each of those calls, as soon as feasible, call all
/// polling methods (D) and deal with their outputs appropriately, e.g. by
/// passing them to the application or by making a system-level I/O call. You
/// should call the polling functions in this order:
///
/// 1. [`poll_transmit`](Self::poll_transmit)
/// 2. [`poll_timeout`](Self::poll_timeout)
/// 3. [`poll_endpoint_events`](Self::poll_endpoint_events)
/// 4. [`poll`](Self::poll)
///
/// Currently the only actual dependency is from (2) to (1); however, additional
/// dependencies may be added in the future, so the above order is recommended.
///
/// (A) may be called whenever desired.
///
/// Care should be taken to ensure that the input events represent monotonically
/// increasing time. Specifically, a call to [`handle_timeout`](Self::handle_timeout)
/// may be interleaved in any order with calls to [`handle_event`](Self::handle_event)
/// carrying the same [`Instant`]; however, events or timeouts with different instants
/// must not be interleaved.
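///
/// # Example
///
/// A minimal driver loop, sketched under assumptions: `socket`, `timer`, `endpoint`,
/// `connection_handle`, and `MAX_GSO_SEGMENTS` are placeholders for the caller's own I/O
/// plumbing, not items provided by this module.
///
/// ```ignore
/// let mut buf = Vec::new();
/// loop {
///     // (B)/(C): feed network events and application commands first, then poll (D):
///     // 1. Flush outgoing datagrams.
///     while let Some(transmit) = conn.poll_transmit(Instant::now(), MAX_GSO_SEGMENTS, &mut buf) {
///         socket.send(&buf[..transmit.size], transmit.destination);
///         buf.clear();
///     }
///     // 2. Re-arm the caller-side timer.
///     if let Some(deadline) = conn.poll_timeout() {
///         timer.set(deadline);
///     }
///     // 3. Forward endpoint-facing events to the owning `Endpoint`, feeding replies back.
///     while let Some(event) = conn.poll_endpoint_events() {
///         if let Some(reply) = endpoint.handle_event(connection_handle, event) {
///             conn.handle_event(reply);
///         }
///     }
///     // 4. Surface application-facing events.
///     while let Some(event) = conn.poll() {
///         // hand off to the application
///     }
/// }
/// ```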
149pub struct Connection {
150    endpoint_config: Arc<EndpointConfig>,
151    config: Arc<TransportConfig>,
152    rng: StdRng,
153    crypto: Box<dyn crypto::Session>,
154    /// The CID we initially chose, for use during the handshake
155    handshake_cid: ConnectionId,
156    /// The CID the peer initially chose, for use during the handshake
157    rem_handshake_cid: ConnectionId,
    /// The "real" local IP address which was used to receive the initial packet.
159    /// This is only populated for the server case, and if known
160    local_ip: Option<IpAddr>,
161    path: PathData,
162    /// Whether MTU detection is supported in this environment
163    allow_mtud: bool,
164    prev_path: Option<(ConnectionId, PathData)>,
165    state: State,
166    side: ConnectionSide,
167    /// Whether or not 0-RTT was enabled during the handshake. Does not imply acceptance.
168    zero_rtt_enabled: bool,
169    /// Set if 0-RTT is supported, then cleared when no longer needed.
170    zero_rtt_crypto: Option<ZeroRttCrypto>,
171    key_phase: bool,
172    /// How many packets are in the current key phase. Used only for `Data` space.
173    key_phase_size: u64,
174    /// Transport parameters set by the peer
175    peer_params: TransportParameters,
176    /// Source ConnectionId of the first packet received from the peer
177    orig_rem_cid: ConnectionId,
178    /// Destination ConnectionId sent by the client on the first Initial
179    initial_dst_cid: ConnectionId,
180    /// The value that the server included in the Source Connection ID field of a Retry packet, if
181    /// one was received
182    retry_src_cid: Option<ConnectionId>,
183    /// Total number of outgoing packets that have been deemed lost
184    lost_packets: u64,
185    events: VecDeque<Event>,
186    endpoint_events: VecDeque<EndpointEventInner>,
187    /// Whether the spin bit is in use for this connection
188    spin_enabled: bool,
189    /// Outgoing spin bit state
190    spin: bool,
191    /// Packet number spaces: initial, handshake, 1-RTT
192    spaces: [PacketSpace; 3],
193    /// Highest usable packet number space
194    highest_space: SpaceId,
195    /// 1-RTT keys used prior to a key update
196    prev_crypto: Option<PrevCrypto>,
197    /// 1-RTT keys to be used for the next key update
198    ///
199    /// These are generated in advance to prevent timing attacks and/or DoS by third-party attackers
200    /// spoofing key updates.
201    next_crypto: Option<KeyPair<Box<dyn PacketKey>>>,
202    accepted_0rtt: bool,
203    /// Whether the idle timer should be reset the next time an ack-eliciting packet is transmitted.
204    permit_idle_reset: bool,
205    /// Negotiated idle timeout
206    idle_timeout: Option<Duration>,
207    timers: TimerTable,
208    /// Number of packets received which could not be authenticated
209    authentication_failures: u64,
210    /// Why the connection was lost, if it has been
211    error: Option<ConnectionError>,
212    /// Identifies Data-space packet numbers to skip. Not used in earlier spaces.
213    packet_number_filter: PacketNumberFilter,
214
215    //
216    // Queued non-retransmittable 1-RTT data
217    //
218    /// Responses to PATH_CHALLENGE frames
219    path_responses: PathResponses,
220    /// Challenges for NAT traversal candidate validation
221    nat_traversal_challenges: NatTraversalChallenges,
222    close: bool,
223
224    //
225    // ACK frequency
226    //
227    ack_frequency: AckFrequencyState,
228
229    //
230    // Loss Detection
231    //
232    /// The number of times a PTO has been sent without receiving an ack.
233    pto_count: u32,
234
235    //
236    // Congestion Control
237    //
238    /// Whether the most recently received packet had an ECN codepoint set
239    receiving_ecn: bool,
240    /// Number of packets authenticated
241    total_authed_packets: u64,
242    /// Whether the last `poll_transmit` call yielded no data because there was
243    /// no outgoing application data.
244    app_limited: bool,
245
246    streams: StreamsState,
247    /// Surplus remote CIDs for future use on new paths
248    rem_cids: CidQueue,
249    // Attributes of CIDs generated by local peer
250    local_cid_state: CidState,
251    /// State of the unreliable datagram extension
252    datagrams: DatagramState,
253    /// Connection level statistics
254    stats: ConnectionStats,
255    /// QUIC version used for the connection.
256    version: u32,
257
258    /// NAT traversal state for establishing direct P2P connections
259    nat_traversal: Option<NatTraversalState>,
260
261    /// NAT traversal frame format configuration
262    nat_traversal_frame_config: frame::nat_traversal_unified::NatTraversalFrameConfig,
263
264    /// Address discovery state for tracking observed addresses
265    address_discovery_state: Option<AddressDiscoveryState>,
266
267    /// PQC state for tracking post-quantum cryptography support
268    pqc_state: PqcState,
269
270    /// Trace context for this connection
271    #[cfg(feature = "trace")]
272    trace_context: crate::tracing::TraceContext,
273
274    /// Event log for tracing
275    #[cfg(feature = "trace")]
276    event_log: Arc<crate::tracing::EventLog>,
277
278    /// Qlog writer
279    #[cfg(feature = "__qlog")]
280    qlog_streamer: Option<Box<dyn std::io::Write + Send + Sync>>,
281
282    /// Optional bound peer identity for NEW_TOKEN v2 issuance
283    peer_id_for_tokens: Option<PeerId>,
284    /// When true, NEW_TOKEN frames are delayed until channel binding
285    /// sets `peer_id_for_tokens`, avoiding legacy tokens in v2 mode.
286    delay_new_token_until_binding: bool,
287}
288
289impl Connection {
290    pub(crate) fn new(
291        endpoint_config: Arc<EndpointConfig>,
292        config: Arc<TransportConfig>,
293        init_cid: ConnectionId,
294        loc_cid: ConnectionId,
295        rem_cid: ConnectionId,
296        remote: SocketAddr,
297        local_ip: Option<IpAddr>,
298        crypto: Box<dyn crypto::Session>,
299        cid_gen: &dyn ConnectionIdGenerator,
300        now: Instant,
301        version: u32,
302        allow_mtud: bool,
303        rng_seed: [u8; 32],
304        side_args: SideArgs,
305    ) -> Self {
306        let pref_addr_cid = side_args.pref_addr_cid();
307        let path_validated = side_args.path_validated();
308        let connection_side = ConnectionSide::from(side_args);
309        let side = connection_side.side();
310        let initial_space = PacketSpace {
311            crypto: Some(crypto.initial_keys(&init_cid, side)),
312            ..PacketSpace::new(now)
313        };
314        let state = State::Handshake(state::Handshake {
315            rem_cid_set: side.is_server(),
316            expected_token: Bytes::new(),
317            client_hello: None,
318        });
319        let mut rng = StdRng::from_seed(rng_seed);
320        let mut this = Self {
321            endpoint_config,
322            crypto,
323            handshake_cid: loc_cid,
324            rem_handshake_cid: rem_cid,
325            local_cid_state: CidState::new(
326                cid_gen.cid_len(),
327                cid_gen.cid_lifetime(),
328                now,
329                if pref_addr_cid.is_some() { 2 } else { 1 },
330            ),
331            path: PathData::new(remote, allow_mtud, None, now, &config),
332            allow_mtud,
333            local_ip,
334            prev_path: None,
335            state,
336            side: connection_side,
337            zero_rtt_enabled: false,
338            zero_rtt_crypto: None,
339            key_phase: false,
340            // A small initial key phase size ensures peers that don't handle key updates correctly
341            // fail sooner rather than later. It's okay for both peers to do this, as the first one
342            // to perform an update will reset the other's key phase size in `update_keys`, and a
343            // simultaneous key update by both is just like a regular key update with a really fast
344            // response. Inspired by quic-go's similar behavior of performing the first key update
345            // at the 100th short-header packet.
346            key_phase_size: rng.gen_range(10..1000),
347            peer_params: TransportParameters::default(),
348            orig_rem_cid: rem_cid,
349            initial_dst_cid: init_cid,
350            retry_src_cid: None,
351            lost_packets: 0,
352            events: VecDeque::new(),
353            endpoint_events: VecDeque::new(),
354            spin_enabled: config.allow_spin && rng.gen_ratio(7, 8),
355            spin: false,
356            spaces: [initial_space, PacketSpace::new(now), PacketSpace::new(now)],
357            highest_space: SpaceId::Initial,
358            prev_crypto: None,
359            next_crypto: None,
360            accepted_0rtt: false,
361            permit_idle_reset: true,
362            idle_timeout: match config.max_idle_timeout {
363                None | Some(VarInt(0)) => None,
364                Some(dur) => Some(Duration::from_millis(dur.0)),
365            },
366            timers: TimerTable::default(),
367            authentication_failures: 0,
368            error: None,
369            #[cfg(test)]
370            packet_number_filter: match config.deterministic_packet_numbers {
371                false => PacketNumberFilter::new(&mut rng),
372                true => PacketNumberFilter::disabled(),
373            },
374            #[cfg(not(test))]
375            packet_number_filter: PacketNumberFilter::new(&mut rng),
376
377            path_responses: PathResponses::default(),
378            nat_traversal_challenges: NatTraversalChallenges::default(),
379            close: false,
380
381            ack_frequency: AckFrequencyState::new(get_max_ack_delay(
382                &TransportParameters::default(),
383            )),
384
385            pto_count: 0,
386
387            app_limited: false,
388            receiving_ecn: false,
389            total_authed_packets: 0,
390
391            streams: StreamsState::new(
392                side,
393                config.max_concurrent_uni_streams,
394                config.max_concurrent_bidi_streams,
395                config.send_window,
396                config.receive_window,
397                config.stream_receive_window,
398            ),
399            datagrams: DatagramState::default(),
400            config,
401            rem_cids: CidQueue::new(rem_cid),
402            rng,
403            stats: ConnectionStats::default(),
404            version,
405            nat_traversal: None, // Will be initialized when NAT traversal is negotiated
406            nat_traversal_frame_config:
407                frame::nat_traversal_unified::NatTraversalFrameConfig::default(),
408            address_discovery_state: {
409                // Initialize with default config for now
410                // Will be updated when transport parameters are negotiated
411                Some(AddressDiscoveryState::new(
412                    &crate::transport_parameters::AddressDiscoveryConfig::default(),
413                    now,
414                ))
415            },
416            pqc_state: PqcState::new(),
417
418            #[cfg(feature = "trace")]
419            trace_context: crate::tracing::TraceContext::new(crate::tracing::TraceId::new()),
420
421            #[cfg(feature = "trace")]
422            event_log: crate::tracing::global_log(),
423
424            #[cfg(feature = "__qlog")]
425            qlog_streamer: None,
426
427            peer_id_for_tokens: None,
428            delay_new_token_until_binding: false,
429        };
430
431        // Trace connection creation
432        #[cfg(feature = "trace")]
433        {
434            use crate::trace_event;
435            use crate::tracing::{Event, EventData, socket_addr_to_bytes, timestamp_now};
436            // Tracing imports handled by macros
437            let _peer_id = {
438                let mut id = [0u8; 32];
439                let addr_bytes = match remote {
440                    SocketAddr::V4(addr) => addr.ip().octets().to_vec(),
441                    SocketAddr::V6(addr) => addr.ip().octets().to_vec(),
442                };
443                id[..addr_bytes.len().min(32)]
444                    .copy_from_slice(&addr_bytes[..addr_bytes.len().min(32)]);
445                id
446            };
447
448            let (addr_bytes, addr_type) = socket_addr_to_bytes(remote);
449            trace_event!(
450                &this.event_log,
451                Event {
452                    timestamp: timestamp_now(),
453                    trace_id: this.trace_context.trace_id(),
454                    sequence: 0,
455                    _padding: 0,
456                    node_id: [0u8; 32], // Will be set by endpoint
457                    event_data: EventData::ConnInit {
458                        endpoint_bytes: addr_bytes,
459                        addr_type,
460                        _padding: [0u8; 45],
461                    },
462                }
463            );
464        }
465
466        if path_validated {
467            this.on_path_validated();
468        }
469        if side.is_client() {
470            // Kick off the connection
471            this.write_crypto();
472            this.init_0rtt();
473        }
474        this
475    }
476
477    /// Set up qlog for this connection
478    #[cfg(feature = "__qlog")]
479    pub fn set_qlog(
480        &mut self,
481        writer: Box<dyn std::io::Write + Send + Sync>,
482        _title: Option<String>,
483        _description: Option<String>,
484        _now: Instant,
485    ) {
486        self.qlog_streamer = Some(writer);
487    }
488
489    /// Emit qlog recovery metrics
490    #[cfg(feature = "__qlog")]
491    fn emit_qlog_recovery_metrics(&mut self, _now: Instant) {
492        // TODO: Implement actual qlog recovery metrics emission
493        // For now, this is a stub to allow compilation
494    }
495
496    /// Returns the next time at which `handle_timeout` should be called
497    ///
498    /// The value returned may change after:
499    /// - the application performed some I/O on the connection
500    /// - a call was made to `handle_event`
501    /// - a call to `poll_transmit` returned `Some`
502    /// - a call was made to `handle_timeout`
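    ///
    /// A sketch of re-arming a caller-side timer; `timer` here is a placeholder for whatever
    /// timing facility the caller uses to drive [`handle_timeout`](Self::handle_timeout):
    ///
    /// ```ignore
    /// match conn.poll_timeout() {
    ///     Some(deadline) => timer.set(deadline),
    ///     None => timer.cancel(),
    /// }
    /// // When the deadline fires:
    /// conn.handle_timeout(Instant::now());
    /// ```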
503    #[must_use]
504    pub fn poll_timeout(&mut self) -> Option<Instant> {
505        let mut next_timeout = self.timers.next_timeout();
506
507        // Check NAT traversal timeouts
508        if let Some(nat_state) = &self.nat_traversal {
509            if let Some(nat_timeout) = nat_state.get_next_timeout(Instant::now()) {
510                // Schedule NAT traversal timer
511                self.timers.set(Timer::NatTraversal, nat_timeout);
512                next_timeout = Some(next_timeout.map_or(nat_timeout, |t| t.min(nat_timeout)));
513            }
514        }
515
516        next_timeout
517    }
518
519    /// Returns application-facing events
520    ///
521    /// Connections should be polled for events after:
522    /// - a call was made to `handle_event`
523    /// - a call was made to `handle_timeout`
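    ///
    /// A typical drain loop (sketch):
    ///
    /// ```ignore
    /// while let Some(event) = conn.poll() {
    ///     match event {
    ///         Event::ConnectionLost { reason } => { /* tear down, report `reason` */ }
    ///         Event::Stream(stream_event) => { /* readable, writable, finished, ... */ }
    ///         _ => { /* handshake/connection progress, datagrams, ... */ }
    ///     }
    /// }
    /// ```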
524    #[must_use]
525    pub fn poll(&mut self) -> Option<Event> {
526        if let Some(x) = self.events.pop_front() {
527            return Some(x);
528        }
529
530        if let Some(event) = self.streams.poll() {
531            return Some(Event::Stream(event));
532        }
533
534        if let Some(err) = self.error.take() {
535            return Some(Event::ConnectionLost { reason: err });
536        }
537
538        None
539    }
540
541    /// Return endpoint-facing events
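    ///
    /// Sketch of forwarding these to the endpoint that owns this connection; `endpoint` and
    /// `connection_handle` are the caller's own handles, not fields of `Connection`:
    ///
    /// ```ignore
    /// while let Some(event) = conn.poll_endpoint_events() {
    ///     if let Some(reply) = endpoint.handle_event(connection_handle, event) {
    ///         conn.handle_event(reply);
    ///     }
    /// }
    /// ```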
542    #[must_use]
543    pub fn poll_endpoint_events(&mut self) -> Option<EndpointEvent> {
544        self.endpoint_events.pop_front().map(EndpointEvent)
545    }
546
547    /// Provide control over streams
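    ///
    /// Sketch of accepting a peer-initiated stream, assuming the quinn-proto-style `Streams`
    /// accessor API:
    ///
    /// ```ignore
    /// if let Some(id) = conn.streams().accept(Dir::Bi) {
    ///     // the peer opened a new bidirectional stream
    /// }
    /// ```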
548    #[must_use]
549    pub fn streams(&mut self) -> Streams<'_> {
550        Streams {
551            state: &mut self.streams,
552            conn_state: &self.state,
553        }
554    }
555
556    // Removed unused trace accessors to eliminate dead_code warnings
557
    /// Provide control over the receiving side of a stream
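    ///
    /// Sketch of draining readable data, assuming the quinn-proto-style chunked read API on
    /// `RecvStream` and `Chunks`:
    ///
    /// ```ignore
    /// let mut recv = conn.recv_stream(id);
    /// let mut chunks = recv.read(true)?;
    /// while let Some(chunk) = chunks.next(usize::MAX)? {
    ///     // process chunk.bytes
    /// }
    /// let _ = chunks.finalize();
    /// ```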
559    #[must_use]
560    pub fn recv_stream(&mut self, id: StreamId) -> RecvStream<'_> {
561        assert!(id.dir() == Dir::Bi || id.initiator() != self.side.side());
562        RecvStream {
563            id,
564            state: &mut self.streams,
565            pending: &mut self.spaces[SpaceId::Data].pending,
566        }
567    }
568
    /// Provide control over the sending side of a stream
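    ///
    /// Sketch of opening a stream and writing to it, assuming the quinn-proto-style
    /// `Streams::open` and `SendStream::write` signatures:
    ///
    /// ```ignore
    /// if let Some(id) = conn.streams().open(Dir::Uni) {
    ///     conn.send_stream(id).write(b"hello")?;
    /// }
    /// ```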
570    #[must_use]
571    pub fn send_stream(&mut self, id: StreamId) -> SendStream<'_> {
572        assert!(id.dir() == Dir::Bi || id.initiator() == self.side.side());
573        SendStream {
574            id,
575            state: &mut self.streams,
576            pending: &mut self.spaces[SpaceId::Data].pending,
577            conn_state: &self.state,
578        }
579    }
580
581    /// Returns packets to transmit
582    ///
583    /// Connections should be polled for transmit after:
584    /// - the application performed some I/O on the connection
585    /// - a call was made to `handle_event`
586    /// - a call was made to `handle_timeout`
587    ///
588    /// `max_datagrams` specifies how many datagrams can be returned inside a
589    /// single Transmit using GSO. This must be at least 1.
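    ///
    /// Sketch of a transmit loop with GSO; `socket` and `MAX_GSO_SEGMENTS` stand in for the
    /// caller's UDP I/O and configured segment limit:
    ///
    /// ```ignore
    /// let mut buf = Vec::new();
    /// while let Some(transmit) = conn.poll_transmit(Instant::now(), MAX_GSO_SEGMENTS, &mut buf) {
    ///     // `transmit.segment_size` is `Some` when `buf` holds several equally sized UDP
    ///     // datagrams that can be handed to the kernel as a single GSO batch.
    ///     socket.send_to(&buf[..transmit.size], transmit.destination, transmit.segment_size);
    ///     buf.clear();
    /// }
    /// ```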
590    #[must_use]
591    pub fn poll_transmit(
592        &mut self,
593        now: Instant,
594        max_datagrams: usize,
595        buf: &mut Vec<u8>,
596    ) -> Option<Transmit> {
597        assert!(max_datagrams != 0);
598        let max_datagrams = match self.config.enable_segmentation_offload {
599            false => 1,
600            true => max_datagrams,
601        };
602
603        let mut num_datagrams = 0;
604        // Position in `buf` of the first byte of the current UDP datagram. When coalescing QUIC
605        // packets, this can be earlier than the start of the current QUIC packet.
606        let mut datagram_start = 0;
607        let mut segment_size = usize::from(self.path.current_mtu());
608
609        // Check for NAT traversal coordination timeouts
610        if let Some(nat_traversal) = &mut self.nat_traversal {
611            if nat_traversal.check_coordination_timeout(now) {
612                trace!("NAT traversal coordination timed out, may retry");
613            }
614        }
615
616        // First priority: NAT traversal PATH_CHALLENGE packets (includes coordination)
617        if let Some(challenge) = self.send_nat_traversal_challenge(now, buf) {
618            return Some(challenge);
619        }
620
621        if let Some(challenge) = self.send_path_challenge(now, buf) {
622            return Some(challenge);
623        }
624
625        // If we need to send a probe, make sure we have something to send.
626        for space in SpaceId::iter() {
627            let request_immediate_ack =
628                space == SpaceId::Data && self.peer_supports_ack_frequency();
629            self.spaces[space].maybe_queue_probe(request_immediate_ack, &self.streams);
630        }
631
632        // Check whether we need to send a close message
633        let close = match self.state {
634            State::Drained => {
635                self.app_limited = true;
636                return None;
637            }
638            State::Draining | State::Closed(_) => {
                // self.close is only reset once the associated packet has been
                // encoded successfully
641                if !self.close {
642                    self.app_limited = true;
643                    return None;
644                }
645                true
646            }
647            _ => false,
648        };
649
650        // Check whether we need to send an ACK_FREQUENCY frame
651        if let Some(config) = &self.config.ack_frequency_config {
652            self.spaces[SpaceId::Data].pending.ack_frequency = self
653                .ack_frequency
654                .should_send_ack_frequency(self.path.rtt.get(), config, &self.peer_params)
655                && self.highest_space == SpaceId::Data
656                && self.peer_supports_ack_frequency();
657        }
658
659        // Reserving capacity can provide more capacity than we asked for. However, we are not
660        // allowed to write more than `segment_size`. Therefore the maximum capacity is tracked
661        // separately.
662        let mut buf_capacity = 0;
663
664        let mut coalesce = true;
665        let mut builder_storage: Option<PacketBuilder> = None;
666        let mut sent_frames = None;
667        let mut pad_datagram = false;
668        let mut pad_datagram_to_mtu = false;
669        let mut congestion_blocked = false;
670
671        // Iterate over all spaces and find data to send
672        let mut space_idx = 0;
673        let spaces = [SpaceId::Initial, SpaceId::Handshake, SpaceId::Data];
674        // This loop will potentially spend multiple iterations in the same `SpaceId`,
675        // so we cannot trivially rewrite it to take advantage of `SpaceId::iter()`.
676        while space_idx < spaces.len() {
677            let space_id = spaces[space_idx];
678            // Number of bytes available for frames if this is a 1-RTT packet. We're guaranteed to
679            // be able to send an individual frame at least this large in the next 1-RTT
680            // packet. This could be generalized to support every space, but it's only needed to
681            // handle large fixed-size frames, which only exist in 1-RTT (application datagrams). We
682            // don't account for coalesced packets potentially occupying space because frames can
683            // always spill into the next datagram.
684            let pn = self.packet_number_filter.peek(&self.spaces[SpaceId::Data]);
685            let frame_space_1rtt =
686                segment_size.saturating_sub(self.predict_1rtt_overhead(Some(pn)));
687
688            // Is there data or a close message to send in this space?
689            let can_send = self.space_can_send(space_id, frame_space_1rtt);
690            if can_send.is_empty() && (!close || self.spaces[space_id].crypto.is_none()) {
691                space_idx += 1;
692                continue;
693            }
694
695            let mut ack_eliciting = !self.spaces[space_id].pending.is_empty(&self.streams)
696                || self.spaces[space_id].ping_pending
697                || self.spaces[space_id].immediate_ack_pending;
698            if space_id == SpaceId::Data {
699                ack_eliciting |= self.can_send_1rtt(frame_space_1rtt);
700            }
701
702            pad_datagram_to_mtu |= space_id == SpaceId::Data && self.config.pad_to_mtu;
703
704            // Can we append more data into the current buffer?
705            // It is not safe to assume that `buf.len()` is the end of the data,
706            // since the last packet might not have been finished.
707            let buf_end = if let Some(builder) = &builder_storage {
708                buf.len().max(builder.min_size) + builder.tag_len
709            } else {
710                buf.len()
711            };
712
713            let tag_len = if let Some(ref crypto) = self.spaces[space_id].crypto {
714                crypto.packet.local.tag_len()
715            } else if space_id == SpaceId::Data {
716                match self.zero_rtt_crypto.as_ref() {
717                    Some(crypto) => crypto.packet.tag_len(),
718                    None => {
719                        // This should never happen - log and return early
720                        error!(
721                            "sending packets in the application data space requires known 0-RTT or 1-RTT keys"
722                        );
723                        return None;
724                    }
725                }
726            } else {
727                unreachable!("tried to send {:?} packet without keys", space_id)
728            };
729            if !coalesce || buf_capacity - buf_end < MIN_PACKET_SPACE + tag_len {
730                // We need to send 1 more datagram and extend the buffer for that.
731
732                // Is 1 more datagram allowed?
733                if num_datagrams >= max_datagrams {
734                    // No more datagrams allowed
735                    break;
736                }
737
738                // Anti-amplification is only based on `total_sent`, which gets
                // updated at the end of this method. Therefore we pass the number
740                // of bytes for datagrams that are already created, as well as 1 byte
741                // for starting another datagram. If there is any anti-amplification
742                // budget left, we always allow a full MTU to be sent
743                // (see https://github.com/quinn-rs/quinn/issues/1082)
744                if self
745                    .path
746                    .anti_amplification_blocked(segment_size as u64 * (num_datagrams as u64) + 1)
747                {
748                    trace!("blocked by anti-amplification");
749                    break;
750                }
751
752                // Congestion control and pacing checks
753                // Tail loss probes must not be blocked by congestion, or a deadlock could arise
754                if ack_eliciting && self.spaces[space_id].loss_probes == 0 {
755                    // Assume the current packet will get padded to fill the segment
756                    let untracked_bytes = if let Some(builder) = &builder_storage {
757                        buf_capacity - builder.partial_encode.start
758                    } else {
759                        0
760                    } as u64;
761                    debug_assert!(untracked_bytes <= segment_size as u64);
762
763                    let bytes_to_send = segment_size as u64 + untracked_bytes;
764                    if self.path.in_flight.bytes + bytes_to_send >= self.path.congestion.window() {
765                        space_idx += 1;
766                        congestion_blocked = true;
767                        // We continue instead of breaking here in order to avoid
768                        // blocking loss probes queued for higher spaces.
769                        trace!("blocked by congestion control");
770                        continue;
771                    }
772
773                    // Check whether the next datagram is blocked by pacing
774                    let smoothed_rtt = self.path.rtt.get();
775                    if let Some(delay) = self.path.pacing.delay(
776                        smoothed_rtt,
777                        bytes_to_send,
778                        self.path.current_mtu(),
779                        self.path.congestion.window(),
780                        now,
781                    ) {
782                        self.timers.set(Timer::Pacing, delay);
783                        congestion_blocked = true;
784                        // Loss probes should be subject to pacing, even though
785                        // they are not congestion controlled.
786                        trace!("blocked by pacing");
787                        break;
788                    }
789                }
790
791                // Finish current packet
792                if let Some(mut builder) = builder_storage.take() {
793                    if pad_datagram {
794                        let min_size = self.pqc_state.min_initial_size();
795                        builder.pad_to(min_size);
796                    }
797
798                    if num_datagrams > 1 || pad_datagram_to_mtu {
799                        // If too many padding bytes would be required to continue the GSO batch
800                        // after this packet, end the GSO batch here. Ensures that fixed-size frames
801                        // with heterogeneous sizes (e.g. application datagrams) won't inadvertently
802                        // waste large amounts of bandwidth. The exact threshold is a bit arbitrary
803                        // and might benefit from further tuning, though there's no universally
804                        // optimal value.
805                        //
806                        // Additionally, if this datagram is a loss probe and `segment_size` is
807                        // larger than `INITIAL_MTU`, then padding it to `segment_size` to continue
808                        // the GSO batch would risk failure to recover from a reduction in path
809                        // MTU. Loss probes are the only packets for which we might grow
810                        // `buf_capacity` by less than `segment_size`.
811                        const MAX_PADDING: usize = 16;
812                        let packet_len_unpadded = cmp::max(builder.min_size, buf.len())
813                            - datagram_start
814                            + builder.tag_len;
815                        if (packet_len_unpadded + MAX_PADDING < segment_size
816                            && !pad_datagram_to_mtu)
817                            || datagram_start + segment_size > buf_capacity
818                        {
819                            trace!(
820                                "GSO truncated by demand for {} padding bytes or loss probe",
821                                segment_size - packet_len_unpadded
822                            );
823                            builder_storage = Some(builder);
824                            break;
825                        }
826
827                        // Pad the current datagram to GSO segment size so it can be included in the
828                        // GSO batch.
829                        builder.pad_to(segment_size as u16);
830                    }
831
832                    builder.finish_and_track(now, self, sent_frames.take(), buf);
833
834                    if num_datagrams == 1 {
835                        // Set the segment size for this GSO batch to the size of the first UDP
836                        // datagram in the batch. Larger data that cannot be fragmented
837                        // (e.g. application datagrams) will be included in a future batch. When
838                        // sending large enough volumes of data for GSO to be useful, we expect
839                        // packet sizes to usually be consistent, e.g. populated by max-size STREAM
840                        // frames or uniformly sized datagrams.
841                        segment_size = buf.len();
842                        // Clip the unused capacity out of the buffer so future packets don't
843                        // overrun
844                        buf_capacity = buf.len();
845
846                        // Check whether the data we planned to send will fit in the reduced segment
847                        // size. If not, bail out and leave it for the next GSO batch so we don't
848                        // end up trying to send an empty packet. We can't easily compute the right
849                        // segment size before the original call to `space_can_send`, because at
850                        // that time we haven't determined whether we're going to coalesce with the
851                        // first datagram or potentially pad it to `MIN_INITIAL_SIZE`.
852                        if space_id == SpaceId::Data {
853                            let frame_space_1rtt =
854                                segment_size.saturating_sub(self.predict_1rtt_overhead(Some(pn)));
855                            if self.space_can_send(space_id, frame_space_1rtt).is_empty() {
856                                break;
857                            }
858                        }
859                    }
860                }
861
862                // Allocate space for another datagram
863                let next_datagram_size_limit = match self.spaces[space_id].loss_probes {
864                    0 => segment_size,
865                    _ => {
866                        self.spaces[space_id].loss_probes -= 1;
867                        // Clamp the datagram to at most the minimum MTU to ensure that loss probes
                        // can get through and enable recovery even if the path MTU has shrunk
869                        // unexpectedly.
870                        std::cmp::min(segment_size, usize::from(INITIAL_MTU))
871                    }
872                };
873                buf_capacity += next_datagram_size_limit;
874                if buf.capacity() < buf_capacity {
875                    // We reserve the maximum space for sending `max_datagrams` upfront
876                    // to avoid any reallocations if more datagrams have to be appended later on.
                    // Benchmarks have shown a 5-10% throughput improvement
878                    // compared to continuously resizing the datagram buffer.
879                    // While this will lead to over-allocation for small transmits
880                    // (e.g. purely containing ACKs), modern memory allocators
881                    // (e.g. mimalloc and jemalloc) will pool certain allocation sizes
882                    // and therefore this is still rather efficient.
883                    buf.reserve(max_datagrams * segment_size);
884                }
885                num_datagrams += 1;
886                coalesce = true;
887                pad_datagram = false;
888                datagram_start = buf.len();
889
890                debug_assert_eq!(
891                    datagram_start % segment_size,
892                    0,
893                    "datagrams in a GSO batch must be aligned to the segment size"
894                );
895            } else {
896                // We can append/coalesce the next packet into the current
897                // datagram.
898                // Finish current packet without adding extra padding
899                if let Some(builder) = builder_storage.take() {
900                    builder.finish_and_track(now, self, sent_frames.take(), buf);
901                }
902            }
903
904            debug_assert!(buf_capacity - buf.len() >= MIN_PACKET_SPACE);
905
906            //
907            // From here on, we've determined that a packet will definitely be sent.
908            //
909
910            if self.spaces[SpaceId::Initial].crypto.is_some()
911                && space_id == SpaceId::Handshake
912                && self.side.is_client()
913            {
914                // A client stops both sending and processing Initial packets when it
915                // sends its first Handshake packet.
916                self.discard_space(now, SpaceId::Initial);
917            }
918            if let Some(ref mut prev) = self.prev_crypto {
919                prev.update_unacked = false;
920            }
921
922            debug_assert!(
923                builder_storage.is_none() && sent_frames.is_none(),
924                "Previous packet must have been finished"
925            );
926
927            let builder = builder_storage.insert(PacketBuilder::new(
928                now,
929                space_id,
930                self.rem_cids.active(),
931                buf,
932                buf_capacity,
933                datagram_start,
934                ack_eliciting,
935                self,
936            )?);
937            coalesce = coalesce && !builder.short_header;
938
939            // Check if we should adjust coalescing for PQC
940            let should_adjust_coalescing = self
941                .pqc_state
942                .should_adjust_coalescing(buf.len() - datagram_start, space_id);
943
944            if should_adjust_coalescing {
945                coalesce = false;
946                trace!("Disabling coalescing for PQC handshake in {:?}", space_id);
947            }
948
949            // https://tools.ietf.org/html/draft-ietf-quic-transport-34#section-14.1
950            pad_datagram |=
951                space_id == SpaceId::Initial && (self.side.is_client() || ack_eliciting);
952
953            if close {
954                trace!("sending CONNECTION_CLOSE");
955                // Encode ACKs before the ConnectionClose message, to give the receiver
                // a better approximation of what data has been processed. This is
957                // especially important with ack delay, since the peer might not
958                // have gotten any other ACK for the data earlier on.
959                if !self.spaces[space_id].pending_acks.ranges().is_empty() {
960                    Self::populate_acks(
961                        now,
962                        self.receiving_ecn,
963                        &mut SentFrames::default(),
964                        &mut self.spaces[space_id],
965                        buf,
966                        &mut self.stats,
967                    );
968                }
969
                // Since there are only 64 ACK frames, there will always be enough space
                // to encode the ConnectionClose frame too. However, we still keep the
                // check here to prevent crashes if something changes.
973                debug_assert!(
974                    buf.len() + frame::ConnectionClose::SIZE_BOUND < builder.max_size,
975                    "ACKs should leave space for ConnectionClose"
976                );
977                if buf.len() + frame::ConnectionClose::SIZE_BOUND < builder.max_size {
978                    let max_frame_size = builder.max_size - buf.len();
979                    match self.state {
980                        State::Closed(state::Closed { ref reason }) => {
981                            if space_id == SpaceId::Data || reason.is_transport_layer() {
982                                reason.encode(buf, max_frame_size)
983                            } else {
984                                frame::ConnectionClose {
985                                    error_code: TransportErrorCode::APPLICATION_ERROR,
986                                    frame_type: None,
987                                    reason: Bytes::new(),
988                                }
989                                .encode(buf, max_frame_size)
990                            }
991                        }
992                        State::Draining => frame::ConnectionClose {
993                            error_code: TransportErrorCode::NO_ERROR,
994                            frame_type: None,
995                            reason: Bytes::new(),
996                        }
997                        .encode(buf, max_frame_size),
998                        _ => unreachable!(
999                            "tried to make a close packet when the connection wasn't closed"
1000                        ),
1001                    }
1002                }
1003                if space_id == self.highest_space {
1004                    // Don't send another close packet
1005                    self.close = false;
1006                    // `CONNECTION_CLOSE` is the final packet
1007                    break;
1008                } else {
1009                    // Send a close frame in every possible space for robustness, per RFC9000
1010                    // "Immediate Close during the Handshake". Don't bother trying to send anything
1011                    // else.
1012                    space_idx += 1;
1013                    continue;
1014                }
1015            }
1016
1017            // Send an off-path PATH_RESPONSE. Prioritized over on-path data to ensure that path
1018            // validation can occur while the link is saturated.
1019            if space_id == SpaceId::Data && num_datagrams == 1 {
1020                if let Some((token, remote)) = self.path_responses.pop_off_path(self.path.remote) {
1021                    // `unwrap` guaranteed to succeed because `builder_storage` was populated just
1022                    // above.
1023                    let mut builder = builder_storage.take().unwrap();
1024                    trace!("PATH_RESPONSE {:08x} (off-path)", token);
1025                    buf.write(frame::FrameType::PATH_RESPONSE);
1026                    buf.write(token);
1027                    self.stats.frame_tx.path_response += 1;
1028                    let min_size = self.pqc_state.min_initial_size();
1029                    builder.pad_to(min_size);
1030                    builder.finish_and_track(
1031                        now,
1032                        self,
1033                        Some(SentFrames {
1034                            non_retransmits: true,
1035                            ..SentFrames::default()
1036                        }),
1037                        buf,
1038                    );
1039                    self.stats.udp_tx.on_sent(1, buf.len());
1040
1041                    // Trace packet sent
1042                    #[cfg(feature = "trace")]
1043                    {
1044                        use crate::trace_packet_sent;
1045                        // Tracing imports handled by macros
1046                        trace_packet_sent!(
1047                            &self.event_log,
1048                            self.trace_context.trace_id(),
1049                            buf.len() as u32,
                            0 // Off-path PATH_RESPONSE; packet number not tracked here
1051                        );
1052                    }
1053
1054                    return Some(Transmit {
1055                        destination: remote,
1056                        size: buf.len(),
1057                        ecn: None,
1058                        segment_size: None,
1059                        src_ip: self.local_ip,
1060                    });
1061                }
1062            }
1063
1064            // Check for address observations to send
1065            if space_id == SpaceId::Data && self.address_discovery_state.is_some() {
1066                let peer_supports = self.peer_params.address_discovery.is_some();
1067
1068                if let Some(state) = &mut self.address_discovery_state {
1069                    if peer_supports {
1070                        if let Some(frame) = state.queue_observed_address_frame(0, self.path.remote)
1071                        {
1072                            self.spaces[space_id]
1073                                .pending
1074                                .outbound_observations
1075                                .push(frame);
1076                        }
1077                    }
1078                }
1079            }
1080
1081            let sent =
1082                self.populate_packet(now, space_id, buf, builder.max_size, builder.exact_number);
1083
1084            // ACK-only packets should only be sent when explicitly allowed. If we write them due to
1085            // any other reason, there is a bug which leads to one component announcing write
1086            // readiness while not writing any data. This degrades performance. The condition is
1087            // only checked if the full MTU is available and when potentially large fixed-size
1088            // frames aren't queued, so that lack of space in the datagram isn't the reason for just
1089            // writing ACKs.
1090            debug_assert!(
1091                !(sent.is_ack_only(&self.streams)
1092                    && !can_send.acks
1093                    && can_send.other
1094                    && (buf_capacity - builder.datagram_start) == self.path.current_mtu() as usize
1095                    && self.datagrams.outgoing.is_empty()),
1096                "SendableFrames was {can_send:?}, but only ACKs have been written"
1097            );
1098            pad_datagram |= sent.requires_padding;
1099
1100            if sent.largest_acked.is_some() {
1101                self.spaces[space_id].pending_acks.acks_sent();
1102                self.timers.stop(Timer::MaxAckDelay);
1103            }
1104
1105            // Keep information about the packet around until it gets finalized
1106            sent_frames = Some(sent);
1107
1108            // Don't increment space_idx.
1109            // We stay in the current space and check if there is more data to send.
1110        }
1111
1112        // Finish the last packet
1113        if let Some(mut builder) = builder_storage {
1114            if pad_datagram {
1115                let min_size = self.pqc_state.min_initial_size();
1116                builder.pad_to(min_size);
1117            }
1118
1119            // If this datagram is a loss probe and `segment_size` is larger than `INITIAL_MTU`,
1120            // then padding it to `segment_size` would risk failure to recover from a reduction in
1121            // path MTU.
1122            // Loss probes are the only packets for which we might grow `buf_capacity`
1123            // by less than `segment_size`.
1124            if pad_datagram_to_mtu && buf_capacity >= datagram_start + segment_size {
1125                builder.pad_to(segment_size as u16);
1126            }
1127
1128            let last_packet_number = builder.exact_number;
1129            builder.finish_and_track(now, self, sent_frames, buf);
1130            self.path
1131                .congestion
1132                .on_sent(now, buf.len() as u64, last_packet_number);
1133
1134            #[cfg(feature = "__qlog")]
1135            self.emit_qlog_recovery_metrics(now);
1136        }
1137
1138        self.app_limited = buf.is_empty() && !congestion_blocked;
1139
1140        // Send MTU probe if necessary
1141        if buf.is_empty() && self.state.is_established() {
1142            let space_id = SpaceId::Data;
1143            let probe_size = self
1144                .path
1145                .mtud
1146                .poll_transmit(now, self.packet_number_filter.peek(&self.spaces[space_id]))?;
1147
1148            let buf_capacity = probe_size as usize;
1149            buf.reserve(buf_capacity);
1150
1151            let mut builder = PacketBuilder::new(
1152                now,
1153                space_id,
1154                self.rem_cids.active(),
1155                buf,
1156                buf_capacity,
1157                0,
1158                true,
1159                self,
1160            )?;
1161
1162            // We implement MTU probes as ping packets padded up to the probe size
1163            buf.write(frame::FrameType::PING);
1164            self.stats.frame_tx.ping += 1;
1165
1166            // If supported by the peer, we want no delays to the probe's ACK
1167            if self.peer_supports_ack_frequency() {
1168                buf.write(frame::FrameType::IMMEDIATE_ACK);
1169                self.stats.frame_tx.immediate_ack += 1;
1170            }
1171
1172            builder.pad_to(probe_size);
1173            let sent_frames = SentFrames {
1174                non_retransmits: true,
1175                ..Default::default()
1176            };
1177            builder.finish_and_track(now, self, Some(sent_frames), buf);
1178
1179            self.stats.path.sent_plpmtud_probes += 1;
1180            num_datagrams = 1;
1181
1182            trace!(?probe_size, "writing MTUD probe");
1183        }
1184
1185        if buf.is_empty() {
1186            return None;
1187        }
1188
1189        trace!("sending {} bytes in {} datagrams", buf.len(), num_datagrams);
1190        self.path.total_sent = self.path.total_sent.saturating_add(buf.len() as u64);
1191
1192        self.stats.udp_tx.on_sent(num_datagrams as u64, buf.len());
1193
1194        // Trace packets sent
1195        #[cfg(feature = "trace")]
1196        {
1197            use crate::trace_packet_sent;
1198            // Tracing imports handled by macros
1199            // Log packet transmission (use highest packet number in transmission)
1200            let packet_num = self.spaces[SpaceId::Data]
1201                .next_packet_number
1202                .saturating_sub(1);
1203            trace_packet_sent!(
1204                &self.event_log,
1205                self.trace_context.trace_id(),
1206                buf.len() as u32,
1207                packet_num
1208            );
1209        }
1210
1211        Some(Transmit {
1212            destination: self.path.remote,
1213            size: buf.len(),
1214            ecn: if self.path.sending_ecn {
1215                Some(EcnCodepoint::Ect0)
1216            } else {
1217                None
1218            },
1219            segment_size: match num_datagrams {
1220                1 => None,
1221                _ => Some(segment_size),
1222            },
1223            src_ip: self.local_ip,
1224        })
1225    }
1226
1227    /// Send PUNCH_ME_NOW for coordination if necessary
1228    fn send_coordination_request(&mut self, _now: Instant, _buf: &mut Vec<u8>) -> Option<Transmit> {
        // Check whether the coordination state wants a PUNCH_ME_NOW request queued this round
1230        let nat = self.nat_traversal.as_mut()?;
1231        if !nat.should_send_punch_request() {
1232            return None;
1233        }
1234
1235        let coord = nat.coordination.as_ref()?;
1236        let round = coord.round;
1237        if coord.punch_targets.is_empty() {
1238            return None;
1239        }
1240
1241        trace!(
1242            "queuing PUNCH_ME_NOW round {} with {} targets",
1243            round,
1244            coord.punch_targets.len()
1245        );
1246
1247        // Enqueue one PunchMeNow frame per target (spec-compliant); normal send loop will encode
1248        for target in &coord.punch_targets {
1249            let punch = frame::PunchMeNow {
1250                round,
1251                paired_with_sequence_number: target.remote_sequence,
1252                address: target.remote_addr,
1253                target_peer_id: None,
1254            };
1255            self.spaces[SpaceId::Data].pending.punch_me_now.push(punch);
1256        }
1257
1258        // Mark request sent
1259        nat.mark_punch_request_sent();
1260
1261        // We don't need to craft a transmit here; frames will be sent by the normal writer
1262        None
1263    }
1264
1265    /// Send coordinated PATH_CHALLENGE for hole punching
1266    fn send_coordinated_path_challenge(
1267        &mut self,
1268        now: Instant,
1269        buf: &mut Vec<u8>,
1270    ) -> Option<Transmit> {
1271        // Check if it's time to start synchronized hole punching
1272        if let Some(nat_traversal) = &mut self.nat_traversal {
1273            if nat_traversal.should_start_punching(now) {
1274                nat_traversal.start_punching_phase(now);
1275            }
1276        }
1277
1278        // Get punch targets if we're in punching phase
1279        let (target_addr, challenge) = {
1280            let nat_traversal = self.nat_traversal.as_ref()?;
1281            match nat_traversal.get_coordination_phase() {
1282                Some(CoordinationPhase::Punching) => {
1283                    let targets = nat_traversal.get_punch_targets_from_coordination()?;
1284                    if targets.is_empty() {
1285                        return None;
1286                    }
1287                    // Send PATH_CHALLENGE to the first target (could be round-robin in future)
1288                    let target = &targets[0];
1289                    (target.remote_addr, target.challenge)
1290                }
1291                _ => return None,
1292            }
1293        };
1294
1295        debug_assert_eq!(
1296            self.highest_space,
1297            SpaceId::Data,
1298            "PATH_CHALLENGE queued without 1-RTT keys"
1299        );
1300
1301        buf.reserve(self.pqc_state.min_initial_size() as usize);
1302        let buf_capacity = buf.capacity();
1303
1304        let mut builder = PacketBuilder::new(
1305            now,
1306            SpaceId::Data,
1307            self.rem_cids.active(),
1308            buf,
1309            buf_capacity,
1310            0,
1311            false,
1312            self,
1313        )?;
1314
1315        trace!(
1316            "sending coordinated PATH_CHALLENGE {:08x} to {}",
1317            challenge, target_addr
1318        );
1319        buf.write(frame::FrameType::PATH_CHALLENGE);
1320        buf.write(challenge);
1321        self.stats.frame_tx.path_challenge += 1;
1322
1323        let min_size = self.pqc_state.min_initial_size();
1324        builder.pad_to(min_size);
1325        builder.finish_and_track(now, self, None, buf);
1326
1327        // Mark coordination as validating after packet is built
1328        if let Some(nat_traversal) = &mut self.nat_traversal {
1329            nat_traversal.mark_coordination_validating();
1330        }
1331
1332        Some(Transmit {
1333            destination: target_addr,
1334            size: buf.len(),
1335            ecn: if self.path.sending_ecn {
1336                Some(EcnCodepoint::Ect0)
1337            } else {
1338                None
1339            },
1340            segment_size: None,
1341            src_ip: self.local_ip,
1342        })
1343    }
1344
1345    /// Send PATH_CHALLENGE for NAT traversal candidates if necessary
1346    fn send_nat_traversal_challenge(
1347        &mut self,
1348        now: Instant,
1349        buf: &mut Vec<u8>,
1350    ) -> Option<Transmit> {
1351        // Priority 1: Coordination protocol requests
1352        if let Some(request) = self.send_coordination_request(now, buf) {
1353            return Some(request);
1354        }
1355
1356        // Priority 2: Coordinated hole punching
1357        if let Some(punch) = self.send_coordinated_path_challenge(now, buf) {
1358            return Some(punch);
1359        }
1360
1361        // Priority 3: Regular candidate validation (fallback)
1362        let (remote_addr, remote_sequence) = {
1363            let nat_traversal = self.nat_traversal.as_ref()?;
1364            let candidates = nat_traversal.get_validation_candidates();
1365            if candidates.is_empty() {
1366                return None;
1367            }
1368            // Get the highest priority candidate
1369            let (sequence, candidate) = candidates[0];
1370            (candidate.address, sequence)
1371        };
1372
1373        let challenge = self.rng.r#gen::<u64>();
1374
1375        // Start validation for this candidate
1376        if let Err(e) =
1377            self.nat_traversal
1378                .as_mut()?
1379                .start_validation(remote_sequence, challenge, now)
1380        {
1381            warn!("Failed to start NAT traversal validation: {}", e);
1382            return None;
1383        }
1384
1385        debug_assert_eq!(
1386            self.highest_space,
1387            SpaceId::Data,
1388            "PATH_CHALLENGE queued without 1-RTT keys"
1389        );
1390
1391        buf.reserve(self.pqc_state.min_initial_size() as usize);
1392        let buf_capacity = buf.capacity();
1393
1394        // Use current connection ID for NAT traversal PATH_CHALLENGE
1395        let mut builder = PacketBuilder::new(
1396            now,
1397            SpaceId::Data,
1398            self.rem_cids.active(),
1399            buf,
1400            buf_capacity,
1401            0,
1402            false,
1403            self,
1404        )?;
1405
1406        trace!(
1407            "sending PATH_CHALLENGE {:08x} to NAT candidate {}",
1408            challenge, remote_addr
1409        );
1410        buf.write(frame::FrameType::PATH_CHALLENGE);
1411        buf.write(challenge);
1412        self.stats.frame_tx.path_challenge += 1;
1413
1414        // Datagrams carrying a PATH_CHALLENGE frame must be expanded to at least 1200 bytes
1415        let min_size = self.pqc_state.min_initial_size();
1416        builder.pad_to(min_size);
1417
1418        builder.finish_and_track(now, self, None, buf);
1419
1420        Some(Transmit {
1421            destination: remote_addr,
1422            size: buf.len(),
1423            ecn: if self.path.sending_ecn {
1424                Some(EcnCodepoint::Ect0)
1425            } else {
1426                None
1427            },
1428            segment_size: None,
1429            src_ip: self.local_ip,
1430        })
1431    }
1432
1433    /// Send PATH_CHALLENGE for a previous path if necessary
1434    fn send_path_challenge(&mut self, now: Instant, buf: &mut Vec<u8>) -> Option<Transmit> {
1435        let (prev_cid, prev_path) = self.prev_path.as_mut()?;
1436        if !prev_path.challenge_pending {
1437            return None;
1438        }
1439        prev_path.challenge_pending = false;
1440        let token = prev_path
1441            .challenge
1442            .expect("previous path challenge pending without token");
1443        let destination = prev_path.remote;
1444        debug_assert_eq!(
1445            self.highest_space,
1446            SpaceId::Data,
1447            "PATH_CHALLENGE queued without 1-RTT keys"
1448        );
1449        buf.reserve(self.pqc_state.min_initial_size() as usize);
1450
1451        let buf_capacity = buf.capacity();
1452
1453        // Use the previous CID to avoid linking the new path with the previous path. We
1454        // don't bother accounting for possible retirement of that prev_cid because this is
1455        // sent once, immediately after migration, when the CID is known to be valid. Even
1456        // if a post-migration packet caused the CID to be retired, it's fair to pretend
1457        // this is sent first.
1458        let mut builder = PacketBuilder::new(
1459            now,
1460            SpaceId::Data,
1461            *prev_cid,
1462            buf,
1463            buf_capacity,
1464            0,
1465            false,
1466            self,
1467        )?;
1468        trace!("validating previous path with PATH_CHALLENGE {:08x}", token);
1469        buf.write(frame::FrameType::PATH_CHALLENGE);
1470        buf.write(token);
1471        self.stats.frame_tx.path_challenge += 1;
1472
1473        // An endpoint MUST expand datagrams that contain a PATH_CHALLENGE frame
1474        // to at least the smallest allowed maximum datagram size of 1200 bytes,
1475        // unless the anti-amplification limit for the path does not permit
1476        // sending a datagram of this size
1477        let min_size = self.pqc_state.min_initial_size();
1478        builder.pad_to(min_size);
1479
1480        builder.finish(self, buf);
1481        self.stats.udp_tx.on_sent(1, buf.len());
1482
1483        Some(Transmit {
1484            destination,
1485            size: buf.len(),
1486            ecn: None,
1487            segment_size: None,
1488            src_ip: self.local_ip,
1489        })
1490    }
1491
1492    /// Indicate what types of frames are ready to send for the given space
1493    fn space_can_send(&self, space_id: SpaceId, frame_space_1rtt: usize) -> SendableFrames {
1494        if self.spaces[space_id].crypto.is_none()
1495            && (space_id != SpaceId::Data
1496                || self.zero_rtt_crypto.is_none()
1497                || self.side.is_server())
1498        {
1499            // No keys available for this space
1500            return SendableFrames::empty();
1501        }
1502        let mut can_send = self.spaces[space_id].can_send(&self.streams);
1503        if space_id == SpaceId::Data {
1504            can_send.other |= self.can_send_1rtt(frame_space_1rtt);
1505        }
1506        can_send
1507    }
1508
1509    /// Process `ConnectionEvent`s generated by the associated `Endpoint`
1510    ///
1511    /// Will execute protocol logic upon receipt of a connection event, in turn preparing signals
1512    /// (including application `Event`s, `EndpointEvent`s and outgoing datagrams) that should be
1513    /// extracted through the relevant methods.
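    ///
    /// # Example
    ///
    /// A minimal sketch of the intended drive pattern, assuming the quinn-style polling
    /// accessors (`poll`, `poll_endpoint_events`, `poll_transmit`) defined elsewhere on
    /// this type; `event` is an event the endpoint routed to this connection and `now` is
    /// the caller's current time:
    ///
    /// ```ignore
    /// // Feed the event in...
    /// conn.handle_event(event);
    /// // ...then drain everything it may have produced or unblocked.
    /// while let Some(app_event) = conn.poll() { /* deliver to the application */ }
    /// while let Some(ep_event) = conn.poll_endpoint_events() { /* hand back to the endpoint */ }
    /// let mut buf = Vec::new();
    /// while let Some(transmit) = conn.poll_transmit(now, 1, &mut buf) {
    ///     // send `buf[..transmit.size]` to `transmit.destination`
    ///     buf.clear();
    /// }
    /// ```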
1514    pub fn handle_event(&mut self, event: ConnectionEvent) {
1515        use ConnectionEventInner::*;
1516        match event.0 {
1517            Datagram(DatagramConnectionEvent {
1518                now,
1519                remote,
1520                ecn,
1521                first_decode,
1522                remaining,
1523            }) => {
1524                // If this packet could initiate a migration and we're a client or a server that
1525                // forbids migration, drop the datagram. This could be relaxed to heuristically
1526                // permit NAT-rebinding-like migration.
1527                if remote != self.path.remote && !self.side.remote_may_migrate() {
1528                    trace!("discarding packet from unrecognized peer {}", remote);
1529                    return;
1530                }
1531
1532                let was_anti_amplification_blocked = self.path.anti_amplification_blocked(1);
1533
1534                self.stats.udp_rx.datagrams += 1;
1535                self.stats.udp_rx.bytes += first_decode.len() as u64;
1536                let data_len = first_decode.len();
1537
1538                self.handle_decode(now, remote, ecn, first_decode);
1539                // The current `path` might have changed inside `handle_decode`,
1540                // since the packet could have triggered a migration. Make sure
1541                // the data received is accounted to the most recent path by accessing
1542                // `path` after `handle_decode`.
1543                self.path.total_recvd = self.path.total_recvd.saturating_add(data_len as u64);
1544
1545                if let Some(data) = remaining {
1546                    self.stats.udp_rx.bytes += data.len() as u64;
1547                    self.handle_coalesced(now, remote, ecn, data);
1548                }
1549
1550                #[cfg(feature = "__qlog")]
1551                self.emit_qlog_recovery_metrics(now);
1552
1553                if was_anti_amplification_blocked {
1554                    // A prior attempt to set the loss detection timer may have failed due to
1555                    // anti-amplification, so ensure it's set now. Prevents a handshake deadlock if
1556                    // the server's first flight is lost.
1557                    self.set_loss_detection_timer(now);
1558                }
1559            }
1560            NewIdentifiers(ids, now) => {
1561                self.local_cid_state.new_cids(&ids, now);
1562                ids.into_iter().rev().for_each(|frame| {
1563                    self.spaces[SpaceId::Data].pending.new_cids.push(frame);
1564                });
1565                // Update Timer::PushNewCid
1566                if self.timers.get(Timer::PushNewCid).is_none_or(|x| x <= now) {
1567                    self.reset_cid_retirement();
1568                }
1569            }
1570            QueueAddAddress(add) => {
1571                // Enqueue AddAddress frame for transmission
1572                self.spaces[SpaceId::Data].pending.add_addresses.push(add);
1573            }
1574            QueuePunchMeNow(punch) => {
1575                // Enqueue PunchMeNow frame for transmission
1576                self.spaces[SpaceId::Data].pending.punch_me_now.push(punch);
1577            }
1578        }
1579    }
1580
1581    /// Process timer expirations
1582    ///
1583    /// Executes protocol logic, potentially preparing signals (including application `Event`s,
1584    /// `EndpointEvent`s and outgoing datagrams) that should be extracted through the relevant
1585    /// methods.
1586    ///
1587    /// It is most efficient to call this immediately after the system clock reaches the latest
1588    /// `Instant` that was output by `poll_timeout`; however spurious extra calls will simply
1589    /// no-op and therefore are safe.
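    ///
    /// # Example
    ///
    /// A sketch of timer handling, assuming the quinn-style `poll_timeout` accessor is
    /// available on this type:
    ///
    /// ```ignore
    /// if let Some(deadline) = conn.poll_timeout() {
    ///     if now >= deadline {
    ///         // Fires every expired timer; calling with none expired is a harmless no-op.
    ///         conn.handle_timeout(now);
    ///     }
    /// }
    /// ```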
1590    pub fn handle_timeout(&mut self, now: Instant) {
1591        for &timer in &Timer::VALUES {
1592            if !self.timers.is_expired(timer, now) {
1593                continue;
1594            }
1595            self.timers.stop(timer);
1596            trace!(timer = ?timer, "timeout");
1597            match timer {
1598                Timer::Close => {
1599                    self.state = State::Drained;
1600                    self.endpoint_events.push_back(EndpointEventInner::Drained);
1601                }
1602                Timer::Idle => {
1603                    self.kill(ConnectionError::TimedOut);
1604                }
1605                Timer::KeepAlive => {
1606                    trace!("sending keep-alive");
1607                    self.ping();
1608                }
1609                Timer::LossDetection => {
1610                    self.on_loss_detection_timeout(now);
1611
1612                    #[cfg(feature = "__qlog")]
1613                    self.emit_qlog_recovery_metrics(now);
1614                }
1615                Timer::KeyDiscard => {
1616                    self.zero_rtt_crypto = None;
1617                    self.prev_crypto = None;
1618                }
1619                Timer::PathValidation => {
1620                    debug!("path validation failed");
1621                    if let Some((_, prev)) = self.prev_path.take() {
1622                        self.path = prev;
1623                    }
1624                    self.path.challenge = None;
1625                    self.path.challenge_pending = false;
1626                }
1627                Timer::Pacing => trace!("pacing timer expired"),
1628                Timer::NatTraversal => {
1629                    self.handle_nat_traversal_timeout(now);
1630                }
1631                Timer::PushNewCid => {
1632                    // Update `retire_prior_to` field in NEW_CONNECTION_ID frame
1633                    let num_new_cid = self.local_cid_state.on_cid_timeout().into();
1634                    if !self.state.is_closed() {
1635                        trace!(
1636                            "push a new cid to peer RETIRE_PRIOR_TO field {}",
1637                            self.local_cid_state.retire_prior_to()
1638                        );
1639                        self.endpoint_events
1640                            .push_back(EndpointEventInner::NeedIdentifiers(now, num_new_cid));
1641                    }
1642                }
1643                Timer::MaxAckDelay => {
1644                    trace!("max ack delay reached");
1645                    // This timer is only armed in the Data space
1646                    self.spaces[SpaceId::Data]
1647                        .pending_acks
1648                        .on_max_ack_delay_timeout()
1649                }
1650            }
1651        }
1652    }
1653
1654    /// Close a connection immediately
1655    ///
1656    /// This does not ensure delivery of outstanding data. It is the application's responsibility to
1657    /// call this only when all important communications have been completed, e.g. by calling
1658    /// [`SendStream::finish`] on outstanding streams and waiting for the corresponding
1659    /// [`StreamEvent::Finished`] event.
1660    ///
1661    /// If [`Streams::send_streams`] returns 0, all outstanding stream data has been
1662    /// delivered. There may still be data from the peer that has not been received.
1663    ///
1664    /// [`StreamEvent::Finished`]: crate::StreamEvent::Finished
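    ///
    /// # Example
    ///
    /// A sketch of an orderly shutdown, assuming the quinn-style `send_stream` accessor and
    /// that `stream_id` names an open send stream; `0` is the application's "no error" code:
    ///
    /// ```ignore
    /// let _ = conn.send_stream(stream_id).finish();
    /// // ... wait for the corresponding StreamEvent::Finished ...
    /// conn.close(now, VarInt::from_u32(0), Bytes::from_static(b"bye"));
    /// ```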
1665    pub fn close(&mut self, now: Instant, error_code: VarInt, reason: Bytes) {
1666        self.close_inner(
1667            now,
1668            Close::Application(frame::ApplicationClose { error_code, reason }),
1669        )
1670    }
1671
1672    fn close_inner(&mut self, now: Instant, reason: Close) {
1673        let was_closed = self.state.is_closed();
1674        if !was_closed {
1675            self.close_common();
1676            self.set_close_timer(now);
1677            self.close = true;
1678            self.state = State::Closed(state::Closed { reason });
1679        }
1680    }
1681
1682    /// Send and receive unreliable application datagrams
1683    pub fn datagrams(&mut self) -> Datagrams<'_> {
1684        Datagrams { conn: self }
1685    }
1686
1687    /// Returns connection statistics
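    ///
    /// # Example
    ///
    /// A sketch of logging a few path metrics; the fields shown are filled in by this method:
    ///
    /// ```ignore
    /// let stats = conn.stats();
    /// println!(
    ///     "rtt={:?} cwnd={} mtu={}",
    ///     stats.path.rtt, stats.path.cwnd, stats.path.current_mtu,
    /// );
    /// ```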
1688    pub fn stats(&self) -> ConnectionStats {
1689        let mut stats = self.stats;
1690        stats.path.rtt = self.path.rtt.get();
1691        stats.path.cwnd = self.path.congestion.window();
1692        stats.path.current_mtu = self.path.mtud.current_mtu();
1693
1694        stats
1695    }
1696
1697    /// Set the bound peer identity for token v2 issuance.
1698    pub fn set_token_binding_peer_id(&mut self, pid: PeerId) {
1699        self.peer_id_for_tokens = Some(pid);
1700    }
1701
1702    /// Control whether NEW_TOKEN frames should be delayed until binding completes.
1703    pub fn set_delay_new_token_until_binding(&mut self, v: bool) {
1704        self.delay_new_token_until_binding = v;
1705    }
1706
1707    /// Ping the remote endpoint
1708    ///
1709    /// Causes an ACK-eliciting packet to be transmitted.
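    ///
    /// # Example
    ///
    /// A sketch of an application-driven liveness probe, assuming the caller tracks its own
    /// inactivity period (`idle_for` and `app_keep_alive_interval` are hypothetical
    /// application-side bookkeeping):
    ///
    /// ```ignore
    /// if idle_for > app_keep_alive_interval {
    ///     conn.ping();
    /// }
    /// ```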
1710    pub fn ping(&mut self) {
1711        self.spaces[self.highest_space].ping_pending = true;
1712    }
1713
1714    /// Returns true if post-quantum algorithms are in use for this connection.
1715    pub(crate) fn is_pqc(&self) -> bool {
1716        self.pqc_state.using_pqc
1717    }
1718
1719    /// Update traffic keys spontaneously
1720    ///
1721    /// This can be useful for testing key updates, as they otherwise only happen infrequently.
1722    pub fn force_key_update(&mut self) {
1723        if !self.state.is_established() {
1724            debug!("ignoring forced key update in illegal state");
1725            return;
1726        }
1727        if self.prev_crypto.is_some() {
1728            // We already just updated, or are currently updating, the keys. Concurrent key updates
1729            // are illegal.
1730            debug!("ignoring redundant forced key update");
1731            return;
1732        }
1733        self.update_keys(None, false);
1734    }
1735
1736    /// Get a session reference
1737    pub fn crypto_session(&self) -> &dyn crypto::Session {
1738        &*self.crypto
1739    }
1740
1741    /// Whether the connection is in the process of being established
1742    ///
1743    /// If this returns `false`, the connection may be either established or closed, signaled by the
1744    /// emission of a `Connected` or `ConnectionLost` message respectively.
1745    pub fn is_handshaking(&self) -> bool {
1746        self.state.is_handshake()
1747    }
1748
1749    /// Whether the connection is closed
1750    ///
1751    /// Closed connections cannot transport any further data. A connection becomes closed when
1752    /// either peer application intentionally closes it, or when either transport layer detects an
1753    /// error such as a time-out or certificate validation failure.
1754    ///
1755    /// A `ConnectionLost` event is emitted with details when the connection becomes closed.
1756    pub fn is_closed(&self) -> bool {
1757        self.state.is_closed()
1758    }
1759
1760    /// Whether there is no longer any need to keep the connection around
1761    ///
1762    /// Closed connections become drained after a brief timeout to absorb any remaining in-flight
1763    /// packets from the peer. All drained connections have been closed.
1764    pub fn is_drained(&self) -> bool {
1765        self.state.is_drained()
1766    }
1767
1768    /// For clients, if the peer accepted the 0-RTT data packets
1769    ///
1770    /// The value is meaningless until after the handshake completes.
1771    pub fn accepted_0rtt(&self) -> bool {
1772        self.accepted_0rtt
1773    }
1774
1775    /// Whether 0-RTT is/was possible during the handshake
1776    pub fn has_0rtt(&self) -> bool {
1777        self.zero_rtt_enabled
1778    }
1779
1780    /// Whether there are any pending retransmits
1781    pub fn has_pending_retransmits(&self) -> bool {
1782        !self.spaces[SpaceId::Data].pending.is_empty(&self.streams)
1783    }
1784
1785    /// Look up whether we're the client or server of this Connection
1786    pub fn side(&self) -> Side {
1787        self.side.side()
1788    }
1789
1790    /// The latest socket address for this connection's peer
1791    pub fn remote_address(&self) -> SocketAddr {
1792        self.path.remote
1793    }
1794
1795    /// The local IP address which was used when the peer established
1796    /// the connection
1797    ///
1798    /// This can be different from the address the endpoint is bound to, in case
1799    /// the endpoint is bound to a wildcard address like `0.0.0.0` or `::`.
1800    ///
1801    /// This will return `None` for clients, or when no `local_ip` was passed to
1802    /// the endpoint's handle method for the datagrams establishing this
1803    /// connection.
1804    pub fn local_ip(&self) -> Option<IpAddr> {
1805        self.local_ip
1806    }
1807
1808    /// Current best estimate of this connection's latency (round-trip-time)
1809    pub fn rtt(&self) -> Duration {
1810        self.path.rtt.get()
1811    }
1812
1813    /// Current state of this connection's congestion controller, for debugging purposes
1814    pub fn congestion_state(&self) -> &dyn Controller {
1815        self.path.congestion.as_ref()
1816    }
1817
1818    /// Resets path-specific settings.
1819    ///
1820    /// This will force-reset several subsystems related to a specific network path.
1821    /// Currently this is the congestion controller, round-trip estimator, and the MTU
1822    /// discovery.
1823    ///
1824    /// This is useful when it is known the underlying network path has changed and the old
1825    /// state of these subsystems is no longer valid or optimal. In this case it might be
1826    /// faster or reduce loss to settle on optimal values by restarting from the initial
1827    /// configuration in the [`TransportConfig`].
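    ///
    /// # Example
    ///
    /// A sketch of resetting path state when the application learns the underlying network
    /// changed (e.g. a Wi-Fi to cellular handover); `network_interface_changed` is a
    /// hypothetical application-side check and `now` is the caller's current time:
    ///
    /// ```ignore
    /// if network_interface_changed() {
    ///     conn.path_changed(now);
    /// }
    /// ```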
1828    pub fn path_changed(&mut self, now: Instant) {
1829        self.path.reset(now, &self.config);
1830    }
1831
1832    /// Modify the number of remotely initiated streams that may be concurrently open
1833    ///
1834    /// No streams may be opened by the peer unless fewer than `count` are already open. Large
1835    /// `count`s increase both minimum and worst-case memory consumption.
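    ///
    /// # Example
    ///
    /// A sketch of raising the limit on peer-initiated bidirectional streams to 256,
    /// assuming `Dir::Bi` names the bidirectional direction:
    ///
    /// ```ignore
    /// conn.set_max_concurrent_streams(Dir::Bi, VarInt::from_u32(256));
    /// ```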
1836    pub fn set_max_concurrent_streams(&mut self, dir: Dir, count: VarInt) {
1837        self.streams.set_max_concurrent(dir, count);
1838        // If the limit was reduced, then a flow control update previously deemed insignificant may
1839        // now be significant.
1840        let pending = &mut self.spaces[SpaceId::Data].pending;
1841        self.streams.queue_max_stream_id(pending);
1842    }
1843
1844    /// Current number of remotely initiated streams that may be concurrently open
1845    ///
1846    /// If the target for this limit is reduced using [`set_max_concurrent_streams`](Self::set_max_concurrent_streams),
1847    /// it will not change immediately, even if fewer streams are open. Instead, it will
1848    /// decrement by one for each time a remotely initiated stream of matching directionality is closed.
1849    pub fn max_concurrent_streams(&self, dir: Dir) -> u64 {
1850        self.streams.max_concurrent(dir)
1851    }
1852
1853    /// See [`TransportConfig::receive_window()`]
1854    pub fn set_receive_window(&mut self, receive_window: VarInt) {
1855        if self.streams.set_receive_window(receive_window) {
1856            self.spaces[SpaceId::Data].pending.max_data = true;
1857        }
1858    }
1859
1860    /// Enable or disable address discovery for this connection
1861    pub fn set_address_discovery_enabled(&mut self, enabled: bool) {
1862        if let Some(ref mut state) = self.address_discovery_state {
1863            state.enabled = enabled;
1864        }
1865    }
1866
1867    /// Check if address discovery is enabled for this connection
1868    pub fn address_discovery_enabled(&self) -> bool {
1869        self.address_discovery_state
1870            .as_ref()
1871            .is_some_and(|state| state.enabled)
1872    }
1873
1874    /// Get the observed address for this connection
1875    ///
1876    /// Returns the address that the remote peer has observed for this connection,
1877    /// or None if no OBSERVED_ADDRESS frame has been received yet.
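    ///
    /// # Example
    ///
    /// A sketch of surfacing the peer-observed address, e.g. to advertise it as a
    /// server-reflexive candidate for NAT traversal:
    ///
    /// ```ignore
    /// if let Some(addr) = conn.observed_address() {
    ///     println!("peer observes us at {addr}");
    /// }
    /// ```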
1878    pub fn observed_address(&self) -> Option<SocketAddr> {
1879        self.address_discovery_state
1880            .as_ref()
1881            .and_then(|state| state.get_observed_address(0)) // Use path ID 0 for primary path
1882    }
1883
1884    /// Get the address discovery state (internal use)
1885    #[allow(dead_code)]
1886    pub(crate) fn address_discovery_state(&self) -> Option<&AddressDiscoveryState> {
1887        self.address_discovery_state.as_ref()
1888    }
1889
1890    fn on_ack_received(
1891        &mut self,
1892        now: Instant,
1893        space: SpaceId,
1894        ack: frame::Ack,
1895    ) -> Result<(), TransportError> {
1896        if ack.largest >= self.spaces[space].next_packet_number {
1897            return Err(TransportError::PROTOCOL_VIOLATION("unsent packet acked"));
1898        }
1899        let new_largest = {
1900            let space = &mut self.spaces[space];
1901            if space.largest_acked_packet.is_none_or(|pn| ack.largest > pn) {
1902                space.largest_acked_packet = Some(ack.largest);
1903                if let Some(info) = space.sent_packets.get(&ack.largest) {
1904                    // This should always succeed, but a misbehaving peer might ACK a packet we
1905                    // haven't sent. At worst, that will result in us spuriously reducing the
1906                    // congestion window.
1907                    space.largest_acked_packet_sent = info.time_sent;
1908                }
1909                true
1910            } else {
1911                false
1912            }
1913        };
1914
1915        // Avoid DoS from unreasonably huge ack ranges by filtering out just the new acks.
1916        let mut newly_acked = ArrayRangeSet::new();
1917        for range in ack.iter() {
1918            self.packet_number_filter.check_ack(space, range.clone())?;
1919            for (&pn, _) in self.spaces[space].sent_packets.range(range) {
1920                newly_acked.insert_one(pn);
1921            }
1922        }
1923
1924        if newly_acked.is_empty() {
1925            return Ok(());
1926        }
1927
1928        let mut ack_eliciting_acked = false;
1929        for packet in newly_acked.elts() {
1930            if let Some(info) = self.spaces[space].take(packet) {
1931                if let Some(acked) = info.largest_acked {
1932                    // Assume ACKs for all packets below the largest acknowledged in `packet` have
1933                    // been received. This can cause the peer to spuriously retransmit if some of
1934                    // our earlier ACKs were lost, but allows for simpler state tracking. See
1935                    // discussion at
1936                    // https://www.rfc-editor.org/rfc/rfc9000.html#name-limiting-ranges-by-tracking
1937                    self.spaces[space].pending_acks.subtract_below(acked);
1938                }
1939                ack_eliciting_acked |= info.ack_eliciting;
1940
1941                // Notify MTU discovery that a packet was acked, because it might be an MTU probe
1942                let mtu_updated = self.path.mtud.on_acked(space, packet, info.size);
1943                if mtu_updated {
1944                    self.path
1945                        .congestion
1946                        .on_mtu_update(self.path.mtud.current_mtu());
1947                }
1948
1949                // Notify ack frequency that a packet was acked, because it might contain an ACK_FREQUENCY frame
1950                self.ack_frequency.on_acked(packet);
1951
1952                self.on_packet_acked(now, packet, info);
1953            }
1954        }
1955
1956        self.path.congestion.on_end_acks(
1957            now,
1958            self.path.in_flight.bytes,
1959            self.app_limited,
1960            self.spaces[space].largest_acked_packet,
1961        );
1962
1963        if new_largest && ack_eliciting_acked {
1964            let ack_delay = if space != SpaceId::Data {
1965                Duration::from_micros(0)
1966            } else {
1967                cmp::min(
1968                    self.ack_frequency.peer_max_ack_delay,
1969                    Duration::from_micros(ack.delay << self.peer_params.ack_delay_exponent.0),
1970                )
1971            };
1972            let rtt = instant_saturating_sub(now, self.spaces[space].largest_acked_packet_sent);
1973            self.path.rtt.update(ack_delay, rtt);
1974            if self.path.first_packet_after_rtt_sample.is_none() {
1975                self.path.first_packet_after_rtt_sample =
1976                    Some((space, self.spaces[space].next_packet_number));
1977            }
1978        }
1979
1980        // Must be called before crypto/pto_count are clobbered
1981        self.detect_lost_packets(now, space, true);
1982
1983        if self.peer_completed_address_validation() {
1984            self.pto_count = 0;
1985        }
1986
1987        // Explicit congestion notification
1988        if self.path.sending_ecn {
1989            if let Some(ecn) = ack.ecn {
1990                // We only examine ECN counters from ACKs that we are certain we received in transmit
1991                // order, allowing us to compute an increase in ECN counts to compare against the number
1992                // of newly acked packets that remains well-defined in the presence of arbitrary packet
1993                // reordering.
1994                if new_largest {
1995                    let sent = self.spaces[space].largest_acked_packet_sent;
1996                    self.process_ecn(now, space, newly_acked.len() as u64, ecn, sent);
1997                }
1998            } else {
1999                // We always start out sending ECN, so any ack that doesn't acknowledge it disables it.
2000                debug!("ECN not acknowledged by peer");
2001                self.path.sending_ecn = false;
2002            }
2003        }
2004
2005        self.set_loss_detection_timer(now);
2006        Ok(())
2007    }
2008
2009    /// Process a new ECN block from an in-order ACK
2010    fn process_ecn(
2011        &mut self,
2012        now: Instant,
2013        space: SpaceId,
2014        newly_acked: u64,
2015        ecn: frame::EcnCounts,
2016        largest_sent_time: Instant,
2017    ) {
2018        match self.spaces[space].detect_ecn(newly_acked, ecn) {
2019            Err(e) => {
2020                debug!("halting ECN due to verification failure: {}", e);
2021                self.path.sending_ecn = false;
2022                // Wipe out the existing value because it might be garbage and could interfere with
2023                // future attempts to use ECN on new paths.
2024                self.spaces[space].ecn_feedback = frame::EcnCounts::ZERO;
2025            }
2026            Ok(false) => {}
2027            Ok(true) => {
2028                self.stats.path.congestion_events += 1;
2029                self.path
2030                    .congestion
2031                    .on_congestion_event(now, largest_sent_time, false, 0);
2032            }
2033        }
2034    }
2035
2036    // Not timing-aware, so it's safe to call this for inferred acks, such as those arising from
2037    // high-latency handshakes
2038    fn on_packet_acked(&mut self, now: Instant, pn: u64, info: SentPacket) {
2039        self.remove_in_flight(pn, &info);
2040        if info.ack_eliciting && self.path.challenge.is_none() {
2041            // Only pass ACKs to the congestion controller if we are not validating the current
2042            // path, so as to ignore any ACKs from older paths still coming in.
2043            self.path.congestion.on_ack(
2044                now,
2045                info.time_sent,
2046                info.size.into(),
2047                self.app_limited,
2048                &self.path.rtt,
2049            );
2050        }
2051
2052        // Update state for confirmed delivery of frames
2053        if let Some(retransmits) = info.retransmits.get() {
2054            for (id, _) in retransmits.reset_stream.iter() {
2055                self.streams.reset_acked(*id);
2056            }
2057        }
2058
2059        for frame in info.stream_frames {
2060            self.streams.received_ack_of(frame);
2061        }
2062    }
2063
2064    fn set_key_discard_timer(&mut self, now: Instant, space: SpaceId) {
2065        let start = if self.zero_rtt_crypto.is_some() {
2066            now
2067        } else {
2068            self.prev_crypto
2069                .as_ref()
2070                .expect("no previous keys")
2071                .end_packet
2072                .as_ref()
2073                .expect("update not acknowledged yet")
2074                .1
2075        };
2076        self.timers
2077            .set(Timer::KeyDiscard, start + self.pto(space) * 3);
2078    }
2079
2080    fn on_loss_detection_timeout(&mut self, now: Instant) {
2081        if let Some((_, pn_space)) = self.loss_time_and_space() {
2082            // Time threshold loss detection
2083            self.detect_lost_packets(now, pn_space, false);
2084            self.set_loss_detection_timer(now);
2085            return;
2086        }
2087
2088        let (_, space) = match self.pto_time_and_space(now) {
2089            Some(x) => x,
2090            None => {
2091                error!("PTO expired while unset");
2092                return;
2093            }
2094        };
2095        trace!(
2096            in_flight = self.path.in_flight.bytes,
2097            count = self.pto_count,
2098            ?space,
2099            "PTO fired"
2100        );
2101
2102        let count = match self.path.in_flight.ack_eliciting {
2103            // A PTO when we're not expecting any ACKs must be due to handshake anti-amplification
2104            // deadlock prevention
2105            0 => {
2106                debug_assert!(!self.peer_completed_address_validation());
2107                1
2108            }
2109            // Conventional loss probe
2110            _ => 2,
2111        };
2112        self.spaces[space].loss_probes = self.spaces[space].loss_probes.saturating_add(count);
2113        self.pto_count = self.pto_count.saturating_add(1);
2114        self.set_loss_detection_timer(now);
2115    }
2116
2117    fn detect_lost_packets(&mut self, now: Instant, pn_space: SpaceId, due_to_ack: bool) {
2118        let mut lost_packets = Vec::<u64>::new();
2119        let mut lost_mtu_probe = None;
2120        let in_flight_mtu_probe = self.path.mtud.in_flight_mtu_probe();
2121        let rtt = self.path.rtt.conservative();
2122        let loss_delay = cmp::max(rtt.mul_f32(self.config.time_threshold), TIMER_GRANULARITY);
2123
2124        // Packets sent before this time are deemed lost.
2125        let lost_send_time = now.checked_sub(loss_delay).unwrap();
2126        let largest_acked_packet = self.spaces[pn_space].largest_acked_packet.unwrap();
2127        let packet_threshold = self.config.packet_threshold as u64;
2128        let mut size_of_lost_packets = 0u64;
2129
2130        // InPersistentCongestion: Determine if all packets in the time period before the newest
2131        // lost packet, including the edges, are marked lost. PTO computation must always
2132        // include max ACK delay, i.e. operate as if in Data space (see RFC 9002 §7.6.1).
2133        let congestion_period =
2134            self.pto(SpaceId::Data) * self.config.persistent_congestion_threshold;
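        // Worked example: if the configured persistent_congestion_threshold is 3 (the RFC 9002
        // recommendation) and the Data-space PTO is 100ms, congestion_period is 300ms, so two
        // ACK-eliciting losses more than 300ms apart with no ACKed packet between them indicate
        // persistent congestion.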
2135        let mut persistent_congestion_start: Option<Instant> = None;
2136        let mut prev_packet = None;
2137        let mut in_persistent_congestion = false;
2138
2139        let space = &mut self.spaces[pn_space];
2140        space.loss_time = None;
2141
2142        for (&packet, info) in space.sent_packets.range(0..largest_acked_packet) {
2143            if prev_packet != Some(packet.wrapping_sub(1)) {
2144                // An intervening packet was acknowledged
2145                persistent_congestion_start = None;
2146            }
2147
2148            if info.time_sent <= lost_send_time || largest_acked_packet >= packet + packet_threshold
2149            {
2150                if Some(packet) == in_flight_mtu_probe {
2151                    // Lost MTU probes are not included in `lost_packets`, because they should not
2152                    // trigger a congestion control response
2153                    lost_mtu_probe = in_flight_mtu_probe;
2154                } else {
2155                    lost_packets.push(packet);
2156                    size_of_lost_packets += info.size as u64;
2157                    if info.ack_eliciting && due_to_ack {
2158                        match persistent_congestion_start {
2159                            // Two ACK-eliciting packets lost more than congestion_period apart, with no
2160                            // ACKed packets in between
2161                            Some(start) if info.time_sent - start > congestion_period => {
2162                                in_persistent_congestion = true;
2163                            }
2164                            // Persistent congestion must start after the first RTT sample
2165                            None if self
2166                                .path
2167                                .first_packet_after_rtt_sample
2168                                .is_some_and(|x| x < (pn_space, packet)) =>
2169                            {
2170                                persistent_congestion_start = Some(info.time_sent);
2171                            }
2172                            _ => {}
2173                        }
2174                    }
2175                }
2176            } else {
2177                let next_loss_time = info.time_sent + loss_delay;
2178                space.loss_time = Some(
2179                    space
2180                        .loss_time
2181                        .map_or(next_loss_time, |x| cmp::min(x, next_loss_time)),
2182                );
2183                persistent_congestion_start = None;
2184            }
2185
2186            prev_packet = Some(packet);
2187        }
2188
2189        // OnPacketsLost
2190        if let Some(largest_lost) = lost_packets.last().cloned() {
2191            let old_bytes_in_flight = self.path.in_flight.bytes;
2192            let largest_lost_sent = self.spaces[pn_space].sent_packets[&largest_lost].time_sent;
2193            self.lost_packets += lost_packets.len() as u64;
2194            self.stats.path.lost_packets += lost_packets.len() as u64;
2195            self.stats.path.lost_bytes += size_of_lost_packets;
2196            trace!(
2197                "packets lost: {:?}, bytes lost: {}",
2198                lost_packets, size_of_lost_packets
2199            );
2200
2201            for &packet in &lost_packets {
2202                let info = self.spaces[pn_space].take(packet).unwrap(); // safe: lost_packets is populated just above
2203                self.remove_in_flight(packet, &info);
2204                for frame in info.stream_frames {
2205                    self.streams.retransmit(frame);
2206                }
2207                self.spaces[pn_space].pending |= info.retransmits;
2208                self.path.mtud.on_non_probe_lost(packet, info.size);
2209            }
2210
2211            if self.path.mtud.black_hole_detected(now) {
2212                self.stats.path.black_holes_detected += 1;
2213                self.path
2214                    .congestion
2215                    .on_mtu_update(self.path.mtud.current_mtu());
2216                if let Some(max_datagram_size) = self.datagrams().max_size() {
2217                    self.datagrams.drop_oversized(max_datagram_size);
2218                }
2219            }
2220
2221            // Don't apply congestion penalty for lost ack-only packets
2222            let lost_ack_eliciting = old_bytes_in_flight != self.path.in_flight.bytes;
2223
2224            if lost_ack_eliciting {
2225                self.stats.path.congestion_events += 1;
2226                self.path.congestion.on_congestion_event(
2227                    now,
2228                    largest_lost_sent,
2229                    in_persistent_congestion,
2230                    size_of_lost_packets,
2231                );
2232            }
2233        }
2234
2235        // Handle a lost MTU probe
2236        if let Some(packet) = lost_mtu_probe {
2237            let info = self.spaces[SpaceId::Data].take(packet).unwrap(); // safe: lost_mtu_probe is omitted from lost_packets, and therefore must not have been removed yet
2238            self.remove_in_flight(packet, &info);
2239            self.path.mtud.on_probe_lost();
2240            self.stats.path.lost_plpmtud_probes += 1;
2241        }
2242    }
2243
2244    fn loss_time_and_space(&self) -> Option<(Instant, SpaceId)> {
2245        SpaceId::iter()
2246            .filter_map(|id| Some((self.spaces[id].loss_time?, id)))
2247            .min_by_key(|&(time, _)| time)
2248    }
2249
2250    fn pto_time_and_space(&self, now: Instant) -> Option<(Instant, SpaceId)> {
2251        let backoff = 2u32.pow(self.pto_count.min(MAX_BACKOFF_EXPONENT));
2252        let mut duration = self.path.rtt.pto_base() * backoff;
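        // e.g. pto_count = 3 gives a backoff factor of 2^3 = 8, so this PTO waits eight times the
        // base PTO before probing again; the exponent is capped at MAX_BACKOFF_EXPONENT.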
2253
2254        if self.path.in_flight.ack_eliciting == 0 {
2255            debug_assert!(!self.peer_completed_address_validation());
2256            let space = match self.highest_space {
2257                SpaceId::Handshake => SpaceId::Handshake,
2258                _ => SpaceId::Initial,
2259            };
2260            return Some((now + duration, space));
2261        }
2262
2263        let mut result = None;
2264        for space in SpaceId::iter() {
2265            if self.spaces[space].in_flight == 0 {
2266                continue;
2267            }
2268            if space == SpaceId::Data {
2269                // Skip ApplicationData until handshake completes.
2270                if self.is_handshaking() {
2271                    return result;
2272                }
2273                // Include max_ack_delay and backoff for ApplicationData.
2274                duration += self.ack_frequency.max_ack_delay_for_pto() * backoff;
2275            }
2276            let last_ack_eliciting = match self.spaces[space].time_of_last_ack_eliciting_packet {
2277                Some(time) => time,
2278                None => continue,
2279            };
2280            let pto = last_ack_eliciting + duration;
2281            if result.is_none_or(|(earliest_pto, _)| pto < earliest_pto) {
2282                result = Some((pto, space));
2283            }
2284        }
2285        result
2286    }
2287
2288    fn peer_completed_address_validation(&self) -> bool {
2289        if self.side.is_server() || self.state.is_closed() {
2290            return true;
2291        }
2292        // The server is guaranteed to have validated our address if any of our handshake or 1-RTT
2293        // packets are acknowledged or we've seen HANDSHAKE_DONE and discarded handshake keys.
2294        self.spaces[SpaceId::Handshake]
2295            .largest_acked_packet
2296            .is_some()
2297            || self.spaces[SpaceId::Data].largest_acked_packet.is_some()
2298            || (self.spaces[SpaceId::Data].crypto.is_some()
2299                && self.spaces[SpaceId::Handshake].crypto.is_none())
2300    }
2301
2302    fn set_loss_detection_timer(&mut self, now: Instant) {
2303        if self.state.is_closed() {
2304            // No loss detection takes place on closed connections, and `close_common` already
2305            // stopped the timer. Ensure we don't restart it inadvertently, e.g. in response to a
2306            // reordered packet being handled by state-insensitive code.
2307            return;
2308        }
2309
2310        if let Some((loss_time, _)) = self.loss_time_and_space() {
2311            // Time threshold loss detection.
2312            self.timers.set(Timer::LossDetection, loss_time);
2313            return;
2314        }
2315
2316        if self.path.anti_amplification_blocked(1) {
2317            // We wouldn't be able to send anything, so don't bother.
2318            self.timers.stop(Timer::LossDetection);
2319            return;
2320        }
2321
2322        if self.path.in_flight.ack_eliciting == 0 && self.peer_completed_address_validation() {
2323            // There is nothing to detect as lost, so no timer is set. However, the client needs to arm
2324            // the timer if the server might be blocked by the anti-amplification limit.
2325            self.timers.stop(Timer::LossDetection);
2326            return;
2327        }
2328
2329        // Determine which packet number space to arm the PTO for and compute its deadline
2331        if let Some((timeout, _)) = self.pto_time_and_space(now) {
2332            self.timers.set(Timer::LossDetection, timeout);
2333        } else {
2334            self.timers.stop(Timer::LossDetection);
2335        }
2336    }
2337
2338    /// Probe Timeout
2339    fn pto(&self, space: SpaceId) -> Duration {
2340        let max_ack_delay = match space {
2341            SpaceId::Initial | SpaceId::Handshake => Duration::ZERO,
2342            SpaceId::Data => self.ack_frequency.max_ack_delay_for_pto(),
2343        };
2344        self.path.rtt.pto_base() + max_ack_delay
2345    }
2346
2347    fn on_packet_authenticated(
2348        &mut self,
2349        now: Instant,
2350        space_id: SpaceId,
2351        ecn: Option<EcnCodepoint>,
2352        packet: Option<u64>,
2353        spin: bool,
2354        is_1rtt: bool,
2355    ) {
2356        self.total_authed_packets += 1;
2357        self.reset_keep_alive(now);
2358        self.reset_idle_timeout(now, space_id);
2359        self.permit_idle_reset = true;
2360        self.receiving_ecn |= ecn.is_some();
2361        if let Some(x) = ecn {
2362            let space = &mut self.spaces[space_id];
2363            space.ecn_counters += x;
2364
2365            if x.is_ce() {
2366                space.pending_acks.set_immediate_ack_required();
2367            }
2368        }
2369
2370        let packet = match packet {
2371            Some(x) => x,
2372            None => return,
2373        };
2374        if self.side.is_server() {
2375            if self.spaces[SpaceId::Initial].crypto.is_some() && space_id == SpaceId::Handshake {
2376                // A server stops sending and processing Initial packets when it receives its first Handshake packet.
2377                self.discard_space(now, SpaceId::Initial);
2378            }
2379            if self.zero_rtt_crypto.is_some() && is_1rtt {
2380                // Discard 0-RTT keys soon after receiving a 1-RTT packet
2381                self.set_key_discard_timer(now, space_id)
2382            }
2383        }
2384        let space = &mut self.spaces[space_id];
2385        space.pending_acks.insert_one(packet, now);
2386        if packet >= space.rx_packet {
2387            space.rx_packet = packet;
2388            // Update outgoing spin bit, inverting iff we're the client
2389            self.spin = self.side.is_client() ^ spin;
2390        }
2391    }
2392
2393    fn reset_idle_timeout(&mut self, now: Instant, space: SpaceId) {
2394        let timeout = match self.idle_timeout {
2395            None => return,
2396            Some(dur) => dur,
2397        };
2398        if self.state.is_closed() {
2399            self.timers.stop(Timer::Idle);
2400            return;
2401        }
2402        let dt = cmp::max(timeout, 3 * self.pto(space));
2403        self.timers.set(Timer::Idle, now + dt);
2404    }
2405
2406    fn reset_keep_alive(&mut self, now: Instant) {
2407        let interval = match self.config.keep_alive_interval {
2408            Some(x) if self.state.is_established() => x,
2409            _ => return,
2410        };
2411        self.timers.set(Timer::KeepAlive, now + interval);
2412    }
2413
2414    fn reset_cid_retirement(&mut self) {
2415        if let Some(t) = self.local_cid_state.next_timeout() {
2416            self.timers.set(Timer::PushNewCid, t);
2417        }
2418    }
2419
2420    /// Handle the already-decrypted first packet from the client
2421    ///
2422    /// Decrypting the first packet in the `Endpoint` allows stateless packet handling to be more
2423    /// efficient.
2424    pub(crate) fn handle_first_packet(
2425        &mut self,
2426        now: Instant,
2427        remote: SocketAddr,
2428        ecn: Option<EcnCodepoint>,
2429        packet_number: u64,
2430        packet: InitialPacket,
2431        remaining: Option<BytesMut>,
2432    ) -> Result<(), ConnectionError> {
2433        let span = trace_span!("first recv");
2434        let _guard = span.enter();
2435        debug_assert!(self.side.is_server());
2436        let len = packet.header_data.len() + packet.payload.len();
2437        self.path.total_recvd = len as u64;
2438
2439        match self.state {
2440            State::Handshake(ref mut state) => {
2441                state.expected_token = packet.header.token.clone();
2442            }
2443            _ => unreachable!("first packet must be delivered in Handshake state"),
2444        }
2445
2446        self.on_packet_authenticated(
2447            now,
2448            SpaceId::Initial,
2449            ecn,
2450            Some(packet_number),
2451            false,
2452            false,
2453        );
2454
2455        self.process_decrypted_packet(now, remote, Some(packet_number), packet.into())?;
2456        if let Some(data) = remaining {
2457            self.handle_coalesced(now, remote, ecn, data);
2458        }
2459
2460        #[cfg(feature = "__qlog")]
2461        self.emit_qlog_recovery_metrics(now);
2462
2463        Ok(())
2464    }
2465
2466    fn init_0rtt(&mut self) {
2467        let (header, packet) = match self.crypto.early_crypto() {
2468            Some(x) => x,
2469            None => return,
2470        };
2471        if self.side.is_client() {
2472            match self.crypto.transport_parameters() {
2473                Ok(params) => {
2474                    let params = params
2475                        .expect("crypto layer didn't supply transport parameters with ticket");
2476                    // Certain values must not be cached
2477                    let params = TransportParameters {
2478                        initial_src_cid: None,
2479                        original_dst_cid: None,
2480                        preferred_address: None,
2481                        retry_src_cid: None,
2482                        stateless_reset_token: None,
2483                        min_ack_delay: None,
2484                        ack_delay_exponent: TransportParameters::default().ack_delay_exponent,
2485                        max_ack_delay: TransportParameters::default().max_ack_delay,
2486                        ..params
2487                    };
2488                    self.set_peer_params(params);
2489                }
2490                Err(e) => {
2491                    error!("session ticket has malformed transport parameters: {}", e);
2492                    return;
2493                }
2494            }
2495        }
2496        trace!("0-RTT enabled");
2497        self.zero_rtt_enabled = true;
2498        self.zero_rtt_crypto = Some(ZeroRttCrypto { header, packet });
2499    }
2500
2501    fn read_crypto(
2502        &mut self,
2503        space: SpaceId,
2504        crypto: &frame::Crypto,
2505        payload_len: usize,
2506    ) -> Result<(), TransportError> {
2507        let expected = if !self.state.is_handshake() {
2508            SpaceId::Data
2509        } else if self.highest_space == SpaceId::Initial {
2510            SpaceId::Initial
2511        } else {
2512            // On the server, self.highest_space can be Data after receiving the client's first
2513            // flight, but we expect Handshake CRYPTO until the handshake is complete.
2514            SpaceId::Handshake
2515        };
2516        // We can't decrypt Handshake packets when highest_space is Initial, CRYPTO frames in 0-RTT
2517        // packets are illegal, and we don't process 1-RTT packets until the handshake is
2518        // complete. Therefore, we will never see CRYPTO data from a later-than-expected space.
2519        debug_assert!(space <= expected, "received out-of-order CRYPTO data");
2520
2521        let end = crypto.offset + crypto.data.len() as u64;
2522        if space < expected && end > self.spaces[space].crypto_stream.bytes_read() {
2523            warn!(
2524                "received new {:?} CRYPTO data when expecting {:?}",
2525                space, expected
2526            );
2527            return Err(TransportError::PROTOCOL_VIOLATION(
2528                "new data at unexpected encryption level",
2529            ));
2530        }
2531
2532        // Detect PQC usage from CRYPTO frame data before processing
2533        self.pqc_state.detect_pqc_from_crypto(&crypto.data, space);
2534
2535        // Check if we should trigger MTU discovery for PQC
2536        if self.pqc_state.should_trigger_mtu_discovery() {
2537            // Request larger MTU for PQC handshakes
2538            self.path
2539                .mtud
2540                .reset(self.pqc_state.min_initial_size(), self.config.min_mtu);
2541            trace!("Triggered MTU discovery for PQC handshake");
2542        }
2543
2544        let space = &mut self.spaces[space];
2545        let max = end.saturating_sub(space.crypto_stream.bytes_read());
2546        if max > self.config.crypto_buffer_size as u64 {
2547            return Err(TransportError::CRYPTO_BUFFER_EXCEEDED(""));
2548        }
2549
2550        space
2551            .crypto_stream
2552            .insert(crypto.offset, crypto.data.clone(), payload_len);
2553        while let Some(chunk) = space.crypto_stream.read(usize::MAX, true) {
2554            trace!("consumed {} CRYPTO bytes", chunk.bytes.len());
2555            if self.crypto.read_handshake(&chunk.bytes)? {
2556                self.events.push_back(Event::HandshakeDataReady);
2557            }
2558        }
2559
2560        Ok(())
2561    }
2562
2563    fn write_crypto(&mut self) {
2564        loop {
2565            let space = self.highest_space;
2566            let mut outgoing = Vec::new();
2567            if let Some(crypto) = self.crypto.write_handshake(&mut outgoing) {
2568                match space {
2569                    SpaceId::Initial => {
2570                        self.upgrade_crypto(SpaceId::Handshake, crypto);
2571                    }
2572                    SpaceId::Handshake => {
2573                        self.upgrade_crypto(SpaceId::Data, crypto);
2574                    }
2575                    _ => unreachable!("got updated secrets during 1-RTT"),
2576                }
2577            }
2578            if outgoing.is_empty() {
2579                if space == self.highest_space {
2580                    break;
2581                } else {
2582                    // Keys updated, check for more data to send
2583                    continue;
2584                }
2585            }
2586            let offset = self.spaces[space].crypto_offset;
2587            let outgoing = Bytes::from(outgoing);
2588            if let State::Handshake(ref mut state) = self.state {
2589                if space == SpaceId::Initial && offset == 0 && self.side.is_client() {
2590                    state.client_hello = Some(outgoing.clone());
2591                }
2592            }
2593            self.spaces[space].crypto_offset += outgoing.len() as u64;
2594            trace!("wrote {} {:?} CRYPTO bytes", outgoing.len(), space);
2595
2596            // Use PQC-aware fragmentation for large CRYPTO data
2597            let use_pqc_fragmentation = self.pqc_state.using_pqc && outgoing.len() > 1200;
2598
2599            if use_pqc_fragmentation {
2600                // Fragment large CRYPTO data for PQC handshakes
2601                let frames = self.pqc_state.packet_handler.fragment_crypto_data(
2602                    &outgoing,
2603                    offset,
2604                    self.pqc_state.min_initial_size() as usize,
2605                );
2606                for frame in frames {
2607                    self.spaces[space].pending.crypto.push_back(frame);
2608                }
2609            } else {
2610                // Normal CRYPTO frame for non-PQC or small data
2611                self.spaces[space].pending.crypto.push_back(frame::Crypto {
2612                    offset,
2613                    data: outgoing,
2614                });
2615            }
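            // Fragmentation sketch (illustrative, assuming a 4800-byte PQC handshake message
            // and 1200-byte fragments): the data would be queued as four CRYPTO frames at
            // offsets `offset`, `offset + 1200`, `offset + 2400` and `offset + 3600`, each
            // carrying the next slice so the peer's crypto stream can reassemble it in order.
            // The 1200-byte threshold above corresponds to QUIC's minimum datagram size.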
2616        }
2617    }
2618
2619    /// Switch to stronger cryptography during handshake
2620    fn upgrade_crypto(&mut self, space: SpaceId, crypto: Keys) {
2621        debug_assert!(
2622            self.spaces[space].crypto.is_none(),
2623            "already reached packet space {space:?}"
2624        );
2625        trace!("{:?} keys ready", space);
2626        if space == SpaceId::Data {
2627            // Precompute the first key update
2628            self.next_crypto = Some(
2629                self.crypto
2630                    .next_1rtt_keys()
2631                    .expect("handshake should be complete"),
2632            );
2633        }
2634
2635        self.spaces[space].crypto = Some(crypto);
2636        debug_assert!(space as usize > self.highest_space as usize);
2637        self.highest_space = space;
2638        if space == SpaceId::Data && self.side.is_client() {
2639            // Discard 0-RTT keys because 1-RTT keys are available.
2640            self.zero_rtt_crypto = None;
2641        }
2642    }
2643
2644    fn discard_space(&mut self, now: Instant, space_id: SpaceId) {
2645        debug_assert!(space_id != SpaceId::Data);
2646        trace!("discarding {:?} keys", space_id);
2647        if space_id == SpaceId::Initial {
2648            // The token is only sent in Initial packets, so it is no longer needed
2649            if let ConnectionSide::Client { token, .. } = &mut self.side {
2650                *token = Bytes::new();
2651            }
2652        }
2653        let space = &mut self.spaces[space_id];
2654        space.crypto = None;
2655        space.time_of_last_ack_eliciting_packet = None;
2656        space.loss_time = None;
2657        space.in_flight = 0;
2658        let sent_packets = mem::take(&mut space.sent_packets);
2659        for (pn, packet) in sent_packets.into_iter() {
2660            self.remove_in_flight(pn, &packet);
2661        }
2662        self.set_loss_detection_timer(now)
2663    }
2664
2665    fn handle_coalesced(
2666        &mut self,
2667        now: Instant,
2668        remote: SocketAddr,
2669        ecn: Option<EcnCodepoint>,
2670        data: BytesMut,
2671    ) {
2672        self.path.total_recvd = self.path.total_recvd.saturating_add(data.len() as u64);
2673        let mut remaining = Some(data);
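        // A single UDP datagram may coalesce several QUIC packets (e.g. Initial + Handshake
        // + 1-RTT during the handshake). Each iteration peels one packet off the front;
        // `PartialDecode::new` hands back the undecoded remainder, which becomes the next
        // iteration's input until nothing is left.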
2674        while let Some(data) = remaining {
2675            match PartialDecode::new(
2676                data,
2677                &FixedLengthConnectionIdParser::new(self.local_cid_state.cid_len()),
2678                &[self.version],
2679                self.endpoint_config.grease_quic_bit,
2680            ) {
2681                Ok((partial_decode, rest)) => {
2682                    remaining = rest;
2683                    self.handle_decode(now, remote, ecn, partial_decode);
2684                }
2685                Err(e) => {
2686                    trace!("malformed header: {}", e);
2687                    return;
2688                }
2689            }
2690        }
2691    }
2692
2693    fn handle_decode(
2694        &mut self,
2695        now: Instant,
2696        remote: SocketAddr,
2697        ecn: Option<EcnCodepoint>,
2698        partial_decode: PartialDecode,
2699    ) {
2700        if let Some(decoded) = packet_crypto::unprotect_header(
2701            partial_decode,
2702            &self.spaces,
2703            self.zero_rtt_crypto.as_ref(),
2704            self.peer_params.stateless_reset_token,
2705        ) {
2706            self.handle_packet(now, remote, ecn, decoded.packet, decoded.stateless_reset);
2707        }
2708    }
2709
2710    fn handle_packet(
2711        &mut self,
2712        now: Instant,
2713        remote: SocketAddr,
2714        ecn: Option<EcnCodepoint>,
2715        packet: Option<Packet>,
2716        stateless_reset: bool,
2717    ) {
2718        self.stats.udp_rx.ios += 1;
2719        if let Some(ref packet) = packet {
2720            trace!(
2721                "got {:?} packet ({} bytes) from {} using id {}",
2722                packet.header.space(),
2723                packet.payload.len() + packet.header_data.len(),
2724                remote,
2725                packet.header.dst_cid(),
2726            );
2727
2728            // Trace packet received
2729            #[cfg(feature = "trace")]
2730            {
2731                use crate::trace_packet_received;
2732                // Tracing imports handled by macros
2733                let packet_size = packet.payload.len() + packet.header_data.len();
2734                trace_packet_received!(
2735                    &self.event_log,
2736                    self.trace_context.trace_id(),
2737                    packet_size as u32,
2738                    0 // Will be updated when packet number is decoded
2739                );
2740            }
2741        }
2742
2743        if self.is_handshaking() && remote != self.path.remote {
2744            debug!("discarding packet with unexpected remote during handshake");
2745            return;
2746        }
2747
2748        let was_closed = self.state.is_closed();
2749        let was_drained = self.state.is_drained();
2750
2751        let decrypted = match packet {
2752            None => Err(None),
2753            Some(mut packet) => self
2754                .decrypt_packet(now, &mut packet)
2755                .map(move |number| (packet, number)),
2756        };
2757        let result = match decrypted {
2758            _ if stateless_reset => {
2759                debug!("got stateless reset");
2760                Err(ConnectionError::Reset)
2761            }
2762            Err(Some(e)) => {
2763                warn!("illegal packet: {}", e);
2764                Err(e.into())
2765            }
2766            Err(None) => {
2767                debug!("failed to authenticate packet");
2768                self.authentication_failures += 1;
2769                let integrity_limit = self.spaces[self.highest_space]
2770                    .crypto
2771                    .as_ref()
2772                    .unwrap()
2773                    .packet
2774                    .local
2775                    .integrity_limit();
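                // The integrity limit comes from the AEAD in use. For reference (RFC 9001
                // §6.6, not computed here): AES-GCM suites tolerate up to 2^52 forged packets
                // and ChaCha20-Poly1305 up to 2^36; beyond that the connection must be closed
                // with AEAD_LIMIT_REACHED instead of silently dropping further forgeries.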
2776                if self.authentication_failures > integrity_limit {
2777                    Err(TransportError::AEAD_LIMIT_REACHED("integrity limit violated").into())
2778                } else {
2779                    return;
2780                }
2781            }
2782            Ok((packet, number)) => {
2783                let span = match number {
2784                    Some(pn) => trace_span!("recv", space = ?packet.header.space(), pn),
2785                    None => trace_span!("recv", space = ?packet.header.space()),
2786                };
2787                let _guard = span.enter();
2788
2789                let is_duplicate = |n| self.spaces[packet.header.space()].dedup.insert(n);
2790                if number.is_some_and(is_duplicate) {
2791                    debug!("discarding possible duplicate packet");
2792                    return;
2793                } else if self.state.is_handshake() && packet.header.is_short() {
2794                    // TODO: SHOULD buffer these to improve reordering tolerance.
2795                    trace!("dropping short packet during handshake");
2796                    return;
2797                } else {
2798                    if let Header::Initial(InitialHeader { ref token, .. }) = packet.header {
2799                        if let State::Handshake(ref hs) = self.state {
2800                            if self.side.is_server() && token != &hs.expected_token {
2801                                // Clients must send the same retry token in every Initial. Initial
2802                                // packets can be spoofed, so we discard rather than killing the
2803                                // connection.
2804                                warn!("discarding Initial with invalid retry token");
2805                                return;
2806                            }
2807                        }
2808                    }
2809
2810                    if !self.state.is_closed() {
2811                        let spin = match packet.header {
2812                            Header::Short { spin, .. } => spin,
2813                            _ => false,
2814                        };
2815                        self.on_packet_authenticated(
2816                            now,
2817                            packet.header.space(),
2818                            ecn,
2819                            number,
2820                            spin,
2821                            packet.header.is_1rtt(),
2822                        );
2823                    }
2824
2825                    self.process_decrypted_packet(now, remote, number, packet)
2826                }
2827            }
2828        };
2829
2830        // State transitions for error cases
2831        if let Err(conn_err) = result {
2832            self.error = Some(conn_err.clone());
2833            self.state = match conn_err {
2834                ConnectionError::ApplicationClosed(reason) => State::closed(reason),
2835                ConnectionError::ConnectionClosed(reason) => State::closed(reason),
2836                ConnectionError::Reset
2837                | ConnectionError::TransportError(TransportError {
2838                    code: TransportErrorCode::AEAD_LIMIT_REACHED,
2839                    ..
2840                }) => State::Drained,
2841                ConnectionError::TimedOut => {
2842                    unreachable!("timeouts aren't generated by packet processing");
2843                }
2844                ConnectionError::TransportError(err) => {
2845                    debug!("closing connection due to transport error: {}", err);
2846                    State::closed(err)
2847                }
2848                ConnectionError::VersionMismatch => State::Draining,
2849                ConnectionError::LocallyClosed => {
2850                    unreachable!("LocallyClosed isn't generated by packet processing");
2851                }
2852                ConnectionError::CidsExhausted => {
2853                    unreachable!("CidsExhausted isn't generated by packet processing");
2854                }
2855            };
2856        }
2857
2858        if !was_closed && self.state.is_closed() {
2859            self.close_common();
2860            if !self.state.is_drained() {
2861                self.set_close_timer(now);
2862            }
2863        }
2864        if !was_drained && self.state.is_drained() {
2865            self.endpoint_events.push_back(EndpointEventInner::Drained);
2866            // Close timer may have been started previously, e.g. if we sent a close and got a
2867            // stateless reset in response
2868            self.timers.stop(Timer::Close);
2869        }
2870
2871        // Transmit CONNECTION_CLOSE if necessary
2872        if let State::Closed(_) = self.state {
2873            self.close = remote == self.path.remote;
2874        }
2875    }
2876
2877    fn process_decrypted_packet(
2878        &mut self,
2879        now: Instant,
2880        remote: SocketAddr,
2881        number: Option<u64>,
2882        packet: Packet,
2883    ) -> Result<(), ConnectionError> {
2884        let state = match self.state {
2885            State::Established => {
2886                match packet.header.space() {
2887                    SpaceId::Data => self.process_payload(now, remote, number.unwrap(), packet)?,
2888                    _ if packet.header.has_frames() => self.process_early_payload(now, packet)?,
2889                    _ => {
2890                        trace!("discarding unexpected pre-handshake packet");
2891                    }
2892                }
2893                return Ok(());
2894            }
2895            State::Closed(_) => {
2896                for result in frame::Iter::new(packet.payload.freeze())? {
2897                    let frame = match result {
2898                        Ok(frame) => frame,
2899                        Err(err) => {
2900                            debug!("frame decoding error: {err:?}");
2901                            continue;
2902                        }
2903                    };
2904
2905                    if let Frame::Padding = frame {
2906                        continue;
2907                    };
2908
2909                    self.stats.frame_rx.record(&frame);
2910
2911                    if let Frame::Close(_) = frame {
2912                        trace!("draining");
2913                        self.state = State::Draining;
2914                        break;
2915                    }
2916                }
2917                return Ok(());
2918            }
2919            State::Draining | State::Drained => return Ok(()),
2920            State::Handshake(ref mut state) => state,
2921        };
2922
2923        match packet.header {
2924            Header::Retry {
2925                src_cid: rem_cid, ..
2926            } => {
2927                if self.side.is_server() {
2928                    return Err(TransportError::PROTOCOL_VIOLATION("client sent Retry").into());
2929                }
2930
2931                if self.total_authed_packets > 1
2932                            || packet.payload.len() <= 16 // token + 16 byte tag
2933                            || !self.crypto.is_valid_retry(
2934                                &self.rem_cids.active(),
2935                                &packet.header_data,
2936                                &packet.payload,
2937                            )
2938                {
2939                    trace!("discarding invalid Retry");
2940                    // - After the client has received and processed an Initial or Retry
2941                    //   packet from the server, it MUST discard any subsequent Retry
2942                    //   packets that it receives.
2943                    // - A client MUST discard a Retry packet with a zero-length Retry Token
2944                    //   field.
2945                    // - Clients MUST discard Retry packets that have a Retry Integrity Tag
2946                    //   that cannot be validated
2947                    return Ok(());
2948                }
2949
2950                trace!("retrying with CID {}", rem_cid);
2951                let client_hello = state.client_hello.take().unwrap();
2952                self.retry_src_cid = Some(rem_cid);
2953                self.rem_cids.update_initial_cid(rem_cid);
2954                self.rem_handshake_cid = rem_cid;
2955
2956                let space = &mut self.spaces[SpaceId::Initial];
2957                if let Some(info) = space.take(0) {
2958                    self.on_packet_acked(now, 0, info);
2959                };
2960
2961                self.discard_space(now, SpaceId::Initial); // Make sure we clean up after any retransmitted Initials
2962                self.spaces[SpaceId::Initial] = PacketSpace {
2963                    crypto: Some(self.crypto.initial_keys(&rem_cid, self.side.side())),
2964                    next_packet_number: self.spaces[SpaceId::Initial].next_packet_number,
2965                    crypto_offset: client_hello.len() as u64,
2966                    ..PacketSpace::new(now)
2967                };
2968                self.spaces[SpaceId::Initial]
2969                    .pending
2970                    .crypto
2971                    .push_back(frame::Crypto {
2972                        offset: 0,
2973                        data: client_hello,
2974                    });
2975
2976                // Retransmit all 0-RTT data
2977                let zero_rtt = mem::take(&mut self.spaces[SpaceId::Data].sent_packets);
2978                for (pn, info) in zero_rtt {
2979                    self.remove_in_flight(pn, &info);
2980                    self.spaces[SpaceId::Data].pending |= info.retransmits;
2981                }
2982                self.streams.retransmit_all_for_0rtt();
2983
2984                let token_len = packet.payload.len() - 16;
2985                let ConnectionSide::Client { ref mut token, .. } = self.side else {
2986                    unreachable!("we already short-circuited if we're server");
2987                };
2988                *token = packet.payload.freeze().split_to(token_len);
2989                self.state = State::Handshake(state::Handshake {
2990                    expected_token: Bytes::new(),
2991                    rem_cid_set: false,
2992                    client_hello: None,
2993                });
2994                Ok(())
2995            }
2996            Header::Long {
2997                ty: LongType::Handshake,
2998                src_cid: rem_cid,
2999                ..
3000            } => {
3001                if rem_cid != self.rem_handshake_cid {
3002                    debug!(
3003                        "discarding packet with mismatched remote CID: {} != {}",
3004                        self.rem_handshake_cid, rem_cid
3005                    );
3006                    return Ok(());
3007                }
3008                self.on_path_validated();
3009
3010                self.process_early_payload(now, packet)?;
3011                if self.state.is_closed() {
3012                    return Ok(());
3013                }
3014
3015                if self.crypto.is_handshaking() {
3016                    trace!("handshake ongoing");
3017                    return Ok(());
3018                }
3019
3020                if self.side.is_client() {
3021                    // Client-only because server params were set from the client's Initial
3022                    let params =
3023                        self.crypto
3024                            .transport_parameters()?
3025                            .ok_or_else(|| TransportError {
3026                                code: TransportErrorCode::crypto(0x6d),
3027                                frame: None,
3028                                reason: "transport parameters missing".into(),
3029                            })?;
3030
3031                    if self.has_0rtt() {
3032                        if !self.crypto.early_data_accepted().unwrap() {
3033                            debug_assert!(self.side.is_client());
3034                            debug!("0-RTT rejected");
3035                            self.accepted_0rtt = false;
3036                            self.streams.zero_rtt_rejected();
3037
3038                            // Discard already-queued frames
3039                            self.spaces[SpaceId::Data].pending = Retransmits::default();
3040
3041                            // Discard 0-RTT packets
3042                            let sent_packets =
3043                                mem::take(&mut self.spaces[SpaceId::Data].sent_packets);
3044                            for (pn, packet) in sent_packets {
3045                                self.remove_in_flight(pn, &packet);
3046                            }
3047                        } else {
3048                            self.accepted_0rtt = true;
3049                            params.validate_resumption_from(&self.peer_params)?;
3050                        }
3051                    }
3052                    if let Some(token) = params.stateless_reset_token {
3053                        self.endpoint_events
3054                            .push_back(EndpointEventInner::ResetToken(self.path.remote, token));
3055                    }
3056                    self.handle_peer_params(params)?;
3057                    self.issue_first_cids(now);
3058                } else {
3059                    // Server-only
3060                    self.spaces[SpaceId::Data].pending.handshake_done = true;
3061                    self.discard_space(now, SpaceId::Handshake);
3062                }
3063
3064                self.events.push_back(Event::Connected);
3065                self.state = State::Established;
3066                trace!("established");
3067                Ok(())
3068            }
3069            Header::Initial(InitialHeader {
3070                src_cid: rem_cid, ..
3071            }) => {
3072                if !state.rem_cid_set {
3073                    trace!("switching remote CID to {}", rem_cid);
3074                    let mut state = state.clone();
3075                    self.rem_cids.update_initial_cid(rem_cid);
3076                    self.rem_handshake_cid = rem_cid;
3077                    self.orig_rem_cid = rem_cid;
3078                    state.rem_cid_set = true;
3079                    self.state = State::Handshake(state);
3080                } else if rem_cid != self.rem_handshake_cid {
3081                    debug!(
3082                        "discarding packet with mismatched remote CID: {} != {}",
3083                        self.rem_handshake_cid, rem_cid
3084                    );
3085                    return Ok(());
3086                }
3087
3088                let starting_space = self.highest_space;
3089                self.process_early_payload(now, packet)?;
3090
3091                if self.side.is_server()
3092                    && starting_space == SpaceId::Initial
3093                    && self.highest_space != SpaceId::Initial
3094                {
3095                    let params =
3096                        self.crypto
3097                            .transport_parameters()?
3098                            .ok_or_else(|| TransportError {
3099                                code: TransportErrorCode::crypto(0x6d),
3100                                frame: None,
3101                                reason: "transport parameters missing".into(),
3102                            })?;
3103                    self.handle_peer_params(params)?;
3104                    self.issue_first_cids(now);
3105                    self.init_0rtt();
3106                }
3107                Ok(())
3108            }
3109            Header::Long {
3110                ty: LongType::ZeroRtt,
3111                ..
3112            } => {
3113                self.process_payload(now, remote, number.unwrap(), packet)?;
3114                Ok(())
3115            }
3116            Header::VersionNegotiate { .. } => {
3117                if self.total_authed_packets > 1 {
3118                    return Ok(());
3119                }
3120                let supported = packet
3121                    .payload
3122                    .chunks(4)
3123                    .any(|x| match <[u8; 4]>::try_from(x) {
3124                        Ok(version) => self.version == u32::from_be_bytes(version),
3125                        Err(_) => false,
3126                    });
3127                if supported {
3128                    return Ok(());
3129                }
3130                debug!("remote doesn't support our version");
3131                Err(ConnectionError::VersionMismatch)
3132            }
3133            Header::Short { .. } => unreachable!(
3134                "short packets received during handshake are discarded in handle_packet"
3135            ),
3136        }
3137    }
3138
3139    /// Process an Initial or Handshake packet payload
3140    fn process_early_payload(
3141        &mut self,
3142        now: Instant,
3143        packet: Packet,
3144    ) -> Result<(), TransportError> {
3145        debug_assert_ne!(packet.header.space(), SpaceId::Data);
3146        let payload_len = packet.payload.len();
3147        let mut ack_eliciting = false;
3148        for result in frame::Iter::new(packet.payload.freeze())? {
3149            let frame = result?;
3150            let span = match frame {
3151                Frame::Padding => continue,
3152                _ => Some(trace_span!("frame", ty = %frame.ty())),
3153            };
3154
3155            self.stats.frame_rx.record(&frame);
3156
3157            let _guard = span.as_ref().map(|x| x.enter());
3158            ack_eliciting |= frame.is_ack_eliciting();
3159
3160            // Process frames
3161            match frame {
3162                Frame::Padding | Frame::Ping => {}
3163                Frame::Crypto(frame) => {
3164                    self.read_crypto(packet.header.space(), &frame, payload_len)?;
3165                }
3166                Frame::Ack(ack) => {
3167                    self.on_ack_received(now, packet.header.space(), ack)?;
3168                }
3169                Frame::Close(reason) => {
3170                    self.error = Some(reason.into());
3171                    self.state = State::Draining;
3172                    return Ok(());
3173                }
3174                _ => {
3175                    let mut err =
3176                        TransportError::PROTOCOL_VIOLATION("illegal frame type in handshake");
3177                    err.frame = Some(frame.ty());
3178                    return Err(err);
3179                }
3180            }
3181        }
3182
3183        if ack_eliciting {
3184            // In the initial and handshake spaces, ACKs must be sent immediately
3185            self.spaces[packet.header.space()]
3186                .pending_acks
3187                .set_immediate_ack_required();
3188        }
3189
3190        self.write_crypto();
3191        Ok(())
3192    }
3193
3194    fn process_payload(
3195        &mut self,
3196        now: Instant,
3197        remote: SocketAddr,
3198        number: u64,
3199        packet: Packet,
3200    ) -> Result<(), TransportError> {
3201        let payload = packet.payload.freeze();
3202        let mut is_probing_packet = true;
3203        let mut close = None;
3204        let payload_len = payload.len();
3205        let mut ack_eliciting = false;
3206        for result in frame::Iter::new(payload)? {
3207            let frame = result?;
3208            let span = match frame {
3209                Frame::Padding => continue,
3210                _ => Some(trace_span!("frame", ty = %frame.ty())),
3211            };
3212
3213            self.stats.frame_rx.record(&frame);
3214            // Crypto, Stream, and Datagram frames are special-cased so as not to pollute
3215            // the log with payload data
3216            match &frame {
3217                Frame::Crypto(f) => {
3218                    trace!(offset = f.offset, len = f.data.len(), "got crypto frame");
3219                }
3220                Frame::Stream(f) => {
3221                    trace!(id = %f.id, offset = f.offset, len = f.data.len(), fin = f.fin, "got stream frame");
3222                }
3223                Frame::Datagram(f) => {
3224                    trace!(len = f.data.len(), "got datagram frame");
3225                }
3226                f => {
3227                    trace!("got frame {:?}", f);
3228                }
3229            }
3230
3231            let _guard = span.as_ref().map(|x| x.enter());
3232            if packet.header.is_0rtt() {
3233                match frame {
3234                    Frame::Crypto(_) | Frame::Close(Close::Application(_)) => {
3235                        return Err(TransportError::PROTOCOL_VIOLATION(
3236                            "illegal frame type in 0-RTT",
3237                        ));
3238                    }
3239                    _ => {}
3240                }
3241            }
3242            ack_eliciting |= frame.is_ack_eliciting();
3243
3244            // Check whether this could be a probing packet
3245            match frame {
3246                Frame::Padding
3247                | Frame::PathChallenge(_)
3248                | Frame::PathResponse(_)
3249                | Frame::NewConnectionId(_) => {}
3250                _ => {
3251                    is_probing_packet = false;
3252                }
3253            }
3254            match frame {
3255                Frame::Crypto(frame) => {
3256                    self.read_crypto(SpaceId::Data, &frame, payload_len)?;
3257                }
3258                Frame::Stream(frame) => {
3259                    if self.streams.received(frame, payload_len)?.should_transmit() {
3260                        self.spaces[SpaceId::Data].pending.max_data = true;
3261                    }
3262                }
3263                Frame::Ack(ack) => {
3264                    self.on_ack_received(now, SpaceId::Data, ack)?;
3265                }
3266                Frame::Padding | Frame::Ping => {}
3267                Frame::Close(reason) => {
3268                    close = Some(reason);
3269                }
3270                Frame::PathChallenge(token) => {
3271                    self.path_responses.push(number, token, remote);
3272                    if remote == self.path.remote {
3273                        // A PATH_CHALLENGE on the active path suggests a possible off-path packet
3274                        // forwarding attack. Send a non-probing packet to recover the active path.
3275                        match self.peer_supports_ack_frequency() {
3276                            true => self.immediate_ack(),
3277                            false => self.ping(),
3278                        }
3279                    }
3280                }
3281                Frame::PathResponse(token) => {
3282                    if self.path.challenge == Some(token) && remote == self.path.remote {
3283                        trace!("new path validated");
3284                        self.timers.stop(Timer::PathValidation);
3285                        self.path.challenge = None;
3286                        self.path.validated = true;
3287                        if let Some((_, ref mut prev_path)) = self.prev_path {
3288                            prev_path.challenge = None;
3289                            prev_path.challenge_pending = false;
3290                        }
3291                        self.on_path_validated();
3292                    } else if let Some(nat_traversal) = &mut self.nat_traversal {
3293                        // Check if this is a response to NAT traversal PATH_CHALLENGE
3294                        match nat_traversal.handle_validation_success(remote, token, now) {
3295                            Ok(sequence) => {
3296                                trace!(
3297                                    "NAT traversal candidate {} validated for sequence {}",
3298                                    remote, sequence
3299                                );
3300
3301                                // Check if this was part of a coordination round
3302                                if nat_traversal.handle_coordination_success(remote, now) {
3303                                    trace!("Coordination succeeded via {}", remote);
3304
3305                                    // Check if we should migrate to this better path
3306                                    let can_migrate = match &self.side {
3307                                        ConnectionSide::Client { .. } => true, // Clients can always migrate
3308                                        ConnectionSide::Server { server_config } => {
3309                                            server_config.migration
3310                                        }
3311                                    };
3312
3313                                    if can_migrate {
3314                                        // Get the best paths to see if this new one is better
3315                                        let best_pairs = nat_traversal.get_best_succeeded_pairs();
3316                                        if let Some(best) = best_pairs.first() {
3317                                            if best.remote_addr == remote
3318                                                && best.remote_addr != self.path.remote
3319                                            {
3320                                                debug!(
3321                                                    "NAT traversal found better path, initiating migration"
3322                                                );
3323                                                // Trigger migration to the better NAT-traversed path
3324                                                if let Err(e) =
3325                                                    self.migrate_to_nat_traversal_path(now)
3326                                                {
3327                                                    warn!(
3328                                                        "Failed to migrate to NAT traversal path: {:?}",
3329                                                        e
3330                                                    );
3331                                                }
3332                                            }
3333                                        }
3334                                    }
3335                                } else {
3336                                    // Mark the candidate pair as succeeded for regular validation
3337                                    if nat_traversal.mark_pair_succeeded(remote) {
3338                                        trace!("NAT traversal pair succeeded for {}", remote);
3339                                    }
3340                                }
3341                            }
3342                            Err(NatTraversalError::ChallengeMismatch) => {
3343                                debug!(
3344                                    "PATH_RESPONSE challenge mismatch for NAT candidate {}",
3345                                    remote
3346                                );
3347                            }
3348                            Err(e) => {
3349                                debug!("NAT traversal validation error: {}", e);
3350                            }
3351                        }
3352                    } else {
3353                        debug!(token, "ignoring invalid PATH_RESPONSE");
3354                    }
3355                }
3356                Frame::MaxData(bytes) => {
3357                    self.streams.received_max_data(bytes);
3358                }
3359                Frame::MaxStreamData { id, offset } => {
3360                    self.streams.received_max_stream_data(id, offset)?;
3361                }
3362                Frame::MaxStreams { dir, count } => {
3363                    self.streams.received_max_streams(dir, count)?;
3364                }
3365                Frame::ResetStream(frame) => {
3366                    if self.streams.received_reset(frame)?.should_transmit() {
3367                        self.spaces[SpaceId::Data].pending.max_data = true;
3368                    }
3369                }
3370                Frame::DataBlocked { offset } => {
3371                    debug!(offset, "peer claims to be blocked at connection level");
3372                }
3373                Frame::StreamDataBlocked { id, offset } => {
3374                    if id.initiator() == self.side.side() && id.dir() == Dir::Uni {
3375                        debug!("got STREAM_DATA_BLOCKED on send-only {}", id);
3376                        return Err(TransportError::STREAM_STATE_ERROR(
3377                            "STREAM_DATA_BLOCKED on send-only stream",
3378                        ));
3379                    }
3380                    debug!(
3381                        stream = %id,
3382                        offset, "peer claims to be blocked at stream level"
3383                    );
3384                }
3385                Frame::StreamsBlocked { dir, limit } => {
3386                    if limit > MAX_STREAM_COUNT {
3387                        return Err(TransportError::FRAME_ENCODING_ERROR(
3388                            "unrepresentable stream limit",
3389                        ));
3390                    }
3391                    debug!(
3392                        "peer claims to be blocked opening more than {} {} streams",
3393                        limit, dir
3394                    );
3395                }
3396                Frame::StopSending(frame::StopSending { id, error_code }) => {
3397                    if id.initiator() != self.side.side() {
3398                        if id.dir() == Dir::Uni {
3399                            debug!("got STOP_SENDING on recv-only {}", id);
3400                            return Err(TransportError::STREAM_STATE_ERROR(
3401                                "STOP_SENDING on recv-only stream",
3402                            ));
3403                        }
3404                    } else if self.streams.is_local_unopened(id) {
3405                        return Err(TransportError::STREAM_STATE_ERROR(
3406                            "STOP_SENDING on unopened stream",
3407                        ));
3408                    }
3409                    self.streams.received_stop_sending(id, error_code);
3410                }
3411                Frame::RetireConnectionId { sequence } => {
3412                    let allow_more_cids = self
3413                        .local_cid_state
3414                        .on_cid_retirement(sequence, self.peer_params.issue_cids_limit())?;
3415                    self.endpoint_events
3416                        .push_back(EndpointEventInner::RetireConnectionId(
3417                            now,
3418                            sequence,
3419                            allow_more_cids,
3420                        ));
3421                }
3422                Frame::NewConnectionId(frame) => {
3423                    trace!(
3424                        sequence = frame.sequence,
3425                        id = %frame.id,
3426                        retire_prior_to = frame.retire_prior_to,
3427                    );
3428                    if self.rem_cids.active().is_empty() {
3429                        return Err(TransportError::PROTOCOL_VIOLATION(
3430                            "NEW_CONNECTION_ID when CIDs aren't in use",
3431                        ));
3432                    }
3433                    if frame.retire_prior_to > frame.sequence {
3434                        return Err(TransportError::PROTOCOL_VIOLATION(
3435                            "NEW_CONNECTION_ID retiring unissued CIDs",
3436                        ));
3437                    }
3438
3439                    use crate::cid_queue::InsertError;
3440                    match self.rem_cids.insert(frame) {
3441                        Ok(None) => {}
3442                        Ok(Some((retired, reset_token))) => {
3443                            let pending_retired =
3444                                &mut self.spaces[SpaceId::Data].pending.retire_cids;
3445                            /// Ensure `pending_retired` cannot grow without bound. Limit is
3446                            /// somewhat arbitrary but very permissive.
3447                            const MAX_PENDING_RETIRED_CIDS: u64 = CidQueue::LEN as u64 * 10;
3448                            // We don't bother counting in-flight frames because those are bounded
3449                            // by congestion control.
3450                            if (pending_retired.len() as u64)
3451                                .saturating_add(retired.end.saturating_sub(retired.start))
3452                                > MAX_PENDING_RETIRED_CIDS
3453                            {
3454                                return Err(TransportError::CONNECTION_ID_LIMIT_ERROR(
3455                                    "queued too many retired CIDs",
3456                                ));
3457                            }
3458                            pending_retired.extend(retired);
3459                            self.set_reset_token(reset_token);
3460                        }
3461                        Err(InsertError::ExceedsLimit) => {
3462                            return Err(TransportError::CONNECTION_ID_LIMIT_ERROR(""));
3463                        }
3464                        Err(InsertError::Retired) => {
3465                            trace!("discarding already-retired");
3466                            // RETIRE_CONNECTION_ID might not have been previously sent if e.g. a
3467                            // range of connection IDs larger than the active connection ID limit
3468                            // was retired all at once via retire_prior_to.
3469                            self.spaces[SpaceId::Data]
3470                                .pending
3471                                .retire_cids
3472                                .push(frame.sequence);
3473                            continue;
3474                        }
3475                    };
3476
3477                    if self.side.is_server() && self.rem_cids.active_seq() == 0 {
3478                        // We're a server still using the initial remote CID for the client, so
3479                        // let's switch immediately to enable client-side stateless resets.
3480                        self.update_rem_cid();
3481                    }
3482                }
3483                Frame::NewToken(NewToken { token }) => {
3484                    let ConnectionSide::Client {
3485                        token_store,
3486                        server_name,
3487                        ..
3488                    } = &self.side
3489                    else {
3490                        return Err(TransportError::PROTOCOL_VIOLATION("client sent NEW_TOKEN"));
3491                    };
3492                    if token.is_empty() {
3493                        return Err(TransportError::FRAME_ENCODING_ERROR("empty token"));
3494                    }
3495                    trace!("got new token");
3496                    token_store.insert(server_name, token);
3497                }
3498                Frame::Datagram(datagram) => {
3499                    if self
3500                        .datagrams
3501                        .received(datagram, &self.config.datagram_receive_buffer_size)?
3502                    {
3503                        self.events.push_back(Event::DatagramReceived);
3504                    }
3505                }
3506                Frame::AckFrequency(ack_frequency) => {
3507                    // This frame can only be sent in the Data space
3508                    let space = &mut self.spaces[SpaceId::Data];
3509
3510                    if !self
3511                        .ack_frequency
3512                        .ack_frequency_received(&ack_frequency, &mut space.pending_acks)?
3513                    {
3514                        // The AckFrequency frame is stale (we have already received a more recent one)
3515                        continue;
3516                    }
3517
3518                    // Our `max_ack_delay` has been updated, so we may need to adjust its associated
3519                    // timeout
3520                    if let Some(timeout) = space
3521                        .pending_acks
3522                        .max_ack_delay_timeout(self.ack_frequency.max_ack_delay)
3523                    {
3524                        self.timers.set(Timer::MaxAckDelay, timeout);
3525                    }
3526                }
3527                Frame::ImmediateAck => {
3528                    // This frame can only be sent in the Data space
3529                    self.spaces[SpaceId::Data]
3530                        .pending_acks
3531                        .set_immediate_ack_required();
3532                }
3533                Frame::HandshakeDone => {
3534                    if self.side.is_server() {
3535                        return Err(TransportError::PROTOCOL_VIOLATION(
3536                            "client sent HANDSHAKE_DONE",
3537                        ));
3538                    }
3539                    if self.spaces[SpaceId::Handshake].crypto.is_some() {
3540                        self.discard_space(now, SpaceId::Handshake);
3541                    }
3542                }
3543                Frame::AddAddress(add_address) => {
3544                    self.handle_add_address(&add_address, now)?;
3545                }
3546                Frame::PunchMeNow(punch_me_now) => {
3547                    self.handle_punch_me_now(&punch_me_now, now)?;
3548                }
3549                Frame::RemoveAddress(remove_address) => {
3550                    self.handle_remove_address(&remove_address)?;
3551                }
3552                Frame::ObservedAddress(observed_address) => {
3553                    self.handle_observed_address_frame(&observed_address, now)?;
3554                }
3555                Frame::TryConnectTo(try_connect_to) => {
3556                    self.handle_try_connect_to(&try_connect_to, now)?;
3557                }
3558                Frame::TryConnectToResponse(response) => {
3559                    self.handle_try_connect_to_response(&response)?;
3560                }
3561            }
3562        }
3563
3564        let space = &mut self.spaces[SpaceId::Data];
3565        if space
3566            .pending_acks
3567            .packet_received(now, number, ack_eliciting, &space.dedup)
3568        {
3569            self.timers
3570                .set(Timer::MaxAckDelay, now + self.ack_frequency.max_ack_delay);
3571        }
3572
3573        // Issue stream ID credit due to ACKs of outgoing finish/resets and incoming finish/resets
3574        // on stopped streams. Incoming finishes/resets on open streams are not handled here as they
3575        // are only freed, and hence only issue credit, once the application has been notified
3576        // during a read on the stream.
3577        let pending = &mut self.spaces[SpaceId::Data].pending;
3578        self.streams.queue_max_stream_id(pending);
3579
3580        if let Some(reason) = close {
3581            self.error = Some(reason.into());
3582            self.state = State::Draining;
3583            self.close = true;
3584        }
3585
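        // Only a non-probing packet carrying the highest packet number seen so far on a new
        // remote address triggers migration; probing frames (PADDING, PATH_CHALLENGE,
        // PATH_RESPONSE, NEW_CONNECTION_ID) and reordered older packets deliberately do not.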
3586        if remote != self.path.remote
3587            && !is_probing_packet
3588            && number == self.spaces[SpaceId::Data].rx_packet
3589        {
3590            let ConnectionSide::Server { ref server_config } = self.side else {
3591                return Err(TransportError::PROTOCOL_VIOLATION(
3592                    "packets from unknown remote should be dropped by clients",
3593                ));
3594            };
3595            debug_assert!(
3596                server_config.migration,
3597                "migration-initiating packets should have been dropped immediately"
3598            );
3599            self.migrate(now, remote);
3600            // Break linkability, if possible
3601            self.update_rem_cid();
3602            self.spin = false;
3603        }
3604
3605        Ok(())
3606    }
3607
3608    fn migrate(&mut self, now: Instant, remote: SocketAddr) {
3609        trace!(%remote, "migration initiated");
3610        // Reset rtt/congestion state for new path unless it looks like a NAT rebinding.
3611        // Note that the congestion window will not grow until validation terminates. Helps mitigate
3612        // amplification attacks performed by spoofing source addresses.
3613        let mut new_path = if remote.is_ipv4() && remote.ip() == self.path.remote.ip() {
3614            PathData::from_previous(remote, &self.path, now)
3615        } else {
3616            let peer_max_udp_payload_size =
3617                u16::try_from(self.peer_params.max_udp_payload_size.into_inner())
3618                    .unwrap_or(u16::MAX);
3619            PathData::new(
3620                remote,
3621                self.allow_mtud,
3622                Some(peer_max_udp_payload_size),
3623                now,
3624                &self.config,
3625            )
3626        };
3627        new_path.challenge = Some(self.rng.r#gen());
3628        new_path.challenge_pending = true;
3629        let prev_pto = self.pto(SpaceId::Data);
3630
3631        let mut prev = mem::replace(&mut self.path, new_path);
3632        // Don't clobber the original path if the previous one hasn't been validated yet
3633        if prev.challenge.is_none() {
3634            prev.challenge = Some(self.rng.r#gen());
3635            prev.challenge_pending = true;
3636            // We haven't updated the remote CID yet, so this captures the remote CID we were
3637            // using on the previous path.
3638            self.prev_path = Some((self.rem_cids.active(), prev));
3639        }
3640
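        // Arm path validation with three times the larger of the old and new PTO, matching
        // the usual 3 x PTO validation deadline. Worked example (hypothetical values): if that
        // maximum is 300 ms, the timer fires 900 ms from now unless a PATH_RESPONSE validates
        // the new path first.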
3641        self.timers.set(
3642            Timer::PathValidation,
3643            now + 3 * cmp::max(self.pto(SpaceId::Data), prev_pto),
3644        );
3645    }
3646
3647    /// Handle a change in the local address, i.e. an active migration
3648    pub fn local_address_changed(&mut self) {
3649        self.update_rem_cid();
3650        self.ping();
3651    }
3652
3653    /// Migrate to a better path discovered through NAT traversal
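    ///
    /// Illustrative usage (not a doctest; assumes an established connection with NAT
    /// traversal enabled and at least one validated candidate pair):
    ///
    /// ```ignore
    /// // `conn` is a `Connection`; `now` comes from the caller's clock source.
    /// if let Err(e) = conn.migrate_to_nat_traversal_path(now) {
    ///     tracing::debug!("keeping current path: {e}");
    /// }
    /// ```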
3654    pub fn migrate_to_nat_traversal_path(&mut self, now: Instant) -> Result<(), TransportError> {
3655        // Extract necessary data before mutable operations
3656        let (remote_addr, local_addr) = {
3657            let nat_state = self
3658                .nat_traversal
3659                .as_ref()
3660                .ok_or_else(|| TransportError::PROTOCOL_VIOLATION("NAT traversal not enabled"))?;
3661
3662            // Get the best validated NAT traversal path
3663            let best_pairs = nat_state.get_best_succeeded_pairs();
3664            if best_pairs.is_empty() {
3665                return Err(TransportError::PROTOCOL_VIOLATION(
3666                    "No validated NAT traversal paths",
3667                ));
3668            }
3669
3670            // Select the best path (highest priority that's different from current)
3671            let best_path = best_pairs
3672                .iter()
3673                .find(|pair| pair.remote_addr != self.path.remote)
3674                .or_else(|| best_pairs.first());
3675
3676            let best_path = best_path.ok_or_else(|| {
3677                TransportError::PROTOCOL_VIOLATION("No suitable NAT traversal path")
3678            })?;
3679
3680            debug!(
3681                "Migrating to NAT traversal path: {} -> {} (priority: {})",
3682                self.path.remote, best_path.remote_addr, best_path.priority
3683            );
3684
3685            (best_path.remote_addr, best_path.local_addr)
3686        };
3687
3688        // Perform the migration
3689        self.migrate(now, remote_addr);
3690
3691        // Update local address if needed
3692        if local_addr != SocketAddr::new(std::net::IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED), 0) {
3693            self.local_ip = Some(local_addr.ip());
3694        }
3695
3696        // Queue a PATH_CHALLENGE to confirm the new path
3697        self.path.challenge_pending = true;
3698
3699        Ok(())
3700    }
3701
3702    /// Switch to a previously unused remote connection ID, if possible
3703    fn update_rem_cid(&mut self) {
3704        let (reset_token, retired) = match self.rem_cids.next() {
3705            Some(x) => x,
3706            None => return,
3707        };
3708
3709        // Retire the current remote CID and any CIDs we had to skip.
3710        self.spaces[SpaceId::Data]
3711            .pending
3712            .retire_cids
3713            .extend(retired);
3714        self.set_reset_token(reset_token);
3715    }
3716
3717    fn set_reset_token(&mut self, reset_token: ResetToken) {
3718        self.endpoint_events
3719            .push_back(EndpointEventInner::ResetToken(
3720                self.path.remote,
3721                reset_token,
3722            ));
3723        self.peer_params.stateless_reset_token = Some(reset_token);
3724    }
3725
3726    /// Issue an initial set of connection IDs to the peer upon connection
3727    fn issue_first_cids(&mut self, now: Instant) {
3728        if self.local_cid_state.cid_len() == 0 {
3729            return;
3730        }
3731
3732        // Subtract 1 to account for the CID we supplied while handshaking
3733        let mut n = self.peer_params.issue_cids_limit() - 1;
3734        if let ConnectionSide::Server { server_config } = &self.side {
3735            if server_config.has_preferred_address() {
3736                // We also sent a CID in the transport parameters
3737                n -= 1;
3738            }
3739        }
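        // Worked example (hypothetical limit): with a peer issue_cids_limit() of 8 we issue
        // 8 - 1 = 7 new CIDs here, or 8 - 1 - 1 = 6 if a preferred address already consumed
        // one in the transport parameters.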
3740        self.endpoint_events
3741            .push_back(EndpointEventInner::NeedIdentifiers(now, n));
3742    }
3743
3744    fn populate_packet(
3745        &mut self,
3746        now: Instant,
3747        space_id: SpaceId,
3748        buf: &mut Vec<u8>,
3749        max_size: usize,
3750        pn: u64,
3751    ) -> SentFrames {
3752        let mut sent = SentFrames::default();
3753        let space = &mut self.spaces[space_id];
3754        let is_0rtt = space_id == SpaceId::Data && space.crypto.is_none();
3755        space.pending_acks.maybe_ack_non_eliciting();
3756
3757        // HANDSHAKE_DONE
3758        if !is_0rtt && mem::replace(&mut space.pending.handshake_done, false) {
3759            buf.write(frame::FrameType::HANDSHAKE_DONE);
3760            sent.retransmits.get_or_create().handshake_done = true;
3761            // This is just a u8 counter, and the frame is typically sent only once
3762            self.stats.frame_tx.handshake_done =
3763                self.stats.frame_tx.handshake_done.saturating_add(1);
3764        }
3765
3766        // PING
3767        if mem::replace(&mut space.ping_pending, false) {
3768            trace!("PING");
3769            buf.write(frame::FrameType::PING);
3770            sent.non_retransmits = true;
3771            self.stats.frame_tx.ping += 1;
3772        }
3773
3774        // IMMEDIATE_ACK
3775        if mem::replace(&mut space.immediate_ack_pending, false) {
3776            trace!("IMMEDIATE_ACK");
3777            buf.write(frame::FrameType::IMMEDIATE_ACK);
3778            sent.non_retransmits = true;
3779            self.stats.frame_tx.immediate_ack += 1;
3780        }
3781
3782        // ACK
3783        if space.pending_acks.can_send() {
3784            Self::populate_acks(
3785                now,
3786                self.receiving_ecn,
3787                &mut sent,
3788                space,
3789                buf,
3790                &mut self.stats,
3791            );
3792        }
3793
3794        // ACK_FREQUENCY
3795        if mem::replace(&mut space.pending.ack_frequency, false) {
3796            let sequence_number = self.ack_frequency.next_sequence_number();
3797
3798            // Safe to unwrap because this is always provided when ACK frequency is enabled
3799            let config = self.config.ack_frequency_config.as_ref().unwrap();
3800
3801            // Ensure the delay is within bounds to avoid a PROTOCOL_VIOLATION error
3802            let max_ack_delay = self.ack_frequency.candidate_max_ack_delay(
3803                self.path.rtt.get(),
3804                config,
3805                &self.peer_params,
3806            );
3807
3808            trace!(?max_ack_delay, "ACK_FREQUENCY");
3809
3810            frame::AckFrequency {
3811                sequence: sequence_number,
3812                ack_eliciting_threshold: config.ack_eliciting_threshold,
3813                request_max_ack_delay: max_ack_delay.as_micros().try_into().unwrap_or(VarInt::MAX),
3814                reordering_threshold: config.reordering_threshold,
3815            }
3816            .encode(buf);
3817
3818            sent.retransmits.get_or_create().ack_frequency = true;
3819
3820            self.ack_frequency.ack_frequency_sent(pn, max_ack_delay);
3821            self.stats.frame_tx.ack_frequency += 1;
3822        }
3823
3824        // PATH_CHALLENGE
3825        if buf.len() + 9 < max_size && space_id == SpaceId::Data {
3826            // Transmit challenges with every outgoing frame on an unvalidated path
3827            if let Some(token) = self.path.challenge {
3828                // But only send a packet solely for that purpose at most once
3829                self.path.challenge_pending = false;
3830                sent.non_retransmits = true;
3831                sent.requires_padding = true;
3832                trace!("PATH_CHALLENGE {:08x}", token);
3833                buf.write(frame::FrameType::PATH_CHALLENGE);
3834                buf.write(token);
3835                self.stats.frame_tx.path_challenge += 1;
3836            }
3837
3838            // TODO: Send NAT traversal PATH_CHALLENGE frames
3839            // Currently, the packet sending infrastructure only supports sending to the
3840            // primary path (self.path.remote). To properly support NAT traversal, we need
3841            // to modify poll_transmit and the packet building logic to generate packets
3842            // for multiple destination addresses. For now, NAT traversal challenges are
3843            // queued in self.nat_traversal_challenges but not yet sent.
3844            // This will be implemented in a future phase when we add multi-destination
3845            // packet support to the endpoint.
3846        }
3847
3848        // PATH_RESPONSE
3849        if buf.len() + 9 < max_size && space_id == SpaceId::Data {
3850            if let Some(token) = self.path_responses.pop_on_path(self.path.remote) {
3851                sent.non_retransmits = true;
3852                sent.requires_padding = true;
3853                trace!("PATH_RESPONSE {:08x}", token);
3854                buf.write(frame::FrameType::PATH_RESPONSE);
3855                buf.write(token);
3856                self.stats.frame_tx.path_response += 1;
3857            }
3858        }
3859
3860        // CRYPTO
3861        while buf.len() + frame::Crypto::SIZE_BOUND < max_size && !is_0rtt {
3862            let mut frame = match space.pending.crypto.pop_front() {
3863                Some(x) => x,
3864                None => break,
3865            };
3866
3867            // Calculate the maximum amount of crypto data we can store in the buffer.
3868            // Since the offset is known, we can reserve the exact size required to encode it.
3869            // For the length we reserve 2 bytes, which can encode values up to 2^14 - 1,
3870            // more than what fits into a normally sized QUIC frame.
3871            let max_crypto_data_size = max_size
3872                - buf.len()
3873                - 1 // Frame Type
3874                - VarInt::size(unsafe { VarInt::from_u64_unchecked(frame.offset) })
3875                - 2; // Maximum encoded length for frame size, given we send less than 2^14 bytes
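            // Illustrative example: with max_size = 1200, buf.len() = 100 and a zero
            // offset (1-byte varint), this caps the CRYPTO payload at
            // 1200 - 100 - 1 - 1 - 2 = 1096 bytes.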
3876
3877            // Use PQC-aware sizing for CRYPTO frames
3878            let available_space = max_size - buf.len();
3879            let remaining_data = frame.data.len();
3880            let optimal_size = self
3881                .pqc_state
3882                .calculate_crypto_frame_size(available_space, remaining_data);
3883
3884            let len = frame
3885                .data
3886                .len()
3887                .min(2usize.pow(14) - 1)
3888                .min(max_crypto_data_size)
3889                .min(optimal_size);
3890
3891            let data = frame.data.split_to(len);
3892            let truncated = frame::Crypto {
3893                offset: frame.offset,
3894                data,
3895            };
3896            trace!(
3897                "CRYPTO: off {} len {}",
3898                truncated.offset,
3899                truncated.data.len()
3900            );
3901            truncated.encode(buf);
3902            self.stats.frame_tx.crypto += 1;
3903            sent.retransmits.get_or_create().crypto.push_back(truncated);
3904            if !frame.data.is_empty() {
3905                frame.offset += len as u64;
3906                space.pending.crypto.push_front(frame);
3907            }
3908        }
3909
3910        if space_id == SpaceId::Data {
3911            self.streams.write_control_frames(
3912                buf,
3913                &mut space.pending,
3914                &mut sent.retransmits,
3915                &mut self.stats.frame_tx,
3916                max_size,
3917            );
3918        }
3919
3920        // NEW_CONNECTION_ID
3921        while buf.len() + 44 < max_size {
3922            let issued = match space.pending.new_cids.pop() {
3923                Some(x) => x,
3924                None => break,
3925            };
3926            trace!(
3927                sequence = issued.sequence,
3928                id = %issued.id,
3929                "NEW_CONNECTION_ID"
3930            );
3931            frame::NewConnectionId {
3932                sequence: issued.sequence,
3933                retire_prior_to: self.local_cid_state.retire_prior_to(),
3934                id: issued.id,
3935                reset_token: issued.reset_token,
3936            }
3937            .encode(buf);
3938            sent.retransmits.get_or_create().new_cids.push(issued);
3939            self.stats.frame_tx.new_connection_id += 1;
3940        }
3941
3942        // RETIRE_CONNECTION_ID
3943        while buf.len() + frame::RETIRE_CONNECTION_ID_SIZE_BOUND < max_size {
3944            let seq = match space.pending.retire_cids.pop() {
3945                Some(x) => x,
3946                None => break,
3947            };
3948            trace!(sequence = seq, "RETIRE_CONNECTION_ID");
3949            buf.write(frame::FrameType::RETIRE_CONNECTION_ID);
3950            buf.write_var(seq);
3951            sent.retransmits.get_or_create().retire_cids.push(seq);
3952            self.stats.frame_tx.retire_connection_id += 1;
3953        }
3954
3955        // DATAGRAM
3956        let mut sent_datagrams = false;
3957        while buf.len() + Datagram::SIZE_BOUND < max_size && space_id == SpaceId::Data {
3958            match self.datagrams.write(buf, max_size) {
3959                true => {
3960                    sent_datagrams = true;
3961                    sent.non_retransmits = true;
3962                    self.stats.frame_tx.datagram += 1;
3963                }
3964                false => break,
3965            }
3966        }
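        // If the application was previously blocked from queueing datagrams because the
        // outgoing buffer was full, draining at least one datagram frees space, so let
        // it know it may try again.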
3967        if self.datagrams.send_blocked && sent_datagrams {
3968            self.events.push_back(Event::DatagramsUnblocked);
3969            self.datagrams.send_blocked = false;
3970        }
3971
3972        // NEW_TOKEN
3973        while let Some(remote_addr) = space.pending.new_tokens.pop() {
3974            debug_assert_eq!(space_id, SpaceId::Data);
3975            let ConnectionSide::Server { server_config } = &self.side else {
3976                // This should never happen as clients don't enqueue NEW_TOKEN frames
3977                debug_assert!(false, "NEW_TOKEN frames should not be enqueued by clients");
3978                continue;
3979            };
3980
3981            if remote_addr != self.path.remote {
3982                // NEW_TOKEN frames contain tokens bound to a client's IP address, and are only
3983                // useful if used from the same IP address. Thus, we abandon enqueued NEW_TOKEN
3984                // frames upon a path change. When the new path becomes validated, NEW_TOKEN
3985                // frames may be enqueued for it instead.
3986                continue;
3987            }
3988
3989            // If configured to delay until binding and we don't yet have a peer id,
3990            // postpone NEW_TOKEN issuance.
3991            if self.delay_new_token_until_binding && self.peer_id_for_tokens.is_none() {
3992                // Requeue and try again later
3993                space.pending.new_tokens.push(remote_addr);
3994                break;
3995            }
3996
3997            // Issue token v2 if we have a bound peer id; otherwise fall back to legacy
3998            let new_token = if let Some(pid) = self.peer_id_for_tokens {
3999                // Compose token_v2: pt = peer_id[32] || cid_len[1] || cid[..] || nonce16
4000                // token = pt || the first 12 bytes of the nonce
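                // Resulting layout (cid_len = L): bytes 0..32 hold the peer id, byte 32
                // holds L, bytes 33..33+L hold the connection id, followed by the
                // 16-byte nonce and then its first 12 bytes again: 61 + L bytes total.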
4001                let nonce_u128: u128 = self.rng.r#gen();
4002                let nonce = nonce_u128.to_le_bytes();
4003                let cid = self.rem_cids.active();
4004                let mut pt = Vec::with_capacity(32 + 1 + cid.len() + 16);
4005                pt.extend_from_slice(&pid.0);
4006                pt.push(cid.len() as u8);
4007                pt.extend_from_slice(&cid[..]);
4008                pt.extend_from_slice(&nonce);
4009                let mut tok = pt;
4010                tok.extend_from_slice(&nonce[..12]);
4011                NewToken { token: tok.into() }
4012            } else {
4013                let token = Token::new(
4014                    TokenPayload::Validation {
4015                        ip: remote_addr.ip(),
4016                        issued: server_config.time_source.now(),
4017                    },
4018                    &mut self.rng,
4019                );
4020                NewToken {
4021                    token: token.encode(&*server_config.token_key).into(),
4022                }
4023            };
4024
4025            if buf.len() + new_token.size() >= max_size {
4026                space.pending.new_tokens.push(remote_addr);
4027                break;
4028            }
4029
4030            new_token.encode(buf);
4031            sent.retransmits
4032                .get_or_create()
4033                .new_tokens
4034                .push(remote_addr);
4035            self.stats.frame_tx.new_token += 1;
4036        }
4037
4038        // NAT traversal frames - AddAddress
4039        while buf.len() + frame::AddAddress::SIZE_BOUND < max_size && space_id == SpaceId::Data {
4040            let add_address = match space.pending.add_addresses.pop() {
4041                Some(x) => x,
4042                None => break,
4043            };
4044            trace!(
4045                sequence = %add_address.sequence,
4046                address = %add_address.address,
4047                "ADD_ADDRESS"
4048            );
4049            // Use the correct encoding format based on negotiated configuration
4050            if self.nat_traversal_frame_config.use_rfc_format {
4051                add_address.encode_rfc(buf);
4052            } else {
4053                add_address.encode_legacy(buf);
4054            }
4055            sent.retransmits
4056                .get_or_create()
4057                .add_addresses
4058                .push(add_address);
4059            self.stats.frame_tx.add_address += 1;
4060        }
4061
4062        // NAT traversal frames - PunchMeNow
4063        while buf.len() + frame::PunchMeNow::SIZE_BOUND < max_size && space_id == SpaceId::Data {
4064            let punch_me_now = match space.pending.punch_me_now.pop() {
4065                Some(x) => x,
4066                None => break,
4067            };
4068            trace!(
4069                round = %punch_me_now.round,
4070                paired_with_sequence_number = %punch_me_now.paired_with_sequence_number,
4071                "PUNCH_ME_NOW"
4072            );
4073            // Use the correct encoding format based on negotiated configuration
4074            if self.nat_traversal_frame_config.use_rfc_format {
4075                punch_me_now.encode_rfc(buf);
4076            } else {
4077                punch_me_now.encode_legacy(buf);
4078            }
4079            sent.retransmits
4080                .get_or_create()
4081                .punch_me_now
4082                .push(punch_me_now);
4083            self.stats.frame_tx.punch_me_now += 1;
4084        }
4085
4086        // NAT traversal frames - RemoveAddress
4087        while buf.len() + frame::RemoveAddress::SIZE_BOUND < max_size && space_id == SpaceId::Data {
4088            let remove_address = match space.pending.remove_addresses.pop() {
4089                Some(x) => x,
4090                None => break,
4091            };
4092            trace!(
4093                sequence = %remove_address.sequence,
4094                "REMOVE_ADDRESS"
4095            );
4096            // RemoveAddress has the same format in both RFC and legacy versions
4097            remove_address.encode(buf);
4098            sent.retransmits
4099                .get_or_create()
4100                .remove_addresses
4101                .push(remove_address);
4102            self.stats.frame_tx.remove_address += 1;
4103        }
4104
4105        // OBSERVED_ADDRESS frames
4106        while buf.len() + frame::ObservedAddress::SIZE_BOUND < max_size && space_id == SpaceId::Data
4107        {
4108            let observed_address = match space.pending.outbound_observations.pop() {
4109                Some(x) => x,
4110                None => break,
4111            };
4112            trace!(
4113                address = %observed_address.address,
4114                "OBSERVED_ADDRESS"
4115            );
4116            observed_address.encode(buf);
4117            sent.retransmits
4118                .get_or_create()
4119                .outbound_observations
4120                .push(observed_address);
4121            self.stats.frame_tx.observed_address += 1;
4122        }
4123
4124        // STREAM
4125        if space_id == SpaceId::Data {
4126            sent.stream_frames =
4127                self.streams
4128                    .write_stream_frames(buf, max_size, self.config.send_fairness);
4129            self.stats.frame_tx.stream += sent.stream_frames.len() as u64;
4130        }
4131
4132        sent
4133    }
4134
4135    /// Write pending ACKs into a buffer
4136    ///
4137    /// This method assumes ACKs are pending, and should only be called when
4138    /// `PendingAcks::ranges()` is non-empty.
4139    fn populate_acks(
4140        now: Instant,
4141        receiving_ecn: bool,
4142        sent: &mut SentFrames,
4143        space: &mut PacketSpace,
4144        buf: &mut Vec<u8>,
4145        stats: &mut ConnectionStats,
4146    ) {
4147        debug_assert!(!space.pending_acks.ranges().is_empty());
4148
4149        // 0-RTT packets must never carry acks (which would have to be of handshake packets)
4150        debug_assert!(space.crypto.is_some(), "tried to send ACK in 0-RTT");
4151        let ecn = if receiving_ecn {
4152            Some(&space.ecn_counters)
4153        } else {
4154            None
4155        };
4156        sent.largest_acked = space.pending_acks.ranges().max();
4157
4158        let delay_micros = space.pending_acks.ack_delay(now).as_micros() as u64;
4159
4160        // TODO: This should come from `TransportConfig` if it ever becomes configurable.
4161        let ack_delay_exp = TransportParameters::default().ack_delay_exponent;
4162        let delay = delay_micros >> ack_delay_exp.into_inner();
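        // Illustrative example: with the default ack_delay_exponent of 3, a measured
        // delay of 1000 microseconds is encoded as 1000 >> 3 = 125.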
4163
4164        trace!(
4165            "ACK {:?}, Delay = {}us",
4166            space.pending_acks.ranges(),
4167            delay_micros
4168        );
4169
4170        frame::Ack::encode(delay as _, space.pending_acks.ranges(), ecn, buf);
4171        stats.frame_tx.acks += 1;
4172    }
4173
4174    fn close_common(&mut self) {
4175        trace!("connection closed");
4176        for &timer in &Timer::VALUES {
4177            self.timers.stop(timer);
4178        }
4179    }
4180
4181    fn set_close_timer(&mut self, now: Instant) {
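        // Per RFC 9000 §10.2, keep the connection in the closing/draining state for at
        // least three times the current probe timeout before discarding state.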
4182        self.timers
4183            .set(Timer::Close, now + 3 * self.pto(self.highest_space));
4184    }
4185
4186    /// Handle transport parameters received from the peer
4187    fn handle_peer_params(&mut self, params: TransportParameters) -> Result<(), TransportError> {
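        // Authenticate the connection IDs echoed in the peer's transport parameters
        // (RFC 9000 §7.3): initial_src_cid must match what we observed, and a client
        // additionally checks original_dst_cid and retry_src_cid.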
4188        if Some(self.orig_rem_cid) != params.initial_src_cid
4189            || (self.side.is_client()
4190                && (Some(self.initial_dst_cid) != params.original_dst_cid
4191                    || self.retry_src_cid != params.retry_src_cid))
4192        {
4193            return Err(TransportError::TRANSPORT_PARAMETER_ERROR(
4194                "CID authentication failure",
4195            ));
4196        }
4197
4198        self.set_peer_params(params);
4199
4200        Ok(())
4201    }
4202
4203    fn set_peer_params(&mut self, params: TransportParameters) {
4204        self.streams.set_params(&params);
4205        self.idle_timeout =
4206            negotiate_max_idle_timeout(self.config.max_idle_timeout, Some(params.max_idle_timeout));
4207        trace!("negotiated max idle timeout {:?}", self.idle_timeout);
4208        if let Some(ref info) = params.preferred_address {
4209            self.rem_cids.insert(frame::NewConnectionId {
4210                sequence: 1,
4211                id: info.connection_id,
4212                reset_token: info.stateless_reset_token,
4213                retire_prior_to: 0,
4214            }).expect("preferred address CID is the first received, and hence is guaranteed to be legal");
4215        }
4216        self.ack_frequency.peer_max_ack_delay = get_max_ack_delay(&params);
4217
4218        // Handle NAT traversal capability negotiation
4219        self.negotiate_nat_traversal_capability(&params);
4220
4221        // Update NAT traversal frame format configuration based on negotiated parameters
4222        // Check if we have NAT traversal enabled in our config
4223        let local_has_nat_traversal = self.config.nat_traversal_config.is_some();
4224        // For now, assume we support RFC if NAT traversal is enabled
4225        // TODO: Add proper RFC support flag to TransportConfig
4226        let local_supports_rfc = local_has_nat_traversal;
4227        self.nat_traversal_frame_config = frame::nat_traversal_unified::NatTraversalFrameConfig {
4228            // Use RFC format only if both endpoints support it
4229            use_rfc_format: local_supports_rfc && params.supports_rfc_nat_traversal(),
4230            // Always accept legacy for backward compatibility
4231            accept_legacy: true,
4232        };
4233
4234        // Handle address discovery negotiation
4235        self.negotiate_address_discovery(&params);
4236
4237        // Update PQC state based on peer parameters
4238        self.pqc_state.update_from_peer_params(&params);
4239
4240        // If PQC is enabled, adjust MTU discovery configuration
4241        if self.pqc_state.enabled && self.pqc_state.using_pqc {
4242            trace!("PQC enabled, adjusting MTU discovery for larger handshake packets");
4243            // When PQC is enabled, we need to handle larger packets during handshake
4244            // The actual MTU discovery will probe up to the peer's max_udp_payload_size
4245            // or the PQC handshake MTU, whichever is smaller
4246            let current_mtu = self.path.mtud.current_mtu();
4247            if current_mtu < self.pqc_state.handshake_mtu {
4248                trace!(
4249                    "Current MTU {} is less than PQC handshake MTU {}, will rely on MTU discovery",
4250                    current_mtu, self.pqc_state.handshake_mtu
4251                );
4252            }
4253        }
4254
4255        self.peer_params = params;
4256        self.path.mtud.on_peer_max_udp_payload_size_received(
4257            u16::try_from(self.peer_params.max_udp_payload_size.into_inner()).unwrap_or(u16::MAX),
4258        );
4259    }
4260
4261    /// Negotiate NAT traversal capability between local and peer configurations
4262    fn negotiate_nat_traversal_capability(&mut self, params: &TransportParameters) {
4263        // Check if peer supports NAT traversal
4264        let peer_nat_config = match &params.nat_traversal {
4265            Some(config) => config,
4266            None => {
4267                // Peer doesn't support NAT traversal - handle backward compatibility
4268                if self.config.nat_traversal_config.is_some() {
4269                    debug!(
4270                        "Peer does not support NAT traversal, maintaining backward compatibility"
4271                    );
4272                    self.emit_nat_traversal_capability_event(false);
4273
4274                    // Set connection state to indicate NAT traversal is not available
4275                    self.set_nat_traversal_compatibility_mode(false);
4276                }
4277                return;
4278            }
4279        };
4280
4281        // Check if we support NAT traversal locally
4282        let local_nat_config = match &self.config.nat_traversal_config {
4283            Some(config) => config,
4284            None => {
4285                debug!("NAT traversal not enabled locally, ignoring peer support");
4286                self.emit_nat_traversal_capability_event(false);
4287                self.set_nat_traversal_compatibility_mode(false);
4288                return;
4289            }
4290        };
4291
4292        // Both peers support NAT traversal - proceed with capability negotiation
4293        info!("Both peers support NAT traversal, negotiating capabilities");
4294
4295        // Validate role compatibility and negotiate parameters
4296        match self.negotiate_nat_traversal_parameters(local_nat_config, peer_nat_config) {
4297            Ok(negotiated_config) => {
4298                info!("NAT traversal capability negotiated successfully");
4299                self.emit_nat_traversal_capability_event(true);
4300
4301                // Initialize NAT traversal with negotiated parameters
4302                self.init_nat_traversal_with_negotiated_config(&negotiated_config);
4303
4304                // Set connection state to indicate NAT traversal is available
4305                self.set_nat_traversal_compatibility_mode(true);
4306
4307                // Start NAT traversal process if we're in a client role
4308                if matches!(
4309                    negotiated_config,
4310                    crate::transport_parameters::NatTraversalConfig::ClientSupport
4311                ) {
4312                    self.initiate_nat_traversal_process();
4313                }
4314            }
4315            Err(e) => {
4316                warn!("NAT traversal capability negotiation failed: {}", e);
4317                self.emit_nat_traversal_capability_event(false);
4318                self.set_nat_traversal_compatibility_mode(false);
4319            }
4320        }
4321    }
4322
4323    /* FIXME: This function needs to be rewritten for the new enum-based NatTraversalConfig
4324    /// Validate that NAT traversal roles are compatible
4325    fn validate_nat_traversal_roles(
4326        &self,
4327        local_config: &crate::transport_parameters::NatTraversalConfig,
4328        peer_config: &crate::transport_parameters::NatTraversalConfig,
4329    ) -> Result<(), String> {
4330        // Check for invalid role combinations
4331        match (&local_config.role, &peer_config.role) {
4332            // Both bootstrap nodes - this is unusual but allowed
4333            (
4334                crate::transport_parameters::NatTraversalRole::Bootstrap,
4335                crate::transport_parameters::NatTraversalRole::Bootstrap,
4336            ) => {
4337                debug!("Both endpoints are bootstrap nodes - unusual but allowed");
4338            }
4339            // Client-Server combinations are ideal
4340            (
4341                crate::transport_parameters::NatTraversalRole::Client,
4342                crate::transport_parameters::NatTraversalRole::Server { .. },
4343            )
4344            | (
4345                crate::transport_parameters::NatTraversalRole::Server { .. },
4346                crate::transport_parameters::NatTraversalRole::Client,
4347            ) => {
4348                debug!("Client-Server NAT traversal role combination");
4349            }
4350            // Bootstrap can coordinate with anyone
4351            (crate::transport_parameters::NatTraversalRole::Bootstrap, _)
4352            | (_, crate::transport_parameters::NatTraversalRole::Bootstrap) => {
4353                debug!("Bootstrap node coordination");
4354            }
4355            // Client-Client requires bootstrap coordination
4356            (
4357                crate::transport_parameters::NatTraversalRole::Client,
4358                crate::transport_parameters::NatTraversalRole::Client,
4359            ) => {
4360                debug!("Client-Client connection requires bootstrap coordination");
4361            }
4362            // Server-Server is allowed but may need coordination
4363            (
4364                crate::transport_parameters::NatTraversalRole::Server { .. },
4365                crate::transport_parameters::NatTraversalRole::Server { .. },
4366            ) => {
4367                debug!("Server-Server connection");
4368            }
4369        }
4370
4371        Ok(())
4372    }
4373    */
4374
4375    /// Emit NAT traversal capability negotiation event
4376    fn emit_nat_traversal_capability_event(&mut self, negotiated: bool) {
4377        // For now, we'll just log the event
4378        // In a full implementation, this could emit an event that applications can listen to
4379        if negotiated {
4380            info!("NAT traversal capability successfully negotiated");
4381        } else {
4382            info!("NAT traversal capability not available (peer or local support missing)");
4383        }
4384
4385        // Could add to events queue if needed:
4386        // self.events.push_back(Event::NatTraversalCapability { negotiated });
4387    }
4388
4389    /// Set NAT traversal compatibility mode for backward compatibility
4390    fn set_nat_traversal_compatibility_mode(&mut self, enabled: bool) {
4391        if enabled {
4392            debug!("NAT traversal enabled for this connection");
4393            // Connection supports NAT traversal - no special handling needed
4394        } else {
4395            debug!("NAT traversal disabled for this connection (backward compatibility mode)");
4396            // Ensure NAT traversal state is cleared if it was partially initialized
4397            if self.nat_traversal.is_some() {
4398                warn!("Clearing NAT traversal state due to compatibility mode");
4399                self.nat_traversal = None;
4400            }
4401        }
4402    }
4403
4404    /// Negotiate NAT traversal parameters between local and peer configurations
4405    fn negotiate_nat_traversal_parameters(
4406        &self,
4407        local_config: &crate::transport_parameters::NatTraversalConfig,
4408        peer_config: &crate::transport_parameters::NatTraversalConfig,
4409    ) -> Result<crate::transport_parameters::NatTraversalConfig, String> {
4410        // With the new enum-based config, negotiation is simple:
4411        // - Client/Server roles are determined by who initiated the connection
4412        // - Concurrency limit is taken from the server's config
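        // For example (limits are illustrative): ClientSupport + ServerSupport { 8 }
        // negotiates to ServerSupport { 8 }; two ServerSupport peers with limits 8 and
        // 4 negotiate down to ServerSupport { 4 }.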
4413
4414        match (local_config, peer_config) {
4415            // We're client, peer is server - use server's concurrency limit
4416            (
4417                crate::transport_parameters::NatTraversalConfig::ClientSupport,
4418                crate::transport_parameters::NatTraversalConfig::ServerSupport {
4419                    concurrency_limit,
4420                },
4421            ) => Ok(
4422                crate::transport_parameters::NatTraversalConfig::ServerSupport {
4423                    concurrency_limit: *concurrency_limit,
4424                },
4425            ),
4426            // We're server, peer is client - use our concurrency limit
4427            (
4428                crate::transport_parameters::NatTraversalConfig::ServerSupport {
4429                    concurrency_limit,
4430                },
4431                crate::transport_parameters::NatTraversalConfig::ClientSupport,
4432            ) => Ok(
4433                crate::transport_parameters::NatTraversalConfig::ServerSupport {
4434                    concurrency_limit: *concurrency_limit,
4435                },
4436            ),
4437            // Both are servers (e.g., peer-to-peer) - use minimum concurrency
4438            (
4439                crate::transport_parameters::NatTraversalConfig::ServerSupport {
4440                    concurrency_limit: limit1,
4441                },
4442                crate::transport_parameters::NatTraversalConfig::ServerSupport {
4443                    concurrency_limit: limit2,
4444                },
4445            ) => Ok(
4446                crate::transport_parameters::NatTraversalConfig::ServerSupport {
4447                    concurrency_limit: (*limit1).min(*limit2),
4448                },
4449            ),
4450            // Both are clients - shouldn't happen in normal operation
4451            (
4452                crate::transport_parameters::NatTraversalConfig::ClientSupport,
4453                crate::transport_parameters::NatTraversalConfig::ClientSupport,
4454            ) => Err("Both endpoints claim to be NAT traversal clients".to_string()),
4455        }
4456    }
4457
4458    /// Initialize NAT traversal with negotiated configuration
4459    ///
4460    /// v0.13.0: All nodes are symmetric P2P nodes - no role distinction.
4461    /// Every node can observe addresses, discover candidates, and handle coordination.
4462    fn init_nat_traversal_with_negotiated_config(
4463        &mut self,
4464        _config: &crate::transport_parameters::NatTraversalConfig,
4465    ) {
4466        // v0.13.0: All nodes are symmetric P2P nodes - no role-based configuration
4467        // Use sensible defaults for all nodes
4468        let max_candidates = 50; // Default maximum candidates
4469        let coordination_timeout = Duration::from_secs(10); // Default 10 second timeout
4470
4471        // Initialize NAT traversal state (no role parameter - all nodes are symmetric)
4472        self.nat_traversal = Some(NatTraversalState::new(max_candidates, coordination_timeout));
4473
4474        trace!("NAT traversal initialized for symmetric P2P node");
4475
4476        // v0.13.0: All nodes perform all initialization - no role-specific branching
4477        // All nodes can observe addresses, discover candidates, and coordinate
4478        self.prepare_address_observation();
4479        self.schedule_candidate_discovery();
4480        self.prepare_coordination_handling();
4481    }
4482
4483    /// Initiate NAT traversal process for client endpoints
4484    fn initiate_nat_traversal_process(&mut self) {
4485        if let Some(nat_state) = &mut self.nat_traversal {
4486            match nat_state.start_candidate_discovery() {
4487                Ok(()) => {
4488                    debug!("NAT traversal process initiated - candidate discovery started");
4489                    // Schedule the first coordination attempt
4490                    self.timers.set(
4491                        Timer::NatTraversal,
4492                        Instant::now() + Duration::from_millis(100),
4493                    );
4494                }
4495                Err(e) => {
4496                    warn!("Failed to initiate NAT traversal process: {}", e);
4497                }
4498            }
4499        }
4500    }
4501
4502    /// Prepare for address observation (v0.13.0: every node can observe peer addresses)
4503    fn prepare_address_observation(&mut self) {
4504        debug!("Preparing for address observation");
4505        // Every node is ready to observe peer addresses immediately; no additional
4506        // setup is needed - observation happens during connection establishment.
4507    }
4508
4509    /// Schedule candidate discovery for later execution
4510    fn schedule_candidate_discovery(&mut self) {
4511        debug!("Scheduling candidate discovery");
4512        // Set a timer to start candidate discovery after connection establishment
4513        self.timers.set(
4514            Timer::NatTraversal,
4515            Instant::now() + Duration::from_millis(50),
4516        );
4517    }
4518
4519    /// Prepare to handle coordination requests (v0.13.0: every node can coordinate)
4520    fn prepare_coordination_handling(&mut self) {
4521        debug!("Preparing to handle coordination requests");
4522        // Every node is ready to handle coordination requests immediately; no additional
4523        // setup is needed - coordination happens via frame processing.
4524    }
4525
4526    /// Handle NAT traversal timeout events
4527    fn handle_nat_traversal_timeout(&mut self, now: Instant) {
4528        // First get the actions from nat_state
4529        let timeout_result = if let Some(nat_state) = &mut self.nat_traversal {
4530            nat_state.handle_timeout(now)
4531        } else {
4532            return;
4533        };
4534
4535        // Then handle the actions without holding a mutable borrow to nat_state
4536        match timeout_result {
4537            Ok(actions) => {
4538                for action in actions {
4539                    match action {
4540                        nat_traversal::TimeoutAction::RetryDiscovery => {
4541                            debug!("NAT traversal timeout: retrying candidate discovery");
4542                            if let Some(nat_state) = &mut self.nat_traversal {
4543                                if let Err(e) = nat_state.start_candidate_discovery() {
4544                                    warn!("Failed to retry candidate discovery: {}", e);
4545                                }
4546                            }
4547                        }
4548                        nat_traversal::TimeoutAction::RetryCoordination => {
4549                            debug!("NAT traversal timeout: retrying coordination");
4550                            // Schedule next coordination attempt
4551                            self.timers
4552                                .set(Timer::NatTraversal, now + Duration::from_secs(2));
4553                        }
4554                        nat_traversal::TimeoutAction::StartValidation => {
4555                            debug!("NAT traversal timeout: starting path validation");
4556                            self.start_nat_traversal_validation(now);
4557                        }
4558                        nat_traversal::TimeoutAction::Complete => {
4559                            debug!("NAT traversal completed successfully");
4560                            // NAT traversal is complete, no more timeouts needed
4561                            self.timers.stop(Timer::NatTraversal);
4562                        }
4563                        nat_traversal::TimeoutAction::Failed => {
4564                            warn!("NAT traversal failed after timeout");
4565                            // Consider fallback options or connection failure
4566                            self.handle_nat_traversal_failure();
4567                        }
4568                    }
4569                }
4570            }
4571            Err(e) => {
4572                warn!("NAT traversal timeout handling failed: {}", e);
4573                self.handle_nat_traversal_failure();
4574            }
4575        }
4576    }
4577
4578    /// Start NAT traversal path validation
4579    fn start_nat_traversal_validation(&mut self, now: Instant) {
4580        if let Some(nat_state) = &mut self.nat_traversal {
4581            // Get candidate pairs that need validation
4582            let pairs = nat_state.get_next_validation_pairs(3);
4583
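            // Note: challenges are currently sent on the primary path only (see the
            // TODO in the frame-population code above), so each iteration overwrites
            // self.path.challenge and only the last pair's challenge stays pending.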
4584            for pair in pairs {
4585                // Send PATH_CHALLENGE to validate the path
4586                let challenge = self.rng.r#gen();
4587                self.path.challenge = Some(challenge);
4588                self.path.challenge_pending = true;
4589
4590                debug!(
4591                    "Starting path validation for NAT traversal candidate: {}",
4592                    pair.remote_addr
4593                );
4594            }
4595
4596            // Set validation timeout
4597            self.timers
4598                .set(Timer::PathValidation, now + Duration::from_secs(3));
4599        }
4600    }
4601
4602    /// Handle NAT traversal failure
4603    fn handle_nat_traversal_failure(&mut self) {
4604        warn!("NAT traversal failed, considering fallback options");
4605
4606        // Clear NAT traversal state
4607        self.nat_traversal = None;
4608        self.timers.stop(Timer::NatTraversal);
4609
4610        // In a full implementation, this could:
4611        // 1. Try relay connections
4612        // 2. Emit failure events to the application
4613        // 3. Attempt direct connection as fallback
4614
4615        // For now, we'll just log the failure
4616        debug!("NAT traversal disabled for this connection due to failure");
4617    }
4618
4619    /// Check if NAT traversal is supported and enabled for this connection
4620    pub fn nat_traversal_supported(&self) -> bool {
4621        self.nat_traversal.is_some()
4622            && self.config.nat_traversal_config.is_some()
4623            && self.peer_params.nat_traversal.is_some()
4624    }
4625
4626    /// Get the negotiated NAT traversal configuration
4627    pub fn nat_traversal_config(&self) -> Option<&crate::transport_parameters::NatTraversalConfig> {
4628        self.peer_params.nat_traversal.as_ref()
4629    }
4630
4631    /// Check if the connection is ready for NAT traversal operations
4632    pub fn nat_traversal_ready(&self) -> bool {
4633        self.nat_traversal_supported() && matches!(self.state, State::Established)
4634    }
4635
4636    /// Get NAT traversal statistics for this connection
4637    ///
4638    /// This method is preserved for debugging and monitoring purposes.
4639    /// It may be used in future telemetry or diagnostic features.
4640    #[allow(dead_code)]
4641    pub(crate) fn nat_traversal_stats(&self) -> Option<nat_traversal::NatTraversalStats> {
4642        self.nat_traversal.as_ref().map(|state| state.stats.clone())
4643    }
4644
4645    /// Force enable NAT traversal for testing purposes
4646    ///
4647    /// v0.13.0: Role parameter removed - all nodes are symmetric P2P nodes.
4648    #[cfg(test)]
4649    #[allow(dead_code)]
4650    pub(crate) fn force_enable_nat_traversal(&mut self) {
4651        use crate::transport_parameters::NatTraversalConfig;
4652
4653        // v0.13.0: All nodes use ServerSupport (can coordinate)
4654        let config = NatTraversalConfig::ServerSupport {
4655            concurrency_limit: VarInt::from_u32(5),
4656        };
4657
4658        self.peer_params.nat_traversal = Some(config.clone());
4659        self.config = Arc::new({
4660            let mut transport_config = (*self.config).clone();
4661            transport_config.nat_traversal_config = Some(config);
4662            transport_config
4663        });
4664
4665        // v0.13.0: No role parameter - all nodes are symmetric
4666        self.nat_traversal = Some(NatTraversalState::new(8, Duration::from_secs(10)));
4667    }
4668
4669    /// Derive a peer ID from connection context
4671    fn derive_peer_id_from_connection(&self) -> [u8; 32] {
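        // Note: DefaultHasher is not a cryptographic hash, so this is a best-effort,
        // connection-local identifier derived from the handshake CIDs and remote
        // address, not a verified peer identity.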
4672        // Generate a peer ID based on connection IDs
4673        let mut hasher = std::collections::hash_map::DefaultHasher::new();
4674        use std::hash::Hasher;
4675        hasher.write(&self.rem_handshake_cid);
4676        hasher.write(&self.handshake_cid);
4677        hasher.write(&self.path.remote.to_string().into_bytes());
4678        let hash = hasher.finish();
4679        let mut peer_id = [0u8; 32];
4680        peer_id[..8].copy_from_slice(&hash.to_be_bytes());
4681        // Fill remaining bytes with connection ID data
4682        let cid_bytes = self.rem_handshake_cid.as_ref();
4683        let copy_len = (cid_bytes.len()).min(24);
4684        peer_id[8..8 + copy_len].copy_from_slice(&cid_bytes[..copy_len]);
4685        peer_id
4686    }
4687
4688    /// Handle AddAddress frame from peer
4689    fn handle_add_address(
4690        &mut self,
4691        add_address: &crate::frame::AddAddress,
4692        now: Instant,
4693    ) -> Result<(), TransportError> {
4694        let nat_state = self.nat_traversal.as_mut().ok_or_else(|| {
4695            TransportError::PROTOCOL_VIOLATION("AddAddress frame without NAT traversal negotiation")
4696        })?;
4697
4698        match nat_state.add_remote_candidate(
4699            add_address.sequence,
4700            add_address.address,
4701            add_address.priority,
4702            now,
4703        ) {
4704            Ok(()) => {
4705                trace!(
4706                    "Added remote candidate: {} (seq={}, priority={})",
4707                    add_address.address, add_address.sequence, add_address.priority
4708                );
4709
4710                // Trigger validation of this new candidate
4711                self.trigger_candidate_validation(add_address.address, now)?;
4712                Ok(())
4713            }
4714            Err(NatTraversalError::TooManyCandidates) => Err(TransportError::PROTOCOL_VIOLATION(
4715                "too many NAT traversal candidates",
4716            )),
4717            Err(NatTraversalError::DuplicateAddress) => {
4718                // Silently ignore duplicates (peer may resend)
4719                Ok(())
4720            }
4721            Err(e) => {
4722                warn!("Failed to add remote candidate: {}", e);
4723                Ok(()) // Don't terminate connection for non-critical errors
4724            }
4725        }
4726    }
4727
4728    /// Handle PunchMeNow frame from peer (via coordinator)
4729    ///
4730    /// v0.13.0: All nodes can coordinate - no role check needed.
4731    fn handle_punch_me_now(
4732        &mut self,
4733        punch_me_now: &crate::frame::PunchMeNow,
4734        now: Instant,
4735    ) -> Result<(), TransportError> {
4736        trace!(
4737            "Received PunchMeNow: round={}, target_seq={}, local_addr={}",
4738            punch_me_now.round, punch_me_now.paired_with_sequence_number, punch_me_now.address
4739        );
4740
4741        // v0.13.0: All nodes can coordinate - try coordination first
4742        if let Some(nat_state) = &self.nat_traversal {
4743            // All nodes have bootstrap_coordinator now (v0.13.0)
4744            if nat_state.bootstrap_coordinator.is_some() {
4745                // Process coordination request
4746                let from_peer_id = self.derive_peer_id_from_connection();
4747
4748                // Clone the frame to avoid borrow checker issues
4749                let punch_me_now_clone = punch_me_now.clone();
4750                drop(nat_state); // Release the borrow
4751
4752                match self
4753                    .nat_traversal
4754                    .as_mut()
4755                    .unwrap()
4756                    .handle_punch_me_now_frame(
4757                        from_peer_id,
4758                        self.path.remote,
4759                        &punch_me_now_clone,
4760                        now,
4761                    ) {
4762                    Ok(Some(coordination_frame)) => {
4763                        trace!("Node coordinating PUNCH_ME_NOW between peers");
4764
4765                        // Send coordination frame to target peer via endpoint
4766                        if let Some(target_peer_id) = punch_me_now.target_peer_id {
4767                            self.endpoint_events.push_back(
4768                                crate::shared::EndpointEventInner::RelayPunchMeNow(
4769                                    target_peer_id,
4770                                    coordination_frame,
4771                                ),
4772                            );
4773                        }
4774
4775                        return Ok(());
4776                    }
4777                    Ok(None) => {
4778                        trace!("Coordination completed or no action needed");
4779                        return Ok(());
4780                    }
4781                    Err(e) => {
4782                        warn!("Coordination failed: {}", e);
4783                        return Ok(());
4784                    }
4785                }
4786            }
4787        }
4788
4789        // Otherwise, we're the peer being coordinated - handle the request from the coordinator
4790        let nat_state = self.nat_traversal.as_mut().ok_or_else(|| {
4791            TransportError::PROTOCOL_VIOLATION("PunchMeNow frame without NAT traversal negotiation")
4792        })?;
4793
4794        // Handle peer's coordination request
4795        if nat_state
4796            .handle_peer_punch_request(punch_me_now.round, now)
4797            .map_err(|_e| {
4798                TransportError::PROTOCOL_VIOLATION("Failed to handle peer punch request")
4799            })?
4800        {
4801            trace!("Coordination synchronized for round {}", punch_me_now.round);
4802
4803            // Create punch targets based on the received information
4804            // The peer's address tells us where they'll be listening
4805            let _local_addr = self
4806                .local_ip
4807                .map(|ip| SocketAddr::new(ip, 0))
4808                .unwrap_or_else(|| {
4809                    SocketAddr::new(std::net::IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED), 0)
4810                });
4811
4812            let target = nat_traversal::PunchTarget {
4813                remote_addr: punch_me_now.address,
4814                remote_sequence: punch_me_now.paired_with_sequence_number,
4815                challenge: self.rng.r#gen(),
4816            };
4817
4818            // Start coordination with this target
4819            let _ = nat_state.start_coordination_round(vec![target], now);
4820        } else {
4821            debug!(
4822                "Failed to synchronize coordination for round {}",
4823                punch_me_now.round
4824            );
4825        }
4826
4827        Ok(())
4828    }
4829
4830    /// Handle RemoveAddress frame from peer
4831    fn handle_remove_address(
4832        &mut self,
4833        remove_address: &crate::frame::RemoveAddress,
4834    ) -> Result<(), TransportError> {
4835        let nat_state = self.nat_traversal.as_mut().ok_or_else(|| {
4836            TransportError::PROTOCOL_VIOLATION(
4837                "RemoveAddress frame without NAT traversal negotiation",
4838            )
4839        })?;
4840
4841        if nat_state.remove_candidate(remove_address.sequence) {
4842            trace!(
4843                "Removed candidate with sequence {}",
4844                remove_address.sequence
4845            );
4846        } else {
4847            trace!(
4848                "Attempted to remove unknown candidate sequence {}",
4849                remove_address.sequence
4850            );
4851        }
4852
4853        Ok(())
4854    }
4855
4856    /// Handle ObservedAddress frame from peer
4857    fn handle_observed_address_frame(
4858        &mut self,
4859        observed_address: &crate::frame::ObservedAddress,
4860        now: Instant,
4861    ) -> Result<(), TransportError> {
4862        trace!(
4863            "handle_observed_address_frame: received address {}",
4864            observed_address.address
4865        );
4866        // Get the address discovery state
4867        let state = self.address_discovery_state.as_mut().ok_or_else(|| {
4868            TransportError::PROTOCOL_VIOLATION(
4869                "ObservedAddress frame without address discovery negotiation",
4870            )
4871        })?;
4872
4873        // Check if address discovery is enabled
4874        if !state.enabled {
4875            return Err(TransportError::PROTOCOL_VIOLATION(
4876                "ObservedAddress frame received when address discovery is disabled",
4877            ));
4878        }
4879
4880        // Trace observed address received
4881        #[cfg(feature = "trace")]
4882        {
4883            use crate::trace_observed_address_received;
4884            // Tracing imports handled by macros
4885            trace_observed_address_received!(
4886                &self.event_log,
4887                self.trace_context.trace_id(),
4888                observed_address.address,
4889                0u64 // path_id not part of the frame yet
4890            );
4891        }
4892
4893        // Get the current path ID (0 for primary path in single-path connections)
4894        let path_id = 0u64; // TODO: Support multi-path scenarios
4895
4896        // Check the sequence number per draft-ietf-quic-address-discovery-00:
4897        // "A peer SHOULD ignore an incoming OBSERVED_ADDRESS frame if it previously
4898        // received another OBSERVED_ADDRESS frame for the same path with a Sequence
4899        // Number equal to or higher than the sequence number of the incoming frame."
4900        if let Some(&last_seq) = state.last_received_sequence.get(&path_id) {
4901            if observed_address.sequence_number <= last_seq {
4902                trace!(
4903                    "Ignoring OBSERVED_ADDRESS frame with stale sequence number {} (last was {})",
4904                    observed_address.sequence_number, last_seq
4905                );
4906                return Ok(());
4907            }
4908        }
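        // Illustrative example: after accepting sequence 5 on this path, frames with
        // sequence numbers <= 5 are ignored, while sequence 6 is processed and becomes
        // the new floor.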
4909
4910        // Update the last received sequence number for this path
4911        state
4912            .last_received_sequence
4913            .insert(path_id, observed_address.sequence_number);
4914
4915        // Process the observed address
4916        state.handle_observed_address(observed_address.address, path_id, now);
4917
4918        // Update the path's address info
4919        self.path
4920            .update_observed_address(observed_address.address, now);
4921
4922        // Log the observation
4923        trace!(
4924            "Received ObservedAddress frame: address={} for path={}",
4925            observed_address.address, path_id
4926        );
4927
4928        Ok(())
4929    }
4930
4931    /// Handle TryConnectTo frame - request from peer to attempt connection to a target
4932    ///
4933    /// This is part of the NAT traversal callback mechanism where a peer can request
4934    /// this node to attempt a connection to verify connectivity.
4935    fn handle_try_connect_to(
4936        &mut self,
4937        try_connect_to: &crate::frame::TryConnectTo,
4938        now: Instant,
4939    ) -> Result<(), TransportError> {
4940        trace!(
4941            "Received TryConnectTo: request_id={}, target={}, timeout_ms={}",
4942            try_connect_to.request_id, try_connect_to.target_address, try_connect_to.timeout_ms
4943        );
4944
4945        // Validate the target address (basic security checks)
4946        let target = try_connect_to.target_address;
4947
4948        // Don't allow requests to loopback addresses from remote peers
4949        if target.ip().is_loopback() {
4950            warn!(
4951                "Rejecting TryConnectTo request to loopback address: {}",
4952                target
4953            );
4954            // Queue error response
4955            let response = crate::frame::TryConnectToResponse {
4956                request_id: try_connect_to.request_id,
4957                success: false,
4958                error_code: Some(crate::frame::TryConnectError::InvalidAddress),
4959                source_address: self.path.remote,
4960            };
4961            self.spaces[SpaceId::Data]
4962                .pending
4963                .try_connect_to_responses
4964                .push(response);
4965            return Ok(());
4966        }
4967
4968        // Don't allow requests to unspecified addresses
4969        if target.ip().is_unspecified() {
4970            warn!(
4971                "Rejecting TryConnectTo request to unspecified address: {}",
4972                target
4973            );
4974            let response = crate::frame::TryConnectToResponse {
4975                request_id: try_connect_to.request_id,
4976                success: false,
4977                error_code: Some(crate::frame::TryConnectError::InvalidAddress),
4978                source_address: self.path.remote,
4979            };
4980            self.spaces[SpaceId::Data]
4981                .pending
4982                .try_connect_to_responses
4983                .push(response);
4984            return Ok(());
4985        }
4986
4987        // Queue an endpoint event to perform the connection attempt asynchronously
4988        // The endpoint will handle the actual connection and send back a response
4989        self.endpoint_events
4990            .push_back(EndpointEventInner::TryConnectTo {
4991                request_id: try_connect_to.request_id,
4992                target_address: try_connect_to.target_address,
4993                timeout_ms: try_connect_to.timeout_ms,
4994                requester_connection: self.path.remote,
4995                requested_at: now,
4996            });
4997
4998        trace!(
4999            "Queued TryConnectTo attempt for request_id={}",
5000            try_connect_to.request_id
5001        );
5002
5003        Ok(())
5004    }
5005
5006    /// Handle TryConnectToResponse frame - result of a connection attempt we requested
5007    fn handle_try_connect_to_response(
5008        &mut self,
5009        response: &crate::frame::TryConnectToResponse,
5010    ) -> Result<(), TransportError> {
5011        trace!(
5012            "Received TryConnectToResponse: request_id={}, success={}, error={:?}, source={}",
5013            response.request_id, response.success, response.error_code, response.source_address
5014        );
5015
5016        // If the connection was successful, we've confirmed that the target address
5017        // can receive connections from the peer that attempted the connection
5018        if response.success {
5019            debug!(
5020                "TryConnectTo succeeded: target can receive connections from {}",
5021                response.source_address
5022            );
5023
5024            // Update NAT traversal state with the successful probe result
5025            if let Some(nat_state) = &mut self.nat_traversal {
5026                nat_state
5027                    .record_successful_callback_probe(response.request_id, response.source_address);
5028            }
5029        } else {
5030            debug!("TryConnectTo failed with error: {:?}", response.error_code);
5031
5032            // Update NAT traversal state with the failed probe result
5033            if let Some(nat_state) = &mut self.nat_traversal {
5034                nat_state.record_failed_callback_probe(response.request_id, response.error_code);
5035            }
5036        }
5037
5038        Ok(())
5039    }
5040
5041    /// Queue an AddAddress frame to advertise a new candidate address
5042    pub fn queue_add_address(&mut self, sequence: VarInt, address: SocketAddr, priority: VarInt) {
5043        // Queue the AddAddress frame
5044        let add_address = frame::AddAddress {
5045            sequence,
5046            address,
5047            priority,
5048        };
5049
5050        self.spaces[SpaceId::Data]
5051            .pending
5052            .add_addresses
5053            .push(add_address);
5054        trace!(
5055            "Queued AddAddress frame: seq={}, addr={}, priority={}",
5056            sequence, address, priority
5057        );
5058    }
5059
5060    /// Queue a PunchMeNow frame to coordinate NAT traversal
5061    pub fn queue_punch_me_now(
5062        &mut self,
5063        round: VarInt,
5064        paired_with_sequence_number: VarInt,
5065        address: SocketAddr,
5066    ) {
5067        let punch_me_now = frame::PunchMeNow {
5068            round,
5069            paired_with_sequence_number,
5070            address,
5071            target_peer_id: None, // Direct peer-to-peer communication
5072        };
5073
5074        self.spaces[SpaceId::Data]
5075            .pending
5076            .punch_me_now
5077            .push(punch_me_now);
5078        trace!(
5079            "Queued PunchMeNow frame: round={}, target={}",
5080            round, paired_with_sequence_number
5081        );
5082    }
5083
5084    /// Queue a RemoveAddress frame to remove a candidate
5085    pub fn queue_remove_address(&mut self, sequence: VarInt) {
5086        let remove_address = frame::RemoveAddress { sequence };
5087
5088        self.spaces[SpaceId::Data]
5089            .pending
5090            .remove_addresses
5091            .push(remove_address);
5092        trace!("Queued RemoveAddress frame: seq={}", sequence);
5093    }
5094
5095    /// Queue an ObservedAddress frame to send to peer
5096    pub fn queue_observed_address(&mut self, address: SocketAddr) {
5097        // Get sequence number from address discovery state
5098        let sequence_number = if let Some(state) = &mut self.address_discovery_state {
5099            let seq = state.next_sequence_number;
5100            state.next_sequence_number =
5101                VarInt::from_u64(state.next_sequence_number.into_inner() + 1)
5102                    .expect("sequence number overflow");
5103            seq
5104        } else {
5105            // Fallback if no state (shouldn't happen in practice)
5106            VarInt::from_u32(0)
5107        };
5108
5109        let observed_address = frame::ObservedAddress {
5110            sequence_number,
5111            address,
5112        };
5113        self.spaces[SpaceId::Data]
5114            .pending
5115            .outbound_observations
5116            .push(observed_address);
5117        trace!("Queued ObservedAddress frame: addr={}", address);
5118    }
5119
5120    /// Check if we should send OBSERVED_ADDRESS frames and queue them
5121    pub fn check_for_address_observations(&mut self, now: Instant) {
5122        // Only check if we have address discovery state
5123        let Some(state) = &mut self.address_discovery_state else {
5124            return;
5125        };
5126
5127        // Check if address discovery is enabled
5128        if !state.enabled {
5129            return;
5130        }
5131
5132        // Get the current path ID (0 for primary path)
5133        let path_id = 0u64; // TODO: Support multi-path scenarios
5134
5135        // Get the remote address for this path
5136        let remote_address = self.path.remote;
5137
5138        // Check if we should send an observation for this path
5139        if state.should_send_observation(path_id, now) {
5140            // Try to queue the observation frame
5141            if let Some(frame) = state.queue_observed_address_frame(path_id, remote_address) {
5142                // Queue the frame for sending
5143                self.spaces[SpaceId::Data]
5144                    .pending
5145                    .outbound_observations
5146                    .push(frame);
5147
5148                // Record that we sent the observation
5149                state.record_observation_sent(path_id);
5150
5151                // Trace observed address sent
5152                #[cfg(feature = "trace")]
5153                {
5154                    use crate::trace_observed_address_sent;
5155                    // Tracing imports handled by macros
5156                    trace_observed_address_sent!(
5157                        &self.event_log,
5158                        self.trace_context.trace_id(),
5159                        remote_address,
5160                        path_id
5161                    );
5162                }
5163
5164                trace!(
5165                    "Queued OBSERVED_ADDRESS frame for path {} with address {}",
5166                    path_id, remote_address
5167                );
5168            }
5169        }
5170    }
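    // Illustrative call pattern (a sketch, not mandated by this API): an I/O
    // driver that owns the `Connection` can invoke this once per poll tick,
    // before draining outgoing packets, so any due OBSERVED_ADDRESS frame is
    // queued in time for the next transmit:
    //
    //     let now = Instant::now();
    //     conn.check_for_address_observations(now);
    //     // ...then continue with the usual poll_transmit() drive loop.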
5171
5172    /// Trigger validation of a candidate address using PATH_CHALLENGE
5173    fn trigger_candidate_validation(
5174        &mut self,
5175        candidate_address: SocketAddr,
5176        now: Instant,
5177    ) -> Result<(), TransportError> {
5178        let nat_state = self
5179            .nat_traversal
5180            .as_mut()
5181            .ok_or_else(|| TransportError::PROTOCOL_VIOLATION("NAT traversal not enabled"))?;
5182
5183        // Check if we already have an active validation for this address
5184        if nat_state
5185            .active_validations
5186            .contains_key(&candidate_address)
5187        {
5188            trace!("Validation already in progress for {}", candidate_address);
5189            return Ok(());
5190        }
5191
5192        // Generate a random challenge value
5193        let challenge = self.rng.r#gen::<u64>();
5194
5195        // Create path validation state
5196        let validation_state = nat_traversal::PathValidationState {
5197            challenge,
5198            sent_at: now,
5199            retry_count: 0,
5200            max_retries: 3,
5201            coordination_round: None,
5202            timeout_state: nat_traversal::AdaptiveTimeoutState::new(),
5203            last_retry_at: None,
5204        };
5205
5206        // Store the validation attempt
5207        nat_state
5208            .active_validations
5209            .insert(candidate_address, validation_state);
5210
5211        // Queue PATH_CHALLENGE frame to be sent to the candidate address
5212        self.nat_traversal_challenges
5213            .push(candidate_address, challenge);
5214
5215        // Update statistics
5216        nat_state.stats.validations_succeeded += 1; // Counted optimistically; decremented if this validation later fails
5217
5218        trace!(
5219            "Triggered PATH_CHALLENGE validation for {} with challenge {:016x}",
5220            candidate_address, challenge
5221        );
5222
5223        Ok(())
5224    }
5225
5226    /// Get current NAT traversal state information
5227    ///
5228    /// v0.13.0: Returns (local_candidates, remote_candidates) - role removed since all
5229    /// nodes are symmetric P2P nodes.
5230    pub fn nat_traversal_state(&self) -> Option<(usize, usize)> {
5231        self.nat_traversal
5232            .as_ref()
5233            .map(|state| (state.local_candidates.len(), state.remote_candidates.len()))
5234    }
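    // Illustrative usage (sketch): callers that only need to log progress can do
    //
    //     if let Some((local, remote)) = conn.nat_traversal_state() {
    //         debug!("NAT traversal candidates: {} local, {} remote", local, remote);
    //     }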
5235
5236    /// Initiate NAT traversal coordination through a bootstrap node
5237    pub fn initiate_nat_traversal_coordination(
5238        &mut self,
5239        now: Instant,
5240    ) -> Result<(), TransportError> {
5241        let nat_state = self
5242            .nat_traversal
5243            .as_mut()
5244            .ok_or_else(|| TransportError::PROTOCOL_VIOLATION("NAT traversal not enabled"))?;
5245
5246        // Check if we should send PUNCH_ME_NOW to coordinator
5247        if nat_state.should_send_punch_request() {
5248            // Generate candidate pairs for coordination
5249            nat_state.generate_candidate_pairs(now);
5250
5251            // Get the best candidate pairs to try
5252            let pairs = nat_state.get_next_validation_pairs(3);
5253            if pairs.is_empty() {
5254                return Err(TransportError::PROTOCOL_VIOLATION(
5255                    "No candidate pairs for coordination",
5256                ));
5257            }
5258
5259            // Create punch targets from the pairs
5260            let targets: Vec<_> = pairs
5261                .into_iter()
5262                .map(|pair| nat_traversal::PunchTarget {
5263                    remote_addr: pair.remote_addr,
5264                    remote_sequence: pair.remote_sequence,
5265                    challenge: self.rng.r#gen(),
5266                })
5267                .collect();
5268
5269            // Start coordination round
5270            let round = nat_state
5271                .start_coordination_round(targets, now)
5272                .map_err(|_e| {
5273                    TransportError::PROTOCOL_VIOLATION("Failed to start coordination round")
5274                })?;
5275
5276            // Queue PUNCH_ME_NOW frame to be sent to bootstrap node
5277            // Include our best local address for the peer to target
5278            let local_addr = self
5279                .local_ip
5280                .map(|ip| SocketAddr::new(ip, 0))
5281                .unwrap_or_else(|| {
5282                    SocketAddr::new(std::net::IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED), 0)
5283                });
5284
5285            let punch_me_now = frame::PunchMeNow {
5286                round,
5287                paired_with_sequence_number: VarInt::from_u32(0), // Will be filled by bootstrap
5288                address: local_addr,
5289                target_peer_id: None, // Direct peer-to-peer communication
5290            };
5291
5292            self.spaces[SpaceId::Data]
5293                .pending
5294                .punch_me_now
5295                .push(punch_me_now);
5296            nat_state.mark_punch_request_sent();
5297
5298            trace!("Initiated NAT traversal coordination round {}", round);
5299        }
5300
5301        Ok(())
5302    }
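    // Illustrative usage (sketch): coordination is opportunistic, so a caller can
    // retry it whenever the connection is polled and treat the error case simply
    // as "no round could be started yet":
    //
    //     if conn.is_nat_traversal_enabled() {
    //         let _ = conn.initiate_nat_traversal_coordination(Instant::now());
    //     }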
5303
5304    /// Trigger validation of NAT traversal candidates using PATH_CHALLENGE
5305    pub fn validate_nat_candidates(&mut self, now: Instant) {
5306        self.generate_nat_traversal_challenges(now);
5307    }
5308
5309    // === PUBLIC NAT TRAVERSAL FRAME TRANSMISSION API ===
5310
5311    /// Send an ADD_ADDRESS frame to advertise a candidate address to the peer
5312    ///
5313    /// This is the primary method for sending NAT traversal address advertisements.
5314    /// The frame will be transmitted in the next outgoing QUIC packet.
5315    ///
5316    /// # Arguments
5317    /// * `address` - The candidate address to advertise
5318    /// * `priority` - ICE-style priority for this candidate (higher = better)
5319    ///
5320    /// # Returns
5321    /// * `Ok(sequence)` - The sequence number assigned to this candidate
5322    /// * `Err(ConnectionError)` - If NAT traversal is not enabled or another error occurs
5323    pub fn send_nat_address_advertisement(
5324        &mut self,
5325        address: SocketAddr,
5326        priority: u32,
5327    ) -> Result<u64, ConnectionError> {
5328        // Verify NAT traversal is enabled
5329        let nat_state = self.nat_traversal.as_mut().ok_or_else(|| {
5330            ConnectionError::TransportError(TransportError::PROTOCOL_VIOLATION(
5331                "NAT traversal not enabled on this connection",
5332            ))
5333        })?;
5334
5335        // Generate sequence number and add to local candidates
5336        let sequence = nat_state.next_sequence;
5337        nat_state.next_sequence =
5338            VarInt::from_u64(nat_state.next_sequence.into_inner() + 1).unwrap();
5339
5340        // Add to local candidates
5341        let now = Instant::now();
5342        nat_state.local_candidates.insert(
5343            sequence,
5344            nat_traversal::AddressCandidate {
5345                address,
5346                priority,
5347                source: nat_traversal::CandidateSource::Local,
5348                discovered_at: now,
5349                state: nat_traversal::CandidateState::New,
5350                attempt_count: 0,
5351                last_attempt: None,
5352            },
5353        );
5354
5355        // Update statistics
5356        nat_state.stats.local_candidates_sent += 1;
5357
5358        // Queue the frame for transmission (must be done after releasing nat_state borrow)
5359        self.queue_add_address(sequence, address, VarInt::from_u32(priority));
5360
5361        debug!(
5362            "Queued ADD_ADDRESS frame: addr={}, priority={}, seq={}",
5363            address, priority, sequence
5364        );
5365        Ok(sequence.into_inner())
5366    }
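    // Illustrative usage (sketch; the address and priority values are made up):
    //
    //     let candidate: SocketAddr = "192.0.2.10:4433".parse().unwrap();
    //     let seq = conn.send_nat_address_advertisement(candidate, 100)?;
    //     // `seq` identifies this candidate in a later send_nat_address_removal().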
5367
5368    /// Send a PUNCH_ME_NOW frame to coordinate hole punching with a peer
5369    ///
5370    /// This triggers synchronized hole punching for NAT traversal.
5371    ///
5372    /// # Arguments
5373    /// * `paired_with_sequence_number` - Sequence number of the target candidate address
5374    /// * `address` - Our address for the hole punching attempt
5375    /// * `round` - Coordination round number for synchronization
5376    ///
5377    /// # Returns
5378    /// * `Ok(())` - Frame queued for transmission
5379    /// * `Err(ConnectionError)` - If NAT traversal is not enabled
5380    pub fn send_nat_punch_coordination(
5381        &mut self,
5382        paired_with_sequence_number: u64,
5383        address: SocketAddr,
5384        round: u32,
5385    ) -> Result<(), ConnectionError> {
5386        // Verify NAT traversal is enabled
5387        let _nat_state = self.nat_traversal.as_ref().ok_or_else(|| {
5388            ConnectionError::TransportError(TransportError::PROTOCOL_VIOLATION(
5389                "NAT traversal not enabled on this connection",
5390            ))
5391        })?;
5392
5393        // Queue the frame for transmission
5394        self.queue_punch_me_now(
5395            VarInt::from_u32(round),
5396            VarInt::from_u64(paired_with_sequence_number).map_err(|_| {
5397                ConnectionError::TransportError(TransportError::PROTOCOL_VIOLATION(
5398                    "Invalid target sequence number",
5399                ))
5400            })?,
5401            address,
5402        );
5403
5404        debug!(
5405            "Queued PUNCH_ME_NOW frame: paired_with_seq={}, addr={}, round={}",
5406            paired_with_sequence_number, address, round
5407        );
5408        Ok(())
5409    }
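    // Illustrative usage (sketch; all values are made up): after the peer has
    // advertised a candidate with sequence number 3, request a synchronized punch
    // toward it in coordination round 1, supplying our own reflexive address:
    //
    //     conn.send_nat_punch_coordination(3, "198.51.100.7:4433".parse().unwrap(), 1)?;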
5410
5411    /// Send a REMOVE_ADDRESS frame to remove a previously advertised candidate
5412    ///
5413    /// This removes a candidate address that is no longer valid or available.
5414    ///
5415    /// # Arguments
5416    /// * `sequence` - Sequence number of the candidate to remove
5417    ///
5418    /// # Returns
5419    /// * `Ok(())` - Frame queued for transmission
5420    /// * `Err(ConnectionError)` - If NAT traversal is not enabled
5421    pub fn send_nat_address_removal(&mut self, sequence: u64) -> Result<(), ConnectionError> {
5422        // Verify NAT traversal is enabled
5423        let nat_state = self.nat_traversal.as_mut().ok_or_else(|| {
5424            ConnectionError::TransportError(TransportError::PROTOCOL_VIOLATION(
5425                "NAT traversal not enabled on this connection",
5426            ))
5427        })?;
5428
5429        let sequence_varint = VarInt::from_u64(sequence).map_err(|_| {
5430            ConnectionError::TransportError(TransportError::PROTOCOL_VIOLATION(
5431                "Invalid sequence number",
5432            ))
5433        })?;
5434
5435        // Remove from local candidates
5436        nat_state.local_candidates.remove(&sequence_varint);
5437
5438        // Queue the frame for transmission
5439        self.queue_remove_address(sequence_varint);
5440
5441        debug!("Queued REMOVE_ADDRESS frame: seq={}", sequence);
5442        Ok(())
5443    }
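    // Illustrative usage (sketch): retract a candidate previously returned by
    // send_nat_address_advertisement() once it is no longer reachable:
    //
    //     conn.send_nat_address_removal(seq)?;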
5444
5445    /// Get statistics about NAT traversal activity on this connection
5446    ///
5447    /// # Returns
5448    /// * `Some(stats)` - Current NAT traversal statistics
5449    /// * `None` - If NAT traversal is not enabled
5450    ///
5451    /// This method is preserved for debugging and monitoring purposes.
5452    /// It may be used in future telemetry or diagnostic features.
5453    #[allow(dead_code)]
5454    pub(crate) fn get_nat_traversal_stats(&self) -> Option<&nat_traversal::NatTraversalStats> {
5455        self.nat_traversal.as_ref().map(|state| &state.stats)
5456    }
5457
5458    /// Check if NAT traversal is enabled and active on this connection
5459    pub fn is_nat_traversal_enabled(&self) -> bool {
5460        self.nat_traversal.is_some()
5461    }
5462
5463    // v0.13.0: get_nat_traversal_role() removed - all nodes are symmetric P2P nodes
5464
5465    /// Negotiate address discovery parameters with peer
5466    fn negotiate_address_discovery(&mut self, peer_params: &TransportParameters) {
5467        let now = Instant::now();
5468
5469        // Check if peer supports address discovery
5470        match &peer_params.address_discovery {
5471            Some(peer_config) => {
5472                // Peer supports address discovery
5473                if let Some(state) = &mut self.address_discovery_state {
5474                    if state.enabled {
5475                        // Both support - no additional negotiation needed with enum-based config
5476                        // Rate limiting and path observation use fixed defaults from state creation
5477                        debug!(
5478                            "Address discovery negotiated: rate={}, all_paths={}",
5479                            state.max_observation_rate, state.observe_all_paths
5480                        );
5481                    } else {
5482                        // We don't support it but peer does
5483                        debug!("Address discovery disabled locally, ignoring peer support");
5484                    }
5485                } else {
5486                    // Initialize state based on peer config if we don't have one
5487                    self.address_discovery_state =
5488                        Some(AddressDiscoveryState::new(peer_config, now));
5489                    debug!("Address discovery initialized from peer config");
5490                }
5491            }
5492            _ => {
5493                // Peer doesn't support address discovery
5494                if let Some(state) = &mut self.address_discovery_state {
5495                    state.enabled = false;
5496                    debug!("Address discovery disabled - peer doesn't support it");
5497                }
5498            }
5499        }
5500
5501        // Update paths with negotiated observation rate if enabled
5502        if let Some(state) = &self.address_discovery_state {
5503            if state.enabled {
5504                self.path.set_observation_rate(state.max_observation_rate);
5505            }
5506        }
5507    }
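    // Negotiation outcomes of the method above, summarized (local state vs. the
    // peer's address_discovery transport parameter):
    //
    //     peer advertises, local state enabled   -> keep local defaults
    //     peer advertises, local state disabled  -> remain disabled
    //     peer advertises, no local state        -> initialize state from the peer's config
    //     peer omits the parameter               -> disable any local state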
5508
5509    fn decrypt_packet(
5510        &mut self,
5511        now: Instant,
5512        packet: &mut Packet,
5513    ) -> Result<Option<u64>, Option<TransportError>> {
5514        let result = packet_crypto::decrypt_packet_body(
5515            packet,
5516            &self.spaces,
5517            self.zero_rtt_crypto.as_ref(),
5518            self.key_phase,
5519            self.prev_crypto.as_ref(),
5520            self.next_crypto.as_ref(),
5521        )?;
5522
5523        let result = match result {
5524            Some(r) => r,
5525            None => return Ok(None),
5526        };
5527
5528        if result.outgoing_key_update_acked {
5529            if let Some(prev) = self.prev_crypto.as_mut() {
5530                prev.end_packet = Some((result.number, now));
5531                self.set_key_discard_timer(now, packet.header.space());
5532            }
5533        }
5534
5535        if result.incoming_key_update {
5536            trace!("key update authenticated");
5537            self.update_keys(Some((result.number, now)), true);
5538            self.set_key_discard_timer(now, packet.header.space());
5539        }
5540
5541        Ok(Some(result.number))
5542    }
5543
5544    fn update_keys(&mut self, end_packet: Option<(u64, Instant)>, remote: bool) {
5545        trace!("executing key update");
5546        // Generate keys for the key phase after the one we're switching to, store them in
5547        // `next_crypto`, make the contents of `next_crypto` current, and move the current keys into
5548        // `prev_crypto`.
5549        let new = self
5550            .crypto
5551            .next_1rtt_keys()
5552            .expect("only called for `Data` packets");
5553        self.key_phase_size = new
5554            .local
5555            .confidentiality_limit()
5556            .saturating_sub(KEY_UPDATE_MARGIN);
5557        let old = mem::replace(
5558            &mut self.spaces[SpaceId::Data]
5559                .crypto
5560                .as_mut()
5561                .unwrap() // safe because update_keys() can only be triggered by short packets
5562                .packet,
5563            mem::replace(self.next_crypto.as_mut().unwrap(), new),
5564        );
5565        self.spaces[SpaceId::Data].sent_with_keys = 0;
5566        self.prev_crypto = Some(PrevCrypto {
5567            crypto: old,
5568            end_packet,
5569            update_unacked: remote,
5570        });
5571        self.key_phase = !self.key_phase;
5572    }
5573
5574    fn peer_supports_ack_frequency(&self) -> bool {
5575        self.peer_params.min_ack_delay.is_some()
5576    }
5577
5578    /// Send an IMMEDIATE_ACK frame to the remote endpoint
5579    ///
5580    /// According to the spec, this will result in an error if the remote endpoint does not support
5581    /// the Acknowledgement Frequency extension
5582    pub(crate) fn immediate_ack(&mut self) {
5583        self.spaces[self.highest_space].immediate_ack_pending = true;
5584    }
5585
5586    /// Decodes a packet, returning its decrypted payload, so it can be inspected in tests
5587    #[cfg(test)]
5588    #[allow(dead_code)]
5589    pub(crate) fn decode_packet(&self, event: &ConnectionEvent) -> Option<Vec<u8>> {
5590        let (first_decode, remaining) = match &event.0 {
5591            ConnectionEventInner::Datagram(DatagramConnectionEvent {
5592                first_decode,
5593                remaining,
5594                ..
5595            }) => (first_decode, remaining),
5596            _ => return None,
5597        };
5598
5599        if remaining.is_some() {
5600            panic!("Packets should never be coalesced in tests");
5601        }
5602
5603        let decrypted_header = packet_crypto::unprotect_header(
5604            first_decode.clone(),
5605            &self.spaces,
5606            self.zero_rtt_crypto.as_ref(),
5607            self.peer_params.stateless_reset_token,
5608        )?;
5609
5610        let mut packet = decrypted_header.packet?;
5611        packet_crypto::decrypt_packet_body(
5612            &mut packet,
5613            &self.spaces,
5614            self.zero_rtt_crypto.as_ref(),
5615            self.key_phase,
5616            self.prev_crypto.as_ref(),
5617            self.next_crypto.as_ref(),
5618        )
5619        .ok()?;
5620
5621        Some(packet.payload.to_vec())
5622    }
5623
5624    /// The number of bytes of packets containing retransmittable frames that have not been
5625    /// acknowledged or declared lost.
5626    #[cfg(test)]
5627    #[allow(dead_code)]
5628    pub(crate) fn bytes_in_flight(&self) -> u64 {
5629        self.path.in_flight.bytes
5630    }
5631
5632    /// Number of bytes worth of non-ack-only packets that may be sent
5633    #[cfg(test)]
5634    #[allow(dead_code)]
5635    pub(crate) fn congestion_window(&self) -> u64 {
5636        self.path
5637            .congestion
5638            .window()
5639            .saturating_sub(self.path.in_flight.bytes)
5640    }
5641
5642    /// Whether no timers but keepalive, idle, rtt, pushnewcid, and key discard are running
5643    #[cfg(test)]
5644    #[allow(dead_code)]
5645    pub(crate) fn is_idle(&self) -> bool {
5646        Timer::VALUES
5647            .iter()
5648            .filter(|&&t| !matches!(t, Timer::KeepAlive | Timer::PushNewCid | Timer::KeyDiscard))
5649            .filter_map(|&t| Some((t, self.timers.get(t)?)))
5650            .min_by_key(|&(_, time)| time)
5651            .is_none_or(|(timer, _)| timer == Timer::Idle)
5652    }
5653
5654    /// Total number of outgoing packets that have been deemed lost
5655    #[cfg(test)]
5656    #[allow(dead_code)]
5657    pub(crate) fn lost_packets(&self) -> u64 {
5658        self.lost_packets
5659    }
5660
5661    /// Whether explicit congestion notification is in use on outgoing packets.
5662    #[cfg(test)]
5663    #[allow(dead_code)]
5664    pub(crate) fn using_ecn(&self) -> bool {
5665        self.path.sending_ecn
5666    }
5667
5668    /// The number of received bytes in the current path
5669    #[cfg(test)]
5670    #[allow(dead_code)]
5671    pub(crate) fn total_recvd(&self) -> u64 {
5672        self.path.total_recvd
5673    }
5674
5675    #[cfg(test)]
5676    #[allow(dead_code)]
5677    pub(crate) fn active_local_cid_seq(&self) -> (u64, u64) {
5678        self.local_cid_state.active_seq()
5679    }
5680
5681    /// Instruct the peer to replace previously issued CIDs by sending a NEW_CONNECTION_ID frame
5682    /// with updated `retire_prior_to` field set to `v`
5683    #[cfg(test)]
5684    #[allow(dead_code)]
5685    pub(crate) fn rotate_local_cid(&mut self, v: u64, now: Instant) {
5686        let n = self.local_cid_state.assign_retire_seq(v);
5687        self.endpoint_events
5688            .push_back(EndpointEventInner::NeedIdentifiers(now, n));
5689    }
5690
5691    /// Check the current active remote CID sequence
5692    #[cfg(test)]
5693    #[allow(dead_code)]
5694    pub(crate) fn active_rem_cid_seq(&self) -> u64 {
5695        self.rem_cids.active_seq()
5696    }
5697
5698    /// Returns the detected maximum udp payload size for the current path
5699    #[cfg(test)]
5701    #[allow(dead_code)]
5702    pub(crate) fn path_mtu(&self) -> u16 {
5703        self.path.current_mtu()
5704    }
5705
5706    /// Whether we have 1-RTT data to send
5707    ///
5708    /// See also `self.space(SpaceId::Data).can_send()`
5709    fn can_send_1rtt(&self, max_size: usize) -> bool {
5710        self.streams.can_send_stream_data()
5711            || self.path.challenge_pending
5712            || self
5713                .prev_path
5714                .as_ref()
5715                .is_some_and(|(_, x)| x.challenge_pending)
5716            || !self.path_responses.is_empty()
5717            || !self.nat_traversal_challenges.is_empty()
5718            || self
5719                .datagrams
5720                .outgoing
5721                .front()
5722                .is_some_and(|x| x.size(true) <= max_size)
5723    }
5724
5725    /// Update counters to account for a packet becoming acknowledged, lost, or abandoned
5726    fn remove_in_flight(&mut self, pn: u64, packet: &SentPacket) {
5727        // Visit known paths from newest to oldest to find the one `pn` was sent on
5728        for path in [&mut self.path]
5729            .into_iter()
5730            .chain(self.prev_path.as_mut().map(|(_, data)| data))
5731        {
5732            if path.remove_in_flight(pn, packet) {
5733                return;
5734            }
5735        }
5736    }
5737
5738    /// Terminate the connection instantly, without sending a close packet
5739    fn kill(&mut self, reason: ConnectionError) {
5740        self.close_common();
5741        self.error = Some(reason);
5742        self.state = State::Drained;
5743        self.endpoint_events.push_back(EndpointEventInner::Drained);
5744    }
5745
5746    /// Generate PATH_CHALLENGE frames for NAT traversal candidate validation
5747    fn generate_nat_traversal_challenges(&mut self, now: Instant) {
5748        // Get candidates ready for validation first
5749        let candidates: Vec<(VarInt, SocketAddr)> = if let Some(nat_state) = &self.nat_traversal {
5750            nat_state
5751                .get_validation_candidates()
5752                .into_iter()
5753                .take(3) // Validate up to 3 candidates in parallel
5754                .map(|(seq, candidate)| (seq, candidate.address))
5755                .collect()
5756        } else {
5757            return;
5758        };
5759
5760        if candidates.is_empty() {
5761            return;
5762        }
5763
5764        // Now process candidates with mutable access
5765        if let Some(nat_state) = &mut self.nat_traversal {
5766            for (seq, address) in candidates {
5767                // Generate a random challenge token
5768                let challenge: u64 = self.rng.r#gen();
5769
5770                // Start validation for this candidate
5771                if let Err(e) = nat_state.start_validation(seq, challenge, now) {
5772                    debug!("Failed to start validation for candidate {}: {}", seq, e);
5773                    continue;
5774                }
5775
5776                // Queue the challenge
5777                self.nat_traversal_challenges.push(address, challenge);
5778                trace!(
5779                    "Queuing NAT validation PATH_CHALLENGE for {} with token {:08x}",
5780                    address, challenge
5781                );
5782            }
5783        }
5784    }
5785
5786    /// Storage size required for the largest packet known to be supported by the current path
5787    ///
5788    /// Buffers passed to [`Connection::poll_transmit`] should be at least this large.
5789    pub fn current_mtu(&self) -> u16 {
5790        self.path.current_mtu()
5791    }
5792
5793    /// Size of non-frame data for a 1-RTT packet
5794    ///
5795    /// Quantifies space consumed by the QUIC header and AEAD tag. All other bytes in a packet are
5796    /// frames. Changes if the length of the remote connection ID changes, which is expected to be
5797    /// rare. If `pn` is specified, may additionally change unpredictably due to variations in
5798    /// latency and packet loss.
5799    fn predict_1rtt_overhead(&self, pn: Option<u64>) -> usize {
5800        let pn_len = match pn {
5801            Some(pn) => PacketNumber::new(
5802                pn,
5803                self.spaces[SpaceId::Data].largest_acked_packet.unwrap_or(0),
5804            )
5805            .len(),
5806            // Upper bound
5807            None => 4,
5808        };
5809
5810        // 1 byte for flags
5811        1 + self.rem_cids.active().len() + pn_len + self.tag_len_1rtt()
5812    }
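    // Worked example (purely illustrative): with an 8-byte remote CID, a 2-byte
    // packet number encoding, and a 16-byte AEAD tag, the predicted overhead is
    // 1 (flags) + 8 + 2 + 16 = 27 bytes per 1-RTT packet.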
5813
5814    fn tag_len_1rtt(&self) -> usize {
5815        let key = match self.spaces[SpaceId::Data].crypto.as_ref() {
5816            Some(crypto) => Some(&*crypto.packet.local),
5817            None => self.zero_rtt_crypto.as_ref().map(|x| &*x.packet),
5818        };
5819        // If neither Data nor 0-RTT keys are available, make a reasonable tag length guess. As of
5820        // this writing, all QUIC cipher suites use 16-byte tags. We could return `None` instead,
5821        // but that would needlessly prevent sending datagrams during 0-RTT.
5822        key.map_or(16, |x| x.tag_len())
5823    }
5824
5825    /// Mark the path as validated, and enqueue NEW_TOKEN frames to be sent as appropriate
5826    fn on_path_validated(&mut self) {
5827        self.path.validated = true;
5828        let ConnectionSide::Server { server_config } = &self.side else {
5829            return;
5830        };
5831        let new_tokens = &mut self.spaces[SpaceId::Data as usize].pending.new_tokens;
5832        new_tokens.clear();
5833        for _ in 0..server_config.validation_token.sent {
5834            new_tokens.push(self.path.remote);
5835        }
5836    }
5837}
5838
5839impl fmt::Debug for Connection {
5840    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
5841        f.debug_struct("Connection")
5842            .field("handshake_cid", &self.handshake_cid)
5843            .finish()
5844    }
5845}
5846
5847/// Fields of `Connection` specific to it being client-side or server-side
5848enum ConnectionSide {
5849    Client {
5850        /// Sent in every outgoing Initial packet. Always empty after Initial keys are discarded
5851        token: Bytes,
5852        token_store: Arc<dyn TokenStore>,
5853        server_name: String,
5854    },
5855    Server {
5856        server_config: Arc<ServerConfig>,
5857    },
5858}
5859
5860impl ConnectionSide {
5861    fn remote_may_migrate(&self) -> bool {
5862        match self {
5863            Self::Server { server_config } => server_config.migration,
5864            Self::Client { .. } => false,
5865        }
5866    }
5867
5868    fn is_client(&self) -> bool {
5869        self.side().is_client()
5870    }
5871
5872    fn is_server(&self) -> bool {
5873        self.side().is_server()
5874    }
5875
5876    fn side(&self) -> Side {
5877        match *self {
5878            Self::Client { .. } => Side::Client,
5879            Self::Server { .. } => Side::Server,
5880        }
5881    }
5882}
5883
5884impl From<SideArgs> for ConnectionSide {
5885    fn from(side: SideArgs) -> Self {
5886        match side {
5887            SideArgs::Client {
5888                token_store,
5889                server_name,
5890            } => Self::Client {
5891                token: token_store.take(&server_name).unwrap_or_default(),
5892                token_store,
5893                server_name,
5894            },
5895            SideArgs::Server {
5896                server_config,
5897                pref_addr_cid: _,
5898                path_validated: _,
5899            } => Self::Server { server_config },
5900        }
5901    }
5902}
5903
5904/// Parameters to `Connection::new` specific to it being client-side or server-side
5905pub(crate) enum SideArgs {
5906    Client {
5907        token_store: Arc<dyn TokenStore>,
5908        server_name: String,
5909    },
5910    Server {
5911        server_config: Arc<ServerConfig>,
5912        pref_addr_cid: Option<ConnectionId>,
5913        path_validated: bool,
5914    },
5915}
5916
5917impl SideArgs {
5918    pub(crate) fn pref_addr_cid(&self) -> Option<ConnectionId> {
5919        match *self {
5920            Self::Client { .. } => None,
5921            Self::Server { pref_addr_cid, .. } => pref_addr_cid,
5922        }
5923    }
5924
5925    pub(crate) fn path_validated(&self) -> bool {
5926        match *self {
5927            Self::Client { .. } => true,
5928            Self::Server { path_validated, .. } => path_validated,
5929        }
5930    }
5931
5932    pub(crate) fn side(&self) -> Side {
5933        match *self {
5934            Self::Client { .. } => Side::Client,
5935            Self::Server { .. } => Side::Server,
5936        }
5937    }
5938}
5939
5940/// Reasons why a connection might be lost
5941#[derive(Debug, Error, Clone, PartialEq, Eq)]
5942pub enum ConnectionError {
5943    /// The peer doesn't implement any supported version
5944    #[error("peer doesn't implement any supported version")]
5945    VersionMismatch,
5946    /// The peer violated the QUIC specification as understood by this implementation
5947    #[error(transparent)]
5948    TransportError(#[from] TransportError),
5949    /// The peer's QUIC stack aborted the connection automatically
5950    #[error("aborted by peer: {0}")]
5951    ConnectionClosed(frame::ConnectionClose),
5952    /// The peer closed the connection
5953    #[error("closed by peer: {0}")]
5954    ApplicationClosed(frame::ApplicationClose),
5955    /// The peer is unable to continue processing this connection, usually due to having restarted
5956    #[error("reset by peer")]
5957    Reset,
5958    /// Communication with the peer has lapsed for longer than the negotiated idle timeout
5959    ///
5960    /// If neither side is sending keep-alives, a connection will time out after a long enough idle
5961    /// period even if the peer is still reachable. See also [`TransportConfig::max_idle_timeout()`]
5962    /// and [`TransportConfig::keep_alive_interval()`].
5963    #[error("timed out")]
5964    TimedOut,
5965    /// The local application closed the connection
5966    #[error("closed")]
5967    LocallyClosed,
5968    /// The connection could not be created because not enough of the CID space is available
5969    ///
5970    /// Try using longer connection IDs.
5971    #[error("CIDs exhausted")]
5972    CidsExhausted,
5973}
5974
5975impl From<Close> for ConnectionError {
5976    fn from(x: Close) -> Self {
5977        match x {
5978            Close::Connection(reason) => Self::ConnectionClosed(reason),
5979            Close::Application(reason) => Self::ApplicationClosed(reason),
5980        }
5981    }
5982}
5983
5984// For compatibility with API consumers
5985impl From<ConnectionError> for io::Error {
5986    fn from(x: ConnectionError) -> Self {
5987        use ConnectionError::*;
5988        let kind = match x {
5989            TimedOut => io::ErrorKind::TimedOut,
5990            Reset => io::ErrorKind::ConnectionReset,
5991            ApplicationClosed(_) | ConnectionClosed(_) => io::ErrorKind::ConnectionAborted,
5992            TransportError(_) | VersionMismatch | LocallyClosed | CidsExhausted => {
5993                io::ErrorKind::Other
5994            }
5995        };
5996        Self::new(kind, x)
5997    }
5998}
5999
6000#[derive(Clone, Debug)]
6001/// Connection state machine states
6002pub enum State {
6003    /// Connection is in handshake phase
6004    Handshake(state::Handshake),
6005    /// Connection is established and ready for data transfer
6006    Established,
6007    /// Connection is closed with a reason
6008    Closed(state::Closed),
6009    /// Connection is draining (waiting for peer acknowledgment)
6010    Draining,
6011    /// Waiting for application to call close so we can dispose of the resources
6012    Drained,
6013}
6014
6015impl State {
6016    fn closed<R: Into<Close>>(reason: R) -> Self {
6017        Self::Closed(state::Closed {
6018            reason: reason.into(),
6019        })
6020    }
6021
6022    fn is_handshake(&self) -> bool {
6023        matches!(*self, Self::Handshake(_))
6024    }
6025
6026    fn is_established(&self) -> bool {
6027        matches!(*self, Self::Established)
6028    }
6029
6030    fn is_closed(&self) -> bool {
6031        matches!(*self, Self::Closed(_) | Self::Draining | Self::Drained)
6032    }
6033
6034    fn is_drained(&self) -> bool {
6035        matches!(*self, Self::Drained)
6036    }
6037}
6038
6039mod state {
6040    use super::*;
6041
6042    #[derive(Clone, Debug)]
6043    pub struct Handshake {
6044        /// Whether the remote CID has been set by the peer yet
6045        ///
6046        /// Always set for servers
6047        pub(super) rem_cid_set: bool,
6048        /// Stateless retry token received in the first Initial by a server.
6049        ///
6050        /// Must be present in every Initial. Always empty for clients.
6051        pub(super) expected_token: Bytes,
6052        /// First cryptographic message
6053        ///
6054        /// Only set for clients
6055        pub(super) client_hello: Option<Bytes>,
6056    }
6057
6058    #[derive(Clone, Debug)]
6059    pub struct Closed {
6060        pub(super) reason: Close,
6061    }
6062}
6063
6064/// Events of interest to the application
6065#[derive(Debug)]
6066pub enum Event {
6067    /// The connection's handshake data is ready
6068    HandshakeDataReady,
6069    /// The connection was successfully established
6070    Connected,
6071    /// The connection was lost
6072    ///
6073    /// Emitted if the peer closes the connection or an error is encountered.
6074    ConnectionLost {
6075        /// Reason that the connection was closed
6076        reason: ConnectionError,
6077    },
6078    /// Stream events
6079    Stream(StreamEvent),
6080    /// One or more application datagrams have been received
6081    DatagramReceived,
6082    /// One or more application datagrams have been sent after blocking
6083    DatagramsUnblocked,
6084}
6085
6086fn instant_saturating_sub(x: Instant, y: Instant) -> Duration {
6087    if x > y { x - y } else { Duration::ZERO }
6088}
6089
6090fn get_max_ack_delay(params: &TransportParameters) -> Duration {
6091    Duration::from_micros(params.max_ack_delay.0 * 1000)
6092}
6093
6094// Prevents overflow and improves behavior in extreme circumstances
6095const MAX_BACKOFF_EXPONENT: u32 = 16;
6096
6097/// Minimal remaining size to allow packet coalescing, excluding cryptographic tag
6098///
6099/// This must be at least as large as the header for a well-formed empty packet to be coalesced,
6100/// plus some space for frames. We only care about handshake headers because short header packets
6101/// necessarily have smaller headers, and initial packets are only ever the first packet in a
6102/// datagram (because we coalesce in ascending packet space order and the only reason to split a
6103/// packet is when packet space changes).
6104const MIN_PACKET_SPACE: usize = MAX_HANDSHAKE_OR_0RTT_HEADER_SIZE + 32;
6105
6106/// Largest amount of space that could be occupied by a Handshake or 0-RTT packet's header
6107///
6108/// Excludes packet-type-specific fields such as packet number or Initial token
6109// https://www.rfc-editor.org/rfc/rfc9000.html#name-0-rtt: flags + version + dcid len + dcid +
6110// scid len + scid + length + pn
6111const MAX_HANDSHAKE_OR_0RTT_HEADER_SIZE: usize =
6112    1 + 4 + 1 + MAX_CID_SIZE + 1 + MAX_CID_SIZE + VarInt::from_u32(u16::MAX as u32).size() + 4;
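// Worked example (illustrative, assuming MAX_CID_SIZE is 20 bytes and the length
// field needs a 4-byte varint): 1 + 4 + 1 + 20 + 1 + 20 + 4 + 4 = 55 bytes, which
// puts MIN_PACKET_SPACE above at 55 + 32 = 87 bytes.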
6113
6114/// Perform key updates this many packets before the AEAD confidentiality limit.
6115///
6116/// Chosen arbitrarily, intended to be large enough to prevent spurious connection loss.
6117const KEY_UPDATE_MARGIN: u64 = 10_000;
6118
6119#[derive(Default)]
6120struct SentFrames {
6121    retransmits: ThinRetransmits,
6122    largest_acked: Option<u64>,
6123    stream_frames: StreamMetaVec,
6124    /// Whether the packet contains non-retransmittable frames (like datagrams)
6125    non_retransmits: bool,
6126    requires_padding: bool,
6127}
6128
6129impl SentFrames {
6130    /// Returns whether the packet contains only ACKs
6131    fn is_ack_only(&self, streams: &StreamsState) -> bool {
6132        self.largest_acked.is_some()
6133            && !self.non_retransmits
6134            && self.stream_frames.is_empty()
6135            && self.retransmits.is_empty(streams)
6136    }
6137}
6138
6139/// Compute the negotiated idle timeout based on local and remote max_idle_timeout transport parameters.
6140///
6141 /// According to the definition of max_idle_timeout, a value of `0` means the timeout is disabled; see <https://www.rfc-editor.org/rfc/rfc9000#section-18.2-4.4.1>.
6142///
6143 /// According to the negotiation procedure, the negotiated value is the minimum of the two timeouts, or the only one specified when the other endpoint opts out; see <https://www.rfc-editor.org/rfc/rfc9000#section-10.1-2>.
6144///
6145/// Returns the negotiated idle timeout as a `Duration`, or `None` when both endpoints have opted out of idle timeout.
6146fn negotiate_max_idle_timeout(x: Option<VarInt>, y: Option<VarInt>) -> Option<Duration> {
6147    match (x, y) {
6148        (Some(VarInt(0)) | None, Some(VarInt(0)) | None) => None,
6149        (Some(VarInt(0)) | None, Some(y)) => Some(Duration::from_millis(y.0)),
6150        (Some(x), Some(VarInt(0)) | None) => Some(Duration::from_millis(x.0)),
6151        (Some(x), Some(y)) => Some(Duration::from_millis(cmp::min(x, y).0)),
6152    }
6153}
6154
6155/// State for tracking PQC support in the connection
6156#[derive(Debug, Clone)]
6157pub(crate) struct PqcState {
6158    /// Whether the peer supports PQC algorithms
6159    enabled: bool,
6160    /// Supported PQC algorithms advertised by peer
6161    #[allow(dead_code)]
6162    algorithms: Option<crate::transport_parameters::PqcAlgorithms>,
6163    /// Target MTU for PQC handshakes
6164    handshake_mtu: u16,
6165    /// Whether we're currently using PQC algorithms
6166    using_pqc: bool,
6167    /// PQC packet handler for managing larger handshakes
6168    packet_handler: crate::crypto::pqc::packet_handler::PqcPacketHandler,
6169}
6170
6171#[allow(dead_code)]
6172impl PqcState {
6173    fn new() -> Self {
6174        Self {
6175            enabled: false,
6176            algorithms: None,
6177            handshake_mtu: MIN_INITIAL_SIZE,
6178            using_pqc: false,
6179            packet_handler: crate::crypto::pqc::packet_handler::PqcPacketHandler::new(),
6180        }
6181    }
6182
6183    /// Get the minimum initial packet size based on PQC state
6184    fn min_initial_size(&self) -> u16 {
6185        if self.enabled && self.using_pqc {
6186            // Use larger initial packet size for PQC handshakes
6187            std::cmp::max(self.handshake_mtu, 4096)
6188        } else {
6189            MIN_INITIAL_SIZE
6190        }
6191    }
6192
6193    /// Update PQC state based on peer's transport parameters
6194    fn update_from_peer_params(&mut self, params: &TransportParameters) {
6195        if let Some(ref algorithms) = params.pqc_algorithms {
6196            self.enabled = true;
6197            self.algorithms = Some(algorithms.clone());
6198            // v0.2: Pure PQC - if any algorithm is supported, prepare for larger packets
6199            if algorithms.ml_kem_768 || algorithms.ml_dsa_65 {
6200                self.using_pqc = true;
6201                self.handshake_mtu = 4096; // Default PQC handshake MTU
6202            }
6203        }
6204    }
6205
6206    /// Detect PQC from CRYPTO frame data
6207    fn detect_pqc_from_crypto(&mut self, crypto_data: &[u8], space: SpaceId) {
6208        if !self.enabled {
6209            return;
6210        }
6211        if self.packet_handler.detect_pqc_handshake(crypto_data, space) {
6212            self.using_pqc = true;
6213            // Update handshake MTU based on PQC detection
6214            self.handshake_mtu = self.packet_handler.get_min_packet_size(space);
6215        }
6216    }
6217
6218    /// Check if MTU discovery should be triggered for PQC
6219    fn should_trigger_mtu_discovery(&mut self) -> bool {
6220        self.packet_handler.should_trigger_mtu_discovery()
6221    }
6222
6223    /// Get PQC-aware MTU configuration
6224    fn get_mtu_config(&self) -> MtuDiscoveryConfig {
6225        self.packet_handler.get_pqc_mtu_config()
6226    }
6227
6228    /// Calculate optimal CRYPTO frame size
6229    fn calculate_crypto_frame_size(&self, available_space: usize, remaining_data: usize) -> usize {
6230        self.packet_handler
6231            .calculate_crypto_frame_size(available_space, remaining_data)
6232    }
6233
6234    /// Check if packet coalescing should be adjusted
6235    fn should_adjust_coalescing(&self, current_size: usize, space: SpaceId) -> bool {
6236        self.packet_handler
6237            .adjust_coalescing_for_pqc(current_size, space)
6238    }
6239
6240    /// Handle packet sent event
6241    fn on_packet_sent(&mut self, space: SpaceId, size: u16) {
6242        self.packet_handler.on_packet_sent(space, size);
6243    }
6244
6245    /// Reset PQC state (e.g., on retry)
6246    fn reset(&mut self) {
6247        self.enabled = false;
6248        self.algorithms = None;
6249        self.handshake_mtu = MIN_INITIAL_SIZE;
6250        self.using_pqc = false;
6251        self.packet_handler.reset();
6252    }
6253}
6254
6255impl Default for PqcState {
6256    fn default() -> Self {
6257        Self::new()
6258    }
6259}
6260
6261/// State for tracking address discovery via OBSERVED_ADDRESS frames
6262#[derive(Debug, Clone)]
6263pub(crate) struct AddressDiscoveryState {
6264    /// Whether address discovery is enabled for this connection
6265    enabled: bool,
6266    /// Maximum rate of OBSERVED_ADDRESS frames per path (per second)
6267    max_observation_rate: u8,
6268    /// Whether to observe addresses for all paths or just primary
6269    observe_all_paths: bool,
6270    /// Per-path local observations (what we saw the peer at, for sending)
6271    sent_observations: std::collections::HashMap<u64, paths::PathAddressInfo>,
6272    /// Per-path remote observations (what the peer saw us at, for our info)
6273    received_observations: std::collections::HashMap<u64, paths::PathAddressInfo>,
6274    /// Rate limiter for sending observations
6275    rate_limiter: AddressObservationRateLimiter,
6276    /// Historical record of observations received
6277    received_history: Vec<ObservedAddressEvent>,
6278    /// Whether this connection is in bootstrap mode (aggressive observation)
6279    bootstrap_mode: bool,
6280    /// Next sequence number for OBSERVED_ADDRESS frames
6281    next_sequence_number: VarInt,
6282    /// Map of path_id to last received sequence number
6283    last_received_sequence: std::collections::HashMap<u64, VarInt>,
6284    /// Total number of observations sent
6285    frames_sent: u64,
6286}
6287
6288/// Event for when we receive an OBSERVED_ADDRESS frame
6289#[derive(Debug, Clone, PartialEq, Eq)]
6290struct ObservedAddressEvent {
6291    /// The address the peer observed
6292    address: SocketAddr,
6293    /// When we received this observation
6294    received_at: Instant,
6295    /// Which path this was received on
6296    path_id: u64,
6297}
6298
6299/// Rate limiter for address observations
6300#[derive(Debug, Clone)]
6301struct AddressObservationRateLimiter {
6302    /// Tokens available for sending observations
6303    tokens: f64,
6304    /// Maximum tokens (burst capacity)
6305    max_tokens: f64,
6306    /// Rate of token replenishment (tokens per second)
6307    rate: f64,
6308    /// Last time tokens were updated
6309    last_update: Instant,
6310}
6311
6312#[allow(dead_code)]
6313impl AddressDiscoveryState {
6314    /// Create a new address discovery state
6315    fn new(config: &crate::transport_parameters::AddressDiscoveryConfig, now: Instant) -> Self {
6316        use crate::transport_parameters::AddressDiscoveryConfig::*;
6317
6318        // Set defaults based on the config variant
6319        let (enabled, _can_send, _can_receive) = match config {
6320            SendOnly => (true, true, false),
6321            ReceiveOnly => (true, false, true),
6322            SendAndReceive => (true, true, true),
6323        };
6324
6325        // For now, use fixed defaults for rate limiting
6326        // TODO: These could be made configurable via a separate mechanism
6327        let max_observation_rate = 10u8; // Default rate
6328        let observe_all_paths = false; // Default to primary path only
6329
6330        Self {
6331            enabled,
6332            max_observation_rate,
6333            observe_all_paths,
6334            sent_observations: std::collections::HashMap::new(),
6335            received_observations: std::collections::HashMap::new(),
6336            rate_limiter: AddressObservationRateLimiter::new(max_observation_rate, now),
6337            received_history: Vec::new(),
6338            bootstrap_mode: false,
6339            next_sequence_number: VarInt::from_u32(0),
6340            last_received_sequence: std::collections::HashMap::new(),
6341            frames_sent: 0,
6342        }
6343    }
6344
6345    /// Check if we should send an observation for the given path
6346    fn should_send_observation(&mut self, path_id: u64, now: Instant) -> bool {
6347        // Use the new should_observe_path method which considers bootstrap mode
6348        if !self.should_observe_path(path_id) {
6349            return false;
6350        }
6351
6352        // Check if this is a new path or if the address has changed
6353        let needs_observation = match self.sent_observations.get(&path_id) {
6354            Some(info) => info.observed_address.is_none() || !info.notified,
6355            None => true,
6356        };
6357
6358        if !needs_observation {
6359            return false;
6360        }
6361
6362        // Check rate limit
6363        self.rate_limiter.try_consume(1.0, now)
6364    }
6365
6366    /// Record that we sent an observation for a path
6367    fn record_observation_sent(&mut self, path_id: u64) {
6368        if let Some(info) = self.sent_observations.get_mut(&path_id) {
6369            info.mark_notified();
6370        }
6371    }
6372
6373    /// Handle receiving an OBSERVED_ADDRESS frame
6374    fn handle_observed_address(&mut self, address: SocketAddr, path_id: u64, now: Instant) {
6375        if !self.enabled {
6376            return;
6377        }
6378
6379        self.received_history.push(ObservedAddressEvent {
6380            address,
6381            received_at: now,
6382            path_id,
6383        });
6384
6385        // Update or create path info for received observations
6386        let info = self
6387            .received_observations
6388            .entry(path_id)
6389            .or_insert_with(paths::PathAddressInfo::new);
6390        info.update_observed_address(address, now);
6391    }
6392
6393    /// Get the most recently observed address for a path
6394    pub(crate) fn get_observed_address(&self, path_id: u64) -> Option<SocketAddr> {
6395        self.received_observations
6396            .get(&path_id)
6397            .and_then(|info| info.observed_address)
6398    }
6399
6400    /// Get all observed addresses across all paths
6401    pub(crate) fn get_all_received_history(&self) -> Vec<SocketAddr> {
6402        self.received_observations
6403            .values()
6404            .filter_map(|info| info.observed_address)
6405            .collect()
6406    }
6407
6408    /// Get statistics for address discovery
6409    pub(crate) fn stats(&self) -> AddressDiscoveryStats {
6410        AddressDiscoveryStats {
6411            frames_sent: self.frames_sent,
6412            frames_received: self.received_history.len() as u64,
6413            addresses_discovered: self
6414                .received_observations
6415                .values()
6416                .filter(|info| info.observed_address.is_some())
6417                .count() as u64,
6418            address_changes_detected: 0, // TODO: Track address changes properly
6419        }
6420    }
6421
6422    /// Check if we have any unnotified address changes
6423    ///
6424    /// This checks both:
6425    /// - `sent_observations`: addresses we've observed about peers that need to be sent
6426    /// - `received_observations`: addresses peers observed about us that need app notification
6427    fn has_unnotified_changes(&self) -> bool {
6428        // Check if we have observations to send to peers
6429        let has_unsent = self
6430            .sent_observations
6431            .values()
6432            .any(|info| info.observed_address.is_some() && !info.notified);
6433
6434        // Check if we have received observations to notify the app about
6435        let has_unreceived = self
6436            .received_observations
6437            .values()
6438            .any(|info| info.observed_address.is_some() && !info.notified);
6439
6440        has_unsent || has_unreceived
6441    }
6442
6443    /// Queue an OBSERVED_ADDRESS frame for sending if conditions are met
6444    fn queue_observed_address_frame(
6445        &mut self,
6446        path_id: u64,
6447        address: SocketAddr,
6448    ) -> Option<frame::ObservedAddress> {
6449        // Check if address discovery is enabled
6450        if !self.enabled {
6451            return None;
6452        }
6453
6454        // Check path restrictions
6455        if !self.observe_all_paths && path_id != 0 {
6456            return None;
6457        }
6458
6459        // Check if this path has already been notified
6460        if let Some(info) = self.sent_observations.get(&path_id) {
6461            if info.notified {
6462                return None;
6463            }
6464        }
6465
6466        // Check rate limiting
6467        if self.rate_limiter.tokens < 1.0 {
6468            return None;
6469        }
6470
6471        // Consume a token and update path info
6472        self.rate_limiter.tokens -= 1.0;
6473
6474        // Update or create path info
6475        let info = self
6476            .sent_observations
6477            .entry(path_id)
6478            .or_insert_with(paths::PathAddressInfo::new);
6479        info.observed_address = Some(address);
6480        info.notified = true;
6481
6482        trace!(
6483            "queue_observed_address_frame: queuing OBSERVED_ADDRESS frame for path {} with address {}",
6484            path_id, address
6485        );
6486
6487        // Create and return the frame with sequence number
6488        let sequence_number = self.next_sequence_number;
6489        self.next_sequence_number = VarInt::from_u64(self.next_sequence_number.into_inner() + 1)
6490            .expect("sequence number overflow");
6491
6492        Some(frame::ObservedAddress {
6493            sequence_number,
6494            address,
6495        })
6496    }
6497
6498    /// Check for address observations that need to be sent
6499    fn check_for_address_observations(
6500        &mut self,
6501        _current_path: u64,
6502        peer_supports_address_discovery: bool,
6503        now: Instant,
6504    ) -> Vec<frame::ObservedAddress> {
6505        let mut frames = Vec::new();
6506
6507        // Check if we should send observations
6508        if !self.enabled || !peer_supports_address_discovery {
6509            return frames;
6510        }
6511
6512        // Update rate limiter tokens
6513        self.rate_limiter.update_tokens(now);
6514
6515        // Collect all paths that need observation frames
6516        let paths_to_notify: Vec<u64> = self
6517            .sent_observations
6518            .iter()
6519            .filter_map(|(&path_id, info)| {
6520                if info.observed_address.is_some() && !info.notified {
6521                    Some(path_id)
6522                } else {
6523                    None
6524                }
6525            })
6526            .collect();
6527
6528        // Send frames for each path that needs notification
6529        for path_id in paths_to_notify {
6530            // Check path restrictions (considers bootstrap mode)
6531            if !self.should_observe_path(path_id) {
6532                continue;
6533            }
6534
6535            // Check rate limiting (bootstrap nodes get more lenient limits)
6536            if !self.bootstrap_mode && self.rate_limiter.tokens < 1.0 {
6537                break; // No more tokens available for non-bootstrap nodes
6538            }
6539
6540            // Get the address
6541            if let Some(info) = self.sent_observations.get_mut(&path_id) {
6542                if let Some(address) = info.observed_address {
6543                    // Consume a token (bootstrap nodes consume at reduced rate)
6544                    if self.bootstrap_mode {
6545                        self.rate_limiter.tokens -= 0.2; // Bootstrap nodes consume 1/5th token
6546                    } else {
6547                        self.rate_limiter.tokens -= 1.0;
6548                    }
6549
6550                    // Mark as notified
6551                    info.notified = true;
6552
6553                    // Create frame with sequence number
6554                    let sequence_number = self.next_sequence_number;
6555                    self.next_sequence_number =
6556                        VarInt::from_u64(self.next_sequence_number.into_inner() + 1)
6557                            .expect("sequence number overflow");
6558
6559                    self.frames_sent += 1;
6560
6561                    frames.push(frame::ObservedAddress {
6562                        sequence_number,
6563                        address,
6564                    });
6565                }
6566            }
6567        }
6568
6569        frames
6570    }
6571
6572    /// Update the rate limit configuration
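    ///
    /// The rate is stored as a whole number of observations per second, so any fractional
    /// part of `new_rate` is truncated.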
6573    fn update_rate_limit(&mut self, new_rate: f64) {
6574        self.max_observation_rate = new_rate as u8;
6575        self.rate_limiter.set_rate(new_rate as u8);
6576    }
6577
6578    /// Create from transport parameters
6579    fn from_transport_params(params: &TransportParameters) -> Option<Self> {
6580        params
6581            .address_discovery
6582            .as_ref()
6583            .map(|config| Self::new(config, Instant::now()))
6584    }
6585
6586    /// Alternative constructor for tests - creates with simplified parameters
6587    #[cfg(test)]
6588    fn new_with_params(enabled: bool, max_rate: f64, observe_all_paths: bool) -> Self {
6589        // For tests, use SendAndReceive if enabled, otherwise create a disabled state
6590        if !enabled {
6591            // Create disabled state manually since we don't have a "disabled" variant
6592            return Self {
6593                enabled: false,
6594                max_observation_rate: max_rate as u8,
6595                observe_all_paths,
6596                sent_observations: std::collections::HashMap::new(),
6597                received_observations: std::collections::HashMap::new(),
6598                rate_limiter: AddressObservationRateLimiter::new(max_rate as u8, Instant::now()),
6599                received_history: Vec::new(),
6600                bootstrap_mode: false,
6601                next_sequence_number: VarInt::from_u32(0),
6602                last_received_sequence: std::collections::HashMap::new(),
6603                frames_sent: 0,
6604            };
6605        }
6606
6607        // Create using the config, then override specific fields for test purposes
6608        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
6609        let mut state = Self::new(&config, Instant::now());
6610        state.max_observation_rate = max_rate as u8;
6611        state.observe_all_paths = observe_all_paths;
6612        state.rate_limiter = AddressObservationRateLimiter::new(max_rate as u8, Instant::now());
6613        state
6614    }
6615
6616    /// Enable or disable bootstrap mode (aggressive observation)
6617    fn set_bootstrap_mode(&mut self, enabled: bool) {
6618        self.bootstrap_mode = enabled;
6619        // If enabling bootstrap mode, update rate limiter to allow higher rates
6620        if enabled {
6621            let bootstrap_rate = self.get_effective_rate_limit();
6622            self.rate_limiter.rate = bootstrap_rate;
6623            self.rate_limiter.max_tokens = bootstrap_rate * 2.0; // Allow burst of 2 seconds
6624            // Also fill tokens to max for immediate use
6625            self.rate_limiter.tokens = self.rate_limiter.max_tokens;
6626        }
6627    }
6628
6629    /// Check if bootstrap mode is enabled
6630    fn is_bootstrap_mode(&self) -> bool {
6631        self.bootstrap_mode
6632    }
6633
6634    /// Get the effective rate limit (considering bootstrap mode)
6635    fn get_effective_rate_limit(&self) -> f64 {
6636        if self.bootstrap_mode {
6637            // Bootstrap nodes get 5x the configured rate
6638            (self.max_observation_rate as f64) * 5.0
6639        } else {
6640            self.max_observation_rate as f64
6641        }
6642    }
6643
6644    /// Check if we should observe this path (considering bootstrap mode)
6645    fn should_observe_path(&self, path_id: u64) -> bool {
6646        if !self.enabled {
6647            return false;
6648        }
6649
6650        // Bootstrap nodes observe all paths regardless of configuration
6651        if self.bootstrap_mode {
6652            return true;
6653        }
6654
6655        // Normal mode respects the configuration
6656        self.observe_all_paths || path_id == 0
6657    }
6658
6659    /// Check if we should send observation immediately (for bootstrap nodes)
6660    fn should_send_observation_immediately(&self, is_new_connection: bool) -> bool {
6661        self.bootstrap_mode && is_new_connection
6662    }
6663}
6664
6665#[allow(dead_code)]
6666impl AddressObservationRateLimiter {
6667    /// Create a new rate limiter
6668    fn new(rate: u8, now: Instant) -> Self {
6669        let rate_f64 = rate as f64;
6670        Self {
6671            tokens: rate_f64,
6672            max_tokens: rate_f64,
6673            rate: rate_f64,
6674            last_update: now,
6675        }
6676    }
6677
6678    /// Try to consume tokens, returns true if successful
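    ///
    /// Refills the bucket via `update_tokens` first, then deducts `tokens` only if enough
    /// are available.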
6679    fn try_consume(&mut self, tokens: f64, now: Instant) -> bool {
6680        self.update_tokens(now);
6681
6682        if self.tokens >= tokens {
6683            self.tokens -= tokens;
6684            true
6685        } else {
6686            false
6687        }
6688    }
6689
6690    /// Update available tokens based on elapsed time
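    ///
    /// Standard token-bucket refill: `tokens = min(tokens + elapsed_secs * rate, max_tokens)`,
    /// so the bucket is capped at `max_tokens` regardless of how much time has elapsed.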
6691    fn update_tokens(&mut self, now: Instant) {
6692        let elapsed = now.saturating_duration_since(self.last_update);
6693        let new_tokens = elapsed.as_secs_f64() * self.rate;
6694        self.tokens = (self.tokens + new_tokens).min(self.max_tokens);
6695        self.last_update = now;
6696    }
6697
6698    /// Update the rate
6699    fn set_rate(&mut self, rate: u8) {
6700        let rate_f64 = rate as f64;
6701        self.rate = rate_f64;
6702        self.max_tokens = rate_f64;
6703        // Don't change current tokens, just cap at new max
6704        if self.tokens > self.max_tokens {
6705            self.tokens = self.max_tokens;
6706        }
6707    }
6708}
6709
6710#[cfg(test)]
6711mod tests {
6712    use super::*;
6713    use crate::transport_parameters::AddressDiscoveryConfig;
6714    use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
6715
6716    #[test]
6717    fn address_discovery_state_new() {
6718        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
6719        let now = Instant::now();
6720        let state = AddressDiscoveryState::new(&config, now);
6721
6722        assert!(state.enabled);
6723        assert_eq!(state.max_observation_rate, 10);
6724        assert!(!state.observe_all_paths);
6725        assert!(state.sent_observations.is_empty());
6726        assert!(state.received_observations.is_empty());
6727        assert!(state.received_history.is_empty());
6728        assert_eq!(state.rate_limiter.tokens, 10.0);
6729    }
6730
6731    #[test]
6732    fn address_discovery_state_disabled() {
6733        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
6734        let now = Instant::now();
6735        let mut state = AddressDiscoveryState::new(&config, now);
6736
6737        // Disable the state
6738        state.enabled = false;
6739
6740        // Should not send observations when disabled
6741        assert!(!state.should_send_observation(0, now));
6742    }
6743
6744    #[test]
6745    fn address_discovery_state_should_send_observation() {
6746        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
6747        let now = Instant::now();
6748        let mut state = AddressDiscoveryState::new(&config, now);
6749
6750        // Should send for new path
6751        assert!(state.should_send_observation(0, now));
6752
6753        // Add path info
6754        let mut path_info = paths::PathAddressInfo::new();
6755        path_info.update_observed_address(
6756            SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)), 8080),
6757            now,
6758        );
6759        path_info.mark_notified();
6760        state.sent_observations.insert(0, path_info);
6761
6762        // Should not send if already notified
6763        assert!(!state.should_send_observation(0, now));
6764
6765        // Path 1 is not observed by default (only path 0 is)
6766        assert!(!state.should_send_observation(1, now));
6767    }
6768
6769    #[test]
6770    fn address_discovery_state_rate_limiting() {
6771        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
6772        let now = Instant::now();
6773        let mut state = AddressDiscoveryState::new(&config, now);
6774
6775        // Configure to observe all paths for this test
6776        state.observe_all_paths = true;
6777
6778        // Should allow first observation on path 0
6779        assert!(state.should_send_observation(0, now));
6780
6781        // Consume some tokens to test rate limiting
6782        state.rate_limiter.try_consume(9.0, now); // Consume 9 tokens (leaving ~1)
6783
6784        // Next observation should be rate limited
6785        assert!(!state.should_send_observation(0, now));
6786
6787        // After 1 second, should have replenished tokens (10 per second)
6788        let later = now + Duration::from_secs(1);
6789        state.rate_limiter.update_tokens(later);
6790        assert!(state.should_send_observation(0, later));
6791    }
6792
6793    #[test]
6794    fn address_discovery_state_handle_observed_address() {
6795        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
6796        let now = Instant::now();
6797        let mut state = AddressDiscoveryState::new(&config, now);
6798
6799        let addr1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)), 443);
6800        let addr2 = SocketAddr::new(
6801            IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1)),
6802            8080,
6803        );
6804
6805        // Handle first observation
6806        state.handle_observed_address(addr1, 0, now);
6807        assert_eq!(state.received_history.len(), 1);
6808        assert_eq!(state.received_history[0].address, addr1);
6809        assert_eq!(state.received_history[0].path_id, 0);
6810
6811        // Handle second observation
6812        let later = now + Duration::from_millis(100);
6813        state.handle_observed_address(addr2, 1, later);
6814        assert_eq!(state.received_history.len(), 2);
6815        assert_eq!(state.received_history[1].address, addr2);
6816        assert_eq!(state.received_history[1].path_id, 1);
6817    }
6818
6819    #[test]
6820    fn address_discovery_state_get_observed_address() {
6821        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
6822        let now = Instant::now();
6823        let mut state = AddressDiscoveryState::new(&config, now);
6824
6825        // No address initially
6826        assert_eq!(state.get_observed_address(0), None);
6827
6828        // Add path info
6829        let mut path_info = paths::PathAddressInfo::new();
6830        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 80);
6831        path_info.update_observed_address(addr, now);
6832        state.received_observations.insert(0, path_info);
6833
6834        // Should return the address
6835        assert_eq!(state.get_observed_address(0), Some(addr));
6836        assert_eq!(state.get_observed_address(1), None);
6837    }
6838
6839    #[test]
6840    fn address_discovery_state_unnotified_changes() {
6841        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
6842        let now = Instant::now();
6843        let mut state = AddressDiscoveryState::new(&config, now);
6844
6845        // No changes initially
6846        assert!(!state.has_unnotified_changes());
6847
6848        // Add unnotified path
6849        let mut path_info = paths::PathAddressInfo::new();
6850        path_info.update_observed_address(
6851            SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)), 8080),
6852            now,
6853        );
6854        state.sent_observations.insert(0, path_info);
6855
6856        // Should have unnotified changes
6857        assert!(state.has_unnotified_changes());
6858
6859        // Mark as notified
6860        state.record_observation_sent(0);
6861        assert!(!state.has_unnotified_changes());
6862    }
6863
6864    #[test]
6865    fn address_observation_rate_limiter_token_bucket() {
6866        let now = Instant::now();
6867        let mut limiter = AddressObservationRateLimiter::new(5, now); // 5 tokens/sec
6868
6869        // Initial state
6870        assert_eq!(limiter.tokens, 5.0);
6871        assert_eq!(limiter.max_tokens, 5.0);
6872        assert_eq!(limiter.rate, 5.0);
6873
6874        // Consume 3 tokens
6875        assert!(limiter.try_consume(3.0, now));
6876        assert_eq!(limiter.tokens, 2.0);
6877
6878        // Try to consume more than available
6879        assert!(!limiter.try_consume(3.0, now));
6880        assert_eq!(limiter.tokens, 2.0);
6881
6882        // After 1 second, should have 5 more tokens (capped at max)
6883        let later = now + Duration::from_secs(1);
6884        limiter.update_tokens(later);
6885        assert_eq!(limiter.tokens, 5.0); // 2 + 5 = 7, but capped at 5
6886
6887        // After 0.5 seconds from original, should have 2.5 more tokens
6888        let half_sec = now + Duration::from_millis(500);
6889        let mut limiter2 = AddressObservationRateLimiter::new(5, now);
6890        limiter2.try_consume(3.0, now);
6891        limiter2.update_tokens(half_sec);
6892        assert_eq!(limiter2.tokens, 4.5); // 2 + 2.5
6893    }
6894
6895    // Tests for address_discovery_state field in Connection
6896    #[test]
6897    fn connection_initializes_address_discovery_state_default() {
6898        // Test that Connection initializes with default address discovery state
6899        // For now, just test that AddressDiscoveryState can be created with default config
6900        let config = crate::transport_parameters::AddressDiscoveryConfig::default();
6901        let state = AddressDiscoveryState::new(&config, Instant::now());
6902        assert!(state.enabled); // Default is now enabled
6903        assert_eq!(state.max_observation_rate, 10); // Default is 10
6904        assert!(!state.observe_all_paths);
6905    }
6906
6907    #[test]
6908    fn connection_initializes_with_address_discovery_enabled() {
6909        // Test that AddressDiscoveryState can be created with enabled config
6910        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
6911        let state = AddressDiscoveryState::new(&config, Instant::now());
6912        assert!(state.enabled);
6913        assert_eq!(state.max_observation_rate, 10);
6914        assert!(!state.observe_all_paths);
6915    }
6916
6917    #[test]
6918    fn connection_address_discovery_enabled_by_default() {
6919        // Test that AddressDiscoveryState is enabled with default config
6920        let config = crate::transport_parameters::AddressDiscoveryConfig::default();
6921        let state = AddressDiscoveryState::new(&config, Instant::now());
6922        assert!(state.enabled); // Default is now enabled
6923    }
6924
6925    #[test]
6926    fn negotiate_max_idle_timeout_commutative() {
6927        let test_params = [
6928            (None, None, None),
6929            (None, Some(VarInt(0)), None),
6930            (None, Some(VarInt(2)), Some(Duration::from_millis(2))),
6931            (Some(VarInt(0)), Some(VarInt(0)), None),
6932            (
6933                Some(VarInt(2)),
6934                Some(VarInt(0)),
6935                Some(Duration::from_millis(2)),
6936            ),
6937            (
6938                Some(VarInt(1)),
6939                Some(VarInt(4)),
6940                Some(Duration::from_millis(1)),
6941            ),
6942        ];
6943
6944        for (left, right, result) in test_params {
6945            assert_eq!(negotiate_max_idle_timeout(left, right), result);
6946            assert_eq!(negotiate_max_idle_timeout(right, left), result);
6947        }
6948    }
6949
6950    #[test]
6951    fn path_creation_initializes_address_discovery() {
6952        let config = TransportConfig::default();
6953        let remote = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)), 8080);
6954        let now = Instant::now();
6955
6956        // Test initial path creation
6957        let path = paths::PathData::new(remote, false, None, now, &config);
6958
6959        // Should have address info initialized
6960        assert!(path.address_info.observed_address.is_none());
6961        assert!(path.address_info.last_observed.is_none());
6962        assert_eq!(path.address_info.observation_count, 0);
6963        assert!(!path.address_info.notified);
6964
6965        // Should have rate limiter initialized
6966        assert_eq!(path.observation_rate_limiter.rate, 10.0);
6967        assert_eq!(path.observation_rate_limiter.max_tokens, 10.0);
6968        assert_eq!(path.observation_rate_limiter.tokens, 10.0);
6969    }
6970
6971    #[test]
6972    fn path_migration_resets_address_discovery() {
6973        let config = TransportConfig::default();
6974        let remote1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)), 8080);
6975        let remote2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)), 443);
6976        let now = Instant::now();
6977
6978        // Create initial path with some address discovery state
6979        let mut path1 = paths::PathData::new(remote1, false, None, now, &config);
6980        path1.update_observed_address(remote1, now);
6981        path1.mark_address_notified();
6982        path1.consume_observation_token(now);
6983        path1.set_observation_rate(20);
6984
6985        // Migrate to new path
6986        let path2 = paths::PathData::from_previous(remote2, &path1, now);
6987
6988        // Address info should be reset
6989        assert!(path2.address_info.observed_address.is_none());
6990        assert!(path2.address_info.last_observed.is_none());
6991        assert_eq!(path2.address_info.observation_count, 0);
6992        assert!(!path2.address_info.notified);
6993
6994        // Rate limiter should have same rate but full tokens
6995        assert_eq!(path2.observation_rate_limiter.rate, 20.0);
6996        assert_eq!(path2.observation_rate_limiter.tokens, 20.0);
6997    }
6998
6999    #[test]
7000    fn connection_path_updates_observation_rate() {
7001        let config = TransportConfig::default();
7002        let remote = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 42);
7003        let now = Instant::now();
7004
7005        let mut path = paths::PathData::new(remote, false, None, now, &config);
7006
7007        // Initial rate should be default
7008        assert_eq!(path.observation_rate_limiter.rate, 10.0);
7009
7010        // Update rate based on negotiated config
7011        path.set_observation_rate(25);
7012        assert_eq!(path.observation_rate_limiter.rate, 25.0);
7013        assert_eq!(path.observation_rate_limiter.max_tokens, 25.0);
7014
7015        // Tokens should be capped at new max if needed
7016        path.observation_rate_limiter.tokens = 30.0; // Set higher than max
7017        path.set_observation_rate(20);
7018        assert_eq!(path.observation_rate_limiter.tokens, 20.0); // Capped at new max
7019    }
7020
7021    #[test]
7022    fn path_validation_preserves_discovery_state() {
7023        let config = TransportConfig::default();
7024        let remote = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)), 8080);
7025        let now = Instant::now();
7026
7027        let mut path = paths::PathData::new(remote, false, None, now, &config);
7028
7029        // Set up some discovery state
7030        let observed = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 5678);
7031        path.update_observed_address(observed, now);
7032        path.set_observation_rate(15);
7033
7034        // Simulate path validation
7035        path.validated = true;
7036
7037        // Discovery state should be preserved
7038        assert_eq!(path.address_info.observed_address, Some(observed));
7039        assert_eq!(path.observation_rate_limiter.rate, 15.0);
7040    }
7041
7042    #[test]
7043    fn address_discovery_state_initialization() {
7044        // Use the test constructor that allows setting specific values
7045        let state = AddressDiscoveryState::new_with_params(true, 30.0, true);
7046
7047        assert!(state.enabled);
7048        assert_eq!(state.max_observation_rate, 30);
7049        assert!(state.observe_all_paths);
7050        assert!(state.sent_observations.is_empty());
7051        assert!(state.received_observations.is_empty());
7052        assert!(state.received_history.is_empty());
7053    }
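
    // A small sanity test for bootstrap mode, based on `set_bootstrap_mode` and
    // `get_effective_rate_limit` above: enabling it should yield a 5x effective rate,
    // a two-second token burst filled immediately, and observation of every path.
    #[test]
    fn bootstrap_mode_raises_effective_rate_limit() {
        let mut state = AddressDiscoveryState::new_with_params(true, 10.0, false);

        state.set_bootstrap_mode(true);

        // 10 observations/sec * 5 = 50/sec effective rate for bootstrap nodes
        assert!(state.is_bootstrap_mode());
        assert_eq!(state.get_effective_rate_limit(), 50.0);
        assert_eq!(state.rate_limiter.rate, 50.0);
        // Burst capacity is two seconds at the bootstrap rate, filled immediately
        assert_eq!(state.rate_limiter.max_tokens, 100.0);
        assert_eq!(state.rate_limiter.tokens, 100.0);
        // Bootstrap nodes observe all paths regardless of `observe_all_paths`
        assert!(state.should_observe_path(5));
    }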
7054
7055    // Tests for Task 2.3: Frame Processing Pipeline
7056    #[test]
7057    fn handle_observed_address_frame_basic() {
7058        let config = AddressDiscoveryConfig::SendAndReceive;
7059        let mut state = AddressDiscoveryState::new(&config, Instant::now());
7060        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)), 8080);
7061        let now = Instant::now();
7062        let path_id = 0;
7063
7064        // Handle an observed address frame
7065        state.handle_observed_address(addr, path_id, now);
7066
7067        // Should have recorded the observation
7068        assert_eq!(state.received_history.len(), 1);
7069        assert_eq!(state.received_history[0].address, addr);
7070        assert_eq!(state.received_history[0].path_id, path_id);
7071        assert_eq!(state.received_history[0].received_at, now);
7072
7073        // Should have updated path state
7074        assert!(state.received_observations.contains_key(&path_id));
7075        let path_info = &state.received_observations[&path_id];
7076        assert_eq!(path_info.observed_address, Some(addr));
7077        assert_eq!(path_info.last_observed, Some(now));
7078        assert_eq!(path_info.observation_count, 1);
7079    }
7080
7081    #[test]
7082    fn handle_observed_address_frame_multiple_observations() {
7083        let config = AddressDiscoveryConfig::SendAndReceive;
7084        let mut state = AddressDiscoveryState::new(&config, Instant::now());
7085        let addr1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)), 8080);
7086        let addr2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)), 443);
7087        let now = Instant::now();
7088        let path_id = 0;
7089
7090        // Handle multiple observations
7091        state.handle_observed_address(addr1, path_id, now);
7092        state.handle_observed_address(addr1, path_id, now + Duration::from_secs(1));
7093        state.handle_observed_address(addr2, path_id, now + Duration::from_secs(2));
7094
7095        // Should have all observations in the event list
7096        assert_eq!(state.received_history.len(), 3);
7097
7098        // Path info should reflect the latest observation
7099        let path_info = &state.received_observations[&path_id];
7100        assert_eq!(path_info.observed_address, Some(addr2));
7101        assert_eq!(path_info.observation_count, 1); // Reset for new address
7102    }
7103
7104    #[test]
7105    fn handle_observed_address_frame_disabled() {
7106        let config = AddressDiscoveryConfig::SendAndReceive;
7107        let mut state = AddressDiscoveryState::new(&config, Instant::now());
7108        state.enabled = false; // Disable after creation
7109        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)), 8080);
7110        let now = Instant::now();
7111
7112        // Should not handle when disabled
7113        state.handle_observed_address(addr, 0, now);
7114
7115        // Should not record anything
7116        assert!(state.received_history.is_empty());
7117        assert!(state.sent_observations.is_empty());
7118        assert!(state.received_observations.is_empty());
7119    }
7120
7121    #[test]
7122    fn should_send_observation_basic() {
7123        let config = AddressDiscoveryConfig::SendAndReceive;
7124        let mut state = AddressDiscoveryState::new(&config, Instant::now());
7125        state.max_observation_rate = 10;
7126        let now = Instant::now();
7127        let path_id = 0;
7128
7129        // Should be able to send initially
7130        assert!(state.should_send_observation(path_id, now));
7131
7132        // Record that we sent one
7133        state.record_observation_sent(path_id);
7134
7135        // Should still be able to send (have tokens)
7136        assert!(state.should_send_observation(path_id, now));
7137    }
7138
7139    #[test]
7140    fn should_send_observation_rate_limiting() {
7141        let config = AddressDiscoveryConfig::SendAndReceive;
7142        let now = Instant::now();
7143        let mut state = AddressDiscoveryState::new(&config, now);
7144        state.max_observation_rate = 2; // Very low rate
7145        state.update_rate_limit(2.0);
7146        let path_id = 0;
7147
7148        // Consume all tokens
7149        assert!(state.should_send_observation(path_id, now));
7150        state.record_observation_sent(path_id);
7151        assert!(state.should_send_observation(path_id, now));
7152        state.record_observation_sent(path_id);
7153
7154        // Should be rate limited now
7155        assert!(!state.should_send_observation(path_id, now));
7156
7157        // Wait for token replenishment
7158        let later = now + Duration::from_secs(1);
7159        assert!(state.should_send_observation(path_id, later));
7160    }
7161
7162    #[test]
7163    fn should_send_observation_disabled() {
7164        let config = AddressDiscoveryConfig::SendAndReceive;
7165        let mut state = AddressDiscoveryState::new(&config, Instant::now());
7166        state.enabled = false;
7167
7168        // Should never send when disabled
7169        assert!(!state.should_send_observation(0, Instant::now()));
7170    }
7171
7172    #[test]
7173    fn should_send_observation_per_path() {
7174        let config = AddressDiscoveryConfig::SendAndReceive;
7175        let now = Instant::now();
7176        let mut state = AddressDiscoveryState::new(&config, now);
7177        state.max_observation_rate = 2; // Allow 2 observations per second
7178        state.observe_all_paths = true;
7179        state.update_rate_limit(2.0);
7180
7181        // Path 0 uses a token from the shared rate limiter
7182        assert!(state.should_send_observation(0, now));
7183        state.record_observation_sent(0);
7184
7185        // Path 1 can still send because we have 2 tokens per second
7186        assert!(state.should_send_observation(1, now));
7187        state.record_observation_sent(1);
7188
7189        // Now both paths should be rate limited (no more tokens)
7190        assert!(!state.should_send_observation(0, now));
7191        assert!(!state.should_send_observation(1, now));
7192
7193        // After 1 second, we should have new tokens
7194        let later = now + Duration::from_secs(1);
7195        assert!(state.should_send_observation(0, later));
7196    }
7197
7198    #[test]
7199    fn has_unnotified_changes_test() {
7200        let config = AddressDiscoveryConfig::SendAndReceive;
7201        let mut state = AddressDiscoveryState::new(&config, Instant::now());
7202        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)), 8080);
7203        let now = Instant::now();
7204
7205        // Initially no changes
7206        assert!(!state.has_unnotified_changes());
7207
7208        // After receiving an observation
7209        state.handle_observed_address(addr, 0, now);
7210        assert!(state.has_unnotified_changes());
7211
7212        // After marking as notified
7213        state.received_observations.get_mut(&0).unwrap().notified = true;
7214        assert!(!state.has_unnotified_changes());
7215    }
7216
7217    #[test]
7218    fn get_observed_address_test() {
7219        let config = AddressDiscoveryConfig::SendAndReceive;
7220        let mut state = AddressDiscoveryState::new(&config, Instant::now());
7221        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)), 8080);
7222        let now = Instant::now();
7223        let path_id = 0;
7224
7225        // Initially no address
7226        assert_eq!(state.get_observed_address(path_id), None);
7227
7228        // After observation
7229        state.handle_observed_address(addr, path_id, now);
7230        assert_eq!(state.get_observed_address(path_id), Some(addr));
7231
7232        // Non-existent path
7233        assert_eq!(state.get_observed_address(999), None);
7234    }
7235
7236    // Tests for Task 2.4: Rate Limiting Implementation
7237    #[test]
7238    fn rate_limiter_token_bucket_basic() {
7239        let now = Instant::now();
7240        let mut limiter = AddressObservationRateLimiter::new(10, now); // 10 tokens per second
7241
7242        // Should be able to consume tokens up to the limit
7243        assert!(limiter.try_consume(5.0, now));
7244        assert!(limiter.try_consume(5.0, now));
7245
7246        // Should not be able to consume more tokens
7247        assert!(!limiter.try_consume(1.0, now));
7248    }
7249
7250    #[test]
7251    fn rate_limiter_token_replenishment() {
7252        let now = Instant::now();
7253        let mut limiter = AddressObservationRateLimiter::new(10, now); // 10 tokens per second
7254
7255        // Consume all tokens
7256        assert!(limiter.try_consume(10.0, now));
7257        assert!(!limiter.try_consume(0.1, now)); // Should be empty
7258
7259        // After 1 second, should have new tokens
7260        let later = now + Duration::from_secs(1);
7261        assert!(limiter.try_consume(10.0, later)); // Should work after replenishment
7262
7263        // After 0.5 seconds, should have 5 new tokens
7264        assert!(!limiter.try_consume(0.1, later)); // Empty again
7265        let later = later + Duration::from_millis(500);
7266        assert!(limiter.try_consume(5.0, later)); // Should have ~5 tokens
7267        assert!(!limiter.try_consume(0.1, later)); // But not more
7268    }
7269
7270    #[test]
7271    fn rate_limiter_max_tokens_cap() {
7272        let now = Instant::now();
7273        let mut limiter = AddressObservationRateLimiter::new(10, now);
7274
7275        // After 2 seconds, should still be capped at max_tokens
7276        let later = now + Duration::from_secs(2);
7277        // Try to consume more than max - should fail
7278        assert!(limiter.try_consume(10.0, later));
7279        assert!(!limiter.try_consume(10.1, later)); // Can't consume more than max even after time
7280
7281        // Consume some tokens
7282        let later2 = later + Duration::from_secs(1);
7283        assert!(limiter.try_consume(3.0, later2));
7284
7285        // After another 2 seconds, should be back at max
7286        let much_later = later2 + Duration::from_secs(2);
7287        assert!(limiter.try_consume(10.0, much_later)); // Can consume full amount
7288        assert!(!limiter.try_consume(0.1, much_later)); // But not more
7289    }
7290
7291    #[test]
7292    fn rate_limiter_fractional_consumption() {
7293        let now = Instant::now();
7294        let mut limiter = AddressObservationRateLimiter::new(10, now);
7295
7296        // Should handle fractional token consumption
7297        assert!(limiter.try_consume(0.5, now));
7298        assert!(limiter.try_consume(2.3, now));
7299        assert!(limiter.try_consume(7.2, now)); // Total: 10.0
7300        assert!(!limiter.try_consume(0.1, now)); // Should be empty
7301
7302        // Should handle fractional replenishment
7303        let later = now + Duration::from_millis(100); // 0.1 seconds = 1 token
7304        assert!(limiter.try_consume(1.0, later));
7305        assert!(!limiter.try_consume(0.1, later));
7306    }
7307
7308    #[test]
7309    fn rate_limiter_zero_rate() {
7310        let now = Instant::now();
7311        let mut limiter = AddressObservationRateLimiter::new(0, now); // 0 tokens per second
7312
7313        // Should never be able to consume tokens
7314        assert!(!limiter.try_consume(1.0, now));
7315        assert!(!limiter.try_consume(0.1, now));
7316        assert!(!limiter.try_consume(0.001, now));
7317
7318        // Even after time passes, no tokens
7319        let later = now + Duration::from_secs(10);
7320        assert!(!limiter.try_consume(0.001, later));
7321    }
7322
7323    #[test]
7324    fn rate_limiter_high_rate() {
7325        let now = Instant::now();
7326        let mut limiter = AddressObservationRateLimiter::new(63, now); // Max allowed rate
7327
7328        // Consume many tokens
7329        assert!(limiter.try_consume(60.0, now));
7330        assert!(limiter.try_consume(3.0, now));
7331        assert!(!limiter.try_consume(0.1, now)); // Should be empty
7332
7333        // After 1 second, should have replenished
7334        let later = now + Duration::from_secs(1);
7335        assert!(limiter.try_consume(63.0, later)); // Full amount available
7336        assert!(!limiter.try_consume(0.1, later)); // But not more
7337    }
7338
7339    #[test]
7340    fn rate_limiter_time_precision() {
7341        let now = Instant::now();
7342        let mut limiter = AddressObservationRateLimiter::new(100, now); // 100 tokens per second
7343
7344        // Consume all tokens
7345        assert!(limiter.try_consume(100.0, now));
7346        assert!(!limiter.try_consume(0.1, now));
7347
7348        // After 10 milliseconds, should have ~1 token
7349        let later = now + Duration::from_millis(10);
7350        assert!(limiter.try_consume(0.8, later)); // Should have ~1 token (allowing for precision)
7351        assert!(!limiter.try_consume(0.5, later)); // But not much more
7352
7353        // Reset for next test by waiting longer
7354        let much_later = later + Duration::from_millis(100); // 100ms = 10 tokens
7355        assert!(limiter.try_consume(5.0, much_later)); // Should have some tokens
7356
7357        // Consume remaining to have a clean state
7358        limiter.tokens = 0.0; // Force empty state
7359
7360        // After 1 millisecond from empty state
7361        let final_time = much_later + Duration::from_millis(1);
7362        // With 100 tokens/sec, 1 millisecond = 0.1 tokens
7363        limiter.update_tokens(final_time); // Update tokens manually
7364
7365        // Check we have approximately 0.1 tokens (allow for floating point error)
7366        assert!(limiter.tokens >= 0.09 && limiter.tokens <= 0.11);
7367    }
7368
7369    #[test]
7370    fn per_path_rate_limiting_independent() {
7371        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
7372        let now = Instant::now();
7373        let mut state = AddressDiscoveryState::new(&config, now);
7374
7375        // Enable all paths observation
7376        state.observe_all_paths = true;
7377
7378        // Set a lower rate limit for this test (5 tokens)
7379        state.update_rate_limit(5.0);
7380
7381        // Set up path addresses so should_send_observation returns true
7382        state
7383            .sent_observations
7384            .insert(0, paths::PathAddressInfo::new());
7385        state
7386            .sent_observations
7387            .insert(1, paths::PathAddressInfo::new());
7388        state
7389            .sent_observations
7390            .insert(2, paths::PathAddressInfo::new());
7391
7392        // Set observed addresses so paths need observation
7393        state
7394            .sent_observations
7395            .get_mut(&0)
7396            .unwrap()
7397            .observed_address = Some(SocketAddr::new(
7398            IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)),
7399            8080,
7400        ));
7401        state
7402            .sent_observations
7403            .get_mut(&1)
7404            .unwrap()
7405            .observed_address = Some(SocketAddr::new(
7406            IpAddr::V4(Ipv4Addr::new(192, 168, 1, 2)),
7407            8081,
7408        ));
7409        state
7410            .sent_observations
7411            .get_mut(&2)
7412            .unwrap()
7413            .observed_address = Some(SocketAddr::new(
7414            IpAddr::V4(Ipv4Addr::new(192, 168, 1, 3)),
7415            8082,
7416        ));
7417
7418        // Path 0: consume 3 tokens
7419        for _ in 0..3 {
7420            assert!(state.should_send_observation(0, now));
7421            state.record_observation_sent(0);
7422            // Reset notified flag for next check
7423            state.sent_observations.get_mut(&0).unwrap().notified = false;
7424        }
7425
7426        // Path 1: consume 2 tokens
7427        for _ in 0..2 {
7428            assert!(state.should_send_observation(1, now));
7429            state.record_observation_sent(1);
7430            // Reset notified flag for next check
7431            state.sent_observations.get_mut(&1).unwrap().notified = false;
7432        }
7433
7434        // Global limit should be hit (5 total)
7435        assert!(!state.should_send_observation(2, now));
7436
7437        // After 1 second, should have 5 more tokens
7438        let later = now + Duration::from_secs(1);
7439
7440        // All paths should be able to send again
7441        assert!(state.should_send_observation(0, later));
7442        assert!(state.should_send_observation(1, later));
7443        assert!(state.should_send_observation(2, later));
7444    }
7445
7446    #[test]
7447    fn per_path_rate_limiting_with_path_specific_limits() {
7448        let now = Instant::now();
7449        let remote1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)), 8080);
7450        let remote2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 2)), 8081);
7451        let config = TransportConfig::default();
7452
7453        // Create paths with different rate limits
7454        let mut path1 = paths::PathData::new(remote1, false, None, now, &config);
7455        let mut path2 = paths::PathData::new(remote2, false, None, now, &config);
7456
7457        // Set different rate limits
7458        path1.observation_rate_limiter = paths::PathObservationRateLimiter::new(10, now); // 10/sec
7459        path2.observation_rate_limiter = paths::PathObservationRateLimiter::new(5, now); // 5/sec
7460
7461        // Path 1 should allow 10 observations
7462        for _ in 0..10 {
7463            assert!(path1.observation_rate_limiter.can_send(now));
7464            path1.observation_rate_limiter.consume_token(now);
7465        }
7466        assert!(!path1.observation_rate_limiter.can_send(now));
7467
7468        // Path 2 should allow 5 observations
7469        for _ in 0..5 {
7470            assert!(path2.observation_rate_limiter.can_send(now));
7471            path2.observation_rate_limiter.consume_token(now);
7472        }
7473        assert!(!path2.observation_rate_limiter.can_send(now));
7474    }
7475
7476    #[test]
7477    fn per_path_rate_limiting_address_change_detection() {
7478        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
7479        let now = Instant::now();
7480        let mut state = AddressDiscoveryState::new(&config, now);
7481
7482        // Setup initial path with address
7483        let path_id = 0;
7484        let addr1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)), 8080);
7485        let addr2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 2)), 8080);
7486
7487        // First observation should be allowed
7488        assert!(state.should_send_observation(path_id, now));
7489
7490        // Queue the frame (this also marks it as notified in sent_observations)
7491        let frame = state.queue_observed_address_frame(path_id, addr1);
7492        assert!(frame.is_some());
7493
7494        // Same path, should not send again (already notified)
7495        assert!(!state.should_send_observation(path_id, now));
7496
7497        // Simulate address change detection by marking as not notified
7498        if let Some(info) = state.sent_observations.get_mut(&path_id) {
7499            info.notified = false;
7500            info.observed_address = Some(addr2);
7501        }
7502
7503        // Should now allow sending for the address change
7504        assert!(state.should_send_observation(path_id, now));
7505    }
7506
7507    #[test]
7508    fn per_path_rate_limiting_migration() {
7509        let now = Instant::now();
7510        let remote1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)), 8080);
7511        let remote2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 2)), 8081);
7512        let config = TransportConfig::default();
7513
7514        // Create initial path and consume tokens
7515        let mut path = paths::PathData::new(remote1, false, None, now, &config);
7516        path.observation_rate_limiter = paths::PathObservationRateLimiter::new(10, now);
7517
7518        // Consume some tokens
7519        for _ in 0..5 {
7520            assert!(path.observation_rate_limiter.can_send(now));
7521            path.observation_rate_limiter.consume_token(now);
7522        }
7523
7524        // Create new path (simulates connection migration)
7525        let mut new_path = paths::PathData::new(remote2, false, None, now, &config);
7526
7527        // New path should have fresh rate limiter (migration resets limits);
7528        // set the rate explicitly so the test does not depend on the path's default
7529        new_path.observation_rate_limiter = paths::PathObservationRateLimiter::new(10, now);
7530
7531        // Should have full tokens available
7532        for _ in 0..10 {
7533            assert!(new_path.observation_rate_limiter.can_send(now));
7534            new_path.observation_rate_limiter.consume_token(now);
7535        }
7536        assert!(!new_path.observation_rate_limiter.can_send(now));
7537    }
7538
7539    #[test]
7540    fn per_path_rate_limiting_disabled_paths() {
7541        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
7542        let now = Instant::now();
7543        let mut state = AddressDiscoveryState::new(&config, now);
7544
7545        // Primary path (id 0) should be allowed
7546        assert!(state.should_send_observation(0, now));
7547
7548        // Non-primary paths should not be allowed when observe_all_paths is false
7549        assert!(!state.should_send_observation(1, now));
7550        assert!(!state.should_send_observation(2, now));
7551
7552        // Even with rate limit available
7553        let later = now + Duration::from_secs(1);
7554        assert!(!state.should_send_observation(1, later));
7555    }
7556
7557    #[test]
7558    fn respecting_negotiated_max_observation_rate_basic() {
7559        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
7560        let now = Instant::now();
7561        let mut state = AddressDiscoveryState::new(&config, now);
7562
7563        // Simulate negotiated rate from peer (lower than ours)
7564        state.max_observation_rate = 10; // Peer only allows 10/sec
7565        state.rate_limiter = AddressObservationRateLimiter::new(10, now);
7566
7567        // Should respect the negotiated rate (10, not 20)
7568        for _ in 0..10 {
7569            assert!(state.should_send_observation(0, now));
7570        }
7571        // 11th should fail
7572        assert!(!state.should_send_observation(0, now));
7573    }
7574
7575    #[test]
7576    fn respecting_negotiated_max_observation_rate_zero() {
7577        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
7578        let now = Instant::now();
7579        let mut state = AddressDiscoveryState::new(&config, now);
7580
7581        // Peer negotiated rate of 0 (disabled)
7582        state.max_observation_rate = 0;
7583        state.rate_limiter = AddressObservationRateLimiter::new(0, now);
7584
7585        // Should not send any observations
7586        assert!(!state.should_send_observation(0, now));
7587        assert!(!state.should_send_observation(1, now));
7588
7589        // Even after time passes
7590        let later = now + Duration::from_secs(10);
7591        assert!(!state.should_send_observation(0, later));
7592    }
7593
7594    #[test]
7595    fn respecting_negotiated_max_observation_rate_higher() {
7596        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
7597        let now = Instant::now();
7598        let mut state = AddressDiscoveryState::new(&config, now);
7599
7600        // Set up a path with an address to observe
7601        state
7602            .sent_observations
7603            .insert(0, paths::PathAddressInfo::new());
7604        state
7605            .sent_observations
7606            .get_mut(&0)
7607            .unwrap()
7608            .observed_address = Some(SocketAddr::new(
7609            IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)),
7610            8080,
7611        ));
7612
7613        // Set our local rate to 5
7614        state.update_rate_limit(5.0);
7615
7616        // Simulate negotiated rate from peer (higher than ours)
7617        state.max_observation_rate = 20; // Peer allows 20/sec
7618
7619        // Should respect our local rate (5, not 20)
7620        for _ in 0..5 {
7621            assert!(state.should_send_observation(0, now));
7622            state.record_observation_sent(0);
7623            // Reset notified flag for next iteration
7624            state.sent_observations.get_mut(&0).unwrap().notified = false;
7625        }
7626        // 6th should fail (out of tokens)
7627        assert!(!state.should_send_observation(0, now));
7628    }
7629
7630    #[test]
7631    fn respecting_negotiated_max_observation_rate_dynamic_update() {
7632        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
7633        let now = Instant::now();
7634        let mut state = AddressDiscoveryState::new(&config, now);
7635
7636        // Set up initial path
7637        state
7638            .sent_observations
7639            .insert(0, paths::PathAddressInfo::new());
7640        state
7641            .sent_observations
7642            .get_mut(&0)
7643            .unwrap()
7644            .observed_address = Some(SocketAddr::new(
7645            IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)),
7646            8080,
7647        ));
7648
7649        // Use initial rate - consume 5 tokens
7650        for _ in 0..5 {
7651            assert!(state.should_send_observation(0, now));
7652            state.record_observation_sent(0);
7653            // Reset notified flag for next iteration
7654            state.sent_observations.get_mut(&0).unwrap().notified = false;
7655        }
7656
7657        // We have 5 tokens remaining
7658
7659        // Simulate rate renegotiation (e.g., from transport parameter update)
7660        state.max_observation_rate = 3;
7661        state.rate_limiter.set_rate(3);
7662
7663        // Can still use remaining tokens from before (5 tokens)
7664        // But they're capped at new max (3), so we'll have 3 tokens
7665        for _ in 0..3 {
7666            assert!(state.should_send_observation(0, now));
7667            state.record_observation_sent(0);
7668            // Reset notified flag for next iteration
7669            state.sent_observations.get_mut(&0).unwrap().notified = false;
7670        }
7671
7672        // Should be out of tokens now
7673        assert!(!state.should_send_observation(0, now));
7674
7675        // After 1 second, should only have 3 new tokens
7676        let later = now + Duration::from_secs(1);
7677        for _ in 0..3 {
7678            assert!(state.should_send_observation(0, later));
7679            state.record_observation_sent(0);
7680            // Reset notified flag for next iteration
7681            state.sent_observations.get_mut(&0).unwrap().notified = false;
7682        }
7683
7684        // Should be out of tokens again
7685        assert!(!state.should_send_observation(0, later));
7686    }
7687
7688    #[test]
7689    fn respecting_negotiated_max_observation_rate_with_paths() {
7690        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
7691        let now = Instant::now();
7692        let mut state = AddressDiscoveryState::new(&config, now);
7693
7694        // Enable all paths observation
7695        state.observe_all_paths = true;
7696
7697        // Set up multiple paths with addresses
7698        for i in 0..3 {
7699            state
7700                .sent_observations
7701                .insert(i, paths::PathAddressInfo::new());
7702            state
7703                .sent_observations
7704                .get_mut(&i)
7705                .unwrap()
7706                .observed_address = Some(SocketAddr::new(
7707                IpAddr::V4(Ipv4Addr::new(192, 168, 1, 100 + i as u8)),
7708                5000,
7709            ));
7710        }
7711
7712        // Consume tokens by sending observations
7713        // We start with 10 tokens
7714        for _ in 0..3 {
7715            // Each iteration sends one observation per path
7716            for i in 0..3 {
7717                if state.should_send_observation(i, now) {
7718                    state.record_observation_sent(i);
7719                    // Reset notified flag for next iteration
7720                    state.sent_observations.get_mut(&i).unwrap().notified = false;
7721                }
7722            }
7723        }
7724
7725        // We've sent 9 observations (3 iterations × 3 paths), have 1 token left
7726        // One more observation should succeed
7727        assert!(state.should_send_observation(0, now));
7728        state.record_observation_sent(0);
7729
7730        // All paths should be rate limited now (no tokens left)
7731        assert!(!state.should_send_observation(0, now));
7732        assert!(!state.should_send_observation(1, now));
7733        assert!(!state.should_send_observation(2, now));
7734    }
7735
7736    #[test]
7737    fn queue_observed_address_frame_basic() {
7738        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
7739        let now = Instant::now();
7740        let mut state = AddressDiscoveryState::new(&config, now);
7741
7742        // Queue a frame for path 0
7743        let address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 100)), 5000);
7744        let frame = state.queue_observed_address_frame(0, address);
7745
7746        // Should return Some(frame) since this is the first observation
7747        assert!(frame.is_some());
7748        let frame = frame.unwrap();
7749        assert_eq!(frame.address, address);
7750
7751        // Should mark path as notified
7752        assert!(state.sent_observations.contains_key(&0));
7753        assert!(state.sent_observations.get(&0).unwrap().notified);
7754    }
7755
7756    #[test]
7757    fn queue_observed_address_frame_rate_limited() {
7758        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
7759        let now = Instant::now();
7760        let mut state = AddressDiscoveryState::new(&config, now);
7761
7762        // Enable all paths for this test
7763        state.observe_all_paths = true;
7764
7765        // With 10 tokens initially, we should be able to send 10 frames
7766        let mut addresses = Vec::new();
7767        for i in 0..10 {
7768            let addr = SocketAddr::new(
7769                IpAddr::V4(Ipv4Addr::new(192, 168, 1, i as u8)),
7770                5000 + i as u16,
7771            );
7772            addresses.push(addr);
7773            assert!(
7774                state.queue_observed_address_frame(i as u64, addr).is_some(),
7775                "Frame {} should be allowed",
7776                i + 1
7777            );
7778        }
7779
7780        // 11th should be rate limited
7781        let addr11 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 11)), 5011);
7782        assert!(
7783            state.queue_observed_address_frame(10, addr11).is_none(),
7784            "11th frame should be rate limited"
7785        );
7786    }
7787
7788    #[test]
7789    fn queue_observed_address_frame_disabled() {
7790        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
7791        let now = Instant::now();
7792        let mut state = AddressDiscoveryState::new(&config, now);
7793
7794        // Disable address discovery
7795        state.enabled = false;
7796
7797        let address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 100)), 5000);
7798
7799        // Should return None when disabled
7800        assert!(state.queue_observed_address_frame(0, address).is_none());
7801    }
7802
7803    #[test]
7804    fn queue_observed_address_frame_already_notified() {
        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
        let now = Instant::now();
        let mut state = AddressDiscoveryState::new(&config, now);

        let address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 100)), 5000);

        // First observation should succeed
        assert!(state.queue_observed_address_frame(0, address).is_some());

        // Second observation for same address should return None
        assert!(state.queue_observed_address_frame(0, address).is_none());

        // Even with different address, if already notified, should return None
        let new_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 101)), 5001);
        assert!(state.queue_observed_address_frame(0, new_address).is_none());
    }

    #[test]
    fn queue_observed_address_frame_primary_path_only() {
        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
        let now = Instant::now();
        let mut state = AddressDiscoveryState::new(&config, now);

        let address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 100)), 5000);

        // Primary path should work
        assert!(state.queue_observed_address_frame(0, address).is_some());

        // Non-primary paths should not work
        assert!(state.queue_observed_address_frame(1, address).is_none());
        assert!(state.queue_observed_address_frame(2, address).is_none());
    }

    #[test]
    fn queue_observed_address_frame_updates_path_info() {
        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
        let now = Instant::now();
        let mut state = AddressDiscoveryState::new(&config, now);

        let address = SocketAddr::new(
            IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1)),
            5000,
        );

        // Queue frame
        let frame = state.queue_observed_address_frame(0, address);
        assert!(frame.is_some());

        // Check path info was updated
        let path_info = state.sent_observations.get(&0).unwrap();
        assert_eq!(path_info.observed_address, Some(address));
        assert!(path_info.notified);

        // Note: received_history list is NOT updated by queue_observed_address_frame
        // That list is for addresses we've received from peers, not ones we're sending
        assert_eq!(state.received_history.len(), 0);
    }

    #[test]
    fn retransmits_includes_outbound_observations() {
        use crate::connection::spaces::Retransmits;

        // Create a retransmits struct
        let mut retransmits = Retransmits::default();

        // Initially should be empty
        assert!(retransmits.outbound_observations.is_empty());

        // Add an observed address frame
        let address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 100)), 5000);
        let frame = frame::ObservedAddress {
            sequence_number: VarInt::from_u32(1),
            address,
        };
        retransmits.outbound_observations.push(frame);

        // Should now have one frame
        assert_eq!(retransmits.outbound_observations.len(), 1);
        assert_eq!(retransmits.outbound_observations[0].address, address);
    }

    #[test]
    fn check_for_address_observations_no_peer_support() {
        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
        let now = Instant::now();
        let mut state = AddressDiscoveryState::new(&config, now);

        // Simulate address change on path 0
        state
            .sent_observations
            .insert(0, paths::PathAddressInfo::new());
        state
            .sent_observations
            .get_mut(&0)
            .unwrap()
            .observed_address = Some(SocketAddr::new(
            IpAddr::V4(Ipv4Addr::new(192, 168, 1, 100)),
            5000,
        ));

        // Check for observations with no peer support
        let frames = state.check_for_address_observations(0, false, now);

        // Should return empty vec when peer doesn't support
        assert!(frames.is_empty());
    }

    #[test]
    fn check_for_address_observations_with_peer_support() {
        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
        let now = Instant::now();
        let mut state = AddressDiscoveryState::new(&config, now);

        // Simulate address change on path 0
        let address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 100)), 5000);
        state
            .sent_observations
            .insert(0, paths::PathAddressInfo::new());
        state
            .sent_observations
            .get_mut(&0)
            .unwrap()
            .observed_address = Some(address);

        // Check for observations with peer support
        let frames = state.check_for_address_observations(0, true, now);

        // Should return frame for unnotified address
        assert_eq!(frames.len(), 1);
        assert_eq!(frames[0].address, address);

        // Path should now be marked as notified
        assert!(state.sent_observations.get(&0).unwrap().notified);
    }

    #[test]
    fn check_for_address_observations_rate_limited() {
        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
        let now = Instant::now();
        let mut state = AddressDiscoveryState::new(&config, now);

        // Set up a single path with observed address
        let address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 100)), 5000);
        state
            .sent_observations
            .insert(0, paths::PathAddressInfo::new());
        state
            .sent_observations
            .get_mut(&0)
            .unwrap()
            .observed_address = Some(address);

        // Consume all initial tokens (starts with 10)
        for _ in 0..10 {
            let frames = state.check_for_address_observations(0, true, now);
            if frames.is_empty() {
                break;
            }
            // Mark path as unnotified again for next iteration
            state.sent_observations.get_mut(&0).unwrap().notified = false;
        }

        // Verify we've consumed all tokens
        assert_eq!(state.rate_limiter.tokens, 0.0);

        // Mark path as unnotified again to test rate limiting
        state.sent_observations.get_mut(&0).unwrap().notified = false;

        // Now check should be rate limited (no tokens left)
        let frames2 = state.check_for_address_observations(0, true, now);
        assert_eq!(frames2.len(), 0);

        // Mark path as unnotified again
        state.sent_observations.get_mut(&0).unwrap().notified = false;

        // After time passes, should be able to send again
        let later = now + Duration::from_millis(200); // 0.2 seconds = 2 tokens at 10/sec
        let frames3 = state.check_for_address_observations(0, true, later);
        assert_eq!(frames3.len(), 1);
    }
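
    // The replenishment arithmetic asserted above follows the standard token-bucket
    // model: tokens = min(max_tokens, tokens + rate * elapsed_seconds). The sketch
    // below exercises that formula with plain f64 arithmetic only; it is an
    // illustration of the assumed model, not a test of the crate's rate_limiter
    // implementation, and its constants are hypothetical.
    #[test]
    fn illustrative_token_bucket_replenishment_sketch() {
        // Assumed defaults mirrored from the tests above: 10 tokens/sec, bucket
        // capacity 10, bucket currently empty.
        let rate = 10.0_f64;
        let max_tokens = 10.0_f64;
        let mut tokens = 0.0_f64;

        // 200 ms at 10 tokens/sec replenishes 2 tokens.
        let elapsed_secs = 0.2_f64;
        tokens = (tokens + rate * elapsed_secs).min(max_tokens);
        assert!((tokens - 2.0).abs() < 1e-9);

        // A long idle period cannot overfill the bucket; it saturates at capacity.
        tokens = (tokens + rate * 60.0).min(max_tokens);
        assert!((tokens - max_tokens).abs() < 1e-9);
    }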

    #[test]
    fn check_for_address_observations_multiple_paths() {
        let config = crate::transport_parameters::AddressDiscoveryConfig::SendAndReceive;
        let now = Instant::now();
        let mut state = AddressDiscoveryState::new(&config, now);

        // Enable observation on all paths for this test
        state.observe_all_paths = true;

        // Set up two paths with observed addresses
        let addr1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 100)), 5000);
        let addr2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 101)), 5001);

        state
            .sent_observations
            .insert(0, paths::PathAddressInfo::new());
        state
            .sent_observations
            .get_mut(&0)
            .unwrap()
            .observed_address = Some(addr1);

        state
            .sent_observations
            .insert(1, paths::PathAddressInfo::new());
        state
            .sent_observations
            .get_mut(&1)
            .unwrap()
            .observed_address = Some(addr2);

        // Check for observations - should get both since we have tokens
        let frames = state.check_for_address_observations(0, true, now);

        // Should get frames for both paths
        assert_eq!(frames.len(), 2);

        // Verify both addresses are included
        let addresses: Vec<_> = frames.iter().map(|f| f.address).collect();
        assert!(addresses.contains(&addr1));
        assert!(addresses.contains(&addr2));

        // Both paths should be marked as notified
        assert!(state.sent_observations.get(&0).unwrap().notified);
        assert!(state.sent_observations.get(&1).unwrap().notified);
    }

    // Tests for Task 2.4: Rate Limiter Configuration
    #[test]
    fn test_rate_limiter_configuration() {
        // Test different rate configurations
        let state = AddressDiscoveryState::new_with_params(true, 10.0, false);
        assert_eq!(state.rate_limiter.rate, 10.0);
        assert_eq!(state.rate_limiter.max_tokens, 10.0);
        assert_eq!(state.rate_limiter.tokens, 10.0);

        let state = AddressDiscoveryState::new_with_params(true, 63.0, false);
        assert_eq!(state.rate_limiter.rate, 63.0);
        assert_eq!(state.rate_limiter.max_tokens, 63.0);
    }

    #[test]
    fn test_rate_limiter_update_configuration() {
        let mut state = AddressDiscoveryState::new_with_params(true, 5.0, false);

        // Initial configuration
        assert_eq!(state.rate_limiter.rate, 5.0);

        // Update configuration
        state.update_rate_limit(10.0);
        assert_eq!(state.rate_limiter.rate, 10.0);
        assert_eq!(state.rate_limiter.max_tokens, 10.0);

        // Tokens should not exceed new max
        state.rate_limiter.tokens = 15.0;
        state.update_rate_limit(8.0);
        assert_eq!(state.rate_limiter.tokens, 8.0);
    }

    #[test]
    fn test_rate_limiter_from_transport_params() {
        let mut params = TransportParameters::default();
        params.address_discovery = Some(AddressDiscoveryConfig::SendAndReceive);

        let state = AddressDiscoveryState::from_transport_params(&params);
        assert!(state.is_some());
        let state = state.unwrap();
        assert_eq!(state.rate_limiter.rate, 10.0); // Default rate is 10
        assert!(!state.observe_all_paths); // Default is false
    }

    #[test]
    fn test_rate_limiter_zero_rate() {
        let state = AddressDiscoveryState::new_with_params(true, 0.0, false);
        assert_eq!(state.rate_limiter.rate, 0.0);
        assert_eq!(state.rate_limiter.tokens, 0.0);

        // Should never allow sending with zero rate
        let address = "192.168.1.1:443".parse().unwrap();
        let mut state = AddressDiscoveryState::new_with_params(true, 0.0, false);
        let frame = state.queue_observed_address_frame(0, address);
        assert!(frame.is_none());
    }

    #[test]
    fn test_rate_limiter_configuration_edge_cases() {
        // Test maximum allowed rate (63)
        let state = AddressDiscoveryState::new_with_params(true, 63.0, false);
        assert_eq!(state.rate_limiter.rate, 63.0);

        // Test rates > 63 get converted to u8 then back to f64
        let state = AddressDiscoveryState::new_with_params(true, 100.0, false);
        // 100 as u8 is 100
        assert_eq!(state.rate_limiter.rate, 100.0);

        // Test fractional rates get truncated due to u8 storage
        let state = AddressDiscoveryState::new_with_params(true, 2.5, false);
        // 2.5 as u8 is 2, then back to f64 is 2.0
        assert_eq!(state.rate_limiter.rate, 2.0);
    }
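
    // The edge cases above hinge on the rate being stored as a u8 and converted
    // back to f64. The sketch below demonstrates that round trip with plain Rust
    // casts only; it does not touch AddressDiscoveryState and simply illustrates
    // the assumed conversion behaviour.
    #[test]
    fn illustrative_rate_u8_round_trip_sketch() {
        // Whole numbers that fit in a u8 survive the round trip unchanged.
        assert_eq!(100.0_f64 as u8 as f64, 100.0);
        // Fractional rates are truncated toward zero by the `as u8` cast.
        assert_eq!(2.5_f64 as u8 as f64, 2.0);
        // Values above u8::MAX saturate at 255 under Rust's float-to-int casts.
        assert_eq!(300.0_f64 as u8 as f64, 255.0);
    }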

    #[test]
    fn test_rate_limiter_runtime_update() {
        let mut state = AddressDiscoveryState::new_with_params(true, 10.0, false);
        let now = Instant::now();

        // Consume some tokens
        state.rate_limiter.tokens = 5.0;

        // Update rate while tokens are partially consumed
        state.update_rate_limit(3.0);

        // Tokens should be capped at new max
        assert_eq!(state.rate_limiter.tokens, 3.0);
        assert_eq!(state.rate_limiter.rate, 3.0);
        assert_eq!(state.rate_limiter.max_tokens, 3.0);

        // Wait for replenishment
        let later = now + Duration::from_secs(1);
        state.rate_limiter.update_tokens(later);

        // Should be capped at new max
        assert_eq!(state.rate_limiter.tokens, 3.0);
    }

    // Tests for Task 2.5: Connection Tests
    #[test]
    fn test_address_discovery_state_initialization_default() {
        // Test that connection initializes with default address discovery state
        let now = Instant::now();
        let default_config = crate::transport_parameters::AddressDiscoveryConfig::default();

        // Create a connection (simplified test setup)
        // In reality, this happens in Connection::new()
        let address_discovery_state = Some(AddressDiscoveryState::new(&default_config, now));

        assert!(address_discovery_state.is_some());
        let state = address_discovery_state.unwrap();

        // The default config now enables address discovery
        assert!(state.enabled);
        assert_eq!(state.max_observation_rate, 10); // Default rate
        assert!(!state.observe_all_paths);
    }

    #[test]
    fn test_address_discovery_state_initialization_on_handshake() {
        // Test that address discovery state is updated when transport parameters are received
        let now = Instant::now();

        // Simulate initial state (as in Connection::new)
        let mut address_discovery_state = Some(AddressDiscoveryState::new(
            &crate::transport_parameters::AddressDiscoveryConfig::default(),
            now,
        ));

        // Simulate receiving peer's transport parameters with address discovery enabled
        let peer_params = TransportParameters {
            address_discovery: Some(AddressDiscoveryConfig::SendAndReceive),
            ..TransportParameters::default()
        };

        // Update address discovery state based on peer params
        if let Some(peer_config) = &peer_params.address_discovery {
            // Any variant means address discovery is supported
            address_discovery_state = Some(AddressDiscoveryState::new(peer_config, now));
        }

        // Verify state was updated
        assert!(address_discovery_state.is_some());
        let state = address_discovery_state.unwrap();
        assert!(state.enabled);
        // Default values from new state creation
        assert_eq!(state.max_observation_rate, 10); // Default rate
        assert!(!state.observe_all_paths); // Default is primary path only
    }

    #[test]
    fn test_address_discovery_negotiation_disabled_peer() {
        // Test when peer doesn't support address discovery
        let now = Instant::now();

        // Start with our config enabling address discovery
        let our_config = AddressDiscoveryConfig::SendAndReceive;
        let mut address_discovery_state = Some(AddressDiscoveryState::new(&our_config, now));

        // Peer's transport parameters without address discovery
        let peer_params = TransportParameters {
            address_discovery: None,
            ..TransportParameters::default()
        };

        // If peer doesn't advertise address discovery, we should disable it
        if peer_params.address_discovery.is_none() {
            if let Some(state) = &mut address_discovery_state {
                state.enabled = false;
            }
        }

        // Verify it's disabled
        let state = address_discovery_state.unwrap();
        assert!(!state.enabled); // Should be disabled when peer doesn't support it
    }

    #[test]
    fn test_address_discovery_negotiation_rate_limiting() {
        // Test rate limit negotiation - should use minimum of local and peer rates
        let now = Instant::now();

        // Our config with rate 30
        let our_config = AddressDiscoveryConfig::SendAndReceive;
        let mut address_discovery_state = Some(AddressDiscoveryState::new(&our_config, now));

        // Set a custom rate for testing
        if let Some(state) = &mut address_discovery_state {
            state.max_observation_rate = 30;
            state.update_rate_limit(30.0);
        }

        // Peer config with rate 15
        let peer_params = TransportParameters {
            address_discovery: Some(AddressDiscoveryConfig::SendAndReceive),
            ..TransportParameters::default()
        };

        // Negotiate - should use minimum rate
        // Since the enum doesn't contain rate info, this test simulates negotiation
        if let (Some(state), Some(_peer_config)) =
            (&mut address_discovery_state, &peer_params.address_discovery)
        {
            // In a real scenario, rate would be extracted from connection parameters
            // For this test, we simulate peer having rate 15
            let peer_rate = 15u8;
            let negotiated_rate = state.max_observation_rate.min(peer_rate);
            state.update_rate_limit(negotiated_rate as f64);
        }

        // Verify negotiated rate
        let state = address_discovery_state.unwrap();
        assert_eq!(state.rate_limiter.rate, 15.0); // Min of 30 and 15
    }

    #[test]
    fn test_address_discovery_path_initialization() {
        // Test that paths are initialized with address discovery support
        let now = Instant::now();
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut state = AddressDiscoveryState::new(&config, now);

        // Simulate path creation (path_id = 0)
        assert!(state.sent_observations.is_empty());
        assert!(state.received_observations.is_empty());

        // When we first check if we should send observation, it should create path entry
        let should_send = state.should_send_observation(0, now);
        assert!(should_send); // Should allow first observation

        // Path entry should now exist (created on demand)
        // Note: In the actual implementation, path entries are created when needed
    }

    #[test]
    fn test_address_discovery_multiple_path_initialization() {
        // Test initialization with multiple paths
        let now = Instant::now();
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut state = AddressDiscoveryState::new(&config, now);

        // By default, only primary path is observed
        assert!(state.should_send_observation(0, now)); // Primary path
        assert!(!state.should_send_observation(1, now)); // Secondary path not observed by default
        assert!(!state.should_send_observation(2, now)); // Additional path not observed by default

        // Enable all paths
        state.observe_all_paths = true;
        assert!(state.should_send_observation(1, now)); // Now secondary path is observed
        assert!(state.should_send_observation(2, now)); // Now additional path is observed

        // With observe_all_paths = false, only primary path should be allowed
        let config_primary_only = AddressDiscoveryConfig::SendAndReceive;
        let mut state_primary = AddressDiscoveryState::new(&config_primary_only, now);

        assert!(state_primary.should_send_observation(0, now)); // Primary path allowed
        assert!(!state_primary.should_send_observation(1, now)); // Secondary path not allowed
    }
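
    // Path gating in the tests above reduces to a simple predicate: the primary
    // path (id 0) is always eligible, and other paths become eligible only when
    // observe_all_paths is set. The helper below is a hypothetical restatement of
    // that rule using plain values, included to make the expected truth table
    // explicit; it is not the crate's actual gating code.
    #[test]
    fn illustrative_path_gating_sketch() {
        // Hypothetical stand-in for the gating rule assumed by these tests.
        fn path_eligible(path_id: u64, observe_all_paths: bool) -> bool {
            path_id == 0 || observe_all_paths
        }

        // Default configuration: primary path only.
        assert!(path_eligible(0, false));
        assert!(!path_eligible(1, false));
        assert!(!path_eligible(2, false));

        // With observe_all_paths enabled, every path is eligible.
        assert!(path_eligible(1, true));
        assert!(path_eligible(2, true));
    }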

    #[test]
    fn test_handle_observed_address_frame_valid() {
        // Test processing a valid OBSERVED_ADDRESS frame
        let now = Instant::now();
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut state = AddressDiscoveryState::new(&config, now);

        // Simulate receiving an OBSERVED_ADDRESS frame
        let observed_addr = SocketAddr::from(([192, 168, 1, 100], 5000));
        state.handle_observed_address(observed_addr, 0, now);

        // Verify the address was recorded
        assert_eq!(state.received_history.len(), 1);
        assert_eq!(state.received_history[0].address, observed_addr);
        assert_eq!(state.received_history[0].path_id, 0);
        assert_eq!(state.received_history[0].received_at, now);

        // Path should also have the observed address
        let path_info = state.received_observations.get(&0).unwrap();
        assert_eq!(path_info.observed_address, Some(observed_addr));
        assert_eq!(path_info.last_observed, Some(now));
        assert_eq!(path_info.observation_count, 1);
    }

    #[test]
    fn test_handle_multiple_received_history() {
        // Test processing multiple OBSERVED_ADDRESS frames from different paths
        let now = Instant::now();
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut state = AddressDiscoveryState::new(&config, now);

        // Receive addresses from multiple paths
        let addr1 = SocketAddr::from(([192, 168, 1, 100], 5000));
        let addr2 = SocketAddr::from(([10, 0, 0, 50], 6000));
        let addr3 = SocketAddr::from(([192, 168, 1, 100], 7000)); // Same IP, different port

        state.handle_observed_address(addr1, 0, now);
        state.handle_observed_address(addr2, 1, now);
        state.handle_observed_address(addr3, 0, now + Duration::from_millis(100));

        // Verify all addresses were recorded
        assert_eq!(state.received_history.len(), 3);

        // Path 0 should have the most recent address (addr3)
        let path0_info = state.received_observations.get(&0).unwrap();
        assert_eq!(path0_info.observed_address, Some(addr3));
        assert_eq!(path0_info.observation_count, 1); // Reset to 1 for new address

        // Path 1 should have addr2
        let path1_info = state.received_observations.get(&1).unwrap();
        assert_eq!(path1_info.observed_address, Some(addr2));
        assert_eq!(path1_info.observation_count, 1);
    }

    #[test]
    fn test_get_observed_address() {
        // Test retrieving observed addresses for specific paths
        let now = Instant::now();
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut state = AddressDiscoveryState::new(&config, now);

        // Initially no address
        assert_eq!(state.get_observed_address(0), None);

        // Add an address
        let addr = SocketAddr::from(([192, 168, 1, 100], 5000));
        state.handle_observed_address(addr, 0, now);

        // Should return the most recent address for the path
        assert_eq!(state.get_observed_address(0), Some(addr));

        // Non-existent path should return None
        assert_eq!(state.get_observed_address(999), None);
    }

    #[test]
    fn test_has_unnotified_changes() {
        // Test detection of unnotified address changes
        let now = Instant::now();
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut state = AddressDiscoveryState::new(&config, now);

        // Initially no changes
        assert!(!state.has_unnotified_changes());

        // Add an address - should have unnotified change
        let addr = SocketAddr::from(([192, 168, 1, 100], 5000));
        state.handle_observed_address(addr, 0, now);
        assert!(state.has_unnotified_changes());

        // Mark as notified
        if let Some(path_info) = state.received_observations.get_mut(&0) {
            path_info.notified = true;
        }
        assert!(!state.has_unnotified_changes());

        // Add another address - should have change again
        let addr2 = SocketAddr::from(([192, 168, 1, 100], 6000));
        state.handle_observed_address(addr2, 0, now + Duration::from_secs(1));
        assert!(state.has_unnotified_changes());
    }

    #[test]
    fn test_address_discovery_disabled() {
        // Test that frames are not processed when address discovery is disabled
        let now = Instant::now();
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut state = AddressDiscoveryState::new(&config, now);

        // Disable address discovery after creation
        state.enabled = false;

        // Try to process a frame
        let addr = SocketAddr::from(([192, 168, 1, 100], 5000));
        state.handle_observed_address(addr, 0, now);

        // When disabled, addresses are not recorded
        assert_eq!(state.received_history.len(), 0);

        // Should not send observations when disabled
        assert!(!state.should_send_observation(0, now));
    }

    #[test]
    fn test_rate_limiting_basic() {
        // Test basic rate limiting functionality
        let now = Instant::now();
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut state = AddressDiscoveryState::new(&config, now);

        // Enable all paths for this test and set a low rate
        state.observe_all_paths = true;
        state.rate_limiter.set_rate(2); // 2 per second

        // First observation should be allowed and consumes a token
        assert!(state.should_send_observation(0, now));
        // Need to mark path 0 as notified so subsequent checks will pass
        state.record_observation_sent(0);

        // Need a different path since path 0 is already notified
        assert!(state.should_send_observation(1, now));
        state.record_observation_sent(1);

        // Third observation should be rate limited (no more tokens)
        assert!(!state.should_send_observation(2, now));

        // After 500ms, we should have 1 token available
        let later = now + Duration::from_millis(500);
        assert!(state.should_send_observation(3, later));
        state.record_observation_sent(3);

        // But not a second one (all tokens consumed)
        assert!(!state.should_send_observation(4, later));

        // The bucket is capped at its 2-token capacity, so waiting longer than
        // a second cannot bank extra tokens: two seconds after the start it has
        // simply refilled to the 2-token maximum.
        let two_sec_later = now + Duration::from_secs(2);
        assert!(state.should_send_observation(5, two_sec_later));
        state.record_observation_sent(5);

        // Path 5 consumed one of the two refilled tokens, leaving exactly one,
        // so path 6 can send once more and consumes that last token.
        assert!(state.should_send_observation(6, two_sec_later));
        state.record_observation_sent(6);

        // The bucket is now empty again, so a further observation is refused.
        assert!(
            !state.should_send_observation(7, two_sec_later),
            "Expected no tokens available"
        );
    }

    #[test]
    fn test_rate_limiting_per_path() {
        // Test that rate limiting is shared across paths (not per-path)
        let now = Instant::now();
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut state = AddressDiscoveryState::new(&config, now);

        // Set up path 0 with an address to observe
        state
            .sent_observations
            .insert(0, paths::PathAddressInfo::new());
        state
            .sent_observations
            .get_mut(&0)
            .unwrap()
            .observed_address = Some(SocketAddr::new(
            IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)),
            8080,
        ));

        // Use up all initial tokens (we start with 10)
        for _ in 0..10 {
            assert!(state.should_send_observation(0, now));
            state.record_observation_sent(0);
            // Reset notified flag for next iteration
            state.sent_observations.get_mut(&0).unwrap().notified = false;
        }

        // Now we're out of tokens, so path 0 should be rate limited
        assert!(!state.should_send_observation(0, now));

        // After 100ms, we get 1 token back (10 tokens/sec = 1 token/100ms)
        let later = now + Duration::from_millis(100);
        assert!(state.should_send_observation(0, later));
        state.record_observation_sent(0);

        // Reset notified flag to test again
        state.sent_observations.get_mut(&0).unwrap().notified = false;

        // And it's consumed again
        assert!(!state.should_send_observation(0, later));
    }

    #[test]
    fn test_rate_limiting_zero_rate() {
        // Test that rate of 0 means no observations
        let now = Instant::now();
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut state = AddressDiscoveryState::new(&config, now);

        // Set rate to 0
        state.rate_limiter.set_rate(0);
        state.rate_limiter.tokens = 0.0;
        state.rate_limiter.max_tokens = 0.0;

        // Should never allow observations
        assert!(!state.should_send_observation(0, now));
        assert!(!state.should_send_observation(0, now + Duration::from_secs(10)));
        assert!(!state.should_send_observation(0, now + Duration::from_secs(100)));
    }

    #[test]
    fn test_rate_limiting_update() {
        // Test updating rate limit during connection
        let now = Instant::now();
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut state = AddressDiscoveryState::new(&config, now);

        // Enable all paths observation
        state.observe_all_paths = true;

        // Set up multiple paths with addresses to observe
        for i in 0..12 {
            state
                .sent_observations
                .insert(i, paths::PathAddressInfo::new());
            state
                .sent_observations
                .get_mut(&i)
                .unwrap()
                .observed_address = Some(SocketAddr::new(
                IpAddr::V4(Ipv4Addr::new(192, 168, 1, (i + 1) as u8)),
                8080,
            ));
        }

        // Initially we have 10 tokens (rate is 10/sec)
        // Use up all the initial tokens
        for i in 0..10 {
            assert!(state.should_send_observation(i, now));
            state.record_observation_sent(i);
        }
        // Now we should be out of tokens
        assert!(!state.should_send_observation(10, now));

        // Update rate limit to 20 per second (double the original)
        state.update_rate_limit(20.0);

        // Tokens don't immediately increase, need to wait for replenishment
        // After 50ms with rate 20/sec, we should get 1 token
        let later = now + Duration::from_millis(50);
        assert!(state.should_send_observation(10, later));
        state.record_observation_sent(10);

        // And we can continue sending at the new rate
        let later2 = now + Duration::from_millis(100);
        assert!(state.should_send_observation(11, later2));
    }

    #[test]
    fn test_rate_limiting_burst() {
        // Test that rate limiter allows burst up to bucket capacity
        let now = Instant::now();
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut state = AddressDiscoveryState::new(&config, now);

        // Should allow up to 10 observations in burst
        for _ in 0..10 {
            assert!(state.should_send_observation(0, now));
            state.record_observation_sent(0);
        }

        // 11th should be rate limited
        assert!(!state.should_send_observation(0, now));

        // After 100ms, we should have 1 more token
        let later = now + Duration::from_millis(100);
        assert!(state.should_send_observation(0, later));
        state.record_observation_sent(0);
        assert!(!state.should_send_observation(0, later));
    }

    #[test]
    fn test_connection_rate_limiting_with_check_observations() {
        // Test rate limiting through check_for_address_observations
        let now = Instant::now();
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut state = AddressDiscoveryState::new(&config, now);

        // Set up a path with an address
        let mut path_info = paths::PathAddressInfo::new();
        path_info.update_observed_address(
            SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)), 8080),
            now,
        );
        state.sent_observations.insert(0, path_info);

        // First observation should succeed
        let frame1 =
            state.queue_observed_address_frame(0, SocketAddr::from(([192, 168, 1, 1], 8080)));
        assert!(frame1.is_some());
        state.record_observation_sent(0);

        // Reset notified flag to test rate limiting (simulate address change or new observation opportunity)
        if let Some(info) = state.sent_observations.get_mut(&0) {
            info.notified = false;
        }

        // We start with 10 tokens, use them all up (minus the 1 we already used)
        for _ in 1..10 {
            // Reset notified flag to allow testing rate limiting
            if let Some(info) = state.sent_observations.get_mut(&0) {
                info.notified = false;
            }
            let frame =
                state.queue_observed_address_frame(0, SocketAddr::from(([192, 168, 1, 1], 8080)));
            assert!(frame.is_some());
            state.record_observation_sent(0);
        }

        // Now we should be out of tokens
        if let Some(info) = state.sent_observations.get_mut(&0) {
            info.notified = false;
        }
        let frame3 =
            state.queue_observed_address_frame(0, SocketAddr::from(([192, 168, 1, 1], 8080)));
        assert!(frame3.is_none()); // Should fail due to rate limiting

        // After 100ms, should allow 1 more (rate is 10/sec, so 0.1s = 1 token)
        let later = now + Duration::from_millis(100);
        state.rate_limiter.update_tokens(later); // Update tokens based on elapsed time

        // Reset notified flag to test token replenishment
        if let Some(info) = state.sent_observations.get_mut(&0) {
            info.notified = false;
        }

        let frame4 =
            state.queue_observed_address_frame(0, SocketAddr::from(([192, 168, 1, 1], 8080)));
        assert!(frame4.is_some()); // Should succeed due to token replenishment
    }

    #[test]
    fn test_queue_observed_address_frame() {
        // Test queueing OBSERVED_ADDRESS frames with rate limiting
        let now = Instant::now();
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut state = AddressDiscoveryState::new(&config, now);

        let addr = SocketAddr::from(([192, 168, 1, 100], 5000));

        // Should queue frame when allowed
        let frame = state.queue_observed_address_frame(0, addr);
        assert!(frame.is_some());
        assert_eq!(frame.unwrap().address, addr);

        // Record that we sent it
        state.record_observation_sent(0);

        // Should respect rate limiting - we start with 10 tokens
        for i in 0..9 {
            // Reset notified flag to test rate limiting
            if let Some(info) = state.sent_observations.get_mut(&0) {
                info.notified = false;
            }

            let frame = state.queue_observed_address_frame(0, addr);
            assert!(frame.is_some(), "Frame {} should be allowed", i + 2);
            state.record_observation_sent(0);
        }

        // Reset notified flag one more time
        if let Some(info) = state.sent_observations.get_mut(&0) {
            info.notified = false;
        }

        // 11th should be rate limited (we've used all 10 tokens)
        let frame = state.queue_observed_address_frame(0, addr);
        assert!(frame.is_none(), "11th frame should be rate limited");
    }

    #[test]
    fn test_multi_path_basic() {
        // Test basic multi-path functionality
        let now = Instant::now();
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut state = AddressDiscoveryState::new(&config, now);

        let addr1 = SocketAddr::from(([192, 168, 1, 1], 5000));
        let addr2 = SocketAddr::from(([10, 0, 0, 1], 6000));
        let addr3 = SocketAddr::from(([172, 16, 0, 1], 7000));

        // Handle observations for different paths
        state.handle_observed_address(addr1, 0, now);
        state.handle_observed_address(addr2, 1, now);
        state.handle_observed_address(addr3, 2, now);

        // Each path should have its own observed address
        assert_eq!(state.get_observed_address(0), Some(addr1));
        assert_eq!(state.get_observed_address(1), Some(addr2));
        assert_eq!(state.get_observed_address(2), Some(addr3));

        // All paths should have unnotified changes
        assert!(state.has_unnotified_changes());

        // Check that we have 3 observation events
        assert_eq!(state.received_history.len(), 3);
    }

    #[test]
    fn test_multi_path_observe_primary_only() {
        // Test that when observe_all_paths is false, only primary path is observed
        let now = Instant::now();
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut state = AddressDiscoveryState::new(&config, now);

        // Primary path (0) should be observable
        assert!(state.should_send_observation(0, now));
        state.record_observation_sent(0);

        // Non-primary paths should not be observable
        assert!(!state.should_send_observation(1, now));
        assert!(!state.should_send_observation(2, now));

        // Can't queue frames for non-primary paths
        let addr = SocketAddr::from(([192, 168, 1, 1], 5000));
        assert!(state.queue_observed_address_frame(0, addr).is_some());
        assert!(state.queue_observed_address_frame(1, addr).is_none());
        assert!(state.queue_observed_address_frame(2, addr).is_none());
    }

    #[test]
    fn test_multi_path_rate_limiting() {
        // Test that rate limiting is shared across all paths
        let now = Instant::now();
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut state = AddressDiscoveryState::new(&config, now);

        // Enable all paths observation
        state.observe_all_paths = true;

        // Set up multiple paths with addresses to observe
        for i in 0..21 {
            state
                .sent_observations
                .insert(i, paths::PathAddressInfo::new());
            state
                .sent_observations
                .get_mut(&i)
                .unwrap()
                .observed_address = Some(SocketAddr::new(
                IpAddr::V4(Ipv4Addr::new(192, 168, 1, (i + 1) as u8)),
                8080,
            ));
        }

        // Use all 10 initial tokens across different paths
        for i in 0..10 {
            assert!(state.should_send_observation(i, now));
            state.record_observation_sent(i);
        }

        // All tokens consumed, no path can send
        assert!(!state.should_send_observation(10, now));

        // Reset path 0 to test if it can send again (it shouldn't)
        state.sent_observations.get_mut(&0).unwrap().notified = false;
        assert!(!state.should_send_observation(0, now)); // Even path 0 can't send again

        // After 1 second, we get 10 more tokens (rate is 10/sec)
        let later = now + Duration::from_secs(1);
        for i in 10..20 {
            assert!(state.should_send_observation(i, later));
            state.record_observation_sent(i);
        }
        // And we're out again
        assert!(!state.should_send_observation(20, later));
    }

    #[test]
    fn test_multi_path_address_changes() {
        // Test handling address changes on different paths
        let now = Instant::now();
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut state = AddressDiscoveryState::new(&config, now);

        let addr1a = SocketAddr::from(([192, 168, 1, 1], 5000));
        let addr1b = SocketAddr::from(([192, 168, 1, 2], 5000));
        let addr2a = SocketAddr::from(([10, 0, 0, 1], 6000));
        let addr2b = SocketAddr::from(([10, 0, 0, 2], 6000));

        // Initial addresses
        state.handle_observed_address(addr1a, 0, now);
        state.handle_observed_address(addr2a, 1, now);

        // Mark received observations as notified
        if let Some(info) = state.received_observations.get_mut(&0) {
            info.notified = true;
        }
        if let Some(info) = state.received_observations.get_mut(&1) {
            info.notified = true;
        }
        assert!(!state.has_unnotified_changes());

        // Change address on path 0
        state.handle_observed_address(addr1b, 0, now + Duration::from_secs(1));
        assert!(state.has_unnotified_changes());

        // Path 0 should have new address, path 1 unchanged
        assert_eq!(state.get_observed_address(0), Some(addr1b));
        assert_eq!(state.get_observed_address(1), Some(addr2a));

        // Mark path 0 as notified
        if let Some(info) = state.received_observations.get_mut(&0) {
            info.notified = true;
        }
        assert!(!state.has_unnotified_changes());

        // Change address on path 1
        state.handle_observed_address(addr2b, 1, now + Duration::from_secs(2));
        assert!(state.has_unnotified_changes());
    }

    #[test]
    fn test_multi_path_migration() {
        // Test path migration scenario
        let now = Instant::now();
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut state = AddressDiscoveryState::new(&config, now);

        let addr_old = SocketAddr::from(([192, 168, 1, 1], 5000));
        let addr_new = SocketAddr::from(([10, 0, 0, 1], 6000));

        // Establish observation on path 0
        state.handle_observed_address(addr_old, 0, now);
        assert_eq!(state.get_observed_address(0), Some(addr_old));

        // Simulate path migration - new path gets different ID
        state.handle_observed_address(addr_new, 1, now + Duration::from_secs(1));

        // Both paths should have their addresses
        assert_eq!(state.get_observed_address(0), Some(addr_old));
        assert_eq!(state.get_observed_address(1), Some(addr_new));

        // In real implementation, old path would be cleaned up eventually
        // For now, we just track both in received_observations
        assert_eq!(state.received_observations.len(), 2);
    }

    #[test]
    fn test_check_for_address_observations_multi_path() {
        // Test the check_for_address_observations method with multiple paths
        let now = Instant::now();
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut state = AddressDiscoveryState::new(&config, now);

        // Enable observation of all paths
        state.observe_all_paths = true;

        // Set up multiple paths with addresses to send (sent_observations)
        let addr1 = SocketAddr::from(([192, 168, 1, 1], 5000));
        let addr2 = SocketAddr::from(([10, 0, 0, 1], 6000));
        let addr3 = SocketAddr::from(([172, 16, 0, 1], 7000));

        // Set up sent_observations for testing check_for_address_observations
        state
            .sent_observations
            .insert(0, paths::PathAddressInfo::new());
        state
            .sent_observations
            .get_mut(&0)
            .unwrap()
            .observed_address = Some(addr1);
        state
            .sent_observations
            .insert(1, paths::PathAddressInfo::new());
        state
            .sent_observations
            .get_mut(&1)
            .unwrap()
            .observed_address = Some(addr2);
        state
            .sent_observations
            .insert(2, paths::PathAddressInfo::new());
        state
            .sent_observations
            .get_mut(&2)
            .unwrap()
            .observed_address = Some(addr3);

        // Check for observations - should return frames for all unnotified paths
        let frames = state.check_for_address_observations(0, true, now);

        // Should get frames for all 3 paths
        assert_eq!(frames.len(), 3);

        // Verify all addresses are present in frames (order doesn't matter)
        let frame_addrs: Vec<_> = frames.iter().map(|f| f.address).collect();
        assert!(frame_addrs.contains(&addr1), "addr1 should be in frames");
        assert!(frame_addrs.contains(&addr2), "addr2 should be in frames");
        assert!(frame_addrs.contains(&addr3), "addr3 should be in frames");

        // All paths should now be marked as notified
        assert!(!state.has_unnotified_changes());
    }

    #[test]
    fn test_multi_path_with_peer_not_supporting() {
        // Test behavior when peer doesn't support address discovery
        let now = Instant::now();
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut state = AddressDiscoveryState::new(&config, now);

        // Set up paths
        state.handle_observed_address(SocketAddr::from(([192, 168, 1, 1], 5000)), 0, now);
        state.handle_observed_address(SocketAddr::from(([10, 0, 0, 1], 6000)), 1, now);

        // Check with peer not supporting - should return empty
        let frames = state.check_for_address_observations(0, false, now);
        assert_eq!(frames.len(), 0);

        // Paths should still have unnotified changes
        assert!(state.has_unnotified_changes());
    }

    // Tests for Phase 3.2: Bootstrap Node Behavior
    #[test]
    fn test_bootstrap_node_aggressive_observation_mode() {
        // Test that bootstrap nodes use more aggressive observation settings
        let config = AddressDiscoveryConfig::SendAndReceive;
        let now = Instant::now();
        let mut state = AddressDiscoveryState::new(&config, now);

        // Initially not in bootstrap mode
        assert!(!state.is_bootstrap_mode());

        // Enable bootstrap mode
        state.set_bootstrap_mode(true);
        assert!(state.is_bootstrap_mode());

        // Bootstrap mode should observe all paths regardless of config
        assert!(state.should_observe_path(0)); // Primary path
        assert!(state.should_observe_path(1)); // Secondary paths
        assert!(state.should_observe_path(2));

        // Bootstrap mode should have higher rate limit
        let bootstrap_rate = state.get_effective_rate_limit();
        assert!(bootstrap_rate > 10.0); // Should be higher than configured
    }
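
    // Bootstrap behaviour in the tests above amounts to two overrides: path
    // gating ignores observe_all_paths, and the effective rate exceeds the
    // configured one. The sketch below restates those rules with hypothetical
    // plain functions and constants (the multiplier is an assumption, not the
    // crate's actual value); it exists only to document the expected shape.
    #[test]
    fn illustrative_bootstrap_overrides_sketch() {
        // Hypothetical restatement of the gating rule with a bootstrap override.
        fn path_eligible(path_id: u64, observe_all_paths: bool, bootstrap: bool) -> bool {
            bootstrap || path_id == 0 || observe_all_paths
        }

        // Hypothetical effective-rate rule: bootstrap nodes scale the base rate up.
        fn effective_rate(base_rate: f64, bootstrap: bool) -> f64 {
            if bootstrap { base_rate * 2.0 } else { base_rate }
        }

        // In bootstrap mode every path is eligible even with observe_all_paths off.
        assert!(path_eligible(1, false, true));
        assert!(path_eligible(2, false, true));
        // And the effective rate is strictly higher than the configured base rate.
        assert!(effective_rate(10.0, true) > 10.0);
    }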

    #[test]
    fn test_bootstrap_node_immediate_observation() {
        // Test that bootstrap nodes send observations immediately on new connections
        let config = AddressDiscoveryConfig::SendAndReceive;
        let now = Instant::now();
        let mut state = AddressDiscoveryState::new(&config, now);
        state.set_bootstrap_mode(true);

        // Add an observed address
        let addr = SocketAddr::from(([192, 168, 1, 100], 5000));
        state.handle_observed_address(addr, 0, now);

        // Bootstrap nodes should want to send immediately on new connections
        assert!(state.should_send_observation_immediately(true));

        // Should bypass normal rate limiting for first observation
        assert!(state.should_send_observation(0, now));

        // Queue the frame
        let frame = state.queue_observed_address_frame(0, addr);
        assert!(frame.is_some());
    }

    #[test]
    fn test_bootstrap_node_multiple_path_observations() {
        // Test bootstrap nodes observe all paths aggressively
        let config = AddressDiscoveryConfig::SendAndReceive;
        let now = Instant::now();
        let mut state = AddressDiscoveryState::new(&config, now);
        state.set_bootstrap_mode(true);

        // Add addresses to sent_observations for testing check_for_address_observations
        let addrs = vec![
            (0u64, SocketAddr::from(([192, 168, 1, 1], 5000))),
            (1u64, SocketAddr::from(([10, 0, 0, 1], 6000))),
            (2u64, SocketAddr::from(([172, 16, 0, 1], 7000))),
        ];

        for (path_id, addr) in &addrs {
            state
                .sent_observations
                .insert(*path_id, paths::PathAddressInfo::new());
            state
                .sent_observations
                .get_mut(path_id)
                .unwrap()
                .observed_address = Some(*addr);
        }

        // Bootstrap nodes should observe all paths despite config
        let frames = state.check_for_address_observations(0, true, now);
        assert_eq!(frames.len(), 3);

        // Verify all addresses are included
        for (_, addr) in &addrs {
            assert!(frames.iter().any(|f| f.address == *addr));
        }
    }

    #[test]
    fn test_bootstrap_node_rate_limit_override() {
        // Test that bootstrap nodes have higher rate limits
        let config = AddressDiscoveryConfig::SendAndReceive;
        let now = Instant::now();
        let mut state = AddressDiscoveryState::new(&config, now);
        state.set_bootstrap_mode(true);

        // Bootstrap nodes should be able to send more than configured rate
        let addr = SocketAddr::from(([192, 168, 1, 1], 5000));

        // Send multiple observations rapidly
        for i in 0..10 {
            state.handle_observed_address(addr, i, now);
            let can_send = state.should_send_observation(i, now);
            assert!(can_send, "Bootstrap node should send observation {i}");
            state.record_observation_sent(i);
        }
    }

    #[test]
    fn test_bootstrap_node_configuration() {
        // Test bootstrap-specific configuration
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut state = AddressDiscoveryState::new(&config, Instant::now());

        // Apply bootstrap mode
        state.set_bootstrap_mode(true);

        // Bootstrap mode should enable aggressive observation
        assert!(state.bootstrap_mode);
        assert!(state.enabled);

        // Rate limiter should be updated for bootstrap mode
        let effective_rate = state.get_effective_rate_limit();
        assert!(effective_rate > state.max_observation_rate as f64);
    }

    #[test]
    fn test_bootstrap_node_persistent_observation() {
        // Test that bootstrap nodes continue observing throughout connection lifetime
        let config = AddressDiscoveryConfig::SendAndReceive;
        let mut now = Instant::now();
        let mut state = AddressDiscoveryState::new(&config, now);
        state.set_bootstrap_mode(true);

        let addr1 = SocketAddr::from(([192, 168, 1, 1], 5000));
        let addr2 = SocketAddr::from(([192, 168, 1, 2], 5000));

        // Initial observation
        state.handle_observed_address(addr1, 0, now);
        assert!(state.should_send_observation(0, now));
        state.record_observation_sent(0);

        // After some time, address changes
        now += Duration::from_secs(60);
        state.handle_observed_address(addr2, 0, now);

        // Bootstrap nodes should still be observing actively
        assert!(state.should_send_observation(0, now));
    }

    #[test]
    fn test_bootstrap_node_multi_peer_support() {
        // Test that bootstrap nodes can handle observations for multiple peers
        // This is more of an integration test concept, but we can test the state management
        let config = AddressDiscoveryConfig::SendAndReceive;
        let now = Instant::now();
        let mut state = AddressDiscoveryState::new(&config, now);
        state.set_bootstrap_mode(true);

        // Simulate multiple peer connections (using different path IDs)
        let peer_addresses: Vec<(u64, SocketAddr)> = vec![
            (0, SocketAddr::from(([192, 168, 1, 1], 5000))), // Peer 1
            (1, SocketAddr::from(([10, 0, 0, 1], 6000))),    // Peer 2
            (2, SocketAddr::from(([172, 16, 0, 1], 7000))),  // Peer 3
            (3, SocketAddr::from(([192, 168, 2, 1], 8000))), // Peer 4
        ];

        // Add all peer addresses to sent_observations
        for (path_id, addr) in &peer_addresses {
            state
                .sent_observations
                .insert(*path_id, paths::PathAddressInfo::new());
            state
                .sent_observations
                .get_mut(path_id)
                .unwrap()
                .observed_address = Some(*addr);
        }

        // Bootstrap should observe all peers
        let frames = state.check_for_address_observations(0, true, now);
        assert_eq!(frames.len(), peer_addresses.len());

        // Verify all addresses are observed
        for (_, addr) in &peer_addresses {
            assert!(frames.iter().any(|f| f.address == *addr));
        }
    }

    // Include comprehensive address discovery tests
    mod address_discovery_tests {
        include!("address_discovery_tests.rs");
    }
}