ant_quic/config/transport.rs

// Copyright 2024 Saorsa Labs Ltd.
//
// This Saorsa Network Software is licensed under the General Public License (GPL), version 3.
// Please see the file LICENSE-GPL, or visit <http://www.gnu.org/licenses/> for the full text.
//
// Full details available at https://saorsalabs.com/licenses

use std::{fmt, sync::Arc};

use crate::{Duration, INITIAL_MTU, MAX_UDP_PAYLOAD, VarInt, VarIntBoundsExceeded, congestion};

/// Parameters governing the core QUIC state machine
///
/// Default values should be suitable for most internet applications. Application protocols which
/// forbid remotely-initiated streams should set `max_concurrent_bidi_streams` and
/// `max_concurrent_uni_streams` to zero.
///
/// In some cases, performance or resource requirements can be improved by tuning these values to
/// suit a particular application and/or network connection. In particular, data window sizes can be
/// tuned for the expected round trip time, link capacity, and memory availability. Tuning for
/// higher bandwidths and latencies increases worst-case memory consumption, but does not impair
/// performance at lower bandwidths and latencies. The default configuration is tuned for a 100Mbps
/// link with a 100ms round trip time.
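///
/// For example, here is a sketch of tuning for a 1 Gbps link with a 50 ms round trip time (the
/// numbers are illustrative, not recommendations):
///
/// ```
/// use ant_quic::{TransportConfig, VarInt};
///
/// // Bandwidth-delay product: 125_000_000 bytes/s * 0.05 s = 6_250_000 bytes
/// let bdp: u32 = 125_000_000 / 1000 * 50;
/// let mut config = TransportConfig::default();
/// config
///     .stream_receive_window(VarInt::from_u32(bdp))
///     .receive_window(VarInt::from_u32(4 * bdp))
///     .send_window(u64::from(8 * bdp));
/// ```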
#[derive(Clone)]
pub struct TransportConfig {
    pub(crate) max_concurrent_bidi_streams: VarInt,
    pub(crate) max_concurrent_uni_streams: VarInt,
    pub(crate) max_idle_timeout: Option<VarInt>,
    pub(crate) stream_receive_window: VarInt,
    pub(crate) receive_window: VarInt,
    pub(crate) send_window: u64,
    pub(crate) send_fairness: bool,

    pub(crate) packet_threshold: u32,
    pub(crate) time_threshold: f32,
    pub(crate) initial_rtt: Duration,
    pub(crate) initial_mtu: u16,
    pub(crate) min_mtu: u16,
    pub(crate) mtu_discovery_config: Option<MtuDiscoveryConfig>,
    pub(crate) pad_to_mtu: bool,
    pub(crate) ack_frequency_config: Option<AckFrequencyConfig>,

    pub(crate) persistent_congestion_threshold: u32,
    pub(crate) keep_alive_interval: Option<Duration>,
    pub(crate) crypto_buffer_size: usize,
    pub(crate) allow_spin: bool,
    pub(crate) datagram_receive_buffer_size: Option<usize>,
    pub(crate) datagram_send_buffer_size: usize,
    #[cfg(test)]
    pub(crate) deterministic_packet_numbers: bool,

    pub(crate) congestion_controller_factory: Arc<dyn congestion::ControllerFactory + Send + Sync>,

    pub(crate) enable_segmentation_offload: bool,

    /// NAT traversal configuration
    pub(crate) nat_traversal_config: Option<crate::transport_parameters::NatTraversalConfig>,

    /// Address discovery configuration
    pub(crate) address_discovery_config:
        Option<crate::transport_parameters::AddressDiscoveryConfig>,

    /// Post-Quantum Cryptography algorithms configuration
    pub(crate) pqc_algorithms: Option<crate::transport_parameters::PqcAlgorithms>,
}

impl TransportConfig {
    /// Maximum number of incoming bidirectional streams that may be open concurrently
    ///
    /// Must be nonzero for the peer to open any bidirectional streams.
    ///
    /// Worst-case memory use is directly proportional to `max_concurrent_bidi_streams *
    /// stream_receive_window`, with an upper bound proportional to `receive_window`.
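    ///
    /// For example, a protocol that forbids remotely-initiated streams entirely (illustrative):
    ///
    /// ```
    /// use ant_quic::TransportConfig;
    ///
    /// let mut config = TransportConfig::default();
    /// config
    ///     .max_concurrent_bidi_streams(0u32.into())
    ///     .max_concurrent_uni_streams(0u32.into());
    /// ```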
    pub fn max_concurrent_bidi_streams(&mut self, value: VarInt) -> &mut Self {
        self.max_concurrent_bidi_streams = value;
        self
    }

    /// Variant of `max_concurrent_bidi_streams` affecting unidirectional streams
    pub fn max_concurrent_uni_streams(&mut self, value: VarInt) -> &mut Self {
        self.max_concurrent_uni_streams = value;
        self
    }

    /// Maximum duration of inactivity to accept before timing out the connection.
    ///
    /// The true idle timeout is the minimum of this and the peer's own max idle timeout. `None`
    /// represents an infinite timeout. Defaults to 30 seconds.
    ///
    /// **WARNING**: If a peer or its network path malfunctions or acts maliciously, an infinite
    /// idle timeout can result in permanently hung futures!
    ///
    /// ```
    /// # use std::{convert::TryInto, time::Duration};
    /// # use ant_quic::{TransportConfig, VarInt, VarIntBoundsExceeded};
    /// # fn main() -> Result<(), VarIntBoundsExceeded> {
    /// let mut config = TransportConfig::default();
    ///
    /// // Set the idle timeout as `VarInt`-encoded milliseconds
    /// config.max_idle_timeout(Some(VarInt::from_u32(10_000).into()));
    ///
    /// // Set the idle timeout as a `Duration`
    /// config.max_idle_timeout(Some(Duration::from_secs(10).try_into()?));
    /// # Ok(())
    /// # }
    /// ```
    pub fn max_idle_timeout(&mut self, value: Option<IdleTimeout>) -> &mut Self {
        self.max_idle_timeout = value.map(|t| t.0);
        self
    }

    /// Maximum number of bytes the peer may transmit without acknowledgement on any one stream
    /// before becoming blocked.
    ///
    /// This should be set to at least the expected connection latency multiplied by the maximum
    /// desired throughput. Setting this smaller than `receive_window` helps ensure that a single
    /// stream doesn't monopolize receive buffers, which may otherwise occur if the application
    /// chooses not to read from a large stream for a time while still requiring data on other
    /// streams.
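    ///
    /// For example (the numbers are illustrative):
    ///
    /// ```
    /// use ant_quic::{TransportConfig, VarInt};
    ///
    /// // Sustaining 10 MB/s over a 200 ms RTT path needs at least
    /// // 10_000_000 bytes/s * 0.2 s = 2_000_000 bytes of per-stream flow control credit
    /// let mut config = TransportConfig::default();
    /// config.stream_receive_window(VarInt::from_u32(2_000_000));
    /// ```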
    pub fn stream_receive_window(&mut self, value: VarInt) -> &mut Self {
        self.stream_receive_window = value;
        self
    }

    /// Maximum number of bytes the peer may transmit across all streams of a connection before
    /// becoming blocked.
    ///
    /// This should be set to at least the expected connection latency multiplied by the maximum
    /// desired throughput. Larger values can be useful to allow maximum throughput within a
    /// stream while another is blocked.
    pub fn receive_window(&mut self, value: VarInt) -> &mut Self {
        self.receive_window = value;
        self
    }

    /// Maximum number of bytes to transmit to a peer without acknowledgment
    ///
    /// Provides an upper bound on memory when communicating with peers that issue large amounts of
    /// flow control credit. Endpoints that wish to handle large numbers of connections robustly
    /// should take care to set this low enough to guarantee memory exhaustion does not occur if
    /// every connection uses the entire window.
    pub fn send_window(&mut self, value: u64) -> &mut Self {
        self.send_window = value;
        self
    }

    /// Whether to implement fair queuing for send streams having the same priority.
    ///
    /// When enabled, connections schedule data from outgoing streams having the same priority in a
    /// round-robin fashion. When disabled, streams are scheduled in the order they are written to.
    ///
    /// Note that this only affects streams with the same priority. Higher priority streams always
    /// take precedence over lower priority streams.
    ///
    /// Disabling fairness can reduce fragmentation and protocol overhead for workloads that use
    /// many small streams.
    pub fn send_fairness(&mut self, value: bool) -> &mut Self {
        self.send_fairness = value;
        self
    }

    /// Maximum reordering in packet number space before FACK style loss detection considers a
    /// packet lost. Should not be less than 3, per RFC 5681.
    pub fn packet_threshold(&mut self, value: u32) -> &mut Self {
        self.packet_threshold = value;
        self
    }

    /// Maximum reordering in time space before time based loss detection considers a packet lost,
    /// as a factor of RTT
    pub fn time_threshold(&mut self, value: f32) -> &mut Self {
        self.time_threshold = value;
        self
    }

    /// The RTT used before an RTT sample is taken
    pub fn initial_rtt(&mut self, value: Duration) -> &mut Self {
        self.initial_rtt = value;
        self
    }

    /// The initial value to be used as the maximum UDP payload size before running MTU discovery
    /// (see [`TransportConfig::mtu_discovery_config`]).
    ///
    /// Must be at least 1200, which is the default, and known to be safe for typical internet
    /// applications. Larger values are more efficient, but increase the risk of packet loss due to
    /// exceeding the network path's IP MTU. If the provided value is higher than what the network
    /// path actually supports, packet loss will eventually trigger black hole detection and bring
    /// it down to [`TransportConfig::min_mtu`].
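    ///
    /// For example (illustrative; assumes the path is known to carry full Ethernet-sized packets):
    ///
    /// ```
    /// use ant_quic::TransportConfig;
    ///
    /// let mut config = TransportConfig::default();
    /// config.initial_mtu(1452); // values below 1200 are clamped up to 1200
    /// ```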
    pub fn initial_mtu(&mut self, value: u16) -> &mut Self {
        self.initial_mtu = value.max(INITIAL_MTU);
        self
    }

    pub(crate) fn get_initial_mtu(&self) -> u16 {
        self.initial_mtu.max(self.min_mtu)
    }

    /// The maximum UDP payload size guaranteed to be supported by the network.
    ///
    /// Must be at least 1200, which is the default, and lower than or equal to
    /// [`TransportConfig::initial_mtu`].
    ///
    /// Real-world MTUs can vary according to ISP, VPN, and properties of intermediate network links
    /// outside of either endpoint's control. Extreme care should be used when raising this value
    /// outside of private networks where these factors are fully controlled. If the provided value
    /// is higher than what the network path actually supports, the result will be unpredictable and
    /// catastrophic packet loss, without a possibility of repair. Prefer
    /// [`TransportConfig::initial_mtu`] together with
    /// [`TransportConfig::mtu_discovery_config`] to set a maximum UDP payload size that robustly
    /// adapts to the network.
    pub fn min_mtu(&mut self, value: u16) -> &mut Self {
        self.min_mtu = value.max(INITIAL_MTU);
        self
    }

    /// Specifies the MTU discovery config (see [`MtuDiscoveryConfig`] for details).
    ///
    /// Enabled by default.
    pub fn mtu_discovery_config(&mut self, value: Option<MtuDiscoveryConfig>) -> &mut Self {
        self.mtu_discovery_config = value;
        self
    }

    /// Pad UDP datagrams carrying application data to the current maximum UDP payload size
    ///
    /// Disabled by default. UDP datagrams containing loss probes are exempt from padding.
    ///
    /// Enabling this helps mitigate traffic analysis by network observers, but it increases
    /// bandwidth usage. Without this mitigation, the precise plaintext size of application
    /// datagrams, as well as the total size of stream write bursts, can be inferred by observers
    /// under certain conditions. This analysis requires either an uncongested connection or
    /// application datagrams too large to be coalesced.
    pub fn pad_to_mtu(&mut self, value: bool) -> &mut Self {
        self.pad_to_mtu = value;
        self
    }

    /// Specifies the ACK frequency config (see [`AckFrequencyConfig`] for details)
    ///
    /// The provided configuration will be ignored if the peer does not support the acknowledgement
    /// frequency QUIC extension.
    ///
    /// Defaults to `None`, which disables controlling the peer's acknowledgement frequency. Even
    /// if set to `None`, the local side still supports the acknowledgement frequency QUIC
    /// extension and may use it in other ways.
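    ///
    /// A minimal sketch (the threshold value is illustrative):
    ///
    /// ```
    /// use ant_quic::VarInt;
    /// use ant_quic::config::{AckFrequencyConfig, TransportConfig};
    ///
    /// let mut ack = AckFrequencyConfig::default();
    /// // Ask the peer to ACK at most every 4th ack-eliciting packet
    /// ack.ack_eliciting_threshold(VarInt::from_u32(3));
    ///
    /// let mut config = TransportConfig::default();
    /// config.ack_frequency_config(Some(ack));
    /// ```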
    pub fn ack_frequency_config(&mut self, value: Option<AckFrequencyConfig>) -> &mut Self {
        self.ack_frequency_config = value;
        self
    }

    /// Number of consecutive PTOs after which the network is considered to be experiencing
    /// persistent congestion.
    pub fn persistent_congestion_threshold(&mut self, value: u32) -> &mut Self {
        self.persistent_congestion_threshold = value;
        self
    }

    /// Period of inactivity before sending a keep-alive packet
    ///
    /// Keep-alive packets prevent an inactive but otherwise healthy connection from timing out.
    ///
    /// `None` to disable, which is the default. Only one side of any given connection needs
    /// keep-alive enabled for the connection to be preserved. Must be set lower than the
    /// `max_idle_timeout` of both peers to be effective.
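    ///
    /// For example (the interval is illustrative):
    ///
    /// ```
    /// use std::time::Duration;
    /// use ant_quic::TransportConfig;
    ///
    /// let mut config = TransportConfig::default();
    /// // 10-second keep-alives stay comfortably below the 30-second default idle timeout
    /// config.keep_alive_interval(Some(Duration::from_secs(10)));
    /// ```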
    pub fn keep_alive_interval(&mut self, value: Option<Duration>) -> &mut Self {
        self.keep_alive_interval = value;
        self
    }

    /// Maximum quantity of out-of-order crypto layer data to buffer
    pub fn crypto_buffer_size(&mut self, value: usize) -> &mut Self {
        self.crypto_buffer_size = value;
        self
    }

    /// Whether the implementation is permitted to set the spin bit on this connection
    ///
    /// This allows passive observers to easily judge the round trip time of a connection, which can
    /// be useful for network administration but sacrifices a small amount of privacy.
    pub fn allow_spin(&mut self, value: bool) -> &mut Self {
        self.allow_spin = value;
        self
    }

    /// Maximum number of incoming application datagram bytes to buffer, or None to disable
    /// incoming datagrams
    ///
    /// The peer is forbidden to send single datagrams larger than this size. If the aggregate size
    /// of all datagrams that have been received from the peer but not consumed by the application
    /// exceeds this value, old datagrams are dropped until it is no longer exceeded.
    pub fn datagram_receive_buffer_size(&mut self, value: Option<usize>) -> &mut Self {
        self.datagram_receive_buffer_size = value;
        self
    }

    /// Maximum number of outgoing application datagram bytes to buffer
    ///
    /// While datagrams are sent ASAP, it is possible for an application to generate data faster
    /// than the link, or even the underlying hardware, can transmit them. This limits the amount of
    /// memory that may be consumed in that case. When the send buffer is full and a new datagram is
    /// sent, older datagrams are dropped until sufficient space is available.
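    ///
    /// For example (buffer sizes are illustrative):
    ///
    /// ```
    /// use ant_quic::TransportConfig;
    ///
    /// let mut config = TransportConfig::default();
    /// // Allow up to 1 MiB of queued outgoing datagrams and disable incoming datagrams entirely
    /// config.datagram_send_buffer_size(1024 * 1024);
    /// config.datagram_receive_buffer_size(None);
    /// ```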
    pub fn datagram_send_buffer_size(&mut self, value: usize) -> &mut Self {
        self.datagram_send_buffer_size = value;
        self
    }

    /// Whether to force every packet number to be used
    ///
    /// By default, packet numbers are occasionally skipped to ensure peers aren't ACKing packets
    /// before they see them.
    #[cfg(test)]
    #[allow(dead_code)]
    pub(crate) fn deterministic_packet_numbers(&mut self, enabled: bool) -> &mut Self {
        self.deterministic_packet_numbers = enabled;
        self
    }

    /// How to construct new `congestion::Controller`s
    ///
    /// Typically the refcounted configuration of a `congestion::Controller`,
    /// e.g. a `congestion::NewRenoConfig`.
    ///
    /// # Example
    /// ```
    /// # use std::sync::Arc;
    /// use ant_quic::config::TransportConfig;
    ///
    /// let mut config = TransportConfig::default();
    /// // The default uses CubicConfig, but custom implementations can be provided
    /// // by implementing the congestion::ControllerFactory trait
    /// ```
    pub fn congestion_controller_factory(
        &mut self,
        factory: Arc<dyn congestion::ControllerFactory + Send + Sync + 'static>,
    ) -> &mut Self {
        self.congestion_controller_factory = factory;
        self
    }

    /// Whether to use "Generic Segmentation Offload" to accelerate transmits, when supported by the
    /// environment
    ///
    /// Defaults to `true`.
    ///
    /// GSO dramatically reduces CPU consumption when sending large numbers of packets with the same
    /// headers, such as when transmitting bulk data on a connection. However, it is not supported
    /// by all network interface drivers or packet inspection tools. `quinn-udp` will attempt to
    /// disable GSO automatically when unavailable, but this can lead to spurious packet loss at
    /// startup, temporarily degrading performance.
    pub fn enable_segmentation_offload(&mut self, enabled: bool) -> &mut Self {
        self.enable_segmentation_offload = enabled;
        self
    }

    /// Configure NAT traversal capabilities for this connection
    ///
    /// When enabled, this connection will support QUIC NAT traversal extensions including:
    /// - Address candidate advertisement and validation
    /// - Coordinated hole punching through bootstrap nodes
    /// - Multi-path connectivity testing
    /// - Automatic path migration for NAT rebinding
    ///
    /// This is required for P2P connections through NATs in Autonomi networks.
    /// Pass `None` to disable NAT traversal, or use the high-level NAT traversal API
    /// to create appropriate configurations.
    pub fn nat_traversal_config(
        &mut self,
        config: Option<crate::transport_parameters::NatTraversalConfig>,
    ) -> &mut Self {
        self.nat_traversal_config = config;
        self
    }

    /// Enable NAT traversal with default client configuration
    ///
    /// This is a convenience method that enables NAT traversal with sensible defaults
    /// for a client endpoint. Use `nat_traversal_config()` for more control.
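    ///
    /// For example:
    ///
    /// ```
    /// use ant_quic::TransportConfig;
    ///
    /// let mut config = TransportConfig::default();
    /// config.enable_nat_traversal(true);
    /// assert!(config.get_nat_traversal_config().is_some());
    /// ```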
    pub fn enable_nat_traversal(&mut self, enabled: bool) -> &mut Self {
        if enabled {
            use crate::transport_parameters::NatTraversalConfig;
            // Default to client support (empty parameter)
            self.nat_traversal_config = Some(NatTraversalConfig::ClientSupport);
        } else {
            self.nat_traversal_config = None;
        }
        self
    }

    /// Set the address discovery configuration
    ///
    /// This enables the QUIC Address Discovery extension (draft-ietf-quic-address-discovery-00)
    /// which allows endpoints to share observed addresses with each other.
    pub fn address_discovery_config(
        &mut self,
        config: Option<crate::transport_parameters::AddressDiscoveryConfig>,
    ) -> &mut Self {
        self.address_discovery_config = config;
        self
    }

    /// Enable address discovery with default configuration
    ///
    /// This is a convenience method that enables address discovery with sensible defaults.
    /// Use `address_discovery_config()` for more control.
    pub fn enable_address_discovery(&mut self, enabled: bool) -> &mut Self {
        if enabled {
            use crate::transport_parameters::AddressDiscoveryConfig;
            // Default configuration - willing to both send and receive address observations
            self.address_discovery_config = Some(AddressDiscoveryConfig::SendAndReceive);
        } else {
            self.address_discovery_config = None;
        }
        self
    }

    /// Set the Post-Quantum Cryptography algorithms configuration
    ///
    /// This advertises which PQC algorithms are supported by this endpoint.
    /// When both endpoints support PQC, they can negotiate the use of quantum-resistant algorithms.
    pub fn pqc_algorithms(
        &mut self,
        algorithms: Option<crate::transport_parameters::PqcAlgorithms>,
    ) -> &mut Self {
        self.pqc_algorithms = algorithms;
        self
    }

    /// Enable Post-Quantum Cryptography with default algorithms
    ///
    /// This is a convenience method that enables all standard PQC algorithms.
    /// Use `pqc_algorithms()` for more control over which algorithms to support.
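    ///
    /// A sketch of a P2P-oriented setup combining the convenience toggles defined on this type
    /// (illustrative; pick these per your deployment's requirements):
    ///
    /// ```
    /// use ant_quic::TransportConfig;
    ///
    /// let mut config = TransportConfig::default();
    /// config
    ///     .enable_nat_traversal(true)
    ///     .enable_address_discovery(true)
    ///     .enable_pqc(true);
    /// assert!(config.get_pqc_algorithms().is_some());
    /// ```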
    pub fn enable_pqc(&mut self, enabled: bool) -> &mut Self {
        if enabled {
            use crate::transport_parameters::PqcAlgorithms;
            // Enable all standard algorithms
            self.pqc_algorithms = Some(PqcAlgorithms {
                ml_kem_768: true,
                ml_dsa_65: true,
                hybrid_x25519_ml_kem: true,
                hybrid_ed25519_ml_dsa: true,
            });
        } else {
            self.pqc_algorithms = None;
        }
        self
    }

    /// Get the address discovery configuration (read-only)
    pub fn get_address_discovery_config(
        &self,
    ) -> Option<&crate::transport_parameters::AddressDiscoveryConfig> {
        self.address_discovery_config.as_ref()
    }

    /// Get the PQC algorithms configuration (read-only)
    pub fn get_pqc_algorithms(&self) -> Option<&crate::transport_parameters::PqcAlgorithms> {
        self.pqc_algorithms.as_ref()
    }

    /// Get the NAT traversal configuration (read-only)
    pub fn get_nat_traversal_config(
        &self,
    ) -> Option<&crate::transport_parameters::NatTraversalConfig> {
        self.nat_traversal_config.as_ref()
    }
}

impl Default for TransportConfig {
    fn default() -> Self {
        const EXPECTED_RTT: u32 = 100; // ms
        const MAX_STREAM_BANDWIDTH: u32 = 12500 * 1000; // bytes/s
        // Window size needed to avoid pipeline stalls
        const STREAM_RWND: u32 = MAX_STREAM_BANDWIDTH / 1000 * EXPECTED_RTT;

        Self {
            max_concurrent_bidi_streams: 100u32.into(),
            max_concurrent_uni_streams: 100u32.into(),
            // 30 second default recommended by RFC 9308 § 3.2
            max_idle_timeout: Some(VarInt(30_000)),
            stream_receive_window: STREAM_RWND.into(),
            receive_window: VarInt::MAX,
            send_window: (8 * STREAM_RWND).into(),
            send_fairness: true,

            packet_threshold: 3,
            time_threshold: 9.0 / 8.0,
            initial_rtt: Duration::from_millis(333), // per spec, intentionally distinct from EXPECTED_RTT
            initial_mtu: INITIAL_MTU,
            min_mtu: INITIAL_MTU,
            mtu_discovery_config: Some(MtuDiscoveryConfig::default()),
            pad_to_mtu: false,
            ack_frequency_config: None,

            persistent_congestion_threshold: 3,
            keep_alive_interval: None,
            crypto_buffer_size: 16 * 1024,
            allow_spin: true,
            datagram_receive_buffer_size: Some(STREAM_RWND as usize),
            datagram_send_buffer_size: 1024 * 1024,
            #[cfg(test)]
            deterministic_packet_numbers: false,

            congestion_controller_factory: Arc::new(congestion::CubicConfig::default()),

            enable_segmentation_offload: true,
            nat_traversal_config: None,
            address_discovery_config: None,
            // Default to pure PQ key exchange (ML-KEM-768); signatures are handled at
            // the transport's binding layer via pinned ML-DSA raw public keys.
            pqc_algorithms: Some(crate::transport_parameters::PqcAlgorithms {
                ml_kem_768: true,
                ml_dsa_65: false,
                hybrid_x25519_ml_kem: false,
                hybrid_ed25519_ml_dsa: false,
            }),
        }
    }
}

impl fmt::Debug for TransportConfig {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self {
            max_concurrent_bidi_streams,
            max_concurrent_uni_streams,
            max_idle_timeout,
            stream_receive_window,
            receive_window,
            send_window,
            send_fairness,
            packet_threshold,
            time_threshold,
            initial_rtt,
            initial_mtu,
            min_mtu,
            mtu_discovery_config,
            pad_to_mtu,
            ack_frequency_config,
            persistent_congestion_threshold,
            keep_alive_interval,
            crypto_buffer_size,
            allow_spin,
            datagram_receive_buffer_size,
            datagram_send_buffer_size,
            #[cfg(test)]
            deterministic_packet_numbers: _,
            congestion_controller_factory: _,
            enable_segmentation_offload,
            nat_traversal_config,
            address_discovery_config,
            pqc_algorithms,
        } = self;
        fmt.debug_struct("TransportConfig")
            .field("max_concurrent_bidi_streams", max_concurrent_bidi_streams)
            .field("max_concurrent_uni_streams", max_concurrent_uni_streams)
            .field("max_idle_timeout", max_idle_timeout)
            .field("stream_receive_window", stream_receive_window)
            .field("receive_window", receive_window)
            .field("send_window", send_window)
            .field("send_fairness", send_fairness)
            .field("packet_threshold", packet_threshold)
            .field("time_threshold", time_threshold)
            .field("initial_rtt", initial_rtt)
            .field("initial_mtu", initial_mtu)
            .field("min_mtu", min_mtu)
            .field("mtu_discovery_config", mtu_discovery_config)
            .field("pad_to_mtu", pad_to_mtu)
            .field("ack_frequency_config", ack_frequency_config)
            .field(
                "persistent_congestion_threshold",
                persistent_congestion_threshold,
            )
            .field("keep_alive_interval", keep_alive_interval)
            .field("crypto_buffer_size", crypto_buffer_size)
            .field("allow_spin", allow_spin)
            .field("datagram_receive_buffer_size", datagram_receive_buffer_size)
            .field("datagram_send_buffer_size", datagram_send_buffer_size)
            // congestion_controller_factory not debug
            .field("enable_segmentation_offload", enable_segmentation_offload)
            .field("nat_traversal_config", nat_traversal_config)
            .field("address_discovery_config", address_discovery_config)
            .field("pqc_algorithms", pqc_algorithms)
            .finish_non_exhaustive()
    }
}

/// Parameters for controlling the peer's acknowledgement frequency
///
/// The parameters provided in this config will be sent to the peer at the beginning of the
/// connection, so it can take them into account when sending acknowledgements (see each parameter's
/// description for details on how it influences acknowledgement frequency).
///
/// Quinn's implementation follows the fourth draft of the
/// [QUIC Acknowledgement Frequency extension](https://datatracker.ietf.org/doc/html/draft-ietf-quic-ack-frequency-04).
/// The defaults produce behavior slightly different from the behavior without this extension,
/// because they change the way reordered packets are handled (see
/// [`AckFrequencyConfig::reordering_threshold`] for details).
#[derive(Clone, Debug)]
pub struct AckFrequencyConfig {
    pub(crate) ack_eliciting_threshold: VarInt,
    pub(crate) max_ack_delay: Option<Duration>,
    pub(crate) reordering_threshold: VarInt,
}

impl AckFrequencyConfig {
    /// The ack-eliciting threshold we will request the peer to use
    ///
    /// This threshold represents the number of ack-eliciting packets an endpoint may receive
    /// without immediately sending an ACK.
    ///
    /// The remote peer should send at least one ACK frame when more than this number of
    /// ack-eliciting packets have been received. A value of 0 results in a receiver immediately
    /// acknowledging every ack-eliciting packet.
    ///
    /// Defaults to 1, which sends ACK frames for every other ack-eliciting packet.
    pub fn ack_eliciting_threshold(&mut self, value: VarInt) -> &mut Self {
        self.ack_eliciting_threshold = value;
        self
    }

    /// The `max_ack_delay` we will request the peer to use
    ///
    /// This parameter represents the maximum amount of time that an endpoint waits before sending
    /// an ACK when the ack-eliciting threshold hasn't been reached.
    ///
    /// The effective `max_ack_delay` will be clamped to be at least the peer's `min_ack_delay`
    /// transport parameter, and at most the greater of the current path RTT or 25ms.
    ///
    /// Defaults to `None`, in which case the peer's original `max_ack_delay` will be used, as
    /// obtained from its transport parameters.
    pub fn max_ack_delay(&mut self, value: Option<Duration>) -> &mut Self {
        self.max_ack_delay = value;
        self
    }

    /// The reordering threshold we will request the peer to use
    ///
    /// This threshold represents the number of out-of-order packets that will trigger an endpoint
    /// to send an ACK, without waiting for `ack_eliciting_threshold` to be exceeded or for
    /// `max_ack_delay` to elapse.
    ///
    /// A value of 0 indicates out-of-order packets do not elicit an immediate ACK. A value of 1
    /// immediately acknowledges any packets that are received out of order (this is also the
    /// behavior when the extension is disabled).
    ///
    /// It is recommended to set this value to [`TransportConfig::packet_threshold`] minus one.
    /// Since the default value for [`TransportConfig::packet_threshold`] is 3, this value defaults
    /// to 2.
    pub fn reordering_threshold(&mut self, value: VarInt) -> &mut Self {
        self.reordering_threshold = value;
        self
    }
}

impl Default for AckFrequencyConfig {
    fn default() -> Self {
        Self {
            ack_eliciting_threshold: VarInt(1),
            max_ack_delay: None,
            reordering_threshold: VarInt(2),
        }
    }
}

/// Parameters governing MTU discovery.
///
/// # The why of MTU discovery
///
/// By design, QUIC ensures during the handshake that the network path between the client and the
/// server is able to transmit unfragmented UDP packets with a body of 1200 bytes. In other words,
/// once the connection is established, we know that the network path's maximum transmission unit
/// (MTU) is at least 1200 bytes (plus IP and UDP headers). Because of this, a QUIC endpoint can
/// split outgoing data into packets of 1200 bytes, with confidence that the network will be able to
/// deliver them (if the endpoint were to send bigger packets, they could prove too big and end up
/// being dropped).
///
/// There is, however, a significant overhead associated with sending a packet. If the same
/// information can be sent in fewer packets, that results in higher throughput. The number of
/// packets that need to be sent is inversely proportional to the MTU: the higher the MTU, the
/// bigger the packets that can be sent, and the fewer packets that are needed to transmit a given
/// amount of bytes.
///
/// Most networks have an MTU higher than 1200. Through MTU discovery, endpoints can detect the
/// path's MTU and, if it turns out to be higher, start sending bigger packets.
///
/// # MTU discovery internals
///
/// Quinn implements MTU discovery through DPLPMTUD (Datagram Packetization Layer Path MTU
/// Discovery), described in [section 14.3 of RFC
/// 9000](https://www.rfc-editor.org/rfc/rfc9000.html#section-14.3). This method consists of sending
/// QUIC packets padded to a particular size (called PMTU probes), and waiting to see if the remote
/// peer responds with an ACK. If an ACK is received, that means the probe arrived at the remote
/// peer, which in turn means that the network path's MTU is at least the packet's size. If the
/// probe is lost, it is sent another two times before concluding that the MTU is lower than the
/// packet's size.
///
/// MTU discovery runs on a schedule (e.g. every 600 seconds) specified through
/// [`MtuDiscoveryConfig::interval`]. The first run happens right after the handshake, and
/// subsequent discoveries are scheduled to run when the interval has elapsed, starting from the
/// last time when MTU discovery completed.
///
/// Since the search space for MTUs is quite big (the smallest possible MTU is 1200, and the highest
/// is 65527), Quinn performs a binary search to keep the number of probes as low as possible. The
/// lower bound of the search is equal to [`TransportConfig::initial_mtu`] in the
/// initial MTU discovery run, and is equal to the currently discovered MTU in subsequent runs. The
/// upper bound is determined by the minimum of [`MtuDiscoveryConfig::upper_bound`] and the
/// `max_udp_payload_size` transport parameter received from the peer during the handshake.
///
/// # Black hole detection
///
/// If, at some point, the network path no longer accepts packets of the detected size, packet loss
/// will eventually trigger black hole detection and reset the detected MTU to 1200. In that case,
/// MTU discovery will be triggered after [`MtuDiscoveryConfig::black_hole_cooldown`] (ignoring the
/// timer that was set based on [`MtuDiscoveryConfig::interval`]).
///
/// # Interaction between peers
///
/// There is no guarantee that the MTU on the path between A and B is the same as the MTU of the
/// path between B and A. Therefore, each peer in the connection needs to run MTU discovery
/// independently in order to discover the path's MTU.
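///
/// # Example
///
/// A minimal sketch that restates the defaults explicitly (all values shown are the documented
/// defaults):
///
/// ```
/// use std::time::Duration;
/// use ant_quic::config::{MtuDiscoveryConfig, TransportConfig};
///
/// let mut mtu = MtuDiscoveryConfig::default();
/// mtu.upper_bound(1452)                              // stay within Ethernet's MTU
///     .interval(Duration::from_secs(600))            // re-probe every 10 minutes
///     .black_hole_cooldown(Duration::from_secs(60))  // retry a minute after a suspected black hole
///     .minimum_change(20);                           // stop once further gains are marginal
///
/// let mut config = TransportConfig::default();
/// config.mtu_discovery_config(Some(mtu));
/// ```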
#[derive(Clone, Debug)]
pub struct MtuDiscoveryConfig {
    pub(crate) interval: Duration,
    pub(crate) upper_bound: u16,
    pub(crate) minimum_change: u16,
    pub(crate) black_hole_cooldown: Duration,
}

impl MtuDiscoveryConfig {
    /// Specifies the time to wait after completing MTU discovery before starting a new MTU
    /// discovery run.
    ///
    /// Defaults to 600 seconds, as recommended by [RFC
    /// 8899](https://www.rfc-editor.org/rfc/rfc8899).
    pub fn interval(&mut self, value: Duration) -> &mut Self {
        self.interval = value;
        self
    }

    /// Specifies the upper bound to the max UDP payload size that MTU discovery will search for.
    ///
    /// Defaults to 1452, to stay within Ethernet's MTU when using IPv4 and IPv6. The highest
    /// allowed value is 65527, which corresponds to the maximum permitted UDP payload on IPv6.
    ///
    /// It is safe to use an arbitrarily high upper bound, regardless of the network path's MTU. The
    /// only drawback is that MTU discovery might take more time to finish.
    pub fn upper_bound(&mut self, value: u16) -> &mut Self {
        self.upper_bound = value.min(MAX_UDP_PAYLOAD);
        self
    }

    /// Specifies the amount of time that MTU discovery should wait after a black hole was detected
    /// before running again. Defaults to one minute.
    ///
    /// Black hole detection can be spuriously triggered in case of congestion, so it makes sense to
    /// try MTU discovery again after a short period of time.
    pub fn black_hole_cooldown(&mut self, value: Duration) -> &mut Self {
        self.black_hole_cooldown = value;
        self
    }

    /// Specifies the minimum MTU change to stop the MTU discovery phase.
    /// Defaults to 20.
    pub fn minimum_change(&mut self, value: u16) -> &mut Self {
        self.minimum_change = value;
        self
    }
}

impl Default for MtuDiscoveryConfig {
    fn default() -> Self {
        Self {
            interval: Duration::from_secs(600),
            upper_bound: 1452,
            black_hole_cooldown: Duration::from_secs(60),
            minimum_change: 20,
        }
    }
}

/// Maximum duration of inactivity to accept before timing out the connection
///
/// This wraps an underlying [`VarInt`], representing the duration in milliseconds. Values can be
/// constructed by converting directly from `VarInt`, or using `TryFrom<Duration>`.
///
/// ```
/// # use std::{convert::TryFrom, time::Duration};
/// use ant_quic::config::IdleTimeout;
/// use ant_quic::{VarIntBoundsExceeded, VarInt};
/// # fn main() -> Result<(), VarIntBoundsExceeded> {
/// // A `VarInt`-encoded value in milliseconds
/// let timeout = IdleTimeout::from(VarInt::from_u32(10_000));
///
/// // Try to convert a `Duration` into a `VarInt`-encoded timeout
/// let timeout = IdleTimeout::try_from(Duration::from_secs(10))?;
/// # Ok(())
/// # }
/// ```
#[derive(Default, Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct IdleTimeout(VarInt);

impl From<VarInt> for IdleTimeout {
    fn from(inner: VarInt) -> Self {
        Self(inner)
    }
}

impl std::convert::TryFrom<Duration> for IdleTimeout {
    type Error = VarIntBoundsExceeded;

    fn try_from(timeout: Duration) -> Result<Self, Self::Error> {
        let inner = VarInt::try_from(timeout.as_millis())?;
        Ok(Self(inner))
    }
}

impl fmt::Debug for IdleTimeout {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}