// ant_quic/config/transport.rs

1// Copyright 2024 Saorsa Labs Ltd.
2//
3// This Saorsa Network Software is licensed under the General Public License (GPL), version 3.
4// Please see the file LICENSE-GPL, or visit <http://www.gnu.org/licenses/> for the full text.
5//
6// Full details available at https://saorsalabs.com/licenses
7
8use std::{fmt, sync::Arc};
9
10use crate::{Duration, INITIAL_MTU, MAX_UDP_PAYLOAD, VarInt, VarIntBoundsExceeded, congestion};
11
/// Parameters governing the core QUIC state machine
///
/// Default values should be suitable for most internet applications. Applications protocols which
/// forbid remotely-initiated streams should set `max_concurrent_bidi_streams` and
/// `max_concurrent_uni_streams` to zero.
///
/// In some cases, performance or resource requirements can be improved by tuning these values to
/// suit a particular application and/or network connection. In particular, data window sizes can be
/// tuned for a particular expected round trip time, link capacity, and memory availability. Tuning
/// for higher bandwidths and latencies increases worst-case memory consumption, but does not impair
/// performance at lower bandwidths and latencies. The default configuration is tuned for a 100Mbps
/// link with a 100ms round trip time.
#[derive(Clone)]
pub struct TransportConfig {
    // Flow control: per-direction stream-count limits advertised to the peer.
    pub(crate) max_concurrent_bidi_streams: VarInt,
    pub(crate) max_concurrent_uni_streams: VarInt,
    // Idle timeout as VarInt-encoded milliseconds; `None` means no timeout.
    pub(crate) max_idle_timeout: Option<VarInt>,
    // Flow control: per-stream and connection-wide receive windows, plus the
    // cap on unacknowledged data we will buffer for sending.
    pub(crate) stream_receive_window: VarInt,
    pub(crate) receive_window: VarInt,
    pub(crate) send_window: u64,
    // Round-robin scheduling among equal-priority send streams when `true`.
    pub(crate) send_fairness: bool,

    // Loss detection: packet-number reordering threshold and time threshold
    // (as a multiple of RTT), plus the RTT assumed before any sample exists.
    pub(crate) packet_threshold: u32,
    pub(crate) time_threshold: f32,
    pub(crate) initial_rtt: Duration,
    // MTU handling: starting and guaranteed-minimum UDP payload sizes,
    // optional DPLPMTUD configuration, and optional padding to the MTU.
    pub(crate) initial_mtu: u16,
    pub(crate) min_mtu: u16,
    pub(crate) mtu_discovery_config: Option<MtuDiscoveryConfig>,
    pub(crate) pad_to_mtu: bool,
    // `None` disables controlling the peer's acknowledgement frequency.
    pub(crate) ack_frequency_config: Option<AckFrequencyConfig>,

    pub(crate) persistent_congestion_threshold: u32,
    pub(crate) keep_alive_interval: Option<Duration>,
    pub(crate) crypto_buffer_size: usize,
    pub(crate) allow_spin: bool,
    // Datagram extension buffers; `None` receive size disables incoming datagrams.
    pub(crate) datagram_receive_buffer_size: Option<usize>,
    pub(crate) datagram_send_buffer_size: usize,
    // Test-only: disable the deliberate skipping of packet numbers.
    #[cfg(test)]
    pub(crate) deterministic_packet_numbers: bool,

    pub(crate) congestion_controller_factory: Arc<dyn congestion::ControllerFactory + Send + Sync>,

    pub(crate) enable_segmentation_offload: bool,

    /// NAT traversal configuration
    pub(crate) nat_traversal_config: Option<crate::transport_parameters::NatTraversalConfig>,

    /// Address discovery configuration
    pub(crate) address_discovery_config:
        Option<crate::transport_parameters::AddressDiscoveryConfig>,

    /// Post-Quantum Cryptography algorithms configuration
    pub(crate) pqc_algorithms: Option<crate::transport_parameters::PqcAlgorithms>,
}
66
impl TransportConfig {
    /// Maximum number of incoming bidirectional streams that may be open concurrently
    ///
    /// Must be nonzero for the peer to open any bidirectional streams.
    ///
    /// Worst-case memory use is directly proportional to `max_concurrent_bidi_streams *
    /// stream_receive_window`, with an upper bound proportional to `receive_window`.
    pub fn max_concurrent_bidi_streams(&mut self, value: VarInt) -> &mut Self {
        self.max_concurrent_bidi_streams = value;
        self
    }

    /// Variant of `max_concurrent_bidi_streams` affecting unidirectional streams
    pub fn max_concurrent_uni_streams(&mut self, value: VarInt) -> &mut Self {
        self.max_concurrent_uni_streams = value;
        self
    }

    /// Maximum duration of inactivity to accept before timing out the connection.
    ///
    /// The true idle timeout is the minimum of this and the peer's own max idle timeout. `None`
    /// represents an infinite timeout. Defaults to 30 seconds.
    ///
    /// **WARNING**: If a peer or its network path malfunctions or acts maliciously, an infinite
    /// idle timeout can result in permanently hung futures!
    ///
    /// ```
    /// # use std::{convert::TryInto, time::Duration};
    /// # use ant_quic::{TransportConfig, VarInt, VarIntBoundsExceeded};
    /// # fn main() -> Result<(), VarIntBoundsExceeded> {
    /// let mut config = TransportConfig::default();
    ///
    /// // Set the idle timeout as `VarInt`-encoded milliseconds
    /// config.max_idle_timeout(Some(VarInt::from_u32(10_000).into()));
    ///
    /// // Set the idle timeout as a `Duration`
    /// config.max_idle_timeout(Some(Duration::from_secs(10).try_into()?));
    /// # Ok(())
    /// # }
    /// ```
    pub fn max_idle_timeout(&mut self, value: Option<IdleTimeout>) -> &mut Self {
        // `IdleTimeout` wraps the VarInt millisecond count; store the raw value.
        self.max_idle_timeout = value.map(|t| t.0);
        self
    }

    /// Maximum number of bytes the peer may transmit without acknowledgement on any one stream
    /// before becoming blocked.
    ///
    /// This should be set to at least the expected connection latency multiplied by the maximum
    /// desired throughput. Setting this smaller than `receive_window` helps ensure that a single
    /// stream doesn't monopolize receive buffers, which may otherwise occur if the application
    /// chooses not to read from a large stream for a time while still requiring data on other
    /// streams.
    pub fn stream_receive_window(&mut self, value: VarInt) -> &mut Self {
        self.stream_receive_window = value;
        self
    }

    /// Maximum number of bytes the peer may transmit across all streams of a connection before
    /// becoming blocked.
    ///
    /// This should be set to at least the expected connection latency multiplied by the maximum
    /// desired throughput. Larger values can be useful to allow maximum throughput within a
    /// stream while another is blocked.
    pub fn receive_window(&mut self, value: VarInt) -> &mut Self {
        self.receive_window = value;
        self
    }

    /// Maximum number of bytes to transmit to a peer without acknowledgment
    ///
    /// Provides an upper bound on memory when communicating with peers that issue large amounts of
    /// flow control credit. Endpoints that wish to handle large numbers of connections robustly
    /// should take care to set this low enough to guarantee memory exhaustion does not occur if
    /// every connection uses the entire window.
    pub fn send_window(&mut self, value: u64) -> &mut Self {
        self.send_window = value;
        self
    }

    /// Whether to implement fair queuing for send streams having the same priority.
    ///
    /// When enabled, connections schedule data from outgoing streams having the same priority in a
    /// round-robin fashion. When disabled, streams are scheduled in the order they are written to.
    ///
    /// Note that this only affects streams with the same priority. Higher priority streams always
    /// take precedence over lower priority streams.
    ///
    /// Disabling fairness can reduce fragmentation and protocol overhead for workloads that use
    /// many small streams.
    pub fn send_fairness(&mut self, value: bool) -> &mut Self {
        self.send_fairness = value;
        self
    }

    /// Maximum reordering in packet number space before FACK style loss detection considers a
    /// packet lost. Should not be less than 3, per RFC5681.
    pub fn packet_threshold(&mut self, value: u32) -> &mut Self {
        self.packet_threshold = value;
        self
    }

    /// Maximum reordering in time space before time based loss detection considers a packet lost,
    /// as a factor of RTT
    pub fn time_threshold(&mut self, value: f32) -> &mut Self {
        self.time_threshold = value;
        self
    }

    /// The RTT used before an RTT sample is taken
    pub fn initial_rtt(&mut self, value: Duration) -> &mut Self {
        self.initial_rtt = value;
        self
    }

    /// The initial value to be used as the maximum UDP payload size before running MTU discovery
    /// (see [`TransportConfig::mtu_discovery_config`]).
    ///
    /// Must be at least 1200, which is the default, and known to be safe for typical internet
    /// applications. Larger values are more efficient, but increase the risk of packet loss due to
    /// exceeding the network path's IP MTU. If the provided value is higher than what the network
    /// path actually supports, packet loss will eventually trigger black hole detection and bring
    /// it down to [`TransportConfig::min_mtu`].
    pub fn initial_mtu(&mut self, value: u16) -> &mut Self {
        // Silently clamp to the 1200-byte floor documented above.
        self.initial_mtu = value.max(INITIAL_MTU);
        self
    }

    // The effective initial MTU: never below the guaranteed minimum, even if
    // `initial_mtu` and `min_mtu` were configured inconsistently.
    pub(crate) fn get_initial_mtu(&self) -> u16 {
        self.initial_mtu.max(self.min_mtu)
    }

    /// The maximum UDP payload size guaranteed to be supported by the network.
    ///
    /// Must be at least 1200, which is the default, and lower than or equal to
    /// [`TransportConfig::initial_mtu`].
    ///
    /// Real-world MTUs can vary according to ISP, VPN, and properties of intermediate network links
    /// outside of either endpoint's control. Extreme care should be used when raising this value
    /// outside of private networks where these factors are fully controlled. If the provided value
    /// is higher than what the network path actually supports, the result will be unpredictable and
    /// catastrophic packet loss, without a possibility of repair. Prefer
    /// [`TransportConfig::initial_mtu`] together with
    /// [`TransportConfig::mtu_discovery_config`] to set a maximum UDP payload size that robustly
    /// adapts to the network.
    pub fn min_mtu(&mut self, value: u16) -> &mut Self {
        // Clamp to the 1200-byte floor; the `<= initial_mtu` relation is
        // reconciled lazily by `get_initial_mtu`.
        self.min_mtu = value.max(INITIAL_MTU);
        self
    }

    /// Specifies the MTU discovery config (see [`MtuDiscoveryConfig`] for details).
    ///
    /// Enabled by default.
    pub fn mtu_discovery_config(&mut self, value: Option<MtuDiscoveryConfig>) -> &mut Self {
        self.mtu_discovery_config = value;
        self
    }

    /// Pad UDP datagrams carrying application data to current maximum UDP payload size
    ///
    /// Disabled by default. UDP datagrams containing loss probes are exempt from padding.
    ///
    /// Enabling this helps mitigate traffic analysis by network observers, but it increases
    /// bandwidth usage. Without this mitigation precise plain text size of application datagrams as
    /// well as the total size of stream write bursts can be inferred by observers under certain
    /// conditions. This analysis requires either an uncongested connection or application datagrams
    /// too large to be coalesced.
    pub fn pad_to_mtu(&mut self, value: bool) -> &mut Self {
        self.pad_to_mtu = value;
        self
    }

    /// Specifies the ACK frequency config (see [`AckFrequencyConfig`] for details)
    ///
    /// The provided configuration will be ignored if the peer does not support the acknowledgement
    /// frequency QUIC extension.
    ///
    /// Defaults to `None`, which disables controlling the peer's acknowledgement frequency. Even
    /// if set to `None`, the local side still supports the acknowledgement frequency QUIC
    /// extension and may use it in other ways.
    pub fn ack_frequency_config(&mut self, value: Option<AckFrequencyConfig>) -> &mut Self {
        self.ack_frequency_config = value;
        self
    }

    /// Number of consecutive PTOs after which network is considered to be experiencing persistent congestion.
    pub fn persistent_congestion_threshold(&mut self, value: u32) -> &mut Self {
        self.persistent_congestion_threshold = value;
        self
    }

    /// Period of inactivity before sending a keep-alive packet
    ///
    /// Keep-alive packets prevent an inactive but otherwise healthy connection from timing out.
    ///
    /// `None` to disable, which is the default. Only one side of any given connection needs keep-alive
    /// enabled for the connection to be preserved. Must be set lower than the idle_timeout of both
    /// peers to be effective.
    pub fn keep_alive_interval(&mut self, value: Option<Duration>) -> &mut Self {
        self.keep_alive_interval = value;
        self
    }

    /// Maximum quantity of out-of-order crypto layer data to buffer
    pub fn crypto_buffer_size(&mut self, value: usize) -> &mut Self {
        self.crypto_buffer_size = value;
        self
    }

    /// Whether the implementation is permitted to set the spin bit on this connection
    ///
    /// This allows passive observers to easily judge the round trip time of a connection, which can
    /// be useful for network administration but sacrifices a small amount of privacy.
    pub fn allow_spin(&mut self, value: bool) -> &mut Self {
        self.allow_spin = value;
        self
    }

    /// Maximum number of incoming application datagram bytes to buffer, or None to disable
    /// incoming datagrams
    ///
    /// The peer is forbidden to send single datagrams larger than this size. If the aggregate size
    /// of all datagrams that have been received from the peer but not consumed by the application
    /// exceeds this value, old datagrams are dropped until it is no longer exceeded.
    pub fn datagram_receive_buffer_size(&mut self, value: Option<usize>) -> &mut Self {
        self.datagram_receive_buffer_size = value;
        self
    }

    /// Maximum number of outgoing application datagram bytes to buffer
    ///
    /// While datagrams are sent ASAP, it is possible for an application to generate data faster
    /// than the link, or even the underlying hardware, can transmit them. This limits the amount of
    /// memory that may be consumed in that case. When the send buffer is full and a new datagram is
    /// sent, older datagrams are dropped until sufficient space is available.
    pub fn datagram_send_buffer_size(&mut self, value: usize) -> &mut Self {
        self.datagram_send_buffer_size = value;
        self
    }

    /// Whether to force every packet number to be used
    ///
    /// By default, packet numbers are occasionally skipped to ensure peers aren't ACKing packets
    /// before they see them.
    #[cfg(test)]
    #[allow(dead_code)]
    pub(crate) fn deterministic_packet_numbers(&mut self, enabled: bool) -> &mut Self {
        self.deterministic_packet_numbers = enabled;
        self
    }

    /// How to construct new `congestion::Controller`s
    ///
    /// Typically the refcounted configuration of a `congestion::Controller`,
    /// e.g. a `congestion::NewRenoConfig`.
    ///
    /// # Example
    /// ```
    /// # use std::sync::Arc;
    /// use ant_quic::config::TransportConfig;
    ///
    /// let mut config = TransportConfig::default();
    /// // The default uses CubicConfig, but custom implementations can be provided
    /// // by implementing the congestion::ControllerFactory trait
    /// ```
    pub fn congestion_controller_factory(
        &mut self,
        factory: Arc<dyn congestion::ControllerFactory + Send + Sync + 'static>,
    ) -> &mut Self {
        self.congestion_controller_factory = factory;
        self
    }

    /// Whether to use "Generic Segmentation Offload" to accelerate transmits, when supported by the
    /// environment
    ///
    /// Defaults to `true`.
    ///
    /// GSO dramatically reduces CPU consumption when sending large numbers of packets with the same
    /// headers, such as when transmitting bulk data on a connection. However, it is not supported
    /// by all network interface drivers or packet inspection tools. `quinn-udp` will attempt to
    /// disable GSO automatically when unavailable, but this can lead to spurious packet loss at
    /// startup, temporarily degrading performance.
    pub fn enable_segmentation_offload(&mut self, enabled: bool) -> &mut Self {
        self.enable_segmentation_offload = enabled;
        self
    }

    /// Configure NAT traversal capabilities for this connection
    ///
    /// When enabled, this connection will support QUIC NAT traversal extensions including:
    /// - Address candidate advertisement and validation
    /// - Coordinated hole punching through bootstrap nodes
    /// - Multi-path connectivity testing
    /// - Automatic path migration for NAT rebinding
    ///
    /// This is required for P2P connections through NATs in Autonomi networks.
    /// Pass `None` to disable NAT traversal or use the high-level NAT traversal API
    /// to create appropriate configurations.
    pub fn nat_traversal_config(
        &mut self,
        config: Option<crate::transport_parameters::NatTraversalConfig>,
    ) -> &mut Self {
        self.nat_traversal_config = config;
        self
    }

    /// Enable NAT traversal with default client configuration
    ///
    /// This is a convenience method that enables NAT traversal with sensible defaults
    /// for a client endpoint. Use `nat_traversal_config()` for more control.
    pub fn enable_nat_traversal(&mut self, enabled: bool) -> &mut Self {
        // v0.13.0+: NAT traversal is mandatory in symmetric P2P.
        // The `enabled` flag is ignored and kept only for legacy configs.
        let _ = enabled;
        use crate::transport_parameters::NatTraversalConfig;
        self.nat_traversal_config = Some(NatTraversalConfig::ClientSupport);
        self
    }

    /// Set the address discovery configuration
    ///
    /// This enables the QUIC Address Discovery extension (draft-ietf-quic-address-discovery-00)
    /// which allows endpoints to share observed addresses with each other.
    pub fn address_discovery_config(
        &mut self,
        config: Option<crate::transport_parameters::AddressDiscoveryConfig>,
    ) -> &mut Self {
        self.address_discovery_config = config;
        self
    }

    /// Enable address discovery with default configuration
    ///
    /// This is a convenience method that enables address discovery with sensible defaults.
    /// Use `address_discovery_config()` for more control.
    pub fn enable_address_discovery(&mut self, enabled: bool) -> &mut Self {
        // v0.13.0+: Address discovery is mandatory in symmetric P2P.
        // The `enabled` flag is ignored and kept only for legacy configs.
        let _ = enabled;
        use crate::transport_parameters::AddressDiscoveryConfig;
        self.address_discovery_config = Some(AddressDiscoveryConfig::SendAndReceive);
        self
    }

    /// Set the Post-Quantum Cryptography algorithms configuration
    ///
    /// This advertises which PQC algorithms are supported by this endpoint.
    /// When both endpoints support PQC, they can negotiate the use of quantum-resistant algorithms.
    pub fn pqc_algorithms(
        &mut self,
        algorithms: Option<crate::transport_parameters::PqcAlgorithms>,
    ) -> &mut Self {
        self.pqc_algorithms = algorithms;
        self
    }

    /// Enable Post-Quantum Cryptography with default algorithms
    ///
    /// This is a convenience method that enables all standard PQC algorithms.
    /// Use `pqc_algorithms()` for more control over which algorithms to support.
    pub fn enable_pqc(&mut self, enabled: bool) -> &mut Self {
        // v0.13.0+: PQC is mandatory. The `enabled` flag is ignored and kept
        // only for legacy configs.
        let _ = enabled;
        use crate::transport_parameters::PqcAlgorithms;
        // NOTE(review): `Default::default()` enables only `ml_kem_768`
        // (ml_dsa_65: false, "ML-DSA-65 at binding layer"), while this method
        // enables both. Confirm the divergence is intentional.
        self.pqc_algorithms = Some(PqcAlgorithms {
            ml_kem_768: true,
            ml_dsa_65: true,
        });
        self
    }

    /// Get the address discovery configuration (read-only)
    pub fn get_address_discovery_config(
        &self,
    ) -> Option<&crate::transport_parameters::AddressDiscoveryConfig> {
        self.address_discovery_config.as_ref()
    }

    /// Get the PQC algorithms configuration (read-only)
    pub fn get_pqc_algorithms(&self) -> Option<&crate::transport_parameters::PqcAlgorithms> {
        self.pqc_algorithms.as_ref()
    }

    /// Get the NAT traversal configuration (read-only)
    pub fn get_nat_traversal_config(
        &self,
    ) -> Option<&crate::transport_parameters::NatTraversalConfig> {
        self.nat_traversal_config.as_ref()
    }
}
459
impl Default for TransportConfig {
    fn default() -> Self {
        // Tuning targets a 100Mbps link at 100ms RTT (see the type-level docs).
        const EXPECTED_RTT: u32 = 100; // ms
        const MAX_STREAM_BANDWIDTH: u32 = 12500 * 1000; // bytes/s
        // Window size needed to avoid pipeline stalls:
        // the bandwidth-delay product of the expected link.
        const STREAM_RWND: u32 = MAX_STREAM_BANDWIDTH / 1000 * EXPECTED_RTT;

        Self {
            max_concurrent_bidi_streams: 100u32.into(),
            max_concurrent_uni_streams: 100u32.into(),
            // 30 second default recommended by RFC 9308 § 3.2
            max_idle_timeout: Some(VarInt(30_000)),
            stream_receive_window: STREAM_RWND.into(),
            receive_window: VarInt::MAX,
            // Allow up to 8 streams' worth of unacknowledged outgoing data.
            send_window: (8 * STREAM_RWND).into(),
            send_fairness: true,

            packet_threshold: 3,
            time_threshold: 9.0 / 8.0,
            initial_rtt: Duration::from_millis(333), // per spec, intentionally distinct from EXPECTED_RTT
            initial_mtu: INITIAL_MTU,
            min_mtu: INITIAL_MTU,
            mtu_discovery_config: Some(MtuDiscoveryConfig::default()),
            pad_to_mtu: false,
            ack_frequency_config: None,

            persistent_congestion_threshold: 3,
            // Send QUIC PING frames to prevent idle timeout from closing
            // connections during gaps in application traffic (e.g., EVM payment
            // processing between quote and chunk storage phases). Must be less
            // than max_idle_timeout (30s).
            keep_alive_interval: Some(Duration::from_secs(15)),
            crypto_buffer_size: 16 * 1024,
            allow_spin: true,
            datagram_receive_buffer_size: Some(STREAM_RWND as usize),
            datagram_send_buffer_size: 1024 * 1024,
            #[cfg(test)]
            deterministic_packet_numbers: false,

            congestion_controller_factory: Arc::new(congestion::CubicConfig::default()),

            enable_segmentation_offload: true,
            nat_traversal_config: None,
            address_discovery_config: None,
            // v0.2: Pure PQC - ML-KEM-768 for key exchange, ML-DSA-65 at binding layer
            pqc_algorithms: Some(crate::transport_parameters::PqcAlgorithms {
                ml_kem_768: true,
                ml_dsa_65: false,
            }),
        }
    }
}
513
impl fmt::Debug for TransportConfig {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Exhaustively destructure `self` (no `..`) so that adding a field to
        // `TransportConfig` without updating this impl is a compile error.
        let Self {
            max_concurrent_bidi_streams,
            max_concurrent_uni_streams,
            max_idle_timeout,
            stream_receive_window,
            receive_window,
            send_window,
            send_fairness,
            packet_threshold,
            time_threshold,
            initial_rtt,
            initial_mtu,
            min_mtu,
            mtu_discovery_config,
            pad_to_mtu,
            ack_frequency_config,
            persistent_congestion_threshold,
            keep_alive_interval,
            crypto_buffer_size,
            allow_spin,
            datagram_receive_buffer_size,
            datagram_send_buffer_size,
            // Deliberately omitted from output: test-only flag and the factory
            // trait object (which has no Debug bound).
            #[cfg(test)]
                deterministic_packet_numbers: _,
            congestion_controller_factory: _,
            enable_segmentation_offload,
            nat_traversal_config,
            address_discovery_config,
            pqc_algorithms,
        } = self;
        fmt.debug_struct("TransportConfig")
            .field("max_concurrent_bidi_streams", max_concurrent_bidi_streams)
            .field("max_concurrent_uni_streams", max_concurrent_uni_streams)
            .field("max_idle_timeout", max_idle_timeout)
            .field("stream_receive_window", stream_receive_window)
            .field("receive_window", receive_window)
            .field("send_window", send_window)
            .field("send_fairness", send_fairness)
            .field("packet_threshold", packet_threshold)
            .field("time_threshold", time_threshold)
            .field("initial_rtt", initial_rtt)
            .field("initial_mtu", initial_mtu)
            .field("min_mtu", min_mtu)
            .field("mtu_discovery_config", mtu_discovery_config)
            .field("pad_to_mtu", pad_to_mtu)
            .field("ack_frequency_config", ack_frequency_config)
            .field(
                "persistent_congestion_threshold",
                persistent_congestion_threshold,
            )
            .field("keep_alive_interval", keep_alive_interval)
            .field("crypto_buffer_size", crypto_buffer_size)
            .field("allow_spin", allow_spin)
            .field("datagram_receive_buffer_size", datagram_receive_buffer_size)
            .field("datagram_send_buffer_size", datagram_send_buffer_size)
            // congestion_controller_factory not debug
            .field("enable_segmentation_offload", enable_segmentation_offload)
            .field("nat_traversal_config", nat_traversal_config)
            .field("address_discovery_config", address_discovery_config)
            .field("pqc_algorithms", pqc_algorithms)
            // `..` marker in output signals the intentionally skipped fields.
            .finish_non_exhaustive()
    }
}
579
/// Parameters for controlling the peer's acknowledgement frequency
///
/// The parameters provided in this config will be sent to the peer at the beginning of the
/// connection, so it can take them into account when sending acknowledgements (see each parameter's
/// description for details on how it influences acknowledgement frequency).
///
/// Quinn's implementation follows the fourth draft of the
/// [QUIC Acknowledgement Frequency extension](https://datatracker.ietf.org/doc/html/draft-ietf-quic-ack-frequency-04).
/// The defaults produce behavior slightly different than the behavior without this extension,
/// because they change the way reordered packets are handled (see
/// [`AckFrequencyConfig::reordering_threshold`] for details).
#[derive(Clone, Debug)]
pub struct AckFrequencyConfig {
    // Ack-eliciting packets the peer may receive before it must send an ACK.
    pub(crate) ack_eliciting_threshold: VarInt,
    // Requested peer max ACK delay; `None` defers to the peer's own transport parameter.
    pub(crate) max_ack_delay: Option<Duration>,
    // Out-of-order packet count that triggers an immediate ACK from the peer.
    pub(crate) reordering_threshold: VarInt,
}
597
598impl AckFrequencyConfig {
599    /// The ack-eliciting threshold we will request the peer to use
600    ///
601    /// This threshold represents the number of ack-eliciting packets an endpoint may receive
602    /// without immediately sending an ACK.
603    ///
604    /// The remote peer should send at least one ACK frame when more than this number of
605    /// ack-eliciting packets have been received. A value of 0 results in a receiver immediately
606    /// acknowledging every ack-eliciting packet.
607    ///
608    /// Defaults to 1, which sends ACK frames for every other ack-eliciting packet.
609    pub fn ack_eliciting_threshold(&mut self, value: VarInt) -> &mut Self {
610        self.ack_eliciting_threshold = value;
611        self
612    }
613
614    /// The `max_ack_delay` we will request the peer to use
615    ///
616    /// This parameter represents the maximum amount of time that an endpoint waits before sending
617    /// an ACK when the ack-eliciting threshold hasn't been reached.
618    ///
619    /// The effective `max_ack_delay` will be clamped to be at least the peer's `min_ack_delay`
620    /// transport parameter, and at most the greater of the current path RTT or 25ms.
621    ///
622    /// Defaults to `None`, in which case the peer's original `max_ack_delay` will be used, as
623    /// obtained from its transport parameters.
624    pub fn max_ack_delay(&mut self, value: Option<Duration>) -> &mut Self {
625        self.max_ack_delay = value;
626        self
627    }
628
629    /// The reordering threshold we will request the peer to use
630    ///
631    /// This threshold represents the amount of out-of-order packets that will trigger an endpoint
632    /// to send an ACK, without waiting for `ack_eliciting_threshold` to be exceeded or for
633    /// `max_ack_delay` to be elapsed.
634    ///
635    /// A value of 0 indicates out-of-order packets do not elicit an immediate ACK. A value of 1
636    /// immediately acknowledges any packets that are received out of order (this is also the
637    /// behavior when the extension is disabled).
638    ///
639    /// It is recommended to set this value to [`TransportConfig::packet_threshold`] minus one.
640    /// Since the default value for [`TransportConfig::packet_threshold`] is 3, this value defaults
641    /// to 2.
642    pub fn reordering_threshold(&mut self, value: VarInt) -> &mut Self {
643        self.reordering_threshold = value;
644        self
645    }
646}
647
648impl Default for AckFrequencyConfig {
649    fn default() -> Self {
650        Self {
651            ack_eliciting_threshold: VarInt(1),
652            max_ack_delay: None,
653            reordering_threshold: VarInt(2),
654        }
655    }
656}
657
658/// Parameters governing MTU discovery.
659///
660/// # The why of MTU discovery
661///
662/// By design, QUIC ensures during the handshake that the network path between the client and the
663/// server is able to transmit unfragmented UDP packets with a body of 1200 bytes. In other words,
664/// once the connection is established, we know that the network path's maximum transmission unit
665/// (MTU) is of at least 1200 bytes (plus IP and UDP headers). Because of this, a QUIC endpoint can
666/// split outgoing data in packets of 1200 bytes, with confidence that the network will be able to
667/// deliver them (if the endpoint were to send bigger packets, they could prove too big and end up
668/// being dropped).
669///
/// There is, however, a significant overhead associated with sending a packet. If the same
/// information can be sent in fewer packets, that results in higher throughput. The amount of
672/// packets that need to be sent is inversely proportional to the MTU: the higher the MTU, the
673/// bigger the packets that can be sent, and the fewer packets that are needed to transmit a given
674/// amount of bytes.
675///
676/// Most networks have an MTU higher than 1200. Through MTU discovery, endpoints can detect the
677/// path's MTU and, if it turns out to be higher, start sending bigger packets.
678///
679/// # MTU discovery internals
680///
681/// Quinn implements MTU discovery through DPLPMTUD (Datagram Packetization Layer Path MTU
682/// Discovery), described in [section 14.3 of RFC
683/// 9000](https://www.rfc-editor.org/rfc/rfc9000.html#section-14.3). This method consists of sending
684/// QUIC packets padded to a particular size (called PMTU probes), and waiting to see if the remote
685/// peer responds with an ACK. If an ACK is received, that means the probe arrived at the remote
/// peer, which in turn means that the network path's MTU is at least the packet's size. If the
/// probe is lost, it is sent two more times before concluding that the MTU is lower than the
/// packet's size.
689///
690/// MTU discovery runs on a schedule (e.g. every 600 seconds) specified through
691/// [`MtuDiscoveryConfig::interval`]. The first run happens right after the handshake, and
692/// subsequent discoveries are scheduled to run when the interval has elapsed, starting from the
693/// last time when MTU discovery completed.
694///
695/// Since the search space for MTUs is quite big (the smallest possible MTU is 1200, and the highest
696/// is 65527), Quinn performs a binary search to keep the number of probes as low as possible. The
697/// lower bound of the search is equal to [`TransportConfig::initial_mtu`] in the
698/// initial MTU discovery run, and is equal to the currently discovered MTU in subsequent runs. The
699/// upper bound is determined by the minimum of [`MtuDiscoveryConfig::upper_bound`] and the
700/// `max_udp_payload_size` transport parameter received from the peer during the handshake.
701///
702/// # Black hole detection
703///
704/// If, at some point, the network path no longer accepts packets of the detected size, packet loss
705/// will eventually trigger black hole detection and reset the detected MTU to 1200. In that case,
706/// MTU discovery will be triggered after [`MtuDiscoveryConfig::black_hole_cooldown`] (ignoring the
707/// timer that was set based on [`MtuDiscoveryConfig::interval`]).
708///
709/// # Interaction between peers
710///
711/// There is no guarantee that the MTU on the path between A and B is the same as the MTU of the
712/// path between B and A. Therefore, each peer in the connection needs to run MTU discovery
713/// independently in order to discover the path's MTU.
#[derive(Clone, Debug)]
pub struct MtuDiscoveryConfig {
    // Time to wait after a completed discovery run before starting the next one
    pub(crate) interval: Duration,
    // Largest UDP payload size the discovery search will probe for
    pub(crate) upper_bound: u16,
    // Smallest MTU increase worth probing for; the search stops below this granularity
    pub(crate) minimum_change: u16,
    // Delay before discovery runs again after black hole detection resets the MTU
    pub(crate) black_hole_cooldown: Duration,
}
721
722impl MtuDiscoveryConfig {
723    /// Specifies the time to wait after completing MTU discovery before starting a new MTU
724    /// discovery run.
725    ///
726    /// Defaults to 600 seconds, as recommended by [RFC
727    /// 8899](https://www.rfc-editor.org/rfc/rfc8899).
728    pub fn interval(&mut self, value: Duration) -> &mut Self {
729        self.interval = value;
730        self
731    }
732
733    /// Specifies the upper bound to the max UDP payload size that MTU discovery will search for.
734    ///
735    /// Defaults to 1452, to stay within Ethernet's MTU when using IPv4 and IPv6. The highest
736    /// allowed value is 65527, which corresponds to the maximum permitted UDP payload on IPv6.
737    ///
738    /// It is safe to use an arbitrarily high upper bound, regardless of the network path's MTU. The
739    /// only drawback is that MTU discovery might take more time to finish.
740    pub fn upper_bound(&mut self, value: u16) -> &mut Self {
741        self.upper_bound = value.min(MAX_UDP_PAYLOAD);
742        self
743    }
744
745    /// Specifies the amount of time that MTU discovery should wait after a black hole was detected
746    /// before running again. Defaults to one minute.
747    ///
748    /// Black hole detection can be spuriously triggered in case of congestion, so it makes sense to
749    /// try MTU discovery again after a short period of time.
750    pub fn black_hole_cooldown(&mut self, value: Duration) -> &mut Self {
751        self.black_hole_cooldown = value;
752        self
753    }
754
755    /// Specifies the minimum MTU change to stop the MTU discovery phase.
756    /// Defaults to 20.
757    pub fn minimum_change(&mut self, value: u16) -> &mut Self {
758        self.minimum_change = value;
759        self
760    }
761}
762
763impl Default for MtuDiscoveryConfig {
764    fn default() -> Self {
765        Self {
766            interval: Duration::from_secs(600),
767            upper_bound: 1452,
768            black_hole_cooldown: Duration::from_secs(60),
769            minimum_change: 20,
770        }
771    }
772}
773
774/// Maximum duration of inactivity to accept before timing out the connection
775///
776/// This wraps an underlying [`VarInt`], representing the duration in milliseconds. Values can be
777/// constructed by converting directly from `VarInt`, or using `TryFrom<Duration>`.
778///
779/// ```
780/// # use std::{convert::TryFrom, time::Duration};
781/// use ant_quic::config::IdleTimeout;
782/// use ant_quic::{VarIntBoundsExceeded, VarInt};
783/// # fn main() -> Result<(), VarIntBoundsExceeded> {
784/// // A `VarInt`-encoded value in milliseconds
785/// let timeout = IdleTimeout::from(VarInt::from_u32(10_000));
786///
787/// // Try to convert a `Duration` into a `VarInt`-encoded timeout
788/// let timeout = IdleTimeout::try_from(Duration::from_secs(10))?;
789/// # Ok(())
790/// # }
791/// ```
#[derive(Default, Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct IdleTimeout(VarInt); // newtype: inner `VarInt` is the timeout in milliseconds
794
795impl From<VarInt> for IdleTimeout {
796    fn from(inner: VarInt) -> Self {
797        Self(inner)
798    }
799}
800
801impl std::convert::TryFrom<Duration> for IdleTimeout {
802    type Error = VarIntBoundsExceeded;
803
804    fn try_from(timeout: Duration) -> Result<Self, Self::Error> {
805        let inner = VarInt::try_from(timeout.as_millis())?;
806        Ok(Self(inner))
807    }
808}
809
810impl fmt::Debug for IdleTimeout {
811    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
812        self.0.fmt(f)
813    }
814}