ant_quic/config/transport.rs
use std::{fmt, sync::Arc};

use crate::{Duration, INITIAL_MTU, MAX_UDP_PAYLOAD, VarInt, VarIntBoundsExceeded, congestion};

/// Parameters governing the core QUIC state machine
///
/// Default values should be suitable for most internet applications. Application protocols that
/// forbid remotely-initiated streams should set `max_concurrent_bidi_streams` and
/// `max_concurrent_uni_streams` to zero.
///
/// In some cases, performance or resource requirements can be improved by tuning these values to
/// suit a particular application and/or network connection. In particular, data window sizes can be
/// tuned for a particular expected round trip time, link capacity, and memory availability. Tuning
/// for higher bandwidths and latencies increases worst-case memory consumption, but does not impair
/// performance at lower bandwidths and latencies. The default configuration is tuned for a 100Mbps
/// link with a 100ms round trip time.
#[derive(Clone)]
pub struct TransportConfig {
    pub(crate) max_concurrent_bidi_streams: VarInt,
    pub(crate) max_concurrent_uni_streams: VarInt,
    pub(crate) max_idle_timeout: Option<VarInt>,
    pub(crate) stream_receive_window: VarInt,
    pub(crate) receive_window: VarInt,
    pub(crate) send_window: u64,
    pub(crate) send_fairness: bool,

    pub(crate) packet_threshold: u32,
    pub(crate) time_threshold: f32,
    pub(crate) initial_rtt: Duration,
    pub(crate) initial_mtu: u16,
    pub(crate) min_mtu: u16,
    pub(crate) mtu_discovery_config: Option<MtuDiscoveryConfig>,
    pub(crate) pad_to_mtu: bool,
    pub(crate) ack_frequency_config: Option<AckFrequencyConfig>,

    pub(crate) persistent_congestion_threshold: u32,
    pub(crate) keep_alive_interval: Option<Duration>,
    pub(crate) crypto_buffer_size: usize,
    pub(crate) allow_spin: bool,
    pub(crate) datagram_receive_buffer_size: Option<usize>,
    pub(crate) datagram_send_buffer_size: usize,
    #[cfg(test)]
    pub(crate) deterministic_packet_numbers: bool,

    pub(crate) congestion_controller_factory: Arc<dyn congestion::ControllerFactory + Send + Sync>,

    pub(crate) enable_segmentation_offload: bool,

    /// NAT traversal configuration
    pub(crate) nat_traversal_config: Option<crate::transport_parameters::NatTraversalConfig>,

    /// Address discovery configuration
    pub(crate) address_discovery_config:
        Option<crate::transport_parameters::AddressDiscoveryConfig>,

    /// Post-Quantum Cryptography algorithms configuration
    pub(crate) pqc_algorithms: Option<crate::transport_parameters::PqcAlgorithms>,
}

impl TransportConfig {
    /// Maximum number of incoming bidirectional streams that may be open concurrently
    ///
    /// Must be nonzero for the peer to open any bidirectional streams.
    ///
    /// Worst-case memory use is directly proportional to `max_concurrent_bidi_streams *
    /// stream_receive_window`, with an upper bound proportional to `receive_window`.
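    ///
    /// For example, an application protocol that forbids remotely-initiated streams entirely
    /// can lock both limits to zero (a minimal sketch):
    ///
    /// ```
    /// use ant_quic::TransportConfig;
    ///
    /// let mut config = TransportConfig::default();
    /// config.max_concurrent_bidi_streams(0u32.into());
    /// config.max_concurrent_uni_streams(0u32.into());
    /// ```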
    pub fn max_concurrent_bidi_streams(&mut self, value: VarInt) -> &mut Self {
        self.max_concurrent_bidi_streams = value;
        self
    }

    /// Variant of `max_concurrent_bidi_streams` affecting unidirectional streams
    pub fn max_concurrent_uni_streams(&mut self, value: VarInt) -> &mut Self {
        self.max_concurrent_uni_streams = value;
        self
    }

    /// Maximum duration of inactivity to accept before timing out the connection.
    ///
    /// The true idle timeout is the minimum of this and the peer's own max idle timeout. `None`
    /// represents an infinite timeout. Defaults to 30 seconds.
    ///
    /// **WARNING**: If a peer or its network path malfunctions or acts maliciously, an infinite
    /// idle timeout can result in permanently hung futures!
    ///
    /// ```
    /// # use std::{convert::TryInto, time::Duration};
    /// # use ant_quic::{TransportConfig, VarInt, VarIntBoundsExceeded};
    /// # fn main() -> Result<(), VarIntBoundsExceeded> {
    /// let mut config = TransportConfig::default();
    ///
    /// // Set the idle timeout as `VarInt`-encoded milliseconds
    /// config.max_idle_timeout(Some(VarInt::from_u32(10_000).into()));
    ///
    /// // Set the idle timeout as a `Duration`
    /// config.max_idle_timeout(Some(Duration::from_secs(10).try_into()?));
    /// # Ok(())
    /// # }
    /// ```
    pub fn max_idle_timeout(&mut self, value: Option<IdleTimeout>) -> &mut Self {
        self.max_idle_timeout = value.map(|t| t.0);
        self
    }

    /// Maximum number of bytes the peer may transmit without acknowledgement on any one stream
    /// before becoming blocked.
    ///
    /// This should be set to at least the expected connection latency multiplied by the maximum
    /// desired throughput. Setting this smaller than `receive_window` helps ensure that a single
    /// stream doesn't monopolize receive buffers, which may otherwise occur if the application
    /// chooses not to read from a large stream for a time while still requiring data on other
    /// streams.
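    ///
    /// As a rough sizing sketch (illustrative figures, not a recommendation): sustaining
    /// 10 MB/s on one stream over a 100 ms round trip requires a window of at least
    /// 10_000_000 bytes/s * 0.1 s = 1_000_000 bytes.
    ///
    /// ```
    /// use ant_quic::{TransportConfig, VarInt};
    ///
    /// let mut config = TransportConfig::default();
    /// // Window sized for ~10 MB/s at 100 ms RTT (hypothetical targets)
    /// config.stream_receive_window(VarInt::from_u32(1_000_000));
    /// ```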
    pub fn stream_receive_window(&mut self, value: VarInt) -> &mut Self {
        self.stream_receive_window = value;
        self
    }

    /// Maximum number of bytes the peer may transmit across all streams of a connection before
    /// becoming blocked.
    ///
    /// This should be set to at least the expected connection latency multiplied by the maximum
    /// desired throughput. Larger values can be useful to allow maximum throughput within a
    /// stream while another is blocked.
    pub fn receive_window(&mut self, value: VarInt) -> &mut Self {
        self.receive_window = value;
        self
    }

    /// Maximum number of bytes to transmit to a peer without acknowledgment
    ///
    /// Provides an upper bound on memory when communicating with peers that issue large amounts of
    /// flow control credit. Endpoints that wish to handle large numbers of connections robustly
    /// should take care to set this low enough to guarantee memory exhaustion does not occur if
    /// every connection uses the entire window.
    pub fn send_window(&mut self, value: u64) -> &mut Self {
        self.send_window = value;
        self
    }

    /// Whether to implement fair queuing for send streams having the same priority.
    ///
    /// When enabled, connections schedule data from outgoing streams having the same priority in a
    /// round-robin fashion. When disabled, streams are scheduled in the order they are written to.
    ///
    /// Note that this only affects streams with the same priority. Higher priority streams always
    /// take precedence over lower priority streams.
    ///
    /// Disabling fairness can reduce fragmentation and protocol overhead for workloads that use
    /// many small streams.
    pub fn send_fairness(&mut self, value: bool) -> &mut Self {
        self.send_fairness = value;
        self
    }

    /// Maximum reordering in packet number space before FACK style loss detection considers a
    /// packet lost. Should not be less than 3, per RFC 5681.
    pub fn packet_threshold(&mut self, value: u32) -> &mut Self {
        self.packet_threshold = value;
        self
    }

    /// Maximum reordering in time space before time based loss detection considers a packet lost,
    /// as a factor of RTT
    pub fn time_threshold(&mut self, value: f32) -> &mut Self {
        self.time_threshold = value;
        self
    }

    /// The RTT used before an RTT sample is taken
    pub fn initial_rtt(&mut self, value: Duration) -> &mut Self {
        self.initial_rtt = value;
        self
    }

    /// The initial value to be used as the maximum UDP payload size before running MTU discovery
    /// (see [`TransportConfig::mtu_discovery_config`]).
    ///
    /// Must be at least 1200, which is the default, and known to be safe for typical internet
    /// applications. Larger values are more efficient, but increase the risk of packet loss due to
    /// exceeding the network path's IP MTU. If the provided value is higher than what the network
    /// path actually supports, packet loss will eventually trigger black hole detection and bring
    /// it down to [`TransportConfig::min_mtu`].
    pub fn initial_mtu(&mut self, value: u16) -> &mut Self {
        self.initial_mtu = value.max(INITIAL_MTU);
        self
    }

    pub(crate) fn get_initial_mtu(&self) -> u16 {
        self.initial_mtu.max(self.min_mtu)
    }

    /// The maximum UDP payload size guaranteed to be supported by the network.
    ///
    /// Must be at least 1200, which is the default, and lower than or equal to
    /// [`TransportConfig::initial_mtu`].
    ///
    /// Real-world MTUs can vary according to ISP, VPN, and properties of intermediate network links
    /// outside of either endpoint's control. Extreme care should be used when raising this value
    /// outside of private networks where these factors are fully controlled. If the provided value
    /// is higher than what the network path actually supports, the result will be unpredictable and
    /// catastrophic packet loss, without a possibility of repair. Prefer
    /// [`TransportConfig::initial_mtu`] together with
    /// [`TransportConfig::mtu_discovery_config`] to set a maximum UDP payload size that robustly
    /// adapts to the network.
    pub fn min_mtu(&mut self, value: u16) -> &mut Self {
        self.min_mtu = value.max(INITIAL_MTU);
        self
    }

    /// Specifies the MTU discovery config (see [`MtuDiscoveryConfig`] for details).
    ///
    /// Enabled by default.
    pub fn mtu_discovery_config(&mut self, value: Option<MtuDiscoveryConfig>) -> &mut Self {
        self.mtu_discovery_config = value;
        self
    }

    /// Pad UDP datagrams carrying application data to current maximum UDP payload size
    ///
    /// Disabled by default. UDP datagrams containing loss probes are exempt from padding.
    ///
    /// Enabling this helps mitigate traffic analysis by network observers, but it increases
    /// bandwidth usage. Without this mitigation, the precise plaintext size of application
    /// datagrams, as well as the total size of stream write bursts, can be inferred by observers
    /// under certain conditions. This analysis requires either an uncongested connection or
    /// application datagrams too large to be coalesced.
    pub fn pad_to_mtu(&mut self, value: bool) -> &mut Self {
        self.pad_to_mtu = value;
        self
    }

    /// Specifies the ACK frequency config (see [`AckFrequencyConfig`] for details)
    ///
    /// The provided configuration will be ignored if the peer does not support the acknowledgement
    /// frequency QUIC extension.
    ///
    /// Defaults to `None`, which disables controlling the peer's acknowledgement frequency. Even
    /// if set to `None`, the local side still supports the acknowledgement frequency QUIC
    /// extension and may use it in other ways.
    pub fn ack_frequency_config(&mut self, value: Option<AckFrequencyConfig>) -> &mut Self {
        self.ack_frequency_config = value;
        self
    }

    /// Number of consecutive PTOs after which the network is considered to be experiencing persistent congestion.
    pub fn persistent_congestion_threshold(&mut self, value: u32) -> &mut Self {
        self.persistent_congestion_threshold = value;
        self
    }

    /// Period of inactivity before sending a keep-alive packet
    ///
    /// Keep-alive packets prevent an inactive but otherwise healthy connection from timing out.
    ///
    /// `None` to disable, which is the default. Only one side of any given connection needs
    /// keep-alive enabled for the connection to be preserved. Must be set lower than the
    /// `max_idle_timeout` of both peers to be effective.
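    ///
    /// For instance, pairing a 10 second keep-alive with the default 30 second idle timeout
    /// (an illustrative sketch):
    ///
    /// ```
    /// use std::time::Duration;
    /// use ant_quic::TransportConfig;
    ///
    /// let mut config = TransportConfig::default();
    /// config.keep_alive_interval(Some(Duration::from_secs(10)));
    /// ```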
    pub fn keep_alive_interval(&mut self, value: Option<Duration>) -> &mut Self {
        self.keep_alive_interval = value;
        self
    }

    /// Maximum quantity of out-of-order crypto layer data to buffer
    pub fn crypto_buffer_size(&mut self, value: usize) -> &mut Self {
        self.crypto_buffer_size = value;
        self
    }

    /// Whether the implementation is permitted to set the spin bit on this connection
    ///
    /// This allows passive observers to easily judge the round trip time of a connection, which can
    /// be useful for network administration but sacrifices a small amount of privacy.
    pub fn allow_spin(&mut self, value: bool) -> &mut Self {
        self.allow_spin = value;
        self
    }

    /// Maximum number of incoming application datagram bytes to buffer, or None to disable
    /// incoming datagrams
    ///
    /// The peer is forbidden to send single datagrams larger than this size. If the aggregate size
    /// of all datagrams that have been received from the peer but not consumed by the application
    /// exceeds this value, old datagrams are dropped until it is no longer exceeded.
    pub fn datagram_receive_buffer_size(&mut self, value: Option<usize>) -> &mut Self {
        self.datagram_receive_buffer_size = value;
        self
    }

    /// Maximum number of outgoing application datagram bytes to buffer
    ///
    /// While datagrams are sent ASAP, it is possible for an application to generate data faster
    /// than the link, or even the underlying hardware, can transmit them. This limits the amount of
    /// memory that may be consumed in that case. When the send buffer is full and a new datagram is
    /// sent, older datagrams are dropped until sufficient space is available.
    pub fn datagram_send_buffer_size(&mut self, value: usize) -> &mut Self {
        self.datagram_send_buffer_size = value;
        self
    }

    /// Whether to force every packet number to be used
    ///
    /// By default, packet numbers are occasionally skipped to ensure peers aren't ACKing packets
    /// before they see them.
    #[cfg(test)]
    pub(crate) fn deterministic_packet_numbers(&mut self, enabled: bool) -> &mut Self {
        self.deterministic_packet_numbers = enabled;
        self
    }

    /// How to construct new `congestion::Controller`s
    ///
    /// Typically the refcounted configuration of a `congestion::Controller`,
    /// e.g. a `congestion::NewRenoConfig`.
    ///
    /// # Example
    /// ```
    /// # use std::sync::Arc;
    /// use ant_quic::config::TransportConfig;
    /// use ant_quic::congestion::CubicConfig;
    ///
    /// let mut config = TransportConfig::default();
    /// // Cubic is the default; any factory implementing `congestion::ControllerFactory`
    /// // (for example a custom implementation) can be supplied the same way.
    /// config.congestion_controller_factory(Arc::new(CubicConfig::default()));
    /// ```
    pub fn congestion_controller_factory(
        &mut self,
        factory: Arc<dyn congestion::ControllerFactory + Send + Sync + 'static>,
    ) -> &mut Self {
        self.congestion_controller_factory = factory;
        self
    }

    /// Whether to use "Generic Segmentation Offload" to accelerate transmits, when supported by the
    /// environment
    ///
    /// Defaults to `true`.
    ///
    /// GSO dramatically reduces CPU consumption when sending large numbers of packets with the same
    /// headers, such as when transmitting bulk data on a connection. However, it is not supported
    /// by all network interface drivers or packet inspection tools. `quinn-udp` will attempt to
    /// disable GSO automatically when unavailable, but this can lead to spurious packet loss at
    /// startup, temporarily degrading performance.
    pub fn enable_segmentation_offload(&mut self, enabled: bool) -> &mut Self {
        self.enable_segmentation_offload = enabled;
        self
    }

    /// Configure NAT traversal capabilities for this connection
    ///
    /// When enabled, this connection will support QUIC NAT traversal extensions including:
    /// - Address candidate advertisement and validation
    /// - Coordinated hole punching through bootstrap nodes
    /// - Multi-path connectivity testing
    /// - Automatic path migration for NAT rebinding
    ///
    /// This is required for P2P connections through NATs in Autonomi networks.
    /// Pass `None` to disable NAT traversal or use the high-level NAT traversal API
    /// to create appropriate configurations.
    pub fn nat_traversal_config(
        &mut self,
        config: Option<crate::transport_parameters::NatTraversalConfig>,
    ) -> &mut Self {
        self.nat_traversal_config = config;
        self
    }

    /// Enable NAT traversal with default client configuration
    ///
    /// This is a convenience method that enables NAT traversal with sensible defaults
    /// for a client endpoint. Use `nat_traversal_config()` for more control.
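    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use ant_quic::TransportConfig;
    ///
    /// let mut config = TransportConfig::default();
    /// // Advertise client-side NAT traversal support with the default configuration
    /// config.enable_nat_traversal(true);
    /// ```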
    pub fn enable_nat_traversal(&mut self, enabled: bool) -> &mut Self {
        if enabled {
            use crate::transport_parameters::NatTraversalConfig;
            // Default to client support (empty parameter)
            self.nat_traversal_config = Some(NatTraversalConfig::ClientSupport);
        } else {
            self.nat_traversal_config = None;
        }
        self
    }

    /// Set the address discovery configuration
    ///
    /// This enables the QUIC Address Discovery extension (draft-ietf-quic-address-discovery-00)
    /// which allows endpoints to share observed addresses with each other.
    pub fn address_discovery_config(
        &mut self,
        config: Option<crate::transport_parameters::AddressDiscoveryConfig>,
    ) -> &mut Self {
        self.address_discovery_config = config;
        self
    }

    /// Enable address discovery with default configuration
    ///
    /// This is a convenience method that enables address discovery with sensible defaults.
    /// Use `address_discovery_config()` for more control.
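    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use ant_quic::TransportConfig;
    ///
    /// let mut config = TransportConfig::default();
    /// // Willing to both send and receive address observations (the default configuration)
    /// config.enable_address_discovery(true);
    /// ```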
    pub fn enable_address_discovery(&mut self, enabled: bool) -> &mut Self {
        if enabled {
            use crate::transport_parameters::AddressDiscoveryConfig;
            // Default configuration - willing to both send and receive address observations
            self.address_discovery_config = Some(AddressDiscoveryConfig::SendAndReceive);
        } else {
            self.address_discovery_config = None;
        }
        self
    }

    /// Set the Post-Quantum Cryptography algorithms configuration
    ///
    /// This advertises which PQC algorithms are supported by this endpoint.
    /// When both endpoints support PQC, they can negotiate the use of quantum-resistant algorithms.
    pub fn pqc_algorithms(
        &mut self,
        algorithms: Option<crate::transport_parameters::PqcAlgorithms>,
    ) -> &mut Self {
        self.pqc_algorithms = algorithms;
        self
    }

    /// Enable Post-Quantum Cryptography with default algorithms
    ///
    /// This is a convenience method that enables all standard PQC algorithms.
    /// Use `pqc_algorithms()` for more control over which algorithms to support.
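    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use ant_quic::TransportConfig;
    ///
    /// let mut config = TransportConfig::default();
    /// // Advertise all standard PQC algorithms supported by this endpoint
    /// config.enable_pqc(true);
    /// ```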
    pub fn enable_pqc(&mut self, enabled: bool) -> &mut Self {
        if enabled {
            use crate::transport_parameters::PqcAlgorithms;
            // Enable all standard algorithms
            self.pqc_algorithms = Some(PqcAlgorithms {
                ml_kem_768: true,
                ml_dsa_65: true,
                hybrid_x25519_ml_kem: true,
                hybrid_ed25519_ml_dsa: true,
            });
        } else {
            self.pqc_algorithms = None;
        }
        self
    }

    /// Get the address discovery configuration (read-only)
    pub fn get_address_discovery_config(
        &self,
    ) -> Option<&crate::transport_parameters::AddressDiscoveryConfig> {
        self.address_discovery_config.as_ref()
    }

    /// Get the PQC algorithms configuration (read-only)
    pub fn get_pqc_algorithms(&self) -> Option<&crate::transport_parameters::PqcAlgorithms> {
        self.pqc_algorithms.as_ref()
    }

    /// Get the NAT traversal configuration (read-only)
    pub fn get_nat_traversal_config(
        &self,
    ) -> Option<&crate::transport_parameters::NatTraversalConfig> {
        self.nat_traversal_config.as_ref()
    }
}

impl Default for TransportConfig {
    fn default() -> Self {
        const EXPECTED_RTT: u32 = 100; // ms
        const MAX_STREAM_BANDWIDTH: u32 = 12500 * 1000; // bytes/s
        // Window size needed to avoid pipeline stalls
        const STREAM_RWND: u32 = MAX_STREAM_BANDWIDTH / 1000 * EXPECTED_RTT;

        Self {
            max_concurrent_bidi_streams: 100u32.into(),
            max_concurrent_uni_streams: 100u32.into(),
            // 30 second default recommended by RFC 9308 § 3.2
            max_idle_timeout: Some(VarInt(30_000)),
            stream_receive_window: STREAM_RWND.into(),
            receive_window: VarInt::MAX,
            send_window: (8 * STREAM_RWND).into(),
            send_fairness: true,

            packet_threshold: 3,
            time_threshold: 9.0 / 8.0,
            initial_rtt: Duration::from_millis(333), // per spec, intentionally distinct from EXPECTED_RTT
            initial_mtu: INITIAL_MTU,
            min_mtu: INITIAL_MTU,
            mtu_discovery_config: Some(MtuDiscoveryConfig::default()),
            pad_to_mtu: false,
            ack_frequency_config: None,

            persistent_congestion_threshold: 3,
            keep_alive_interval: None,
            crypto_buffer_size: 16 * 1024,
            allow_spin: true,
            datagram_receive_buffer_size: Some(STREAM_RWND as usize),
            datagram_send_buffer_size: 1024 * 1024,
            #[cfg(test)]
            deterministic_packet_numbers: false,

            congestion_controller_factory: Arc::new(congestion::CubicConfig::default()),

            enable_segmentation_offload: true,
            nat_traversal_config: None,
            address_discovery_config: None,
            pqc_algorithms: None,
        }
    }
}

impl fmt::Debug for TransportConfig {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self {
            max_concurrent_bidi_streams,
            max_concurrent_uni_streams,
            max_idle_timeout,
            stream_receive_window,
            receive_window,
            send_window,
            send_fairness,
            packet_threshold,
            time_threshold,
            initial_rtt,
            initial_mtu,
            min_mtu,
            mtu_discovery_config,
            pad_to_mtu,
            ack_frequency_config,
            persistent_congestion_threshold,
            keep_alive_interval,
            crypto_buffer_size,
            allow_spin,
            datagram_receive_buffer_size,
            datagram_send_buffer_size,
            #[cfg(test)]
            deterministic_packet_numbers: _,
            congestion_controller_factory: _,
            enable_segmentation_offload,
            nat_traversal_config,
            address_discovery_config,
            pqc_algorithms,
        } = self;
        fmt.debug_struct("TransportConfig")
            .field("max_concurrent_bidi_streams", max_concurrent_bidi_streams)
            .field("max_concurrent_uni_streams", max_concurrent_uni_streams)
            .field("max_idle_timeout", max_idle_timeout)
            .field("stream_receive_window", stream_receive_window)
            .field("receive_window", receive_window)
            .field("send_window", send_window)
            .field("send_fairness", send_fairness)
            .field("packet_threshold", packet_threshold)
            .field("time_threshold", time_threshold)
            .field("initial_rtt", initial_rtt)
            .field("initial_mtu", initial_mtu)
            .field("min_mtu", min_mtu)
            .field("mtu_discovery_config", mtu_discovery_config)
            .field("pad_to_mtu", pad_to_mtu)
            .field("ack_frequency_config", ack_frequency_config)
            .field(
                "persistent_congestion_threshold",
                persistent_congestion_threshold,
            )
            .field("keep_alive_interval", keep_alive_interval)
            .field("crypto_buffer_size", crypto_buffer_size)
            .field("allow_spin", allow_spin)
            .field("datagram_receive_buffer_size", datagram_receive_buffer_size)
            .field("datagram_send_buffer_size", datagram_send_buffer_size)
            // congestion_controller_factory not debug
            .field("enable_segmentation_offload", enable_segmentation_offload)
            .field("nat_traversal_config", nat_traversal_config)
            .field("address_discovery_config", address_discovery_config)
            .field("pqc_algorithms", pqc_algorithms)
            .finish_non_exhaustive()
    }
}

/// Parameters for controlling the peer's acknowledgement frequency
///
/// The parameters provided in this config will be sent to the peer at the beginning of the
/// connection, so it can take them into account when sending acknowledgements (see each parameter's
/// description for details on how it influences acknowledgement frequency).
///
/// Quinn's implementation follows the fourth draft of the
/// [QUIC Acknowledgement Frequency extension](https://datatracker.ietf.org/doc/html/draft-ietf-quic-ack-frequency-04).
/// The defaults produce behavior slightly different than the behavior without this extension,
/// because they change the way reordered packets are handled (see
/// [`AckFrequencyConfig::reordering_threshold`] for details).
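///
/// # Example
///
/// A minimal sketch with illustrative values:
///
/// ```
/// use ant_quic::VarInt;
/// use ant_quic::config::{AckFrequencyConfig, TransportConfig};
///
/// let mut ack_frequency = AckFrequencyConfig::default();
/// // Allow the peer to receive up to 4 ack-eliciting packets before it must send an ACK
/// ack_frequency.ack_eliciting_threshold(VarInt::from_u32(4));
///
/// let mut config = TransportConfig::default();
/// config.ack_frequency_config(Some(ack_frequency));
/// ```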
#[derive(Clone, Debug)]
pub struct AckFrequencyConfig {
    pub(crate) ack_eliciting_threshold: VarInt,
    pub(crate) max_ack_delay: Option<Duration>,
    pub(crate) reordering_threshold: VarInt,
}

impl AckFrequencyConfig {
    /// The ack-eliciting threshold we will request the peer to use
    ///
    /// This threshold represents the number of ack-eliciting packets an endpoint may receive
    /// without immediately sending an ACK.
    ///
    /// The remote peer should send at least one ACK frame when more than this number of
    /// ack-eliciting packets have been received. A value of 0 results in a receiver immediately
    /// acknowledging every ack-eliciting packet.
    ///
    /// Defaults to 1, which sends ACK frames for every other ack-eliciting packet.
    pub fn ack_eliciting_threshold(&mut self, value: VarInt) -> &mut Self {
        self.ack_eliciting_threshold = value;
        self
    }

    /// The `max_ack_delay` we will request the peer to use
    ///
    /// This parameter represents the maximum amount of time that an endpoint waits before sending
    /// an ACK when the ack-eliciting threshold hasn't been reached.
    ///
    /// The effective `max_ack_delay` will be clamped to be at least the peer's `min_ack_delay`
    /// transport parameter, and at most the greater of the current path RTT or 25ms.
    ///
    /// Defaults to `None`, in which case the peer's original `max_ack_delay` will be used, as
    /// obtained from its transport parameters.
    pub fn max_ack_delay(&mut self, value: Option<Duration>) -> &mut Self {
        self.max_ack_delay = value;
        self
    }

    /// The reordering threshold we will request the peer to use
    ///
    /// This threshold represents the number of out-of-order packets that will trigger an endpoint
    /// to send an ACK, without waiting for `ack_eliciting_threshold` to be exceeded or for
    /// `max_ack_delay` to elapse.
    ///
    /// A value of 0 indicates out-of-order packets do not elicit an immediate ACK. A value of 1
    /// immediately acknowledges any packets that are received out of order (this is also the
    /// behavior when the extension is disabled).
    ///
    /// It is recommended to set this value to [`TransportConfig::packet_threshold`] minus one.
    /// Since the default value for [`TransportConfig::packet_threshold`] is 3, this value defaults
    /// to 2.
    pub fn reordering_threshold(&mut self, value: VarInt) -> &mut Self {
        self.reordering_threshold = value;
        self
    }
}

impl Default for AckFrequencyConfig {
    fn default() -> Self {
        Self {
            ack_eliciting_threshold: VarInt(1),
            max_ack_delay: None,
            reordering_threshold: VarInt(2),
        }
    }
}

/// Parameters governing MTU discovery.
///
/// # The why of MTU discovery
///
/// By design, QUIC ensures during the handshake that the network path between the client and the
/// server is able to transmit unfragmented UDP packets with a body of 1200 bytes. In other words,
/// once the connection is established, we know that the network path's maximum transmission unit
/// (MTU) is at least 1200 bytes (plus IP and UDP headers). Because of this, a QUIC endpoint can
/// split outgoing data into packets of 1200 bytes, with confidence that the network will be able
/// to deliver them (if the endpoint were to send bigger packets, they could prove too big and end
/// up being dropped).
///
/// There is, however, a significant overhead associated with sending a packet. If the same
/// information can be sent in fewer packets, that results in higher throughput. The number of
/// packets that need to be sent is inversely proportional to the MTU: the higher the MTU, the
/// bigger the packets that can be sent, and the fewer packets are needed to transmit a given
/// amount of bytes.
///
/// Most networks have an MTU higher than 1200. Through MTU discovery, endpoints can detect the
/// path's MTU and, if it turns out to be higher, start sending bigger packets.
///
/// # MTU discovery internals
///
/// Quinn implements MTU discovery through DPLPMTUD (Datagram Packetization Layer Path MTU
/// Discovery), described in [section 14.3 of RFC
/// 9000](https://www.rfc-editor.org/rfc/rfc9000.html#section-14.3). This method consists of sending
/// QUIC packets padded to a particular size (called PMTU probes), and waiting to see if the remote
/// peer responds with an ACK. If an ACK is received, that means the probe arrived at the remote
/// peer, which in turn means that the network path's MTU is at least the packet's size. If the
/// probe is lost, it is sent another two times before concluding that the MTU is lower than the
/// packet's size.
///
/// MTU discovery runs on a schedule (e.g. every 600 seconds) specified through
/// [`MtuDiscoveryConfig::interval`]. The first run happens right after the handshake, and
/// subsequent discoveries are scheduled to run when the interval has elapsed, starting from the
/// last time when MTU discovery completed.
///
/// Since the search space for MTUs is quite big (the smallest possible MTU is 1200, and the highest
/// is 65527), Quinn performs a binary search to keep the number of probes as low as possible. The
/// lower bound of the search is equal to [`TransportConfig::initial_mtu`] in the
/// initial MTU discovery run, and is equal to the currently discovered MTU in subsequent runs. The
/// upper bound is determined by the minimum of [`MtuDiscoveryConfig::upper_bound`] and the
/// `max_udp_payload_size` transport parameter received from the peer during the handshake.
///
/// # Black hole detection
///
/// If, at some point, the network path no longer accepts packets of the detected size, packet loss
/// will eventually trigger black hole detection and reset the detected MTU to 1200. In that case,
/// MTU discovery will be triggered after [`MtuDiscoveryConfig::black_hole_cooldown`] (ignoring the
/// timer that was set based on [`MtuDiscoveryConfig::interval`]).
///
/// # Interaction between peers
///
/// There is no guarantee that the MTU on the path between A and B is the same as the MTU of the
/// path between B and A. Therefore, each peer in the connection needs to run MTU discovery
/// independently in order to discover the path's MTU.
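///
/// # Example
///
/// A minimal sketch of wiring a customized discovery config into a [`TransportConfig`]
/// (the values shown are illustrative only):
///
/// ```
/// use std::time::Duration;
/// use ant_quic::config::{MtuDiscoveryConfig, TransportConfig};
///
/// let mut mtu = MtuDiscoveryConfig::default();
/// // Cap the search at a conservative Ethernet-friendly payload and probe every 5 minutes
/// mtu.upper_bound(1400).interval(Duration::from_secs(300));
///
/// let mut config = TransportConfig::default();
/// config.mtu_discovery_config(Some(mtu));
/// ```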
#[derive(Clone, Debug)]
pub struct MtuDiscoveryConfig {
    pub(crate) interval: Duration,
    pub(crate) upper_bound: u16,
    pub(crate) minimum_change: u16,
    pub(crate) black_hole_cooldown: Duration,
}

impl MtuDiscoveryConfig {
    /// Specifies the time to wait after completing MTU discovery before starting a new MTU
    /// discovery run.
    ///
    /// Defaults to 600 seconds, as recommended by [RFC
    /// 8899](https://www.rfc-editor.org/rfc/rfc8899).
    pub fn interval(&mut self, value: Duration) -> &mut Self {
        self.interval = value;
        self
    }

    /// Specifies the upper bound to the max UDP payload size that MTU discovery will search for.
    ///
    /// Defaults to 1452, to stay within Ethernet's MTU when using IPv4 and IPv6. The highest
    /// allowed value is 65527, which corresponds to the maximum permitted UDP payload on IPv6.
    ///
    /// It is safe to use an arbitrarily high upper bound, regardless of the network path's MTU. The
    /// only drawback is that MTU discovery might take more time to finish.
    pub fn upper_bound(&mut self, value: u16) -> &mut Self {
        self.upper_bound = value.min(MAX_UDP_PAYLOAD);
        self
    }

    /// Specifies the amount of time that MTU discovery should wait after a black hole was detected
    /// before running again. Defaults to one minute.
    ///
    /// Black hole detection can be spuriously triggered in case of congestion, so it makes sense to
    /// try MTU discovery again after a short period of time.
    pub fn black_hole_cooldown(&mut self, value: Duration) -> &mut Self {
        self.black_hole_cooldown = value;
        self
    }

    /// Specifies the minimum MTU change to stop the MTU discovery phase.
    /// Defaults to 20.
    pub fn minimum_change(&mut self, value: u16) -> &mut Self {
        self.minimum_change = value;
        self
    }
}

impl Default for MtuDiscoveryConfig {
    fn default() -> Self {
        Self {
            interval: Duration::from_secs(600),
            upper_bound: 1452,
            black_hole_cooldown: Duration::from_secs(60),
            minimum_change: 20,
        }
    }
}

/// Maximum duration of inactivity to accept before timing out the connection
///
/// This wraps an underlying [`VarInt`], representing the duration in milliseconds. Values can be
/// constructed by converting directly from `VarInt`, or using `TryFrom<Duration>`.
///
/// ```
/// # use std::{convert::TryFrom, time::Duration};
/// use ant_quic::config::IdleTimeout;
/// use ant_quic::{VarIntBoundsExceeded, VarInt};
/// # fn main() -> Result<(), VarIntBoundsExceeded> {
/// // A `VarInt`-encoded value in milliseconds
/// let timeout = IdleTimeout::from(VarInt::from_u32(10_000));
///
/// // Try to convert a `Duration` into a `VarInt`-encoded timeout
/// let timeout = IdleTimeout::try_from(Duration::from_secs(10))?;
/// # Ok(())
/// # }
/// ```
#[derive(Default, Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct IdleTimeout(VarInt);

impl From<VarInt> for IdleTimeout {
    fn from(inner: VarInt) -> Self {
        Self(inner)
    }
}

impl std::convert::TryFrom<Duration> for IdleTimeout {
    type Error = VarIntBoundsExceeded;

    fn try_from(timeout: Duration) -> Result<Self, Self::Error> {
        let inner = VarInt::try_from(timeout.as_millis())?;
        Ok(Self(inner))
    }
}

impl fmt::Debug for IdleTimeout {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}