// ant_quic/config/transport.rs
1use std::{fmt, sync::Arc};
2
3use crate::{Duration, INITIAL_MTU, MAX_UDP_PAYLOAD, VarInt, VarIntBoundsExceeded, congestion};
4
/// Parameters governing the core QUIC state machine
///
/// Default values should be suitable for most internet applications. Applications protocols which
/// forbid remotely-initiated streams should set `max_concurrent_bidi_streams` and
/// `max_concurrent_uni_streams` to zero.
///
/// In some cases, performance or resource requirements can be improved by tuning these values to
/// suit a particular application and/or network connection. In particular, data window sizes can be
/// tuned for a particular expected round trip time, link capacity, and memory availability. Tuning
/// for higher bandwidths and latencies increases worst-case memory consumption, but does not impair
/// performance at lower bandwidths and latencies. The default configuration is tuned for a 100Mbps
/// link with a 100ms round trip time.
#[derive(Clone)]
pub struct TransportConfig {
    // Stream concurrency and flow control
    pub(crate) max_concurrent_bidi_streams: VarInt,
    pub(crate) max_concurrent_uni_streams: VarInt,
    /// Idle timeout in milliseconds; `None` means never time out
    pub(crate) max_idle_timeout: Option<VarInt>,
    pub(crate) stream_receive_window: VarInt,
    pub(crate) receive_window: VarInt,
    pub(crate) send_window: u64,
    /// Round-robin scheduling among equal-priority send streams
    pub(crate) send_fairness: bool,

    // Loss detection and path MTU
    pub(crate) packet_threshold: u32,
    /// Reordering time threshold, expressed as a factor of RTT
    pub(crate) time_threshold: f32,
    pub(crate) initial_rtt: Duration,
    pub(crate) initial_mtu: u16,
    pub(crate) min_mtu: u16,
    /// `None` disables DPLPMTUD (MTU discovery)
    pub(crate) mtu_discovery_config: Option<MtuDiscoveryConfig>,
    pub(crate) pad_to_mtu: bool,
    /// `None` disables requesting a particular ACK frequency from the peer
    pub(crate) ack_frequency_config: Option<AckFrequencyConfig>,

    pub(crate) persistent_congestion_threshold: u32,
    pub(crate) keep_alive_interval: Option<Duration>,
    pub(crate) crypto_buffer_size: usize,
    pub(crate) allow_spin: bool,
    /// `None` disables incoming DATAGRAM frames entirely
    pub(crate) datagram_receive_buffer_size: Option<usize>,
    pub(crate) datagram_send_buffer_size: usize,
    /// Test-only knob: disable deliberate packet-number skipping
    #[cfg(test)]
    pub(crate) deterministic_packet_numbers: bool,

    pub(crate) congestion_controller_factory: Arc<dyn congestion::ControllerFactory + Send + Sync>,

    pub(crate) enable_segmentation_offload: bool,

    /// NAT traversal configuration
    pub(crate) nat_traversal_config: Option<crate::transport_parameters::NatTraversalConfig>,
}
52
53impl TransportConfig {
54 /// Maximum number of incoming bidirectional streams that may be open concurrently
55 ///
56 /// Must be nonzero for the peer to open any bidirectional streams.
57 ///
58 /// Worst-case memory use is directly proportional to `max_concurrent_bidi_streams *
59 /// stream_receive_window`, with an upper bound proportional to `receive_window`.
60 pub fn max_concurrent_bidi_streams(&mut self, value: VarInt) -> &mut Self {
61 self.max_concurrent_bidi_streams = value;
62 self
63 }
64
65 /// Variant of `max_concurrent_bidi_streams` affecting unidirectional streams
66 pub fn max_concurrent_uni_streams(&mut self, value: VarInt) -> &mut Self {
67 self.max_concurrent_uni_streams = value;
68 self
69 }
70
71 /// Maximum duration of inactivity to accept before timing out the connection.
72 ///
73 /// The true idle timeout is the minimum of this and the peer's own max idle timeout. `None`
74 /// represents an infinite timeout. Defaults to 30 seconds.
75 ///
76 /// **WARNING**: If a peer or its network path malfunctions or acts maliciously, an infinite
77 /// idle timeout can result in permanently hung futures!
78 ///
79 /// ```
80 /// # use std::{convert::TryInto, time::Duration};
81 /// # use ant_quic::{TransportConfig, VarInt, VarIntBoundsExceeded};
82 /// # fn main() -> Result<(), VarIntBoundsExceeded> {
83 /// let mut config = TransportConfig::default();
84 ///
85 /// // Set the idle timeout as `VarInt`-encoded milliseconds
86 /// config.max_idle_timeout(Some(VarInt::from_u32(10_000).into()));
87 ///
88 /// // Set the idle timeout as a `Duration`
89 /// config.max_idle_timeout(Some(Duration::from_secs(10).try_into()?));
90 /// # Ok(())
91 /// # }
92 /// ```
93 pub fn max_idle_timeout(&mut self, value: Option<IdleTimeout>) -> &mut Self {
94 self.max_idle_timeout = value.map(|t| t.0);
95 self
96 }
97
98 /// Maximum number of bytes the peer may transmit without acknowledgement on any one stream
99 /// before becoming blocked.
100 ///
101 /// This should be set to at least the expected connection latency multiplied by the maximum
102 /// desired throughput. Setting this smaller than `receive_window` helps ensure that a single
103 /// stream doesn't monopolize receive buffers, which may otherwise occur if the application
104 /// chooses not to read from a large stream for a time while still requiring data on other
105 /// streams.
106 pub fn stream_receive_window(&mut self, value: VarInt) -> &mut Self {
107 self.stream_receive_window = value;
108 self
109 }
110
111 /// Maximum number of bytes the peer may transmit across all streams of a connection before
112 /// becoming blocked.
113 ///
114 /// This should be set to at least the expected connection latency multiplied by the maximum
115 /// desired throughput. Larger values can be useful to allow maximum throughput within a
116 /// stream while another is blocked.
117 pub fn receive_window(&mut self, value: VarInt) -> &mut Self {
118 self.receive_window = value;
119 self
120 }
121
122 /// Maximum number of bytes to transmit to a peer without acknowledgment
123 ///
124 /// Provides an upper bound on memory when communicating with peers that issue large amounts of
125 /// flow control credit. Endpoints that wish to handle large numbers of connections robustly
126 /// should take care to set this low enough to guarantee memory exhaustion does not occur if
127 /// every connection uses the entire window.
128 pub fn send_window(&mut self, value: u64) -> &mut Self {
129 self.send_window = value;
130 self
131 }
132
133 /// Whether to implement fair queuing for send streams having the same priority.
134 ///
135 /// When enabled, connections schedule data from outgoing streams having the same priority in a
136 /// round-robin fashion. When disabled, streams are scheduled in the order they are written to.
137 ///
138 /// Note that this only affects streams with the same priority. Higher priority streams always
139 /// take precedence over lower priority streams.
140 ///
141 /// Disabling fairness can reduce fragmentation and protocol overhead for workloads that use
142 /// many small streams.
143 pub fn send_fairness(&mut self, value: bool) -> &mut Self {
144 self.send_fairness = value;
145 self
146 }
147
148 /// Maximum reordering in packet number space before FACK style loss detection considers a
149 /// packet lost. Should not be less than 3, per RFC5681.
150 pub fn packet_threshold(&mut self, value: u32) -> &mut Self {
151 self.packet_threshold = value;
152 self
153 }
154
155 /// Maximum reordering in time space before time based loss detection considers a packet lost,
156 /// as a factor of RTT
157 pub fn time_threshold(&mut self, value: f32) -> &mut Self {
158 self.time_threshold = value;
159 self
160 }
161
162 /// The RTT used before an RTT sample is taken
163 pub fn initial_rtt(&mut self, value: Duration) -> &mut Self {
164 self.initial_rtt = value;
165 self
166 }
167
168 /// The initial value to be used as the maximum UDP payload size before running MTU discovery
169 /// (see [`TransportConfig::mtu_discovery_config`]).
170 ///
171 /// Must be at least 1200, which is the default, and known to be safe for typical internet
172 /// applications. Larger values are more efficient, but increase the risk of packet loss due to
173 /// exceeding the network path's IP MTU. If the provided value is higher than what the network
174 /// path actually supports, packet loss will eventually trigger black hole detection and bring
175 /// it down to [`TransportConfig::min_mtu`].
176 pub fn initial_mtu(&mut self, value: u16) -> &mut Self {
177 self.initial_mtu = value.max(INITIAL_MTU);
178 self
179 }
180
181 pub(crate) fn get_initial_mtu(&self) -> u16 {
182 self.initial_mtu.max(self.min_mtu)
183 }
184
185 /// The maximum UDP payload size guaranteed to be supported by the network.
186 ///
187 /// Must be at least 1200, which is the default, and lower than or equal to
188 /// [`TransportConfig::initial_mtu`].
189 ///
190 /// Real-world MTUs can vary according to ISP, VPN, and properties of intermediate network links
191 /// outside of either endpoint's control. Extreme care should be used when raising this value
192 /// outside of private networks where these factors are fully controlled. If the provided value
193 /// is higher than what the network path actually supports, the result will be unpredictable and
194 /// catastrophic packet loss, without a possibility of repair. Prefer
195 /// [`TransportConfig::initial_mtu`] together with
196 /// [`TransportConfig::mtu_discovery_config`] to set a maximum UDP payload size that robustly
197 /// adapts to the network.
198 pub fn min_mtu(&mut self, value: u16) -> &mut Self {
199 self.min_mtu = value.max(INITIAL_MTU);
200 self
201 }
202
203 /// Specifies the MTU discovery config (see [`MtuDiscoveryConfig`] for details).
204 ///
205 /// Enabled by default.
206 pub fn mtu_discovery_config(&mut self, value: Option<MtuDiscoveryConfig>) -> &mut Self {
207 self.mtu_discovery_config = value;
208 self
209 }
210
211 /// Pad UDP datagrams carrying application data to current maximum UDP payload size
212 ///
213 /// Disabled by default. UDP datagrams containing loss probes are exempt from padding.
214 ///
215 /// Enabling this helps mitigate traffic analysis by network observers, but it increases
216 /// bandwidth usage. Without this mitigation precise plain text size of application datagrams as
217 /// well as the total size of stream write bursts can be inferred by observers under certain
218 /// conditions. This analysis requires either an uncongested connection or application datagrams
219 /// too large to be coalesced.
220 pub fn pad_to_mtu(&mut self, value: bool) -> &mut Self {
221 self.pad_to_mtu = value;
222 self
223 }
224
225 /// Specifies the ACK frequency config (see [`AckFrequencyConfig`] for details)
226 ///
227 /// The provided configuration will be ignored if the peer does not support the acknowledgement
228 /// frequency QUIC extension.
229 ///
230 /// Defaults to `None`, which disables controlling the peer's acknowledgement frequency. Even
231 /// if set to `None`, the local side still supports the acknowledgement frequency QUIC
232 /// extension and may use it in other ways.
233 pub fn ack_frequency_config(&mut self, value: Option<AckFrequencyConfig>) -> &mut Self {
234 self.ack_frequency_config = value;
235 self
236 }
237
238 /// Number of consecutive PTOs after which network is considered to be experiencing persistent congestion.
239 pub fn persistent_congestion_threshold(&mut self, value: u32) -> &mut Self {
240 self.persistent_congestion_threshold = value;
241 self
242 }
243
244 /// Period of inactivity before sending a keep-alive packet
245 ///
246 /// Keep-alive packets prevent an inactive but otherwise healthy connection from timing out.
247 ///
248 /// `None` to disable, which is the default. Only one side of any given connection needs keep-alive
249 /// enabled for the connection to be preserved. Must be set lower than the idle_timeout of both
250 /// peers to be effective.
251 pub fn keep_alive_interval(&mut self, value: Option<Duration>) -> &mut Self {
252 self.keep_alive_interval = value;
253 self
254 }
255
256 /// Maximum quantity of out-of-order crypto layer data to buffer
257 pub fn crypto_buffer_size(&mut self, value: usize) -> &mut Self {
258 self.crypto_buffer_size = value;
259 self
260 }
261
262 /// Whether the implementation is permitted to set the spin bit on this connection
263 ///
264 /// This allows passive observers to easily judge the round trip time of a connection, which can
265 /// be useful for network administration but sacrifices a small amount of privacy.
266 pub fn allow_spin(&mut self, value: bool) -> &mut Self {
267 self.allow_spin = value;
268 self
269 }
270
271 /// Maximum number of incoming application datagram bytes to buffer, or None to disable
272 /// incoming datagrams
273 ///
274 /// The peer is forbidden to send single datagrams larger than this size. If the aggregate size
275 /// of all datagrams that have been received from the peer but not consumed by the application
276 /// exceeds this value, old datagrams are dropped until it is no longer exceeded.
277 pub fn datagram_receive_buffer_size(&mut self, value: Option<usize>) -> &mut Self {
278 self.datagram_receive_buffer_size = value;
279 self
280 }
281
282 /// Maximum number of outgoing application datagram bytes to buffer
283 ///
284 /// While datagrams are sent ASAP, it is possible for an application to generate data faster
285 /// than the link, or even the underlying hardware, can transmit them. This limits the amount of
286 /// memory that may be consumed in that case. When the send buffer is full and a new datagram is
287 /// sent, older datagrams are dropped until sufficient space is available.
288 pub fn datagram_send_buffer_size(&mut self, value: usize) -> &mut Self {
289 self.datagram_send_buffer_size = value;
290 self
291 }
292
293 /// Whether to force every packet number to be used
294 ///
295 /// By default, packet numbers are occasionally skipped to ensure peers aren't ACKing packets
296 /// before they see them.
297 #[cfg(test)]
298 pub(crate) fn deterministic_packet_numbers(&mut self, enabled: bool) -> &mut Self {
299 self.deterministic_packet_numbers = enabled;
300 self
301 }
302
303 /// How to construct new `congestion::Controller`s
304 ///
305 /// Typically the refcounted configuration of a `congestion::Controller`,
306 /// e.g. a `congestion::NewRenoConfig`.
307 ///
308 /// # Example
309 /// ```
310 /// # use std::sync::Arc;
311 /// use ant_quic::config::TransportConfig;
312 ///
313 /// let mut config = TransportConfig::default();
314 /// // The default uses CubicConfig, but custom implementations can be provided
315 /// // by implementing the congestion::ControllerFactory trait
316 /// ```
317 pub fn congestion_controller_factory(
318 &mut self,
319 factory: Arc<dyn congestion::ControllerFactory + Send + Sync + 'static>,
320 ) -> &mut Self {
321 self.congestion_controller_factory = factory;
322 self
323 }
324
325 /// Whether to use "Generic Segmentation Offload" to accelerate transmits, when supported by the
326 /// environment
327 ///
328 /// Defaults to `true`.
329 ///
330 /// GSO dramatically reduces CPU consumption when sending large numbers of packets with the same
331 /// headers, such as when transmitting bulk data on a connection. However, it is not supported
332 /// by all network interface drivers or packet inspection tools. `quinn-udp` will attempt to
333 /// disable GSO automatically when unavailable, but this can lead to spurious packet loss at
334 /// startup, temporarily degrading performance.
335 pub fn enable_segmentation_offload(&mut self, enabled: bool) -> &mut Self {
336 self.enable_segmentation_offload = enabled;
337 self
338 }
339
340 /// Configure NAT traversal capabilities for this connection
341 ///
342 /// When enabled, this connection will support QUIC NAT traversal extensions including:
343 /// - Address candidate advertisement and validation
344 /// - Coordinated hole punching through bootstrap nodes
345 /// - Multi-path connectivity testing
346 /// - Automatic path migration for NAT rebinding
347 ///
348 /// This is required for P2P connections through NATs in Autonomi networks.
349 /// Pass `None` to disable NAT traversal or use the high-level NAT traversal API
350 /// to create appropriate configurations.
351 pub fn nat_traversal_config(
352 &mut self,
353 config: Option<crate::transport_parameters::NatTraversalConfig>,
354 ) -> &mut Self {
355 self.nat_traversal_config = config;
356 self
357 }
358
359 /// Enable NAT traversal with default client configuration
360 ///
361 /// This is a convenience method that enables NAT traversal with sensible defaults
362 /// for a client endpoint. Use `nat_traversal_config()` for more control.
363 pub fn enable_nat_traversal(&mut self, enabled: bool) -> &mut Self {
364 if enabled {
365 use crate::transport_parameters::{NatTraversalConfig, NatTraversalRole};
366 self.nat_traversal_config = Some(NatTraversalConfig {
367 role: NatTraversalRole::Client,
368 max_candidates: VarInt::from_u32(10),
369 coordination_timeout: VarInt::from_u32(5000), // 5 seconds
370 max_concurrent_attempts: VarInt::from_u32(3),
371 peer_id: None, // Will be set later when peer ID is determined
372 });
373 } else {
374 self.nat_traversal_config = None;
375 }
376 self
377 }
378}
379
380impl Default for TransportConfig {
381 fn default() -> Self {
382 const EXPECTED_RTT: u32 = 100; // ms
383 const MAX_STREAM_BANDWIDTH: u32 = 12500 * 1000; // bytes/s
384 // Window size needed to avoid pipeline
385 // stalls
386 const STREAM_RWND: u32 = MAX_STREAM_BANDWIDTH / 1000 * EXPECTED_RTT;
387
388 Self {
389 max_concurrent_bidi_streams: 100u32.into(),
390 max_concurrent_uni_streams: 100u32.into(),
391 // 30 second default recommended by RFC 9308 ยง 3.2
392 max_idle_timeout: Some(VarInt(30_000)),
393 stream_receive_window: STREAM_RWND.into(),
394 receive_window: VarInt::MAX,
395 send_window: (8 * STREAM_RWND).into(),
396 send_fairness: true,
397
398 packet_threshold: 3,
399 time_threshold: 9.0 / 8.0,
400 initial_rtt: Duration::from_millis(333), // per spec, intentionally distinct from EXPECTED_RTT
401 initial_mtu: INITIAL_MTU,
402 min_mtu: INITIAL_MTU,
403 mtu_discovery_config: Some(MtuDiscoveryConfig::default()),
404 pad_to_mtu: false,
405 ack_frequency_config: None,
406
407 persistent_congestion_threshold: 3,
408 keep_alive_interval: None,
409 crypto_buffer_size: 16 * 1024,
410 allow_spin: true,
411 datagram_receive_buffer_size: Some(STREAM_RWND as usize),
412 datagram_send_buffer_size: 1024 * 1024,
413 #[cfg(test)]
414 deterministic_packet_numbers: false,
415
416 congestion_controller_factory: Arc::new(congestion::CubicConfig::default()),
417
418 enable_segmentation_offload: true,
419 nat_traversal_config: None,
420 }
421 }
422}
423
impl fmt::Debug for TransportConfig {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Exhaustively destructure `self` (no `..`) so that adding a field to
        // `TransportConfig` without updating this impl is a compile error
        // rather than a silently incomplete Debug output.
        let Self {
            max_concurrent_bidi_streams,
            max_concurrent_uni_streams,
            max_idle_timeout,
            stream_receive_window,
            receive_window,
            send_window,
            send_fairness,
            packet_threshold,
            time_threshold,
            initial_rtt,
            initial_mtu,
            min_mtu,
            mtu_discovery_config,
            pad_to_mtu,
            ack_frequency_config,
            persistent_congestion_threshold,
            keep_alive_interval,
            crypto_buffer_size,
            allow_spin,
            datagram_receive_buffer_size,
            datagram_send_buffer_size,
            #[cfg(test)]
            deterministic_packet_numbers: _, // test-only knob, deliberately not printed
            congestion_controller_factory: _, // trait object has no Debug impl
            enable_segmentation_offload,
            nat_traversal_config,
        } = self;
        fmt.debug_struct("TransportConfig")
            .field("max_concurrent_bidi_streams", max_concurrent_bidi_streams)
            .field("max_concurrent_uni_streams", max_concurrent_uni_streams)
            .field("max_idle_timeout", max_idle_timeout)
            .field("stream_receive_window", stream_receive_window)
            .field("receive_window", receive_window)
            .field("send_window", send_window)
            .field("send_fairness", send_fairness)
            .field("packet_threshold", packet_threshold)
            .field("time_threshold", time_threshold)
            .field("initial_rtt", initial_rtt)
            .field("initial_mtu", initial_mtu)
            .field("min_mtu", min_mtu)
            .field("mtu_discovery_config", mtu_discovery_config)
            .field("pad_to_mtu", pad_to_mtu)
            .field("ack_frequency_config", ack_frequency_config)
            .field(
                "persistent_congestion_threshold",
                persistent_congestion_threshold,
            )
            .field("keep_alive_interval", keep_alive_interval)
            .field("crypto_buffer_size", crypto_buffer_size)
            .field("allow_spin", allow_spin)
            .field("datagram_receive_buffer_size", datagram_receive_buffer_size)
            .field("datagram_send_buffer_size", datagram_send_buffer_size)
            // congestion_controller_factory not debug
            .field("enable_segmentation_offload", enable_segmentation_offload)
            .field("nat_traversal_config", nat_traversal_config)
            // `..` in the output marks the fields skipped above
            .finish_non_exhaustive()
    }
}
485
/// Parameters for controlling the peer's acknowledgement frequency
///
/// The parameters provided in this config will be sent to the peer at the beginning of the
/// connection, so it can take them into account when sending acknowledgements (see each parameter's
/// description for details on how it influences acknowledgement frequency).
///
/// Quinn's implementation follows the fourth draft of the
/// [QUIC Acknowledgement Frequency extension](https://datatracker.ietf.org/doc/html/draft-ietf-quic-ack-frequency-04).
/// The defaults produce behavior slightly different than the behavior without this extension,
/// because they change the way reordered packets are handled (see
/// [`AckFrequencyConfig::reordering_threshold`] for details).
#[derive(Clone, Debug)]
pub struct AckFrequencyConfig {
    /// Ack-eliciting packets the peer may receive before it must send an ACK
    pub(crate) ack_eliciting_threshold: VarInt,
    /// Requested max ACK delay; `None` keeps the peer's own transport parameter
    pub(crate) max_ack_delay: Option<Duration>,
    /// Out-of-order packet count that triggers an immediate ACK from the peer
    pub(crate) reordering_threshold: VarInt,
}
503
504impl AckFrequencyConfig {
505 /// The ack-eliciting threshold we will request the peer to use
506 ///
507 /// This threshold represents the number of ack-eliciting packets an endpoint may receive
508 /// without immediately sending an ACK.
509 ///
510 /// The remote peer should send at least one ACK frame when more than this number of
511 /// ack-eliciting packets have been received. A value of 0 results in a receiver immediately
512 /// acknowledging every ack-eliciting packet.
513 ///
514 /// Defaults to 1, which sends ACK frames for every other ack-eliciting packet.
515 pub fn ack_eliciting_threshold(&mut self, value: VarInt) -> &mut Self {
516 self.ack_eliciting_threshold = value;
517 self
518 }
519
520 /// The `max_ack_delay` we will request the peer to use
521 ///
522 /// This parameter represents the maximum amount of time that an endpoint waits before sending
523 /// an ACK when the ack-eliciting threshold hasn't been reached.
524 ///
525 /// The effective `max_ack_delay` will be clamped to be at least the peer's `min_ack_delay`
526 /// transport parameter, and at most the greater of the current path RTT or 25ms.
527 ///
528 /// Defaults to `None`, in which case the peer's original `max_ack_delay` will be used, as
529 /// obtained from its transport parameters.
530 pub fn max_ack_delay(&mut self, value: Option<Duration>) -> &mut Self {
531 self.max_ack_delay = value;
532 self
533 }
534
535 /// The reordering threshold we will request the peer to use
536 ///
537 /// This threshold represents the amount of out-of-order packets that will trigger an endpoint
538 /// to send an ACK, without waiting for `ack_eliciting_threshold` to be exceeded or for
539 /// `max_ack_delay` to be elapsed.
540 ///
541 /// A value of 0 indicates out-of-order packets do not elicit an immediate ACK. A value of 1
542 /// immediately acknowledges any packets that are received out of order (this is also the
543 /// behavior when the extension is disabled).
544 ///
545 /// It is recommended to set this value to [`TransportConfig::packet_threshold`] minus one.
546 /// Since the default value for [`TransportConfig::packet_threshold`] is 3, this value defaults
547 /// to 2.
548 pub fn reordering_threshold(&mut self, value: VarInt) -> &mut Self {
549 self.reordering_threshold = value;
550 self
551 }
552}
553
554impl Default for AckFrequencyConfig {
555 fn default() -> Self {
556 Self {
557 ack_eliciting_threshold: VarInt(1),
558 max_ack_delay: None,
559 reordering_threshold: VarInt(2),
560 }
561 }
562}
563
/// Parameters governing MTU discovery.
///
/// # The why of MTU discovery
///
/// By design, QUIC ensures during the handshake that the network path between the client and the
/// server is able to transmit unfragmented UDP packets with a body of 1200 bytes. In other words,
/// once the connection is established, we know that the network path's maximum transmission unit
/// (MTU) is of at least 1200 bytes (plus IP and UDP headers). Because of this, a QUIC endpoint can
/// split outgoing data in packets of 1200 bytes, with confidence that the network will be able to
/// deliver them (if the endpoint were to send bigger packets, they could prove too big and end up
/// being dropped).
///
/// There is, however, a significant overhead associated to sending a packet. If the same
/// information can be sent in fewer packets, that results in higher throughput. The amount of
/// packets that need to be sent is inversely proportional to the MTU: the higher the MTU, the
/// bigger the packets that can be sent, and the fewer packets that are needed to transmit a given
/// amount of bytes.
///
/// Most networks have an MTU higher than 1200. Through MTU discovery, endpoints can detect the
/// path's MTU and, if it turns out to be higher, start sending bigger packets.
///
/// # MTU discovery internals
///
/// Quinn implements MTU discovery through DPLPMTUD (Datagram Packetization Layer Path MTU
/// Discovery), described in [section 14.3 of RFC
/// 9000](https://www.rfc-editor.org/rfc/rfc9000.html#section-14.3). This method consists of sending
/// QUIC packets padded to a particular size (called PMTU probes), and waiting to see if the remote
/// peer responds with an ACK. If an ACK is received, that means the probe arrived at the remote
/// peer, which in turn means that the network path's MTU is of at least the packet's size. If the
/// probe is lost, it is sent another 2 times before concluding that the MTU is lower than the
/// packet's size.
///
/// MTU discovery runs on a schedule (e.g. every 600 seconds) specified through
/// [`MtuDiscoveryConfig::interval`]. The first run happens right after the handshake, and
/// subsequent discoveries are scheduled to run when the interval has elapsed, starting from the
/// last time when MTU discovery completed.
///
/// Since the search space for MTUs is quite big (the smallest possible MTU is 1200, and the highest
/// is 65527), Quinn performs a binary search to keep the number of probes as low as possible. The
/// lower bound of the search is equal to [`TransportConfig::initial_mtu`] in the
/// initial MTU discovery run, and is equal to the currently discovered MTU in subsequent runs. The
/// upper bound is determined by the minimum of [`MtuDiscoveryConfig::upper_bound`] and the
/// `max_udp_payload_size` transport parameter received from the peer during the handshake.
///
/// # Black hole detection
///
/// If, at some point, the network path no longer accepts packets of the detected size, packet loss
/// will eventually trigger black hole detection and reset the detected MTU to 1200. In that case,
/// MTU discovery will be triggered after [`MtuDiscoveryConfig::black_hole_cooldown`] (ignoring the
/// timer that was set based on [`MtuDiscoveryConfig::interval`]).
///
/// # Interaction between peers
///
/// There is no guarantee that the MTU on the path between A and B is the same as the MTU of the
/// path between B and A. Therefore, each peer in the connection needs to run MTU discovery
/// independently in order to discover the path's MTU.
#[derive(Clone, Debug)]
pub struct MtuDiscoveryConfig {
    /// Time to wait between completed discovery runs
    pub(crate) interval: Duration,
    /// Largest UDP payload size the search will probe for
    pub(crate) upper_bound: u16,
    /// Minimum MTU change required to keep the discovery phase going
    pub(crate) minimum_change: u16,
    /// Wait after black hole detection before probing again
    pub(crate) black_hole_cooldown: Duration,
}
627
628impl MtuDiscoveryConfig {
629 /// Specifies the time to wait after completing MTU discovery before starting a new MTU
630 /// discovery run.
631 ///
632 /// Defaults to 600 seconds, as recommended by [RFC
633 /// 8899](https://www.rfc-editor.org/rfc/rfc8899).
634 pub fn interval(&mut self, value: Duration) -> &mut Self {
635 self.interval = value;
636 self
637 }
638
639 /// Specifies the upper bound to the max UDP payload size that MTU discovery will search for.
640 ///
641 /// Defaults to 1452, to stay within Ethernet's MTU when using IPv4 and IPv6. The highest
642 /// allowed value is 65527, which corresponds to the maximum permitted UDP payload on IPv6.
643 ///
644 /// It is safe to use an arbitrarily high upper bound, regardless of the network path's MTU. The
645 /// only drawback is that MTU discovery might take more time to finish.
646 pub fn upper_bound(&mut self, value: u16) -> &mut Self {
647 self.upper_bound = value.min(MAX_UDP_PAYLOAD);
648 self
649 }
650
651 /// Specifies the amount of time that MTU discovery should wait after a black hole was detected
652 /// before running again. Defaults to one minute.
653 ///
654 /// Black hole detection can be spuriously triggered in case of congestion, so it makes sense to
655 /// try MTU discovery again after a short period of time.
656 pub fn black_hole_cooldown(&mut self, value: Duration) -> &mut Self {
657 self.black_hole_cooldown = value;
658 self
659 }
660
661 /// Specifies the minimum MTU change to stop the MTU discovery phase.
662 /// Defaults to 20.
663 pub fn minimum_change(&mut self, value: u16) -> &mut Self {
664 self.minimum_change = value;
665 self
666 }
667}
668
669impl Default for MtuDiscoveryConfig {
670 fn default() -> Self {
671 Self {
672 interval: Duration::from_secs(600),
673 upper_bound: 1452,
674 black_hole_cooldown: Duration::from_secs(60),
675 minimum_change: 20,
676 }
677 }
678}
679
/// Maximum duration of inactivity to accept before timing out the connection
///
/// This wraps an underlying [`VarInt`], representing the duration in milliseconds. Values can be
/// constructed by converting directly from `VarInt`, or using `TryFrom<Duration>`.
///
/// ```
/// # use std::{convert::TryFrom, time::Duration};
/// use ant_quic::config::IdleTimeout;
/// use ant_quic::{VarIntBoundsExceeded, VarInt};
/// # fn main() -> Result<(), VarIntBoundsExceeded> {
/// // A `VarInt`-encoded value in milliseconds
/// let timeout = IdleTimeout::from(VarInt::from_u32(10_000));
///
/// // Try to convert a `Duration` into a `VarInt`-encoded timeout
/// let timeout = IdleTimeout::try_from(Duration::from_secs(10))?;
/// # Ok(())
/// # }
/// ```
// Newtype over `VarInt`; the inner value is always milliseconds.
#[derive(Default, Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct IdleTimeout(VarInt);
700
701impl From<VarInt> for IdleTimeout {
702 fn from(inner: VarInt) -> Self {
703 Self(inner)
704 }
705}
706
707impl std::convert::TryFrom<Duration> for IdleTimeout {
708 type Error = VarIntBoundsExceeded;
709
710 fn try_from(timeout: Duration) -> Result<Self, Self::Error> {
711 let inner = VarInt::try_from(timeout.as_millis())?;
712 Ok(Self(inner))
713 }
714}
715
716impl fmt::Debug for IdleTimeout {
717 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
718 self.0.fmt(f)
719 }
720}