quinn_proto/config/transport.rs

use std::{fmt, sync::Arc};
#[cfg(feature = "qlog")]
use std::{io, sync::Mutex, time::Instant};

#[cfg(feature = "qlog")]
use qlog::streamer::QlogStreamer;

#[cfg(feature = "qlog")]
use crate::QlogStream;
use crate::{
    Duration, INITIAL_MTU, MAX_UDP_PAYLOAD, VarInt, VarIntBoundsExceeded, congestion,
    connection::qlog::QlogSink,
};

/// Parameters governing the core QUIC state machine
///
/// Default values should be suitable for most internet applications. Application protocols which
/// forbid remotely-initiated streams should set `max_concurrent_bidi_streams` and
/// `max_concurrent_uni_streams` to zero.
///
/// In some cases, performance or resource requirements can be improved by tuning these values to
/// suit a particular application and/or network connection. In particular, data window sizes can be
/// tuned for a particular expected round trip time, link capacity, and memory availability. Tuning
/// for higher bandwidths and latencies increases worst-case memory consumption, but does not impair
/// performance at lower bandwidths and latencies. The default configuration is tuned for a 100Mbps
/// link with a 100ms round trip time.
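///
/// As a rough sketch (not a tuned recommendation), a hypothetical 1 Gbps link with a 50 ms round
/// trip time has a bandwidth-delay product of about 125 MB/s * 0.05 s = 6.25 MB, which could be
/// reflected in the window settings like so:
///
/// ```
/// # use quinn_proto::{TransportConfig, VarInt};
/// let mut config = TransportConfig::default();
/// // Allow roughly one bandwidth-delay product in flight per stream and per connection;
/// // larger windows raise worst-case memory consumption.
/// config.stream_receive_window(VarInt::from_u32(6_250_000));
/// config.receive_window(VarInt::from_u32(6_250_000));
/// config.send_window(6_250_000);
/// ```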
pub struct TransportConfig {
    pub(crate) max_concurrent_bidi_streams: VarInt,
    pub(crate) max_concurrent_uni_streams: VarInt,
    pub(crate) max_idle_timeout: Option<VarInt>,
    pub(crate) stream_receive_window: VarInt,
    pub(crate) receive_window: VarInt,
    pub(crate) send_window: u64,
    pub(crate) send_fairness: bool,

    pub(crate) packet_threshold: u32,
    pub(crate) time_threshold: f32,
    pub(crate) initial_rtt: Duration,
    pub(crate) initial_mtu: u16,
    pub(crate) min_mtu: u16,
    pub(crate) mtu_discovery_config: Option<MtuDiscoveryConfig>,
    pub(crate) pad_to_mtu: bool,
    pub(crate) ack_frequency_config: Option<AckFrequencyConfig>,

    pub(crate) persistent_congestion_threshold: u32,
    pub(crate) keep_alive_interval: Option<Duration>,
    pub(crate) crypto_buffer_size: usize,
    pub(crate) allow_spin: bool,
    pub(crate) datagram_receive_buffer_size: Option<usize>,
    pub(crate) datagram_send_buffer_size: usize,
    #[cfg(test)]
    pub(crate) deterministic_packet_numbers: bool,

    pub(crate) congestion_controller_factory: Arc<dyn congestion::ControllerFactory + Send + Sync>,

    pub(crate) enable_segmentation_offload: bool,

    pub(crate) qlog_sink: QlogSink,
}

impl TransportConfig {
    /// Maximum number of incoming bidirectional streams that may be open concurrently
    ///
    /// Must be nonzero for the peer to open any bidirectional streams.
    ///
    /// Worst-case memory use is directly proportional to `max_concurrent_bidi_streams *
    /// stream_receive_window`, with an upper bound proportional to `receive_window`.
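    ///
    /// For example, an application protocol in which only the local side opens streams (as
    /// described in the struct-level docs) can disable remotely-initiated streams entirely:
    ///
    /// ```
    /// # use quinn_proto::TransportConfig;
    /// let mut config = TransportConfig::default();
    /// config.max_concurrent_bidi_streams(0u32.into());
    /// config.max_concurrent_uni_streams(0u32.into());
    /// ```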
    pub fn max_concurrent_bidi_streams(&mut self, value: VarInt) -> &mut Self {
        self.max_concurrent_bidi_streams = value;
        self
    }

    /// Variant of `max_concurrent_bidi_streams` affecting unidirectional streams
    pub fn max_concurrent_uni_streams(&mut self, value: VarInt) -> &mut Self {
        self.max_concurrent_uni_streams = value;
        self
    }

    /// Maximum duration of inactivity to accept before timing out the connection.
    ///
    /// The true idle timeout is the minimum of this and the peer's own max idle timeout. `None`
    /// represents an infinite timeout. Defaults to 30 seconds.
    ///
    /// **WARNING**: If a peer or its network path malfunctions or acts maliciously, an infinite
    /// idle timeout can result in permanently hung futures!
    ///
    /// ```
    /// # use std::{convert::TryInto, time::Duration};
    /// # use quinn_proto::{TransportConfig, VarInt, VarIntBoundsExceeded};
    /// # fn main() -> Result<(), VarIntBoundsExceeded> {
    /// let mut config = TransportConfig::default();
    ///
    /// // Set the idle timeout as `VarInt`-encoded milliseconds
    /// config.max_idle_timeout(Some(VarInt::from_u32(10_000).into()));
    ///
    /// // Set the idle timeout as a `Duration`
    /// config.max_idle_timeout(Some(Duration::from_secs(10).try_into()?));
    /// # Ok(())
    /// # }
    /// ```
    pub fn max_idle_timeout(&mut self, value: Option<IdleTimeout>) -> &mut Self {
        self.max_idle_timeout = value.map(|t| t.0);
        self
    }

    /// Maximum number of bytes the peer may transmit without acknowledgement on any one stream
    /// before becoming blocked.
    ///
    /// This should be set to at least the expected connection latency multiplied by the maximum
    /// desired throughput. Setting this smaller than `receive_window` helps ensure that a single
    /// stream doesn't monopolize receive buffers, which may otherwise occur if the application
    /// chooses not to read from a large stream for a time while still requiring data on other
    /// streams.
    pub fn stream_receive_window(&mut self, value: VarInt) -> &mut Self {
        self.stream_receive_window = value;
        self
    }

    /// Maximum number of bytes the peer may transmit across all streams of a connection before
    /// becoming blocked.
    ///
    /// This should be set to at least the expected connection latency multiplied by the maximum
    /// desired throughput. Larger values can be useful to allow maximum throughput within a
    /// stream while another is blocked.
    pub fn receive_window(&mut self, value: VarInt) -> &mut Self {
        self.receive_window = value;
        self
    }

    /// Maximum number of bytes to transmit to a peer without acknowledgment
    ///
    /// Provides an upper bound on memory when communicating with peers that issue large amounts of
    /// flow control credit. Endpoints that wish to handle large numbers of connections robustly
    /// should take care to set this low enough to guarantee memory exhaustion does not occur if
    /// every connection uses the entire window.
    pub fn send_window(&mut self, value: u64) -> &mut Self {
        self.send_window = value;
        self
    }

    /// Whether to implement fair queuing for send streams having the same priority.
    ///
    /// When enabled, connections schedule data from outgoing streams having the same priority in a
    /// round-robin fashion. When disabled, streams are scheduled in the order they are written to.
    ///
    /// Note that this only affects streams with the same priority. Higher priority streams always
    /// take precedence over lower priority streams.
    ///
    /// Disabling fairness can reduce fragmentation and protocol overhead for workloads that use
    /// many small streams.
    pub fn send_fairness(&mut self, value: bool) -> &mut Self {
        self.send_fairness = value;
        self
    }

    /// Maximum reordering in packet number space before FACK style loss detection considers a
    /// packet lost. Should not be less than 3, per RFC5681.
    pub fn packet_threshold(&mut self, value: u32) -> &mut Self {
        self.packet_threshold = value;
        self
    }

    /// Maximum reordering in time space before time based loss detection considers a packet lost,
    /// as a factor of RTT
    pub fn time_threshold(&mut self, value: f32) -> &mut Self {
        self.time_threshold = value;
        self
    }

    /// The RTT used before an RTT sample is taken
    pub fn initial_rtt(&mut self, value: Duration) -> &mut Self {
        self.initial_rtt = value;
        self
    }

    /// The initial value to be used as the maximum UDP payload size before running MTU discovery
    /// (see [`TransportConfig::mtu_discovery_config`]).
    ///
    /// Must be at least 1200, which is the default, and known to be safe for typical internet
    /// applications. Larger values are more efficient, but increase the risk of packet loss due to
    /// exceeding the network path's IP MTU. If the provided value is higher than what the network
    /// path actually supports, packet loss will eventually trigger black hole detection and bring
    /// it down to [`TransportConfig::min_mtu`].
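    ///
    /// As a sketch, on a path assumed (not verified by this library) to support larger datagrams,
    /// the starting point can be raised while leaving MTU discovery and black hole detection to
    /// correct it if the assumption turns out to be wrong:
    ///
    /// ```
    /// # use quinn_proto::TransportConfig;
    /// let mut config = TransportConfig::default();
    /// // Values below 1200 are clamped up to 1200
    /// config.initial_mtu(1400);
    /// ```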
    pub fn initial_mtu(&mut self, value: u16) -> &mut Self {
        self.initial_mtu = value.max(INITIAL_MTU);
        self
    }

    pub(crate) fn get_initial_mtu(&self) -> u16 {
        self.initial_mtu.max(self.min_mtu)
    }

    /// The maximum UDP payload size guaranteed to be supported by the network.
    ///
    /// Must be at least 1200, which is the default, and lower than or equal to
    /// [`TransportConfig::initial_mtu`].
    ///
    /// Real-world MTUs can vary according to ISP, VPN, and properties of intermediate network links
    /// outside of either endpoint's control. Extreme care should be used when raising this value
    /// outside of private networks where these factors are fully controlled. If the provided value
    /// is higher than what the network path actually supports, the result will be unpredictable and
    /// catastrophic packet loss, without a possibility of repair. Prefer
    /// [`TransportConfig::initial_mtu`] together with
    /// [`TransportConfig::mtu_discovery_config`] to set a maximum UDP payload size that robustly
    /// adapts to the network.
    pub fn min_mtu(&mut self, value: u16) -> &mut Self {
        self.min_mtu = value.max(INITIAL_MTU);
        self
    }

    /// Specifies the MTU discovery config (see [`MtuDiscoveryConfig`] for details).
    ///
    /// Enabled by default.
    pub fn mtu_discovery_config(&mut self, value: Option<MtuDiscoveryConfig>) -> &mut Self {
        self.mtu_discovery_config = value;
        self
    }

    /// Pad UDP datagrams carrying application data to the current maximum UDP payload size
    ///
    /// Disabled by default. UDP datagrams containing loss probes are exempt from padding.
    ///
    /// Enabling this helps mitigate traffic analysis by network observers, but it increases
    /// bandwidth usage. Without this mitigation, the precise plaintext size of application
    /// datagrams, as well as the total size of stream write bursts, can be inferred by observers
    /// under certain conditions. This analysis requires either an uncongested connection or
    /// application datagrams too large to be coalesced.
    pub fn pad_to_mtu(&mut self, value: bool) -> &mut Self {
        self.pad_to_mtu = value;
        self
    }

    /// Specifies the ACK frequency config (see [`AckFrequencyConfig`] for details)
    ///
    /// The provided configuration will be ignored if the peer does not support the acknowledgement
    /// frequency QUIC extension.
    ///
    /// Defaults to `None`, which disables controlling the peer's acknowledgement frequency. Even
    /// if set to `None`, the local side still supports the acknowledgement frequency QUIC
    /// extension and may use it in other ways.
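    ///
    /// A minimal sketch that enables the extension with an explicit `max_ack_delay`, leaving the
    /// other fields at their defaults (the 50 ms value is illustrative only):
    ///
    /// ```
    /// # use std::time::Duration;
    /// # use quinn_proto::{AckFrequencyConfig, TransportConfig};
    /// let mut ack_frequency = AckFrequencyConfig::default();
    /// ack_frequency.max_ack_delay(Some(Duration::from_millis(50)));
    /// let mut config = TransportConfig::default();
    /// config.ack_frequency_config(Some(ack_frequency));
    /// ```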
    pub fn ack_frequency_config(&mut self, value: Option<AckFrequencyConfig>) -> &mut Self {
        self.ack_frequency_config = value;
        self
    }

    /// Number of consecutive PTOs after which the network is considered to be experiencing persistent congestion.
    pub fn persistent_congestion_threshold(&mut self, value: u32) -> &mut Self {
        self.persistent_congestion_threshold = value;
        self
    }

    /// Period of inactivity before sending a keep-alive packet
    ///
    /// Keep-alive packets prevent an inactive but otherwise healthy connection from timing out.
    ///
    /// `None` to disable, which is the default. Only one side of any given connection needs keep-alive
    /// enabled for the connection to be preserved. Must be set lower than the idle_timeout of both
    /// peers to be effective.
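    ///
    /// For example, pairing a keep-alive with an explicit idle timeout, keeping the interval well
    /// below the timeout (the specific values are illustrative):
    ///
    /// ```
    /// # use std::time::Duration;
    /// # use quinn_proto::{TransportConfig, VarInt};
    /// let mut config = TransportConfig::default();
    /// // 30 second idle timeout, refreshed by a keep-alive every 10 seconds
    /// config.max_idle_timeout(Some(VarInt::from_u32(30_000).into()));
    /// config.keep_alive_interval(Some(Duration::from_secs(10)));
    /// ```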
    pub fn keep_alive_interval(&mut self, value: Option<Duration>) -> &mut Self {
        self.keep_alive_interval = value;
        self
    }

    /// Maximum quantity of out-of-order crypto layer data to buffer
    pub fn crypto_buffer_size(&mut self, value: usize) -> &mut Self {
        self.crypto_buffer_size = value;
        self
    }

    /// Whether the implementation is permitted to set the spin bit on this connection
    ///
    /// This allows passive observers to easily judge the round trip time of a connection, which can
    /// be useful for network administration but sacrifices a small amount of privacy.
    pub fn allow_spin(&mut self, value: bool) -> &mut Self {
        self.allow_spin = value;
        self
    }

    /// Maximum number of incoming application datagram bytes to buffer, or None to disable
    /// incoming datagrams
    ///
    /// The peer is forbidden to send single datagrams larger than this size. If the aggregate size
    /// of all datagrams that have been received from the peer but not consumed by the application
    /// exceeds this value, old datagrams are dropped until it is no longer exceeded.
    pub fn datagram_receive_buffer_size(&mut self, value: Option<usize>) -> &mut Self {
        self.datagram_receive_buffer_size = value;
        self
    }

    /// Maximum number of outgoing application datagram bytes to buffer
    ///
    /// While datagrams are sent ASAP, it is possible for an application to generate data faster
    /// than the link, or even the underlying hardware, can transmit them. This limits the amount of
    /// memory that may be consumed in that case. When the send buffer is full and a new datagram is
    /// sent, older datagrams are dropped until sufficient space is available.
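    ///
    /// As an illustration, an application exchanging small datagrams might bound both buffers
    /// explicitly (the 64 KiB sizes are arbitrary):
    ///
    /// ```
    /// # use quinn_proto::TransportConfig;
    /// let mut config = TransportConfig::default();
    /// config.datagram_receive_buffer_size(Some(64 * 1024));
    /// config.datagram_send_buffer_size(64 * 1024);
    /// ```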
    pub fn datagram_send_buffer_size(&mut self, value: usize) -> &mut Self {
        self.datagram_send_buffer_size = value;
        self
    }

    /// Whether to force every packet number to be used
    ///
    /// By default, packet numbers are occasionally skipped to ensure peers aren't ACKing packets
    /// before they see them.
    #[cfg(test)]
    pub(crate) fn deterministic_packet_numbers(&mut self, enabled: bool) -> &mut Self {
        self.deterministic_packet_numbers = enabled;
        self
    }

    /// How to construct new `congestion::Controller`s
    ///
    /// Typically the refcounted configuration of a `congestion::Controller`,
    /// e.g. a `congestion::NewRenoConfig`.
    ///
    /// # Example
    /// ```
    /// # use quinn_proto::*; use std::sync::Arc;
    /// let mut config = TransportConfig::default();
    /// config.congestion_controller_factory(Arc::new(congestion::NewRenoConfig::default()));
    /// ```
    pub fn congestion_controller_factory(
        &mut self,
        factory: Arc<dyn congestion::ControllerFactory + Send + Sync + 'static>,
    ) -> &mut Self {
        self.congestion_controller_factory = factory;
        self
    }

    /// Whether to use "Generic Segmentation Offload" to accelerate transmits, when supported by the
    /// environment
    ///
    /// Defaults to `true`.
    ///
    /// GSO dramatically reduces CPU consumption when sending large numbers of packets with the same
    /// headers, such as when transmitting bulk data on a connection. However, it is not supported
    /// by all network interface drivers or packet inspection tools. `quinn-udp` will attempt to
    /// disable GSO automatically when unavailable, but this can lead to spurious packet loss at
    /// startup, temporarily degrading performance.
    pub fn enable_segmentation_offload(&mut self, enabled: bool) -> &mut Self {
        self.enable_segmentation_offload = enabled;
        self
    }

    /// qlog capture configuration to use for a particular connection
    #[cfg(feature = "qlog")]
    pub fn qlog_stream(&mut self, stream: Option<QlogStream>) -> &mut Self {
        self.qlog_sink = stream.into();
        self
    }
}

impl Default for TransportConfig {
    fn default() -> Self {
        const EXPECTED_RTT: u32 = 100; // ms
        const MAX_STREAM_BANDWIDTH: u32 = 12500 * 1000; // bytes/s
        // Window size needed to avoid pipeline stalls
        const STREAM_RWND: u32 = MAX_STREAM_BANDWIDTH / 1000 * EXPECTED_RTT;

        Self {
            max_concurrent_bidi_streams: 100u32.into(),
            max_concurrent_uni_streams: 100u32.into(),
            // 30 second default recommended by RFC 9308 § 3.2
            max_idle_timeout: Some(VarInt(30_000)),
            stream_receive_window: STREAM_RWND.into(),
            receive_window: VarInt::MAX,
            send_window: (8 * STREAM_RWND).into(),
            send_fairness: true,

            packet_threshold: 3,
            time_threshold: 9.0 / 8.0,
            initial_rtt: Duration::from_millis(333), // per spec, intentionally distinct from EXPECTED_RTT
            initial_mtu: INITIAL_MTU,
            min_mtu: INITIAL_MTU,
            mtu_discovery_config: Some(MtuDiscoveryConfig::default()),
            pad_to_mtu: false,
            ack_frequency_config: None,

            persistent_congestion_threshold: 3,
            keep_alive_interval: None,
            crypto_buffer_size: 16 * 1024,
            allow_spin: true,
            datagram_receive_buffer_size: Some(STREAM_RWND as usize),
            datagram_send_buffer_size: 1024 * 1024,
            #[cfg(test)]
            deterministic_packet_numbers: false,

            congestion_controller_factory: Arc::new(congestion::CubicConfig::default()),

            enable_segmentation_offload: true,

            qlog_sink: QlogSink::default(),
        }
    }
}

impl fmt::Debug for TransportConfig {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self {
            max_concurrent_bidi_streams,
            max_concurrent_uni_streams,
            max_idle_timeout,
            stream_receive_window,
            receive_window,
            send_window,
            send_fairness,
            packet_threshold,
            time_threshold,
            initial_rtt,
            initial_mtu,
            min_mtu,
            mtu_discovery_config,
            pad_to_mtu,
            ack_frequency_config,
            persistent_congestion_threshold,
            keep_alive_interval,
            crypto_buffer_size,
            allow_spin,
            datagram_receive_buffer_size,
            datagram_send_buffer_size,
            #[cfg(test)]
                deterministic_packet_numbers: _,
            congestion_controller_factory: _,
            enable_segmentation_offload,
            qlog_sink,
        } = self;
        let mut s = fmt.debug_struct("TransportConfig");

        s.field("max_concurrent_bidi_streams", max_concurrent_bidi_streams)
            .field("max_concurrent_uni_streams", max_concurrent_uni_streams)
            .field("max_idle_timeout", max_idle_timeout)
            .field("stream_receive_window", stream_receive_window)
            .field("receive_window", receive_window)
            .field("send_window", send_window)
            .field("send_fairness", send_fairness)
            .field("packet_threshold", packet_threshold)
            .field("time_threshold", time_threshold)
            .field("initial_rtt", initial_rtt)
            .field("initial_mtu", initial_mtu)
            .field("min_mtu", min_mtu)
            .field("mtu_discovery_config", mtu_discovery_config)
            .field("pad_to_mtu", pad_to_mtu)
            .field("ack_frequency_config", ack_frequency_config)
            .field(
                "persistent_congestion_threshold",
                persistent_congestion_threshold,
            )
            .field("keep_alive_interval", keep_alive_interval)
            .field("crypto_buffer_size", crypto_buffer_size)
            .field("allow_spin", allow_spin)
            .field("datagram_receive_buffer_size", datagram_receive_buffer_size)
            .field("datagram_send_buffer_size", datagram_send_buffer_size)
            // congestion_controller_factory not debug
            .field("enable_segmentation_offload", enable_segmentation_offload);
        if cfg!(feature = "qlog") {
            s.field("qlog_stream", &qlog_sink.is_enabled());
        }

        s.finish_non_exhaustive()
    }
}

/// Parameters for controlling the peer's acknowledgement frequency
///
/// The parameters provided in this config will be sent to the peer at the beginning of the
/// connection, so it can take them into account when sending acknowledgements (see each parameter's
/// description for details on how it influences acknowledgement frequency).
///
/// Quinn's implementation follows the fourth draft of the
/// [QUIC Acknowledgement Frequency extension](https://datatracker.ietf.org/doc/html/draft-ietf-quic-ack-frequency-04).
/// The defaults produce behavior slightly different than the behavior without this extension,
/// because they change the way reordered packets are handled (see
/// [`AckFrequencyConfig::reordering_threshold`] for details).
#[derive(Clone, Debug)]
pub struct AckFrequencyConfig {
    pub(crate) ack_eliciting_threshold: VarInt,
    pub(crate) max_ack_delay: Option<Duration>,
    pub(crate) reordering_threshold: VarInt,
}

impl AckFrequencyConfig {
    /// The ack-eliciting threshold we will request the peer to use
    ///
    /// This threshold represents the number of ack-eliciting packets an endpoint may receive
    /// without immediately sending an ACK.
    ///
    /// The remote peer should send at least one ACK frame when more than this number of
    /// ack-eliciting packets have been received. A value of 0 results in a receiver immediately
    /// acknowledging every ack-eliciting packet.
    ///
    /// Defaults to 1, which sends ACK frames for every other ack-eliciting packet.
    pub fn ack_eliciting_threshold(&mut self, value: VarInt) -> &mut Self {
        self.ack_eliciting_threshold = value;
        self
    }

    /// The `max_ack_delay` we will request the peer to use
    ///
    /// This parameter represents the maximum amount of time that an endpoint waits before sending
    /// an ACK when the ack-eliciting threshold hasn't been reached.
    ///
    /// The effective `max_ack_delay` will be clamped to be at least the peer's `min_ack_delay`
    /// transport parameter, and at most the greater of the current path RTT or 25ms.
    ///
    /// Defaults to `None`, in which case the peer's original `max_ack_delay` will be used, as
    /// obtained from its transport parameters.
    pub fn max_ack_delay(&mut self, value: Option<Duration>) -> &mut Self {
        self.max_ack_delay = value;
        self
    }

    /// The reordering threshold we will request the peer to use
    ///
    /// This threshold represents the number of out-of-order packets that will trigger an endpoint
    /// to send an ACK, without waiting for `ack_eliciting_threshold` to be exceeded or for
    /// `max_ack_delay` to elapse.
    ///
    /// A value of 0 indicates out-of-order packets do not elicit an immediate ACK. A value of 1
    /// immediately acknowledges any packets that are received out of order (this is also the
    /// behavior when the extension is disabled).
    ///
    /// It is recommended to set this value to [`TransportConfig::packet_threshold`] minus one.
    /// Since the default value for [`TransportConfig::packet_threshold`] is 3, this value defaults
    /// to 2.
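    ///
    /// A sketch keeping the two values consistent when raising the packet threshold (the specific
    /// numbers are arbitrary):
    ///
    /// ```
    /// # use quinn_proto::{AckFrequencyConfig, TransportConfig, VarInt};
    /// let mut config = TransportConfig::default();
    /// config.packet_threshold(4);
    /// let mut ack_frequency = AckFrequencyConfig::default();
    /// // packet_threshold minus one, per the recommendation above
    /// ack_frequency.reordering_threshold(VarInt::from_u32(3));
    /// config.ack_frequency_config(Some(ack_frequency));
    /// ```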
    pub fn reordering_threshold(&mut self, value: VarInt) -> &mut Self {
        self.reordering_threshold = value;
        self
    }
}

impl Default for AckFrequencyConfig {
    fn default() -> Self {
        Self {
            ack_eliciting_threshold: VarInt(1),
            max_ack_delay: None,
            reordering_threshold: VarInt(2),
        }
    }
}

/// Configuration for qlog trace logging
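///
/// A minimal sketch of wiring this into a [`TransportConfig`] (requires the `qlog` feature; the
/// in-memory `Vec<u8>` writer here stands in for a real file or socket):
///
/// ```
/// # use quinn_proto::{QlogConfig, TransportConfig};
/// let mut qlog = QlogConfig::default();
/// qlog.writer(Box::new(Vec::<u8>::new()));
/// let mut transport = TransportConfig::default();
/// transport.qlog_stream(qlog.into_stream());
/// ```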
#[cfg(feature = "qlog")]
pub struct QlogConfig {
    writer: Option<Box<dyn io::Write + Send + Sync>>,
    title: Option<String>,
    description: Option<String>,
    start_time: Instant,
}

#[cfg(feature = "qlog")]
impl QlogConfig {
    /// Where to write a qlog `TraceSeq`
    pub fn writer(&mut self, writer: Box<dyn io::Write + Send + Sync>) -> &mut Self {
        self.writer = Some(writer);
        self
    }

    /// Title to record in the qlog capture
    pub fn title(&mut self, title: Option<String>) -> &mut Self {
        self.title = title;
        self
    }

    /// Description to record in the qlog capture
    pub fn description(&mut self, description: Option<String>) -> &mut Self {
        self.description = description;
        self
    }

    /// Epoch qlog event times are recorded relative to
    pub fn start_time(&mut self, start_time: Instant) -> &mut Self {
        self.start_time = start_time;
        self
    }

    /// Construct the [`QlogStream`] described by this configuration
    pub fn into_stream(self) -> Option<QlogStream> {
        use tracing::warn;

        let writer = self.writer?;
        let trace = qlog::TraceSeq::new(
            qlog::VantagePoint {
                name: None,
                ty: qlog::VantagePointType::Unknown,
                flow: None,
            },
            self.title.clone(),
            self.description.clone(),
            Some(qlog::Configuration {
                time_offset: Some(0.0),
                original_uris: None,
            }),
            None,
        );

        let mut streamer = QlogStreamer::new(
            qlog::QLOG_VERSION.into(),
            self.title,
            self.description,
            None,
            self.start_time,
            trace,
            qlog::events::EventImportance::Core,
            writer,
        );

        match streamer.start_log() {
            Ok(()) => Some(QlogStream(Arc::new(Mutex::new(streamer)))),
            Err(e) => {
                warn!("could not initialize endpoint qlog streamer: {e}");
                None
            }
        }
    }
}

#[cfg(feature = "qlog")]
impl Default for QlogConfig {
    fn default() -> Self {
        Self {
            writer: None,
            title: None,
            description: None,
            start_time: Instant::now(),
        }
    }
}

/// Parameters governing MTU discovery.
///
/// # The why of MTU discovery
///
/// By design, QUIC ensures during the handshake that the network path between the client and the
/// server is able to transmit unfragmented UDP packets with a body of 1200 bytes. In other words,
/// once the connection is established, we know that the network path's maximum transmission unit
/// (MTU) is at least 1200 bytes (plus IP and UDP headers). Because of this, a QUIC endpoint can
/// split outgoing data into packets of 1200 bytes, with confidence that the network will be able to
/// deliver them (if the endpoint were to send bigger packets, they could prove too big and end up
/// being dropped).
///
/// There is, however, a significant overhead associated with sending a packet. If the same
/// information can be sent in fewer packets, that results in higher throughput. The number of
/// packets that need to be sent is inversely proportional to the MTU: the higher the MTU, the
/// bigger the packets that can be sent, and the fewer packets are needed to transmit a given
/// amount of bytes.
///
/// Most networks have an MTU higher than 1200. Through MTU discovery, endpoints can detect the
/// path's MTU and, if it turns out to be higher, start sending bigger packets.
///
/// # MTU discovery internals
///
/// Quinn implements MTU discovery through DPLPMTUD (Datagram Packetization Layer Path MTU
/// Discovery), described in [section 14.3 of RFC
/// 9000](https://www.rfc-editor.org/rfc/rfc9000.html#section-14.3). This method consists of sending
/// QUIC packets padded to a particular size (called PMTU probes), and waiting to see if the remote
/// peer responds with an ACK. If an ACK is received, that means the probe arrived at the remote
/// peer, which in turn means that the network path's MTU is at least the packet's size. If the
/// probe is lost, it is sent two more times before concluding that the MTU is lower than the
/// packet's size.
///
/// MTU discovery runs on a schedule (e.g. every 600 seconds) specified through
/// [`MtuDiscoveryConfig::interval`]. The first run happens right after the handshake, and
/// subsequent discoveries are scheduled to run when the interval has elapsed, starting from the
/// last time when MTU discovery completed.
///
/// Since the search space for MTUs is quite big (the smallest possible MTU is 1200, and the highest
/// is 65527), Quinn performs a binary search to keep the number of probes as low as possible. The
/// lower bound of the search is equal to [`TransportConfig::initial_mtu`] in the
/// initial MTU discovery run, and is equal to the currently discovered MTU in subsequent runs. The
/// upper bound is determined by the minimum of [`MtuDiscoveryConfig::upper_bound`] and the
/// `max_udp_payload_size` transport parameter received from the peer during the handshake.
///
/// # Black hole detection
///
/// If, at some point, the network path no longer accepts packets of the detected size, packet loss
/// will eventually trigger black hole detection and reset the detected MTU to 1200. In that case,
/// MTU discovery will be triggered after [`MtuDiscoveryConfig::black_hole_cooldown`] (ignoring the
/// timer that was set based on [`MtuDiscoveryConfig::interval`]).
///
/// # Interaction between peers
///
/// There is no guarantee that the MTU on the path between A and B is the same as the MTU of the
/// path between B and A. Therefore, each peer in the connection needs to run MTU discovery
/// independently in order to discover the path's MTU.
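///
/// # Example
///
/// A sketch that lowers the search's upper bound and probes more often than the default (the
/// values are illustrative, not recommendations):
///
/// ```
/// # use std::time::Duration;
/// # use quinn_proto::{MtuDiscoveryConfig, TransportConfig};
/// let mut mtu = MtuDiscoveryConfig::default();
/// mtu.upper_bound(1400).interval(Duration::from_secs(120));
/// let mut config = TransportConfig::default();
/// config.mtu_discovery_config(Some(mtu));
/// ```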
#[derive(Clone, Debug)]
pub struct MtuDiscoveryConfig {
    pub(crate) interval: Duration,
    pub(crate) upper_bound: u16,
    pub(crate) minimum_change: u16,
    pub(crate) black_hole_cooldown: Duration,
}

impl MtuDiscoveryConfig {
    /// Specifies the time to wait after completing MTU discovery before starting a new MTU
    /// discovery run.
    ///
    /// Defaults to 600 seconds, as recommended by [RFC
    /// 8899](https://www.rfc-editor.org/rfc/rfc8899).
    pub fn interval(&mut self, value: Duration) -> &mut Self {
        self.interval = value;
        self
    }

    /// Specifies the upper bound to the max UDP payload size that MTU discovery will search for.
    ///
    /// Defaults to 1452, to stay within Ethernet's MTU when using IPv4 and IPv6. The highest
    /// allowed value is 65527, which corresponds to the maximum permitted UDP payload on IPv6.
    ///
    /// It is safe to use an arbitrarily high upper bound, regardless of the network path's MTU. The
    /// only drawback is that MTU discovery might take more time to finish.
    pub fn upper_bound(&mut self, value: u16) -> &mut Self {
        self.upper_bound = value.min(MAX_UDP_PAYLOAD);
        self
    }

    /// Specifies the amount of time that MTU discovery should wait after a black hole was detected
    /// before running again. Defaults to one minute.
    ///
    /// Black hole detection can be spuriously triggered in case of congestion, so it makes sense to
    /// try MTU discovery again after a short period of time.
    pub fn black_hole_cooldown(&mut self, value: Duration) -> &mut Self {
        self.black_hole_cooldown = value;
        self
    }

    /// Specifies the minimum MTU change to stop the MTU discovery phase.
    /// Defaults to 20.
    pub fn minimum_change(&mut self, value: u16) -> &mut Self {
        self.minimum_change = value;
        self
    }
}

impl Default for MtuDiscoveryConfig {
    fn default() -> Self {
        Self {
            interval: Duration::from_secs(600),
            upper_bound: 1452,
            black_hole_cooldown: Duration::from_secs(60),
            minimum_change: 20,
        }
    }
}

/// Maximum duration of inactivity to accept before timing out the connection
///
/// This wraps an underlying [`VarInt`], representing the duration in milliseconds. Values can be
/// constructed by converting directly from `VarInt`, or using `TryFrom<Duration>`.
///
/// ```
/// # use std::{convert::TryFrom, time::Duration};
/// # use quinn_proto::{IdleTimeout, VarIntBoundsExceeded, VarInt};
/// # fn main() -> Result<(), VarIntBoundsExceeded> {
/// // A `VarInt`-encoded value in milliseconds
/// let timeout = IdleTimeout::from(VarInt::from_u32(10_000));
///
/// // Try to convert a `Duration` into a `VarInt`-encoded timeout
/// let timeout = IdleTimeout::try_from(Duration::from_secs(10))?;
/// # Ok(())
/// # }
/// ```
#[derive(Default, Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct IdleTimeout(VarInt);

impl From<VarInt> for IdleTimeout {
    fn from(inner: VarInt) -> Self {
        Self(inner)
    }
}

impl std::convert::TryFrom<Duration> for IdleTimeout {
    type Error = VarIntBoundsExceeded;

    fn try_from(timeout: Duration) -> Result<Self, Self::Error> {
        let inner = VarInt::try_from(timeout.as_millis())?;
        Ok(Self(inner))
    }
}

impl fmt::Debug for IdleTimeout {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}