quinn_proto/congestion/bbr/mod.rs

use std::any::Any;
use std::fmt::Debug;
use std::sync::Arc;

use rand::{Rng, SeedableRng};

use crate::congestion::ControllerMetrics;
use crate::congestion::bbr::bw_estimation::BandwidthEstimation;
use crate::congestion::bbr::min_max::MinMax;
use crate::connection::RttEstimator;
use crate::{Duration, Instant};

use super::{BASE_DATAGRAM_SIZE, Controller, ControllerFactory};

mod bw_estimation;
mod min_max;

/// Experimental! Use at your own risk.
///
/// Aims for reduced buffer bloat and improved performance over high bandwidth-delay product networks.
/// Based on Google's quiche implementation <https://source.chromium.org/chromium/chromium/src/+/master:net/third_party/quiche/src/quic/core/congestion_control/bbr_sender.cc>
/// of BBR <https://datatracker.ietf.org/doc/html/draft-cardwell-iccrg-bbr-congestion-control>.
/// More discussion and links at <https://groups.google.com/g/bbr-dev>.
#[derive(Debug, Clone)]
pub struct Bbr {
    config: Arc<BbrConfig>,
    current_mtu: u64,
    max_bandwidth: BandwidthEstimation,
    acked_bytes: u64,
    mode: Mode,
    loss_state: LossState,
    recovery_state: RecoveryState,
    recovery_window: u64,
    is_at_full_bandwidth: bool,
    pacing_gain: f32,
    high_gain: f32,
    drain_gain: f32,
    cwnd_gain: f32,
    high_cwnd_gain: f32,
    last_cycle_start: Option<Instant>,
    current_cycle_offset: u8,
    init_cwnd: u64,
    min_cwnd: u64,
    prev_in_flight_count: u64,
    exit_probe_rtt_at: Option<Instant>,
    probe_rtt_last_started_at: Option<Instant>,
    min_rtt: Duration,
    exiting_quiescence: bool,
    pacing_rate: u64,
    max_acked_packet_number: u64,
    max_sent_packet_number: u64,
    end_recovery_at_packet_number: u64,
    cwnd: u64,
    current_round_trip_end_packet_number: u64,
    round_count: u64,
    bw_at_last_round: u64,
    round_wo_bw_gain: u64,
    ack_aggregation: AckAggregationState,
    random_number_generator: rand::rngs::StdRng,
}

impl Bbr {
    /// Construct a state using the given `config` and current MTU `current_mtu`
    pub fn new(config: Arc<BbrConfig>, current_mtu: u16) -> Self {
        let initial_window = config.initial_window;
        Self {
            config,
            current_mtu: current_mtu as u64,
            max_bandwidth: BandwidthEstimation::default(),
            acked_bytes: 0,
            mode: Mode::Startup,
            loss_state: Default::default(),
            recovery_state: RecoveryState::NotInRecovery,
            recovery_window: 0,
            is_at_full_bandwidth: false,
            pacing_gain: K_DEFAULT_HIGH_GAIN,
            high_gain: K_DEFAULT_HIGH_GAIN,
            drain_gain: 1.0 / K_DEFAULT_HIGH_GAIN,
            cwnd_gain: K_DEFAULT_HIGH_GAIN,
            high_cwnd_gain: K_DEFAULT_HIGH_GAIN,
            last_cycle_start: None,
            current_cycle_offset: 0,
            init_cwnd: initial_window,
            min_cwnd: calculate_min_window(current_mtu as u64),
            prev_in_flight_count: 0,
            exit_probe_rtt_at: None,
            probe_rtt_last_started_at: None,
            min_rtt: Default::default(),
            exiting_quiescence: false,
            pacing_rate: 0,
            max_acked_packet_number: 0,
            max_sent_packet_number: 0,
            end_recovery_at_packet_number: 0,
            cwnd: initial_window,
            current_round_trip_end_packet_number: 0,
            round_count: 0,
            bw_at_last_round: 0,
            round_wo_bw_gain: 0,
            ack_aggregation: AckAggregationState::default(),
            random_number_generator: rand::rngs::StdRng::from_os_rng(),
        }
    }

    fn enter_startup_mode(&mut self) {
        self.mode = Mode::Startup;
        self.pacing_gain = self.high_gain;
        self.cwnd_gain = self.high_cwnd_gain;
    }

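    // Note on the offset choice below: with `K_PACING_GAIN.len() == 8`,
    // `random_range(0..7)` yields a value in 0..=6, and the `rand_index >= 1`
    // adjustment maps it onto {0, 2, 3, 4, 5, 6, 7}, so the cycle never starts
    // at index 1 (the 0.75 drain gain).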
    fn enter_probe_bandwidth_mode(&mut self, now: Instant) {
        self.mode = Mode::ProbeBw;
        self.cwnd_gain = K_DERIVED_HIGH_CWNDGAIN;
        self.last_cycle_start = Some(now);
        // Pick a random offset for the gain cycle out of {0, 2..7} range. 1 is
        // excluded because in that case increased gain and decreased gain would not
        // follow each other.
        let mut rand_index = self
            .random_number_generator
            .random_range(0..K_PACING_GAIN.len() as u8 - 1);
        if rand_index >= 1 {
            rand_index += 1;
        }
        self.current_cycle_offset = rand_index;
        self.pacing_gain = K_PACING_GAIN[rand_index as usize];
    }

    fn update_recovery_state(&mut self, is_round_start: bool) {
        // Exit recovery when there are no losses for a round.
        if self.loss_state.has_losses() {
            self.end_recovery_at_packet_number = self.max_sent_packet_number;
        }
        match self.recovery_state {
            // Enter conservation on the first loss.
            RecoveryState::NotInRecovery if self.loss_state.has_losses() => {
                self.recovery_state = RecoveryState::Conservation;
                // This will cause the |recovery_window| to be set to the
                // correct value in CalculateRecoveryWindow().
                self.recovery_window = 0;
                // Since the conservation phase is meant to be lasting for a whole
                // round, extend the current round as if it were started right now.
                self.current_round_trip_end_packet_number = self.max_sent_packet_number;
            }
            RecoveryState::Growth | RecoveryState::Conservation => {
                if self.recovery_state == RecoveryState::Conservation && is_round_start {
                    self.recovery_state = RecoveryState::Growth;
                }
                // Exit recovery if appropriate.
                if !self.loss_state.has_losses()
                    && self.max_acked_packet_number > self.end_recovery_at_packet_number
                {
                    self.recovery_state = RecoveryState::NotInRecovery;
                }
            }
            _ => {}
        }
    }

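    // ProbeBw cycles through the eight `K_PACING_GAIN` phases, normally
    // advancing one phase per `min_rtt`: one probing phase at 1.25, one
    // draining phase at 0.75, then six cruising phases at 1.0. The conditions
    // below can lengthen the probing phase or end the draining phase early.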
    fn update_gain_cycle_phase(&mut self, now: Instant, in_flight: u64) {
        // In most cases, the cycle is advanced after an RTT passes.
        let mut should_advance_gain_cycling = self
            .last_cycle_start
            .map(|last_cycle_start| now.duration_since(last_cycle_start) > self.min_rtt)
            .unwrap_or(false);
        // If the pacing gain is above 1.0, the connection is trying to probe the
        // bandwidth by increasing the number of bytes in flight to at least
        // pacing_gain * BDP.  Make sure that it actually reaches the target, as
        // long as there are no losses suggesting that the buffers are not able to
        // hold that much.
        if self.pacing_gain > 1.0
            && !self.loss_state.has_losses()
            && self.prev_in_flight_count < self.get_target_cwnd(self.pacing_gain)
        {
            should_advance_gain_cycling = false;
        }

        // If pacing gain is below 1.0, the connection is trying to drain the extra
        // queue which could have been incurred by probing prior to it.  If the
        // number of bytes in flight falls down to the estimated BDP value earlier,
        // conclude that the queue has been successfully drained and exit this cycle
        // early.
        if self.pacing_gain < 1.0 && in_flight <= self.get_target_cwnd(1.0) {
            should_advance_gain_cycling = true;
        }

        if should_advance_gain_cycling {
            self.current_cycle_offset = (self.current_cycle_offset + 1) % K_PACING_GAIN.len() as u8;
            self.last_cycle_start = Some(now);
            // Stay in low gain mode until the target BDP is hit.  Low gain mode
            // will be exited immediately when the target BDP is achieved.
            if DRAIN_TO_TARGET
                && self.pacing_gain < 1.0
                && (K_PACING_GAIN[self.current_cycle_offset as usize] - 1.0).abs() < f32::EPSILON
                && in_flight > self.get_target_cwnd(1.0)
            {
                return;
            }
            self.pacing_gain = K_PACING_GAIN[self.current_cycle_offset as usize];
        }
    }

    fn maybe_exit_startup_or_drain(&mut self, now: Instant, in_flight: u64) {
        if self.mode == Mode::Startup && self.is_at_full_bandwidth {
            self.mode = Mode::Drain;
            self.pacing_gain = self.drain_gain;
            self.cwnd_gain = self.high_cwnd_gain;
        }
        if self.mode == Mode::Drain && in_flight <= self.get_target_cwnd(1.0) {
            self.enter_probe_bandwidth_mode(now);
        }
    }

    fn is_min_rtt_expired(&self, now: Instant, app_limited: bool) -> bool {
        !app_limited
            && self
                .probe_rtt_last_started_at
                .map(|last| now.saturating_duration_since(last) > Duration::from_secs(10))
                .unwrap_or(true)
    }

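    // ProbeRtt entry and exit: once the min_rtt sample is more than 10 seconds
    // old (see `is_min_rtt_expired`), sending is throttled toward the
    // probe-RTT window; after bytes in flight drop to that level the mode is
    // held for at least 200 ms and until a round boundary, then the controller
    // returns to Startup or ProbeBw.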
    fn maybe_enter_or_exit_probe_rtt(
        &mut self,
        now: Instant,
        is_round_start: bool,
        bytes_in_flight: u64,
        app_limited: bool,
    ) {
        let min_rtt_expired = self.is_min_rtt_expired(now, app_limited);
        if min_rtt_expired && !self.exiting_quiescence && self.mode != Mode::ProbeRtt {
            self.mode = Mode::ProbeRtt;
            self.pacing_gain = 1.0;
            // Do not decide on the time to exit ProbeRtt until the
            // |bytes_in_flight| is at the target small value.
            self.exit_probe_rtt_at = None;
            self.probe_rtt_last_started_at = Some(now);
        }

        if self.mode == Mode::ProbeRtt {
            if self.exit_probe_rtt_at.is_none() {
                // If the window has reached the appropriate size, schedule exiting
                // ProbeRtt.  The CWND during ProbeRtt is
                // kMinimumCongestionWindow, but we allow an extra packet since QUIC
                // checks CWND before sending a packet.
                if bytes_in_flight < self.get_probe_rtt_cwnd() + self.current_mtu {
                    const K_PROBE_RTT_TIME: Duration = Duration::from_millis(200);
                    self.exit_probe_rtt_at = Some(now + K_PROBE_RTT_TIME);
                }
            } else if is_round_start && now >= self.exit_probe_rtt_at.unwrap() {
                if !self.is_at_full_bandwidth {
                    self.enter_startup_mode();
                } else {
                    self.enter_probe_bandwidth_mode(now);
                }
            }
        }

        self.exiting_quiescence = false;
    }

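    // The target window is gain * BDP, where BDP = bandwidth estimate
    // (bytes/s) * min_rtt. For example (illustrative numbers only): with an
    // estimate of 12_500_000 bytes/s (100 Mbit/s) and a 50 ms min_rtt, the BDP
    // is 625_000 bytes, so a gain of 2.0 yields a 1_250_000-byte target.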
    fn get_target_cwnd(&self, gain: f32) -> u64 {
        let bw = self.max_bandwidth.get_estimate();
        let bdp = self.min_rtt.as_micros() as u64 * bw;
        let bdpf = bdp as f64;
        let cwnd = ((gain as f64 * bdpf) / 1_000_000f64) as u64;
        // BDP estimate will be zero if no bandwidth samples are available yet.
        if cwnd == 0 {
            return self.init_cwnd;
        }
        cwnd.max(self.min_cwnd)
    }

    fn get_probe_rtt_cwnd(&self) -> u64 {
        const K_MODERATE_PROBE_RTT_MULTIPLIER: f32 = 0.75;
        if PROBE_RTT_BASED_ON_BDP {
            return self.get_target_cwnd(K_MODERATE_PROBE_RTT_MULTIPLIER);
        }
        self.min_cwnd
    }

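    // The pacing rate is the bandwidth estimate scaled by the current pacing
    // gain, e.g. an estimate of 1_000_000 bytes/s with a 1.25 probing gain
    // paces at 1_250_000 bytes/s. Before full bandwidth is reached the rate is
    // never decreased, and the very first rate is seeded from
    // init_cwnd / min_rtt.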
    fn calculate_pacing_rate(&mut self) {
        let bw = self.max_bandwidth.get_estimate();
        if bw == 0 {
            return;
        }
        let target_rate = (bw as f64 * self.pacing_gain as f64) as u64;
        if self.is_at_full_bandwidth {
            self.pacing_rate = target_rate;
            return;
        }

        // Pace at the rate of initial_window / RTT as soon as RTT measurements are
        // available.
        if self.pacing_rate == 0 && self.min_rtt.as_nanos() != 0 {
            self.pacing_rate =
                BandwidthEstimation::bw_from_delta(self.init_cwnd, self.min_rtt).unwrap();
            return;
        }

        // Do not decrease the pacing rate during startup.
        if self.pacing_rate < target_rate {
            self.pacing_rate = target_rate;
        }
    }

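    // The congestion window tracks cwnd_gain * BDP plus an allowance for ack
    // aggregation, but it is only grown by `bytes_acked` per call so that it
    // approaches the target gradually rather than jumping to it.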
    fn calculate_cwnd(&mut self, bytes_acked: u64, excess_acked: u64) {
        if self.mode == Mode::ProbeRtt {
            return;
        }
        let mut target_window = self.get_target_cwnd(self.cwnd_gain);
        if self.is_at_full_bandwidth {
            // Add the max recently measured ack aggregation to CWND.
            target_window += self.ack_aggregation.max_ack_height.get();
        } else {
            // Add the most recent excess acked.  Because CWND never decreases in
            // STARTUP, this will automatically create a very localized max filter.
            target_window += excess_acked;
        }
        // Instead of immediately setting the target CWND as the new one, BBR grows
        // the CWND towards |target_window| by only increasing it |bytes_acked| at a
        // time.
        if self.is_at_full_bandwidth {
            self.cwnd = target_window.min(self.cwnd + bytes_acked);
        } else if self.cwnd < target_window || self.acked_bytes < self.init_cwnd {
            // If the connection is not yet out of startup phase, do not decrease
            // the window.
            self.cwnd += bytes_acked;
        }

        // Enforce the limits on the congestion window.
        if self.cwnd < self.min_cwnd {
            self.cwnd = self.min_cwnd;
        }
    }

    fn calculate_recovery_window(&mut self, bytes_acked: u64, bytes_lost: u64, in_flight: u64) {
        if !self.recovery_state.in_recovery() {
            return;
        }
        // Set up the initial recovery window.
        if self.recovery_window == 0 {
            self.recovery_window = self.min_cwnd.max(in_flight + bytes_acked);
            return;
        }

        // Remove losses from the recovery window, while accounting for a potential
        // integer underflow.
        if self.recovery_window >= bytes_lost {
            self.recovery_window -= bytes_lost;
        } else {
            // k_max_segment_size = current_mtu
            self.recovery_window = self.current_mtu;
        }
        // In CONSERVATION mode, just subtracting losses is sufficient.  In GROWTH,
        // release additional |bytes_acked| to achieve a slow-start-like behavior.
        if self.recovery_state == RecoveryState::Growth {
            self.recovery_window += bytes_acked;
        }

        // Sanity checks.  Ensure that we always allow to send at least an MSS or
        // |bytes_acked| in response, whichever is larger.
        self.recovery_window = self
            .recovery_window
            .max(in_flight + bytes_acked)
            .max(self.min_cwnd);
    }

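    // Startup is considered finished once the bandwidth estimate has failed to
    // grow by at least 25% (K_STARTUP_GROWTH_TARGET) for three consecutive
    // rounds (K_ROUND_TRIPS_WITHOUT_GROWTH_BEFORE_EXITING_STARTUP), or once the
    // controller is in recovery.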
    /// <https://datatracker.ietf.org/doc/html/draft-cardwell-iccrg-bbr-congestion-control#section-4.3.2.2>
    fn check_if_full_bw_reached(&mut self, app_limited: bool) {
        if app_limited {
            return;
        }
        let target = (self.bw_at_last_round as f64 * K_STARTUP_GROWTH_TARGET as f64) as u64;
        let bw = self.max_bandwidth.get_estimate();
        if bw >= target {
            self.bw_at_last_round = bw;
            self.round_wo_bw_gain = 0;
            self.ack_aggregation.max_ack_height.reset();
            return;
        }

        self.round_wo_bw_gain += 1;
        if self.round_wo_bw_gain >= K_ROUND_TRIPS_WITHOUT_GROWTH_BEFORE_EXITING_STARTUP as u64
            || (self.recovery_state.in_recovery())
        {
            self.is_at_full_bandwidth = true;
        }
    }
}

impl Controller for Bbr {
    fn on_sent(&mut self, now: Instant, bytes: u64, last_packet_number: u64) {
        self.max_sent_packet_number = last_packet_number;
        self.max_bandwidth.on_sent(now, bytes);
    }

    fn on_ack(
        &mut self,
        now: Instant,
        sent: Instant,
        bytes: u64,
        app_limited: bool,
        rtt: &RttEstimator,
    ) {
        self.max_bandwidth
            .on_ack(now, sent, bytes, self.round_count, app_limited);
        self.acked_bytes += bytes;
        if self.is_min_rtt_expired(now, app_limited) || self.min_rtt > rtt.min() {
            self.min_rtt = rtt.min();
        }
    }

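    // Runs after a batch of acknowledgments has been processed: it detects
    // round-trip boundaries via packet numbers, drives the mode transitions
    // above, and then recomputes the pacing rate, cwnd and recovery window.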
    fn on_end_acks(
        &mut self,
        now: Instant,
        in_flight: u64,
        app_limited: bool,
        largest_packet_num_acked: Option<u64>,
    ) {
        let bytes_acked = self.max_bandwidth.bytes_acked_this_window();
        let excess_acked = self.ack_aggregation.update_ack_aggregation_bytes(
            bytes_acked,
            now,
            self.round_count,
            self.max_bandwidth.get_estimate(),
        );
        self.max_bandwidth.end_acks(self.round_count, app_limited);
        if let Some(largest_acked_packet) = largest_packet_num_acked {
            self.max_acked_packet_number = largest_acked_packet;
        }

        let mut is_round_start = false;
        if bytes_acked > 0 {
            is_round_start =
                self.max_acked_packet_number > self.current_round_trip_end_packet_number;
            if is_round_start {
                self.current_round_trip_end_packet_number = self.max_sent_packet_number;
                self.round_count += 1;
            }
        }

        self.update_recovery_state(is_round_start);

        if self.mode == Mode::ProbeBw {
            self.update_gain_cycle_phase(now, in_flight);
        }

        if is_round_start && !self.is_at_full_bandwidth {
            self.check_if_full_bw_reached(app_limited);
        }

        self.maybe_exit_startup_or_drain(now, in_flight);

        self.maybe_enter_or_exit_probe_rtt(now, is_round_start, in_flight, app_limited);

        // After the model is updated, recalculate the pacing rate and congestion window.
        self.calculate_pacing_rate();
        self.calculate_cwnd(bytes_acked, excess_acked);
        self.calculate_recovery_window(bytes_acked, self.loss_state.lost_bytes, in_flight);

        self.prev_in_flight_count = in_flight;
        self.loss_state.reset();
    }

    fn on_congestion_event(
        &mut self,
        _now: Instant,
        _sent: Instant,
        _is_persistent_congestion: bool,
        lost_bytes: u64,
    ) {
        self.loss_state.lost_bytes += lost_bytes;
    }

    fn on_mtu_update(&mut self, new_mtu: u16) {
        self.current_mtu = new_mtu as u64;
        self.min_cwnd = calculate_min_window(self.current_mtu);
        self.init_cwnd = self.config.initial_window.max(self.min_cwnd);
        self.cwnd = self.cwnd.max(self.min_cwnd);
    }

    fn window(&self) -> u64 {
        if self.mode == Mode::ProbeRtt {
            return self.get_probe_rtt_cwnd();
        } else if self.recovery_state.in_recovery() && self.mode != Mode::Startup {
            return self.cwnd.min(self.recovery_window);
        }
        self.cwnd
    }

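    // `pacing_rate` is tracked internally in bytes per second; the metrics
    // below report it multiplied by 8, i.e. in bits per second.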
    fn metrics(&self) -> ControllerMetrics {
        ControllerMetrics {
            congestion_window: self.window(),
            ssthresh: None,
            pacing_rate: Some(self.pacing_rate * 8),
        }
    }

    fn clone_box(&self) -> Box<dyn Controller> {
        Box::new(self.clone())
    }

    fn initial_window(&self) -> u64 {
        self.config.initial_window
    }

    fn into_any(self: Box<Self>) -> Box<dyn Any> {
        self
    }
}

/// Configuration for the [`Bbr`] congestion controller
#[derive(Debug, Clone)]
pub struct BbrConfig {
    initial_window: u64,
}

impl BbrConfig {
    /// Default limit on the amount of outstanding data in bytes.
    ///
    /// Recommended value: `min(10 * max_datagram_size, max(2 * max_datagram_size, 14720))`
    pub fn initial_window(&mut self, value: u64) -> &mut Self {
        self.initial_window = value;
        self
    }
}

impl Default for BbrConfig {
    fn default() -> Self {
        Self {
            initial_window: K_MAX_INITIAL_CONGESTION_WINDOW * BASE_DATAGRAM_SIZE,
        }
    }
}

impl ControllerFactory for BbrConfig {
    fn build(self: Arc<Self>, _now: Instant, current_mtu: u16) -> Box<dyn Controller> {
        Box::new(Bbr::new(self, current_mtu))
    }
}

#[derive(Debug, Default, Copy, Clone)]
struct AckAggregationState {
    max_ack_height: MinMax,
    aggregation_epoch_start_time: Option<Instant>,
    aggregation_epoch_bytes: u64,
}

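// Tracks how many bytes were acknowledged beyond what the current bandwidth
// estimate predicts for the elapsed time. Illustrative example: at an
// estimate of 1_000_000 bytes/s, 30 ms into an epoch roughly 30_000 bytes are
// expected; anything acknowledged beyond that counts as ack aggregation and
// feeds the `max_ack_height` filter.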
impl AckAggregationState {
    fn update_ack_aggregation_bytes(
        &mut self,
        newly_acked_bytes: u64,
        now: Instant,
        round: u64,
        max_bandwidth: u64,
    ) -> u64 {
        // Compute how many bytes are expected to be delivered, assuming max
        // bandwidth is correct.
        let expected_bytes_acked = max_bandwidth
            * now
                .saturating_duration_since(self.aggregation_epoch_start_time.unwrap_or(now))
                .as_micros() as u64
            / 1_000_000;

        // Reset the current aggregation epoch as soon as the ack arrival rate is
        // less than or equal to the max bandwidth.
        if self.aggregation_epoch_bytes <= expected_bytes_acked {
            // Reset to start measuring a new aggregation epoch.
            self.aggregation_epoch_bytes = newly_acked_bytes;
            self.aggregation_epoch_start_time = Some(now);
            return 0;
        }

        // Compute how many extra bytes were delivered vs max bandwidth.
        // Include the bytes most recently acknowledged to account for stretch acks.
        self.aggregation_epoch_bytes += newly_acked_bytes;
        let diff = self.aggregation_epoch_bytes - expected_bytes_acked;
        self.max_ack_height.update_max(round, diff);
        diff
    }
}

#[derive(Debug, Clone, Copy, Eq, PartialEq)]
enum Mode {
    // Startup phase of the connection.
    Startup,
    // After achieving the highest possible bandwidth during the startup, lower
    // the pacing rate in order to drain the queue.
    Drain,
    // Cruising mode.
    ProbeBw,
    // Temporarily slow down sending in order to empty the buffer and measure
    // the real minimum RTT.
    ProbeRtt,
}

// Indicates how the congestion control limits the amount of bytes in flight.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
enum RecoveryState {
    // Do not limit.
    NotInRecovery,
    // Allow an extra outstanding byte for each byte acknowledged.
    Conservation,
    // Allow two extra outstanding bytes for each byte acknowledged (slow
    // start).
    Growth,
}

impl RecoveryState {
    pub(super) fn in_recovery(&self) -> bool {
        !matches!(self, Self::NotInRecovery)
    }
}

#[derive(Debug, Clone, Default)]
struct LossState {
    lost_bytes: u64,
}

impl LossState {
    pub(super) fn reset(&mut self) {
        self.lost_bytes = 0;
    }

    pub(super) fn has_losses(&self) -> bool {
        self.lost_bytes != 0
    }
}

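// The floor for the congestion window: four full-size datagrams at the
// current MTU (e.g. 4 * 1_200 = 4_800 bytes with a 1200-byte datagram).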
fn calculate_min_window(current_mtu: u64) -> u64 {
    4 * current_mtu
}

// The gain used for the STARTUP, equal to 2/ln(2).
const K_DEFAULT_HIGH_GAIN: f32 = 2.885;
// The newly derived CWND gain for STARTUP, 2.
const K_DERIVED_HIGH_CWNDGAIN: f32 = 2.0;
// The cycle of gains used during the ProbeBw stage.
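// Index 0 probes at 1.25, index 1 drains at 0.75, and the remaining six
// phases cruise at the estimated bandwidth.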
const K_PACING_GAIN: [f32; 8] = [1.25, 0.75, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0];

const K_STARTUP_GROWTH_TARGET: f32 = 1.25;
const K_ROUND_TRIPS_WITHOUT_GROWTH_BEFORE_EXITING_STARTUP: u8 = 3;

// Do not allow initial congestion window to be greater than 200 packets.
const K_MAX_INITIAL_CONGESTION_WINDOW: u64 = 200;

const PROBE_RTT_BASED_ON_BDP: bool = true;
const DRAIN_TO_TARGET: bool = true;