s2n_quic_core/recovery/bbr/
probe_bw.rs

1// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
2// SPDX-License-Identifier: Apache-2.0
3
4use crate::{
5    counter::{Counter, Saturating},
6    event,
7    event::IntoEvent,
8    random,
9    recovery::{
10        bandwidth::RateSample,
11        bbr,
12        bbr::{
13            congestion, data_rate, data_volume, round, ApplicationSettings, BbrCongestionController,
14        },
15        congestion_controller::Publisher,
16    },
17    time::Timestamp,
18};
19use core::time::Duration;
20use num_rational::Ratio;
21use num_traits::One;
22
/// Maximum value of `bw_probe_up_rounds`, capping the exponential growth of the
/// inflight_hi raise slope during the `ProbeBW_UP` phase (see `raise_inflight_hi_slope`)
const MAX_BW_PROBE_UP_ROUNDS: u8 = 30;

/// Max number of packet-timed rounds to wait before probing for bandwidth
const MAX_BW_PROBE_ROUNDS: u8 = 63;

/// The number of discontiguous bursts of loss required before inflight_hi is lowered
/// Value from:
/// https://source.chromium.org/chromium/chromium/src/+/main:net/third_party/quiche/src/quiche/quic/core/quic_protocol_flags_list.h;l=139;bpv=1;bpt=0
pub(super) const PROBE_BW_FULL_LOSS_COUNT: u8 = 2;

//= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.4
//# After ProbeBW_REFILL refills the pipe, ProbeBW_UP probes for possible increases in
//# available bandwidth by using a BBR.pacing_gain of 1.25, sending faster than the current
//# estimated available bandwidth.
const UP_PACING_GAIN: Ratio<u64> = Ratio::new_raw(5, 4);

/// Cwnd gain used in the Probe BW state
///
/// This value is defined in the table in
/// https://www.ietf.org/archive/id/draft-cardwell-iccrg-bbr-congestion-control-02.html#section-4.6.1
pub(crate) const CWND_GAIN: Ratio<u64> = Ratio::new_raw(2, 1);
44
//= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3
//# a BBR flow in ProbeBW mode cycles through the four
//# Probe bw states - DOWN, CRUISE, REFILL, and UP
/// The current phase within the ProbeBW cycle
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) enum CyclePhase {
    /// Send slower than the network is delivering data, to reduce the amount of data in flight
    Down,
    /// Send at the same rate the network is delivering data
    Cruise,
    /// Try to fully utilize the network bottleneck without creating any significant queue pressure
    Refill,
    /// Probe for possible increases in available bandwidth
    Up,
}
59
60impl CyclePhase {
61    /// The dynamic gain factor used to scale BBR.bw to produce BBR.pacing_rate
62    pub fn pacing_gain(&self, app_settings: &ApplicationSettings) -> Ratio<u64> {
63        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.1
64        //# In the ProbeBW_DOWN phase of the cycle, a BBR flow pursues the deceleration tactic,
65        //# to try to send slower than the network is delivering data, to reduce the amount of data
66        //# in flight, with all of the standard motivations for the deceleration tactic (discussed
67        //# in "State Machine Tactics", above). It does this by switching to a BBR.pacing_gain of
68        //# 0.9, sending at 90% of BBR.bw.
69        const DOWN_PACING_GAIN: Ratio<u64> = Ratio::new_raw(9, 10);
70        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.3
71        //# During ProbeBW_REFILL BBR uses a BBR.pacing_gain of 1.0, to send at a rate that
72        //# matches the current estimated available bandwidth
73        //
74        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.2
75        //# In the ProbeBW_CRUISE phase of the cycle, a BBR flow pursues the "cruising" tactic
76        //# (discussed in "State Machine Tactics", above), attempting to send at the same rate
77        //# the network is delivering data. It tries to match the sending rate to the flow's
78        //# current available bandwidth, to try to achieve high utilization of the available
79        //# bandwidth without increasing queue pressure. It does this by switching to a
80        //# pacing_gain of 1.0, sending at 100% of BBR.bw.
81        const CRUISE_REFILL_PACING_GAIN: Ratio<u64> = Ratio::new_raw(1, 1);
82
83        let probe_bw_up_pacing_gain = app_settings
84            .probe_bw_up_pacing_gain()
85            .unwrap_or(UP_PACING_GAIN);
86
87        match self {
88            CyclePhase::Down => DOWN_PACING_GAIN,
89            CyclePhase::Cruise | CyclePhase::Refill => CRUISE_REFILL_PACING_GAIN,
90            CyclePhase::Up => probe_bw_up_pacing_gain,
91        }
92    }
93
94    /// Transition to the given `new_phase`
95    fn transition_to<Pub: Publisher>(&mut self, new_phase: CyclePhase, publisher: &mut Pub) {
96        if cfg!(debug_assertions) {
97            match new_phase {
98                CyclePhase::Down => assert_eq!(*self, CyclePhase::Up),
99                CyclePhase::Cruise => assert_eq!(*self, CyclePhase::Down),
100                CyclePhase::Refill => {
101                    assert!(*self == CyclePhase::Down || *self == CyclePhase::Cruise)
102                }
103                CyclePhase::Up => assert_eq!(*self, CyclePhase::Refill),
104            }
105        }
106
107        publisher.on_bbr_state_changed(new_phase.into_event());
108
109        *self = new_phase;
110    }
111}
112
113impl IntoEvent<event::builder::BbrState> for CyclePhase {
114    #[inline]
115    fn into_event(self) -> event::builder::BbrState {
116        use event::builder::BbrState;
117        match self {
118            CyclePhase::Down => BbrState::ProbeBwDown,
119            CyclePhase::Cruise => BbrState::ProbeBwCruise,
120            CyclePhase::Refill => BbrState::ProbeBwRefill,
121            CyclePhase::Up => BbrState::ProbeBwUp,
122        }
123    }
124}
125
/// How the incoming ACK stream relates to our bandwidth probing
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) enum AckPhase {
    /// Not probing; not getting probe feedback
    Init,
    /// Stopped probing; still getting feedback
    ProbeStopping,
    /// Sending at estimated bandwidth to fill the pipe
    Refilling,
    /// Inflight rising to probe bandwidth
    ProbeStarting,
    /// Getting feedback from bandwidth probing
    ProbeFeedback,
}
140
141impl AckPhase {
142    /// Transition to the given `new_phase`
143    fn transition_to(&mut self, new_phase: AckPhase) {
144        if cfg!(debug_assertions) {
145            match new_phase {
146                AckPhase::ProbeStopping => {
147                    assert!(
148                        *self == AckPhase::Init
149                            || *self == AckPhase::ProbeStarting
150                            || *self == AckPhase::ProbeFeedback
151                    )
152                }
153                AckPhase::Refilling => {
154                    assert!(*self == AckPhase::Init || *self == AckPhase::ProbeStopping)
155                }
156                AckPhase::ProbeStarting => assert_eq!(*self, AckPhase::Refilling),
157                AckPhase::ProbeFeedback => assert_eq!(*self, AckPhase::ProbeStarting),
158                AckPhase::Init => assert_eq!(*self, AckPhase::ProbeStopping),
159            }
160        }
161
162        *self = new_phase;
163    }
164}
165
#[derive(Clone, Debug)]
pub(crate) struct State {
    /// The current mode for deciding how fast to send
    cycle_phase: CyclePhase,
    /// How the incoming ACK stream relates to our bandwidth probing
    ack_phase: AckPhase,
    /// A random duration to wait until probing for bandwidth
    bw_probe_wait: Duration,
    /// Packet-timed rounds since probed bw
    rounds_since_bw_probe: Counter<u8, Saturating>,
    /// Bytes delivered per inflight_hi increment
    ///
    /// `u32::MAX` represents "Infinity" from the draft pseudocode, meaning
    /// inflight_hi is not currently being grown
    bw_probe_up_cnt: u32,
    /// Bytes ACKed since inflight_hi increment
    bw_probe_up_acks: u32,
    /// cwnd-limited rounds in PROBE_UP
    bw_probe_up_rounds: u8,
    /// Time of this cycle phase start
    ///
    /// `None` until the first `Down` or `Up` phase is entered
    cycle_start_timestamp: Option<Timestamp>,
}
185
186impl State {
187    /// Constructs new `probe_bw::State`
188    pub(super) fn new() -> Self {
189        Self {
190            cycle_phase: CyclePhase::Up,
191            ack_phase: AckPhase::Init,
192            bw_probe_wait: Duration::ZERO,
193            rounds_since_bw_probe: Counter::default(),
194            bw_probe_up_cnt: u32::MAX,
195            bw_probe_up_acks: 0,
196            bw_probe_up_rounds: 0,
197            cycle_start_timestamp: None,
198        }
199    }
200
201    /// Returns the current `probe_bw::CyclePhase`
202    pub fn cycle_phase(&self) -> CyclePhase {
203        self.cycle_phase
204    }
205
206    pub fn on_round_start(&mut self) {
207        self.rounds_since_bw_probe += 1;
208    }
209
210    /// Returns true if enough time has passed to transition the cycle phase
211    pub fn is_time_to_probe_bw(
212        &self,
213        target_inflight: u32,
214        max_data_size: u16,
215        now: Timestamp,
216    ) -> bool {
217        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.5.3
218        //# BBRCheckTimeToProbeBW()
219        //#   if (BBRHasElapsedInPhase(BBR.bw_probe_wait) ||
220        //#       BBRIsRenoCoexistenceProbeTime())
221        //#     BBRStartProbeBW_REFILL()
222        //#     return true
223        //#   return false
224
225        debug_assert!(
226            self.cycle_phase == CyclePhase::Down || self.cycle_phase == CyclePhase::Cruise
227        );
228
229        if self.has_elapsed_in_phase(self.bw_probe_wait, now)
230            || self.is_reno_coexistence_probe_time(target_inflight, max_data_size)
231        {
232            return true;
233        }
234        false
235    }
236
237    /// Probe for possible increases in bandwidth
238    fn probe_inflight_hi_upward(
239        &mut self,
240        bytes_acknowledged: usize,
241        data_volume_model: &mut data_volume::Model,
242        cwnd: u32,
243        max_data_size: u16,
244        round_start: bool,
245    ) {
246        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.6
247        //# BBRProbeInflightHiUpward()
248        //#   if (!is_cwnd_limited or cwnd < BBR.inflight_hi)
249        //#       return  /* not fully using inflight_hi, so don't grow it */
250        //#   BBR.bw_probe_up_acks += rs.newly_acked
251        //#   if (BBR.bw_probe_up_acks >= BBR.probe_up_cnt)
252        //#      delta = BBR.bw_probe_up_acks / BBR.probe_up_cnt
253        //#      BBR.bw_probe_up_acks -= delta * BBR.bw_probe_up_cnt
254        //#      BBR.inflight_hi += delta
255        //#   if (BBR.round_start)
256        //#      BBRRaiseInflightHiSlope()
257
258        // is_cwnd_limited and cwnd < BBR.inflight_hi is checked upstream
259
260        self.bw_probe_up_acks += bytes_acknowledged as u32;
261        // Increase inflight_hi by the number of bw_probe_up_cnt bytes within bw_probe_up_acks
262        if self.bw_probe_up_acks >= self.bw_probe_up_cnt {
263            let delta = self.bw_probe_up_acks / self.bw_probe_up_cnt;
264            self.bw_probe_up_acks -= delta * self.bw_probe_up_cnt;
265            let inflight_hi =
266                data_volume_model.inflight_hi() + (delta as u64 * max_data_size as u64);
267            data_volume_model.update_upper_bound(inflight_hi);
268        }
269        if round_start {
270            self.raise_inflight_hi_slope(cwnd, max_data_size);
271        }
272    }
273
274    /// Raise inflight_hi slope if appropriate
275    fn raise_inflight_hi_slope(&mut self, cwnd: u32, max_data_size: u16) {
276        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.6
277        //# BBRRaiseInflightHiSlope():
278        //#   growth_this_round = 1MSS << BBR.bw_probe_up_rounds
279        //#   BBR.bw_probe_up_rounds = min(BBR.bw_probe_up_rounds + 1, 30)
280        //#   BBR.probe_up_cnt = max(cwnd / growth_this_round, 1)
281
282        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.4
283        //# BBR takes an approach where the additive increase to BBR.inflight_hi
284        //# exponentially doubles each round trip
285        let growth_this_round = 1 << self.bw_probe_up_rounds;
286        // The MAX_BW_PROBE_UP_ROUNDS (30) number below means `growth_this_round` is capped at 1G
287        // and the lower bound of `bw_probe_up_cnt` is (practically) 1 mss, at this speed inflight_hi
288        // grows by approximately 1 packet per packet acked.
289        self.bw_probe_up_rounds = (self.bw_probe_up_rounds + 1).min(MAX_BW_PROBE_UP_ROUNDS);
290        self.bw_probe_up_cnt = (cwnd / growth_this_round).max(max_data_size as u32);
291    }
292
293    /// True if the given `interval` duration has elapsed since the current cycle phase began
294    fn has_elapsed_in_phase(&self, interval: Duration, now: Timestamp) -> bool {
295        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.6
296        //# BBRHasElapsedInPhase(interval)
297        //#   return Now() > BBR.cycle_stamp + interval
298
299        self.cycle_start_timestamp
300            .is_some_and(|cycle_stamp| now > cycle_stamp + interval)
301    }
302
303    /// Bandwidth probing can cause loss. To help coexistence with loss-based
304    /// congestion control we spread out our probing in a Reno-conscious way. Due to
305    /// the shape of the Reno sawtooth, the time required between loss epochs for an
306    /// idealized Reno flow is a number of round trips that is the BDP of that
307    /// flow. We count packet-timed round trips directly, since measured RTT can
308    /// vary widely, and Reno is driven by packet-timed round trips.
309    fn is_reno_coexistence_probe_time(&self, target_inflight: u32, max_data_size: u16) -> bool {
310        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.5.3
311        //# BBRIsRenoCoexistenceProbeTime()
312        //#   reno_rounds = BBRTargetInflight()
313        //#   rounds = min(reno_rounds, 63)
314        //#   return BBR.rounds_since_bw_probe >= rounds
315
316        let reno_rounds = target_inflight / max_data_size as u32;
317        let rounds = reno_rounds
318            .try_into()
319            .unwrap_or(u8::MAX)
320            .min(MAX_BW_PROBE_ROUNDS);
321        self.rounds_since_bw_probe >= rounds
322    }
323
324    /// Start the `Cruise` cycle phase
325    fn start_cruise<Pub: Publisher>(&mut self, publisher: &mut Pub) {
326        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.6
327        //# BBRStartProbeBW_CRUISE()
328        //#   BBR.state = ProbeBW_CRUISE
329
330        self.cycle_phase
331            .transition_to(CyclePhase::Cruise, publisher);
332    }
333
334    /// Start the `Up` cycle phase
335    fn start_up<Pub: Publisher>(
336        &mut self,
337        round_counter: &mut round::Counter,
338        delivered_bytes: u64,
339        cwnd: u32,
340        max_data_size: u16,
341        now: Timestamp,
342        publisher: &mut Pub,
343    ) {
344        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.6
345        //# BBRStartProbeBW_UP()
346        //#   BBR.ack_phase = ACKS_PROBE_STARTING
347        //#   BBRStartRound()
348        //#   BBR.cycle_stamp = Now() /* start wall clock */
349        //#   BBR.state = ProbeBW_UP
350        //#   BBRRaiseInflightHiSlope()
351
352        self.ack_phase.transition_to(AckPhase::ProbeStarting);
353        round_counter.set_round_end(delivered_bytes);
354        self.cycle_start_timestamp = Some(now);
355        self.cycle_phase.transition_to(CyclePhase::Up, publisher);
356        self.raise_inflight_hi_slope(cwnd, max_data_size);
357    }
358
359    /// Start the `Refill` cycle phase
360    fn start_refill<Pub: Publisher>(
361        &mut self,
362        data_volume_model: &mut data_volume::Model,
363        data_rate_model: &mut data_rate::Model,
364        round_counter: &mut round::Counter,
365        delivered_bytes: u64,
366        publisher: &mut Pub,
367    ) {
368        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.6
369        //# BBRStartProbeBW_REFILL()
370        //# BBRResetLowerBounds()
371        //#   BBR.bw_probe_up_rounds = 0
372        //#   BBR.bw_probe_up_acks = 0
373        //#   BBR.ack_phase = ACKS_REFILLING
374        //#   BBRStartRound()
375        //#   BBR.state = ProbeBW_REFILL
376
377        data_volume_model.reset_lower_bound();
378        data_rate_model.reset_lower_bound();
379        self.bw_probe_up_rounds = 0;
380        self.bw_probe_up_acks = 0;
381        self.ack_phase.transition_to(AckPhase::Refilling);
382        round_counter.set_round_end(delivered_bytes);
383        self.cycle_phase
384            .transition_to(CyclePhase::Refill, publisher);
385    }
386
387    /// Start the `Down` cycle phase
388    fn start_down<Pub: Publisher>(
389        &mut self,
390        congestion_state: &mut congestion::State,
391        round_counter: &mut round::Counter,
392        delivered_bytes: u64,
393        random_generator: &mut dyn random::Generator,
394        now: Timestamp,
395        publisher: &mut Pub,
396    ) {
397        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.6
398        //# BBRStartProbeBW_DOWN()
399        //#   BBRResetCongestionSignals()
400        //#   BBR.probe_up_cnt = Infinity /* not growing inflight_hi */
401        //#   BBRPickProbeWait()
402        //#   BBR.cycle_stamp = Now()  /* start wall clock */
403        //#   BBR.ack_phase  = ACKS_PROBE_STOPPING
404        //#   BBRStartRound()
405        //#   BBR.state = ProbeBW_DOWN
406
407        congestion_state.reset();
408        self.bw_probe_up_cnt = u32::MAX;
409        self.pick_probe_wait(random_generator);
410        self.cycle_start_timestamp = Some(now);
411        self.ack_phase.transition_to(AckPhase::ProbeStopping);
412        round_counter.set_round_end(delivered_bytes);
413        self.cycle_phase.transition_to(CyclePhase::Down, publisher);
414    }
415
416    /// Randomly determine how long to wait before probing again
417    ///
418    /// Note: This uses a method for determining a number in a random range that has a very slight
419    ///       bias. In practice, this bias should not result in a detectable impact to BBR performance.
420    fn pick_probe_wait(&mut self, random_generator: &mut dyn random::Generator) {
421        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.5.3
422        //# BBRPickProbeWait()
423        //#    /* Decide random round-trip bound for wait: */
424        //#     BBR.rounds_since_bw_probe =
425        //#       random_int_between(0, 1); /* 0 or 1 */
426        //#     /* Decide the random wall clock bound for wait: */
427        //#     BBR.bw_probe_wait =
428        //#       2sec + random_float_between(0.0, 1.0) /* 0..1 sec */
429        self.rounds_since_bw_probe
430            .set(random::gen_range_biased(random_generator, 0..=1) as u8);
431        self.bw_probe_wait =
432            Duration::from_millis(random::gen_range_biased(random_generator, 2000..=3000) as u64);
433    }
434
435    #[cfg(test)]
436    pub fn set_cycle_phase_for_test(&mut self, cycle_phase: CyclePhase) {
437        self.cycle_phase = cycle_phase;
438
439        match cycle_phase {
440            CyclePhase::Down => self.ack_phase = AckPhase::ProbeStopping,
441            CyclePhase::Refill => self.ack_phase = AckPhase::Refilling,
442            CyclePhase::Up => self.ack_phase = AckPhase::ProbeStarting,
443            CyclePhase::Cruise => {}
444        }
445    }
446}
447
448/// Methods related to the ProbeBW state
449impl BbrCongestionController {
450    /// Enters the `ProbeBw` state
451    ///
452    /// If `cruise_immediately` is true, `CyclePhase::Cruise` will be entered immediately
453    /// after entering `CyclePhase::Down`
454    #[inline]
455    pub(super) fn enter_probe_bw<Pub: Publisher>(
456        &mut self,
457        cruise_immediately: bool,
458        random_generator: &mut dyn random::Generator,
459        now: Timestamp,
460        publisher: &mut Pub,
461    ) {
462        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.6
463        //# BBREnterProbeBW():
464        //#     BBRStartProbeBW_DOWN()
465
466        let mut state = State::new();
467        state.start_down(
468            &mut self.congestion_state,
469            &mut self.round_counter,
470            self.bw_estimator.delivered_bytes(),
471            random_generator,
472            now,
473            publisher,
474        );
475
476        if cruise_immediately {
477            state.start_cruise(publisher);
478        }
479
480        // New BBR state requires updating the model
481        self.try_fast_path = false;
482        self.state
483            .transition_to(bbr::State::ProbeBw(state), publisher);
484    }
485
    /// Transition the current Probe BW cycle phase if necessary
    #[inline]
    pub(super) fn update_probe_bw_cycle_phase<Pub: Publisher>(
        &mut self,
        random_generator: &mut dyn random::Generator,
        now: Timestamp,
        publisher: &mut Pub,
    ) {
        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.6
        //# BBRUpdateProbeBWCyclePhase():
        //#    if (!BBR.filled_pipe)
        //#      return  /* only handling steady-state behavior here */
        //#    BBRAdaptUpperBounds()
        //#    if (!IsInAProbeBWState())
        //#      return /* only handling ProbeBW states here: */
        //#
        //#    switch (state)
        //#
        //#    ProbeBW_DOWN:
        //#      if (BBRCheckTimeToProbeBW())
        //#        return /* already decided state transition */
        //#      if (BBRCheckTimeToCruise())
        //#        BBRStartProbeBW_CRUISE()
        //#
        //#    ProbeBW_CRUISE:
        //#      if (BBRCheckTimeToProbeBW())
        //#        return /* already decided state transition */
        //#
        //#    ProbeBW_REFILL:
        //#      /* After one round of REFILL, start UP */
        //#      if (BBR.round_start)
        //#        BBR.bw_probe_samples = 1
        //#        BBRStartProbeBW_UP()
        //#
        //#    ProbeBW_UP:
        //#      if (BBRHasElapsedInPhase(BBR.min_rtt) and
        //#          inflight > BBRInflight(BBR.max_bw, 1.25))
        //#       BBRStartProbeBW_DOWN()

        debug_assert!(
            self.full_pipe_estimator.filled_pipe(),
            "only handling steady-state behavior here"
        );

        debug_assert!(
            self.state.is_probing_bw(),
            "only handling ProbeBw states here"
        );

        // Compute values that require shared borrows of `self` up front, before
        // `self.state` is mutably borrowed below. `time_to_cruise` only applies
        // while in the Down phase.
        let target_inflight = self.target_inflight();
        let inflight = self.inflight(
            self.data_rate_model.max_bw(),
            self.state.pacing_gain(&self.app_settings),
        );
        let time_to_cruise = self.is_time_to_cruise(now);

        if let bbr::State::ProbeBw(ref mut probe_bw_state) = self.state {
            // Remember the phase we entered with so a change can invalidate the fast path below
            let prior_cycle_phase = probe_bw_state.cycle_phase();

            if self.round_counter.round_start() {
                probe_bw_state.on_round_start();
            }

            match probe_bw_state.cycle_phase() {
                CyclePhase::Down | CyclePhase::Cruise => {
                    if probe_bw_state.is_time_to_probe_bw(
                        target_inflight,
                        self.max_datagram_size,
                        now,
                    ) {
                        probe_bw_state.start_refill(
                            &mut self.data_volume_model,
                            &mut self.data_rate_model,
                            &mut self.round_counter,
                            self.bw_estimator.delivered_bytes(),
                            publisher,
                        );
                    } else if probe_bw_state.cycle_phase == CyclePhase::Down && time_to_cruise {
                        probe_bw_state.start_cruise(publisher);
                    }
                }
                CyclePhase::Refill => {
                    // After one round of Refill, start Up
                    if self.round_counter.round_start() {
                        self.bw_probe_samples = true;
                        probe_bw_state.start_up(
                            &mut self.round_counter,
                            self.bw_estimator.delivered_bytes(),
                            self.cwnd,
                            self.max_datagram_size,
                            now,
                            publisher,
                        );
                    }
                }
                CyclePhase::Up => {
                    let min_rtt = self
                        .data_volume_model
                        .min_rtt()
                        .expect("at least one RTT has passed");

                    // Leave Up once at least min_rtt has elapsed in the phase and
                    // inflight exceeds the target for the current pacing gain
                    if probe_bw_state.has_elapsed_in_phase(min_rtt, now)
                        && self.bytes_in_flight > inflight
                    {
                        probe_bw_state.start_down(
                            &mut self.congestion_state,
                            &mut self.round_counter,
                            self.bw_estimator.delivered_bytes(),
                            random_generator,
                            now,
                            publisher,
                        );
                    }
                }
            }

            if prior_cycle_phase != probe_bw_state.cycle_phase() {
                // New phase, so need to update cwnd and pacing rate
                self.try_fast_path = false;
            }
        }
    }
608
    /// Adapt the upper bounds lower or higher depending on the loss rate
    #[inline]
    pub(super) fn adapt_upper_bounds<Pub: Publisher>(
        &mut self,
        bytes_acknowledged: usize,
        random_generator: &mut dyn random::Generator,
        now: Timestamp,
        publisher: &mut Pub,
    ) {
        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.6
        //# BBRAdaptUpperBounds()
        //#   if (BBR.ack_phase == ACKS_PROBE_STARTING and BBR.round_start)
        //#      /* starting to get bw probing samples */
        //#      BBR.ack_phase = ACKS_PROBE_FEEDBACK
        //#    if (BBR.ack_phase == ACKS_PROBE_STOPPING and BBR.round_start)
        //#      /* end of samples from bw probing phase */
        //#      if (IsInAProbeBWState() and !rs.is_app_limited)
        //#        BBRAdvanceMaxBwFilter()
        //#
        //#    if (!CheckInflightTooHigh())
        //#      /* Loss rate is safe. Adjust upper bounds upward. */
        //#      if (BBR.inflight_hi == Infinity or BBR.bw_hi == Infinity)
        //#        return /* no upper bounds to raise */
        //#      if (rs.tx_in_flight > BBR.inflight_hi)
        //#        BBR.inflight_hi = rs.tx_in_flight

        debug_assert!(
            self.full_pipe_estimator.filled_pipe(),
            "only handling steady-state behavior here"
        );

        let rate_sample = self.bw_estimator.rate_sample();

        // Update AckPhase once per round
        if self.round_counter.round_start() {
            self.update_ack_phase(rate_sample);
        }

        // Check whether loss bursts in this round exceed the PROBE_BW loss threshold
        if Self::is_inflight_too_high(
            rate_sample,
            self.max_datagram_size,
            self.congestion_state.loss_bursts_in_round(),
            PROBE_BW_FULL_LOSS_COUNT,
            &self.app_settings,
        ) {
            if self.bw_probe_samples {
                // Inflight is too high and the sample is from bandwidth probing: lower inflight downward
                self.on_inflight_too_high(
                    rate_sample.is_app_limited,
                    rate_sample.bytes_in_flight,
                    random_generator,
                    now,
                    publisher,
                );
            }
        } else {
            // Loss rate is safe. Adjust upper bounds upward

            // u64::MAX represents "Infinity" from the draft pseudocode
            if self.data_volume_model.inflight_hi() == u64::MAX {
                // no upper bounds to raise
                return;
            }

            // draft-cardwell-iccrg-bbr-congestion-control-02 also considers raising bw_hi at this
            // point, but since the draft never lowers bw_hi from its initial value of Infinity, this
            // doesn't have any effect. bw_hi in the current Linux V2Alpha BBR2 branch corresponds
            // to max_hi from the draft, there is no equivalent to the bw_hi in the draft
            // TODO: Update this logic based on subsequent draft updates or consider lowering
            //       bw_hi in `on_inflight_too_high`
            if rate_sample.bytes_in_flight as u64 > self.data_volume_model.inflight_hi() {
                self.data_volume_model
                    .update_upper_bound(rate_sample.bytes_in_flight as u64);
            }

            if let bbr::State::ProbeBw(ref mut probe_bw_state) = self.state {
                if probe_bw_state.cycle_phase() == CyclePhase::Up
                    && self.cwnd_limited_in_round
                    && self.cwnd as u64 >= self.data_volume_model.inflight_hi()
                {
                    // inflight_hi is being fully utilized, so probe if we can increase it
                    probe_bw_state.probe_inflight_hi_upward(
                        bytes_acknowledged,
                        &mut self.data_volume_model,
                        self.cwnd,
                        self.max_datagram_size,
                        self.round_counter.round_start(),
                    );
                }
            }
        }
    }
700
701    /// Update AckPhase and advance the Max BW filter if necessary
702    #[inline]
703    fn update_ack_phase(&mut self, rate_sample: RateSample) {
704        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.6
705        //#   if (BBR.ack_phase == ACKS_PROBE_STARTING and BBR.round_start)
706        //#      /* starting to get bw probing samples */
707        //#      BBR.ack_phase = ACKS_PROBE_FEEDBACK
708        //#    if (BBR.ack_phase == ACKS_PROBE_STOPPING and BBR.round_start)
709        //#      /* end of samples from bw probing phase */
710        //#      if (IsInAProbeBWState() and !rs.is_app_limited)
711        //#        BBRAdvanceMaxBwFilter()
712
713        debug_assert!(self.round_counter.round_start());
714
715        if let bbr::State::ProbeBw(ref mut probe_bw_state) = self.state {
716            match probe_bw_state.ack_phase {
717                AckPhase::ProbeStarting => {
718                    // starting to get bw probing samples
719                    probe_bw_state
720                        .ack_phase
721                        .transition_to(AckPhase::ProbeFeedback);
722                }
723                AckPhase::ProbeStopping => {
724                    // end of samples from bw probing phase
725                    self.bw_probe_samples = false;
726                    probe_bw_state.ack_phase.transition_to(AckPhase::Init);
727                    if !rate_sample.is_app_limited {
728                        self.data_rate_model.advance_max_bw_filter();
729                    }
730                }
731                _ => {}
732            }
733        } else {
734            self.bw_probe_samples = false;
735        }
736    }
737
738    /// Called when loss indicates the current inflight amount is too high
739    #[inline]
740    pub(super) fn on_inflight_too_high<Pub: Publisher>(
741        &mut self,
742        is_app_limited: bool,
743        bytes_in_flight: u32,
744        random_generator: &mut dyn random::Generator,
745        now: Timestamp,
746        publisher: &mut Pub,
747    ) {
748        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.5.6.2
749        //# BBRHandleInflightTooHigh()
750        //# BBR.bw_probe_samples = 0;  /* only react once per bw probe */
751        //#    if (!rs.is_app_limited)
752        //#      BBR.inflight_hi = max(rs.tx_in_flight,
753        //#                            BBRTargetInflight() * BBRBeta))
754        //#    If (BBR.state == ProbeBW_UP)
755        //#      BBRStartProbeBW_DOWN()
756
757        self.bw_probe_samples = false; // only react once per bw probe
758        if !is_app_limited {
759            self.data_volume_model.update_upper_bound(
760                (bytes_in_flight as u64)
761                    .max((bbr::BETA * self.target_inflight() as u64).to_integer()),
762            )
763        }
764
765        if let bbr::State::ProbeBw(ref mut probe_bw_state) = self.state {
766            if probe_bw_state.cycle_phase() == CyclePhase::Up {
767                probe_bw_state.start_down(
768                    &mut self.congestion_state,
769                    &mut self.round_counter,
770                    self.bw_estimator.delivered_bytes(),
771                    random_generator,
772                    now,
773                    publisher,
774                );
775            }
776        }
777    }
778
779    /// Returns true if it is time to transition from `Down` to `Cruise`
780    #[inline]
781    fn is_time_to_cruise(&self, now: Timestamp) -> bool {
782        if let (bbr::State::ProbeBw(probe_bw_state), Some(min_rtt)) =
783            (&self.state, self.data_volume_model.min_rtt())
784        {
785            // Chromium and Linux TCP both limit the time spent in ProbeBW_Down to min_rtt
786            // See https://github.com/google/bbr/blob/1a45fd4faf30229a3d3116de7bfe9d2f933d3562/net/ipv4/tcp_bbr2.c#L1982-L1981
787            //  and https://source.chromium.org/chromium/chromium/src/+/main:net/third_party/quiche/src/quiche/quic/core/congestion_control/bbr2_probe_bw.cc;l=276
788            if probe_bw_state.has_elapsed_in_phase(min_rtt, now) {
789                return true;
790            }
791        }
792
793        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.6
794        //# BBRCheckTimeToCruise())
795        //#   if (inflight > BBRInflightWithHeadroom())
796        //#      return false /* not enough headroom */
797        //#   if (inflight <= BBRInflight(BBR.max_bw, 1.0))
798        //#      return true  /* inflight <= estimated BDP */
799        if self.bytes_in_flight > self.inflight_with_headroom() {
800            return false; // not enough headroom
801        }
802        if self.bytes_in_flight <= self.inflight(self.data_rate_model.max_bw(), Ratio::one()) {
803            return true; // inflight <= estimated BDP
804        }
805        false
806    }
807}
808
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        path,
        path::MINIMUM_MAX_DATAGRAM_SIZE,
        recovery::{
            bandwidth::{Bandwidth, PacketInfo},
            congestion_controller::PathPublisher,
        },
        time::{Clock, NoopClock},
    };

    #[test]
    fn pacing_gain() {
        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.1
        //= type=test
        //# In the ProbeBW_DOWN phase of the cycle, a BBR flow pursues the deceleration tactic,
        //# to try to send slower than the network is delivering data, to reduce the amount of data
        //# in flight, with all of the standard motivations for the deceleration tactic (discussed
        //# in "State Machine Tactics", above). It does this by switching to a BBR.pacing_gain of
        //# 0.9, sending at 90% of BBR.bw.
        assert_eq!(
            Ratio::new_raw(9, 10),
            CyclePhase::Down.pacing_gain(&Default::default())
        );

        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.4
        //= type=test
        //# After ProbeBW_REFILL refills the pipe, ProbeBW_UP probes for possible increases in
        //# available bandwidth by using a BBR.pacing_gain of 1.25, sending faster than the current
        //# estimated available bandwidth.
        assert_eq!(
            Ratio::new_raw(5, 4),
            CyclePhase::Up.pacing_gain(&Default::default())
        );

        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.3
        //= type=test
        //# During ProbeBW_REFILL BBR uses a BBR.pacing_gain of 1.0, to send at a rate that
        //# matches the current estimated available bandwidth
        assert_eq!(
            Ratio::new_raw(1, 1),
            CyclePhase::Refill.pacing_gain(&Default::default())
        );

        //= https://tools.ietf.org/id/draft-cardwell-iccrg-bbr-congestion-control-02#4.3.3.2
        //= type=test
        //# In the ProbeBW_CRUISE phase of the cycle, a BBR flow pursues the "cruising" tactic
        //# (discussed in "State Machine Tactics", above), attempting to send at the same rate
        //# the network is delivering data. It tries to match the sending rate to the flow's
        //# current available bandwidth, to try to achieve high utilization of the available
        //# bandwidth without increasing queue pressure. It does this by switching to a
        //# pacing_gain of 1.0, sending at 100% of BBR.bw.
        assert_eq!(
            Ratio::new_raw(1, 1),
            CyclePhase::Cruise.pacing_gain(&Default::default())
        );
    }

    // ApplicationSettings.probe_bw_up_pacing_gain
    #[test]
    fn probe_bw_up_pacing_gain_with_app_settings() {
        let mut app_settings = Default::default();
        // The default UP_PACING_GAIN applies when no override is configured
        assert_eq!(UP_PACING_GAIN, CyclePhase::Up.pacing_gain(&app_settings));

        // A configured value of 50 is interpreted as a ratio of 50/100
        app_settings.probe_bw_up_pacing_gain = Some(50);
        assert_eq!(
            Ratio::new_raw(50, 100),
            CyclePhase::Up.pacing_gain(&app_settings)
        );
    }

    #[test]
    fn new_probe_bw_state() {
        let state = State::new();

        // A newly constructed ProbeBw state starts in the Up phase with
        // all probe tracking fields reset
        assert_eq!(CyclePhase::Up, state.cycle_phase);
        assert_eq!(AckPhase::Init, state.ack_phase);
        assert_eq!(Duration::ZERO, state.bw_probe_wait);
        assert_eq!(Counter::new(0), state.rounds_since_bw_probe);
        assert_eq!(0, state.bw_probe_up_acks);
        assert_eq!(0, state.bw_probe_up_rounds);
        assert_eq!(None, state.cycle_start_timestamp);
    }

    #[test]
    fn is_time_to_probe_bw() {
        let mut state = State::new();
        let now = NoopClock.get_time();
        let mut publisher = event::testing::Publisher::snapshot();
        let mut publisher = PathPublisher::new(&mut publisher, path::Id::test_id());
        state
            .cycle_phase
            .transition_to(CyclePhase::Down, &mut publisher);

        // cycle_stamp hasn't been set yet
        assert!(!state.is_time_to_probe_bw(12000, 1200, now));

        state.cycle_start_timestamp = Some(now);
        let bw_probe_wait = Duration::from_millis(500);
        state.bw_probe_wait = bw_probe_wait;
        // not ready to probe yet
        assert!(!state.is_time_to_probe_bw(12000, 1200, now + bw_probe_wait));
        // now we're ready to probe
        assert!(state.is_time_to_probe_bw(
            100,
            1200,
            now + bw_probe_wait + Duration::from_millis(1)
        ));

        state.rounds_since_bw_probe = Counter::new(10);
        // 13200 / 1200 = 11 reno rounds, not in reno coexistence probe time
        assert!(!state.is_time_to_probe_bw(13200, 1200, now));
        // 12000 / 1200 = 10 reno rounds, now we are in reno coexistence probe time
        assert!(state.is_time_to_probe_bw(12000, 1200, now));

        // At high BDPs, we probe when MAX_BW_PROBE_ROUNDS is reached
        state.rounds_since_bw_probe = Counter::new(MAX_BW_PROBE_ROUNDS - 1);
        assert!(!state.is_time_to_probe_bw(u32::MAX, 1200, now));
        state.rounds_since_bw_probe = Counter::new(MAX_BW_PROBE_ROUNDS);
        assert!(state.is_time_to_probe_bw(u32::MAX, 1200, now));
    }

    #[test]
    fn probe_inflight_hi_upward() {
        let mut state = State::new();

        let bytes_acknowledged = 2400;
        let mut data_volume_model = data_volume::Model::new();
        let cwnd = 12000;
        let max_data_size = 1200;
        let round_start = true;

        state.bw_probe_up_rounds = 3;
        data_volume_model.update_upper_bound(12000);

        state.probe_inflight_hi_upward(
            bytes_acknowledged,
            &mut data_volume_model,
            cwnd,
            max_data_size,
            round_start,
        );

        // First call: acks accumulate, inflight_hi unchanged, and since
        // round_start was true, the probe up rounds/count advance
        assert_eq!(bytes_acknowledged as u32, state.bw_probe_up_acks);
        assert_eq!(12000, data_volume_model.inflight_hi());
        assert_eq!(4, state.bw_probe_up_rounds);
        // bw_probe_up_cnt = cwnd (12000) / 1 << 3
        assert_eq!(cwnd / 8, state.bw_probe_up_cnt);

        let new_bytes_acknowledged = (cwnd / 8) as usize;
        state.probe_inflight_hi_upward(
            new_bytes_acknowledged,
            &mut data_volume_model,
            cwnd,
            max_data_size,
            false,
        );

        // bw_probe_up_acks = bytes_acknowledged + new_bytes_acknowledged = 3900
        // delta = 3900 / bw_probe_up_cnt  = 3900 / 1500 = 2
        // bw_probe_up_acks = bw_probe_up_acks - delta * bw_probe_up_cnt = 3900 - 2 * 1500 = 900
        assert_eq!(900, state.bw_probe_up_acks);
        // inflight_hi = inflight_hi + delta * max_data_size = 12000 + 2 * 1200 = 14400
        assert_eq!(14400, data_volume_model.inflight_hi());
        // bw_probe_up_rounds stays the same, since round_start was false
        assert_eq!(4, state.bw_probe_up_rounds);
        // bw_probe_up_cnt stays the same, since round_start was false
        assert_eq!(cwnd / 8, state.bw_probe_up_cnt);
    }

    #[test]
    fn start_cruise() {
        let mut state = State::new();
        let mut publisher = event::testing::Publisher::snapshot();
        let mut publisher = PathPublisher::new(&mut publisher, path::Id::test_id());
        state
            .cycle_phase
            .transition_to(CyclePhase::Down, &mut publisher);

        state.start_cruise(&mut publisher);

        assert_eq!(CyclePhase::Cruise, state.cycle_phase());
    }

    #[test]
    fn start_up() {
        let mut state = State::new();
        let mut round_counter = round::Counter::default();
        let mut publisher = event::testing::Publisher::snapshot();
        let mut publisher = PathPublisher::new(&mut publisher, path::Id::test_id());
        let delivered_bytes = 100;
        let cwnd = 12000;
        let max_data_size = 1200;
        let now = NoopClock.get_time();

        state.ack_phase = AckPhase::Refilling;
        state.cycle_phase = CyclePhase::Refill;

        state.start_up(
            &mut round_counter,
            delivered_bytes,
            cwnd,
            max_data_size,
            now,
            &mut publisher,
        );

        assert_eq!(CyclePhase::Up, state.cycle_phase());
        assert_eq!(AckPhase::ProbeStarting, state.ack_phase);
        assert_eq!(Some(now), state.cycle_start_timestamp);

        // raise_inflight_hi_slope is called
        assert_eq!(1, state.bw_probe_up_rounds);
        assert_eq!(cwnd, state.bw_probe_up_cnt);

        // verify the end of round is set to delivered_bytes
        assert_round_end(round_counter, delivered_bytes);
    }

    #[test]
    fn start_refill() {
        let mut state = State::new();
        let mut round_counter = round::Counter::default();
        let delivered_bytes = 100;
        let mut data_volume_model = data_volume::Model::new();
        let mut data_rate_model = data_rate::Model::new();
        let mut publisher = event::testing::Publisher::snapshot();
        let mut publisher = PathPublisher::new(&mut publisher, path::Id::test_id());
        data_volume_model.update_lower_bound(12000, 12000, true, false, 1.0);
        data_rate_model.update_lower_bound(Bandwidth::ZERO);

        state.ack_phase = AckPhase::ProbeStopping;
        state.cycle_phase = CyclePhase::Cruise;

        state.start_refill(
            &mut data_volume_model,
            &mut data_rate_model,
            &mut round_counter,
            delivered_bytes,
            &mut publisher,
        );

        assert_eq!(CyclePhase::Refill, state.cycle_phase());
        // Lower bounds are reset
        assert_eq!(u64::MAX, data_volume_model.inflight_lo());
        assert_eq!(Bandwidth::INFINITY, data_rate_model.bw_lo());

        assert_eq!(0, state.bw_probe_up_rounds);
        assert_eq!(0, state.bw_probe_up_acks);
        assert_eq!(AckPhase::Refilling, state.ack_phase);

        // verify the end of round is set to delivered_bytes
        assert_round_end(round_counter, delivered_bytes);
    }

    #[test]
    fn start_down() {
        let mut state = State::new();
        let mut congestion_state = congestion::testing::test_state();
        let mut round_counter = round::Counter::default();
        let mut publisher = event::testing::Publisher::snapshot();
        let mut publisher = PathPublisher::new(&mut publisher, path::Id::test_id());
        let delivered_bytes = 100;
        let now = NoopClock.get_time();
        let random = &mut random::testing::Generator::default();

        state.cycle_phase = CyclePhase::Up;

        state.start_down(
            &mut congestion_state,
            &mut round_counter,
            delivered_bytes,
            random,
            now,
            &mut publisher,
        );

        assert_eq!(CyclePhase::Down, state.cycle_phase());
        assert_eq!(u32::MAX, state.bw_probe_up_cnt);
        // rounds_since_bw_probe and bw_probe_wait are randomized, so only
        // their expected ranges can be asserted
        assert!(state.rounds_since_bw_probe >= 0 && state.rounds_since_bw_probe <= 1);
        assert!(
            state.bw_probe_wait >= Duration::from_secs(2)
                && state.bw_probe_wait <= Duration::from_secs(3)
        );
        assert_eq!(Some(now), state.cycle_start_timestamp);
        assert_eq!(AckPhase::ProbeStopping, state.ack_phase);

        // verify congestion state is reset
        congestion::testing::assert_reset(congestion_state);

        // verify the end of round is set to delivered_bytes
        assert_round_end(round_counter, delivered_bytes);
    }

    /// Asserts that the given round counter ends its current round exactly when
    /// `expected_end` bytes have been delivered
    fn assert_round_end(mut round_counter: round::Counter, expected_end: u64) {
        let now = NoopClock.get_time();
        // verify the end of round is set to delivered_bytes
        let mut packet_info = PacketInfo {
            delivered_bytes: expected_end - 1,
            delivered_time: now,
            lost_bytes: 0,
            ecn_ce_count: 0,
            first_sent_time: now,
            bytes_in_flight: 0,
            is_app_limited: false,
        };
        round_counter.on_ack(packet_info, expected_end);
        assert!(!round_counter.round_start());

        packet_info.delivered_bytes = expected_end;
        round_counter.on_ack(packet_info, expected_end);
        assert!(round_counter.round_start());
    }

    #[test]
    fn ack_phase_valid_transitions() {
        // Walk through each of the valid AckPhase transitions
        let mut ack_phase = AckPhase::Init;
        ack_phase.transition_to(AckPhase::ProbeStopping);
        assert_eq!(ack_phase, AckPhase::ProbeStopping);
        ack_phase.transition_to(AckPhase::Refilling);
        assert_eq!(ack_phase, AckPhase::Refilling);
        ack_phase.transition_to(AckPhase::ProbeStarting);
        assert_eq!(ack_phase, AckPhase::ProbeStarting);
        ack_phase.transition_to(AckPhase::ProbeFeedback);
        assert_eq!(ack_phase, AckPhase::ProbeFeedback);
        ack_phase.transition_to(AckPhase::ProbeStopping);
        assert_eq!(ack_phase, AckPhase::ProbeStopping);
        ack_phase.transition_to(AckPhase::Refilling);
        assert_eq!(ack_phase, AckPhase::Refilling);
        ack_phase.transition_to(AckPhase::ProbeStarting);
        assert_eq!(ack_phase, AckPhase::ProbeStarting);
        ack_phase.transition_to(AckPhase::ProbeStopping);
        assert_eq!(ack_phase, AckPhase::ProbeStopping);
        ack_phase.transition_to(AckPhase::Init);
        assert_eq!(ack_phase, AckPhase::Init);
        ack_phase.transition_to(AckPhase::Refilling);
        assert_eq!(ack_phase, AckPhase::Refilling);
    }

    #[test]
    fn enter_probe_bw() {
        let mut bbr = BbrCongestionController::new(MINIMUM_MAX_DATAGRAM_SIZE, Default::default());
        let mut rng = random::testing::Generator::default();
        let mut publisher = event::testing::Publisher::snapshot();
        let mut publisher = PathPublisher::new(&mut publisher, path::Id::test_id());
        let now = NoopClock.get_time();
        bbr.state = bbr::State::Drain;

        // cruise_immediately = false
        bbr.enter_probe_bw(false, &mut rng, now, &mut publisher);

        assert!(bbr.state.is_probing_bw());
        if let bbr::State::ProbeBw(probe_bw_state) = bbr.state {
            assert_eq!(CyclePhase::Down, probe_bw_state.cycle_phase());
        }

        assert!(!bbr.try_fast_path);

        // cruise_immediately = true
        bbr.state = bbr::State::Drain;
        bbr.enter_probe_bw(true, &mut rng, now, &mut publisher);
        assert!(bbr.state.is_probing_bw_cruise());
    }

    #[test]
    fn update_ack_phase() {
        let mut bbr = BbrCongestionController::new(MINIMUM_MAX_DATAGRAM_SIZE, Default::default());
        let mut rng = random::testing::Generator::default();
        let mut publisher = event::testing::Publisher::snapshot();
        let mut publisher = PathPublisher::new(&mut publisher, path::Id::test_id());
        let now = NoopClock.get_time();
        bbr.state = bbr::State::Drain;

        // cruise_immediately = false
        bbr.enter_probe_bw(false, &mut rng, now, &mut publisher);

        // Start a new round
        let packet_info = PacketInfo {
            delivered_bytes: 0,
            delivered_time: now,
            lost_bytes: 0,
            ecn_ce_count: 0,
            first_sent_time: now,
            bytes_in_flight: 3000,
            is_app_limited: false,
        };
        bbr.round_counter.on_ack(packet_info, 5000);

        bbr.bw_probe_samples = true;

        assert!(bbr.state.is_probing_bw());
        if let bbr::State::ProbeBw(ref mut probe_bw_state) = bbr.state {
            assert_eq!(probe_bw_state.ack_phase, AckPhase::ProbeStopping);
            assert_eq!(bbr.data_rate_model.cycle_count(), 0);
        }

        let rate_sample = RateSample {
            is_app_limited: false,
            ..Default::default()
        };
        bbr.update_ack_phase(rate_sample);

        // Moving from ProbeStopping to Init increments the cycle count
        if let bbr::State::ProbeBw(ref mut probe_bw_state) = bbr.state {
            assert_eq!(probe_bw_state.ack_phase, AckPhase::Init);
            assert_eq!(bbr.data_rate_model.cycle_count(), 1);
            assert!(!bbr.bw_probe_samples);
        }

        bbr.update_ack_phase(rate_sample);

        // Updating the ack phase again does not increment the cycle count
        if let bbr::State::ProbeBw(ref mut probe_bw_state) = bbr.state {
            assert_eq!(probe_bw_state.ack_phase, AckPhase::Init);
            assert_eq!(bbr.data_rate_model.cycle_count(), 1);

            // set ack phase for the next test
            probe_bw_state.ack_phase = AckPhase::ProbeStarting;
        }

        bbr.bw_probe_samples = true;
        bbr.update_ack_phase(rate_sample);

        if let bbr::State::ProbeBw(ref mut probe_bw_state) = bbr.state {
            assert_eq!(probe_bw_state.ack_phase, AckPhase::ProbeFeedback);
            assert_eq!(bbr.data_rate_model.cycle_count(), 1);
            assert!(bbr.bw_probe_samples);
        }
    }
}