Coverage Report

Created: 2025-06-22 06:18

/src/h2o/deps/quicly/lib/cc-cubic.c
Line
Count
Source (jump to first uncovered line)
1
/*
2
 * Copyright (c) 2019 Fastly, Janardhan Iyengar
3
 * Copyright (c) 2020 RWTH Aachen University, COMSYS Network Architectures Group, Leo Blöcher
4
 *
5
 * Permission is hereby granted, free of charge, to any person obtaining a copy
6
 * of this software and associated documentation files (the "Software"), to
7
 * deal in the Software without restriction, including without limitation the
8
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
9
 * sell copies of the Software, and to permit persons to whom the Software is
10
 * furnished to do so, subject to the following conditions:
11
 *
12
 * The above copyright notice and this permission notice shall be included in
13
 * all copies or substantial portions of the Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
 * IN THE SOFTWARE.
22
 */
23
#include <math.h>
24
#include "quicly/cc.h"
25
#include "quicly.h"
26
#include "quicly/pacer.h"
27
28
0
/* Floor on the congestion window, in units of max_udp_payload_size
 * (applied after multiplicative decrease in cubic_on_lost). */
#define QUICLY_MIN_CWND 2

/* Floating-point type used for all CUBIC arithmetic. */
typedef double cubic_float_t;
/* "C", the cubic scaling constant of RFC 8312 (Section 5.1 recommends 0.4). */
#define QUICLY_CUBIC_C ((cubic_float_t)0.4)
/* "beta_cubic", the multiplicative-decrease factor (RFC 8312, Section 4.5). */
#define QUICLY_CUBIC_BETA ((cubic_float_t)0.7)
33
34
/* Calculates the time elapsed since the last congestion event (parameter t) */
35
static cubic_float_t calc_cubic_t(const quicly_cc_t *cc, int64_t now)
36
0
{
37
0
    cubic_float_t clock_delta = now - cc->state.cubic.avoidance_start;
38
0
    return clock_delta / 1000; /* ms -> s */
39
0
}
40
41
/* RFC 8312, Equation 1; using bytes as unit instead of MSS */
42
static uint32_t calc_w_cubic(const quicly_cc_t *cc, cubic_float_t t_sec, uint32_t max_udp_payload_size)
43
0
{
44
0
    cubic_float_t tk = t_sec - cc->state.cubic.k;
45
0
    return (QUICLY_CUBIC_C * (tk * tk * tk) * max_udp_payload_size) + cc->state.cubic.w_max;
46
0
}
47
48
/* RFC 8312, Equation 2 */
49
/* K depends solely on W_max, so we update both together on congestion events */
50
static void update_cubic_k(quicly_cc_t *cc, uint32_t max_udp_payload_size)
51
0
{
52
0
    cubic_float_t w_max_mss = cc->state.cubic.w_max / (cubic_float_t)max_udp_payload_size;
53
0
    cc->state.cubic.k = cbrt(w_max_mss * ((1 - QUICLY_CUBIC_BETA) / QUICLY_CUBIC_C));
54
0
}
55
56
/* RFC 8312, Equation 4; using bytes as unit instead of MSS */
57
static uint32_t calc_w_est(const quicly_cc_t *cc, cubic_float_t t_sec, cubic_float_t rtt_sec, uint32_t max_udp_payload_size)
58
0
{
59
0
    return (cc->state.cubic.w_max * QUICLY_CUBIC_BETA) +
60
0
           ((3 * (1 - QUICLY_CUBIC_BETA) / (1 + QUICLY_CUBIC_BETA)) * (t_sec / rtt_sec) * max_udp_payload_size);
61
0
}
62
63
/* TODO: Avoid increase if sender was application limited. */
/* ACK callback; grows the congestion window per RFC 8312.
 *
 * While in recovery (the acked packet was sent before the current recovery
 * window started) the window is left untouched and only jumpstart bookkeeping
 * runs. Below ssthresh the window grows by the acked byte count (slow start);
 * otherwise the window is raised toward W_est (TCP-friendly region) or toward
 * the cubic function (CUBIC region), whichever applies.
 *
 * NOTE(review): `cc_limited` is currently ignored (see TODO below). */
static void cubic_on_acked(quicly_cc_t *cc, const quicly_loss_t *loss, uint32_t bytes, uint64_t largest_acked, uint32_t inflight,
                           int cc_limited, uint64_t next_pn, int64_t now, uint32_t max_udp_payload_size)
{
    assert(inflight >= bytes);
    /* Do not increase congestion window while in recovery (but jumpstart may do something different). */
    if (largest_acked < cc->recovery_end) {
        quicly_cc_jumpstart_on_acked(cc, 1, bytes, largest_acked, inflight, next_pn);
        return;
    }

    quicly_cc_jumpstart_on_acked(cc, 0, bytes, largest_acked, inflight, next_pn);

    /* TODO: respect cc_limited */

    /* Slow start. */
    if (cc->cwnd < cc->ssthresh) {
        cc->cwnd += bytes;
        if (cc->cwnd_maximum < cc->cwnd)
            cc->cwnd_maximum = cc->cwnd;
        return;
    }

    /* Congestion avoidance. */
    cubic_float_t t_sec = calc_cubic_t(cc, now);
    cubic_float_t rtt_sec = loss->rtt.smoothed / (cubic_float_t)1000; /* ms -> s */

    uint32_t w_cubic = calc_w_cubic(cc, t_sec, max_udp_payload_size);
    uint32_t w_est = calc_w_est(cc, t_sec, rtt_sec, max_udp_payload_size);

    if (w_cubic < w_est) {
        /* RFC 8312, Section 4.2; TCP-Friendly Region */
        /* Prevent cwnd from shrinking if W_est is reduced due to RTT increase */
        if (w_est > cc->cwnd)
            cc->cwnd = w_est;
    } else {
        /* RFC 8312, Section 4.3/4.4; CUBIC Region */
        /* Target is the cubic window one RTT in the future, so growth anticipates the next feedback cycle. */
        cubic_float_t w_cubic_target = calc_w_cubic(cc, t_sec + rtt_sec, max_udp_payload_size);
        /* After fast convergence W_max < W_last_max holds, and hence W_cubic(0) = beta * W_max < beta * W_last_max = cwnd.
         * cwnd could thus shrink without this check (but only after fast convergence). */
        if (w_cubic_target > cc->cwnd)
            /* (W_cubic(t+RTT) - cwnd)/cwnd * MSS = (W_cubic(t+RTT)/cwnd - 1) * MSS */
            cc->cwnd += ((w_cubic_target / cc->cwnd) - 1) * max_udp_payload_size;
    }

    /* Track high-water mark of the congestion window. */
    if (cc->cwnd_maximum < cc->cwnd)
        cc->cwnd_maximum = cc->cwnd;
}
111
112
/* Loss callback; starts a new recovery episode and applies the RFC 8312
 * multiplicative decrease plus fast convergence.
 *
 * At most one reaction happens per recovery window: losses with
 * lost_pn < recovery_end are ignored after updating ECN bookkeeping. */
static void cubic_on_lost(quicly_cc_t *cc, const quicly_loss_t *loss, uint32_t bytes, uint64_t lost_pn, uint64_t next_pn,
                          int64_t now, uint32_t max_udp_payload_size)
{
    quicly_cc__update_ecn_episodes(cc, bytes, lost_pn);

    /* Nothing to do if loss is in recovery window. */
    if (lost_pn < cc->recovery_end)
        return;
    cc->recovery_end = next_pn;

    /* if detected loss before receiving all acks for jumpstart, restore original CWND */
    if (cc->ssthresh == UINT32_MAX)
        quicly_cc_jumpstart_on_first_loss(cc, lost_pn);

    ++cc->num_loss_episodes;
    /* Record the window and timestamp at which slow start was first exited. */
    if (cc->cwnd_exiting_slow_start == 0) {
        cc->cwnd_exiting_slow_start = cc->cwnd;
        cc->exit_slow_start_at = now;
    }

    /* Restart the congestion-avoidance clock and remember the window at loss. */
    cc->state.cubic.avoidance_start = now;
    cc->state.cubic.w_max = cc->cwnd;

    /* RFC 8312, Section 4.6; Fast Convergence */
    /* w_last_max is initialized to zero; therefore this condition is false when exiting slow start */
    if (cc->state.cubic.w_max < cc->state.cubic.w_last_max) {
        cc->state.cubic.w_last_max = cc->state.cubic.w_max;
        cc->state.cubic.w_max *= (1.0 + QUICLY_CUBIC_BETA) / 2.0;
    } else {
        cc->state.cubic.w_last_max = cc->state.cubic.w_max;
    }
    /* K must be recomputed whenever W_max changes (RFC 8312, Equation 2). */
    update_cubic_k(cc, max_udp_payload_size);

    /* RFC 8312, Section 4.5; Multiplicative Decrease */
    cc->cwnd *= cc->ssthresh == UINT32_MAX ? 0.5 : QUICLY_CUBIC_BETA; /* without HyStart++, we overshoot by 2x in slowstart */
    if (cc->cwnd < QUICLY_MIN_CWND * max_udp_payload_size)
        cc->cwnd = QUICLY_MIN_CWND * max_udp_payload_size;
    cc->ssthresh = cc->cwnd;

    /* Track low-water mark of the congestion window. */
    if (cc->cwnd_minimum > cc->cwnd)
        cc->cwnd_minimum = cc->cwnd;
}
154
155
/* Persistent-congestion callback; intentionally a no-op for now (see TODO). */
static void cubic_on_persistent_congestion(quicly_cc_t *cc, const quicly_loss_t *loss, int64_t now)
{
    /* TODO */
}
159
160
/* Sent-notification hook: advances the congestion-avoidance epoch across idle
 * periods so W_cubic/W_est are not computed over time in which nothing was in
 * flight. The sender is coming out of quiescence when the packet just sent is
 * the only one in flight (same approach as the Linux TCP fix:
 * https://github.com/torvalds/linux/commit/30927520dbae297182990bb21d08762bcc35ce1d). */
static void cubic_on_sent(quicly_cc_t *cc, const quicly_loss_t *loss, uint32_t bytes, int64_t now)
{
    int leaving_quiescence = loss->sentmap.bytes_in_flight <= bytes;

    if (leaving_quiescence && cc->state.cubic.avoidance_start != 0 && cc->state.cubic.last_sent_time != 0) {
        /* Shift the epoch forward by the idle duration, effectively excluding it from t. */
        int64_t idle_ms = now - cc->state.cubic.last_sent_time;
        if (idle_ms > 0)
            cc->state.cubic.avoidance_start += idle_ms;
    }

    cc->state.cubic.last_sent_time = now;
}
174
175
/* Reinitializes `cc` as a CUBIC controller with an initial window of `initcwnd` bytes. */
static void cubic_reset(quicly_cc_t *cc, uint32_t initcwnd)
{
    memset(cc, 0, sizeof(quicly_cc_t));
    cc->type = &quicly_cc_type_cubic;

    /* every cwnd statistic starts at the initial window */
    cc->cwnd = initcwnd;
    cc->cwnd_initial = initcwnd;
    cc->cwnd_maximum = initcwnd;

    /* "not yet determined" sentinels */
    cc->ssthresh = UINT32_MAX;
    cc->cwnd_minimum = UINT32_MAX;
    cc->exit_slow_start_at = INT64_MAX;

    quicly_cc_jumpstart_reset(cc);
}
185
186
/* Attempts to switch an existing controller to CUBIC.
 * Returns 1 on success, 0 if the current controller type cannot be converted. */
static int cubic_on_switch(quicly_cc_t *cc)
{
    /* Already CUBIC: nothing to do. */
    if (cc->type == &quicly_cc_type_cubic)
        return 1;

    /* Only Reno-family controllers (reno, pico) can be converted. */
    if (cc->type != &quicly_cc_type_reno && cc->type != &quicly_cc_type_pico)
        return 0;

    if (cc->cwnd_exiting_slow_start == 0) {
        /* Still in slow start: state carries over as-is. */
        cc->type = &quicly_cc_type_cubic;
    } else {
        /* Past slow start: restart from the initial window. */
        cubic_reset(cc, cc->cwnd_initial);
    }
    return 1;
}
203
204
/* Init entry point of the CUBIC controller; delegates to the common reset path.
 * `self` and `now` are required by the callback signature but unused here. */
static void cubic_init(quicly_init_cc_t *self, quicly_cc_t *cc, uint32_t initcwnd, int64_t now)
{
    (void)self;
    (void)now;
    cubic_reset(cc, initcwnd);
}
208
209
/* Callback table registering CUBIC with quicly's pluggable congestion-controller
 * interface; the jumpstart-enter hook is the shared quicly_cc_jumpstart_enter. */
quicly_cc_type_t quicly_cc_type_cubic = {"cubic",         &quicly_cc_cubic_init,          cubic_on_acked,
                                         cubic_on_lost,   cubic_on_persistent_congestion, cubic_on_sent,
                                         cubic_on_switch, quicly_cc_jumpstart_enter};
quicly_init_cc_t quicly_cc_cubic_init = {cubic_init};