Coverage Report

Created: 2025-08-29 06:21

/src/h2o/deps/quicly/lib/cc-pico.c
All executable lines below report an execution count of 0; the file was not exercised by this test run.
/*
 * Copyright (c) 2021 Fastly, Kazuho Oku
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <math.h>
#include "quicly/pacer.h"
#include "quicly/cc.h"
#include "quicly.h"

/**
 * Calculates the increase ratio to be used in congestion avoidance phase.
 */
static uint32_t calc_bytes_per_mtu_increase(uint32_t cwnd, uint32_t rtt, uint32_t mtu)
{
    /* Reno: CWND size after reduction */
    uint32_t reno = cwnd * QUICLY_RENO_BETA;

    /* Cubic: Cubic reaches original CWND (i.e., Wmax) in K seconds, therefore:
     *   amount_to_increase = 0.3 * Wmax
     *   amount_to_be_acked = K * Wmax / RTT_at_Wmax
     * where
     *   K = (0.3 / 0.4 * Wmax / MTU)^(1/3)
     *
     * Hence:
     *   bytes_per_mtu_increase = amount_to_be_acked / amount_to_increase * MTU
     *     = (K * Wmax / RTT_at_Wmax) / (0.3 * Wmax) * MTU
     *     = K * MTU / (0.3 * RTT_at_Wmax)
     *
     * In addition, we have to adjust the value to take fast convergence into account. On a path with stable capacity, 50% of
     * the congestion events scale Wmax to 0.85x of its previous value before K is calculated. If that happens, the modified K
     * (K') is:
     *
     *   K' = (0.3 / 0.4 * 0.85 * Wmax / MTU)^(1/3) = 0.85^(1/3) * K
     *
     * where K' represents the time to reach 0.85 * Wmax. As the cubic curve is point symmetric at the point where this curve
     * reaches 0.85 * Wmax, it would take 2 * K' seconds to reach Wmax.
     *
     * Therefore, by amortizing the two modes, the congestion period of Cubic with fast convergence is calculated as:
     *
     *   bytes_per_mtu_increase = ((1 + 0.85^(1/3) * 2) / 2) * K * MTU / (0.3 * RTT_at_Wmax)
     */
    uint32_t cubic = 1.447 / 0.3 * 1000 * cbrt(0.3 / 0.4 * cwnd / mtu) / rtt * mtu;

    return reno < cubic ? reno : cubic;
}
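
/* Illustrative arithmetic (editor's example, not part of the source; assumes QUICLY_RENO_BETA == 0.7): with a 1,200-byte
 * max_udp_payload_size, cwnd == 120,000 bytes (100 packets) and a smoothed RTT of 100 ms:
 *   reno  = 120,000 * 0.7                                        = 84,000 bytes acked per MTU of growth
 *   cubic = 1.447 / 0.3 * 1000 * cbrt(0.75 * 100) / 100 * 1200  ~= 244,000 bytes acked per MTU of growth
 * so the Reno value wins and cwnd grows by one MTU per ~0.7 * cwnd bytes acknowledged. The constant 1.447 is the
 * amortization factor (1 + 2 * 0.85^(1/3)) / 2 derived in the comment above, and the factor of 1000 converts the RTT,
 * which is passed in milliseconds, into seconds. */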

/* TODO: Avoid increase if sender was application limited. */
static void pico_on_acked(quicly_cc_t *cc, const quicly_loss_t *loss, uint32_t bytes, uint64_t largest_acked, uint32_t inflight,
                          int cc_limited, uint64_t next_pn, int64_t now, uint32_t max_udp_payload_size)
{
    assert(inflight >= bytes);

    /* Do not increase congestion window while in recovery (but jumpstart may do something different). */
    if (largest_acked < cc->recovery_end) {
        quicly_cc_jumpstart_on_acked(cc, 1, bytes, largest_acked, inflight, next_pn);
        return;
    }

    quicly_cc_jumpstart_on_acked(cc, 0, bytes, largest_acked, inflight, next_pn);

    if (!cc_limited)
        return;

    cc->state.pico.stash += bytes;

    /* Calculate the amount of bytes required to be acked for incrementing CWND by one MTU. */
    uint32_t bytes_per_mtu_increase;
    if (cc->cwnd < cc->ssthresh) {
        bytes_per_mtu_increase = max_udp_payload_size;
    } else {
        bytes_per_mtu_increase = cc->state.pico.bytes_per_mtu_increase;
    }

    /* Bail out if we do not yet have enough bytes being acked. */
    if (cc->state.pico.stash < bytes_per_mtu_increase)
        return;

    /* Update CWND, reducing stash relative to the amount we've adjusted the CWND. */
    uint32_t count = cc->state.pico.stash / bytes_per_mtu_increase;
    cc->cwnd += count * max_udp_payload_size;
    cc->state.pico.stash -= count * bytes_per_mtu_increase;

    if (cc->cwnd_maximum < cc->cwnd)
        cc->cwnd_maximum = cc->cwnd;
}
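
/* Illustrative arithmetic (editor's example, not part of the source): continuing the numbers above
 * (bytes_per_mtu_increase == 84,000, max_udp_payload_size == 1200), acknowledged bytes only accumulate in
 * cc->state.pico.stash until the stash reaches 84,000; at that point count becomes 1, cwnd grows by 1,200 bytes, and
 * 84,000 bytes are deducted from the stash. Since roughly cwnd bytes are acknowledged per RTT, in congestion avoidance
 * the window grows by about cwnd / 84,000, i.e. ~1.4 MTUs per RTT, whereas in slow start (cwnd < ssthresh) it grows by
 * one MTU per MTU acked, doubling every RTT. */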

static void pico_on_lost(quicly_cc_t *cc, const quicly_loss_t *loss, uint32_t bytes, uint64_t lost_pn, uint64_t next_pn,
                         int64_t now, uint32_t max_udp_payload_size)
{
    quicly_cc__update_ecn_episodes(cc, bytes, lost_pn);

    /* Nothing to do if loss is in recovery window. */
    if (lost_pn < cc->recovery_end)
        return;
    cc->recovery_end = next_pn;

    /* If loss is detected before receiving all acks for jumpstart, restore the original CWND. */
    if (cc->ssthresh == UINT32_MAX)
        quicly_cc_jumpstart_on_first_loss(cc, lost_pn);

    ++cc->num_loss_episodes;
    if (cc->cwnd_exiting_slow_start == 0) {
        cc->cwnd_exiting_slow_start = cc->cwnd;
        cc->exit_slow_start_at = now;
    }

    /* Calculate increase rate. */
    cc->state.pico.bytes_per_mtu_increase = calc_bytes_per_mtu_increase(cc->cwnd, loss->rtt.smoothed, max_udp_payload_size);

    /* Reduce congestion window. */
    cc->cwnd *= cc->ssthresh == UINT32_MAX ? 0.5 : QUICLY_RENO_BETA; /* without HyStart++, we overshoot by 2x in slow start */
    if (cc->cwnd < QUICLY_MIN_CWND * max_udp_payload_size)
        cc->cwnd = QUICLY_MIN_CWND * max_udp_payload_size;
    cc->ssthresh = cc->cwnd;

    if (cc->cwnd_minimum > cc->cwnd)
        cc->cwnd_minimum = cc->cwnd;
}
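
/* Illustrative arithmetic (editor's example, not part of the source; assumes QUICLY_RENO_BETA == 0.7 and
 * QUICLY_MIN_CWND == 2): a loss detected in congestion avoidance with cwnd == 120,000 bytes shrinks both the window and
 * ssthresh to 84,000 bytes, whereas a loss while ssthresh is still UINT32_MAX (initial slow start) halves the window to
 * compensate for the ~2x overshoot noted above. In either case the window is floored at
 * QUICLY_MIN_CWND * max_udp_payload_size, i.e. 2,400 bytes for a 1,200-byte payload size. */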

static void pico_on_persistent_congestion(quicly_cc_t *cc, const quicly_loss_t *loss, int64_t now)
{
    /* TODO */
}

static void pico_on_sent(quicly_cc_t *cc, const quicly_loss_t *loss, uint32_t bytes, int64_t now)
{
    /* Unused */
}

static void pico_init_pico_state(quicly_cc_t *cc, uint32_t stash)
{
    cc->state.pico.stash = stash;
    cc->state.pico.bytes_per_mtu_increase = cc->cwnd * QUICLY_RENO_BETA; /* use Reno, for simplicity */
}

static void pico_reset(quicly_cc_t *cc, uint32_t initcwnd)
{
    *cc = (quicly_cc_t){
        .type = &quicly_cc_type_pico,
        .cwnd = initcwnd,
        .cwnd_initial = initcwnd,
        .cwnd_maximum = initcwnd,
        .cwnd_minimum = UINT32_MAX,
        .exit_slow_start_at = INT64_MAX,
        .ssthresh = UINT32_MAX,
    };
    pico_init_pico_state(cc, 0);

    quicly_cc_jumpstart_reset(cc);
}

static int pico_on_switch(quicly_cc_t *cc)
{
    if (cc->type == &quicly_cc_type_pico) {
        return 1; /* nothing to do */
    } else if (cc->type == &quicly_cc_type_reno) {
        cc->type = &quicly_cc_type_pico;
        pico_init_pico_state(cc, cc->state.reno.stash);
        return 1;
    } else if (cc->type == &quicly_cc_type_cubic) {
        /* When in slow start, state can be reused as-is; otherwise, restart. */
        if (cc->cwnd_exiting_slow_start == 0) {
            cc->type = &quicly_cc_type_pico;
            pico_init_pico_state(cc, 0);
        } else {
            pico_reset(cc, cc->cwnd_initial);
        }
        return 1;
    }

    return 0;
}

static void pico_init(quicly_init_cc_t *self, quicly_cc_t *cc, uint32_t initcwnd, int64_t now)
{
    pico_reset(cc, initcwnd);
}

quicly_cc_type_t quicly_cc_type_pico = {"pico",         &quicly_cc_pico_init,          pico_on_acked,
                                        pico_on_lost,   pico_on_persistent_congestion, pico_on_sent,
                                        pico_on_switch, quicly_cc_jumpstart_enter};
quicly_init_cc_t quicly_cc_pico_init = {pico_init};
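
The two objects at the end of the file are what an application hands to quicly to select this controller. A minimal sketch of
that selection, assuming the usual quicly setup in which a quicly_context_t is copied from quicly_spec_context and its init_cc
member points at the chosen quicly_init_cc_t (the helper name make_pico_context is illustrative):

    #include "quicly.h"
    #include "quicly/cc.h"

    /* Start from the default context template and have new connections initialize their quicly_cc_t via pico_init(). */
    static quicly_context_t make_pico_context(void)
    {
        quicly_context_t ctx = quicly_spec_context;
        ctx.init_cc = &quicly_cc_pico_init;
        return ctx;
    }

Switching an already-established connection from Reno or Cubic is handled separately by pico_on_switch, which reuses the Reno
stash or the Cubic slow-start state where it can and otherwise restarts from the initial window.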