/src/h2o/deps/quicly/lib/cc-reno.c
/*
 * Copyright (c) 2019 Fastly, Janardhan Iyengar
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "quicly/cc.h"
#include "quicly.h"

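/* Reno congestion controller: reno_on_acked grows the window additively (byte-counted slow start, then roughly one
 * max-size datagram per congestion window of acked bytes), quicly_cc_reno_on_lost shrinks it multiplicatively, and the
 * jumpstart entry points are forwarded to the shared quicly_cc_jumpstart_* helpers. */
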
/* TODO: Avoid increase if sender was application limited. */
static void reno_on_acked(quicly_cc_t *cc, const quicly_loss_t *loss, uint32_t bytes, uint64_t largest_acked, uint32_t inflight,
                          int cc_limited, uint64_t next_pn, int64_t now, uint32_t max_udp_payload_size)
{
    assert(inflight >= bytes);

    /* Do not increase congestion window while in recovery (but jumpstart may do something different). */
    if (largest_acked < cc->recovery_end) {
        quicly_cc_jumpstart_on_acked(cc, 1, bytes, largest_acked, inflight, next_pn);
        return;
    }

    quicly_cc_jumpstart_on_acked(cc, 0, bytes, largest_acked, inflight, next_pn);

    /* Slow start. */
    if (cc->cwnd < cc->ssthresh) {
        if (cc_limited) {
            cc->cwnd += bytes;
            if (cc->cwnd_maximum < cc->cwnd)
                cc->cwnd_maximum = cc->cwnd;
        }
        return;
    }
    /* Congestion avoidance. */
    if (!cc_limited)
        return;
    cc->state.reno.stash += bytes;
    if (cc->state.reno.stash < cc->cwnd)
        return;
    /* Increase congestion window by 1 MSS per congestion window acked. */
    uint32_t count = cc->state.reno.stash / cc->cwnd;
    cc->state.reno.stash -= count * cc->cwnd;
    cc->cwnd += count * max_udp_payload_size;
    if (cc->cwnd_maximum < cc->cwnd)
        cc->cwnd_maximum = cc->cwnd;
}
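
/* Worked example of the congestion-avoidance increment above (numbers are illustrative, not taken from the code): with
 * cwnd = 12,000 bytes and max_udp_payload_size = 1,200 bytes, the stash has to accumulate 12,000 acked bytes, one full
 * congestion window, before cwnd grows by 1,200 bytes; for a fully utilized window that is roughly one max-size datagram
 * per round trip. */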

void quicly_cc_reno_on_lost(quicly_cc_t *cc, const quicly_loss_t *loss, uint32_t bytes, uint64_t lost_pn, uint64_t next_pn,
                            int64_t now, uint32_t max_udp_payload_size)
{
    quicly_cc__update_ecn_episodes(cc, bytes, lost_pn);

    /* Nothing to do if loss is in recovery window. */
    if (lost_pn < cc->recovery_end)
        return;
    cc->recovery_end = next_pn;

    /* if detected loss before receiving all acks for jumpstart, restore original CWND */
    if (cc->ssthresh == UINT32_MAX)
        quicly_cc_jumpstart_on_first_loss(cc, lost_pn);

    ++cc->num_loss_episodes;
    if (cc->cwnd_exiting_slow_start == 0) {
        cc->cwnd_exiting_slow_start = cc->cwnd;
        cc->exit_slow_start_at = now;
    }

    /* Reduce congestion window. */
    cc->cwnd *= cc->ssthresh == UINT32_MAX ? 0.5 : QUICLY_RENO_BETA; /* without HyStart++, we overshoot by 2x in slowstart */
    if (cc->cwnd < QUICLY_MIN_CWND * max_udp_payload_size)
        cc->cwnd = QUICLY_MIN_CWND * max_udp_payload_size;
    cc->ssthresh = cc->cwnd;

    if (cc->cwnd_minimum > cc->cwnd)
        cc->cwnd_minimum = cc->cwnd;
}
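
/* Note on the reduction above: the first loss episode (ssthresh still at its UINT32_MAX sentinel, i.e. slow start was never
 * exited) halves cwnd, while later episodes multiply it by QUICLY_RENO_BETA; e.g. a beta of 0.7 would shrink a 30,000-byte
 * window to 21,000 bytes. The reduced window, clamped to QUICLY_MIN_CWND packets, becomes the new ssthresh. */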

void quicly_cc_reno_on_persistent_congestion(quicly_cc_t *cc, const quicly_loss_t *loss, int64_t now)
{
    /* TODO */
}

void quicly_cc_reno_on_sent(quicly_cc_t *cc, const quicly_loss_t *loss, uint32_t bytes, int64_t now)
{
    /* Unused */
}

static void reno_reset(quicly_cc_t *cc, uint32_t initcwnd)
{
    memset(cc, 0, sizeof(quicly_cc_t));
    cc->type = &quicly_cc_type_reno;
    cc->cwnd = cc->cwnd_initial = cc->cwnd_maximum = initcwnd;
    cc->exit_slow_start_at = INT64_MAX;
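    /* an ssthresh of UINT32_MAX acts as a "still in slow start" sentinel; quicly_cc_reno_on_lost replaces it with the
     * reduced cwnd on the first loss episode */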
    cc->ssthresh = cc->cwnd_minimum = UINT32_MAX;

    quicly_cc_jumpstart_reset(cc);
}

static int reno_on_switch(quicly_cc_t *cc)
{
    if (cc->type == &quicly_cc_type_reno) {
        return 1; /* nothing to do */
    } else if (cc->type == &quicly_cc_type_pico) {
        cc->type = &quicly_cc_type_reno;
        cc->state.reno.stash = cc->state.pico.stash;
        return 1;
    } else if (cc->type == &quicly_cc_type_cubic) {
        /* When in slow start, state can be reused as-is; otherwise, restart. */
        if (cc->cwnd_exiting_slow_start == 0) {
            cc->type = &quicly_cc_type_reno;
        } else {
            reno_reset(cc, cc->cwnd_initial);
        }
        return 1;
    }

    return 0;
}
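
/* A switch from Pico carries the byte-counting stash over directly, a switch from Cubic reuses the state only while still in
 * slow start (otherwise the controller restarts from the initial window), and any other source type makes the switch fail by
 * returning 0. */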

static void reno_init(quicly_init_cc_t *self, quicly_cc_t *cc, uint32_t initcwnd, int64_t now)
{
    reno_reset(cc, initcwnd);
}

quicly_cc_type_t quicly_cc_type_reno = {"reno",
                                        &quicly_cc_reno_init,
                                        reno_on_acked,
                                        quicly_cc_reno_on_lost,
                                        quicly_cc_reno_on_persistent_congestion,
                                        quicly_cc_reno_on_sent,
                                        reno_on_switch,
                                        quicly_cc_jumpstart_enter};
quicly_init_cc_t quicly_cc_reno_init = {reno_init};

quicly_cc_type_t *quicly_cc_all_types[] = {&quicly_cc_type_reno, &quicly_cc_type_cubic, &quicly_cc_type_pico, NULL};

uint32_t quicly_cc_calc_initial_cwnd(uint32_t max_packets, uint16_t max_udp_payload_size)
{
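    /* cap the payload size at 1472 bytes, i.e. a 1500-byte Ethernet MTU minus 20 bytes of IPv4 header and 8 bytes of UDP
     * header */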
    static const uint32_t mtu_max = 1472;

    /* apply filters to the two arguments */
    if (max_packets < QUICLY_MIN_CWND)
        max_packets = QUICLY_MIN_CWND;
    if (max_udp_payload_size > mtu_max)
        max_udp_payload_size = mtu_max;

    uint64_t cwnd_bytes = (uint64_t)max_packets * max_udp_payload_size;
    return cwnd_bytes <= UINT32_MAX ? (uint32_t)cwnd_bytes : UINT32_MAX;
}