/src/openthread/third_party/tcplp/bsdtcp/tcp_sack.c
Line | Count | Source |
1 | | /*- |
2 | | * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995 |
3 | | * The Regents of the University of California. |
4 | | * All rights reserved. |
5 | | * |
6 | | * Redistribution and use in source and binary forms, with or without |
7 | | * modification, are permitted provided that the following conditions |
8 | | * are met: |
9 | | * 1. Redistributions of source code must retain the above copyright |
10 | | * notice, this list of conditions and the following disclaimer. |
11 | | * 2. Redistributions in binary form must reproduce the above copyright |
12 | | * notice, this list of conditions and the following disclaimer in the |
13 | | * documentation and/or other materials provided with the distribution. |
14 | | * 4. Neither the name of the University nor the names of its contributors |
15 | | * may be used to endorse or promote products derived from this software |
16 | | * without specific prior written permission. |
17 | | * |
18 | | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
19 | | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
20 | | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
21 | | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
22 | | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
23 | | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
24 | | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
25 | | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
26 | | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
27 | | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
28 | | * SUCH DAMAGE. |
29 | | * |
30 | | * @(#)tcp_sack.c 8.12 (Berkeley) 5/24/95 |
31 | | */ |
32 | | |
33 | | /*- |
34 | | * @@(#)COPYRIGHT 1.1 (NRL) 17 January 1995 |
35 | | * |
36 | | * NRL grants permission for redistribution and use in source and binary |
37 | | * forms, with or without modification, of the software and documentation |
38 | | * created at NRL provided that the following conditions are met: |
39 | | * |
40 | | * 1. Redistributions of source code must retain the above copyright |
41 | | * notice, this list of conditions and the following disclaimer. |
42 | | * 2. Redistributions in binary form must reproduce the above copyright |
43 | | * notice, this list of conditions and the following disclaimer in the |
44 | | * documentation and/or other materials provided with the distribution. |
45 | | * 3. All advertising materials mentioning features or use of this software |
46 | | * must display the following acknowledgements: |
47 | | * This product includes software developed by the University of |
48 | | * California, Berkeley and its contributors. |
49 | | * This product includes software developed at the Information |
50 | | * Technology Division, US Naval Research Laboratory. |
51 | | * 4. Neither the name of the NRL nor the names of its contributors |
52 | | * may be used to endorse or promote products derived from this software |
53 | | * without specific prior written permission. |
54 | | * |
55 | | * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS |
56 | | * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
57 | | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A |
58 | | * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NRL OR |
59 | | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
60 | | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
61 | | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
62 | | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF |
63 | | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
64 | | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
65 | | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
66 | | * |
67 | | * The views and conclusions contained in the software and documentation |
68 | | * are those of the authors and should not be interpreted as representing |
69 | | * official policies, either expressed or implied, of the US Naval |
70 | | * Research Laboratory (NRL). |
71 | | */ |
72 | | |
73 | | /* samkumar: Removed a bunch of #include's and VNET declarations. */ |
74 | | |
75 | | #include <strings.h> |
76 | | #include "tcp.h" |
77 | | #include "tcp_fsm.h" |
78 | | #include "tcp_seq.h" |
79 | | #include "tcp_timer.h" |
80 | | #include "tcp_var.h" |
81 | | #include "sys/queue.h" |
82 | | |
83 | | enum tcp_sack_consts { |
84 | | V_tcp_sack_maxholes = MAX_SACKHOLES |
85 | | }; |
86 | | |
87 | | /* |
88 | | * samkumar: Removed tcp_sack_globalmaxholes and tcp_sack_globalholes. |
89 | | * There used to be a counter, V_tcp_sack_globalholes, that kept track of the |
90 | | * total number of SACK holes allocated across all TCP connections. |
91 | | */ |
92 | | |
93 | | /* |
94 | | * samkumar: I added these three functions. The first, tcp_sack_init, |
95 | | * initializes a per-connection pool of SACK holes. |
96 | | * |
97 | | * The next two, sackhole_alloc and sackhole_free, allocate and deallocate SACK |
98 | | * holes from the pool. Previously, the FreeBSD code would allocate SACK holes |
99 | | * dynamically, for example, using the code |
100 | | * "hole = (struct sackhole *)uma_zalloc(V_sack_hole_zone, M_NOWAIT);". |
101 | | * TCPlp avoids dynamic memory allocation in the TCP implementation, so we |
102 | | * replace it with this per-connection pool. |
103 | | */ |
104 | | |
105 | | void |
106 | | tcp_sack_init(struct tcpcb* tp) |
107 | 1 | { |
108 | 1 | bmp_init(tp->sackhole_bmp, SACKHOLE_BMP_SIZE); |
109 | 1 | } |
110 | | |
111 | 0 | struct sackhole* sackhole_alloc(struct tcpcb* tp) { |
112 | 0 | size_t freeindex = bmp_countset(tp->sackhole_bmp, SACKHOLE_BMP_SIZE, 0, SACKHOLE_BMP_SIZE); |
113 | 0 | if (freeindex >= SACKHOLE_BMP_SIZE) { |
114 | 0 | return NULL; // all sackholes are allocated already! |
115 | 0 | } |
116 | 0 | bmp_setrange(tp->sackhole_bmp, freeindex, 1); |
117 | 0 | return &tp->sackhole_pool[freeindex]; |
118 | 0 | } |
119 | | |
120 | 0 | void sackhole_free(struct tcpcb* tp, struct sackhole* tofree) { |
121 | 0 | size_t freeindex = (size_t) (tofree - &tp->sackhole_pool[0]); |
122 | 0 | KASSERT(tofree == &tp->sackhole_pool[freeindex], ("sackhole pool unaligned")); |
123 | 0 | bmp_clrrange(tp->sackhole_bmp, freeindex, 1); |
124 | 0 | } |
125 | | |
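The three helpers above replace uma_zalloc()/uma_zfree() with a fixed, per-connection pool indexed by a bitmap. As a minimal sketch of that contract (a hypothetical example function, not part of the file; the bmp_* helpers come from TCPlp's bitmap utilities and their semantics are assumed from the calls above), a caller treats sackhole_alloc() like any allocator that can fail and pairs each successful allocation with a sackhole_free():

    /* Hypothetical illustration of the pool contract; not part of this file. */
    static void example_sackhole_pool_usage(struct tcpcb *tp)
    {
        struct sackhole *h;

        tcp_sack_init(tp);       /* mark every slot in sackhole_pool as free */

        h = sackhole_alloc(tp);  /* a free slot, or NULL once all SACKHOLE_BMP_SIZE slots are taken */
        if (h == NULL)
            return;              /* pool exhausted: proceed without a new hole */

        h->start = h->end = h->rxmit = 0;  /* the caller initializes the hole (cf. tcp_sackhole_alloc) */

        sackhole_free(tp, h);    /* clear the slot's bit so it can be reused */
    }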
126 | | /* |
127 | | * samkumar: Throughout the remaining functions, I have replaced allocation and |
128 | | * deallocation of SACK holes, which previously used uma_zalloc and uma_zfree, |
129 | | * with calls to sackhole_alloc and sackhole_free. I've also removed code for |
130 | | * locking, global stats collection, global SACK hole limits, and debugging |
131 | | * probes. |
132 | | */ |
133 | | |
134 | | |
135 | | /* |
136 | | * This function is called upon receipt of new valid data (while not in |
137 | | * header prediction mode), and it updates the ordered list of sacks. |
138 | | */ |
139 | | void |
140 | | tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end) |
141 | 0 | { |
142 | | /* |
143 | | * First reported block MUST be the most recent one. Subsequent |
144 | | * blocks SHOULD be in the order in which they arrived at the |
145 | | * receiver. These two conditions make the implementation fully |
146 | | * compliant with RFC 2018. |
147 | | */ |
148 | 0 | struct sackblk head_blk, saved_blks[MAX_SACK_BLKS]; |
149 | 0 | int num_head, num_saved, i; |
150 | | |
151 | | /* Check arguments. */ |
152 | 0 | KASSERT(SEQ_LT(rcv_start, rcv_end), ("rcv_start < rcv_end")); |
153 | | |
154 | | /* SACK block for the received segment. */ |
155 | 0 | head_blk.start = rcv_start; |
156 | 0 | head_blk.end = rcv_end; |
157 | | |
158 | | /* |
159 | | * Merge updated SACK blocks into head_blk, and save unchanged SACK |
160 | | * blocks into saved_blks[]. num_saved will have the number of the |
161 | | * saved SACK blocks. |
162 | | */ |
163 | 0 | num_saved = 0; |
164 | 0 | for (i = 0; i < tp->rcv_numsacks; i++) { |
165 | 0 | tcp_seq start = tp->sackblks[i].start; |
166 | 0 | tcp_seq end = tp->sackblks[i].end; |
167 | 0 | if (SEQ_GEQ(start, end) || SEQ_LEQ(start, tp->rcv_nxt)) { |
168 | | /* |
169 | | * Discard this SACK block. |
170 | | */ |
171 | 0 | } else if (SEQ_LEQ(head_blk.start, end) && |
172 | 0 | SEQ_GEQ(head_blk.end, start)) { |
173 | | /* |
174 | | * Merge this SACK block into head_blk. This SACK |
175 | | * block itself will be discarded. |
176 | | */ |
177 | 0 | if (SEQ_GT(head_blk.start, start)) |
178 | 0 | head_blk.start = start; |
179 | 0 | if (SEQ_LT(head_blk.end, end)) |
180 | 0 | head_blk.end = end; |
181 | 0 | } else { |
182 | | /* |
183 | | * Save this SACK block. |
184 | | */ |
185 | 0 | saved_blks[num_saved].start = start; |
186 | 0 | saved_blks[num_saved].end = end; |
187 | 0 | num_saved++; |
188 | 0 | } |
189 | 0 | } |
190 | | |
191 | | /* |
192 | | * Update SACK list in tp->sackblks[]. |
193 | | */ |
194 | 0 | num_head = 0; |
195 | 0 | if (SEQ_GT(head_blk.start, tp->rcv_nxt)) { |
196 | | /* |
197 | | * The received data segment is an out-of-order segment. Put |
198 | | * head_blk at the top of SACK list. |
199 | | */ |
200 | 0 | tp->sackblks[0] = head_blk; |
201 | 0 | num_head = 1; |
202 | | /* |
203 | | * If the number of saved SACK blocks exceeds its limit, |
204 | | * discard the last SACK block. |
205 | | */ |
206 | 0 | if (num_saved >= MAX_SACK_BLKS) |
207 | 0 | num_saved--; |
208 | 0 | } |
209 | 0 | if (num_saved > 0) { |
210 | | /* |
211 | | * Copy the saved SACK blocks back. |
212 | | */ |
213 | 0 | bcopy(saved_blks, &tp->sackblks[num_head], |
214 | 0 | sizeof(struct sackblk) * num_saved); |
215 | 0 | } |
216 | | |
217 | | /* Save the number of SACK blocks. */ |
218 | 0 | tp->rcv_numsacks = num_head + num_saved; |
219 | 0 | } |
220 | | |
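tcp_update_sack_list() is driven entirely by the receive path: each time new data is accepted, the newly covered range is reported so that sackblks[0] always holds the most recent block, which the output path then echoes in the SACK option. A hedged sketch of such a caller follows (hypothetical function and variable names; the real call site lives in the segment-processing code, not in this file):

    /* Hypothetical sketch of the receive-side caller; not part of this file. */
    static void example_report_received_data(struct tcpcb *tp, tcp_seq seg_start, uint32_t tlen)
    {
        /*
         * seg_start/tlen describe segment data that was just accepted.
         * Only non-empty ranges are reported, since tcp_update_sack_list
         * asserts that rcv_start < rcv_end.
         */
        if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT))
            tcp_update_sack_list(tp, seg_start, seg_start + tlen);
    }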
221 | | /* |
222 | | * Delete all receiver-side SACK information. |
223 | | */ |
224 | | void |
225 | | tcp_clean_sackreport(struct tcpcb *tp) |
226 | 0 | { |
227 | 0 | int i; |
228 | |
229 | 0 | tp->rcv_numsacks = 0; |
230 | 0 | for (i = 0; i < MAX_SACK_BLKS; i++) |
231 | 0 | tp->sackblks[i].start = tp->sackblks[i].end=0; |
232 | 0 | } |
233 | | |
234 | | /* |
235 | | * Allocate struct sackhole. |
236 | | */ |
237 | | static struct sackhole * |
238 | | tcp_sackhole_alloc(struct tcpcb *tp, tcp_seq start, tcp_seq end) |
239 | 0 | { |
240 | 0 | struct sackhole *hole; |
241 | | |
242 | | /* |
243 | | * samkumar: This if block used to also return NULL if |
244 | | * V_tcp_sack_globalholes >= V_tcp_sack_globalmaxholes |
245 | | * but I removed that check since it doesn't make sense to enforce a global |
246 | | * limit on SACK holes when we have a fixed-size pool (moreover, a separate |
247 | | * pool per connection). The per-connection limit is sufficient. |
248 | | */ |
249 | 0 | if (tp->snd_numholes >= V_tcp_sack_maxholes) { |
250 | 0 | return NULL; |
251 | 0 | } |
252 | | |
253 | 0 | hole = sackhole_alloc(tp); |
254 | 0 | if (hole == NULL) |
255 | 0 | return NULL; |
256 | | |
257 | 0 | hole->start = start; |
258 | 0 | hole->end = end; |
259 | 0 | hole->rxmit = start; |
260 | |
261 | 0 | tp->snd_numholes++; |
262 | |
263 | 0 | return hole; |
264 | 0 | } |
265 | | |
266 | | /* |
267 | | * Free struct sackhole. |
268 | | */ |
269 | | static void |
270 | | tcp_sackhole_free(struct tcpcb *tp, struct sackhole *hole) |
271 | 0 | { |
272 | 0 | sackhole_free(tp, hole); |
273 | |
274 | 0 | tp->snd_numholes--; |
275 | |
276 | 0 | KASSERT(tp->snd_numholes >= 0, ("tp->snd_numholes >= 0")); |
277 | 0 | } |
278 | | |
279 | | /* |
280 | | * Insert new SACK hole into scoreboard. |
281 | | */ |
282 | | static struct sackhole * |
283 | | tcp_sackhole_insert(struct tcpcb *tp, tcp_seq start, tcp_seq end, |
284 | | struct sackhole *after) |
285 | 0 | { |
286 | 0 | struct sackhole *hole; |
287 | | |
288 | | /* Allocate a new SACK hole. */ |
289 | 0 | hole = tcp_sackhole_alloc(tp, start, end); |
290 | 0 | if (hole == NULL) |
291 | 0 | return NULL; |
292 | | |
293 | | /* Insert the new SACK hole into scoreboard. */ |
294 | 0 | if (after != NULL) |
295 | 0 | TAILQ_INSERT_AFTER(&tp->snd_holes, after, hole, scblink); |
296 | 0 | else |
297 | 0 | TAILQ_INSERT_TAIL(&tp->snd_holes, hole, scblink); |
298 | | |
299 | | /* Update SACK hint. */ |
300 | 0 | if (tp->sackhint.nexthole == NULL) |
301 | 0 | tp->sackhint.nexthole = hole; |
302 | |
303 | 0 | return hole; |
304 | 0 | } |
305 | | |
306 | | /* |
307 | | * Remove SACK hole from scoreboard. |
308 | | */ |
309 | | static void |
310 | | tcp_sackhole_remove(struct tcpcb *tp, struct sackhole *hole) |
311 | 0 | { |
312 | | |
313 | | /* Update SACK hint. */ |
314 | 0 | if (tp->sackhint.nexthole == hole) |
315 | 0 | tp->sackhint.nexthole = TAILQ_NEXT(hole, scblink); |
316 | | |
317 | | /* Remove this SACK hole. */ |
318 | 0 | TAILQ_REMOVE(&tp->snd_holes, hole, scblink); |
319 | | |
320 | | /* Free this SACK hole. */ |
321 | 0 | tcp_sackhole_free(tp, hole); |
322 | 0 | } |
323 | | |
324 | | /* |
325 | | * Process cumulative ACK and the TCP SACK option to update the scoreboard. |
326 | | * tp->snd_holes is an ordered list of holes (oldest to newest, in terms of |
327 | | * the sequence space). |
328 | | */ |
329 | | void |
330 | | tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, tcp_seq th_ack) |
331 | 0 | { |
332 | 0 | struct sackhole *cur, *temp; |
333 | 0 | struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1], *sblkp; |
334 | 0 | int i, j, num_sack_blks; |
335 | |
336 | 0 | num_sack_blks = 0; |
337 | | /* |
338 | | * If SND.UNA will be advanced by SEG.ACK, and if SACK holes exist, |
339 | | * treat [SND.UNA, SEG.ACK) as if it is a SACK block. |
340 | | */ |
341 | 0 | if (SEQ_LT(tp->snd_una, th_ack) && !TAILQ_EMPTY(&tp->snd_holes)) { |
342 | 0 | sack_blocks[num_sack_blks].start = tp->snd_una; |
343 | 0 | sack_blocks[num_sack_blks++].end = th_ack; |
344 | 0 | } |
345 | | /* |
346 | | * Append received valid SACK blocks to sack_blocks[], but only if we |
347 | | * received new blocks from the other side. |
348 | | */ |
349 | 0 | if (to->to_flags & TOF_SACK) { |
350 | 0 | for (i = 0; i < to->to_nsacks; i++) { |
351 | 0 | bcopy((to->to_sacks + i * TCPOLEN_SACK), |
352 | 0 | &sack, sizeof(sack)); |
353 | 0 | sack.start = ntohl(sack.start); |
354 | 0 | sack.end = ntohl(sack.end); |
355 | 0 | if (SEQ_GT(sack.end, sack.start) && |
356 | 0 | SEQ_GT(sack.start, tp->snd_una) && |
357 | 0 | SEQ_GT(sack.start, th_ack) && |
358 | 0 | SEQ_LT(sack.start, tp->snd_max) && |
359 | 0 | SEQ_GT(sack.end, tp->snd_una) && |
360 | 0 | SEQ_LEQ(sack.end, tp->snd_max)) |
361 | 0 | sack_blocks[num_sack_blks++] = sack; |
362 | 0 | } |
363 | 0 | } |
364 | | /* |
365 | | * Return if SND.UNA is not advanced and no valid SACK block is |
366 | | * received. |
367 | | */ |
368 | 0 | if (num_sack_blks == 0) |
369 | 0 | return; |
370 | | |
371 | | /* |
372 | | * Sort the SACK blocks so we can update the scoreboard with just one |
373 | | * pass. The overhead of sorting upto 4+1 elements is less than |
374 | | * making upto 4+1 passes over the scoreboard. |
375 | | */ |
376 | 0 | for (i = 0; i < num_sack_blks; i++) { |
377 | 0 | for (j = i + 1; j < num_sack_blks; j++) { |
378 | 0 | if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { |
379 | 0 | sack = sack_blocks[i]; |
380 | 0 | sack_blocks[i] = sack_blocks[j]; |
381 | 0 | sack_blocks[j] = sack; |
382 | 0 | } |
383 | 0 | } |
384 | 0 | } |
385 | 0 | if (TAILQ_EMPTY(&tp->snd_holes)) |
386 | | /* |
387 | | * Empty scoreboard. Need to initialize snd_fack (it may be |
388 | | * uninitialized or have a bogus value). Scoreboard holes |
389 | | * (from the sack blocks received) are created later below |
390 | | * (in the logic that adds holes to the tail of the |
391 | | * scoreboard). |
392 | | */ |
393 | 0 | tp->snd_fack = SEQ_MAX(tp->snd_una, th_ack); |
394 | | /* |
395 | | * In the while-loop below, incoming SACK blocks (sack_blocks[]) and |
396 | | * SACK holes (snd_holes) are traversed from their tails with just |
397 | | * one pass in order to reduce the number of compares especially when |
398 | | * the bandwidth-delay product is large. |
399 | | * |
400 | | * Note: Typically, in the first RTT of SACK recovery, the highest |
401 | | * three or four SACK blocks with the same ack number are received. |
402 | | * In the second RTT, if retransmitted data segments are not lost, |
403 | | * the highest three or four SACK blocks with ack number advancing |
404 | | * are received. |
405 | | */ |
406 | 0 | sblkp = &sack_blocks[num_sack_blks - 1]; /* Last SACK block */ |
407 | 0 | tp->sackhint.last_sack_ack = sblkp->end; |
408 | 0 | if (SEQ_LT(tp->snd_fack, sblkp->start)) { |
409 | | /* |
410 | | * The highest SACK block is beyond fack. Append new SACK |
411 | | * hole at the tail. If the second or later highest SACK |
412 | | * blocks are also beyond the current fack, they will be |
413 | | * inserted by way of hole splitting in the while-loop below. |
414 | | */ |
415 | 0 | temp = tcp_sackhole_insert(tp, tp->snd_fack,sblkp->start,NULL); |
416 | 0 | if (temp != NULL) { |
417 | 0 | tp->snd_fack = sblkp->end; |
418 | | /* Go to the previous sack block. */ |
419 | 0 | sblkp--; |
420 | 0 | } else { |
421 | | /* |
422 | | * We failed to add a new hole based on the current |
423 | | * sack block. Skip over all the sack blocks that |
424 | | * fall completely to the right of snd_fack and |
425 | | * proceed to trim the scoreboard based on the |
426 | | * remaining sack blocks. This also trims the |
427 | | * scoreboard for th_ack (which is sack_blocks[0]). |
428 | | */ |
429 | 0 | while (sblkp >= sack_blocks && |
430 | 0 | SEQ_LT(tp->snd_fack, sblkp->start)) |
431 | 0 | sblkp--; |
432 | 0 | if (sblkp >= sack_blocks && |
433 | 0 | SEQ_LT(tp->snd_fack, sblkp->end)) |
434 | 0 | tp->snd_fack = sblkp->end; |
435 | 0 | } |
436 | 0 | } else if (SEQ_LT(tp->snd_fack, sblkp->end)) |
437 | | /* fack is advanced. */ |
438 | 0 | tp->snd_fack = sblkp->end; |
439 | | /* We must have at least one SACK hole in scoreboard. */ |
440 | 0 | KASSERT(!TAILQ_EMPTY(&tp->snd_holes), |
441 | 0 | ("SACK scoreboard must not be empty")); |
442 | 0 | cur = TAILQ_LAST(&tp->snd_holes, sackhole_head); /* Last SACK hole. */ |
443 | | /* |
444 | | * Since the incoming sack blocks are sorted, we can process them |
445 | | * making one sweep of the scoreboard. |
446 | | */ |
447 | 0 | while (sblkp >= sack_blocks && cur != NULL) { |
448 | 0 | if (SEQ_GEQ(sblkp->start, cur->end)) { |
449 | | /* |
450 | | * SACKs data beyond the current hole. Go to the |
451 | | * previous sack block. |
452 | | */ |
453 | 0 | sblkp--; |
454 | 0 | continue; |
455 | 0 | } |
456 | 0 | if (SEQ_LEQ(sblkp->end, cur->start)) { |
457 | | /* |
458 | | * SACKs data before the current hole. Go to the |
459 | | * previous hole. |
460 | | */ |
461 | 0 | cur = TAILQ_PREV(cur, sackhole_head, scblink); |
462 | 0 | continue; |
463 | 0 | } |
464 | 0 | tp->sackhint.sack_bytes_rexmit -= (cur->rxmit - cur->start); |
465 | 0 | KASSERT(tp->sackhint.sack_bytes_rexmit >= 0, |
466 | 0 | ("sackhint bytes rtx >= 0")); |
467 | 0 | if (SEQ_LEQ(sblkp->start, cur->start)) { |
468 | | /* Data acks at least the beginning of hole. */ |
469 | 0 | if (SEQ_GEQ(sblkp->end, cur->end)) { |
470 | | /* Acks entire hole, so delete hole. */ |
471 | 0 | temp = cur; |
472 | 0 | cur = TAILQ_PREV(cur, sackhole_head, scblink); |
473 | 0 | tcp_sackhole_remove(tp, temp); |
474 | | /* |
475 | | * The sack block may ack all or part of the |
476 | | * next hole too, so continue onto the next |
477 | | * hole. |
478 | | */ |
479 | 0 | continue; |
480 | 0 | } else { |
481 | | /* Move start of hole forward. */ |
482 | 0 | cur->start = sblkp->end; |
483 | 0 | cur->rxmit = SEQ_MAX(cur->rxmit, cur->start); |
484 | 0 | } |
485 | 0 | } else { |
486 | | /* Data acks at least the end of hole. */ |
487 | 0 | if (SEQ_GEQ(sblkp->end, cur->end)) { |
488 | | /* Move end of hole backward. */ |
489 | 0 | cur->end = sblkp->start; |
490 | 0 | cur->rxmit = SEQ_MIN(cur->rxmit, cur->end); |
491 | 0 | } else { |
492 | | /* |
493 | | * ACKs some data in middle of a hole; need |
494 | | * to split current hole |
495 | | */ |
496 | 0 | temp = tcp_sackhole_insert(tp, sblkp->end, |
497 | 0 | cur->end, cur); |
498 | 0 | if (temp != NULL) { |
499 | 0 | if (SEQ_GT(cur->rxmit, temp->rxmit)) { |
500 | 0 | temp->rxmit = cur->rxmit; |
501 | 0 | tp->sackhint.sack_bytes_rexmit |
502 | 0 | += (temp->rxmit |
503 | 0 | - temp->start); |
504 | 0 | } |
505 | 0 | cur->end = sblkp->start; |
506 | 0 | cur->rxmit = SEQ_MIN(cur->rxmit, |
507 | 0 | cur->end); |
508 | 0 | } |
509 | 0 | } |
510 | 0 | } |
511 | 0 | tp->sackhint.sack_bytes_rexmit += (cur->rxmit - cur->start); |
512 | | /* |
513 | | * Testing sblkp->start against cur->start tells us whether |
514 | | * we're done with the sack block or the sack hole. |
515 | | * Accordingly, we advance one or the other. |
516 | | */ |
517 | 0 | if (SEQ_LEQ(sblkp->start, cur->start)) |
518 | 0 | cur = TAILQ_PREV(cur, sackhole_head, scblink); |
519 | 0 | else |
520 | 0 | sblkp--; |
521 | 0 | } |
522 | 0 | } |
523 | | |
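To make the single-sweep update in tcp_sack_doack() concrete, here is a worked example with hypothetical sequence numbers, tracing only the trimming rules above:

    /*
     * Worked example (hypothetical numbers). Suppose the scoreboard holds the
     * holes [100,200) and [300,400) and a single SACK block [150,350) arrives:
     *
     *   - Last hole [300,400): sblkp->start (150) <= cur->start (300) but
     *     sblkp->end (350) < cur->end (400), so the hole's start moves
     *     forward and it becomes [350,400).
     *   - Previous hole [100,200): sblkp->start (150) > cur->start (100) and
     *     sblkp->end (350) >= cur->end (200), so the hole's end moves back
     *     and it becomes [100,150).
     *
     * The scoreboard is left with [100,150) and [350,400): exactly the bytes
     * that are still neither cumulatively ACKed nor SACKed.
     */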
524 | | /* |
525 | | * Free all SACK holes to clear the scoreboard. |
526 | | */ |
527 | | void |
528 | | tcp_free_sackholes(struct tcpcb *tp) |
529 | 0 | { |
530 | 0 | struct sackhole *q; |
531 | |
532 | 0 | while ((q = TAILQ_FIRST(&tp->snd_holes)) != NULL) |
533 | 0 | tcp_sackhole_remove(tp, q); |
534 | 0 | tp->sackhint.sack_bytes_rexmit = 0; |
535 | |
536 | 0 | KASSERT(tp->snd_numholes == 0, ("tp->snd_numholes == 0")); |
537 | 0 | KASSERT(tp->sackhint.nexthole == NULL, |
538 | 0 | ("tp->sackhint.nexthole == NULL")); |
539 | 0 | } |
540 | | |
541 | | /* |
542 | | * Partial ack handling within a sack recovery episode. Keeping this very |
543 | | * simple for now. When a partial ack is received, force snd_cwnd to a value |
544 | | * that will allow the sender to transmit no more than 2 segments. If |
545 | | * necessary, a better scheme can be adopted at a later point, but for now, |
546 | | * the goal is to prevent the sender from bursting a large amount of data in |
547 | | * the midst of sack recovery. |
548 | | */ |
549 | | void |
550 | | tcp_sack_partialack(struct tcpcb *tp, struct tcphdr *th) |
551 | 0 | { |
552 | 0 | int num_segs = 1; |
553 | |
554 | 0 | tcp_timer_activate(tp, TT_REXMT, 0); |
555 | 0 | tp->t_rtttime = 0; |
556 | | /* Send one or 2 segments based on how much new data was acked. */ |
557 | 0 | if ((BYTES_THIS_ACK(tp, th) / tp->t_maxseg) >= 2) |
558 | 0 | num_segs = 2; |
559 | 0 | tp->snd_cwnd = (tp->sackhint.sack_bytes_rexmit + |
560 | 0 | (tp->snd_nxt - tp->sack_newdata) + num_segs * tp->t_maxseg); |
561 | 0 | if (tp->snd_cwnd > tp->snd_ssthresh) |
562 | 0 | tp->snd_cwnd = tp->snd_ssthresh; |
563 | 0 | tp->t_flags |= TF_ACKNOW; |
564 | 0 | (void) tcplp_output(tp); |
565 | 0 | } |
566 | | |
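A concrete (hypothetical) instance of the clamp above:

    /*
     * Example with made-up numbers: t_maxseg = 1000 and a partial ACK that
     * advances snd_una by 2500 bytes. BYTES_THIS_ACK / t_maxseg = 2, so
     * num_segs = 2 and
     *
     *   snd_cwnd = sack_bytes_rexmit + (snd_nxt - sack_newdata) + 2 * 1000,
     *
     * further capped at snd_ssthresh. The sender can therefore add roughly
     * two segments to what is already outstanding in this recovery episode.
     */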
567 | | /* |
568 | | * samkumar: Removed this function for now, but I left it in as a comment |
569 | | * (using #if 0) in case it is useful later for debugging. |
570 | | */ |
571 | | #if 0 |
572 | | /* |
573 | | * Debug version of tcp_sack_output() that walks the scoreboard. Used for |
574 | | * now to sanity check the hint. |
575 | | */ |
576 | | static struct sackhole * |
577 | | tcp_sack_output_debug(struct tcpcb *tp, int *sack_bytes_rexmt) |
578 | | { |
579 | | struct sackhole *p; |
580 | | |
581 | | INP_WLOCK_ASSERT(tp->t_inpcb); |
582 | | *sack_bytes_rexmt = 0; |
583 | | TAILQ_FOREACH(p, &tp->snd_holes, scblink) { |
584 | | if (SEQ_LT(p->rxmit, p->end)) { |
585 | | if (SEQ_LT(p->rxmit, tp->snd_una)) {/* old SACK hole */ |
586 | | continue; |
587 | | } |
588 | | *sack_bytes_rexmt += (p->rxmit - p->start); |
589 | | break; |
590 | | } |
591 | | *sack_bytes_rexmt += (p->rxmit - p->start); |
592 | | } |
593 | | return (p); |
594 | | } |
595 | | #endif |
596 | | |
597 | | /* |
598 | | * Returns the next hole to retransmit and the number of retransmitted bytes |
599 | | * from the scoreboard. We store both the next hole and the number of |
600 | | * retransmitted bytes as hints (and recompute these on the fly upon SACK/ACK |
601 | | * reception). This avoids scoreboard traversals completely. |
602 | | * |
603 | | * The loop here will traverse *at most* one link. Here's the argument. For |
604 | | * the loop to traverse more than 1 link before finding the next hole to |
605 | | * retransmit, we would need to have at least 1 node following the current |
606 | | * hint with (rxmit == end). But, for all holes following the current hint, |
607 | | * (start == rxmit), since we have not yet retransmitted from them. |
608 | | * Therefore, in order to traverse more than 1 link in the loop below, we need to |
609 | | * have at least one node following the current hint with (start == rxmit == |
610 | | * end). But that can't happen: (start == end) means that all the data in |
611 | | * that hole has been sacked, in which case, the hole would have been removed |
612 | | * from the scoreboard. |
613 | | */ |
614 | | struct sackhole * |
615 | | tcp_sack_output(struct tcpcb *tp, int *sack_bytes_rexmt) |
616 | 0 | { |
617 | 0 | struct sackhole *hole = NULL; |
618 | |
619 | 0 | *sack_bytes_rexmt = tp->sackhint.sack_bytes_rexmit; |
620 | 0 | hole = tp->sackhint.nexthole; |
621 | 0 | if (hole == NULL || SEQ_LT(hole->rxmit, hole->end)) |
622 | 0 | goto out; |
623 | 0 | while ((hole = TAILQ_NEXT(hole, scblink)) != NULL) { |
624 | 0 | if (SEQ_LT(hole->rxmit, hole->end)) { |
625 | 0 | tp->sackhint.nexthole = hole; |
626 | 0 | break; |
627 | 0 | } |
628 | 0 | } |
629 | 0 | out: |
630 | 0 | return (hole); |
631 | 0 | } |
632 | | |
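During SACK recovery the output path consults this hint instead of walking the scoreboard. A hedged sketch of such a consumer follows (hypothetical function; the real logic lives in the output code, which also advances p->rxmit and tp->sackhint.sack_bytes_rexmit after sending):

    /* Hypothetical sketch of a scoreboard consumer; not part of this file. */
    static uint32_t example_next_sack_rexmit(struct tcpcb *tp)
    {
        struct sackhole *p;
        int sack_bytes_rexmt;
        uint32_t len;

        p = tcp_sack_output(tp, &sack_bytes_rexmt);
        if (p == NULL)
            return 0;            /* nothing SACK-eligible to retransmit */

        /* Retransmit starting at p->rxmit, but never past the hole's end. */
        len = p->end - p->rxmit;
        if (len > tp->t_maxseg)
            len = tp->t_maxseg;
        return len;
    }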
633 | | /* |
634 | | * After a timeout, the SACK list may be rebuilt. This SACK information |
635 | | * should be used to avoid retransmitting SACKed data. This function |
636 | | * traverses the SACK list to see if snd_nxt should be moved forward. |
637 | | */ |
638 | | void |
639 | | tcp_sack_adjust(struct tcpcb *tp) |
640 | 0 | { |
641 | 0 | struct sackhole *p, *cur = TAILQ_FIRST(&tp->snd_holes); |
642 | |
643 | 0 | if (cur == NULL) |
644 | 0 | return; /* No holes */ |
645 | 0 | if (SEQ_GEQ(tp->snd_nxt, tp->snd_fack)) |
646 | 0 | return; /* We're already beyond any SACKed blocks */ |
647 | | /*- |
648 | | * Two cases for which we want to advance snd_nxt: |
649 | | * i) snd_nxt lies between end of one hole and beginning of another |
650 | | * ii) snd_nxt lies between end of last hole and snd_fack |
651 | | */ |
652 | 0 | while ((p = TAILQ_NEXT(cur, scblink)) != NULL) { |
653 | 0 | if (SEQ_LT(tp->snd_nxt, cur->end)) |
654 | 0 | return; |
655 | 0 | if (SEQ_GEQ(tp->snd_nxt, p->start)) |
656 | 0 | cur = p; |
657 | 0 | else { |
658 | 0 | tp->snd_nxt = p->start; |
659 | 0 | return; |
660 | 0 | } |
661 | 0 | } |
662 | 0 | if (SEQ_LT(tp->snd_nxt, cur->end)) |
663 | 0 | return; |
664 | 0 | tp->snd_nxt = tp->snd_fack; |
665 | 0 | } |