/src/usrsctp/usrsctplib/netinet/sctp_indata.c
Line | Count | Source |
1 | | /*- |
2 | | * SPDX-License-Identifier: BSD-3-Clause |
3 | | * |
4 | | * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. |
5 | | * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. |
6 | | * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. |
7 | | * |
8 | | * Redistribution and use in source and binary forms, with or without |
9 | | * modification, are permitted provided that the following conditions are met: |
10 | | * |
11 | | * a) Redistributions of source code must retain the above copyright notice, |
12 | | * this list of conditions and the following disclaimer. |
13 | | * |
14 | | * b) Redistributions in binary form must reproduce the above copyright |
15 | | * notice, this list of conditions and the following disclaimer in |
16 | | * the documentation and/or other materials provided with the distribution. |
17 | | * |
18 | | * c) Neither the name of Cisco Systems, Inc. nor the names of its |
19 | | * contributors may be used to endorse or promote products derived |
20 | | * from this software without specific prior written permission. |
21 | | * |
22 | | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
23 | | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, |
24 | | * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
25 | | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
26 | | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
27 | | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
28 | | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
29 | | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
30 | | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
31 | | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
32 | | * THE POSSIBILITY OF SUCH DAMAGE. |
33 | | */ |
34 | | |
35 | | #include <netinet/sctp_os.h> |
36 | | #if defined(__FreeBSD__) && !defined(__Userspace__) |
37 | | #include <sys/proc.h> |
38 | | #endif |
39 | | #include <netinet/sctp_var.h> |
40 | | #include <netinet/sctp_sysctl.h> |
41 | | #include <netinet/sctp_header.h> |
42 | | #include <netinet/sctp_pcb.h> |
43 | | #include <netinet/sctputil.h> |
44 | | #include <netinet/sctp_output.h> |
45 | | #include <netinet/sctp_uio.h> |
46 | | #include <netinet/sctp_auth.h> |
47 | | #include <netinet/sctp_timer.h> |
48 | | #include <netinet/sctp_asconf.h> |
49 | | #include <netinet/sctp_indata.h> |
50 | | #include <netinet/sctp_bsd_addr.h> |
51 | | #include <netinet/sctp_input.h> |
52 | | #include <netinet/sctp_crc32.h> |
53 | | #if defined(__FreeBSD__) && !defined(__Userspace__) |
54 | | #include <netinet/sctp_lock_bsd.h> |
55 | | #endif |
56 | | /* |
57 | | * NOTES: On the outbound side of things I need to check the sack timer to |
58 | | * see if I should generate a sack into the chunk queue (if I have data to |
59 | | * send that is and will be sending it .. for bundling. |
60 | | * |
61 | | * The callback in sctp_usrreq.c will get called when the socket is read from. |
62 | | * This will cause sctp_service_queues() to get called on the top entry in |
63 | | * the list. |
64 | | */ |
65 | | static uint32_t |
66 | | sctp_add_chk_to_control(struct sctp_queued_to_read *control, |
67 | | struct sctp_stream_in *strm, |
68 | | struct sctp_tcb *stcb, |
69 | | struct sctp_association *asoc, |
70 | | struct sctp_tmit_chunk *chk, int hold_rlock); |
71 | | |
72 | | void |
73 | | sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc) |
74 | 14.8k | { |
75 | 14.8k | asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc); |
76 | 14.8k | } |
77 | | |
78 | | /* Calculate what the rwnd would be */ |
79 | | uint32_t |
80 | | sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc) |
81 | 23.4k | { |
82 | 23.4k | uint32_t calc = 0; |
83 | | |
84 | | /* |
85 | | * This is really set wrong with respect to a 1-2-m socket. Since |
86 | | * the sb_cc is the count that everyone as put up. When we re-write |
87 | | * sctp_soreceive then we will fix this so that ONLY this |
88 | | * associations data is taken into account. |
89 | | */ |
90 | 23.4k | if (stcb->sctp_socket == NULL) { |
91 | 0 | return (calc); |
92 | 0 | } |
93 | | |
94 | 23.4k | KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0, |
95 | 23.4k | ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue)); |
96 | 23.4k | KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0, |
97 | 23.4k | ("size_on_all_streams is %u", asoc->size_on_all_streams)); |
98 | 23.4k | if (stcb->asoc.sb_cc == 0 && |
99 | 8.31k | asoc->cnt_on_reasm_queue == 0 && |
100 | 7.98k | asoc->cnt_on_all_streams == 0) { |
101 | | /* Full rwnd granted */ |
102 | 7.89k | calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND); |
103 | 7.89k | return (calc); |
104 | 7.89k | } |
105 | | /* get actual space */ |
106 | 15.5k | calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv); |
107 | | /* |
108 | | * take out what has NOT been put on socket queue and we yet hold |
109 | | * for putting up. |
110 | | */ |
111 | 15.5k | calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue + |
112 | 15.5k | asoc->cnt_on_reasm_queue * MSIZE)); |
113 | 15.5k | calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams + |
114 | 15.5k | asoc->cnt_on_all_streams * MSIZE)); |
115 | 15.5k | if (calc == 0) { |
116 | | /* out of space */ |
117 | 1.18k | return (calc); |
118 | 1.18k | } |
119 | | |
120 | | /* what is the overhead of all these rwnd's */ |
121 | 14.3k | calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len); |
122 | | /* If the window gets too small due to ctrl-stuff, reduce it |
123 | | * to 1, even it is 0. SWS engaged |
124 | | */ |
125 | 14.3k | if (calc < stcb->asoc.my_rwnd_control_len) { |
126 | 6.95k | calc = 1; |
127 | 6.95k | } |
128 | 14.3k | return (calc); |
129 | 15.5k | } |
130 | | |
131 | | /* |
132 | | * Build out our readq entry based on the incoming packet. |
133 | | */ |
134 | | struct sctp_queued_to_read * |
135 | | sctp_build_readq_entry(struct sctp_tcb *stcb, |
136 | | struct sctp_nets *net, |
137 | | uint32_t tsn, uint32_t ppid, |
138 | | uint32_t context, uint16_t sid, |
139 | | uint32_t mid, uint8_t flags, |
140 | | struct mbuf *dm) |
141 | 693k | { |
142 | 693k | struct sctp_queued_to_read *read_queue_e = NULL; |
143 | | |
144 | 693k | sctp_alloc_a_readq(stcb, read_queue_e); |
145 | 693k | if (read_queue_e == NULL) { |
146 | 0 | goto failed_build; |
147 | 0 | } |
148 | 693k | memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read)); |
149 | 693k | read_queue_e->sinfo_stream = sid; |
150 | 693k | read_queue_e->sinfo_flags = (flags << 8); |
151 | 693k | read_queue_e->sinfo_ppid = ppid; |
152 | 693k | read_queue_e->sinfo_context = context; |
153 | 693k | read_queue_e->sinfo_tsn = tsn; |
154 | 693k | read_queue_e->sinfo_cumtsn = tsn; |
155 | 693k | read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb); |
156 | 693k | read_queue_e->mid = mid; |
157 | 693k | read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff; |
158 | 693k | TAILQ_INIT(&read_queue_e->reasm); |
159 | 693k | read_queue_e->whoFrom = net; |
160 | 693k | atomic_add_int(&net->ref_count, 1); |
161 | 693k | read_queue_e->data = dm; |
162 | 693k | read_queue_e->stcb = stcb; |
163 | 693k | read_queue_e->port_from = stcb->rport; |
164 | 693k | if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { |
165 | 0 | read_queue_e->do_not_ref_stcb = 1; |
166 | 0 | } |
167 | 693k | failed_build: |
168 | 693k | return (read_queue_e); |
169 | 693k | } |
170 | | |
171 | | struct mbuf * |
172 | | sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo) |
173 | 0 | { |
174 | 0 | struct sctp_extrcvinfo *seinfo; |
175 | 0 | struct sctp_sndrcvinfo *outinfo; |
176 | 0 | struct sctp_rcvinfo *rcvinfo; |
177 | 0 | struct sctp_nxtinfo *nxtinfo; |
178 | | #if defined(_WIN32) |
179 | | WSACMSGHDR *cmh; |
180 | | #else |
181 | 0 | struct cmsghdr *cmh; |
182 | 0 | #endif |
183 | 0 | struct mbuf *ret; |
184 | 0 | int len; |
185 | 0 | int use_extended; |
186 | 0 | int provide_nxt; |
187 | |
|
188 | 0 | if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && |
189 | 0 | sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && |
190 | 0 | sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { |
191 | | /* user does not want any ancillary data */ |
192 | 0 | return (NULL); |
193 | 0 | } |
194 | | |
195 | 0 | len = 0; |
196 | 0 | if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) { |
197 | 0 | len += CMSG_SPACE(sizeof(struct sctp_rcvinfo)); |
198 | 0 | } |
199 | 0 | seinfo = (struct sctp_extrcvinfo *)sinfo; |
200 | 0 | if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) && |
201 | 0 | (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) { |
202 | 0 | provide_nxt = 1; |
203 | 0 | len += CMSG_SPACE(sizeof(struct sctp_nxtinfo)); |
204 | 0 | } else { |
205 | 0 | provide_nxt = 0; |
206 | 0 | } |
207 | 0 | if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) { |
208 | 0 | if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) { |
209 | 0 | use_extended = 1; |
210 | 0 | len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo)); |
211 | 0 | } else { |
212 | 0 | use_extended = 0; |
213 | 0 | len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo)); |
214 | 0 | } |
215 | 0 | } else { |
216 | 0 | use_extended = 0; |
217 | 0 | } |
218 | |
|
219 | 0 | ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); |
220 | 0 | if (ret == NULL) { |
221 | | /* No space */ |
222 | 0 | return (ret); |
223 | 0 | } |
224 | 0 | SCTP_BUF_LEN(ret) = 0; |
225 | | |
226 | | /* We need a CMSG header followed by the struct */ |
227 | | #if defined(_WIN32) |
228 | | cmh = mtod(ret, WSACMSGHDR *); |
229 | | #else |
230 | 0 | cmh = mtod(ret, struct cmsghdr *); |
231 | 0 | #endif |
232 | | /* |
233 | | * Make sure that there is no un-initialized padding between |
234 | | * the cmsg header and cmsg data and after the cmsg data. |
235 | | */ |
236 | 0 | memset(cmh, 0, len); |
237 | 0 | if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) { |
238 | 0 | cmh->cmsg_level = IPPROTO_SCTP; |
239 | 0 | cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo)); |
240 | 0 | cmh->cmsg_type = SCTP_RCVINFO; |
241 | 0 | rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh); |
242 | 0 | rcvinfo->rcv_sid = sinfo->sinfo_stream; |
243 | 0 | rcvinfo->rcv_ssn = sinfo->sinfo_ssn; |
244 | 0 | rcvinfo->rcv_flags = sinfo->sinfo_flags; |
245 | 0 | rcvinfo->rcv_ppid = sinfo->sinfo_ppid; |
246 | 0 | rcvinfo->rcv_tsn = sinfo->sinfo_tsn; |
247 | 0 | rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn; |
248 | 0 | rcvinfo->rcv_context = sinfo->sinfo_context; |
249 | 0 | rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id; |
250 | | #if defined(_WIN32) |
251 | | cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo))); |
252 | | #else |
253 | 0 | cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo))); |
254 | 0 | #endif |
255 | 0 | SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo)); |
256 | 0 | } |
257 | 0 | if (provide_nxt) { |
258 | 0 | cmh->cmsg_level = IPPROTO_SCTP; |
259 | 0 | cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo)); |
260 | 0 | cmh->cmsg_type = SCTP_NXTINFO; |
261 | 0 | nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh); |
262 | 0 | nxtinfo->nxt_sid = seinfo->serinfo_next_stream; |
263 | 0 | nxtinfo->nxt_flags = 0; |
264 | 0 | if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) { |
265 | 0 | nxtinfo->nxt_flags |= SCTP_UNORDERED; |
266 | 0 | } |
267 | 0 | if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) { |
268 | 0 | nxtinfo->nxt_flags |= SCTP_NOTIFICATION; |
269 | 0 | } |
270 | 0 | if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) { |
271 | 0 | nxtinfo->nxt_flags |= SCTP_COMPLETE; |
272 | 0 | } |
273 | 0 | nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid; |
274 | 0 | nxtinfo->nxt_length = seinfo->serinfo_next_length; |
275 | 0 | nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid; |
276 | | #if defined(_WIN32) |
277 | | cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo))); |
278 | | #else |
279 | 0 | cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo))); |
280 | 0 | #endif |
281 | 0 | SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo)); |
282 | 0 | } |
283 | 0 | if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) { |
284 | 0 | cmh->cmsg_level = IPPROTO_SCTP; |
285 | 0 | outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh); |
286 | 0 | if (use_extended) { |
287 | 0 | cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo)); |
288 | 0 | cmh->cmsg_type = SCTP_EXTRCV; |
289 | 0 | memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo)); |
290 | 0 | SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo)); |
291 | 0 | } else { |
292 | 0 | cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); |
293 | 0 | cmh->cmsg_type = SCTP_SNDRCV; |
294 | 0 | *outinfo = *sinfo; |
295 | 0 | SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo)); |
296 | 0 | } |
297 | 0 | } |
298 | 0 | return (ret); |
299 | 0 | } |
300 | | |
301 | | static void |
302 | | sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn) |
303 | 2.30k | { |
304 | 2.30k | uint32_t gap, i; |
305 | 2.30k | int in_r, in_nr; |
306 | | |
307 | 2.30k | if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { |
308 | 0 | return; |
309 | 0 | } |
310 | 2.30k | if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) { |
311 | | /* |
312 | | * This tsn is behind the cum ack and thus we don't |
313 | | * need to worry about it being moved from one to the other. |
314 | | */ |
315 | 3 | return; |
316 | 3 | } |
317 | 2.29k | SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn); |
318 | 2.29k | in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap); |
319 | 2.29k | in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap); |
320 | 2.29k | KASSERT(in_r || in_nr, ("%s: Things are really messed up now", __func__)); |
321 | 2.29k | if (!in_nr) { |
322 | 2.24k | SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); |
323 | 2.24k | if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { |
324 | 1.31k | asoc->highest_tsn_inside_nr_map = tsn; |
325 | 1.31k | } |
326 | 2.24k | } |
327 | 2.29k | if (in_r) { |
328 | 2.24k | SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap); |
329 | 2.24k | if (tsn == asoc->highest_tsn_inside_map) { |
330 | | /* We must back down to see what the new highest is. */ |
331 | 1.72M | for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) { |
332 | 1.72M | SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn); |
333 | 1.72M | if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) { |
334 | 272 | asoc->highest_tsn_inside_map = i; |
335 | 272 | break; |
336 | 272 | } |
337 | 1.72M | } |
338 | 1.33k | if (!SCTP_TSN_GE(i, asoc->mapping_array_base_tsn)) { |
339 | 1.06k | asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1; |
340 | 1.06k | } |
341 | 1.33k | } |
342 | 2.24k | } |
343 | 2.29k | } |
344 | | |
345 | | static int |
346 | | sctp_place_control_in_stream(struct sctp_stream_in *strm, |
347 | | struct sctp_association *asoc, |
348 | | struct sctp_queued_to_read *control) |
349 | 3.61k | { |
350 | 3.61k | struct sctp_queued_to_read *at; |
351 | 3.61k | struct sctp_readhead *q; |
352 | 3.61k | uint8_t flags, unordered; |
353 | | |
354 | 3.61k | flags = (control->sinfo_flags >> 8); |
355 | 3.61k | unordered = flags & SCTP_DATA_UNORDERED; |
356 | 3.61k | if (unordered) { |
357 | 1.52k | q = &strm->uno_inqueue; |
358 | 1.52k | if (asoc->idata_supported == 0) { |
359 | 738 | if (!TAILQ_EMPTY(q)) { |
360 | | /* Only one stream can be here in old style -- abort */ |
361 | 3 | return (-1); |
362 | 3 | } |
363 | 738 | TAILQ_INSERT_TAIL(q, control, next_instrm); |
364 | 735 | control->on_strm_q = SCTP_ON_UNORDERED; |
365 | 735 | return (0); |
366 | 738 | } |
367 | 2.08k | } else { |
368 | 2.08k | q = &strm->inqueue; |
369 | 2.08k | } |
370 | 2.87k | if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { |
371 | 662 | control->end_added = 1; |
372 | 662 | control->first_frag_seen = 1; |
373 | 662 | control->last_frag_seen = 1; |
374 | 662 | } |
375 | 2.87k | if (TAILQ_EMPTY(q)) { |
376 | | /* Empty queue */ |
377 | 1.71k | TAILQ_INSERT_HEAD(q, control, next_instrm); |
378 | 1.71k | if (unordered) { |
379 | 315 | control->on_strm_q = SCTP_ON_UNORDERED; |
380 | 1.40k | } else { |
381 | 1.40k | control->on_strm_q = SCTP_ON_ORDERED; |
382 | 1.40k | } |
383 | 1.71k | return (0); |
384 | 1.71k | } else { |
385 | 2.68k | TAILQ_FOREACH(at, q, next_instrm) { |
386 | 2.68k | if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) { |
387 | | /* |
388 | | * one in queue is bigger than the |
389 | | * new one, insert before this one |
390 | | */ |
391 | 769 | TAILQ_INSERT_BEFORE(at, control, next_instrm); |
392 | 769 | if (unordered) { |
393 | 373 | control->on_strm_q = SCTP_ON_UNORDERED; |
394 | 396 | } else { |
395 | 396 | control->on_strm_q = SCTP_ON_ORDERED; |
396 | 396 | } |
397 | 769 | break; |
398 | 1.91k | } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) { |
399 | | /* |
400 | | * Gak, He sent me a duplicate msg |
401 | | * id number?? return -1 to abort. |
402 | | */ |
403 | 1 | return (-1); |
404 | 1.91k | } else { |
405 | 1.91k | if (TAILQ_NEXT(at, next_instrm) == NULL) { |
406 | | /* |
407 | | * We are at the end, insert |
408 | | * it after this one |
409 | | */ |
410 | 387 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { |
411 | 0 | sctp_log_strm_del(control, at, |
412 | 0 | SCTP_STR_LOG_FROM_INSERT_TL); |
413 | 0 | } |
414 | 387 | TAILQ_INSERT_AFTER(q, at, control, next_instrm); |
415 | 387 | if (unordered) { |
416 | 96 | control->on_strm_q = SCTP_ON_UNORDERED; |
417 | 291 | } else { |
418 | 291 | control->on_strm_q = SCTP_ON_ORDERED; |
419 | 291 | } |
420 | 387 | break; |
421 | 387 | } |
422 | 1.91k | } |
423 | 2.68k | } |
424 | 1.15k | } |
425 | 1.15k | return (0); |
426 | 2.87k | } |
427 | | |
428 | | static void |
429 | | sctp_abort_in_reasm(struct sctp_tcb *stcb, |
430 | | struct sctp_queued_to_read *control, |
431 | | struct sctp_tmit_chunk *chk, |
432 | | int *abort_flag, int opspot) |
433 | 290 | { |
434 | 290 | char msg[SCTP_DIAG_INFO_LEN]; |
435 | 290 | struct mbuf *oper; |
436 | | |
437 | 290 | if (stcb->asoc.idata_supported) { |
438 | 238 | SCTP_SNPRINTF(msg, sizeof(msg), |
439 | 238 | "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x", |
440 | 238 | opspot, |
441 | 238 | control->fsn_included, |
442 | 238 | chk->rec.data.tsn, |
443 | 238 | chk->rec.data.sid, |
444 | 238 | chk->rec.data.fsn, chk->rec.data.mid); |
445 | 238 | } else { |
446 | 52 | SCTP_SNPRINTF(msg, sizeof(msg), |
447 | 52 | "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x", |
448 | 52 | opspot, |
449 | 52 | control->fsn_included, |
450 | 52 | chk->rec.data.tsn, |
451 | 52 | chk->rec.data.sid, |
452 | 52 | chk->rec.data.fsn, |
453 | 52 | (uint16_t)chk->rec.data.mid); |
454 | 52 | } |
455 | 290 | oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
456 | 290 | sctp_m_freem(chk->data); |
457 | 290 | chk->data = NULL; |
458 | 290 | sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
459 | 290 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1; |
460 | 290 | sctp_abort_an_association(stcb->sctp_ep, stcb, oper, false, SCTP_SO_NOT_LOCKED); |
461 | 290 | *abort_flag = 1; |
462 | 290 | } |
463 | | |
464 | | static void |
465 | | sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control) |
466 | 4 | { |
467 | | /* |
468 | | * The control could not be placed and must be cleaned. |
469 | | */ |
470 | 4 | struct sctp_tmit_chunk *chk, *nchk; |
471 | 4 | TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) { |
472 | 0 | TAILQ_REMOVE(&control->reasm, chk, sctp_next); |
473 | 0 | if (chk->data) |
474 | 0 | sctp_m_freem(chk->data); |
475 | 0 | chk->data = NULL; |
476 | 0 | sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
477 | 0 | } |
478 | 4 | sctp_free_remote_addr(control->whoFrom); |
479 | 4 | if (control->data) { |
480 | 4 | sctp_m_freem(control->data); |
481 | 4 | control->data = NULL; |
482 | 4 | } |
483 | 4 | sctp_free_a_readq(stcb, control); |
484 | 4 | } |
485 | | |
486 | | /* |
487 | | * Queue the chunk either right into the socket buffer if it is the next one |
488 | | * to go OR put it in the correct place in the delivery queue. If we do |
489 | | * append to the so_buf, keep doing so until we are out of order as |
490 | | * long as the control's entered are non-fragmented. |
491 | | */ |
492 | | static void |
493 | | sctp_queue_data_to_stream(struct sctp_tcb *stcb, |
494 | | struct sctp_association *asoc, |
495 | | struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm) |
496 | 936 | { |
497 | | /* |
498 | | * FIX-ME maybe? What happens when the ssn wraps? If we are getting |
499 | | * all the data in one stream this could happen quite rapidly. One |
500 | | * could use the TSN to keep track of things, but this scheme breaks |
501 | | * down in the other type of stream usage that could occur. Send a |
502 | | * single msg to stream 0, send 4Billion messages to stream 1, now |
503 | | * send a message to stream 0. You have a situation where the TSN |
504 | | * has wrapped but not in the stream. Is this worth worrying about |
505 | | * or should we just change our queue sort at the bottom to be by |
506 | | * TSN. |
507 | | * |
508 | | * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2 |
509 | | * with TSN 1? If the peer is doing some sort of funky TSN/SSN |
510 | | * assignment this could happen... and I don't see how this would be |
511 | | * a violation. So for now I am undecided an will leave the sort by |
512 | | * SSN alone. Maybe a hybrid approach is the answer |
513 | | * |
514 | | */ |
515 | 936 | struct sctp_queued_to_read *at; |
516 | 936 | int queue_needed; |
517 | 936 | uint32_t nxt_todel; |
518 | 936 | struct mbuf *op_err; |
519 | 936 | struct sctp_stream_in *strm; |
520 | 936 | char msg[SCTP_DIAG_INFO_LEN]; |
521 | | |
522 | 936 | strm = &asoc->strmin[control->sinfo_stream]; |
523 | 936 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { |
524 | 0 | sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD); |
525 | 0 | } |
526 | 936 | if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) { |
527 | | /* The incoming sseq is behind where we last delivered? */ |
528 | 88 | SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n", |
529 | 88 | strm->last_mid_delivered, control->mid); |
530 | | /* |
531 | | * throw it in the stream so it gets cleaned up in |
532 | | * association destruction |
533 | | */ |
534 | 88 | TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm); |
535 | 88 | if (asoc->idata_supported) { |
536 | 31 | SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x", |
537 | 31 | strm->last_mid_delivered, control->sinfo_tsn, |
538 | 31 | control->sinfo_stream, control->mid); |
539 | 57 | } else { |
540 | 57 | SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", |
541 | 57 | (uint16_t)strm->last_mid_delivered, |
542 | 57 | control->sinfo_tsn, |
543 | 57 | control->sinfo_stream, |
544 | 57 | (uint16_t)control->mid); |
545 | 57 | } |
546 | 88 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
547 | 88 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2; |
548 | 88 | sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
549 | 88 | *abort_flag = 1; |
550 | 88 | return; |
551 | 88 | } |
552 | 848 | queue_needed = 1; |
553 | 848 | asoc->size_on_all_streams += control->length; |
554 | 848 | sctp_ucount_incr(asoc->cnt_on_all_streams); |
555 | 848 | nxt_todel = strm->last_mid_delivered + 1; |
556 | 848 | if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) { |
557 | | #if defined(__APPLE__) && !defined(__Userspace__) |
558 | | struct socket *so; |
559 | | |
560 | | so = SCTP_INP_SO(stcb->sctp_ep); |
561 | | atomic_add_int(&stcb->asoc.refcnt, 1); |
562 | | SCTP_TCB_UNLOCK(stcb); |
563 | | SCTP_SOCKET_LOCK(so, 1); |
564 | | SCTP_TCB_LOCK(stcb); |
565 | | atomic_subtract_int(&stcb->asoc.refcnt, 1); |
566 | | if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { |
567 | | SCTP_SOCKET_UNLOCK(so, 1); |
568 | | return; |
569 | | } |
570 | | #endif |
571 | | /* can be delivered right away? */ |
572 | 159 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { |
573 | 0 | sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL); |
574 | 0 | } |
575 | | /* EY it wont be queued if it could be delivered directly */ |
576 | 159 | queue_needed = 0; |
577 | 159 | if (asoc->size_on_all_streams >= control->length) { |
578 | 159 | asoc->size_on_all_streams -= control->length; |
579 | 159 | } else { |
580 | 0 | #ifdef INVARIANTS |
581 | 0 | panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); |
582 | | #else |
583 | | asoc->size_on_all_streams = 0; |
584 | | #endif |
585 | 0 | } |
586 | 159 | sctp_ucount_decr(asoc->cnt_on_all_streams); |
587 | 159 | strm->last_mid_delivered++; |
588 | 159 | sctp_mark_non_revokable(asoc, control->sinfo_tsn); |
589 | 159 | sctp_add_to_readq(stcb->sctp_ep, stcb, |
590 | 159 | control, |
591 | 159 | &stcb->sctp_socket->so_rcv, 1, |
592 | 159 | SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED); |
593 | 159 | TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) { |
594 | | /* all delivered */ |
595 | 57 | nxt_todel = strm->last_mid_delivered + 1; |
596 | 57 | if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) && |
597 | 23 | (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) { |
598 | 15 | if (control->on_strm_q == SCTP_ON_ORDERED) { |
599 | 15 | TAILQ_REMOVE(&strm->inqueue, control, next_instrm); |
600 | 15 | if (asoc->size_on_all_streams >= control->length) { |
601 | 15 | asoc->size_on_all_streams -= control->length; |
602 | 15 | } else { |
603 | 0 | #ifdef INVARIANTS |
604 | 0 | panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); |
605 | | #else |
606 | | asoc->size_on_all_streams = 0; |
607 | | #endif |
608 | 0 | } |
609 | 15 | sctp_ucount_decr(asoc->cnt_on_all_streams); |
610 | 15 | #ifdef INVARIANTS |
611 | 15 | } else { |
612 | 0 | panic("Huh control: %p is on_strm_q: %d", |
613 | 0 | control, control->on_strm_q); |
614 | 0 | #endif |
615 | 0 | } |
616 | 15 | control->on_strm_q = 0; |
617 | 15 | strm->last_mid_delivered++; |
618 | | /* |
619 | | * We ignore the return of deliver_data here |
620 | | * since we always can hold the chunk on the |
621 | | * d-queue. And we have a finite number that |
622 | | * can be delivered from the strq. |
623 | | */ |
624 | 15 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { |
625 | 0 | sctp_log_strm_del(control, NULL, |
626 | 0 | SCTP_STR_LOG_FROM_IMMED_DEL); |
627 | 0 | } |
628 | 15 | sctp_mark_non_revokable(asoc, control->sinfo_tsn); |
629 | 15 | sctp_add_to_readq(stcb->sctp_ep, stcb, |
630 | 15 | control, |
631 | 15 | &stcb->sctp_socket->so_rcv, 1, |
632 | 15 | SCTP_READ_LOCK_NOT_HELD, |
633 | 15 | SCTP_SO_LOCKED); |
634 | 15 | continue; |
635 | 42 | } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) { |
636 | 8 | *need_reasm = 1; |
637 | 8 | } |
638 | 42 | break; |
639 | 57 | } |
640 | | #if defined(__APPLE__) && !defined(__Userspace__) |
641 | | SCTP_SOCKET_UNLOCK(so, 1); |
642 | | #endif |
643 | 159 | } |
644 | 848 | if (queue_needed) { |
645 | | /* |
646 | | * Ok, we did not deliver this guy, find the correct place |
647 | | * to put it on the queue. |
648 | | */ |
649 | 689 | if (sctp_place_control_in_stream(strm, asoc, control)) { |
650 | 4 | SCTP_SNPRINTF(msg, sizeof(msg), |
651 | 4 | "Queue to str MID: %u duplicate", control->mid); |
652 | 4 | sctp_clean_up_control(stcb, control); |
653 | 4 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
654 | 4 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3; |
655 | 4 | sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
656 | 4 | *abort_flag = 1; |
657 | 4 | } |
658 | 689 | } |
659 | 848 | } |
660 | | |
661 | | static void |
662 | | sctp_setup_tail_pointer(struct sctp_queued_to_read *control) |
663 | 1.76k | { |
664 | 1.76k | struct mbuf *m, *prev = NULL; |
665 | 1.76k | struct sctp_tcb *stcb; |
666 | | |
667 | 1.76k | stcb = control->stcb; |
668 | 1.76k | control->held_length = 0; |
669 | 1.76k | control->length = 0; |
670 | 1.76k | m = control->data; |
671 | 4.11k | while (m) { |
672 | 2.34k | if (SCTP_BUF_LEN(m) == 0) { |
673 | | /* Skip mbufs with NO length */ |
674 | 413 | if (prev == NULL) { |
675 | | /* First one */ |
676 | 413 | control->data = sctp_m_free(m); |
677 | 413 | m = control->data; |
678 | 413 | } else { |
679 | 0 | SCTP_BUF_NEXT(prev) = sctp_m_free(m); |
680 | 0 | m = SCTP_BUF_NEXT(prev); |
681 | 0 | } |
682 | 413 | if (m == NULL) { |
683 | 0 | control->tail_mbuf = prev; |
684 | 0 | } |
685 | 413 | continue; |
686 | 413 | } |
687 | 1.93k | prev = m; |
688 | 1.93k | atomic_add_int(&control->length, SCTP_BUF_LEN(m)); |
689 | 1.93k | if (control->on_read_q) { |
690 | | /* |
691 | | * On read queue so we must increment the |
692 | | * SB stuff, we assume caller has done any locks of SB. |
693 | | */ |
694 | 0 | sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m); |
695 | 0 | } |
696 | 1.93k | m = SCTP_BUF_NEXT(m); |
697 | 1.93k | } |
698 | 1.76k | if (prev) { |
699 | 1.76k | control->tail_mbuf = prev; |
700 | 1.76k | } |
701 | 1.76k | } |
702 | | |
703 | | static void |
704 | | sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added) |
705 | 404 | { |
706 | 404 | struct mbuf *prev=NULL; |
707 | 404 | struct sctp_tcb *stcb; |
708 | | |
709 | 404 | stcb = control->stcb; |
710 | 404 | if (stcb == NULL) { |
711 | 0 | #ifdef INVARIANTS |
712 | 0 | panic("Control broken"); |
713 | | #else |
714 | | return; |
715 | | #endif |
716 | 0 | } |
717 | 404 | if (control->tail_mbuf == NULL) { |
718 | | /* TSNH */ |
719 | 0 | sctp_m_freem(control->data); |
720 | 0 | control->data = m; |
721 | 0 | sctp_setup_tail_pointer(control); |
722 | 0 | return; |
723 | 0 | } |
724 | 404 | control->tail_mbuf->m_next = m; |
725 | 952 | while (m) { |
726 | 548 | if (SCTP_BUF_LEN(m) == 0) { |
727 | | /* Skip mbufs with NO length */ |
728 | 74 | if (prev == NULL) { |
729 | | /* First one */ |
730 | 74 | control->tail_mbuf->m_next = sctp_m_free(m); |
731 | 74 | m = control->tail_mbuf->m_next; |
732 | 74 | } else { |
733 | 0 | SCTP_BUF_NEXT(prev) = sctp_m_free(m); |
734 | 0 | m = SCTP_BUF_NEXT(prev); |
735 | 0 | } |
736 | 74 | if (m == NULL) { |
737 | 0 | control->tail_mbuf = prev; |
738 | 0 | } |
739 | 74 | continue; |
740 | 74 | } |
741 | 474 | prev = m; |
742 | 474 | if (control->on_read_q) { |
743 | | /* |
744 | | * On read queue so we must increment the |
745 | | * SB stuff, we assume caller has done any locks of SB. |
746 | | */ |
747 | 0 | sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m); |
748 | 0 | } |
749 | 474 | *added += SCTP_BUF_LEN(m); |
750 | 474 | atomic_add_int(&control->length, SCTP_BUF_LEN(m)); |
751 | 474 | m = SCTP_BUF_NEXT(m); |
752 | 474 | } |
753 | 404 | if (prev) { |
754 | 404 | control->tail_mbuf = prev; |
755 | 404 | } |
756 | 404 | } |
757 | | |
758 | | static void |
759 | | sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control) |
760 | 145 | { |
761 | 145 | memset(nc, 0, sizeof(struct sctp_queued_to_read)); |
762 | 145 | nc->sinfo_stream = control->sinfo_stream; |
763 | 145 | nc->mid = control->mid; |
764 | 145 | TAILQ_INIT(&nc->reasm); |
765 | 145 | nc->top_fsn = control->top_fsn; |
766 | 145 | nc->mid = control->mid; |
767 | 145 | nc->sinfo_flags = control->sinfo_flags; |
768 | 145 | nc->sinfo_ppid = control->sinfo_ppid; |
769 | 145 | nc->sinfo_context = control->sinfo_context; |
770 | 145 | nc->fsn_included = 0xffffffff; |
771 | 145 | nc->sinfo_tsn = control->sinfo_tsn; |
772 | 145 | nc->sinfo_cumtsn = control->sinfo_cumtsn; |
773 | 145 | nc->sinfo_assoc_id = control->sinfo_assoc_id; |
774 | 145 | nc->whoFrom = control->whoFrom; |
775 | 145 | atomic_add_int(&nc->whoFrom->ref_count, 1); |
776 | 145 | nc->stcb = control->stcb; |
777 | 145 | nc->port_from = control->port_from; |
778 | 145 | nc->do_not_ref_stcb = control->do_not_ref_stcb; |
779 | 145 | } |
780 | | |
781 | | static int |
782 | | sctp_handle_old_unordered_data(struct sctp_tcb *stcb, |
783 | | struct sctp_association *asoc, |
784 | | struct sctp_stream_in *strm, |
785 | | struct sctp_queued_to_read *control, |
786 | | uint32_t pd_point, |
787 | | int inp_read_lock_held) |
788 | 2.46k | { |
789 | | /* Special handling for the old un-ordered data chunk. |
790 | | * All the chunks/TSN's go to mid 0. So |
791 | | * we have to do the old style watching to see |
792 | | * if we have it all. If you return one, no other |
793 | | * control entries on the un-ordered queue will |
794 | | * be looked at. In theory there should be no others |
795 | | * entries in reality, unless the guy is sending both |
796 | | * unordered NDATA and unordered DATA... |
797 | | */ |
798 | 2.46k | struct sctp_tmit_chunk *chk, *lchk, *tchk; |
799 | 2.46k | uint32_t fsn; |
800 | 2.46k | struct sctp_queued_to_read *nc; |
801 | 2.46k | int cnt_added; |
802 | | |
803 | 2.46k | if (control->first_frag_seen == 0) { |
804 | | /* Nothing we can do, we have not seen the first piece yet */ |
805 | 995 | return (1); |
806 | 995 | } |
807 | | /* Collapse any we can */ |
808 | 1.46k | cnt_added = 0; |
809 | 1.54k | restart: |
810 | 1.54k | fsn = control->fsn_included + 1; |
811 | | /* Now what can we add? */ |
812 | 1.54k | TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) { |
813 | 1.23k | if (chk->rec.data.fsn == fsn) { |
814 | | /* Ok lets add it */ |
815 | 348 | sctp_alloc_a_readq(stcb, nc); |
816 | 348 | if (nc == NULL) { |
817 | 0 | break; |
818 | 0 | } |
819 | 348 | memset(nc, 0, sizeof(struct sctp_queued_to_read)); |
820 | 348 | TAILQ_REMOVE(&control->reasm, chk, sctp_next); |
821 | 348 | sctp_add_chk_to_control(control, strm, stcb, asoc, chk, inp_read_lock_held); |
822 | 348 | fsn++; |
823 | 348 | cnt_added++; |
824 | 348 | chk = NULL; |
825 | 348 | if (control->end_added) { |
826 | | /* We are done */ |
827 | 180 | if (!TAILQ_EMPTY(&control->reasm)) { |
828 | | /* |
829 | | * Ok we have to move anything left on |
830 | | * the control queue to a new control. |
831 | | */ |
832 | 145 | sctp_build_readq_entry_from_ctl(nc, control); |
833 | 145 | tchk = TAILQ_FIRST(&control->reasm); |
834 | 145 | if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { |
835 | 116 | TAILQ_REMOVE(&control->reasm, tchk, sctp_next); |
836 | 116 | if (asoc->size_on_reasm_queue >= tchk->send_size) { |
837 | 116 | asoc->size_on_reasm_queue -= tchk->send_size; |
838 | 116 | } else { |
839 | 0 | #ifdef INVARIANTS |
840 | 0 | panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size); |
841 | | #else |
842 | | asoc->size_on_reasm_queue = 0; |
843 | | #endif |
844 | 0 | } |
845 | 116 | sctp_ucount_decr(asoc->cnt_on_reasm_queue); |
846 | 116 | nc->first_frag_seen = 1; |
847 | 116 | nc->fsn_included = tchk->rec.data.fsn; |
848 | 116 | nc->data = tchk->data; |
849 | 116 | nc->sinfo_ppid = tchk->rec.data.ppid; |
850 | 116 | nc->sinfo_tsn = tchk->rec.data.tsn; |
851 | 116 | sctp_mark_non_revokable(asoc, tchk->rec.data.tsn); |
852 | 116 | tchk->data = NULL; |
853 | 116 | sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED); |
854 | 116 | sctp_setup_tail_pointer(nc); |
855 | 116 | tchk = TAILQ_FIRST(&control->reasm); |
856 | 116 | } |
857 | | /* Spin the rest onto the queue */ |
858 | 578 | while (tchk) { |
859 | 433 | TAILQ_REMOVE(&control->reasm, tchk, sctp_next); |
860 | 433 | TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next); |
861 | 433 | tchk = TAILQ_FIRST(&control->reasm); |
862 | 433 | } |
863 | | /* Now lets add it to the queue after removing control */ |
864 | 145 | TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm); |
865 | 145 | nc->on_strm_q = SCTP_ON_UNORDERED; |
866 | 145 | if (control->on_strm_q) { |
867 | 145 | TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); |
868 | 145 | control->on_strm_q = 0; |
869 | 145 | } |
870 | 145 | } |
871 | 180 | if (control->pdapi_started) { |
872 | 0 | strm->pd_api_started = 0; |
873 | 0 | control->pdapi_started = 0; |
874 | 0 | } |
875 | 180 | if (control->on_strm_q) { |
876 | 35 | TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); |
877 | 35 | control->on_strm_q = 0; |
878 | 35 | SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); |
879 | 35 | } |
880 | 180 | if (control->on_read_q == 0) { |
881 | 180 | sctp_add_to_readq(stcb->sctp_ep, stcb, control, |
882 | 180 | &stcb->sctp_socket->so_rcv, control->end_added, |
883 | 180 | inp_read_lock_held, SCTP_SO_NOT_LOCKED); |
884 | 180 | #if defined(__Userspace__) |
885 | 180 | } else { |
886 | 0 | sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held); |
887 | 0 | #endif |
888 | 0 | } |
889 | 180 | sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); |
890 | 180 | if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) { |
891 | | /* Switch to the new guy and continue */ |
892 | 76 | control = nc; |
893 | 76 | goto restart; |
894 | 104 | } else { |
895 | 104 | if (nc->on_strm_q == 0) { |
896 | 35 | sctp_free_a_readq(stcb, nc); |
897 | 35 | } |
898 | 104 | } |
899 | 104 | return (1); |
900 | 180 | } else { |
901 | 168 | sctp_free_a_readq(stcb, nc); |
902 | 168 | } |
903 | 883 | } else { |
904 | | /* Can't add more */ |
905 | 883 | break; |
906 | 883 | } |
907 | 1.23k | } |
908 | 1.36k | if (cnt_added && strm->pd_api_started) { |
909 | 0 | #if defined(__Userspace__) |
910 | 0 | sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held); |
911 | 0 | #endif |
912 | 0 | sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); |
913 | 0 | } |
914 | 1.36k | if ((control->length > pd_point) && (strm->pd_api_started == 0)) { |
915 | 0 | strm->pd_api_started = 1; |
916 | 0 | control->pdapi_started = 1; |
917 | 0 | sctp_add_to_readq(stcb->sctp_ep, stcb, control, |
918 | 0 | &stcb->sctp_socket->so_rcv, control->end_added, |
919 | 0 | inp_read_lock_held, SCTP_SO_NOT_LOCKED); |
920 | 0 | sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); |
921 | 0 | return (0); |
922 | 1.36k | } else { |
923 | 1.36k | return (1); |
924 | 1.36k | } |
925 | 1.36k | } |
926 | | |
927 | | static void |
928 | | sctp_inject_old_unordered_data(struct sctp_tcb *stcb, |
929 | | struct sctp_association *asoc, |
930 | | struct sctp_queued_to_read *control, |
931 | | struct sctp_tmit_chunk *chk, |
932 | | int *abort_flag) |
933 | 2.35k | { |
934 | 2.35k | struct sctp_tmit_chunk *at; |
935 | 2.35k | int inserted; |
936 | | /* |
937 | | * Here we need to place the chunk into the control structure |
938 | | * sorted in the correct order. |
939 | | */ |
940 | 2.35k | if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { |
941 | | /* Its the very first one. */ |
942 | 1.40k | SCTPDBG(SCTP_DEBUG_XXX, |
943 | 1.40k | "chunk is a first fsn: %u becomes fsn_included\n", |
944 | 1.40k | chk->rec.data.fsn); |
945 | 1.40k | at = TAILQ_FIRST(&control->reasm); |
946 | 1.40k | if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) { |
947 | | /* |
948 | | * The first chunk in the reassembly is |
949 | | * a smaller TSN than this one, even though |
950 | | * this has a first, it must be from a subsequent |
951 | | * msg. |
952 | | */ |
953 | 533 | goto place_chunk; |
954 | 533 | } |
955 | 876 | if (control->first_frag_seen) { |
956 | | /* |
957 | | * In old un-ordered we can reassembly on |
958 | | * one control multiple messages. As long |
959 | | * as the next FIRST is greater then the old |
960 | | * first (TSN i.e. FSN wise) |
961 | | */ |
962 | 370 | struct mbuf *tdata; |
963 | 370 | uint32_t tmp; |
964 | | |
965 | 370 | if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) { |
966 | | /* Easy way the start of a new guy beyond the lowest */ |
967 | 216 | goto place_chunk; |
968 | 216 | } |
969 | 154 | if ((chk->rec.data.fsn == control->fsn_included) || |
970 | 154 | (control->pdapi_started)) { |
971 | | /* |
972 | | * Ok this should not happen, if it does |
973 | | * we started the pd-api on the higher TSN (since |
974 | | * the equals part is a TSN failure it must be that). |
975 | | * |
976 | | * We are completely hosed in that case since I have |
977 | | * no way to recover. This really will only happen |
978 | | * if we can get more TSN's higher before the pd-api-point. |
979 | | */ |
980 | 0 | sctp_abort_in_reasm(stcb, control, chk, |
981 | 0 | abort_flag, |
982 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_4); |
983 | |
|
984 | 0 | return; |
985 | 0 | } |
986 | | /* |
987 | | * Ok we have two firsts and the one we just got |
988 | | * is smaller than the one we previously placed.. yuck! |
989 | | * We must swap them out. |
990 | | */ |
991 | | /* swap the mbufs */ |
992 | 154 | tdata = control->data; |
993 | 154 | control->data = chk->data; |
994 | 154 | chk->data = tdata; |
995 | | /* Save the lengths */ |
996 | 154 | chk->send_size = control->length; |
997 | | /* Recompute length of control and tail pointer */ |
998 | 154 | sctp_setup_tail_pointer(control); |
999 | | /* Fix the FSN included */ |
1000 | 154 | tmp = control->fsn_included; |
1001 | 154 | control->fsn_included = chk->rec.data.fsn; |
1002 | 154 | chk->rec.data.fsn = tmp; |
1003 | | /* Fix the TSN included */ |
1004 | 154 | tmp = control->sinfo_tsn; |
1005 | 154 | control->sinfo_tsn = chk->rec.data.tsn; |
1006 | 154 | chk->rec.data.tsn = tmp; |
1007 | | /* Fix the PPID included */ |
1008 | 154 | tmp = control->sinfo_ppid; |
1009 | 154 | control->sinfo_ppid = chk->rec.data.ppid; |
1010 | 154 | chk->rec.data.ppid = tmp; |
1011 | | /* Fix tail pointer */ |
1012 | 154 | goto place_chunk; |
1013 | 154 | } |
1014 | 506 | control->first_frag_seen = 1; |
1015 | 506 | control->fsn_included = chk->rec.data.fsn; |
1016 | 506 | control->top_fsn = chk->rec.data.fsn; |
1017 | 506 | control->sinfo_tsn = chk->rec.data.tsn; |
1018 | 506 | control->sinfo_ppid = chk->rec.data.ppid; |
1019 | 506 | control->data = chk->data; |
1020 | 506 | sctp_mark_non_revokable(asoc, chk->rec.data.tsn); |
1021 | 506 | chk->data = NULL; |
1022 | 506 | sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
1023 | 506 | sctp_setup_tail_pointer(control); |
1024 | 506 | return; |
1025 | 506 | } |
1026 | 1.84k | place_chunk: |
1027 | 1.84k | inserted = 0; |
1028 | 5.68k | TAILQ_FOREACH(at, &control->reasm, sctp_next) { |
1029 | 5.68k | if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) { |
1030 | | /* |
1031 | | * This one in queue is bigger than the new one, insert |
1032 | | * the new one before at. |
1033 | | */ |
1034 | 1.05k | asoc->size_on_reasm_queue += chk->send_size; |
1035 | 1.05k | sctp_ucount_incr(asoc->cnt_on_reasm_queue); |
1036 | 1.05k | inserted = 1; |
1037 | 1.05k | TAILQ_INSERT_BEFORE(at, chk, sctp_next); |
1038 | 1.05k | break; |
1039 | 4.62k | } else if (at->rec.data.fsn == chk->rec.data.fsn) { |
1040 | | /* |
1041 | | * They sent a duplicate fsn number. This |
1042 | | * really should not happen since the FSN is |
1043 | | * a TSN and it should have been dropped earlier. |
1044 | | */ |
1045 | 0 | sctp_abort_in_reasm(stcb, control, chk, |
1046 | 0 | abort_flag, |
1047 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_5); |
1048 | 0 | return; |
1049 | 0 | } |
1050 | 5.68k | } |
1051 | 1.84k | if (inserted == 0) { |
1052 | | /* Its at the end */ |
1053 | 792 | asoc->size_on_reasm_queue += chk->send_size; |
1054 | 792 | sctp_ucount_incr(asoc->cnt_on_reasm_queue); |
1055 | 792 | control->top_fsn = chk->rec.data.fsn; |
1056 | 792 | TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next); |
1057 | 792 | } |
1058 | 1.84k | } |
1059 | | |
1060 | | static int |
1061 | | sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc, |
1062 | | struct sctp_stream_in *strm, int inp_read_lock_held) |
1063 | 4.98k | { |
1064 | | /* |
1065 | | * Given a stream, strm, see if any of |
1066 | | * the SSN's on it that are fragmented |
1067 | | * are ready to deliver. If so go ahead |
1068 | | * and place them on the read queue. In |
1069 | | * so placing if we have hit the end, then |
1070 | | * we need to remove them from the stream's queue. |
1071 | | */ |
1072 | 4.98k | struct sctp_queued_to_read *control, *nctl = NULL; |
1073 | 4.98k | uint32_t next_to_del; |
1074 | 4.98k | uint32_t pd_point; |
1075 | 4.98k | int ret = 0; |
1076 | | |
1077 | 4.98k | if (stcb->sctp_socket) { |
1078 | 4.98k | pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT, |
1079 | 4.98k | stcb->sctp_ep->partial_delivery_point); |
1080 | 4.98k | } else { |
1081 | 0 | pd_point = stcb->sctp_ep->partial_delivery_point; |
1082 | 0 | } |
1083 | 4.98k | control = TAILQ_FIRST(&strm->uno_inqueue); |
1084 | | |
1085 | 4.98k | if ((control != NULL) && |
1086 | 3.46k | (asoc->idata_supported == 0)) { |
1087 | | /* Special handling needed for "old" data format */ |
1088 | 2.46k | if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) { |
1089 | 2.46k | goto done_un; |
1090 | 2.46k | } |
1091 | 2.46k | } |
1092 | 2.52k | if (strm->pd_api_started) { |
1093 | | /* Can't add more */ |
1094 | 0 | return (0); |
1095 | 0 | } |
1096 | 6.44k | while (control) { |
1097 | 3.91k | SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n", |
1098 | 3.91k | control, control->end_added, control->mid, control->top_fsn, control->fsn_included); |
1099 | 3.91k | nctl = TAILQ_NEXT(control, next_instrm); |
1100 | 3.91k | if (control->end_added) { |
1101 | | /* We just put the last bit on */ |
1102 | 17 | if (control->on_strm_q) { |
1103 | 17 | #ifdef INVARIANTS |
1104 | 17 | if (control->on_strm_q != SCTP_ON_UNORDERED) { |
1105 | 0 | panic("Huh control: %p on_q: %d -- not unordered?", |
1106 | 0 | control, control->on_strm_q); |
1107 | 0 | } |
1108 | 17 | #endif |
1109 | 17 | SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); |
1110 | 17 | TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); |
1111 | 17 | if (asoc->size_on_all_streams >= control->length) { |
1112 | 17 | asoc->size_on_all_streams -= control->length; |
1113 | 17 | } else { |
1114 | 0 | #ifdef INVARIANTS |
1115 | 0 | panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); |
1116 | | #else |
1117 | | asoc->size_on_all_streams = 0; |
1118 | | #endif |
1119 | 0 | } |
1120 | 17 | sctp_ucount_decr(asoc->cnt_on_all_streams); |
1121 | 17 | control->on_strm_q = 0; |
1122 | 17 | } |
1123 | 17 | if (control->on_read_q == 0) { |
1124 | 17 | sctp_add_to_readq(stcb->sctp_ep, stcb, |
1125 | 17 | control, |
1126 | 17 | &stcb->sctp_socket->so_rcv, control->end_added, |
1127 | 17 | inp_read_lock_held, SCTP_SO_NOT_LOCKED); |
1128 | 17 | } |
1129 | 3.90k | } else { |
1130 | | /* Can we do a PD-API for this un-ordered guy? */ |
1131 | 3.90k | if ((control->length >= pd_point) && (strm->pd_api_started == 0)) { |
1132 | 0 | strm->pd_api_started = 1; |
1133 | 0 | control->pdapi_started = 1; |
1134 | 0 | sctp_add_to_readq(stcb->sctp_ep, stcb, |
1135 | 0 | control, |
1136 | 0 | &stcb->sctp_socket->so_rcv, control->end_added, |
1137 | 0 | inp_read_lock_held, SCTP_SO_NOT_LOCKED); |
1138 | |
|
1139 | 0 | break; |
1140 | 0 | } |
1141 | 3.90k | } |
1142 | 3.91k | control = nctl; |
1143 | 3.91k | } |
1144 | 4.98k | done_un: |
1145 | 4.98k | control = TAILQ_FIRST(&strm->inqueue); |
1146 | 4.98k | if (strm->pd_api_started) { |
1147 | | /* Can't add more */ |
1148 | 0 | return (0); |
1149 | 0 | } |
1150 | 4.98k | if (control == NULL) { |
1151 | 2.37k | return (ret); |
1152 | 2.37k | } |
1153 | 2.61k | if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) { |
1154 | | /* Ok the guy at the top was being partially delivered |
1155 | | * completed, so we remove it. Note |
1156 | | * the pd_api flag was taken off when the |
1157 | | * chunk was merged on in sctp_queue_data_for_reasm below. |
1158 | | */ |
1159 | 260 | nctl = TAILQ_NEXT(control, next_instrm); |
1160 | 260 | SCTPDBG(SCTP_DEBUG_XXX, |
1161 | 260 | "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n", |
1162 | 260 | control, control->end_added, control->mid, |
1163 | 260 | control->top_fsn, control->fsn_included, |
1164 | 260 | strm->last_mid_delivered); |
1165 | 260 | if (control->end_added) { |
1166 | 17 | if (control->on_strm_q) { |
1167 | 17 | #ifdef INVARIANTS |
1168 | 17 | if (control->on_strm_q != SCTP_ON_ORDERED) { |
1169 | 0 | panic("Huh control: %p on_q: %d -- not ordered?", |
1170 | 0 | control, control->on_strm_q); |
1171 | 0 | } |
1172 | 17 | #endif |
1173 | 17 | SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); |
1174 | 17 | TAILQ_REMOVE(&strm->inqueue, control, next_instrm); |
1175 | 17 | if (asoc->size_on_all_streams >= control->length) { |
1176 | 17 | asoc->size_on_all_streams -= control->length; |
1177 | 17 | } else { |
1178 | 0 | #ifdef INVARIANTS |
1179 | 0 | panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); |
1180 | | #else |
1181 | | asoc->size_on_all_streams = 0; |
1182 | | #endif |
1183 | 0 | } |
1184 | 17 | sctp_ucount_decr(asoc->cnt_on_all_streams); |
1185 | 17 | control->on_strm_q = 0; |
1186 | 17 | } |
1187 | 17 | if (strm->pd_api_started && control->pdapi_started) { |
1188 | 0 | control->pdapi_started = 0; |
1189 | 0 | strm->pd_api_started = 0; |
1190 | 0 | } |
1191 | 17 | if (control->on_read_q == 0) { |
1192 | 17 | sctp_add_to_readq(stcb->sctp_ep, stcb, |
1193 | 17 | control, |
1194 | 17 | &stcb->sctp_socket->so_rcv, control->end_added, |
1195 | 17 | inp_read_lock_held, SCTP_SO_NOT_LOCKED); |
1196 | 17 | } |
1197 | 17 | control = nctl; |
1198 | 17 | } |
1199 | 260 | } |
1200 | 2.61k | if (strm->pd_api_started) { |
1201 | | /* Can't add more must have gotten an un-ordered above being partially delivered. */ |
1202 | 0 | return (0); |
1203 | 0 | } |
1204 | 2.63k | deliver_more: |
1205 | 2.63k | next_to_del = strm->last_mid_delivered + 1; |
1206 | 2.63k | if (control) { |
1207 | 2.61k | SCTPDBG(SCTP_DEBUG_XXX, |
1208 | 2.61k | "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n", |
1209 | 2.61k | control, control->end_added, control->mid, control->top_fsn, control->fsn_included, |
1210 | 2.61k | next_to_del); |
1211 | 2.61k | nctl = TAILQ_NEXT(control, next_instrm); |
1212 | 2.61k | if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) && |
1213 | 1.02k | (control->first_frag_seen)) { |
1214 | 411 | int done; |
1215 | | |
1216 | | /* Ok we can deliver it onto the stream. */ |
1217 | 411 | if (control->end_added) { |
1218 | | /* We are done with it afterwards */ |
1219 | 14 | if (control->on_strm_q) { |
1220 | 14 | #ifdef INVARIANTS |
1221 | 14 | if (control->on_strm_q != SCTP_ON_ORDERED) { |
1222 | 0 | panic("Huh control: %p on_q: %d -- not ordered?", |
1223 | 0 | control, control->on_strm_q); |
1224 | 0 | } |
1225 | 14 | #endif |
1226 | 14 | SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); |
1227 | 14 | TAILQ_REMOVE(&strm->inqueue, control, next_instrm); |
1228 | 14 | if (asoc->size_on_all_streams >= control->length) { |
1229 | 14 | asoc->size_on_all_streams -= control->length; |
1230 | 14 | } else { |
1231 | 0 | #ifdef INVARIANTS |
1232 | 0 | panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); |
1233 | | #else |
1234 | | asoc->size_on_all_streams = 0; |
1235 | | #endif |
1236 | 0 | } |
1237 | 14 | sctp_ucount_decr(asoc->cnt_on_all_streams); |
1238 | 14 | control->on_strm_q = 0; |
1239 | 14 | } |
1240 | 14 | ret++; |
1241 | 14 | } |
1242 | 411 | if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { |
1243 | | /* A singleton now slipping through - mark it non-revokable too */ |
1244 | 5 | sctp_mark_non_revokable(asoc, control->sinfo_tsn); |
1245 | 406 | } else if (control->end_added == 0) { |
1246 | | /* Check if we can defer adding until its all there */ |
1247 | 397 | if ((control->length < pd_point) || (strm->pd_api_started)) { |
1248 | | /* Don't need it or cannot add more (one being delivered that way) */ |
1249 | 397 | goto out; |
1250 | 397 | } |
1251 | 397 | } |
1252 | 14 | done = (control->end_added) && (control->last_frag_seen); |
1253 | 14 | if (control->on_read_q == 0) { |
1254 | 14 | if (!done) { |
1255 | 0 | if (asoc->size_on_all_streams >= control->length) { |
1256 | 0 | asoc->size_on_all_streams -= control->length; |
1257 | 0 | } else { |
1258 | 0 | #ifdef INVARIANTS |
1259 | 0 | panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); |
1260 | | #else |
1261 | | asoc->size_on_all_streams = 0; |
1262 | | #endif |
1263 | 0 | } |
1264 | 0 | strm->pd_api_started = 1; |
1265 | 0 | control->pdapi_started = 1; |
1266 | 0 | } |
1267 | 14 | sctp_add_to_readq(stcb->sctp_ep, stcb, |
1268 | 14 | control, |
1269 | 14 | &stcb->sctp_socket->so_rcv, control->end_added, |
1270 | 14 | inp_read_lock_held, SCTP_SO_NOT_LOCKED); |
1271 | 14 | } |
1272 | 14 | strm->last_mid_delivered = next_to_del; |
1273 | 14 | if (done) { |
1274 | 14 | control = nctl; |
1275 | 14 | goto deliver_more; |
1276 | 14 | } |
1277 | 14 | } |
1278 | 2.61k | } |
1279 | 2.61k | out: |
1280 | 2.61k | return (ret); |
1281 | 2.63k | } |
1282 | | |
1283 | | uint32_t |
1284 | | sctp_add_chk_to_control(struct sctp_queued_to_read *control, |
1285 | | struct sctp_stream_in *strm, |
1286 | | struct sctp_tcb *stcb, struct sctp_association *asoc, |
1287 | | struct sctp_tmit_chunk *chk, int hold_rlock) |
1288 | 404 | { |
1289 | | /* |
1290 | | * Given a control and a chunk, merge the |
1291 | | * data from the chk onto the control and free |
1292 | | * up the chunk resources. |
1293 | | */ |
1294 | 404 | uint32_t added = 0; |
1295 | 404 | bool i_locked = false; |
1296 | | |
1297 | 404 | if (control->on_read_q) { |
1298 | 0 | if (hold_rlock == 0) { |
1299 | | /* Its being pd-api'd so we must do some locks. */ |
1300 | 0 | SCTP_INP_READ_LOCK(stcb->sctp_ep); |
1301 | 0 | i_locked = true; |
1302 | 0 | } |
1303 | 0 | if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { |
1304 | 0 | goto out; |
1305 | 0 | } |
1306 | 0 | } |
1307 | 404 | if (control->data == NULL) { |
1308 | 0 | control->data = chk->data; |
1309 | 0 | sctp_setup_tail_pointer(control); |
1310 | 404 | } else { |
1311 | 404 | sctp_add_to_tail_pointer(control, chk->data, &added); |
1312 | 404 | } |
1313 | 404 | control->fsn_included = chk->rec.data.fsn; |
1314 | 404 | asoc->size_on_reasm_queue -= chk->send_size; |
1315 | 404 | sctp_ucount_decr(asoc->cnt_on_reasm_queue); |
1316 | 404 | sctp_mark_non_revokable(asoc, chk->rec.data.tsn); |
1317 | 404 | chk->data = NULL; |
1318 | 404 | if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { |
1319 | 123 | control->first_frag_seen = 1; |
1320 | 123 | control->sinfo_tsn = chk->rec.data.tsn; |
1321 | 123 | control->sinfo_ppid = chk->rec.data.ppid; |
1322 | 123 | } |
1323 | 404 | if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { |
1324 | | /* Its complete */ |
1325 | 206 | if ((control->on_strm_q) && (control->on_read_q)) { |
1326 | 0 | if (control->pdapi_started) { |
1327 | 0 | control->pdapi_started = 0; |
1328 | 0 | strm->pd_api_started = 0; |
1329 | 0 | } |
1330 | 0 | if (control->on_strm_q == SCTP_ON_UNORDERED) { |
1331 | | /* Unordered */ |
1332 | 0 | TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); |
1333 | 0 | control->on_strm_q = 0; |
1334 | 0 | } else if (control->on_strm_q == SCTP_ON_ORDERED) { |
1335 | | /* Ordered */ |
1336 | 0 | TAILQ_REMOVE(&strm->inqueue, control, next_instrm); |
1337 | | /* |
1338 | | * Don't need to decrement size_on_all_streams, |
1339 | | * since control is on the read queue. |
1340 | | */ |
1341 | 0 | sctp_ucount_decr(asoc->cnt_on_all_streams); |
1342 | 0 | control->on_strm_q = 0; |
1343 | 0 | #ifdef INVARIANTS |
1344 | 0 | } else if (control->on_strm_q) { |
1345 | 0 | panic("Unknown state on ctrl: %p on_strm_q: %d", control, |
1346 | 0 | control->on_strm_q); |
1347 | 0 | #endif |
1348 | 0 | } |
1349 | 0 | } |
1350 | 206 | control->end_added = 1; |
1351 | 206 | control->last_frag_seen = 1; |
1352 | 206 | } |
1353 | 404 | out: |
1354 | 404 | if (i_locked) { |
1355 | 0 | SCTP_INP_READ_UNLOCK(stcb->sctp_ep); |
1356 | 0 | } |
1357 | 404 | sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
1358 | 404 | return (added); |
1359 | 404 | } |
1360 | | |
1361 | | /* |
1362 | | * Dump onto the re-assembly queue, in its proper place. After dumping on the |
1363 | | * queue, see if anything can be delivered. If so pull it off (or as much as |
1364 | | * we can. If we run out of space then we must dump what we can and set the |
1365 | | * appropriate flag to say we queued what we could. |
1366 | | */ |
1367 | | static void |
1368 | | sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc, |
1369 | | struct sctp_queued_to_read *control, |
1370 | | struct sctp_tmit_chunk *chk, |
1371 | | int created_control, |
1372 | | int *abort_flag, uint32_t tsn) |
1373 | 5.27k | { |
1374 | 5.27k | uint32_t next_fsn; |
1375 | 5.27k | struct sctp_tmit_chunk *at, *nat; |
1376 | 5.27k | struct sctp_stream_in *strm; |
1377 | 5.27k | int do_wakeup, unordered; |
1378 | 5.27k | uint32_t lenadded; |
1379 | | |
1380 | 5.27k | strm = &asoc->strmin[control->sinfo_stream]; |
1381 | | /* |
1382 | | * For old un-ordered data chunks. |
1383 | | */ |
1384 | 5.27k | if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { |
1385 | 3.26k | unordered = 1; |
1386 | 3.26k | } else { |
1387 | 2.00k | unordered = 0; |
1388 | 2.00k | } |
1389 | | /* Must be added to the stream-in queue */ |
1390 | 5.27k | if (created_control) { |
1391 | 2.92k | if ((unordered == 0) || (asoc->idata_supported)) { |
1392 | 2.21k | sctp_ucount_incr(asoc->cnt_on_all_streams); |
1393 | 2.21k | } |
1394 | 2.92k | if (sctp_place_control_in_stream(strm, asoc, control)) { |
1395 | | /* Duplicate SSN? */ |
1396 | 0 | sctp_abort_in_reasm(stcb, control, chk, |
1397 | 0 | abort_flag, |
1398 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_6); |
1399 | 0 | sctp_clean_up_control(stcb, control); |
1400 | 0 | return; |
1401 | 0 | } |
1402 | 2.92k | if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) { |
1403 | | /* Ok we created this control and now |
1404 | | * lets validate that its legal i.e. there |
1405 | | * is a B bit set, if not and we have |
1406 | | * up to the cum-ack then its invalid. |
1407 | | */ |
1408 | 44 | if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) { |
1409 | 6 | sctp_abort_in_reasm(stcb, control, chk, |
1410 | 6 | abort_flag, |
1411 | 6 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_7); |
1412 | 6 | return; |
1413 | 6 | } |
1414 | 44 | } |
1415 | 2.92k | } |
1416 | 5.26k | if ((asoc->idata_supported == 0) && (unordered == 1)) { |
1417 | 2.35k | sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag); |
1418 | 2.35k | return; |
1419 | 2.35k | } |
1420 | | /* |
1421 | | * Ok we must queue the chunk into the reasembly portion: |
1422 | | * o if its the first it goes to the control mbuf. |
1423 | | * o if its not first but the next in sequence it goes to the control, |
1424 | | * and each succeeding one in order also goes. |
1425 | | * o if its not in order we place it on the list in its place. |
1426 | | */ |
1427 | 2.91k | if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { |
1428 | | /* Its the very first one. */ |
1429 | 991 | SCTPDBG(SCTP_DEBUG_XXX, |
1430 | 991 | "chunk is a first fsn: %u becomes fsn_included\n", |
1431 | 991 | chk->rec.data.fsn); |
1432 | 991 | if (control->first_frag_seen) { |
1433 | | /* |
1434 | | * Error on senders part, they either |
1435 | | * sent us two data chunks with FIRST, |
1436 | | * or they sent two un-ordered chunks that |
1437 | | * were fragmented at the same time in the same stream. |
1438 | | */ |
1439 | 3 | sctp_abort_in_reasm(stcb, control, chk, |
1440 | 3 | abort_flag, |
1441 | 3 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_8); |
1442 | 3 | return; |
1443 | 3 | } |
1444 | 988 | control->first_frag_seen = 1; |
1445 | 988 | control->sinfo_ppid = chk->rec.data.ppid; |
1446 | 988 | control->sinfo_tsn = chk->rec.data.tsn; |
1447 | 988 | control->fsn_included = chk->rec.data.fsn; |
1448 | 988 | control->data = chk->data; |
1449 | 988 | sctp_mark_non_revokable(asoc, chk->rec.data.tsn); |
1450 | 988 | chk->data = NULL; |
1451 | 988 | sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
1452 | 988 | sctp_setup_tail_pointer(control); |
1453 | 988 | asoc->size_on_all_streams += control->length; |
1454 | 1.92k | } else { |
1455 | | /* Place the chunk in our list */ |
1456 | 1.92k | int inserted=0; |
1457 | 1.92k | if (control->last_frag_seen == 0) { |
1458 | | /* Still willing to raise highest FSN seen */ |
1459 | 1.69k | if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) { |
1460 | 216 | SCTPDBG(SCTP_DEBUG_XXX, |
1461 | 216 | "We have a new top_fsn: %u\n", |
1462 | 216 | chk->rec.data.fsn); |
1463 | 216 | control->top_fsn = chk->rec.data.fsn; |
1464 | 216 | } |
1465 | 1.69k | if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { |
1466 | 605 | SCTPDBG(SCTP_DEBUG_XXX, |
1467 | 605 | "The last fsn is now in place fsn: %u\n", |
1468 | 605 | chk->rec.data.fsn); |
1469 | 605 | control->last_frag_seen = 1; |
1470 | 605 | if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) { |
1471 | 83 | SCTPDBG(SCTP_DEBUG_XXX, |
1472 | 83 | "New fsn: %u is not at top_fsn: %u -- abort\n", |
1473 | 83 | chk->rec.data.fsn, |
1474 | 83 | control->top_fsn); |
1475 | 83 | sctp_abort_in_reasm(stcb, control, chk, |
1476 | 83 | abort_flag, |
1477 | 83 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_9); |
1478 | 83 | return; |
1479 | 83 | } |
1480 | 605 | } |
1481 | 1.61k | if (asoc->idata_supported || control->first_frag_seen) { |
1482 | | /* |
1483 | | * For IDATA we always check since we know that |
1484 | | * the first fragment is 0. For old DATA we have |
1485 | | * to receive the first before we know the first FSN |
1486 | | * (which is the TSN). |
1487 | | */ |
1488 | 932 | if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) { |
1489 | | /* We have already delivered up to this so its a dup */ |
1490 | 83 | sctp_abort_in_reasm(stcb, control, chk, |
1491 | 83 | abort_flag, |
1492 | 83 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_10); |
1493 | 83 | return; |
1494 | 83 | } |
1495 | 932 | } |
1496 | 1.61k | } else { |
1497 | 227 | if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { |
1498 | | /* Second last? huh? */ |
1499 | 1 | SCTPDBG(SCTP_DEBUG_XXX, |
1500 | 1 | "Duplicate last fsn: %u (top: %u) -- abort\n", |
1501 | 1 | chk->rec.data.fsn, control->top_fsn); |
1502 | 1 | sctp_abort_in_reasm(stcb, control, |
1503 | 1 | chk, abort_flag, |
1504 | 1 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_11); |
1505 | 1 | return; |
1506 | 1 | } |
1507 | 226 | if (asoc->idata_supported || control->first_frag_seen) { |
1508 | | /* |
1509 | | * For IDATA we always check since we know that |
1510 | | * the first fragment is 0. For old DATA we have |
1511 | | * to receive the first before we know the first FSN |
1512 | | * (which is the TSN). |
1513 | | */ |
1514 | | |
1515 | 179 | if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) { |
1516 | | /* We have already delivered up to this so its a dup */ |
1517 | 47 | SCTPDBG(SCTP_DEBUG_XXX, |
1518 | 47 | "New fsn: %u is already seen in included_fsn: %u -- abort\n", |
1519 | 47 | chk->rec.data.fsn, control->fsn_included); |
1520 | 47 | sctp_abort_in_reasm(stcb, control, chk, |
1521 | 47 | abort_flag, |
1522 | 47 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_12); |
1523 | 47 | return; |
1524 | 47 | } |
1525 | 179 | } |
1526 | | /* validate not beyond top FSN if we have seen last one */ |
1527 | 179 | if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) { |
1528 | 65 | SCTPDBG(SCTP_DEBUG_XXX, |
1529 | 65 | "New fsn: %u is beyond or at top_fsn: %u -- abort\n", |
1530 | 65 | chk->rec.data.fsn, |
1531 | 65 | control->top_fsn); |
1532 | 65 | sctp_abort_in_reasm(stcb, control, chk, |
1533 | 65 | abort_flag, |
1534 | 65 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_13); |
1535 | 65 | return; |
1536 | 65 | } |
1537 | 179 | } |
1538 | | /* |
1539 | | * If we reach here, we need to place the |
1540 | | * new chunk in the reassembly for this |
1541 | | * control. |
1542 | | */ |
1543 | 1.64k | SCTPDBG(SCTP_DEBUG_XXX, |
1544 | 1.64k | "chunk is a not first fsn: %u needs to be inserted\n", |
1545 | 1.64k | chk->rec.data.fsn); |
1546 | 1.64k | TAILQ_FOREACH(at, &control->reasm, sctp_next) { |
1547 | 649 | if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) { |
1548 | 239 | if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { |
1549 | | /* Last not at the end? huh? */ |
1550 | 1 | SCTPDBG(SCTP_DEBUG_XXX, |
1551 | 1 | "Last fragment not last in list: -- abort\n"); |
1552 | 1 | sctp_abort_in_reasm(stcb, control, |
1553 | 1 | chk, abort_flag, |
1554 | 1 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_14); |
1555 | 1 | return; |
1556 | 1 | } |
1557 | | /* |
1558 | | * This one in queue is bigger than the new one, insert |
1559 | | * the new one before at. |
1560 | | */ |
1561 | 238 | SCTPDBG(SCTP_DEBUG_XXX, |
1562 | 238 | "Insert it before fsn: %u\n", |
1563 | 238 | at->rec.data.fsn); |
1564 | 238 | asoc->size_on_reasm_queue += chk->send_size; |
1565 | 238 | sctp_ucount_incr(asoc->cnt_on_reasm_queue); |
1566 | 238 | TAILQ_INSERT_BEFORE(at, chk, sctp_next); |
1567 | 238 | inserted = 1; |
1568 | 238 | break; |
1569 | 410 | } else if (at->rec.data.fsn == chk->rec.data.fsn) { |
1570 | | /* Gak, He sent me a duplicate str seq number */ |
1571 | | /* |
1572 | | * foo bar, I guess I will just free this new guy, |
1573 | | * should we abort too? FIX ME MAYBE? Or it COULD be |
1574 | | * that the SSN's have wrapped. Maybe I should |
1575 | | * compare to TSN somehow... sigh for now just blow |
1576 | | * away the chunk! |
1577 | | */ |
1578 | 1 | SCTPDBG(SCTP_DEBUG_XXX, |
1579 | 1 | "Duplicate to fsn: %u -- abort\n", |
1580 | 1 | at->rec.data.fsn); |
1581 | 1 | sctp_abort_in_reasm(stcb, control, |
1582 | 1 | chk, abort_flag, |
1583 | 1 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_15); |
1584 | 1 | return; |
1585 | 1 | } |
1586 | 649 | } |
1587 | 1.64k | if (inserted == 0) { |
1588 | | /* Goes on the end */ |
1589 | 1.40k | SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n", |
1590 | 1.40k | chk->rec.data.fsn); |
1591 | 1.40k | asoc->size_on_reasm_queue += chk->send_size; |
1592 | 1.40k | sctp_ucount_incr(asoc->cnt_on_reasm_queue); |
1593 | 1.40k | TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next); |
1594 | 1.40k | } |
1595 | 1.64k | } |
1596 | | /* |
1597 | | * Ok lets see if we can suck any up into the control |
1598 | | * structure that are in seq if it makes sense. |
1599 | | */ |
1600 | 2.62k | do_wakeup = 0; |
1601 | | /* |
1602 | | * If the first fragment has not been |
1603 | | * seen there is no sense in looking. |
1604 | | */ |
1605 | 2.62k | if (control->first_frag_seen) { |
1606 | 1.16k | next_fsn = control->fsn_included + 1; |
1607 | 1.16k | TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) { |
1608 | 222 | if (at->rec.data.fsn == next_fsn) { |
1609 | | /* We can add this one now to the control */ |
1610 | 56 | SCTPDBG(SCTP_DEBUG_XXX, |
1611 | 56 | "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n", |
1612 | 56 | control, at, |
1613 | 56 | at->rec.data.fsn, |
1614 | 56 | next_fsn, control->fsn_included); |
1615 | 56 | TAILQ_REMOVE(&control->reasm, at, sctp_next); |
1616 | 56 | lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD); |
1617 | 56 | if (control->on_read_q) { |
1618 | 0 | do_wakeup = 1; |
1619 | 56 | } else { |
1620 | | /* |
1621 | | * We only add to the size-on-all-streams |
1622 | | * if its not on the read q. The read q |
1623 | | * flag will cause a sballoc so its accounted |
1624 | | * for there. |
1625 | | */ |
1626 | 56 | asoc->size_on_all_streams += lenadded; |
1627 | 56 | } |
1628 | 56 | next_fsn++; |
1629 | 56 | if (control->end_added && control->pdapi_started) { |
1630 | 0 | if (strm->pd_api_started) { |
1631 | 0 | strm->pd_api_started = 0; |
1632 | 0 | control->pdapi_started = 0; |
1633 | 0 | } |
1634 | 0 | if (control->on_read_q == 0) { |
1635 | 0 | sctp_add_to_readq(stcb->sctp_ep, stcb, |
1636 | 0 | control, |
1637 | 0 | &stcb->sctp_socket->so_rcv, control->end_added, |
1638 | 0 | SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); |
1639 | 0 | } |
1640 | 0 | break; |
1641 | 0 | } |
1642 | 166 | } else { |
1643 | 166 | break; |
1644 | 166 | } |
1645 | 222 | } |
1646 | 1.16k | } |
1647 | 2.62k | if (do_wakeup) { |
1648 | 0 | #if defined(__Userspace__) |
1649 | 0 | sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD); |
1650 | 0 | #endif |
1651 | | /* Need to wakeup the reader */ |
1652 | 0 | sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); |
1653 | 0 | } |
1654 | 2.62k | } |
1655 | | |
1656 | | static struct sctp_queued_to_read * |
1657 | | sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported) |
1658 | 6.86k | { |
1659 | 6.86k | struct sctp_queued_to_read *control; |
1660 | | |
1661 | 6.86k | if (ordered) { |
1662 | 3.14k | TAILQ_FOREACH(control, &strm->inqueue, next_instrm) { |
1663 | 2.70k | if (SCTP_MID_EQ(idata_supported, control->mid, mid)) { |
1664 | 584 | break; |
1665 | 584 | } |
1666 | 2.70k | } |
1667 | 3.72k | } else { |
1668 | 3.72k | if (idata_supported) { |
1669 | 2.09k | TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) { |
1670 | 2.09k | if (SCTP_MID_EQ(idata_supported, control->mid, mid)) { |
1671 | 127 | break; |
1672 | 127 | } |
1673 | 2.09k | } |
1674 | 2.72k | } else { |
1675 | 2.72k | control = TAILQ_FIRST(&strm->uno_inqueue); |
1676 | 2.72k | } |
1677 | 3.72k | } |
1678 | 6.86k | return (control); |
1679 | 6.86k | } |
1680 | | |
1681 | | static int |
1682 | | sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, |
1683 | | struct mbuf **m, int offset, int chk_length, |
1684 | | struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag, |
1685 | | int *break_flag, int last_chunk, uint8_t chk_type) |
1686 | 14.1k | { |
1687 | 14.1k | struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */ |
1688 | 14.1k | struct sctp_stream_in *strm; |
1689 | 14.1k | uint32_t tsn, fsn, gap, mid; |
1690 | 14.1k | struct mbuf *dmbuf; |
1691 | 14.1k | int the_len; |
1692 | 14.1k | int need_reasm_check = 0; |
1693 | 14.1k | uint16_t sid; |
1694 | 14.1k | struct mbuf *op_err; |
1695 | 14.1k | char msg[SCTP_DIAG_INFO_LEN]; |
1696 | 14.1k | struct sctp_queued_to_read *control, *ncontrol; |
1697 | 14.1k | uint32_t ppid; |
1698 | 14.1k | uint8_t chk_flags; |
1699 | 14.1k | struct sctp_stream_reset_list *liste; |
1700 | 14.1k | int ordered; |
1701 | 14.1k | size_t clen; |
1702 | 14.1k | int created_control = 0; |
1703 | | |
1704 | 14.1k | if (chk_type == SCTP_IDATA) { |
1705 | 4.40k | struct sctp_idata_chunk *chunk, chunk_buf; |
1706 | | |
1707 | 4.40k | chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset, |
1708 | 4.40k | sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf); |
1709 | 4.40k | chk_flags = chunk->ch.chunk_flags; |
1710 | 4.40k | clen = sizeof(struct sctp_idata_chunk); |
1711 | 4.40k | tsn = ntohl(chunk->dp.tsn); |
1712 | 4.40k | sid = ntohs(chunk->dp.sid); |
1713 | 4.40k | mid = ntohl(chunk->dp.mid); |
1714 | 4.40k | if (chk_flags & SCTP_DATA_FIRST_FRAG) { |
1715 | 2.95k | fsn = 0; |
1716 | 2.95k | ppid = chunk->dp.ppid_fsn.ppid; |
1717 | 2.95k | } else { |
1718 | 1.45k | fsn = ntohl(chunk->dp.ppid_fsn.fsn); |
1719 | 1.45k | ppid = 0xffffffff; /* Use as an invalid value. */ |
1720 | 1.45k | } |
1721 | 9.71k | } else { |
1722 | 9.71k | struct sctp_data_chunk *chunk, chunk_buf; |
1723 | | |
1724 | 9.71k | chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset, |
1725 | 9.71k | sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf); |
1726 | 9.71k | chk_flags = chunk->ch.chunk_flags; |
1727 | 9.71k | clen = sizeof(struct sctp_data_chunk); |
1728 | 9.71k | tsn = ntohl(chunk->dp.tsn); |
1729 | 9.71k | sid = ntohs(chunk->dp.sid); |
1730 | 9.71k | mid = (uint32_t)(ntohs(chunk->dp.ssn)); |
1731 | 9.71k | fsn = tsn; |
1732 | 9.71k | ppid = chunk->dp.ppid; |
1733 | 9.71k | } |
1734 | 14.1k | if ((size_t)chk_length == clen) { |
1735 | | /* |
1736 | | * Need to send an abort since we had a |
1737 | | * empty data chunk. |
1738 | | */ |
1739 | 8 | op_err = sctp_generate_no_user_data_cause(tsn); |
1740 | 8 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16; |
1741 | 8 | sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
1742 | 8 | *abort_flag = 1; |
1743 | 8 | return (0); |
1744 | 8 | } |
1745 | 14.1k | if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) { |
1746 | 5.25k | asoc->send_sack = 1; |
1747 | 5.25k | } |
1748 | 14.1k | ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0); |
1749 | 14.1k | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { |
1750 | 0 | sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS); |
1751 | 0 | } |
1752 | 14.1k | if (stcb == NULL) { |
1753 | 0 | return (0); |
1754 | 0 | } |
1755 | 14.1k | SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn); |
1756 | 14.1k | if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) { |
1757 | | /* It is a duplicate */ |
1758 | 2.36k | SCTP_STAT_INCR(sctps_recvdupdata); |
1759 | 2.36k | if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { |
1760 | | /* Record a dup for the next outbound sack */ |
1761 | 1.64k | asoc->dup_tsns[asoc->numduptsns] = tsn; |
1762 | 1.64k | asoc->numduptsns++; |
1763 | 1.64k | } |
1764 | 2.36k | asoc->send_sack = 1; |
1765 | 2.36k | return (0); |
1766 | 2.36k | } |
1767 | | /* Calculate the number of TSN's between the base and this TSN */ |
1768 | 11.7k | SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn); |
1769 | 11.7k | if (gap >= (SCTP_MAPPING_ARRAY << 3)) { |
1770 | | /* Can't hold the bit in the mapping at max array, toss it */ |
1771 | 1.84k | return (0); |
1772 | 1.84k | } |
1773 | 9.90k | if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) { |
1774 | 2.35k | SCTP_TCB_LOCK_ASSERT(stcb); |
1775 | 2.35k | if (sctp_expand_mapping_array(asoc, gap)) { |
1776 | | /* Can't expand, drop it */ |
1777 | 0 | return (0); |
1778 | 0 | } |
1779 | 2.35k | } |
1780 | 9.90k | if (SCTP_TSN_GT(tsn, *high_tsn)) { |
1781 | 3.53k | *high_tsn = tsn; |
1782 | 3.53k | } |
1783 | | /* See if we have received this one already */ |
1784 | 9.90k | if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) || |
1785 | 8.24k | SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) { |
1786 | 2.68k | SCTP_STAT_INCR(sctps_recvdupdata); |
1787 | 2.68k | if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { |
1788 | | /* Record a dup for the next outbound sack */ |
1789 | 936 | asoc->dup_tsns[asoc->numduptsns] = tsn; |
1790 | 936 | asoc->numduptsns++; |
1791 | 936 | } |
1792 | 2.68k | asoc->send_sack = 1; |
1793 | 2.68k | return (0); |
1794 | 2.68k | } |
1795 | | /* |
1796 | | * Check to see about the GONE flag, duplicates would cause a sack |
1797 | | * to be sent up above |
1798 | | */ |
1799 | 7.21k | if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || |
1800 | 7.21k | (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || |
1801 | 7.21k | (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) { |
1802 | | /* |
1803 | | * wait a minute, this guy is gone, there is no longer a |
1804 | | * receiver. Send peer an ABORT! |
1805 | | */ |
1806 | 0 | op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); |
1807 | 0 | sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
1808 | 0 | *abort_flag = 1; |
1809 | 0 | return (0); |
1810 | 0 | } |
1811 | | /* |
1812 | | * Now before going further we see if there is room. If NOT then we |
1813 | | * MAY let one through only IF this TSN is the one we are waiting |
1814 | | * for on a partial delivery API. |
1815 | | */ |
1816 | | |
1817 | | /* Is the stream valid? */ |
1818 | 7.21k | if (sid >= asoc->streamincnt) { |
1819 | 351 | struct sctp_error_invalid_stream *cause; |
1820 | | |
1821 | 351 | op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream), |
1822 | 351 | 0, M_NOWAIT, 1, MT_DATA); |
1823 | 351 | if (op_err != NULL) { |
1824 | | /* add some space up front so prepend will work well */ |
1825 | 351 | SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); |
1826 | 351 | cause = mtod(op_err, struct sctp_error_invalid_stream *); |
1827 | | /* |
1828 | | * Error causes are just param's and this one has |
1829 | | * two back to back phdr, one with the error type |
1830 | | * and size, the other with the streamid and a rsvd |
1831 | | */ |
1832 | 351 | SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream); |
1833 | 351 | cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM); |
1834 | 351 | cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream)); |
1835 | 351 | cause->stream_id = htons(sid); |
1836 | 351 | cause->reserved = htons(0); |
1837 | 351 | sctp_queue_op_err(stcb, op_err); |
1838 | 351 | } |
1839 | 351 | SCTP_STAT_INCR(sctps_badsid); |
1840 | 351 | SCTP_TCB_LOCK_ASSERT(stcb); |
1841 | 351 | SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); |
1842 | 351 | if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { |
1843 | 264 | asoc->highest_tsn_inside_nr_map = tsn; |
1844 | 264 | } |
1845 | 351 | if (tsn == (asoc->cumulative_tsn + 1)) { |
1846 | | /* Update cum-ack */ |
1847 | 21 | asoc->cumulative_tsn = tsn; |
1848 | 21 | } |
1849 | 351 | return (0); |
1850 | 351 | } |
1851 | | /* |
1852 | | * If its a fragmented message, lets see if we can |
1853 | | * find the control on the reassembly queues. |
1854 | | */ |
1855 | 6.86k | if ((chk_type == SCTP_IDATA) && |
1856 | 2.22k | ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) && |
1857 | 1.05k | (fsn == 0)) { |
1858 | | /* |
1859 | | * The first *must* be fsn 0, and other |
1860 | | * (middle/end) pieces can *not* be fsn 0. |
1861 | | * XXX: This can happen in case of a wrap around. |
1862 | | * Ignore is for now. |
1863 | | */ |
1864 | 1 | SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags); |
1865 | 1 | goto err_out; |
1866 | 1 | } |
1867 | 6.86k | control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported); |
1868 | 6.86k | SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n", |
1869 | 6.86k | chk_flags, control); |
1870 | 6.86k | if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { |
1871 | | /* See if we can find the re-assembly entity */ |
1872 | 5.30k | if (control != NULL) { |
1873 | | /* We found something, does it belong? */ |
1874 | 2.35k | if (ordered && (mid != control->mid)) { |
1875 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid); |
1876 | 5 | err_out: |
1877 | 5 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
1878 | 5 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17; |
1879 | 5 | sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
1880 | 5 | *abort_flag = 1; |
1881 | 5 | return (0); |
1882 | 0 | } |
1883 | 2.35k | if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) { |
1884 | | /* We can't have a switched order with an unordered chunk */ |
1885 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), |
1886 | 0 | "All fragments of a user message must be ordered or unordered (TSN=%8.8x)", |
1887 | 0 | tsn); |
1888 | 0 | goto err_out; |
1889 | 0 | } |
1890 | 2.35k | if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) { |
1891 | | /* We can't have a switched unordered with a ordered chunk */ |
1892 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), |
1893 | 0 | "All fragments of a user message must be ordered or unordered (TSN=%8.8x)", |
1894 | 0 | tsn); |
1895 | 0 | goto err_out; |
1896 | 0 | } |
1897 | 2.35k | } |
1898 | 5.30k | } else { |
1899 | | /* Its a complete segment. Lets validate we |
1900 | | * don't have a re-assembly going on with |
1901 | | * the same Stream/Seq (for ordered) or in |
1902 | | * the same Stream for unordered. |
1903 | | */ |
1904 | 1.56k | if (control != NULL) { |
1905 | 67 | if (ordered || asoc->idata_supported) { |
1906 | 3 | SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n", |
1907 | 3 | chk_flags, mid); |
1908 | 3 | SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid); |
1909 | 3 | goto err_out; |
1910 | 64 | } else { |
1911 | 64 | if ((control->first_frag_seen) && |
1912 | 38 | (tsn == control->fsn_included + 1) && |
1913 | 1 | (control->end_added == 0)) { |
1914 | 1 | SCTP_SNPRINTF(msg, sizeof(msg), |
1915 | 1 | "Illegal message sequence, missing end for MID: %8.8x", |
1916 | 1 | control->fsn_included); |
1917 | 1 | goto err_out; |
1918 | 63 | } else { |
1919 | 63 | control = NULL; |
1920 | 63 | } |
1921 | 64 | } |
1922 | 67 | } |
1923 | 1.56k | } |
1924 | | /* now do the tests */ |
1925 | 6.86k | if (((asoc->cnt_on_all_streams + |
1926 | 6.86k | asoc->cnt_on_reasm_queue + |
1927 | 6.86k | asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) || |
1928 | 6.86k | (((int)asoc->my_rwnd) <= 0)) { |
1929 | | /* |
1930 | | * When we have NO room in the rwnd we check to make sure |
1931 | | * the reader is doing its job... |
1932 | | */ |
1933 | 87 | if (SCTP_SBAVAIL(&stcb->sctp_socket->so_rcv) > 0) { |
1934 | | /* some to read, wake-up */ |
1935 | | #if defined(__APPLE__) && !defined(__Userspace__) |
1936 | | struct socket *so; |
1937 | | |
1938 | | so = SCTP_INP_SO(stcb->sctp_ep); |
1939 | | atomic_add_int(&stcb->asoc.refcnt, 1); |
1940 | | SCTP_TCB_UNLOCK(stcb); |
1941 | | SCTP_SOCKET_LOCK(so, 1); |
1942 | | SCTP_TCB_LOCK(stcb); |
1943 | | atomic_subtract_int(&stcb->asoc.refcnt, 1); |
1944 | | if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { |
1945 | | /* assoc was freed while we were unlocked */ |
1946 | | SCTP_SOCKET_UNLOCK(so, 1); |
1947 | | return (0); |
1948 | | } |
1949 | | #endif |
1950 | 87 | sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); |
1951 | | #if defined(__APPLE__) && !defined(__Userspace__) |
1952 | | SCTP_SOCKET_UNLOCK(so, 1); |
1953 | | #endif |
1954 | 87 | } |
1955 | | /* now is it in the mapping array of what we have accepted? */ |
1956 | 87 | if (chk_type == SCTP_DATA) { |
1957 | 85 | if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) && |
1958 | 55 | SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { |
1959 | | /* Nope not in the valid range dump it */ |
1960 | 16 | dump_packet: |
1961 | 16 | sctp_set_rwnd(stcb, asoc); |
1962 | 16 | if ((asoc->cnt_on_all_streams + |
1963 | 16 | asoc->cnt_on_reasm_queue + |
1964 | 16 | asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) { |
1965 | 0 | SCTP_STAT_INCR(sctps_datadropchklmt); |
1966 | 16 | } else { |
1967 | 16 | SCTP_STAT_INCR(sctps_datadroprwnd); |
1968 | 16 | } |
1969 | 16 | *break_flag = 1; |
1970 | 16 | return (0); |
1971 | 14 | } |
1972 | 85 | } else { |
1973 | 2 | if (control == NULL) { |
1974 | 2 | goto dump_packet; |
1975 | 2 | } |
1976 | 0 | if (SCTP_TSN_GT(fsn, control->top_fsn)) { |
1977 | 0 | goto dump_packet; |
1978 | 0 | } |
1979 | 0 | } |
1980 | 87 | } |
1981 | | #ifdef SCTP_ASOCLOG_OF_TSNS |
1982 | | SCTP_TCB_LOCK_ASSERT(stcb); |
1983 | | if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) { |
1984 | | asoc->tsn_in_at = 0; |
1985 | | asoc->tsn_in_wrapped = 1; |
1986 | | } |
1987 | | asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn; |
1988 | | asoc->in_tsnlog[asoc->tsn_in_at].strm = sid; |
1989 | | asoc->in_tsnlog[asoc->tsn_in_at].seq = mid; |
1990 | | asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length; |
1991 | | asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags; |
1992 | | asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb; |
1993 | | asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at; |
1994 | | asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1; |
1995 | | asoc->tsn_in_at++; |
1996 | | #endif |
1997 | | /* |
1998 | | * Before we continue lets validate that we are not being fooled by |
1999 | | * an evil attacker. We can only have Nk chunks based on our TSN |
2000 | | * spread allowed by the mapping array N * 8 bits, so there is no |
2001 | | * way our stream sequence numbers could have wrapped. We of course |
2002 | | * only validate the FIRST fragment so the bit must be set. |
2003 | | */ |
2004 | 6.84k | if ((chk_flags & SCTP_DATA_FIRST_FRAG) && |
2005 | 3.97k | (TAILQ_EMPTY(&asoc->resetHead)) && |
2006 | 3.10k | (chk_flags & SCTP_DATA_UNORDERED) == 0 && |
2007 | 1.07k | SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) { |
2008 | | /* The incoming sseq is behind where we last delivered? */ |
2009 | 54 | SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n", |
2010 | 54 | mid, asoc->strmin[sid].last_mid_delivered); |
2011 | | |
2012 | 54 | if (asoc->idata_supported) { |
2013 | 36 | SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x", |
2014 | 36 | asoc->strmin[sid].last_mid_delivered, |
2015 | 36 | tsn, |
2016 | 36 | sid, |
2017 | 36 | mid); |
2018 | 36 | } else { |
2019 | 18 | SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", |
2020 | 18 | (uint16_t)asoc->strmin[sid].last_mid_delivered, |
2021 | 18 | tsn, |
2022 | 18 | sid, |
2023 | 18 | (uint16_t)mid); |
2024 | 18 | } |
2025 | 54 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
2026 | 54 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18; |
2027 | 54 | sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
2028 | 54 | *abort_flag = 1; |
2029 | 54 | return (0); |
2030 | 54 | } |
2031 | 6.79k | if (chk_type == SCTP_IDATA) { |
2032 | 2.18k | the_len = (chk_length - sizeof(struct sctp_idata_chunk)); |
2033 | 4.61k | } else { |
2034 | 4.61k | the_len = (chk_length - sizeof(struct sctp_data_chunk)); |
2035 | 4.61k | } |
2036 | 6.79k | if (last_chunk == 0) { |
2037 | 6.63k | if (chk_type == SCTP_IDATA) { |
2038 | 2.13k | dmbuf = SCTP_M_COPYM(*m, |
2039 | 2.13k | (offset + sizeof(struct sctp_idata_chunk)), |
2040 | 2.13k | the_len, M_NOWAIT); |
2041 | 4.50k | } else { |
2042 | 4.50k | dmbuf = SCTP_M_COPYM(*m, |
2043 | 4.50k | (offset + sizeof(struct sctp_data_chunk)), |
2044 | 4.50k | the_len, M_NOWAIT); |
2045 | 4.50k | } |
2046 | | #ifdef SCTP_MBUF_LOGGING |
2047 | | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { |
2048 | | sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY); |
2049 | | } |
2050 | | #endif |
2051 | 6.63k | } else { |
2052 | | /* We can steal the last chunk */ |
2053 | 156 | int l_len; |
2054 | 156 | dmbuf = *m; |
2055 | | /* lop off the top part */ |
2056 | 156 | if (chk_type == SCTP_IDATA) { |
2057 | 45 | m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk))); |
2058 | 111 | } else { |
2059 | 111 | m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); |
2060 | 111 | } |
2061 | 156 | if (SCTP_BUF_NEXT(dmbuf) == NULL) { |
2062 | 61 | l_len = SCTP_BUF_LEN(dmbuf); |
2063 | 95 | } else { |
2064 | | /* need to count up the size hopefully |
2065 | | * does not hit this to often :-0 |
2066 | | */ |
2067 | 95 | struct mbuf *lat; |
2068 | | |
2069 | 95 | l_len = 0; |
2070 | 887 | for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) { |
2071 | 792 | l_len += SCTP_BUF_LEN(lat); |
2072 | 792 | } |
2073 | 95 | } |
2074 | 156 | if (l_len > the_len) { |
2075 | | /* Trim the end round bytes off too */ |
2076 | 135 | m_adj(dmbuf, -(l_len - the_len)); |
2077 | 135 | } |
2078 | 156 | } |
2079 | 6.79k | if (dmbuf == NULL) { |
2080 | 0 | SCTP_STAT_INCR(sctps_nomem); |
2081 | 0 | return (0); |
2082 | 0 | } |
2083 | | /* |
2084 | | * Now no matter what, we need a control, get one |
2085 | | * if we don't have one (we may have gotten it |
2086 | | * above when we found the message was fragmented |
2087 | | */ |
2088 | 6.79k | if (control == NULL) { |
2089 | 4.44k | sctp_alloc_a_readq(stcb, control); |
2090 | 4.44k | sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, |
2091 | 4.44k | ppid, |
2092 | 4.44k | sid, |
2093 | 4.44k | chk_flags, |
2094 | 4.44k | NULL, fsn, mid); |
2095 | 4.44k | if (control == NULL) { |
2096 | 0 | SCTP_STAT_INCR(sctps_nomem); |
2097 | 0 | return (0); |
2098 | 0 | } |
2099 | 4.44k | if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { |
2100 | 1.52k | struct mbuf *mm; |
2101 | | |
2102 | 1.52k | control->data = dmbuf; |
2103 | 1.52k | control->tail_mbuf = NULL; |
2104 | 3.56k | for (mm = control->data; mm; mm = mm->m_next) { |
2105 | 2.04k | control->length += SCTP_BUF_LEN(mm); |
2106 | 2.04k | if (SCTP_BUF_NEXT(mm) == NULL) { |
2107 | 1.52k | control->tail_mbuf = mm; |
2108 | 1.52k | } |
2109 | 2.04k | } |
2110 | 1.52k | control->end_added = 1; |
2111 | 1.52k | control->last_frag_seen = 1; |
2112 | 1.52k | control->first_frag_seen = 1; |
2113 | 1.52k | control->fsn_included = fsn; |
2114 | 1.52k | control->top_fsn = fsn; |
2115 | 1.52k | } |
2116 | 4.44k | created_control = 1; |
2117 | 4.44k | } |
2118 | 6.79k | SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n", |
2119 | 6.79k | chk_flags, ordered, mid, control); |
2120 | 6.79k | if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && |
2121 | 6.79k | TAILQ_EMPTY(&asoc->resetHead) && |
2122 | 792 | ((ordered == 0) || |
2123 | 548 | (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) && |
2124 | 548 | TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) { |
2125 | | /* Candidate for express delivery */ |
2126 | | /* |
2127 | | * Its not fragmented, No PD-API is up, Nothing in the |
2128 | | * delivery queue, Its un-ordered OR ordered and the next to |
2129 | | * deliver AND nothing else is stuck on the stream queue, |
2130 | | * And there is room for it in the socket buffer. Lets just |
2131 | | * stuff it up the buffer.... |
2132 | | */ |
2133 | 281 | SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); |
2134 | 281 | if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { |
2135 | 162 | asoc->highest_tsn_inside_nr_map = tsn; |
2136 | 162 | } |
2137 | 281 | SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n", |
2138 | 281 | control, mid); |
2139 | | |
2140 | 281 | sctp_add_to_readq(stcb->sctp_ep, stcb, |
2141 | 281 | control, &stcb->sctp_socket->so_rcv, |
2142 | 281 | 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); |
2143 | | |
2144 | 281 | if ((chk_flags & SCTP_DATA_UNORDERED) == 0) { |
2145 | | /* for ordered, bump what we delivered */ |
2146 | 37 | asoc->strmin[sid].last_mid_delivered++; |
2147 | 37 | } |
2148 | 281 | SCTP_STAT_INCR(sctps_recvexpress); |
2149 | 281 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { |
2150 | 0 | sctp_log_strm_del_alt(stcb, tsn, mid, sid, |
2151 | 0 | SCTP_STR_LOG_FROM_EXPRS_DEL); |
2152 | 0 | } |
2153 | 281 | control = NULL; |
2154 | 281 | goto finish_express_del; |
2155 | 281 | } |
2156 | | |
2157 | | /* Now will we need a chunk too? */ |
2158 | 6.51k | if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { |
2159 | 5.27k | sctp_alloc_a_chunk(stcb, chk); |
2160 | 5.27k | if (chk == NULL) { |
2161 | | /* No memory so we drop the chunk */ |
2162 | 0 | SCTP_STAT_INCR(sctps_nomem); |
2163 | 0 | if (last_chunk == 0) { |
2164 | | /* we copied it, free the copy */ |
2165 | 0 | sctp_m_freem(dmbuf); |
2166 | 0 | } |
2167 | 0 | return (0); |
2168 | 0 | } |
2169 | 5.27k | chk->rec.data.tsn = tsn; |
2170 | 5.27k | chk->no_fr_allowed = 0; |
2171 | 5.27k | chk->rec.data.fsn = fsn; |
2172 | 5.27k | chk->rec.data.mid = mid; |
2173 | 5.27k | chk->rec.data.sid = sid; |
2174 | 5.27k | chk->rec.data.ppid = ppid; |
2175 | 5.27k | chk->rec.data.context = stcb->asoc.context; |
2176 | 5.27k | chk->rec.data.doing_fast_retransmit = 0; |
2177 | 5.27k | chk->rec.data.rcv_flags = chk_flags; |
2178 | 5.27k | chk->asoc = asoc; |
2179 | 5.27k | chk->send_size = the_len; |
2180 | 5.27k | chk->whoTo = net; |
2181 | 5.27k | SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n", |
2182 | 5.27k | chk, |
2183 | 5.27k | control, mid); |
2184 | 5.27k | atomic_add_int(&net->ref_count, 1); |
2185 | 5.27k | chk->data = dmbuf; |
2186 | 5.27k | } |
2187 | | /* Set the appropriate TSN mark */ |
2188 | 6.51k | if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { |
2189 | 0 | SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); |
2190 | 0 | if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { |
2191 | 0 | asoc->highest_tsn_inside_nr_map = tsn; |
2192 | 0 | } |
2193 | 6.51k | } else { |
2194 | 6.51k | SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); |
2195 | 6.51k | if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) { |
2196 | 3.79k | asoc->highest_tsn_inside_map = tsn; |
2197 | 3.79k | } |
2198 | 6.51k | } |
2199 | | /* Now is it complete (i.e. not fragmented)? */ |
2200 | 6.51k | if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { |
2201 | | /* |
2202 | | * Special check for when streams are resetting. We |
2203 | | * could be more smart about this and check the |
2204 | | * actual stream to see if it is not being reset.. |
2205 | | * that way we would not create a HOLB when amongst |
2206 | | * streams being reset and those not being reset. |
2207 | | * |
2208 | | */ |
2209 | 1.24k | if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && |
2210 | 730 | SCTP_TSN_GT(tsn, liste->tsn)) { |
2211 | | /* |
2212 | | * yep its past where we need to reset... go |
2213 | | * ahead and queue it. |
2214 | | */ |
2215 | 400 | if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { |
2216 | | /* first one on */ |
2217 | 146 | TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); |
2218 | 254 | } else { |
2219 | 254 | struct sctp_queued_to_read *lcontrol, *nlcontrol; |
2220 | 254 | unsigned char inserted = 0; |
2221 | 882 | TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) { |
2222 | 882 | if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) { |
2223 | 717 | continue; |
2224 | 717 | } else { |
2225 | | /* found it */ |
2226 | 165 | TAILQ_INSERT_BEFORE(lcontrol, control, next); |
2227 | 165 | inserted = 1; |
2228 | 165 | break; |
2229 | 165 | } |
2230 | 882 | } |
2231 | 254 | if (inserted == 0) { |
2232 | | /* |
2233 | | * must be put at end, use |
2234 | | * prevP (all setup from |
2235 | | * loop) to setup nextP. |
2236 | | */ |
2237 | 89 | TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); |
2238 | 89 | } |
2239 | 254 | } |
2240 | 400 | goto finish_express_del; |
2241 | 400 | } |
2242 | 841 | if (chk_flags & SCTP_DATA_UNORDERED) { |
2243 | | /* queue directly into socket buffer */ |
2244 | 107 | SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n", |
2245 | 107 | control, mid); |
2246 | 107 | sctp_mark_non_revokable(asoc, control->sinfo_tsn); |
2247 | 107 | sctp_add_to_readq(stcb->sctp_ep, stcb, |
2248 | 107 | control, |
2249 | 107 | &stcb->sctp_socket->so_rcv, 1, |
2250 | 107 | SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); |
2251 | | |
2252 | 734 | } else { |
2253 | 734 | SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control, |
2254 | 734 | mid); |
2255 | 734 | sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); |
2256 | 734 | if (*abort_flag) { |
2257 | 54 | if (last_chunk) { |
2258 | 1 | *m = NULL; |
2259 | 1 | } |
2260 | 54 | return (0); |
2261 | 54 | } |
2262 | 734 | } |
2263 | 787 | goto finish_express_del; |
2264 | 841 | } |
2265 | | /* If we reach here its a reassembly */ |
2266 | 5.27k | need_reasm_check = 1; |
2267 | 5.27k | SCTPDBG(SCTP_DEBUG_XXX, |
2268 | 5.27k | "Queue data to stream for reasm control: %p MID: %u\n", |
2269 | 5.27k | control, mid); |
2270 | 5.27k | sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn); |
2271 | 5.27k | if (*abort_flag) { |
2272 | | /* |
2273 | | * the assoc is now gone and chk was put onto the |
2274 | | * reasm queue, which has all been freed. |
2275 | | */ |
2276 | 290 | if (last_chunk) { |
2277 | 14 | *m = NULL; |
2278 | 14 | } |
2279 | 290 | return (0); |
2280 | 290 | } |
2281 | 6.44k | finish_express_del: |
2282 | | /* Here we tidy up things */ |
2283 | 6.44k | if (tsn == (asoc->cumulative_tsn + 1)) { |
2284 | | /* Update cum-ack */ |
2285 | 135 | asoc->cumulative_tsn = tsn; |
2286 | 135 | } |
2287 | 6.44k | if (last_chunk) { |
2288 | 141 | *m = NULL; |
2289 | 141 | } |
2290 | 6.44k | if (ordered) { |
2291 | 2.81k | SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); |
2292 | 3.63k | } else { |
2293 | 3.63k | SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); |
2294 | 3.63k | } |
2295 | 6.44k | SCTP_STAT_INCR(sctps_recvdata); |
2296 | | /* Set it present please */ |
2297 | 6.44k | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { |
2298 | 0 | sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN); |
2299 | 0 | } |
2300 | 6.44k | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { |
2301 | 0 | sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, |
2302 | 0 | asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); |
2303 | 0 | } |
2304 | 6.44k | if (need_reasm_check) { |
2305 | 4.98k | (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD); |
2306 | 4.98k | need_reasm_check = 0; |
2307 | 4.98k | } |
2308 | | /* check the special flag for stream resets */ |
2309 | 6.44k | if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && |
2310 | 954 | SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) { |
2311 | | /* |
2312 | | * we have finished working through the backlogged TSN's now |
2313 | | * time to reset streams. 1: call reset function. 2: free |
2314 | | * pending_reply space 3: distribute any chunks in |
2315 | | * pending_reply_queue. |
2316 | | */ |
2317 | 149 | sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams); |
2318 | 149 | TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); |
2319 | 149 | sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED); |
2320 | 149 | SCTP_FREE(liste, SCTP_M_STRESET); |
2321 | | /*sa_ignore FREED_MEMORY*/ |
2322 | 149 | liste = TAILQ_FIRST(&asoc->resetHead); |
2323 | 149 | if (TAILQ_EMPTY(&asoc->resetHead)) { |
2324 | | /* All can be removed */ |
2325 | 102 | TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { |
2326 | 102 | TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); |
2327 | 102 | strm = &asoc->strmin[control->sinfo_stream]; |
2328 | 102 | sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); |
2329 | 102 | if (*abort_flag) { |
2330 | 13 | return (0); |
2331 | 13 | } |
2332 | 89 | if (need_reasm_check) { |
2333 | 1 | (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD); |
2334 | 1 | need_reasm_check = 0; |
2335 | 1 | } |
2336 | 89 | } |
2337 | 92 | } else { |
2338 | 133 | TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { |
2339 | 133 | if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) { |
2340 | 33 | break; |
2341 | 33 | } |
2342 | | /* |
2343 | | * if control->sinfo_tsn is <= liste->tsn we can |
2344 | | * process it which is the NOT of |
2345 | | * control->sinfo_tsn > liste->tsn |
2346 | | */ |
2347 | 133 | TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); |
2348 | 100 | strm = &asoc->strmin[control->sinfo_stream]; |
2349 | 100 | sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); |
2350 | 100 | if (*abort_flag) { |
2351 | 25 | return (0); |
2352 | 25 | } |
2353 | 75 | if (need_reasm_check) { |
2354 | 2 | (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD); |
2355 | 2 | need_reasm_check = 0; |
2356 | 2 | } |
2357 | 75 | } |
2358 | 92 | } |
2359 | 149 | } |
2360 | 6.41k | return (1); |
2361 | 6.44k | } |
2362 | | |
2363 | | static const int8_t sctp_map_lookup_tab[256] = { |
2364 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2365 | | 0, 1, 0, 2, 0, 1, 0, 4, |
2366 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2367 | | 0, 1, 0, 2, 0, 1, 0, 5, |
2368 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2369 | | 0, 1, 0, 2, 0, 1, 0, 4, |
2370 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2371 | | 0, 1, 0, 2, 0, 1, 0, 6, |
2372 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2373 | | 0, 1, 0, 2, 0, 1, 0, 4, |
2374 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2375 | | 0, 1, 0, 2, 0, 1, 0, 5, |
2376 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2377 | | 0, 1, 0, 2, 0, 1, 0, 4, |
2378 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2379 | | 0, 1, 0, 2, 0, 1, 0, 7, |
2380 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2381 | | 0, 1, 0, 2, 0, 1, 0, 4, |
2382 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2383 | | 0, 1, 0, 2, 0, 1, 0, 5, |
2384 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2385 | | 0, 1, 0, 2, 0, 1, 0, 4, |
2386 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2387 | | 0, 1, 0, 2, 0, 1, 0, 6, |
2388 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2389 | | 0, 1, 0, 2, 0, 1, 0, 4, |
2390 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2391 | | 0, 1, 0, 2, 0, 1, 0, 5, |
2392 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2393 | | 0, 1, 0, 2, 0, 1, 0, 4, |
2394 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2395 | | 0, 1, 0, 2, 0, 1, 0, 8 |
2396 | | }; |
2397 | | |
2398 | | void |
2399 | | sctp_slide_mapping_arrays(struct sctp_tcb *stcb) |
2400 | 16.0k | { |
2401 | | /* |
2402 | | * Now we also need to check the mapping array in a couple of ways. |
2403 | | * 1) Did we move the cum-ack point? |
2404 | | * |
2405 | | * When you first glance at this you might think |
2406 | | * that all entries that make up the position |
2407 | | * of the cum-ack would be in the nr-mapping array |
2408 | | * only.. i.e. things up to the cum-ack are always |
2409 | | * deliverable. Thats true with one exception, when |
2410 | | * its a fragmented message we may not deliver the data |
2411 | | * until some threshold (or all of it) is in place. So |
2412 | | * we must OR the nr_mapping_array and mapping_array to |
2413 | | * get a true picture of the cum-ack. |
2414 | | */ |
2415 | 16.0k | struct sctp_association *asoc; |
2416 | 16.0k | int at; |
2417 | 16.0k | uint8_t val; |
2418 | 16.0k | int slide_from, slide_end, lgap, distance; |
2419 | 16.0k | uint32_t old_cumack, old_base, old_highest, highest_tsn; |
2420 | | |
2421 | 16.0k | asoc = &stcb->asoc; |
2422 | | |
2423 | 16.0k | old_cumack = asoc->cumulative_tsn; |
2424 | 16.0k | old_base = asoc->mapping_array_base_tsn; |
2425 | 16.0k | old_highest = asoc->highest_tsn_inside_map; |
2426 | | /* |
2427 | | * We could probably improve this a small bit by calculating the |
2428 | | * offset of the current cum-ack as the starting point. |
2429 | | */ |
2430 | 16.0k | at = 0; |
2431 | 17.3k | for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) { |
2432 | 17.3k | val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from]; |
2433 | 17.3k | if (val == 0xff) { |
2434 | 1.35k | at += 8; |
2435 | 16.0k | } else { |
2436 | | /* there is a 0 bit */ |
2437 | 16.0k | at += sctp_map_lookup_tab[val]; |
2438 | 16.0k | break; |
2439 | 16.0k | } |
2440 | 17.3k | } |
2441 | 16.0k | asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at-1); |
2442 | | |
2443 | 16.0k | if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) && |
2444 | 2.07k | SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) { |
2445 | 0 | #ifdef INVARIANTS |
2446 | 0 | panic("huh, cumack 0x%x greater than high-tsn 0x%x in map", |
2447 | 0 | asoc->cumulative_tsn, asoc->highest_tsn_inside_map); |
2448 | | #else |
2449 | | SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n", |
2450 | | asoc->cumulative_tsn, asoc->highest_tsn_inside_map); |
2451 | | sctp_print_mapping_array(asoc); |
2452 | | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { |
2453 | | sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); |
2454 | | } |
2455 | | asoc->highest_tsn_inside_map = asoc->cumulative_tsn; |
2456 | | asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn; |
2457 | | #endif |
2458 | 0 | } |
2459 | 16.0k | if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { |
2460 | 3.57k | highest_tsn = asoc->highest_tsn_inside_nr_map; |
2461 | 12.4k | } else { |
2462 | 12.4k | highest_tsn = asoc->highest_tsn_inside_map; |
2463 | 12.4k | } |
2464 | 16.0k | if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) { |
2465 | | /* The complete array was completed by a single FR */ |
2466 | | /* highest becomes the cum-ack */ |
2467 | 233 | int clr; |
2468 | 233 | #ifdef INVARIANTS |
2469 | 233 | unsigned int i; |
2470 | 233 | #endif |
2471 | | |
2472 | | /* clear the array */ |
2473 | 233 | clr = ((at+7) >> 3); |
2474 | 233 | if (clr > asoc->mapping_array_size) { |
2475 | 0 | clr = asoc->mapping_array_size; |
2476 | 0 | } |
2477 | 233 | memset(asoc->mapping_array, 0, clr); |
2478 | 233 | memset(asoc->nr_mapping_array, 0, clr); |
2479 | 233 | #ifdef INVARIANTS |
2480 | 3.96k | for (i = 0; i < asoc->mapping_array_size; i++) { |
2481 | 3.72k | if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) { |
2482 | 0 | SCTP_PRINTF("Error Mapping array's not clean at clear\n"); |
2483 | 0 | sctp_print_mapping_array(asoc); |
2484 | 0 | } |
2485 | 3.72k | } |
2486 | 233 | #endif |
2487 | 233 | asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; |
2488 | 233 | asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn; |
2489 | 15.8k | } else if (at >= 8) { |
2490 | | /* we can slide the mapping array down */ |
2491 | | /* slide_from holds where we hit the first NON 0xff byte */ |
2492 | | |
2493 | | /* |
2494 | | * now calculate the ceiling of the move using our highest |
2495 | | * TSN value |
2496 | | */ |
2497 | 17 | SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn); |
2498 | 17 | slide_end = (lgap >> 3); |
2499 | 17 | if (slide_end < slide_from) { |
2500 | 0 | sctp_print_mapping_array(asoc); |
2501 | 0 | #ifdef INVARIANTS |
2502 | 0 | panic("impossible slide"); |
2503 | | #else |
2504 | | SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n", |
2505 | | lgap, slide_end, slide_from, at); |
2506 | | return; |
2507 | | #endif |
2508 | 0 | } |
2509 | 17 | if (slide_end > asoc->mapping_array_size) { |
2510 | 0 | #ifdef INVARIANTS |
2511 | 0 | panic("would overrun buffer"); |
2512 | | #else |
2513 | | SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n", |
2514 | | asoc->mapping_array_size, slide_end); |
2515 | | slide_end = asoc->mapping_array_size; |
2516 | | #endif |
2517 | 0 | } |
2518 | 17 | distance = (slide_end - slide_from) + 1; |
2519 | 17 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { |
2520 | 0 | sctp_log_map(old_base, old_cumack, old_highest, |
2521 | 0 | SCTP_MAP_PREPARE_SLIDE); |
2522 | 0 | sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end, |
2523 | 0 | (uint32_t) lgap, SCTP_MAP_SLIDE_FROM); |
2524 | 0 | } |
2525 | 17 | if (distance + slide_from > asoc->mapping_array_size || |
2526 | 17 | distance < 0) { |
2527 | | /* |
2528 | | * Here we do NOT slide forward the array so that |
2529 | | * hopefully when more data comes in to fill it up |
2530 | | * we will be able to slide it forward. Really I |
2531 | | * don't think this should happen :-0 |
2532 | | */ |
2533 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { |
2534 | 0 | sctp_log_map((uint32_t) distance, (uint32_t) slide_from, |
2535 | 0 | (uint32_t) asoc->mapping_array_size, |
2536 | 0 | SCTP_MAP_SLIDE_NONE); |
2537 | 0 | } |
2538 | 17 | } else { |
2539 | 17 | int ii; |
2540 | | |
2541 | 1.91k | for (ii = 0; ii < distance; ii++) { |
2542 | 1.89k | asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii]; |
2543 | 1.89k | asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii]; |
2544 | 1.89k | } |
2545 | 836 | for (ii = distance; ii < asoc->mapping_array_size; ii++) { |
2546 | 819 | asoc->mapping_array[ii] = 0; |
2547 | 819 | asoc->nr_mapping_array[ii] = 0; |
2548 | 819 | } |
2549 | 17 | if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) { |
2550 | 10 | asoc->highest_tsn_inside_map += (slide_from << 3); |
2551 | 10 | } |
2552 | 17 | if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) { |
2553 | 1 | asoc->highest_tsn_inside_nr_map += (slide_from << 3); |
2554 | 1 | } |
2555 | 17 | asoc->mapping_array_base_tsn += (slide_from << 3); |
2556 | 17 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { |
2557 | 0 | sctp_log_map(asoc->mapping_array_base_tsn, |
2558 | 0 | asoc->cumulative_tsn, asoc->highest_tsn_inside_map, |
2559 | 0 | SCTP_MAP_SLIDE_RESULT); |
2560 | 0 | } |
2561 | 17 | } |
2562 | 17 | } |
2563 | 16.0k | } |
2564 | | |
2565 | | void |
2566 | | sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap) |
2567 | 5.21k | { |
2568 | 5.21k | struct sctp_association *asoc; |
2569 | 5.21k | uint32_t highest_tsn; |
2570 | 5.21k | int is_a_gap; |
2571 | | |
2572 | 5.21k | sctp_slide_mapping_arrays(stcb); |
2573 | 5.21k | asoc = &stcb->asoc; |
2574 | 5.21k | if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { |
2575 | 853 | highest_tsn = asoc->highest_tsn_inside_nr_map; |
2576 | 4.36k | } else { |
2577 | 4.36k | highest_tsn = asoc->highest_tsn_inside_map; |
2578 | 4.36k | } |
2579 | | /* Is there a gap now? */ |
2580 | 5.21k | is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); |
2581 | | |
2582 | | /* |
2583 | | * Now we need to see if we need to queue a sack or just start the |
2584 | | * timer (if allowed). |
2585 | | */ |
2586 | 5.21k | if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) { |
2587 | | /* |
2588 | | * Ok special case, in SHUTDOWN-SENT case. here we |
2589 | | * maker sure SACK timer is off and instead send a |
2590 | | * SHUTDOWN and a SACK |
2591 | | */ |
2592 | 0 | if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { |
2593 | 0 | sctp_timer_stop(SCTP_TIMER_TYPE_RECV, |
2594 | 0 | stcb->sctp_ep, stcb, NULL, |
2595 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_19); |
2596 | 0 | } |
2597 | 0 | sctp_send_shutdown(stcb, |
2598 | 0 | ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination)); |
2599 | 0 | if (is_a_gap) { |
2600 | 0 | sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); |
2601 | 0 | } |
2602 | 5.21k | } else { |
2603 | | /* |
2604 | | * CMT DAC algorithm: increase number of packets |
2605 | | * received since last ack |
2606 | | */ |
2607 | 5.21k | stcb->asoc.cmt_dac_pkts_rcvd++; |
2608 | | |
2609 | 5.21k | if ((stcb->asoc.send_sack == 1) || /* We need to send a SACK */ |
2610 | 233 | ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no |
2611 | | * longer is one */ |
2612 | 233 | (stcb->asoc.numduptsns) || /* we have dup's */ |
2613 | 233 | (is_a_gap) || /* is still a gap */ |
2614 | 118 | (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */ |
2615 | 5.09k | (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)) { /* hit limit of pkts */ |
2616 | 5.09k | if ((stcb->asoc.sctp_cmt_on_off > 0) && |
2617 | 0 | (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) && |
2618 | 0 | (stcb->asoc.send_sack == 0) && |
2619 | 0 | (stcb->asoc.numduptsns == 0) && |
2620 | 0 | (stcb->asoc.delayed_ack) && |
2621 | 0 | (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) { |
2622 | | /* |
2623 | | * CMT DAC algorithm: With CMT, |
2624 | | * delay acks even in the face of |
2625 | | * reordering. Therefore, if acks |
2626 | | * that do not have to be sent |
2627 | | * because of the above reasons, |
2628 | | * will be delayed. That is, acks |
2629 | | * that would have been sent due to |
2630 | | * gap reports will be delayed with |
2631 | | * DAC. Start the delayed ack timer. |
2632 | | */ |
2633 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_RECV, |
2634 | 0 | stcb->sctp_ep, stcb, NULL); |
2635 | 5.09k | } else { |
2636 | | /* |
2637 | | * Ok we must build a SACK since the |
2638 | | * timer is pending, we got our |
2639 | | * first packet OR there are gaps or |
2640 | | * duplicates. |
2641 | | */ |
2642 | 5.09k | sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, |
2643 | 5.09k | SCTP_FROM_SCTP_INDATA + SCTP_LOC_20); |
2644 | 5.09k | sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); |
2645 | 5.09k | } |
2646 | 5.09k | } else { |
2647 | 118 | if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { |
2648 | 118 | sctp_timer_start(SCTP_TIMER_TYPE_RECV, |
2649 | 118 | stcb->sctp_ep, stcb, NULL); |
2650 | 118 | } |
2651 | 118 | } |
2652 | 5.21k | } |
2653 | 5.21k | } |
2654 | | |
2655 | | int |
2656 | | sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, |
2657 | | struct sctp_inpcb *inp, struct sctp_tcb *stcb, |
2658 | | struct sctp_nets *net, uint32_t *high_tsn) |
2659 | 5.20k | { |
2660 | 5.20k | struct sctp_chunkhdr *ch, chunk_buf; |
2661 | 5.20k | struct sctp_association *asoc; |
2662 | 5.20k | int num_chunks = 0; /* number of control chunks processed */ |
2663 | 5.20k | int stop_proc = 0; |
2664 | 5.20k | int break_flag, last_chunk; |
2665 | 5.20k | int abort_flag = 0, was_a_gap; |
2666 | 5.20k | struct mbuf *m; |
2667 | 5.20k | uint32_t highest_tsn; |
2668 | 5.20k | uint16_t chk_length; |
2669 | | |
2670 | | /* set the rwnd */ |
2671 | 5.20k | sctp_set_rwnd(stcb, &stcb->asoc); |
2672 | | |
2673 | 5.20k | m = *mm; |
2674 | 5.20k | SCTP_TCB_LOCK_ASSERT(stcb); |
2675 | 5.20k | asoc = &stcb->asoc; |
2676 | 5.20k | if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { |
2677 | 65 | highest_tsn = asoc->highest_tsn_inside_nr_map; |
2678 | 5.14k | } else { |
2679 | 5.14k | highest_tsn = asoc->highest_tsn_inside_map; |
2680 | 5.14k | } |
2681 | 5.20k | was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); |
2682 | | /* |
2683 | | * setup where we got the last DATA packet from for any SACK that |
2684 | | * may need to go out. Don't bump the net. This is done ONLY when a |
2685 | | * chunk is assigned. |
2686 | | */ |
2687 | 5.20k | asoc->last_data_chunk_from = net; |
2688 | | |
2689 | | /*- |
2690 | | * Now before we proceed we must figure out if this is a wasted |
2691 | | * cluster... i.e. it is a small packet sent in and yet the driver |
2692 | | * underneath allocated a full cluster for it. If so we must copy it |
2693 | | * to a smaller mbuf and free up the cluster mbuf. This will help |
2694 | | * with cluster starvation. |
2695 | | */ |
2696 | 5.20k | if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) { |
2697 | | /* we only handle mbufs that are singletons.. not chains */ |
2698 | 2.19k | m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA); |
2699 | 2.19k | if (m) { |
2700 | | /* ok lets see if we can copy the data up */ |
2701 | 2.19k | caddr_t *from, *to; |
2702 | | /* get the pointers and copy */ |
2703 | 2.19k | to = mtod(m, caddr_t *); |
2704 | 2.19k | from = mtod((*mm), caddr_t *); |
2705 | 2.19k | memcpy(to, from, SCTP_BUF_LEN((*mm))); |
2706 | | /* copy the length and free up the old */ |
2707 | 2.19k | SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm)); |
2708 | 2.19k | sctp_m_freem(*mm); |
2709 | | /* success, back copy */ |
2710 | 2.19k | *mm = m; |
2711 | 2.19k | } else { |
2712 | | /* We are in trouble in the mbuf world .. yikes */ |
2713 | 0 | m = *mm; |
2714 | 0 | } |
2715 | 2.19k | } |
2716 | | /* get pointer to the first chunk header */ |
2717 | 5.20k | ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, |
2718 | 5.20k | sizeof(struct sctp_chunkhdr), |
2719 | 5.20k | (uint8_t *)&chunk_buf); |
2720 | 5.20k | if (ch == NULL) { |
2721 | 0 | return (1); |
2722 | 0 | } |
2723 | | /* |
2724 | | * process all DATA chunks... |
2725 | | */ |
2726 | 5.20k | *high_tsn = asoc->cumulative_tsn; |
2727 | 5.20k | break_flag = 0; |
2728 | 5.20k | asoc->data_pkts_seen++; |
2729 | 22.5k | while (stop_proc == 0) { |
2730 | | /* validate chunk length */ |
2731 | 18.2k | chk_length = ntohs(ch->chunk_length); |
2732 | 18.2k | if (length - *offset < chk_length) { |
2733 | | /* all done, mutulated chunk */ |
2734 | 679 | stop_proc = 1; |
2735 | 679 | continue; |
2736 | 679 | } |
2737 | 17.5k | if ((asoc->idata_supported == 1) && |
2738 | 6.91k | (ch->chunk_type == SCTP_DATA)) { |
2739 | 208 | struct mbuf *op_err; |
2740 | 208 | char msg[SCTP_DIAG_INFO_LEN]; |
2741 | | |
2742 | 208 | SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated"); |
2743 | 208 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
2744 | 208 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21; |
2745 | 208 | sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
2746 | 208 | return (2); |
2747 | 208 | } |
2748 | 17.3k | if ((asoc->idata_supported == 0) && |
2749 | 10.6k | (ch->chunk_type == SCTP_IDATA)) { |
2750 | 5 | struct mbuf *op_err; |
2751 | 5 | char msg[SCTP_DIAG_INFO_LEN]; |
2752 | | |
2753 | 5 | SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated"); |
2754 | 5 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
2755 | 5 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22; |
2756 | 5 | sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
2757 | 5 | return (2); |
2758 | 5 | } |
2759 | 17.3k | if ((ch->chunk_type == SCTP_DATA) || |
2760 | 14.2k | (ch->chunk_type == SCTP_IDATA)) { |
2761 | 14.2k | uint16_t clen; |
2762 | | |
2763 | 14.2k | if (ch->chunk_type == SCTP_DATA) { |
2764 | 9.80k | clen = sizeof(struct sctp_data_chunk); |
2765 | 9.80k | } else { |
2766 | 4.42k | clen = sizeof(struct sctp_idata_chunk); |
2767 | 4.42k | } |
2768 | 14.2k | if (chk_length < clen) { |
2769 | | /* |
2770 | | * Need to send an abort since we had a |
2771 | | * invalid data chunk. |
2772 | | */ |
2773 | 109 | struct mbuf *op_err; |
2774 | 109 | char msg[SCTP_DIAG_INFO_LEN]; |
2775 | | |
2776 | 109 | SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u", |
2777 | 109 | ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA", |
2778 | 109 | chk_length); |
2779 | 109 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
2780 | 109 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23; |
2781 | 109 | sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
2782 | 109 | return (2); |
2783 | 109 | } |
2784 | | #ifdef SCTP_AUDITING_ENABLED |
2785 | | sctp_audit_log(0xB1, 0); |
2786 | | #endif |
2787 | 14.1k | if (SCTP_SIZE32(chk_length) == (length - *offset)) { |
2788 | 2.17k | last_chunk = 1; |
2789 | 11.9k | } else { |
2790 | 11.9k | last_chunk = 0; |
2791 | 11.9k | } |
2792 | 14.1k | if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, |
2793 | 14.1k | chk_length, net, high_tsn, &abort_flag, &break_flag, |
2794 | 14.1k | last_chunk, ch->chunk_type)) { |
2795 | 6.41k | num_chunks++; |
2796 | 6.41k | } |
2797 | 14.1k | if (abort_flag) |
2798 | 449 | return (2); |
2799 | | |
2800 | 13.6k | if (break_flag) { |
2801 | | /* |
2802 | | * Set because of out of rwnd space and no |
2803 | | * drop rep space left. |
2804 | | */ |
2805 | 16 | stop_proc = 1; |
2806 | 16 | continue; |
2807 | 16 | } |
2808 | 13.6k | } else { |
2809 | | /* not a data chunk in the data region */ |
2810 | 3.12k | switch (ch->chunk_type) { |
2811 | 3 | case SCTP_INITIATION: |
2812 | 4 | case SCTP_INITIATION_ACK: |
2813 | 6 | case SCTP_SELECTIVE_ACK: |
2814 | 8 | case SCTP_NR_SELECTIVE_ACK: |
2815 | 27 | case SCTP_HEARTBEAT_REQUEST: |
2816 | 29 | case SCTP_HEARTBEAT_ACK: |
2817 | 30 | case SCTP_ABORT_ASSOCIATION: |
2818 | 31 | case SCTP_SHUTDOWN: |
2819 | 35 | case SCTP_SHUTDOWN_ACK: |
2820 | 53 | case SCTP_OPERATION_ERROR: |
2821 | 54 | case SCTP_COOKIE_ECHO: |
2822 | 55 | case SCTP_COOKIE_ACK: |
2823 | 56 | case SCTP_ECN_ECHO: |
2824 | 57 | case SCTP_ECN_CWR: |
2825 | 58 | case SCTP_SHUTDOWN_COMPLETE: |
2826 | 60 | case SCTP_AUTHENTICATION: |
2827 | 61 | case SCTP_ASCONF_ACK: |
2828 | 62 | case SCTP_PACKET_DROPPED: |
2829 | 63 | case SCTP_STREAM_RESET: |
2830 | 67 | case SCTP_FORWARD_CUM_TSN: |
2831 | 69 | case SCTP_ASCONF: |
2832 | 69 | { |
2833 | | /* |
2834 | | * Now, what do we do with KNOWN chunks that |
2835 | | * are NOT in the right place? |
2836 | | * |
2837 | | * For now, I do nothing but ignore them. We |
2838 | | * may later want to add sysctl stuff to |
2839 | | * switch out and do either an ABORT() or |
2840 | | * possibly process them. |
2841 | | */ |
2842 | 69 | struct mbuf *op_err; |
2843 | 69 | char msg[SCTP_DIAG_INFO_LEN]; |
2844 | | |
2845 | 69 | SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x", |
2846 | 69 | ch->chunk_type); |
2847 | 69 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
2848 | 69 | sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
2849 | 69 | return (2); |
2850 | 67 | } |
2851 | 3.05k | default: |
2852 | | /* |
2853 | | * Unknown chunk type: use bit rules after |
2854 | | * checking length |
2855 | | */ |
2856 | 3.05k | if (chk_length < sizeof(struct sctp_chunkhdr)) { |
2857 | | /* |
2858 | | * Need to send an abort since we had a |
2859 | | * invalid chunk. |
2860 | | */ |
2861 | 29 | struct mbuf *op_err; |
2862 | 29 | char msg[SCTP_DIAG_INFO_LEN]; |
2863 | | |
2864 | 29 | SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length); |
2865 | 29 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
2866 | 29 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; |
2867 | 29 | sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
2868 | 29 | return (2); |
2869 | 29 | } |
2870 | 3.02k | if (ch->chunk_type & 0x40) { |
2871 | | /* Add a error report to the queue */ |
2872 | 2.76k | struct mbuf *op_err; |
2873 | 2.76k | struct sctp_gen_error_cause *cause; |
2874 | | |
2875 | 2.76k | op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause), |
2876 | 2.76k | 0, M_NOWAIT, 1, MT_DATA); |
2877 | 2.76k | if (op_err != NULL) { |
2878 | 2.76k | cause = mtod(op_err, struct sctp_gen_error_cause *); |
2879 | 2.76k | cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK); |
2880 | 2.76k | cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause))); |
2881 | 2.76k | SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause); |
2882 | 2.76k | SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT); |
2883 | 2.76k | if (SCTP_BUF_NEXT(op_err) != NULL) { |
2884 | 2.76k | sctp_queue_op_err(stcb, op_err); |
2885 | 2.76k | } else { |
2886 | 0 | sctp_m_freem(op_err); |
2887 | 0 | } |
2888 | 2.76k | } |
2889 | 2.76k | } |
2890 | 3.02k | if ((ch->chunk_type & 0x80) == 0) { |
2891 | | /* discard the rest of this packet */ |
2892 | 17 | stop_proc = 1; |
2893 | 17 | } /* else skip this bad chunk and |
2894 | | * continue... */ |
2895 | 3.02k | break; |
2896 | 3.12k | } /* switch of chunk type */ |
2897 | 3.12k | } |
2898 | 16.6k | *offset += SCTP_SIZE32(chk_length); |
2899 | 16.6k | if ((*offset >= length) || stop_proc) { |
2900 | | /* no more data left in the mbuf chain */ |
2901 | 3.54k | stop_proc = 1; |
2902 | 3.54k | continue; |
2903 | 3.54k | } |
2904 | 13.1k | ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, |
2905 | 13.1k | sizeof(struct sctp_chunkhdr), |
2906 | 13.1k | (uint8_t *)&chunk_buf); |
2907 | 13.1k | if (ch == NULL) { |
2908 | 100 | *offset = length; |
2909 | 100 | stop_proc = 1; |
2910 | 100 | continue; |
2911 | 100 | } |
2912 | 13.1k | } |
2913 | 4.33k | if (break_flag) { |
2914 | | /* |
2915 | | * we need to report rwnd overrun drops. |
2916 | | */ |
2917 | 16 | sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0); |
2918 | 16 | } |
2919 | 4.33k | if (num_chunks) { |
2920 | | /* |
2921 | | * Did we get data, if so update the time for auto-close and |
2922 | | * give peer credit for being alive. |
2923 | | */ |
2924 | 1.63k | SCTP_STAT_INCR(sctps_recvpktwithdata); |
2925 | 1.63k | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { |
2926 | 0 | sctp_misc_ints(SCTP_THRESHOLD_CLEAR, |
2927 | 0 | stcb->asoc.overall_error_count, |
2928 | 0 | 0, |
2929 | 0 | SCTP_FROM_SCTP_INDATA, |
2930 | 0 | __LINE__); |
2931 | 0 | } |
2932 | 1.63k | stcb->asoc.overall_error_count = 0; |
2933 | 1.63k | (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); |
2934 | 1.63k | } |
2935 | | /* now service all of the reassm queue if needed */ |
2936 | 4.33k | if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) { |
2937 | | /* Assure that we ack right away */ |
2938 | 0 | stcb->asoc.send_sack = 1; |
2939 | 0 | } |
2940 | | /* Start a sack timer or QUEUE a SACK for sending */ |
2941 | 4.33k | sctp_sack_check(stcb, was_a_gap); |
2942 | 4.33k | return (0); |
2943 | 5.20k | } |
2944 | | |
2945 | | static int |
2946 | | sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn, |
2947 | | uint16_t frag_strt, uint16_t frag_end, int nr_sacking, |
2948 | | int *num_frs, |
2949 | | uint32_t *biggest_newly_acked_tsn, |
2950 | | uint32_t *this_sack_lowest_newack, |
2951 | | int *rto_ok) |
2952 | 0 | { |
2953 | 0 | struct sctp_tmit_chunk *tp1; |
2954 | 0 | unsigned int theTSN; |
2955 | 0 | int j, wake_him = 0, circled = 0; |
2956 | | |
2957 | | /* Recover the tp1 we last saw */ |
2958 | 0 | tp1 = *p_tp1; |
2959 | 0 | if (tp1 == NULL) { |
2960 | 0 | tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); |
2961 | 0 | } |
2962 | 0 | for (j = frag_strt; j <= frag_end; j++) { |
2963 | 0 | theTSN = j + last_tsn; |
2964 | 0 | while (tp1) { |
2965 | 0 | if (tp1->rec.data.doing_fast_retransmit) |
2966 | 0 | (*num_frs) += 1; |
2967 | | |
2968 | | /*- |
2969 | | * CMT: CUCv2 algorithm. For each TSN being |
2970 | | * processed from the sent queue, track the |
2971 | | * next expected pseudo-cumack, or |
2972 | | * rtx_pseudo_cumack, if required. Separate |
2973 | | * cumack trackers for first transmissions, |
2974 | | * and retransmissions. |
2975 | | */ |
2976 | 0 | if ((tp1->sent < SCTP_DATAGRAM_RESEND) && |
2977 | 0 | (tp1->whoTo->find_pseudo_cumack == 1) && |
2978 | 0 | (tp1->snd_count == 1)) { |
2979 | 0 | tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn; |
2980 | 0 | tp1->whoTo->find_pseudo_cumack = 0; |
2981 | 0 | } |
2982 | 0 | if ((tp1->sent < SCTP_DATAGRAM_RESEND) && |
2983 | 0 | (tp1->whoTo->find_rtx_pseudo_cumack == 1) && |
2984 | 0 | (tp1->snd_count > 1)) { |
2985 | 0 | tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn; |
2986 | 0 | tp1->whoTo->find_rtx_pseudo_cumack = 0; |
2987 | 0 | } |
2988 | 0 | if (tp1->rec.data.tsn == theTSN) { |
2989 | 0 | if (tp1->sent != SCTP_DATAGRAM_UNSENT) { |
2990 | | /*- |
2991 | | * must be held until |
2992 | | * cum-ack passes |
2993 | | */ |
2994 | 0 | if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
2995 | | /*- |
2996 | | * If it is less than RESEND, it is |
2997 | | * now no-longer in flight. |
2998 | | * Higher values may already be set |
2999 | | * via previous Gap Ack Blocks... |
3000 | | * i.e. ACKED or RESEND. |
3001 | | */ |
3002 | 0 | if (SCTP_TSN_GT(tp1->rec.data.tsn, |
3003 | 0 | *biggest_newly_acked_tsn)) { |
3004 | 0 | *biggest_newly_acked_tsn = tp1->rec.data.tsn; |
3005 | 0 | } |
3006 | | /*- |
3007 | | * CMT: SFR algo (and HTNA) - set |
3008 | | * saw_newack to 1 for dest being |
3009 | | * newly acked. update |
3010 | | * this_sack_highest_newack if |
3011 | | * appropriate. |
3012 | | */ |
3013 | 0 | if (tp1->rec.data.chunk_was_revoked == 0) |
3014 | 0 | tp1->whoTo->saw_newack = 1; |
3015 | |
|
3016 | 0 | if (SCTP_TSN_GT(tp1->rec.data.tsn, |
3017 | 0 | tp1->whoTo->this_sack_highest_newack)) { |
3018 | 0 | tp1->whoTo->this_sack_highest_newack = |
3019 | 0 | tp1->rec.data.tsn; |
3020 | 0 | } |
3021 | | /*- |
3022 | | * CMT DAC algo: also update |
3023 | | * this_sack_lowest_newack |
3024 | | */ |
3025 | 0 | if (*this_sack_lowest_newack == 0) { |
3026 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { |
3027 | 0 | sctp_log_sack(*this_sack_lowest_newack, |
3028 | 0 | last_tsn, |
3029 | 0 | tp1->rec.data.tsn, |
3030 | 0 | 0, |
3031 | 0 | 0, |
3032 | 0 | SCTP_LOG_TSN_ACKED); |
3033 | 0 | } |
3034 | 0 | *this_sack_lowest_newack = tp1->rec.data.tsn; |
3035 | 0 | } |
3036 | | /*- |
3037 | | * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp |
3038 | | * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set |
3039 | | * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be |
3040 | | * updated. Also trigger search for the next expected (rtx-)pseudo-cumack. |
3041 | | * Separate pseudo_cumack trackers for first transmissions and |
3042 | | * retransmissions. |
3043 | | */ |
3044 | 0 | if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) { |
3045 | 0 | if (tp1->rec.data.chunk_was_revoked == 0) { |
3046 | 0 | tp1->whoTo->new_pseudo_cumack = 1; |
3047 | 0 | } |
3048 | 0 | tp1->whoTo->find_pseudo_cumack = 1; |
3049 | 0 | } |
3050 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { |
3051 | 0 | sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); |
3052 | 0 | } |
3053 | 0 | if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) { |
3054 | 0 | if (tp1->rec.data.chunk_was_revoked == 0) { |
3055 | 0 | tp1->whoTo->new_pseudo_cumack = 1; |
3056 | 0 | } |
3057 | 0 | tp1->whoTo->find_rtx_pseudo_cumack = 1; |
3058 | 0 | } |
3059 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { |
3060 | 0 | sctp_log_sack(*biggest_newly_acked_tsn, |
3061 | 0 | last_tsn, |
3062 | 0 | tp1->rec.data.tsn, |
3063 | 0 | frag_strt, |
3064 | 0 | frag_end, |
3065 | 0 | SCTP_LOG_TSN_ACKED); |
3066 | 0 | } |
3067 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
3068 | 0 | sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP, |
3069 | 0 | tp1->whoTo->flight_size, |
3070 | 0 | tp1->book_size, |
3071 | 0 | (uint32_t)(uintptr_t)tp1->whoTo, |
3072 | 0 | tp1->rec.data.tsn); |
3073 | 0 | } |
3074 | 0 | sctp_flight_size_decrease(tp1); |
3075 | 0 | if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { |
3076 | 0 | (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo, |
3077 | 0 | tp1); |
3078 | 0 | } |
3079 | 0 | sctp_total_flight_decrease(stcb, tp1); |
3080 | |
|
3081 | 0 | tp1->whoTo->net_ack += tp1->send_size; |
3082 | 0 | if (tp1->snd_count < 2) { |
3083 | | /*- |
3084 | | * True non-retransmitted chunk |
3085 | | */ |
3086 | 0 | tp1->whoTo->net_ack2 += tp1->send_size; |
3087 | | |
3088 | | /*- |
3089 | | * update RTO too ? |
3090 | | */ |
3091 | 0 | if (tp1->do_rtt) { |
3092 | 0 | if (*rto_ok && |
3093 | 0 | sctp_calculate_rto(stcb, |
3094 | 0 | &stcb->asoc, |
3095 | 0 | tp1->whoTo, |
3096 | 0 | &tp1->sent_rcv_time, |
3097 | 0 | SCTP_RTT_FROM_DATA)) { |
3098 | 0 | *rto_ok = 0; |
3099 | 0 | } |
3100 | 0 | if (tp1->whoTo->rto_needed == 0) { |
3101 | 0 | tp1->whoTo->rto_needed = 1; |
3102 | 0 | } |
3103 | 0 | tp1->do_rtt = 0; |
3104 | 0 | } |
3105 | 0 | } |
3106 | 0 | } |
3107 | 0 | if (tp1->sent <= SCTP_DATAGRAM_RESEND) { |
3108 | 0 | if (SCTP_TSN_GT(tp1->rec.data.tsn, |
3109 | 0 | stcb->asoc.this_sack_highest_gap)) { |
3110 | 0 | stcb->asoc.this_sack_highest_gap = |
3111 | 0 | tp1->rec.data.tsn; |
3112 | 0 | } |
3113 | 0 | if (tp1->sent == SCTP_DATAGRAM_RESEND) { |
3114 | 0 | sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); |
3115 | | #ifdef SCTP_AUDITING_ENABLED |
3116 | | sctp_audit_log(0xB2, |
3117 | | (stcb->asoc.sent_queue_retran_cnt & 0x000000ff)); |
3118 | | #endif |
3119 | 0 | } |
3120 | 0 | } |
3121 | | /*- |
3122 | | * All chunks NOT UNSENT fall through here and are marked |
3123 | | * (leave PR-SCTP ones that are to skip alone though) |
3124 | | */ |
3125 | 0 | if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) && |
3126 | 0 | (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { |
3127 | 0 | tp1->sent = SCTP_DATAGRAM_MARKED; |
3128 | 0 | } |
3129 | 0 | if (tp1->rec.data.chunk_was_revoked) { |
3130 | | /* deflate the cwnd */ |
3131 | 0 | tp1->whoTo->cwnd -= tp1->book_size; |
3132 | 0 | tp1->rec.data.chunk_was_revoked = 0; |
3133 | 0 | } |
3134 | | /* NR Sack code here */ |
3135 | 0 | if (nr_sacking && |
3136 | 0 | (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { |
3137 | 0 | if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) { |
3138 | 0 | stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--; |
3139 | 0 | #ifdef INVARIANTS |
3140 | 0 | } else { |
3141 | 0 | panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); |
3142 | 0 | #endif |
3143 | 0 | } |
3144 | 0 | if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) && |
3145 | 0 | (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && |
3146 | 0 | TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) { |
3147 | 0 | stcb->asoc.trigger_reset = 1; |
3148 | 0 | } |
3149 | 0 | tp1->sent = SCTP_DATAGRAM_NR_ACKED; |
3150 | 0 | if (tp1->data) { |
3151 | | /* sa_ignore NO_NULL_CHK */ |
3152 | 0 | sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); |
3153 | 0 | sctp_m_freem(tp1->data); |
3154 | 0 | tp1->data = NULL; |
3155 | 0 | } |
3156 | 0 | wake_him++; |
3157 | 0 | } |
3158 | 0 | } |
3159 | 0 | break; |
3160 | 0 | } /* if (tp1->tsn == theTSN) */ |
3161 | 0 | if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) { |
3162 | 0 | break; |
3163 | 0 | } |
3164 | 0 | tp1 = TAILQ_NEXT(tp1, sctp_next); |
3165 | 0 | if ((tp1 == NULL) && (circled == 0)) { |
3166 | 0 | circled++; |
3167 | 0 | tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); |
3168 | 0 | } |
3169 | 0 | } /* end while (tp1) */ |
3170 | 0 | if (tp1 == NULL) { |
3171 | 0 | circled = 0; |
3172 | 0 | tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); |
3173 | 0 | } |
3174 | | /* In case the fragments were not in order we must reset */ |
3175 | 0 | } /* end for (j = fragStart */ |
3176 | 0 | *p_tp1 = tp1; |
3177 | 0 | return (wake_him); /* Return value only used for nr-sack */ |
3178 | 0 | } |
3179 | | |
3180 | | static int |
3181 | | sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, |
3182 | | uint32_t last_tsn, uint32_t *biggest_tsn_acked, |
3183 | | uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack, |
3184 | | int num_seg, int num_nr_seg, int *rto_ok) |
3185 | 0 | { |
3186 | 0 | struct sctp_gap_ack_block *frag, block; |
3187 | 0 | struct sctp_tmit_chunk *tp1; |
3188 | 0 | int i; |
3189 | 0 | int num_frs = 0; |
3190 | 0 | int chunk_freed; |
3191 | 0 | int non_revocable; |
3192 | 0 | uint16_t frag_strt, frag_end, prev_frag_end; |
3193 | |
|
3194 | 0 | tp1 = TAILQ_FIRST(&asoc->sent_queue); |
3195 | 0 | prev_frag_end = 0; |
3196 | 0 | chunk_freed = 0; |
3197 | |
|
3198 | 0 | for (i = 0; i < (num_seg + num_nr_seg); i++) { |
3199 | 0 | if (i == num_seg) { |
3200 | 0 | prev_frag_end = 0; |
3201 | 0 | tp1 = TAILQ_FIRST(&asoc->sent_queue); |
3202 | 0 | } |
3203 | 0 | frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, |
3204 | 0 | sizeof(struct sctp_gap_ack_block), (uint8_t *) &block); |
3205 | 0 | *offset += sizeof(block); |
3206 | 0 | if (frag == NULL) { |
3207 | 0 | return (chunk_freed); |
3208 | 0 | } |
3209 | 0 | frag_strt = ntohs(frag->start); |
3210 | 0 | frag_end = ntohs(frag->end); |
3211 | |
|
3212 | 0 | if (frag_strt > frag_end) { |
3213 | | /* This gap report is malformed, skip it. */ |
3214 | 0 | continue; |
3215 | 0 | } |
3216 | 0 | if (frag_strt <= prev_frag_end) { |
3217 | | /* This gap report is not in order, so restart. */ |
3218 | 0 | tp1 = TAILQ_FIRST(&asoc->sent_queue); |
3219 | 0 | } |
3220 | 0 | if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) { |
3221 | 0 | *biggest_tsn_acked = last_tsn + frag_end; |
3222 | 0 | } |
3223 | 0 | if (i < num_seg) { |
3224 | 0 | non_revocable = 0; |
3225 | 0 | } else { |
3226 | 0 | non_revocable = 1; |
3227 | 0 | } |
3228 | 0 | if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end, |
3229 | 0 | non_revocable, &num_frs, biggest_newly_acked_tsn, |
3230 | 0 | this_sack_lowest_newack, rto_ok)) { |
3231 | 0 | chunk_freed = 1; |
3232 | 0 | } |
3233 | 0 | prev_frag_end = frag_end; |
3234 | 0 | } |
3235 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
3236 | 0 | if (num_frs) |
3237 | 0 | sctp_log_fr(*biggest_tsn_acked, |
3238 | 0 | *biggest_newly_acked_tsn, |
3239 | 0 | last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); |
3240 | 0 | } |
3241 | 0 | return (chunk_freed); |
3242 | 0 | } |
3243 | | |
3244 | | static void |
3245 | | sctp_check_for_revoked(struct sctp_tcb *stcb, |
3246 | | struct sctp_association *asoc, uint32_t cumack, |
3247 | | uint32_t biggest_tsn_acked) |
3248 | 0 | { |
3249 | 0 | struct sctp_tmit_chunk *tp1; |
3250 | |
|
3251 | 0 | TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
3252 | 0 | if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) { |
3253 | | /* |
3254 | | * ok this guy is either ACK or MARKED. If it is |
3255 | | * ACKED it has been previously acked but not this |
3256 | | * time i.e. revoked. If it is MARKED it was ACK'ed |
3257 | | * again. |
3258 | | */ |
3259 | 0 | if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) { |
3260 | 0 | break; |
3261 | 0 | } |
3262 | 0 | if (tp1->sent == SCTP_DATAGRAM_ACKED) { |
3263 | | /* it has been revoked */ |
3264 | 0 | tp1->sent = SCTP_DATAGRAM_SENT; |
3265 | 0 | tp1->rec.data.chunk_was_revoked = 1; |
3266 | | /* We must add this stuff back in to |
3267 | | * assure timers and such get started. |
3268 | | */ |
3269 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
3270 | 0 | sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, |
3271 | 0 | tp1->whoTo->flight_size, |
3272 | 0 | tp1->book_size, |
3273 | 0 | (uint32_t)(uintptr_t)tp1->whoTo, |
3274 | 0 | tp1->rec.data.tsn); |
3275 | 0 | } |
3276 | 0 | sctp_flight_size_increase(tp1); |
3277 | 0 | sctp_total_flight_increase(stcb, tp1); |
3278 | | /* We inflate the cwnd to compensate for our |
3279 | | * artificial inflation of the flight_size. |
3280 | | */ |
3281 | 0 | tp1->whoTo->cwnd += tp1->book_size; |
3282 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { |
3283 | 0 | sctp_log_sack(asoc->last_acked_seq, |
3284 | 0 | cumack, |
3285 | 0 | tp1->rec.data.tsn, |
3286 | 0 | 0, |
3287 | 0 | 0, |
3288 | 0 | SCTP_LOG_TSN_REVOKED); |
3289 | 0 | } |
3290 | 0 | } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { |
3291 | | /* it has been re-acked in this SACK */ |
3292 | 0 | tp1->sent = SCTP_DATAGRAM_ACKED; |
3293 | 0 | } |
3294 | 0 | } |
3295 | 0 | if (tp1->sent == SCTP_DATAGRAM_UNSENT) |
3296 | 0 | break; |
3297 | 0 | } |
3298 | 0 | } |
3299 | | |
3300 | | static void |
3301 | | sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, |
3302 | | uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved) |
3303 | 0 | { |
3304 | 0 | struct sctp_tmit_chunk *tp1; |
3305 | 0 | int strike_flag = 0; |
3306 | 0 | struct timeval now; |
3307 | 0 | uint32_t sending_seq; |
3308 | 0 | struct sctp_nets *net; |
3309 | 0 | int num_dests_sacked = 0; |
3310 | | |
3311 | | /* |
3312 | | * select the sending_seq, this is either the next thing ready to be |
3313 | | * sent but not transmitted, OR, the next seq we assign. |
3314 | | */ |
3315 | 0 | tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); |
3316 | 0 | if (tp1 == NULL) { |
3317 | 0 | sending_seq = asoc->sending_seq; |
3318 | 0 | } else { |
3319 | 0 | sending_seq = tp1->rec.data.tsn; |
3320 | 0 | } |
3321 | | |
3322 | | /* CMT DAC algo: finding out if SACK is a mixed SACK */ |
3323 | 0 | if ((asoc->sctp_cmt_on_off > 0) && |
3324 | 0 | SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { |
3325 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
3326 | 0 | if (net->saw_newack) |
3327 | 0 | num_dests_sacked++; |
3328 | 0 | } |
3329 | 0 | } |
3330 | 0 | if (stcb->asoc.prsctp_supported) { |
3331 | 0 | (void)SCTP_GETTIME_TIMEVAL(&now); |
3332 | 0 | } |
3333 | 0 | TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
3334 | 0 | strike_flag = 0; |
3335 | 0 | if (tp1->no_fr_allowed) { |
3336 | | /* this one had a timeout or something */ |
3337 | 0 | continue; |
3338 | 0 | } |
3339 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
3340 | 0 | if (tp1->sent < SCTP_DATAGRAM_RESEND) |
3341 | 0 | sctp_log_fr(biggest_tsn_newly_acked, |
3342 | 0 | tp1->rec.data.tsn, |
3343 | 0 | tp1->sent, |
3344 | 0 | SCTP_FR_LOG_CHECK_STRIKE); |
3345 | 0 | } |
3346 | 0 | if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) || |
3347 | 0 | tp1->sent == SCTP_DATAGRAM_UNSENT) { |
3348 | | /* done */ |
3349 | 0 | break; |
3350 | 0 | } |
3351 | 0 | if (stcb->asoc.prsctp_supported) { |
3352 | 0 | if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { |
3353 | | /* Is it expired? */ |
3354 | 0 | #if !(defined(__FreeBSD__) && !defined(__Userspace__)) |
3355 | 0 | if (timercmp(&now, &tp1->rec.data.timetodrop, >)) { |
3356 | | #else |
3357 | | if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { |
3358 | | #endif |
3359 | | /* Yes so drop it */ |
3360 | 0 | if (tp1->data != NULL) { |
3361 | 0 | (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, |
3362 | 0 | SCTP_SO_NOT_LOCKED); |
3363 | 0 | } |
3364 | 0 | continue; |
3365 | 0 | } |
3366 | 0 | } |
3367 | 0 | } |
3368 | 0 | if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) && |
3369 | 0 | !(accum_moved && asoc->fast_retran_loss_recovery)) { |
3370 | | /* we are beyond the tsn in the sack */ |
3371 | 0 | break; |
3372 | 0 | } |
3373 | 0 | if (tp1->sent >= SCTP_DATAGRAM_RESEND) { |
3374 | | /* either a RESEND, ACKED, or MARKED */ |
3375 | | /* skip */ |
3376 | 0 | if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { |
3377 | | /* Continue strikin FWD-TSN chunks */ |
3378 | 0 | tp1->rec.data.fwd_tsn_cnt++; |
3379 | 0 | } |
3380 | 0 | continue; |
3381 | 0 | } |
3382 | | /* |
3383 | | * CMT : SFR algo (covers part of DAC and HTNA as well) |
3384 | | */ |
3385 | 0 | if (tp1->whoTo && tp1->whoTo->saw_newack == 0) { |
3386 | | /* |
3387 | | * No new acks were received for data sent to this |
3388 | | * dest. Therefore, according to the SFR algo for |
3389 | | * CMT, no data sent to this dest can be marked for |
3390 | | * FR using this SACK. |
3391 | | */ |
3392 | 0 | continue; |
3393 | 0 | } else if (tp1->whoTo && |
3394 | 0 | SCTP_TSN_GT(tp1->rec.data.tsn, |
3395 | 0 | tp1->whoTo->this_sack_highest_newack) && |
3396 | 0 | !(accum_moved && asoc->fast_retran_loss_recovery)) { |
3397 | | /* |
3398 | | * CMT: New acks were received for data sent to |
3399 | | * this dest. But no new acks were seen for data |
3400 | | * sent after tp1. Therefore, according to the SFR |
3401 | | * algo for CMT, tp1 cannot be marked for FR using |
3402 | | * this SACK. This step covers part of the DAC algo |
3403 | | * and the HTNA algo as well. |
3404 | | */ |
3405 | 0 | continue; |
3406 | 0 | } |
3407 | | /* |
3408 | | * Here we check to see if we were have already done a FR |
3409 | | * and if so we see if the biggest TSN we saw in the sack is |
3410 | | * smaller than the recovery point. If so we don't strike |
3411 | | * the tsn... otherwise we CAN strike the TSN. |
3412 | | */ |
3413 | | /* |
3414 | | * @@@ JRI: Check for CMT |
3415 | | * if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) { |
3416 | | */ |
3417 | 0 | if (accum_moved && asoc->fast_retran_loss_recovery) { |
3418 | | /* |
3419 | | * Strike the TSN if in fast-recovery and cum-ack |
3420 | | * moved. |
3421 | | */ |
3422 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
3423 | 0 | sctp_log_fr(biggest_tsn_newly_acked, |
3424 | 0 | tp1->rec.data.tsn, |
3425 | 0 | tp1->sent, |
3426 | 0 | SCTP_FR_LOG_STRIKE_CHUNK); |
3427 | 0 | } |
3428 | 0 | if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
3429 | 0 | tp1->sent++; |
3430 | 0 | } |
3431 | 0 | if ((asoc->sctp_cmt_on_off > 0) && |
3432 | 0 | SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { |
3433 | | /* |
3434 | | * CMT DAC algorithm: If SACK flag is set to |
3435 | | * 0, then lowest_newack test will not pass |
3436 | | * because it would have been set to the |
3437 | | * cumack earlier. If not already to be |
3438 | | * rtx'd, If not a mixed sack and if tp1 is |
3439 | | * not between two sacked TSNs, then mark by |
3440 | | * one more. |
3441 | | * NOTE that we are marking by one additional time since the SACK DAC flag indicates that |
3442 | | * two packets have been received after this missing TSN. |
3443 | | */ |
3444 | 0 | if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && |
3445 | 0 | SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) { |
3446 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
3447 | 0 | sctp_log_fr(16 + num_dests_sacked, |
3448 | 0 | tp1->rec.data.tsn, |
3449 | 0 | tp1->sent, |
3450 | 0 | SCTP_FR_LOG_STRIKE_CHUNK); |
3451 | 0 | } |
3452 | 0 | tp1->sent++; |
3453 | 0 | } |
3454 | 0 | } |
3455 | 0 | } else if ((tp1->rec.data.doing_fast_retransmit) && |
3456 | 0 | (asoc->sctp_cmt_on_off == 0)) { |
3457 | | /* |
3458 | | * For those that have done a FR we must take |
3459 | | * special consideration if we strike. I.e the |
3460 | | * biggest_newly_acked must be higher than the |
3461 | | * sending_seq at the time we did the FR. |
3462 | | */ |
3463 | 0 | if ( |
3464 | | #ifdef SCTP_FR_TO_ALTERNATE |
3465 | | /* |
3466 | | * If FR's go to new networks, then we must only do |
3467 | | * this for singly homed asoc's. However if the FR's |
3468 | | * go to the same network (Armando's work) then its |
3469 | | * ok to FR multiple times. |
3470 | | */ |
3471 | | (asoc->numnets < 2) |
3472 | | #else |
3473 | 0 | (1) |
3474 | 0 | #endif |
3475 | 0 | ) { |
3476 | 0 | if (SCTP_TSN_GE(biggest_tsn_newly_acked, |
3477 | 0 | tp1->rec.data.fast_retran_tsn)) { |
3478 | | /* |
3479 | | * Strike the TSN, since this ack is |
3480 | | * beyond where things were when we |
3481 | | * did a FR. |
3482 | | */ |
3483 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
3484 | 0 | sctp_log_fr(biggest_tsn_newly_acked, |
3485 | 0 | tp1->rec.data.tsn, |
3486 | 0 | tp1->sent, |
3487 | 0 | SCTP_FR_LOG_STRIKE_CHUNK); |
3488 | 0 | } |
3489 | 0 | if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
3490 | 0 | tp1->sent++; |
3491 | 0 | } |
3492 | 0 | strike_flag = 1; |
3493 | 0 | if ((asoc->sctp_cmt_on_off > 0) && |
3494 | 0 | SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { |
3495 | | /* |
3496 | | * CMT DAC algorithm: If |
3497 | | * SACK flag is set to 0, |
3498 | | * then lowest_newack test |
3499 | | * will not pass because it |
3500 | | * would have been set to |
3501 | | * the cumack earlier. If |
3502 | | * not already to be rtx'd, |
3503 | | * If not a mixed sack and |
3504 | | * if tp1 is not between two |
3505 | | * sacked TSNs, then mark by |
3506 | | * one more. |
3507 | | * NOTE that we are marking by one additional time since the SACK DAC flag indicates that |
3508 | | * two packets have been received after this missing TSN. |
3509 | | */ |
3510 | 0 | if ((tp1->sent < SCTP_DATAGRAM_RESEND) && |
3511 | 0 | (num_dests_sacked == 1) && |
3512 | 0 | SCTP_TSN_GT(this_sack_lowest_newack, |
3513 | 0 | tp1->rec.data.tsn)) { |
3514 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
3515 | 0 | sctp_log_fr(32 + num_dests_sacked, |
3516 | 0 | tp1->rec.data.tsn, |
3517 | 0 | tp1->sent, |
3518 | 0 | SCTP_FR_LOG_STRIKE_CHUNK); |
3519 | 0 | } |
3520 | 0 | if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
3521 | 0 | tp1->sent++; |
3522 | 0 | } |
3523 | 0 | } |
3524 | 0 | } |
3525 | 0 | } |
3526 | 0 | } |
3527 | | /* |
3528 | | * JRI: TODO: remove code for HTNA algo. CMT's |
3529 | | * SFR algo covers HTNA. |
3530 | | */ |
3531 | 0 | } else if (SCTP_TSN_GT(tp1->rec.data.tsn, |
3532 | 0 | biggest_tsn_newly_acked)) { |
3533 | | /* |
3534 | | * We don't strike these: This is the HTNA |
3535 | | * algorithm i.e. we don't strike If our TSN is |
3536 | | * larger than the Highest TSN Newly Acked. |
3537 | | */ |
3538 | 0 | ; |
3539 | 0 | } else { |
3540 | | /* Strike the TSN */ |
3541 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
3542 | 0 | sctp_log_fr(biggest_tsn_newly_acked, |
3543 | 0 | tp1->rec.data.tsn, |
3544 | 0 | tp1->sent, |
3545 | 0 | SCTP_FR_LOG_STRIKE_CHUNK); |
3546 | 0 | } |
3547 | 0 | if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
3548 | 0 | tp1->sent++; |
3549 | 0 | } |
3550 | 0 | if ((asoc->sctp_cmt_on_off > 0) && |
3551 | 0 | SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { |
3552 | | /* |
3553 | | * CMT DAC algorithm: If SACK flag is set to |
3554 | | * 0, then lowest_newack test will not pass |
3555 | | * because it would have been set to the |
3556 | | * cumack earlier. If not already to be |
3557 | | * rtx'd, If not a mixed sack and if tp1 is |
3558 | | * not between two sacked TSNs, then mark by |
3559 | | * one more. |
3560 | | * NOTE that we are marking by one additional time since the SACK DAC flag indicates that |
3561 | | * two packets have been received after this missing TSN. |
3562 | | */ |
3563 | 0 | if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && |
3564 | 0 | SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) { |
3565 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
3566 | 0 | sctp_log_fr(48 + num_dests_sacked, |
3567 | 0 | tp1->rec.data.tsn, |
3568 | 0 | tp1->sent, |
3569 | 0 | SCTP_FR_LOG_STRIKE_CHUNK); |
3570 | 0 | } |
3571 | 0 | tp1->sent++; |
3572 | 0 | } |
3573 | 0 | } |
3574 | 0 | } |
3575 | 0 | if (tp1->sent == SCTP_DATAGRAM_RESEND) { |
3576 | 0 | struct sctp_nets *alt; |
3577 | | |
3578 | | /* fix counts and things */ |
3579 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
3580 | 0 | sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, |
3581 | 0 | (tp1->whoTo ? (tp1->whoTo->flight_size) : 0), |
3582 | 0 | tp1->book_size, |
3583 | 0 | (uint32_t)(uintptr_t)tp1->whoTo, |
3584 | 0 | tp1->rec.data.tsn); |
3585 | 0 | } |
3586 | 0 | if (tp1->whoTo) { |
3587 | 0 | tp1->whoTo->net_ack++; |
3588 | 0 | sctp_flight_size_decrease(tp1); |
3589 | 0 | if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { |
3590 | 0 | (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo, |
3591 | 0 | tp1); |
3592 | 0 | } |
3593 | 0 | } |
3594 | |
|
3595 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { |
3596 | 0 | sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, |
3597 | 0 | asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); |
3598 | 0 | } |
3599 | | /* add back to the rwnd */ |
3600 | 0 | asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); |
3601 | | |
3602 | | /* remove from the total flight */ |
3603 | 0 | sctp_total_flight_decrease(stcb, tp1); |
3604 | |
|
3605 | 0 | if ((stcb->asoc.prsctp_supported) && |
3606 | 0 | (PR_SCTP_RTX_ENABLED(tp1->flags))) { |
3607 | | /* Has it been retransmitted tv_sec times? - we store the retran count there. */ |
3608 | 0 | if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { |
3609 | | /* Yes, so drop it */ |
3610 | 0 | if (tp1->data != NULL) { |
3611 | 0 | (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, |
3612 | 0 | SCTP_SO_NOT_LOCKED); |
3613 | 0 | } |
3614 | | /* Make sure to flag we had a FR */ |
3615 | 0 | if (tp1->whoTo != NULL) { |
3616 | 0 | tp1->whoTo->net_ack++; |
3617 | 0 | } |
3618 | 0 | continue; |
3619 | 0 | } |
3620 | 0 | } |
3621 | | /* SCTP_PRINTF("OK, we are now ready to FR this guy\n"); */ |
3622 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
3623 | 0 | sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count, |
3624 | 0 | 0, SCTP_FR_MARKED); |
3625 | 0 | } |
3626 | 0 | if (strike_flag) { |
3627 | | /* This is a subsequent FR */ |
3628 | 0 | SCTP_STAT_INCR(sctps_sendmultfastretrans); |
3629 | 0 | } |
3630 | 0 | sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); |
3631 | 0 | if (asoc->sctp_cmt_on_off > 0) { |
3632 | | /* |
3633 | | * CMT: Using RTX_SSTHRESH policy for CMT. |
3634 | | * If CMT is being used, then pick dest with |
3635 | | * largest ssthresh for any retransmission. |
3636 | | */ |
3637 | 0 | tp1->no_fr_allowed = 1; |
3638 | 0 | alt = tp1->whoTo; |
3639 | | /*sa_ignore NO_NULL_CHK*/ |
3640 | 0 | if (asoc->sctp_cmt_pf > 0) { |
3641 | | /* JRS 5/18/07 - If CMT PF is on, use the PF version of find_alt_net() */ |
3642 | 0 | alt = sctp_find_alternate_net(stcb, alt, 2); |
3643 | 0 | } else { |
3644 | | /* JRS 5/18/07 - If only CMT is on, use the CMT version of find_alt_net() */ |
3645 | | /*sa_ignore NO_NULL_CHK*/ |
3646 | 0 | alt = sctp_find_alternate_net(stcb, alt, 1); |
3647 | 0 | } |
3648 | 0 | if (alt == NULL) { |
3649 | 0 | alt = tp1->whoTo; |
3650 | 0 | } |
3651 | | /* |
3652 | | * CUCv2: If a different dest is picked for |
3653 | | * the retransmission, then new |
3654 | | * (rtx-)pseudo_cumack needs to be tracked |
3655 | | * for orig dest. Let CUCv2 track new (rtx-) |
3656 | | * pseudo-cumack always. |
3657 | | */ |
3658 | 0 | if (tp1->whoTo) { |
3659 | 0 | tp1->whoTo->find_pseudo_cumack = 1; |
3660 | 0 | tp1->whoTo->find_rtx_pseudo_cumack = 1; |
3661 | 0 | } |
3662 | 0 | } else {/* CMT is OFF */ |
3663 | | #ifdef SCTP_FR_TO_ALTERNATE |
3664 | | /* Can we find an alternate? */ |
3665 | | alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); |
3666 | | #else |
3667 | | /* |
3668 | | * default behavior is to NOT retransmit |
3669 | | * FR's to an alternate. Armando Caro's |
3670 | | * paper details why. |
3671 | | */ |
3672 | 0 | alt = tp1->whoTo; |
3673 | 0 | #endif |
3674 | 0 | } |
3675 | |
|
3676 | 0 | tp1->rec.data.doing_fast_retransmit = 1; |
3677 | | /* mark the sending seq for possible subsequent FR's */ |
3678 | | /* |
3679 | | * SCTP_PRINTF("Marking TSN for FR new value %x\n", |
3680 | | * (uint32_t)tpi->rec.data.tsn); |
3681 | | */ |
3682 | 0 | if (TAILQ_EMPTY(&asoc->send_queue)) { |
3683 | | /* |
3684 | | * If the queue of send is empty then its |
3685 | | * the next sequence number that will be |
3686 | | * assigned so we subtract one from this to |
3687 | | * get the one we last sent. |
3688 | | */ |
3689 | 0 | tp1->rec.data.fast_retran_tsn = sending_seq; |
3690 | 0 | } else { |
3691 | | /* |
3692 | | * If there are chunks on the send queue |
3693 | | * (unsent data that has made it from the |
3694 | | * stream queues but not out the door, we |
3695 | | * take the first one (which will have the |
3696 | | * lowest TSN) and subtract one to get the |
3697 | | * one we last sent. |
3698 | | */ |
3699 | 0 | struct sctp_tmit_chunk *ttt; |
3700 | |
|
3701 | 0 | ttt = TAILQ_FIRST(&asoc->send_queue); |
3702 | 0 | tp1->rec.data.fast_retran_tsn = |
3703 | 0 | ttt->rec.data.tsn; |
3704 | 0 | } |
3705 | |
|
3706 | 0 | if (tp1->do_rtt) { |
3707 | | /* |
3708 | | * this guy had a RTO calculation pending on |
3709 | | * it, cancel it |
3710 | | */ |
3711 | 0 | if ((tp1->whoTo != NULL) && |
3712 | 0 | (tp1->whoTo->rto_needed == 0)) { |
3713 | 0 | tp1->whoTo->rto_needed = 1; |
3714 | 0 | } |
3715 | 0 | tp1->do_rtt = 0; |
3716 | 0 | } |
3717 | 0 | if (alt != tp1->whoTo) { |
3718 | | /* yes, there is an alternate. */ |
3719 | 0 | sctp_free_remote_addr(tp1->whoTo); |
3720 | | /*sa_ignore FREED_MEMORY*/ |
3721 | 0 | tp1->whoTo = alt; |
3722 | 0 | atomic_add_int(&alt->ref_count, 1); |
3723 | 0 | } |
3724 | 0 | } |
3725 | 0 | } |
3726 | 0 | } |
3727 | | |
3728 | | struct sctp_tmit_chunk * |
3729 | | sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, |
3730 | | struct sctp_association *asoc) |
3731 | 0 | { |
3732 | 0 | struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; |
3733 | 0 | struct timeval now; |
3734 | 0 | int now_filled = 0; |
3735 | |
|
3736 | 0 | if (asoc->prsctp_supported == 0) { |
3737 | 0 | return (NULL); |
3738 | 0 | } |
3739 | 0 | TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { |
3740 | 0 | if (tp1->sent != SCTP_FORWARD_TSN_SKIP && |
3741 | 0 | tp1->sent != SCTP_DATAGRAM_RESEND && |
3742 | 0 | tp1->sent != SCTP_DATAGRAM_NR_ACKED) { |
3743 | | /* no chance to advance, out of here */ |
3744 | 0 | break; |
3745 | 0 | } |
3746 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { |
3747 | 0 | if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || |
3748 | 0 | (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { |
3749 | 0 | sctp_misc_ints(SCTP_FWD_TSN_CHECK, |
3750 | 0 | asoc->advanced_peer_ack_point, |
3751 | 0 | tp1->rec.data.tsn, 0, 0); |
3752 | 0 | } |
3753 | 0 | } |
3754 | 0 | if (!PR_SCTP_ENABLED(tp1->flags)) { |
3755 | | /* |
3756 | | * We can't fwd-tsn past any that are reliable aka |
3757 | | * retransmitted until the asoc fails. |
3758 | | */ |
3759 | 0 | break; |
3760 | 0 | } |
3761 | 0 | if (!now_filled) { |
3762 | 0 | (void)SCTP_GETTIME_TIMEVAL(&now); |
3763 | 0 | now_filled = 1; |
3764 | 0 | } |
3765 | | /* |
3766 | | * now we got a chunk which is marked for another |
3767 | | * retransmission to a PR-stream but has run out its chances |
3768 | | * already maybe OR has been marked to skip now. Can we skip |
3769 | | * it if its a resend? |
3770 | | */ |
3771 | 0 | if (tp1->sent == SCTP_DATAGRAM_RESEND && |
3772 | 0 | (PR_SCTP_TTL_ENABLED(tp1->flags))) { |
3773 | | /* |
3774 | | * Now is this one marked for resend and its time is |
3775 | | * now up? |
3776 | | */ |
3777 | 0 | #if !(defined(__FreeBSD__) && !defined(__Userspace__)) |
3778 | 0 | if (timercmp(&now, &tp1->rec.data.timetodrop, >)) { |
3779 | | #else |
3780 | | if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { |
3781 | | #endif |
3782 | | /* Yes so drop it */ |
3783 | 0 | if (tp1->data) { |
3784 | 0 | (void)sctp_release_pr_sctp_chunk(stcb, tp1, |
3785 | 0 | 1, SCTP_SO_NOT_LOCKED); |
3786 | 0 | } |
3787 | 0 | } else { |
3788 | | /* |
3789 | | * No, we are done when hit one for resend |
3790 | | * whos time as not expired. |
3791 | | */ |
3792 | 0 | break; |
3793 | 0 | } |
3794 | 0 | } |
3795 | | /* |
3796 | | * Ok now if this chunk is marked to drop it we can clean up |
3797 | | * the chunk, advance our peer ack point and we can check |
3798 | | * the next chunk. |
3799 | | */ |
3800 | 0 | if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || |
3801 | 0 | (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { |
3802 | | /* advance PeerAckPoint goes forward */ |
3803 | 0 | if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) { |
3804 | 0 | asoc->advanced_peer_ack_point = tp1->rec.data.tsn; |
3805 | 0 | a_adv = tp1; |
3806 | 0 | } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) { |
3807 | | /* No update but we do save the chk */ |
3808 | 0 | a_adv = tp1; |
3809 | 0 | } |
3810 | 0 | } else { |
3811 | | /* |
3812 | | * If it is still in RESEND we can advance no |
3813 | | * further |
3814 | | */ |
3815 | 0 | break; |
3816 | 0 | } |
3817 | 0 | } |
3818 | 0 | return (a_adv); |
3819 | 0 | } |
3820 | | |
3821 | | static int |
3822 | | sctp_fs_audit(struct sctp_association *asoc) |
3823 | 0 | { |
3824 | 0 | struct sctp_tmit_chunk *chk; |
3825 | 0 | int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0; |
3826 | 0 | int ret; |
3827 | | #ifndef INVARIANTS |
3828 | | int entry_flight, entry_cnt; |
3829 | | #endif |
3830 | |
|
3831 | 0 | ret = 0; |
3832 | | #ifndef INVARIANTS |
3833 | | entry_flight = asoc->total_flight; |
3834 | | entry_cnt = asoc->total_flight_count; |
3835 | | #endif |
3836 | 0 | if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt) |
3837 | 0 | return (0); |
3838 | | |
3839 | 0 | TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { |
3840 | 0 | if (chk->sent < SCTP_DATAGRAM_RESEND) { |
3841 | 0 | SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n", |
3842 | 0 | chk->rec.data.tsn, |
3843 | 0 | chk->send_size, |
3844 | 0 | chk->snd_count); |
3845 | 0 | inflight++; |
3846 | 0 | } else if (chk->sent == SCTP_DATAGRAM_RESEND) { |
3847 | 0 | resend++; |
3848 | 0 | } else if (chk->sent < SCTP_DATAGRAM_ACKED) { |
3849 | 0 | inbetween++; |
3850 | 0 | } else if (chk->sent > SCTP_DATAGRAM_ACKED) { |
3851 | 0 | above++; |
3852 | 0 | } else { |
3853 | 0 | acked++; |
3854 | 0 | } |
3855 | 0 | } |
3856 | |
|
3857 | 0 | if ((inflight > 0) || (inbetween > 0)) { |
3858 | 0 | #ifdef INVARIANTS |
3859 | 0 | panic("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d", |
3860 | 0 | inflight, inbetween, resend, above, acked); |
3861 | | #else |
3862 | | SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n", |
3863 | | entry_flight, entry_cnt); |
3864 | | SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n", |
3865 | | inflight, inbetween, resend, above, acked); |
3866 | | ret = 1; |
3867 | | #endif |
3868 | 0 | } |
3869 | 0 | return (ret); |
3870 | 0 | } |
3871 | | |
3872 | | static void |
3873 | | sctp_window_probe_recovery(struct sctp_tcb *stcb, |
3874 | | struct sctp_association *asoc, |
3875 | | struct sctp_tmit_chunk *tp1) |
3876 | 0 | { |
3877 | 0 | tp1->window_probe = 0; |
3878 | 0 | if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) { |
3879 | | /* TSN's skipped we do NOT move back. */ |
3880 | 0 | sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD, |
3881 | 0 | tp1->whoTo ? tp1->whoTo->flight_size : 0, |
3882 | 0 | tp1->book_size, |
3883 | 0 | (uint32_t)(uintptr_t)tp1->whoTo, |
3884 | 0 | tp1->rec.data.tsn); |
3885 | 0 | return; |
3886 | 0 | } |
3887 | | /* First setup this by shrinking flight */ |
3888 | 0 | if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { |
3889 | 0 | (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo, |
3890 | 0 | tp1); |
3891 | 0 | } |
3892 | 0 | sctp_flight_size_decrease(tp1); |
3893 | 0 | sctp_total_flight_decrease(stcb, tp1); |
3894 | | /* Now mark for resend */ |
3895 | 0 | tp1->sent = SCTP_DATAGRAM_RESEND; |
3896 | 0 | sctp_ucount_incr(asoc->sent_queue_retran_cnt); |
3897 | |
|
3898 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
3899 | 0 | sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, |
3900 | 0 | tp1->whoTo->flight_size, |
3901 | 0 | tp1->book_size, |
3902 | 0 | (uint32_t)(uintptr_t)tp1->whoTo, |
3903 | 0 | tp1->rec.data.tsn); |
3904 | 0 | } |
3905 | 0 | } |
3906 | | |
3907 | | void |
3908 | | sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, |
3909 | | uint32_t rwnd, int *abort_now, int ecne_seen) |
3910 | 1.11k | { |
3911 | 1.11k | struct sctp_nets *net; |
3912 | 1.11k | struct sctp_association *asoc; |
3913 | 1.11k | struct sctp_tmit_chunk *tp1, *tp2; |
3914 | 1.11k | uint32_t old_rwnd; |
3915 | 1.11k | int win_probe_recovery = 0; |
3916 | 1.11k | int win_probe_recovered = 0; |
3917 | 1.11k | int j, done_once = 0; |
3918 | 1.11k | int rto_ok = 1; |
3919 | 1.11k | uint32_t send_s; |
3920 | | |
3921 | 1.11k | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { |
3922 | 0 | sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, |
3923 | 0 | rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); |
3924 | 0 | } |
3925 | 1.11k | SCTP_TCB_LOCK_ASSERT(stcb); |
3926 | | #ifdef SCTP_ASOCLOG_OF_TSNS |
3927 | | stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; |
3928 | | stcb->asoc.cumack_log_at++; |
3929 | | if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { |
3930 | | stcb->asoc.cumack_log_at = 0; |
3931 | | } |
3932 | | #endif |
3933 | 1.11k | asoc = &stcb->asoc; |
3934 | 1.11k | old_rwnd = asoc->peers_rwnd; |
3935 | 1.11k | if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) { |
3936 | | /* old ack */ |
3937 | 989 | return; |
3938 | 989 | } else if (asoc->last_acked_seq == cumack) { |
3939 | | /* Window update sack */ |
3940 | 0 | asoc->peers_rwnd = sctp_sbspace_sub(rwnd, |
3941 | 0 | (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); |
3942 | 0 | if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { |
3943 | | /* SWS sender side engages */ |
3944 | 0 | asoc->peers_rwnd = 0; |
3945 | 0 | } |
3946 | 0 | if (asoc->peers_rwnd > old_rwnd) { |
3947 | 0 | goto again; |
3948 | 0 | } |
3949 | 0 | return; |
3950 | 0 | } |
3951 | | |
3952 | | /* First setup for CC stuff */ |
3953 | 1.11k | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
3954 | 239 | if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) { |
3955 | | /* Drag along the window_tsn for cwr's */ |
3956 | 239 | net->cwr_window_tsn = cumack; |
3957 | 239 | } |
3958 | 239 | net->prev_cwnd = net->cwnd; |
3959 | 239 | net->net_ack = 0; |
3960 | 239 | net->net_ack2 = 0; |
3961 | | |
3962 | | /* |
3963 | | * CMT: Reset CUC and Fast recovery algo variables before |
3964 | | * SACK processing |
3965 | | */ |
3966 | 239 | net->new_pseudo_cumack = 0; |
3967 | 239 | net->will_exit_fast_recovery = 0; |
3968 | 239 | if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { |
3969 | 0 | (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net); |
3970 | 0 | } |
3971 | 239 | } |
3972 | 123 | if (!TAILQ_EMPTY(&asoc->sent_queue)) { |
3973 | 68 | tp1 = TAILQ_LAST(&asoc->sent_queue, |
3974 | 68 | sctpchunk_listhead); |
3975 | 68 | send_s = tp1->rec.data.tsn + 1; |
3976 | 68 | } else { |
3977 | 55 | send_s = asoc->sending_seq; |
3978 | 55 | } |
3979 | 123 | if (SCTP_TSN_GE(cumack, send_s)) { |
3980 | 123 | struct mbuf *op_err; |
3981 | 123 | char msg[SCTP_DIAG_INFO_LEN]; |
3982 | | |
3983 | 123 | *abort_now = 1; |
3984 | | /* XXX */ |
3985 | 123 | SCTP_SNPRINTF(msg, sizeof(msg), |
3986 | 123 | "Cum ack %8.8x greater or equal than TSN %8.8x", |
3987 | 123 | cumack, send_s); |
3988 | 123 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
3989 | 123 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; |
3990 | 123 | sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
3991 | 123 | return; |
3992 | 123 | } |
3993 | 0 | asoc->this_sack_highest_gap = cumack; |
3994 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { |
3995 | 0 | sctp_misc_ints(SCTP_THRESHOLD_CLEAR, |
3996 | 0 | stcb->asoc.overall_error_count, |
3997 | 0 | 0, |
3998 | 0 | SCTP_FROM_SCTP_INDATA, |
3999 | 0 | __LINE__); |
4000 | 0 | } |
4001 | 0 | stcb->asoc.overall_error_count = 0; |
4002 | 0 | if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) { |
4003 | | /* process the new consecutive TSN first */ |
4004 | 0 | TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { |
4005 | 0 | if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) { |
4006 | 0 | if (tp1->sent == SCTP_DATAGRAM_UNSENT) { |
4007 | 0 | SCTP_PRINTF("Warning, an unsent is now acked?\n"); |
4008 | 0 | } |
4009 | 0 | if (tp1->sent < SCTP_DATAGRAM_ACKED) { |
4010 | | /* |
4011 | | * If it is less than ACKED, it is |
4012 | | * now no-longer in flight. Higher |
4013 | | * values may occur during marking |
4014 | | */ |
4015 | 0 | if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
4016 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
4017 | 0 | sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, |
4018 | 0 | tp1->whoTo->flight_size, |
4019 | 0 | tp1->book_size, |
4020 | 0 | (uint32_t)(uintptr_t)tp1->whoTo, |
4021 | 0 | tp1->rec.data.tsn); |
4022 | 0 | } |
4023 | 0 | sctp_flight_size_decrease(tp1); |
4024 | 0 | if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { |
4025 | 0 | (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo, |
4026 | 0 | tp1); |
4027 | 0 | } |
4028 | | /* sa_ignore NO_NULL_CHK */ |
4029 | 0 | sctp_total_flight_decrease(stcb, tp1); |
4030 | 0 | } |
4031 | 0 | tp1->whoTo->net_ack += tp1->send_size; |
4032 | 0 | if (tp1->snd_count < 2) { |
4033 | | /* |
4034 | | * True non-retransmitted |
4035 | | * chunk |
4036 | | */ |
4037 | 0 | tp1->whoTo->net_ack2 += |
4038 | 0 | tp1->send_size; |
4039 | | |
4040 | | /* update RTO too? */ |
4041 | 0 | if (tp1->do_rtt) { |
4042 | 0 | if (rto_ok && |
4043 | 0 | sctp_calculate_rto(stcb, |
4044 | 0 | &stcb->asoc, |
4045 | 0 | tp1->whoTo, |
4046 | 0 | &tp1->sent_rcv_time, |
4047 | 0 | SCTP_RTT_FROM_DATA)) { |
4048 | 0 | rto_ok = 0; |
4049 | 0 | } |
4050 | 0 | if (tp1->whoTo->rto_needed == 0) { |
4051 | 0 | tp1->whoTo->rto_needed = 1; |
4052 | 0 | } |
4053 | 0 | tp1->do_rtt = 0; |
4054 | 0 | } |
4055 | 0 | } |
4056 | | /* |
4057 | | * CMT: CUCv2 algorithm. From the |
4058 | | * cumack'd TSNs, for each TSN being |
4059 | | * acked for the first time, set the |
4060 | | * following variables for the |
4061 | | * corresp destination. |
4062 | | * new_pseudo_cumack will trigger a |
4063 | | * cwnd update. |
4064 | | * find_(rtx_)pseudo_cumack will |
4065 | | * trigger search for the next |
4066 | | * expected (rtx-)pseudo-cumack. |
4067 | | */ |
4068 | 0 | tp1->whoTo->new_pseudo_cumack = 1; |
4069 | 0 | tp1->whoTo->find_pseudo_cumack = 1; |
4070 | 0 | tp1->whoTo->find_rtx_pseudo_cumack = 1; |
4071 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { |
4072 | | /* sa_ignore NO_NULL_CHK */ |
4073 | 0 | sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); |
4074 | 0 | } |
4075 | 0 | } |
4076 | 0 | if (tp1->sent == SCTP_DATAGRAM_RESEND) { |
4077 | 0 | sctp_ucount_decr(asoc->sent_queue_retran_cnt); |
4078 | 0 | } |
4079 | 0 | if (tp1->rec.data.chunk_was_revoked) { |
4080 | | /* deflate the cwnd */ |
4081 | 0 | tp1->whoTo->cwnd -= tp1->book_size; |
4082 | 0 | tp1->rec.data.chunk_was_revoked = 0; |
4083 | 0 | } |
4084 | 0 | if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { |
4085 | 0 | if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) { |
4086 | 0 | asoc->strmout[tp1->rec.data.sid].chunks_on_queues--; |
4087 | 0 | #ifdef INVARIANTS |
4088 | 0 | } else { |
4089 | 0 | panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); |
4090 | 0 | #endif |
4091 | 0 | } |
4092 | 0 | } |
4093 | 0 | if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) && |
4094 | 0 | (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && |
4095 | 0 | TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) { |
4096 | 0 | asoc->trigger_reset = 1; |
4097 | 0 | } |
4098 | 0 | TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); |
4099 | 0 | if (tp1->data) { |
4100 | | /* sa_ignore NO_NULL_CHK */ |
4101 | 0 | sctp_free_bufspace(stcb, asoc, tp1, 1); |
4102 | 0 | sctp_m_freem(tp1->data); |
4103 | 0 | tp1->data = NULL; |
4104 | 0 | } |
4105 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { |
4106 | 0 | sctp_log_sack(asoc->last_acked_seq, |
4107 | 0 | cumack, |
4108 | 0 | tp1->rec.data.tsn, |
4109 | 0 | 0, |
4110 | 0 | 0, |
4111 | 0 | SCTP_LOG_FREE_SENT); |
4112 | 0 | } |
4113 | 0 | asoc->sent_queue_cnt--; |
4114 | 0 | sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); |
4115 | 0 | } else { |
4116 | 0 | break; |
4117 | 0 | } |
4118 | 0 | } |
4119 | 0 | } |
4120 | 0 | #if defined(__Userspace__) |
4121 | 0 | if (stcb->sctp_ep->recv_callback) { |
4122 | 0 | if (stcb->sctp_socket) { |
4123 | 0 | uint32_t inqueue_bytes, sb_free_now; |
4124 | 0 | struct sctp_inpcb *inp; |
4125 | |
|
4126 | 0 | inp = stcb->sctp_ep; |
4127 | 0 | inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); |
4128 | 0 | sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv); |
4129 | | |
4130 | | /* check if the amount free in the send socket buffer crossed the threshold */ |
4131 | 0 | if (inp->send_callback && |
4132 | 0 | (((inp->send_sb_threshold > 0) && |
4133 | 0 | (sb_free_now >= inp->send_sb_threshold) && |
4134 | 0 | (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) || |
4135 | 0 | (inp->send_sb_threshold == 0))) { |
4136 | 0 | atomic_add_int(&stcb->asoc.refcnt, 1); |
4137 | 0 | SCTP_TCB_UNLOCK(stcb); |
4138 | 0 | inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info); |
4139 | 0 | SCTP_TCB_LOCK(stcb); |
4140 | 0 | atomic_subtract_int(&stcb->asoc.refcnt, 1); |
4141 | 0 | } |
4142 | 0 | } |
4143 | 0 | } else if (stcb->sctp_socket) { |
4144 | | #else |
4145 | | /* sa_ignore NO_NULL_CHK */ |
4146 | | if (stcb->sctp_socket) { |
4147 | | #endif |
4148 | | #if defined(__APPLE__) && !defined(__Userspace__) |
4149 | | struct socket *so; |
4150 | | |
4151 | | #endif |
4152 | 0 | SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); |
4153 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { |
4154 | | /* sa_ignore NO_NULL_CHK */ |
4155 | 0 | sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK); |
4156 | 0 | } |
4157 | | #if defined(__APPLE__) && !defined(__Userspace__) |
4158 | | so = SCTP_INP_SO(stcb->sctp_ep); |
4159 | | atomic_add_int(&stcb->asoc.refcnt, 1); |
4160 | | SCTP_TCB_UNLOCK(stcb); |
4161 | | SCTP_SOCKET_LOCK(so, 1); |
4162 | | SCTP_TCB_LOCK(stcb); |
4163 | | atomic_subtract_int(&stcb->asoc.refcnt, 1); |
4164 | | if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { |
4165 | | /* assoc was freed while we were unlocked */ |
4166 | | SCTP_SOCKET_UNLOCK(so, 1); |
4167 | | return; |
4168 | | } |
4169 | | #endif |
4170 | 0 | sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); |
4171 | | #if defined(__APPLE__) && !defined(__Userspace__) |
4172 | | SCTP_SOCKET_UNLOCK(so, 1); |
4173 | | #endif |
4174 | 0 | } else { |
4175 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { |
4176 | 0 | sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK); |
4177 | 0 | } |
4178 | 0 | } |
4179 | | |
4180 | | /* JRS - Use the congestion control given in the CC module */ |
4181 | 0 | if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) { |
4182 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
4183 | 0 | if (net->net_ack2 > 0) { |
4184 | | /* |
4185 | | * Karn's rule applies to clearing error count, this |
4186 | | * is optional. |
4187 | | */ |
4188 | 0 | net->error_count = 0; |
4189 | 0 | if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) { |
4190 | | /* addr came good */ |
4191 | 0 | net->dest_state |= SCTP_ADDR_REACHABLE; |
4192 | 0 | sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, |
4193 | 0 | 0, (void *)net, SCTP_SO_NOT_LOCKED); |
4194 | 0 | } |
4195 | 0 | if (net == stcb->asoc.primary_destination) { |
4196 | 0 | if (stcb->asoc.alternate) { |
4197 | | /* release the alternate, primary is good */ |
4198 | 0 | sctp_free_remote_addr(stcb->asoc.alternate); |
4199 | 0 | stcb->asoc.alternate = NULL; |
4200 | 0 | } |
4201 | 0 | } |
4202 | 0 | if (net->dest_state & SCTP_ADDR_PF) { |
4203 | 0 | net->dest_state &= ~SCTP_ADDR_PF; |
4204 | 0 | sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, |
4205 | 0 | stcb->sctp_ep, stcb, net, |
4206 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); |
4207 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); |
4208 | 0 | asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); |
4209 | | /* Done with this net */ |
4210 | 0 | net->net_ack = 0; |
4211 | 0 | } |
4212 | | /* restore any doubled timers */ |
4213 | 0 | net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; |
4214 | 0 | if (net->RTO < stcb->asoc.minrto) { |
4215 | 0 | net->RTO = stcb->asoc.minrto; |
4216 | 0 | } |
4217 | 0 | if (net->RTO > stcb->asoc.maxrto) { |
4218 | 0 | net->RTO = stcb->asoc.maxrto; |
4219 | 0 | } |
4220 | 0 | } |
4221 | 0 | } |
4222 | 0 | asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); |
4223 | 0 | } |
4224 | 0 | asoc->last_acked_seq = cumack; |
4225 | |
|
4226 | 0 | if (TAILQ_EMPTY(&asoc->sent_queue)) { |
4227 | | /* nothing left in-flight */ |
4228 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
4229 | 0 | net->flight_size = 0; |
4230 | 0 | net->partial_bytes_acked = 0; |
4231 | 0 | } |
4232 | 0 | asoc->total_flight = 0; |
4233 | 0 | asoc->total_flight_count = 0; |
4234 | 0 | } |
4235 | | |
4236 | | /* RWND update */ |
4237 | 0 | asoc->peers_rwnd = sctp_sbspace_sub(rwnd, |
4238 | 0 | (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); |
4239 | 0 | if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { |
4240 | | /* SWS sender side engages */ |
4241 | 0 | asoc->peers_rwnd = 0; |
4242 | 0 | } |
4243 | 0 | if (asoc->peers_rwnd > old_rwnd) { |
4244 | 0 | win_probe_recovery = 1; |
4245 | 0 | } |
4246 | | /* Now assure a timer where data is queued at */ |
4247 | 0 | again: |
4248 | 0 | j = 0; |
4249 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
4250 | 0 | if (win_probe_recovery && (net->window_probe)) { |
4251 | 0 | win_probe_recovered = 1; |
4252 | | /* |
4253 | | * Find first chunk that was used with window probe |
4254 | | * and clear the sent |
4255 | | */ |
4256 | | /* sa_ignore FREED_MEMORY */ |
4257 | 0 | TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
4258 | 0 | if (tp1->window_probe) { |
4259 | | /* move back to data send queue */ |
4260 | 0 | sctp_window_probe_recovery(stcb, asoc, tp1); |
4261 | 0 | break; |
4262 | 0 | } |
4263 | 0 | } |
4264 | 0 | } |
4265 | 0 | if (net->flight_size) { |
4266 | 0 | j++; |
4267 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); |
4268 | 0 | if (net->window_probe) { |
4269 | 0 | net->window_probe = 0; |
4270 | 0 | } |
4271 | 0 | } else { |
4272 | 0 | if (net->window_probe) { |
4273 | | /* In window probes we must assure a timer is still running there */ |
4274 | 0 | net->window_probe = 0; |
4275 | 0 | if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { |
4276 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); |
4277 | 0 | } |
4278 | 0 | } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { |
4279 | 0 | sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, |
4280 | 0 | stcb, net, |
4281 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); |
4282 | 0 | } |
4283 | 0 | } |
4284 | 0 | } |
4285 | 0 | if ((j == 0) && |
4286 | 0 | (!TAILQ_EMPTY(&asoc->sent_queue)) && |
4287 | 0 | (asoc->sent_queue_retran_cnt == 0) && |
4288 | 0 | (win_probe_recovered == 0) && |
4289 | 0 | (done_once == 0)) { |
4290 | | /* huh, this should not happen unless all packets |
4291 | | * are PR-SCTP and marked to skip of course. |
4292 | | */ |
4293 | 0 | if (sctp_fs_audit(asoc)) { |
4294 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
4295 | 0 | net->flight_size = 0; |
4296 | 0 | } |
4297 | 0 | asoc->total_flight = 0; |
4298 | 0 | asoc->total_flight_count = 0; |
4299 | 0 | asoc->sent_queue_retran_cnt = 0; |
4300 | 0 | TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
4301 | 0 | if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
4302 | 0 | sctp_flight_size_increase(tp1); |
4303 | 0 | sctp_total_flight_increase(stcb, tp1); |
4304 | 0 | } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { |
4305 | 0 | sctp_ucount_incr(asoc->sent_queue_retran_cnt); |
4306 | 0 | } |
4307 | 0 | } |
4308 | 0 | } |
4309 | 0 | done_once = 1; |
4310 | 0 | goto again; |
4311 | 0 | } |
4312 | | /**********************************/ |
4313 | | /* Now what about shutdown issues */ |
4314 | | /**********************************/ |
4315 | 0 | if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { |
4316 | | /* nothing left on sendqueue.. consider done */ |
4317 | | /* clean up */ |
4318 | 0 | if ((asoc->stream_queue_cnt == 1) && |
4319 | 0 | ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || |
4320 | 0 | (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && |
4321 | 0 | ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) { |
4322 | 0 | SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); |
4323 | 0 | } |
4324 | 0 | if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || |
4325 | 0 | (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && |
4326 | 0 | (asoc->stream_queue_cnt == 1) && |
4327 | 0 | (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { |
4328 | 0 | struct mbuf *op_err; |
4329 | |
|
4330 | 0 | *abort_now = 1; |
4331 | | /* XXX */ |
4332 | 0 | op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); |
4333 | 0 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28; |
4334 | 0 | sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
4335 | 0 | return; |
4336 | 0 | } |
4337 | 0 | if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && |
4338 | 0 | (asoc->stream_queue_cnt == 0)) { |
4339 | 0 | struct sctp_nets *netp; |
4340 | |
|
4341 | 0 | if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || |
4342 | 0 | (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { |
4343 | 0 | SCTP_STAT_DECR_GAUGE32(sctps_currestab); |
4344 | 0 | } |
4345 | 0 | SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); |
4346 | 0 | sctp_stop_timers_for_shutdown(stcb); |
4347 | 0 | if (asoc->alternate) { |
4348 | 0 | netp = asoc->alternate; |
4349 | 0 | } else { |
4350 | 0 | netp = asoc->primary_destination; |
4351 | 0 | } |
4352 | 0 | sctp_send_shutdown(stcb, netp); |
4353 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, |
4354 | 0 | stcb->sctp_ep, stcb, netp); |
4355 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, |
4356 | 0 | stcb->sctp_ep, stcb, NULL); |
4357 | 0 | } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) && |
4358 | 0 | (asoc->stream_queue_cnt == 0)) { |
4359 | 0 | struct sctp_nets *netp; |
4360 | |
|
4361 | 0 | SCTP_STAT_DECR_GAUGE32(sctps_currestab); |
4362 | 0 | SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT); |
4363 | 0 | sctp_stop_timers_for_shutdown(stcb); |
4364 | 0 | if (asoc->alternate) { |
4365 | 0 | netp = asoc->alternate; |
4366 | 0 | } else { |
4367 | 0 | netp = asoc->primary_destination; |
4368 | 0 | } |
4369 | 0 | sctp_send_shutdown_ack(stcb, netp); |
4370 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, |
4371 | 0 | stcb->sctp_ep, stcb, netp); |
4372 | 0 | } |
4373 | 0 | } |
4374 | | /*********************************************/ |
4375 | | /* Here we perform PR-SCTP procedures */ |
4376 | | /* (section 4.2) */ |
4377 | | /*********************************************/ |
4378 | | /* C1. update advancedPeerAckPoint */ |
4379 | 0 | if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) { |
4380 | 0 | asoc->advanced_peer_ack_point = cumack; |
4381 | 0 | } |
4382 | | /* PR-Sctp issues need to be addressed too */ |
4383 | 0 | if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { |
4384 | 0 | struct sctp_tmit_chunk *lchk; |
4385 | 0 | uint32_t old_adv_peer_ack_point; |
4386 | |
|
4387 | 0 | old_adv_peer_ack_point = asoc->advanced_peer_ack_point; |
4388 | 0 | lchk = sctp_try_advance_peer_ack_point(stcb, asoc); |
4389 | | /* C3. See if we need to send a Fwd-TSN */ |
4390 | 0 | if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) { |
4391 | | /* |
4392 | | * ISSUE with ECN, see FWD-TSN processing. |
4393 | | */ |
4394 | 0 | if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { |
4395 | 0 | send_forward_tsn(stcb, asoc); |
4396 | 0 | } else if (lchk) { |
4397 | | /* try to FR fwd-tsn's that get lost too */ |
4398 | 0 | if (lchk->rec.data.fwd_tsn_cnt >= 3) { |
4399 | 0 | send_forward_tsn(stcb, asoc); |
4400 | 0 | } |
4401 | 0 | } |
4402 | 0 | } |
4403 | 0 | for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) { |
4404 | 0 | if (lchk->whoTo != NULL) { |
4405 | 0 | break; |
4406 | 0 | } |
4407 | 0 | } |
4408 | 0 | if (lchk != NULL) { |
4409 | | /* Assure a timer is up */ |
4410 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SEND, |
4411 | 0 | stcb->sctp_ep, stcb, lchk->whoTo); |
4412 | 0 | } |
4413 | 0 | } |
4414 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { |
4415 | 0 | sctp_misc_ints(SCTP_SACK_RWND_UPDATE, |
4416 | 0 | rwnd, |
4417 | 0 | stcb->asoc.peers_rwnd, |
4418 | 0 | stcb->asoc.total_flight, |
4419 | 0 | stcb->asoc.total_output_queue_size); |
4420 | 0 | } |
4421 | 0 | } |
4422 | | |
4423 | | void |
4424 | | sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, |
4425 | | struct sctp_tcb *stcb, |
4426 | | uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup, |
4427 | | int *abort_now, uint8_t flags, |
4428 | | uint32_t cum_ack, uint32_t rwnd, int ecne_seen) |
4429 | 706 | { |
4430 | 706 | struct sctp_association *asoc; |
4431 | 706 | struct sctp_tmit_chunk *tp1, *tp2; |
4432 | 706 | uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack; |
4433 | 706 | uint16_t wake_him = 0; |
4434 | 706 | uint32_t send_s = 0; |
4435 | 706 | long j; |
4436 | 706 | int accum_moved = 0; |
4437 | 706 | int will_exit_fast_recovery = 0; |
4438 | 706 | uint32_t a_rwnd, old_rwnd; |
4439 | 706 | int win_probe_recovery = 0; |
4440 | 706 | int win_probe_recovered = 0; |
4441 | 706 | struct sctp_nets *net = NULL; |
4442 | 706 | int done_once; |
4443 | 706 | int rto_ok = 1; |
4444 | 706 | uint8_t reneged_all = 0; |
4445 | 706 | uint8_t cmt_dac_flag; |
4446 | | /* |
4447 | | * we take any chance we can to service our queues since we cannot |
4448 | | * get awoken when the socket is read from :< |
4449 | | */ |
4450 | | /* |
4451 | | * Now perform the actual SACK handling: 1) Verify that it is not an |
4452 | | * old sack, if so discard. 2) If there is nothing left in the send |
4453 | | * queue (cum-ack is equal to last acked) then you have a duplicate |
4454 | | * too, update any rwnd change and verify no timers are running. |
4455 | | * then return. 3) Process any new consecutive data i.e. cum-ack |
4456 | | * moved process these first and note that it moved. 4) Process any |
4457 | | * sack blocks. 5) Drop any acked from the queue. 6) Check for any |
4458 | | * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left, |
4459 | | * sync up flightsizes and things, stop all timers and also check |
4460 | | * for shutdown_pending state. If so then go ahead and send off the |
4461 | | * shutdown. If in shutdown recv, send off the shutdown-ack and |
4462 | | * start that timer, Ret. 9) Strike any non-acked things and do FR |
4463 | | * procedure if needed being sure to set the FR flag. 10) Do pr-sctp |
4464 | | * procedures. 11) Apply any FR penalties. 12) Assure we will SACK |
4465 | | * if in shutdown_recv state. |
4466 | | */ |
4467 | 706 | SCTP_TCB_LOCK_ASSERT(stcb); |
4468 | | /* CMT DAC algo */ |
4469 | 706 | this_sack_lowest_newack = 0; |
4470 | 706 | SCTP_STAT_INCR(sctps_slowpath_sack); |
4471 | 706 | last_tsn = cum_ack; |
4472 | 706 | cmt_dac_flag = flags & SCTP_SACK_CMT_DAC; |
4473 | | #ifdef SCTP_ASOCLOG_OF_TSNS |
4474 | | stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; |
4475 | | stcb->asoc.cumack_log_at++; |
4476 | | if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { |
4477 | | stcb->asoc.cumack_log_at = 0; |
4478 | | } |
4479 | | #endif |
4480 | 706 | a_rwnd = rwnd; |
4481 | | |
4482 | 706 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { |
4483 | 0 | sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, |
4484 | 0 | rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); |
4485 | 0 | } |
4486 | | |
4487 | 706 | old_rwnd = stcb->asoc.peers_rwnd; |
4488 | 706 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { |
4489 | 0 | sctp_misc_ints(SCTP_THRESHOLD_CLEAR, |
4490 | 0 | stcb->asoc.overall_error_count, |
4491 | 0 | 0, |
4492 | 0 | SCTP_FROM_SCTP_INDATA, |
4493 | 0 | __LINE__); |
4494 | 0 | } |
4495 | 706 | stcb->asoc.overall_error_count = 0; |
4496 | 706 | asoc = &stcb->asoc; |
4497 | 706 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { |
4498 | 0 | sctp_log_sack(asoc->last_acked_seq, |
4499 | 0 | cum_ack, |
4500 | 0 | 0, |
4501 | 0 | num_seg, |
4502 | 0 | num_dup, |
4503 | 0 | SCTP_LOG_NEW_SACK); |
4504 | 0 | } |
4505 | 706 | if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) { |
4506 | 0 | uint16_t i; |
4507 | 0 | uint32_t *dupdata, dblock; |
4508 | |
|
4509 | 0 | for (i = 0; i < num_dup; i++) { |
4510 | 0 | dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t), |
4511 | 0 | sizeof(uint32_t), (uint8_t *)&dblock); |
4512 | 0 | if (dupdata == NULL) { |
4513 | 0 | break; |
4514 | 0 | } |
4515 | 0 | sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); |
4516 | 0 | } |
4517 | 0 | } |
4518 | | /* reality check */ |
4519 | 706 | if (!TAILQ_EMPTY(&asoc->sent_queue)) { |
4520 | 373 | tp1 = TAILQ_LAST(&asoc->sent_queue, |
4521 | 373 | sctpchunk_listhead); |
4522 | 373 | send_s = tp1->rec.data.tsn + 1; |
4523 | 373 | } else { |
4524 | 333 | tp1 = NULL; |
4525 | 333 | send_s = asoc->sending_seq; |
4526 | 333 | } |
4527 | 706 | if (SCTP_TSN_GE(cum_ack, send_s)) { |
4528 | 53 | struct mbuf *op_err; |
4529 | 53 | char msg[SCTP_DIAG_INFO_LEN]; |
4530 | | |
4531 | | /* |
4532 | | * no way, we have not even sent this TSN out yet. |
4533 | | * Peer is hopelessly messed up with us. |
4534 | | */ |
4535 | 53 | SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n", |
4536 | 53 | cum_ack, send_s); |
4537 | 53 | if (tp1) { |
4538 | 4 | SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n", |
4539 | 4 | tp1->rec.data.tsn, (void *)tp1); |
4540 | 4 | } |
4541 | 53 | hopeless_peer: |
4542 | 53 | *abort_now = 1; |
4543 | | /* XXX */ |
4544 | 53 | SCTP_SNPRINTF(msg, sizeof(msg), |
4545 | 53 | "Cum ack %8.8x greater or equal than TSN %8.8x", |
4546 | 53 | cum_ack, send_s); |
4547 | 53 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
4548 | 53 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29; |
4549 | 53 | sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
4550 | 53 | return; |
4551 | 53 | } |
4552 | | /**********************/ |
4553 | | /* 1) check the range */ |
4554 | | /**********************/ |
4555 | 653 | if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) { |
4556 | | /* acking something behind */ |
4557 | 653 | return; |
4558 | 653 | } |
4559 | | |
4560 | | /* update the Rwnd of the peer */ |
4561 | 0 | if (TAILQ_EMPTY(&asoc->sent_queue) && |
4562 | 0 | TAILQ_EMPTY(&asoc->send_queue) && |
4563 | 0 | (asoc->stream_queue_cnt == 0)) { |
4564 | | /* nothing left on send/sent and strmq */ |
4565 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { |
4566 | 0 | sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, |
4567 | 0 | asoc->peers_rwnd, 0, 0, a_rwnd); |
4568 | 0 | } |
4569 | 0 | asoc->peers_rwnd = a_rwnd; |
4570 | 0 | if (asoc->sent_queue_retran_cnt) { |
4571 | 0 | asoc->sent_queue_retran_cnt = 0; |
4572 | 0 | } |
4573 | 0 | if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { |
4574 | | /* SWS sender side engages */ |
4575 | 0 | asoc->peers_rwnd = 0; |
4576 | 0 | } |
4577 | | /* stop any timers */ |
4578 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
4579 | 0 | sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, |
4580 | 0 | stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); |
4581 | 0 | net->partial_bytes_acked = 0; |
4582 | 0 | net->flight_size = 0; |
4583 | 0 | } |
4584 | 0 | asoc->total_flight = 0; |
4585 | 0 | asoc->total_flight_count = 0; |
4586 | 0 | return; |
4587 | 0 | } |
4588 | | /* |
4589 | | * We init netAckSz and netAckSz2 to 0. These are used to track 2 |
4590 | | * things. The total byte count acked is tracked in netAckSz AND |
4591 | | * netAck2 is used to track the total bytes acked that are un- |
4592 | | * ambiguous and were never retransmitted. We track these on a per |
4593 | | * destination address basis. |
4594 | | */ |
4595 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
4596 | 0 | if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) { |
4597 | | /* Drag along the window_tsn for cwr's */ |
4598 | 0 | net->cwr_window_tsn = cum_ack; |
4599 | 0 | } |
4600 | 0 | net->prev_cwnd = net->cwnd; |
4601 | 0 | net->net_ack = 0; |
4602 | 0 | net->net_ack2 = 0; |
4603 | | |
4604 | | /* |
4605 | | * CMT: Reset CUC and Fast recovery algo variables before |
4606 | | * SACK processing |
4607 | | */ |
4608 | 0 | net->new_pseudo_cumack = 0; |
4609 | 0 | net->will_exit_fast_recovery = 0; |
4610 | 0 | if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { |
4611 | 0 | (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net); |
4612 | 0 | } |
4613 | | |
4614 | | /* |
4615 | | * CMT: SFR algo (and HTNA) - this_sack_highest_newack has |
4616 | | * to be greater than the cumack. Also reset saw_newack to 0 |
4617 | | * for all dests. |
4618 | | */ |
4619 | 0 | net->saw_newack = 0; |
4620 | 0 | net->this_sack_highest_newack = last_tsn; |
4621 | 0 | } |
4622 | | /* process the new consecutive TSN first */ |
4623 | 0 | TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
4624 | 0 | if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) { |
4625 | 0 | if (tp1->sent != SCTP_DATAGRAM_UNSENT) { |
4626 | 0 | accum_moved = 1; |
4627 | 0 | if (tp1->sent < SCTP_DATAGRAM_ACKED) { |
4628 | | /* |
4629 | | * If it is less than ACKED, it is |
4630 | | * now no-longer in flight. Higher |
4631 | | * values may occur during marking |
4632 | | */ |
4633 | 0 | if ((tp1->whoTo->dest_state & |
4634 | 0 | SCTP_ADDR_UNCONFIRMED) && |
4635 | 0 | (tp1->snd_count < 2)) { |
4636 | | /* |
4637 | | * If there was no retran |
4638 | | * and the address is |
4639 | | * un-confirmed and we sent |
4640 | | * there and are now |
4641 | | * sacked.. its confirmed, |
4642 | | * mark it so. |
4643 | | */ |
4644 | 0 | tp1->whoTo->dest_state &= |
4645 | 0 | ~SCTP_ADDR_UNCONFIRMED; |
4646 | 0 | } |
4647 | 0 | if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
4648 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
4649 | 0 | sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, |
4650 | 0 | tp1->whoTo->flight_size, |
4651 | 0 | tp1->book_size, |
4652 | 0 | (uint32_t)(uintptr_t)tp1->whoTo, |
4653 | 0 | tp1->rec.data.tsn); |
4654 | 0 | } |
4655 | 0 | sctp_flight_size_decrease(tp1); |
4656 | 0 | sctp_total_flight_decrease(stcb, tp1); |
4657 | 0 | if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { |
4658 | 0 | (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo, |
4659 | 0 | tp1); |
4660 | 0 | } |
4661 | 0 | } |
4662 | 0 | tp1->whoTo->net_ack += tp1->send_size; |
4663 | | |
4664 | | /* CMT SFR and DAC algos */ |
4665 | 0 | this_sack_lowest_newack = tp1->rec.data.tsn; |
4666 | 0 | tp1->whoTo->saw_newack = 1; |
4667 | |
|
4668 | 0 | if (tp1->snd_count < 2) { |
4669 | | /* |
4670 | | * True non-retransmitted |
4671 | | * chunk |
4672 | | */ |
4673 | 0 | tp1->whoTo->net_ack2 += |
4674 | 0 | tp1->send_size; |
4675 | | |
4676 | | /* update RTO too? */ |
4677 | 0 | if (tp1->do_rtt) { |
4678 | 0 | if (rto_ok && |
4679 | 0 | sctp_calculate_rto(stcb, |
4680 | 0 | &stcb->asoc, |
4681 | 0 | tp1->whoTo, |
4682 | 0 | &tp1->sent_rcv_time, |
4683 | 0 | SCTP_RTT_FROM_DATA)) { |
4684 | 0 | rto_ok = 0; |
4685 | 0 | } |
4686 | 0 | if (tp1->whoTo->rto_needed == 0) { |
4687 | 0 | tp1->whoTo->rto_needed = 1; |
4688 | 0 | } |
4689 | 0 | tp1->do_rtt = 0; |
4690 | 0 | } |
4691 | 0 | } |
4692 | | /* |
4693 | | * CMT: CUCv2 algorithm. From the |
4694 | | * cumack'd TSNs, for each TSN being |
4695 | | * acked for the first time, set the |
4696 | | * following variables for the |
4697 | | * corresp destination. |
4698 | | * new_pseudo_cumack will trigger a |
4699 | | * cwnd update. |
4700 | | * find_(rtx_)pseudo_cumack will |
4701 | | * trigger search for the next |
4702 | | * expected (rtx-)pseudo-cumack. |
4703 | | */ |
4704 | 0 | tp1->whoTo->new_pseudo_cumack = 1; |
4705 | 0 | tp1->whoTo->find_pseudo_cumack = 1; |
4706 | 0 | tp1->whoTo->find_rtx_pseudo_cumack = 1; |
4707 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { |
4708 | 0 | sctp_log_sack(asoc->last_acked_seq, |
4709 | 0 | cum_ack, |
4710 | 0 | tp1->rec.data.tsn, |
4711 | 0 | 0, |
4712 | 0 | 0, |
4713 | 0 | SCTP_LOG_TSN_ACKED); |
4714 | 0 | } |
4715 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { |
4716 | 0 | sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); |
4717 | 0 | } |
4718 | 0 | } |
4719 | 0 | if (tp1->sent == SCTP_DATAGRAM_RESEND) { |
4720 | 0 | sctp_ucount_decr(asoc->sent_queue_retran_cnt); |
4721 | | #ifdef SCTP_AUDITING_ENABLED |
4722 | | sctp_audit_log(0xB3, |
4723 | | (asoc->sent_queue_retran_cnt & 0x000000ff)); |
4724 | | #endif |
4725 | 0 | } |
4726 | 0 | if (tp1->rec.data.chunk_was_revoked) { |
4727 | | /* deflate the cwnd */ |
4728 | 0 | tp1->whoTo->cwnd -= tp1->book_size; |
4729 | 0 | tp1->rec.data.chunk_was_revoked = 0; |
4730 | 0 | } |
4731 | 0 | if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { |
4732 | 0 | tp1->sent = SCTP_DATAGRAM_ACKED; |
4733 | 0 | } |
4734 | 0 | } |
4735 | 0 | } else { |
4736 | 0 | break; |
4737 | 0 | } |
4738 | 0 | } |
4739 | 0 | biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; |
4740 | | /* always set this up to cum-ack */ |
4741 | 0 | asoc->this_sack_highest_gap = last_tsn; |
4742 | |
|
4743 | 0 | if ((num_seg > 0) || (num_nr_seg > 0)) { |
4744 | | /* |
4745 | | * thisSackHighestGap will increase while handling NEW |
4746 | | * segments this_sack_highest_newack will increase while |
4747 | | * handling NEWLY ACKED chunks. this_sack_lowest_newack is |
4748 | | * used for CMT DAC algo. saw_newack will also change. |
4749 | | */ |
4750 | 0 | if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked, |
4751 | 0 | &biggest_tsn_newly_acked, &this_sack_lowest_newack, |
4752 | 0 | num_seg, num_nr_seg, &rto_ok)) { |
4753 | 0 | wake_him++; |
4754 | 0 | } |
4755 | | /* |
4756 | | * validate the biggest_tsn_acked in the gap acks if |
4757 | | * strict adherence is wanted. |
4758 | | */ |
4759 | 0 | if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) { |
4760 | | /* |
4761 | | * peer is either confused or we are under |
4762 | | * attack. We must abort. |
4763 | | */ |
4764 | 0 | SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n", |
4765 | 0 | biggest_tsn_acked, send_s); |
4766 | 0 | goto hopeless_peer; |
4767 | 0 | } |
4768 | 0 | } |
4769 | | /*******************************************/ |
4770 | | /* cancel ALL T3-send timer if accum moved */ |
4771 | | /*******************************************/ |
4772 | 0 | if (asoc->sctp_cmt_on_off > 0) { |
4773 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
4774 | 0 | if (net->new_pseudo_cumack) |
4775 | 0 | sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, |
4776 | 0 | stcb, net, |
4777 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_31); |
4778 | 0 | } |
4779 | 0 | } else { |
4780 | 0 | if (accum_moved) { |
4781 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
4782 | 0 | sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, |
4783 | 0 | stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); |
4784 | 0 | } |
4785 | 0 | } |
4786 | 0 | } |
4787 | | /********************************************/ |
4788 | | /* drop the acked chunks from the sentqueue */ |
4789 | | /********************************************/ |
4790 | 0 | asoc->last_acked_seq = cum_ack; |
4791 | |
|
4792 | 0 | TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { |
4793 | 0 | if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) { |
4794 | 0 | break; |
4795 | 0 | } |
4796 | 0 | if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { |
4797 | 0 | if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) { |
4798 | 0 | asoc->strmout[tp1->rec.data.sid].chunks_on_queues--; |
4799 | 0 | #ifdef INVARIANTS |
4800 | 0 | } else { |
4801 | 0 | panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); |
4802 | 0 | #endif |
4803 | 0 | } |
4804 | 0 | } |
4805 | 0 | if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) && |
4806 | 0 | (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && |
4807 | 0 | TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) { |
4808 | 0 | asoc->trigger_reset = 1; |
4809 | 0 | } |
4810 | 0 | TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); |
4811 | 0 | if (PR_SCTP_ENABLED(tp1->flags)) { |
4812 | 0 | if (asoc->pr_sctp_cnt != 0) |
4813 | 0 | asoc->pr_sctp_cnt--; |
4814 | 0 | } |
4815 | 0 | asoc->sent_queue_cnt--; |
4816 | 0 | if (tp1->data) { |
4817 | | /* sa_ignore NO_NULL_CHK */ |
4818 | 0 | sctp_free_bufspace(stcb, asoc, tp1, 1); |
4819 | 0 | sctp_m_freem(tp1->data); |
4820 | 0 | tp1->data = NULL; |
4821 | 0 | if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) { |
4822 | 0 | asoc->sent_queue_cnt_removeable--; |
4823 | 0 | } |
4824 | 0 | } |
4825 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { |
4826 | 0 | sctp_log_sack(asoc->last_acked_seq, |
4827 | 0 | cum_ack, |
4828 | 0 | tp1->rec.data.tsn, |
4829 | 0 | 0, |
4830 | 0 | 0, |
4831 | 0 | SCTP_LOG_FREE_SENT); |
4832 | 0 | } |
4833 | 0 | sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); |
4834 | 0 | wake_him++; |
4835 | 0 | } |
4836 | 0 | if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) { |
4837 | 0 | #ifdef INVARIANTS |
4838 | 0 | panic("Warning flight size is positive and should be 0"); |
4839 | | #else |
4840 | | SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n", |
4841 | | asoc->total_flight); |
4842 | | #endif |
4843 | 0 | asoc->total_flight = 0; |
4844 | 0 | } |
4845 | | |
4846 | 0 | #if defined(__Userspace__) |
4847 | 0 | if (stcb->sctp_ep->recv_callback) { |
4848 | 0 | if (stcb->sctp_socket) { |
4849 | 0 | uint32_t inqueue_bytes, sb_free_now; |
4850 | 0 | struct sctp_inpcb *inp; |
4851 | |
|
4852 | 0 | inp = stcb->sctp_ep; |
4853 | 0 | inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); |
4854 | 0 | sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv); |
4855 | | |
4856 | | /* check if the amount free in the send socket buffer crossed the threshold */ |
4857 | 0 | if (inp->send_callback && |
4858 | 0 | (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) || |
4859 | 0 | (inp->send_sb_threshold == 0))) { |
4860 | 0 | atomic_add_int(&stcb->asoc.refcnt, 1); |
4861 | 0 | SCTP_TCB_UNLOCK(stcb); |
4862 | 0 | inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info); |
4863 | 0 | SCTP_TCB_LOCK(stcb); |
4864 | 0 | atomic_subtract_int(&stcb->asoc.refcnt, 1); |
4865 | 0 | } |
4866 | 0 | } |
4867 | 0 | } else if ((wake_him) && (stcb->sctp_socket)) { |
4868 | | #else |
4869 | | /* sa_ignore NO_NULL_CHK */ |
4870 | | if ((wake_him) && (stcb->sctp_socket)) { |
4871 | | #endif |
4872 | | #if defined(__APPLE__) && !defined(__Userspace__) |
4873 | | struct socket *so; |
4874 | | |
4875 | | #endif |
4876 | 0 | SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); |
4877 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { |
4878 | 0 | sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK); |
4879 | 0 | } |
4880 | | #if defined(__APPLE__) && !defined(__Userspace__) |
4881 | | so = SCTP_INP_SO(stcb->sctp_ep); |
4882 | | atomic_add_int(&stcb->asoc.refcnt, 1); |
4883 | | SCTP_TCB_UNLOCK(stcb); |
4884 | | SCTP_SOCKET_LOCK(so, 1); |
4885 | | SCTP_TCB_LOCK(stcb); |
4886 | | atomic_subtract_int(&stcb->asoc.refcnt, 1); |
4887 | | if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { |
4888 | | /* assoc was freed while we were unlocked */ |
4889 | | SCTP_SOCKET_UNLOCK(so, 1); |
4890 | | return; |
4891 | | } |
4892 | | #endif |
4893 | 0 | sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); |
4894 | | #if defined(__APPLE__) && !defined(__Userspace__) |
4895 | | SCTP_SOCKET_UNLOCK(so, 1); |
4896 | | #endif |
4897 | 0 | } else { |
4898 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { |
4899 | 0 | sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK); |
4900 | 0 | } |
4901 | 0 | } |
4902 | | |
4903 | 0 | if (asoc->fast_retran_loss_recovery && accum_moved) { |
4904 | 0 | if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) { |
4905 | | /* Setup so we will exit RFC2582 fast recovery */ |
4906 | 0 | will_exit_fast_recovery = 1; |
4907 | 0 | } |
4908 | 0 | } |
4909 | | /* |
4910 | | * Check for revoked fragments: |
4911 | | * |
4912 | | * if Previous sack - Had no frags then we can't have any revoked if |
4913 | | * Previous sack - Had frag's then - If we now have frags aka |
4914 | | * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked |
4915 | | * some of them. else - The peer revoked all ACKED fragments, since |
4916 | | * we had some before and now we have NONE. |
4917 | | */ |
4918 | |
|
4919 | 0 | if (num_seg) { |
4920 | 0 | sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked); |
4921 | 0 | asoc->saw_sack_with_frags = 1; |
4922 | 0 | } else if (asoc->saw_sack_with_frags) { |
4923 | 0 | int cnt_revoked = 0; |
4924 | | |
4925 | | /* Peer revoked all dg's marked or acked */ |
4926 | 0 | TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
4927 | 0 | if (tp1->sent == SCTP_DATAGRAM_ACKED) { |
4928 | 0 | tp1->sent = SCTP_DATAGRAM_SENT; |
4929 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
4930 | 0 | sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, |
4931 | 0 | tp1->whoTo->flight_size, |
4932 | 0 | tp1->book_size, |
4933 | 0 | (uint32_t)(uintptr_t)tp1->whoTo, |
4934 | 0 | tp1->rec.data.tsn); |
4935 | 0 | } |
4936 | 0 | sctp_flight_size_increase(tp1); |
4937 | 0 | sctp_total_flight_increase(stcb, tp1); |
4938 | 0 | tp1->rec.data.chunk_was_revoked = 1; |
4939 | | /* |
4940 | | * To ensure that this increase in |
4941 | | * flightsize, which is artificial, |
4942 | | * does not throttle the sender, we |
4943 | | * also increase the cwnd |
4944 | | * artificially. |
4945 | | */ |
4946 | 0 | tp1->whoTo->cwnd += tp1->book_size; |
4947 | 0 | cnt_revoked++; |
4948 | 0 | } |
4949 | 0 | } |
4950 | 0 | if (cnt_revoked) { |
4951 | 0 | reneged_all = 1; |
4952 | 0 | } |
4953 | 0 | asoc->saw_sack_with_frags = 0; |
4954 | 0 | } |
4955 | 0 | if (num_nr_seg > 0) |
4956 | 0 | asoc->saw_sack_with_nr_frags = 1; |
4957 | 0 | else |
4958 | 0 | asoc->saw_sack_with_nr_frags = 0; |
4959 | | |
4960 | | /* JRS - Use the congestion control given in the CC module */ |
4961 | 0 | if (ecne_seen == 0) { |
4962 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
4963 | 0 | if (net->net_ack2 > 0) { |
4964 | | /* |
4965 | | * Karn's rule applies to clearing error count, this |
4966 | | * is optional. |
4967 | | */ |
4968 | 0 | net->error_count = 0; |
4969 | 0 | if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) { |
4970 | | /* addr came good */ |
4971 | 0 | net->dest_state |= SCTP_ADDR_REACHABLE; |
4972 | 0 | sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, |
4973 | 0 | 0, (void *)net, SCTP_SO_NOT_LOCKED); |
4974 | 0 | } |
4975 | |
|
4976 | 0 | if (net == stcb->asoc.primary_destination) { |
4977 | 0 | if (stcb->asoc.alternate) { |
4978 | | /* release the alternate, primary is good */ |
4979 | 0 | sctp_free_remote_addr(stcb->asoc.alternate); |
4980 | 0 | stcb->asoc.alternate = NULL; |
4981 | 0 | } |
4982 | 0 | } |
4983 | | |
4984 | 0 | if (net->dest_state & SCTP_ADDR_PF) { |
4985 | 0 | net->dest_state &= ~SCTP_ADDR_PF; |
4986 | 0 | sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, |
4987 | 0 | stcb->sctp_ep, stcb, net, |
4988 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_33); |
4989 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); |
4990 | 0 | asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); |
4991 | | /* Done with this net */ |
4992 | 0 | net->net_ack = 0; |
4993 | 0 | } |
4994 | | /* restore any doubled timers */ |
4995 | 0 | net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; |
4996 | 0 | if (net->RTO < stcb->asoc.minrto) { |
4997 | 0 | net->RTO = stcb->asoc.minrto; |
4998 | 0 | } |
4999 | 0 | if (net->RTO > stcb->asoc.maxrto) { |
5000 | 0 | net->RTO = stcb->asoc.maxrto; |
5001 | 0 | } |
5002 | 0 | } |
5003 | 0 | } |
5004 | 0 | asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); |
5005 | 0 | } |
5006 | | |
5007 | 0 | if (TAILQ_EMPTY(&asoc->sent_queue)) { |
5008 | | /* nothing left in-flight */ |
5009 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
5010 | | /* stop all timers */ |
5011 | 0 | sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, |
5012 | 0 | stcb, net, |
5013 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_34); |
5014 | 0 | net->flight_size = 0; |
5015 | 0 | net->partial_bytes_acked = 0; |
5016 | 0 | } |
5017 | 0 | asoc->total_flight = 0; |
5018 | 0 | asoc->total_flight_count = 0; |
5019 | 0 | } |
5020 | | |
5021 | | /**********************************/ |
5022 | | /* Now what about shutdown issues */ |
5023 | | /**********************************/ |
5024 | 0 | if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { |
5025 | | /* nothing left on sendqueue.. consider done */ |
5026 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { |
5027 | 0 | sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, |
5028 | 0 | asoc->peers_rwnd, 0, 0, a_rwnd); |
5029 | 0 | } |
5030 | 0 | asoc->peers_rwnd = a_rwnd; |
5031 | 0 | if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { |
5032 | | /* SWS sender side engages */ |
5033 | 0 | asoc->peers_rwnd = 0; |
5034 | 0 | } |
5035 | | /* clean up */ |
5036 | 0 | if ((asoc->stream_queue_cnt == 1) && |
5037 | 0 | ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || |
5038 | 0 | (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && |
5039 | 0 | ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) { |
5040 | 0 | SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); |
5041 | 0 | } |
5042 | 0 | if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || |
5043 | 0 | (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && |
5044 | 0 | (asoc->stream_queue_cnt == 1) && |
5045 | 0 | (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { |
5046 | 0 | struct mbuf *op_err; |
5047 | |
|
5048 | 0 | *abort_now = 1; |
5049 | | /* XXX */ |
5050 | 0 | op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); |
5051 | 0 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35; |
5052 | 0 | sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
5053 | 0 | return; |
5054 | 0 | } |
5055 | 0 | if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && |
5056 | 0 | (asoc->stream_queue_cnt == 0)) { |
5057 | 0 | struct sctp_nets *netp; |
5058 | |
|
5059 | 0 | if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || |
5060 | 0 | (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { |
5061 | 0 | SCTP_STAT_DECR_GAUGE32(sctps_currestab); |
5062 | 0 | } |
5063 | 0 | SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); |
5064 | 0 | sctp_stop_timers_for_shutdown(stcb); |
5065 | 0 | if (asoc->alternate) { |
5066 | 0 | netp = asoc->alternate; |
5067 | 0 | } else { |
5068 | 0 | netp = asoc->primary_destination; |
5069 | 0 | } |
5070 | 0 | sctp_send_shutdown(stcb, netp); |
5071 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, |
5072 | 0 | stcb->sctp_ep, stcb, netp); |
5073 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, |
5074 | 0 | stcb->sctp_ep, stcb, NULL); |
5075 | 0 | return; |
5076 | 0 | } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) && |
5077 | 0 | (asoc->stream_queue_cnt == 0)) { |
5078 | 0 | struct sctp_nets *netp; |
5079 | |
|
5080 | 0 | SCTP_STAT_DECR_GAUGE32(sctps_currestab); |
5081 | 0 | SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT); |
5082 | 0 | sctp_stop_timers_for_shutdown(stcb); |
5083 | 0 | if (asoc->alternate) { |
5084 | 0 | netp = asoc->alternate; |
5085 | 0 | } else { |
5086 | 0 | netp = asoc->primary_destination; |
5087 | 0 | } |
5088 | 0 | sctp_send_shutdown_ack(stcb, netp); |
5089 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, |
5090 | 0 | stcb->sctp_ep, stcb, netp); |
5091 | 0 | return; |
5092 | 0 | } |
5093 | 0 | } |
5094 | | /* |
5095 | | * Now here we are going to recycle net_ack for a different use... |
5096 | | * HEADS UP. |
5097 | | */ |
5098 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
5099 | 0 | net->net_ack = 0; |
5100 | 0 | } |
5101 | | |
5102 | | /* |
5103 | | * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking |
5104 | | * to be done. Setting this_sack_lowest_newack to the cum_ack will |
5105 | | * automatically ensure that. |
5106 | | */ |
5107 | 0 | if ((asoc->sctp_cmt_on_off > 0) && |
5108 | 0 | SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && |
5109 | 0 | (cmt_dac_flag == 0)) { |
5110 | 0 | this_sack_lowest_newack = cum_ack; |
5111 | 0 | } |
5112 | 0 | if ((num_seg > 0) || (num_nr_seg > 0)) { |
5113 | 0 | sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, |
5114 | 0 | biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); |
5115 | 0 | } |
5116 | | /* JRS - Use the congestion control given in the CC module */ |
5117 | 0 | asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc); |
5118 | | |
5119 | | /* Now are we exiting loss recovery ? */ |
5120 | 0 | if (will_exit_fast_recovery) { |
5121 | | /* Ok, we must exit fast recovery */ |
5122 | 0 | asoc->fast_retran_loss_recovery = 0; |
5123 | 0 | } |
5124 | 0 | if ((asoc->sat_t3_loss_recovery) && |
5125 | 0 | SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) { |
5126 | | /* end satellite t3 loss recovery */ |
5127 | 0 | asoc->sat_t3_loss_recovery = 0; |
5128 | 0 | } |
5129 | | /* |
5130 | | * CMT Fast recovery |
5131 | | */ |
5132 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
5133 | 0 | if (net->will_exit_fast_recovery) { |
5134 | | /* Ok, we must exit fast recovery */ |
5135 | 0 | net->fast_retran_loss_recovery = 0; |
5136 | 0 | } |
5137 | 0 | } |
5138 | | |
5139 | | /* Adjust and set the new rwnd value */ |
5140 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { |
5141 | 0 | sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, |
5142 | 0 | asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd); |
5143 | 0 | } |
5144 | 0 | asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, |
5145 | 0 | (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); |
5146 | 0 | if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { |
5147 | | /* SWS sender side engages */ |
5148 | 0 | asoc->peers_rwnd = 0; |
5149 | 0 | } |
5150 | 0 | if (asoc->peers_rwnd > old_rwnd) { |
5151 | 0 | win_probe_recovery = 1; |
5152 | 0 | } |
5153 | | |
5154 | | /* |
5155 | | * Now we must setup so we have a timer up for anyone with |
5156 | | * outstanding data. |
5157 | | */ |
5158 | 0 | done_once = 0; |
5159 | 0 | again: |
5160 | 0 | j = 0; |
5161 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
5162 | 0 | if (win_probe_recovery && (net->window_probe)) { |
5163 | 0 | win_probe_recovered = 1; |
5164 | | /*- |
5165 | | * Find first chunk that was used with |
5166 | | * window probe and clear the event. Put |
5167 | | * it back into the send queue as if has |
5168 | | * not been sent. |
5169 | | */ |
5170 | 0 | TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
5171 | 0 | if (tp1->window_probe) { |
5172 | 0 | sctp_window_probe_recovery(stcb, asoc, tp1); |
5173 | 0 | break; |
5174 | 0 | } |
5175 | 0 | } |
5176 | 0 | } |
5177 | 0 | if (net->flight_size) { |
5178 | 0 | j++; |
5179 | 0 | if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { |
5180 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SEND, |
5181 | 0 | stcb->sctp_ep, stcb, net); |
5182 | 0 | } |
5183 | 0 | if (net->window_probe) { |
5184 | 0 | net->window_probe = 0; |
5185 | 0 | } |
5186 | 0 | } else { |
5187 | 0 | if (net->window_probe) { |
5188 | | /* In window probes we must assure a timer is still running there */ |
5189 | 0 | if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { |
5190 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SEND, |
5191 | 0 | stcb->sctp_ep, stcb, net); |
5192 | 0 | } |
5193 | 0 | } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { |
5194 | 0 | sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, |
5195 | 0 | stcb, net, |
5196 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_36); |
5197 | 0 | } |
5198 | 0 | } |
5199 | 0 | } |
5200 | 0 | if ((j == 0) && |
5201 | 0 | (!TAILQ_EMPTY(&asoc->sent_queue)) && |
5202 | 0 | (asoc->sent_queue_retran_cnt == 0) && |
5203 | 0 | (win_probe_recovered == 0) && |
5204 | 0 | (done_once == 0)) { |
5205 | | /* huh, this should not happen unless all packets |
5206 | | * are PR-SCTP and marked to skip of course. |
5207 | | */ |
5208 | 0 | if (sctp_fs_audit(asoc)) { |
5209 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
5210 | 0 | net->flight_size = 0; |
5211 | 0 | } |
5212 | 0 | asoc->total_flight = 0; |
5213 | 0 | asoc->total_flight_count = 0; |
5214 | 0 | asoc->sent_queue_retran_cnt = 0; |
5215 | 0 | TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
5216 | 0 | if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
5217 | 0 | sctp_flight_size_increase(tp1); |
5218 | 0 | sctp_total_flight_increase(stcb, tp1); |
5219 | 0 | } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { |
5220 | 0 | sctp_ucount_incr(asoc->sent_queue_retran_cnt); |
5221 | 0 | } |
5222 | 0 | } |
5223 | 0 | } |
5224 | 0 | done_once = 1; |
5225 | 0 | goto again; |
5226 | 0 | } |
5227 | | /*********************************************/ |
5228 | | /* Here we perform PR-SCTP procedures */ |
5229 | | /* (section 4.2) */ |
5230 | | /*********************************************/ |
5231 | | /* C1. update advancedPeerAckPoint */ |
5232 | 0 | if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) { |
5233 | 0 | asoc->advanced_peer_ack_point = cum_ack; |
5234 | 0 | } |
5235 | | /* C2. try to further move advancedPeerAckPoint ahead */ |
5236 | 0 | if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { |
5237 | 0 | struct sctp_tmit_chunk *lchk; |
5238 | 0 | uint32_t old_adv_peer_ack_point; |
5239 | |
|
5240 | 0 | old_adv_peer_ack_point = asoc->advanced_peer_ack_point; |
5241 | 0 | lchk = sctp_try_advance_peer_ack_point(stcb, asoc); |
5242 | | /* C3. See if we need to send a Fwd-TSN */ |
5243 | 0 | if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) { |
5244 | | /* |
5245 | | * ISSUE with ECN, see FWD-TSN processing. |
5246 | | */ |
5247 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { |
5248 | 0 | sctp_misc_ints(SCTP_FWD_TSN_CHECK, |
5249 | 0 | 0xee, cum_ack, asoc->advanced_peer_ack_point, |
5250 | 0 | old_adv_peer_ack_point); |
5251 | 0 | } |
5252 | 0 | if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { |
5253 | 0 | send_forward_tsn(stcb, asoc); |
5254 | 0 | } else if (lchk) { |
5255 | | /* try to FR fwd-tsn's that get lost too */ |
5256 | 0 | if (lchk->rec.data.fwd_tsn_cnt >= 3) { |
5257 | 0 | send_forward_tsn(stcb, asoc); |
5258 | 0 | } |
5259 | 0 | } |
5260 | 0 | } |
5261 | 0 | for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) { |
5262 | 0 | if (lchk->whoTo != NULL) { |
5263 | 0 | break; |
5264 | 0 | } |
5265 | 0 | } |
5266 | 0 | if (lchk != NULL) { |
5267 | | /* Assure a timer is up */ |
5268 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SEND, |
5269 | 0 | stcb->sctp_ep, stcb, lchk->whoTo); |
5270 | 0 | } |
5271 | 0 | } |
5272 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { |
5273 | 0 | sctp_misc_ints(SCTP_SACK_RWND_UPDATE, |
5274 | 0 | a_rwnd, |
5275 | 0 | stcb->asoc.peers_rwnd, |
5276 | 0 | stcb->asoc.total_flight, |
5277 | 0 | stcb->asoc.total_output_queue_size); |
5278 | 0 | } |
5279 | 0 | } |
5280 | | |
5281 | | void |
5282 | | sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag) |
5283 | 1.09k | { |
5284 | | /* Copy cum-ack */ |
5285 | 1.09k | uint32_t cum_ack, a_rwnd; |
5286 | | |
5287 | 1.09k | cum_ack = ntohl(cp->cumulative_tsn_ack); |
5288 | | /* Arrange so a_rwnd does NOT change */ |
5289 | 1.09k | a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; |
5290 | | |
5291 | | /* Now call the express sack handling */ |
5292 | 1.09k | sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0); |
5293 | 1.09k | } |
5294 | | |
5295 | | static void |
5296 | | sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, |
5297 | | struct sctp_stream_in *strmin) |
5298 | 7.80k | { |
5299 | 7.80k | struct sctp_queued_to_read *control, *ncontrol; |
5300 | 7.80k | struct sctp_association *asoc; |
5301 | 7.80k | uint32_t mid; |
5302 | 7.80k | int need_reasm_check = 0; |
5303 | | |
5304 | 7.80k | KASSERT(stcb != NULL, ("stcb == NULL")); |
5305 | 7.80k | SCTP_TCB_LOCK_ASSERT(stcb); |
5306 | 7.80k | SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep); |
5307 | | |
5308 | 7.80k | asoc = &stcb->asoc; |
5309 | 7.80k | mid = strmin->last_mid_delivered; |
5310 | | /* |
5311 | | * First deliver anything prior to and including the stream no that |
5312 | | * came in. |
5313 | | */ |
5314 | 7.80k | TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) { |
5315 | 0 | if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) { |
5316 | | /* this is deliverable now */ |
5317 | 0 | if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { |
5318 | 0 | if (control->on_strm_q) { |
5319 | 0 | if (control->on_strm_q == SCTP_ON_ORDERED) { |
5320 | 0 | TAILQ_REMOVE(&strmin->inqueue, control, next_instrm); |
5321 | 0 | } else if (control->on_strm_q == SCTP_ON_UNORDERED) { |
5322 | 0 | TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm); |
5323 | 0 | #ifdef INVARIANTS |
5324 | 0 | } else { |
5325 | 0 | panic("strmin: %p ctl: %p unknown %d", |
5326 | 0 | strmin, control, control->on_strm_q); |
5327 | 0 | #endif |
5328 | 0 | } |
5329 | 0 | control->on_strm_q = 0; |
5330 | 0 | } |
5331 | | /* subtract pending on streams */ |
5332 | 0 | if (asoc->size_on_all_streams >= control->length) { |
5333 | 0 | asoc->size_on_all_streams -= control->length; |
5334 | 0 | } else { |
5335 | 0 | #ifdef INVARIANTS |
5336 | 0 | panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); |
5337 | | #else |
5338 | | asoc->size_on_all_streams = 0; |
5339 | | #endif |
5340 | 0 | } |
5341 | 0 | sctp_ucount_decr(asoc->cnt_on_all_streams); |
5342 | | /* deliver it to at least the delivery-q */ |
5343 | 0 | if (stcb->sctp_socket) { |
5344 | 0 | sctp_mark_non_revokable(asoc, control->sinfo_tsn); |
5345 | 0 | sctp_add_to_readq(stcb->sctp_ep, stcb, control, |
5346 | 0 | &stcb->sctp_socket->so_rcv, 1, |
5347 | 0 | SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED); |
5348 | 0 | } |
5349 | 0 | } else { |
5350 | | /* Its a fragmented message */ |
5351 | 0 | if (control->first_frag_seen) { |
5352 | | /* Make it so this is next to deliver, we restore later */ |
5353 | 0 | strmin->last_mid_delivered = control->mid - 1; |
5354 | 0 | need_reasm_check = 1; |
5355 | 0 | break; |
5356 | 0 | } |
5357 | 0 | } |
5358 | 0 | } else { |
5359 | | /* no more delivery now. */ |
5360 | 0 | break; |
5361 | 0 | } |
5362 | 0 | } |
5363 | 7.80k | if (need_reasm_check) { |
5364 | 0 | int ret; |
5365 | 0 | ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD); |
5366 | 0 | if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) { |
5367 | | /* Restore the next to deliver unless we are ahead */ |
5368 | 0 | strmin->last_mid_delivered = mid; |
5369 | 0 | } |
5370 | 0 | if (ret == 0) { |
5371 | | /* Left the front Partial one on */ |
5372 | 0 | return; |
5373 | 0 | } |
5374 | 0 | need_reasm_check = 0; |
5375 | 0 | } |
5376 | | /* |
5377 | | * now we must deliver things in queue the normal way if any are |
5378 | | * now ready. |
5379 | | */ |
5380 | 7.80k | mid = strmin->last_mid_delivered + 1; |
5381 | 7.80k | TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) { |
5382 | 0 | if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) { |
5383 | 0 | if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { |
5384 | | /* this is deliverable now */ |
5385 | 0 | if (control->on_strm_q) { |
5386 | 0 | if (control->on_strm_q == SCTP_ON_ORDERED) { |
5387 | 0 | TAILQ_REMOVE(&strmin->inqueue, control, next_instrm); |
5388 | 0 | } else if (control->on_strm_q == SCTP_ON_UNORDERED) { |
5389 | 0 | TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm); |
5390 | 0 | #ifdef INVARIANTS |
5391 | 0 | } else { |
5392 | 0 | panic("strmin: %p ctl: %p unknown %d", |
5393 | 0 | strmin, control, control->on_strm_q); |
5394 | 0 | #endif |
5395 | 0 | } |
5396 | 0 | control->on_strm_q = 0; |
5397 | 0 | } |
5398 | | /* subtract pending on streams */ |
5399 | 0 | if (asoc->size_on_all_streams >= control->length) { |
5400 | 0 | asoc->size_on_all_streams -= control->length; |
5401 | 0 | } else { |
5402 | 0 | #ifdef INVARIANTS |
5403 | 0 | panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); |
5404 | | #else |
5405 | | asoc->size_on_all_streams = 0; |
5406 | | #endif |
5407 | 0 | } |
5408 | 0 | sctp_ucount_decr(asoc->cnt_on_all_streams); |
5409 | | /* deliver it to at least the delivery-q */ |
5410 | 0 | strmin->last_mid_delivered = control->mid; |
5411 | 0 | if (stcb->sctp_socket) { |
5412 | 0 | sctp_mark_non_revokable(asoc, control->sinfo_tsn); |
5413 | 0 | sctp_add_to_readq(stcb->sctp_ep, stcb, control, |
5414 | 0 | &stcb->sctp_socket->so_rcv, 1, |
5415 | 0 | SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED); |
5416 | 0 | } |
5417 | 0 | mid = strmin->last_mid_delivered + 1; |
5418 | 0 | } else { |
5419 | | /* Its a fragmented message */ |
5420 | 0 | if (control->first_frag_seen) { |
5421 | | /* Make it so this is next to deliver */ |
5422 | 0 | strmin->last_mid_delivered = control->mid - 1; |
5423 | 0 | need_reasm_check = 1; |
5424 | 0 | break; |
5425 | 0 | } |
5426 | 0 | } |
5427 | 0 | } else { |
5428 | 0 | break; |
5429 | 0 | } |
5430 | 0 | } |
5431 | 7.80k | if (need_reasm_check) { |
5432 | 0 | (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD); |
5433 | 0 | } |
5434 | 7.80k | } |
5435 | | |
5436 | | static void |
5437 | | sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb, |
5438 | | struct sctp_association *asoc, struct sctp_stream_in *strm, |
5439 | | struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn) |
5440 | 0 | { |
5441 | 0 | struct sctp_tmit_chunk *chk, *nchk; |
5442 | | |
5443 | | /* |
5444 | | * For now large messages held on the stream reasm that are |
5445 | | * complete will be tossed too. We could in theory do more |
5446 | | * work to spin through and stop after dumping one msg aka |
5447 | | * seeing the start of a new msg at the head, and call the |
5448 | | * delivery function... to see if it can be delivered... But |
5449 | | * for now we just dump everything on the queue. |
5450 | | */ |
5451 | |
|
5452 | 0 | KASSERT(stcb != NULL, ("stcb == NULL")); |
5453 | 0 | SCTP_TCB_LOCK_ASSERT(stcb); |
5454 | 0 | SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep); |
5455 | | |
5456 | 0 | if (!asoc->idata_supported && !ordered && |
5457 | 0 | control->first_frag_seen && |
5458 | 0 | SCTP_TSN_GT(control->fsn_included, cumtsn)) { |
5459 | 0 | return; |
5460 | 0 | } |
5461 | 0 | TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) { |
5462 | | /* Purge hanging chunks */ |
5463 | 0 | if (!asoc->idata_supported && !ordered) { |
5464 | 0 | if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) { |
5465 | 0 | break; |
5466 | 0 | } |
5467 | 0 | } |
5468 | 0 | TAILQ_REMOVE(&control->reasm, chk, sctp_next); |
5469 | 0 | if (asoc->size_on_reasm_queue >= chk->send_size) { |
5470 | 0 | asoc->size_on_reasm_queue -= chk->send_size; |
5471 | 0 | } else { |
5472 | 0 | #ifdef INVARIANTS |
5473 | 0 | panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size); |
5474 | | #else |
5475 | | asoc->size_on_reasm_queue = 0; |
5476 | | #endif |
5477 | 0 | } |
5478 | 0 | sctp_ucount_decr(asoc->cnt_on_reasm_queue); |
5479 | 0 | if (chk->data) { |
5480 | 0 | sctp_m_freem(chk->data); |
5481 | 0 | chk->data = NULL; |
5482 | 0 | } |
5483 | 0 | sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
5484 | 0 | } |
5485 | 0 | if (!TAILQ_EMPTY(&control->reasm)) { |
5486 | 0 | KASSERT(!asoc->idata_supported, |
5487 | 0 | ("Reassembly queue not empty for I-DATA")); |
5488 | 0 | KASSERT(!ordered, |
5489 | 0 | ("Reassembly queue not empty for ordered data")); |
5490 | 0 | if (control->data) { |
5491 | 0 | sctp_m_freem(control->data); |
5492 | 0 | control->data = NULL; |
5493 | 0 | } |
5494 | 0 | control->fsn_included = 0xffffffff; |
5495 | 0 | control->first_frag_seen = 0; |
5496 | 0 | control->last_frag_seen = 0; |
5497 | 0 | if (control->on_read_q) { |
5498 | | /* |
5499 | | * We have to purge it from there, |
5500 | | * hopefully this will work :-) |
5501 | | */ |
5502 | 0 | TAILQ_REMOVE(&stcb->sctp_ep->read_queue, control, next); |
5503 | 0 | control->on_read_q = 0; |
5504 | 0 | } |
5505 | 0 | chk = TAILQ_FIRST(&control->reasm); |
5506 | 0 | if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { |
5507 | 0 | TAILQ_REMOVE(&control->reasm, chk, sctp_next); |
5508 | 0 | sctp_add_chk_to_control(control, strm, stcb, asoc, |
5509 | 0 | chk, SCTP_READ_LOCK_HELD); |
5510 | 0 | } |
5511 | 0 | sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD); |
5512 | 0 | return; |
5513 | 0 | } |
5514 | 0 | if (control->on_strm_q == SCTP_ON_ORDERED) { |
5515 | 0 | TAILQ_REMOVE(&strm->inqueue, control, next_instrm); |
5516 | 0 | if (asoc->size_on_all_streams >= control->length) { |
5517 | 0 | asoc->size_on_all_streams -= control->length; |
5518 | 0 | } else { |
5519 | 0 | #ifdef INVARIANTS |
5520 | 0 | panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); |
5521 | | #else |
5522 | | asoc->size_on_all_streams = 0; |
5523 | | #endif |
5524 | 0 | } |
5525 | 0 | sctp_ucount_decr(asoc->cnt_on_all_streams); |
5526 | 0 | control->on_strm_q = 0; |
5527 | 0 | } else if (control->on_strm_q == SCTP_ON_UNORDERED) { |
5528 | 0 | TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); |
5529 | 0 | control->on_strm_q = 0; |
5530 | 0 | #ifdef INVARIANTS |
5531 | 0 | } else if (control->on_strm_q) { |
5532 | 0 | panic("strm: %p ctl: %p unknown %d", |
5533 | 0 | strm, control, control->on_strm_q); |
5534 | 0 | #endif |
5535 | 0 | } |
5536 | 0 | control->on_strm_q = 0; |
5537 | 0 | if (control->on_read_q == 0) { |
5538 | 0 | sctp_free_remote_addr(control->whoFrom); |
5539 | 0 | if (control->data) { |
5540 | 0 | sctp_m_freem(control->data); |
5541 | 0 | control->data = NULL; |
5542 | 0 | } |
5543 | 0 | sctp_free_a_readq(stcb, control); |
5544 | 0 | } |
5545 | 0 | } |
5546 | | |
5547 | | void |
5548 | | sctp_handle_forward_tsn(struct sctp_tcb *stcb, |
5549 | | struct sctp_forward_tsn_chunk *fwd, |
5550 | | int *abort_flag, struct mbuf *m , int offset) |
5551 | 3.30k | { |
5552 | | /* The pr-sctp fwd tsn */ |
5553 | | /* |
5554 | | * here we will perform all the data receiver side steps for |
5555 | | * processing FwdTSN, as required in by pr-sctp draft: |
5556 | | * |
5557 | | * Assume we get FwdTSN(x): |
5558 | | * |
5559 | | * 1) update local cumTSN to x |
5560 | | * 2) try to further advance cumTSN to x + others we have |
5561 | | * 3) examine and update re-ordering queue on pr-in-streams |
5562 | | * 4) clean up re-assembly queue |
5563 | | * 5) Send a sack to report where we are. |
5564 | | */ |
5565 | 3.30k | struct sctp_association *asoc; |
5566 | 3.30k | uint32_t new_cum_tsn, gap; |
5567 | 3.30k | unsigned int i, fwd_sz, m_size; |
5568 | 3.30k | struct sctp_stream_in *strm; |
5569 | 3.30k | struct sctp_queued_to_read *control, *ncontrol; |
5570 | | |
5571 | 3.30k | asoc = &stcb->asoc; |
5572 | 3.30k | if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) { |
5573 | 0 | SCTPDBG(SCTP_DEBUG_INDATA1, |
5574 | 0 | "Bad size too small/big fwd-tsn\n"); |
5575 | 0 | return; |
5576 | 0 | } |
5577 | 3.30k | m_size = (stcb->asoc.mapping_array_size << 3); |
5578 | | /*************************************************************/ |
5579 | | /* 1. Here we update local cumTSN and shift the bitmap array */ |
5580 | | /*************************************************************/ |
5581 | 3.30k | new_cum_tsn = ntohl(fwd->new_cumulative_tsn); |
5582 | | |
5583 | 3.30k | if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) { |
5584 | | /* Already got there ... */ |
5585 | 1.98k | return; |
5586 | 1.98k | } |
5587 | | /* |
5588 | | * now we know the new TSN is more advanced, let's find the actual |
5589 | | * gap |
5590 | | */ |
5591 | 1.32k | SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn); |
5592 | 1.32k | asoc->cumulative_tsn = new_cum_tsn; |
5593 | 1.32k | if (gap >= m_size) { |
5594 | 828 | if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) { |
5595 | 98 | struct mbuf *op_err; |
5596 | 98 | char msg[SCTP_DIAG_INFO_LEN]; |
5597 | | |
5598 | | /* |
5599 | | * out of range (of single byte chunks in the rwnd I |
5600 | | * give out). This must be an attacker. |
5601 | | */ |
5602 | 98 | *abort_flag = 1; |
5603 | 98 | SCTP_SNPRINTF(msg, sizeof(msg), |
5604 | 98 | "New cum ack %8.8x too high, highest TSN %8.8x", |
5605 | 98 | new_cum_tsn, asoc->highest_tsn_inside_map); |
5606 | 98 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
5607 | 98 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37; |
5608 | 98 | sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
5609 | 98 | return; |
5610 | 98 | } |
5611 | 730 | SCTP_STAT_INCR(sctps_fwdtsn_map_over); |
5612 | | |
5613 | 730 | memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); |
5614 | 730 | asoc->mapping_array_base_tsn = new_cum_tsn + 1; |
5615 | 730 | asoc->highest_tsn_inside_map = new_cum_tsn; |
5616 | | |
5617 | 730 | memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size); |
5618 | 730 | asoc->highest_tsn_inside_nr_map = new_cum_tsn; |
5619 | | |
5620 | 730 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { |
5621 | 0 | sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); |
5622 | 0 | } |
5623 | 730 | } else { |
5624 | 497 | SCTP_TCB_LOCK_ASSERT(stcb); |
5625 | 12.8k | for (i = 0; i <= gap; i++) { |
5626 | 12.3k | if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) && |
5627 | 12.3k | !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) { |
5628 | 12.2k | SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i); |
5629 | 12.2k | if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) { |
5630 | 12.2k | asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i; |
5631 | 12.2k | } |
5632 | 12.2k | } |
5633 | 12.3k | } |
5634 | 497 | } |
5635 | | /*************************************************************/ |
5636 | | /* 2. Clear up re-assembly queue */ |
5637 | | /*************************************************************/ |
5638 | | |
5639 | | /* This is now done as part of clearing up the stream/seq */ |
5640 | 1.22k | if (asoc->idata_supported == 0) { |
5641 | 583 | uint16_t sid; |
5642 | | |
5643 | | /* Flush all the un-ordered data based on cum-tsn */ |
5644 | 583 | SCTP_INP_READ_LOCK(stcb->sctp_ep); |
5645 | 111k | for (sid = 0; sid < asoc->streamincnt; sid++) { |
5646 | 110k | strm = &asoc->strmin[sid]; |
5647 | 110k | if (!TAILQ_EMPTY(&strm->uno_inqueue)) { |
5648 | 0 | sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn); |
5649 | 0 | } |
5650 | 110k | } |
5651 | 583 | SCTP_INP_READ_UNLOCK(stcb->sctp_ep); |
5652 | 583 | } |
5653 | | /*******************************************************/ |
5654 | | /* 3. Update the PR-stream re-ordering queues and fix */ |
5655 | | /* delivery issues as needed. */ |
5656 | | /*******************************************************/ |
5657 | 1.22k | fwd_sz -= sizeof(*fwd); |
5658 | 1.22k | if (m && fwd_sz) { |
5659 | | /* New method. */ |
5660 | 734 | unsigned int num_str; |
5661 | 734 | uint32_t mid; |
5662 | 734 | uint16_t sid; |
5663 | 734 | uint16_t ordered, flags; |
5664 | 734 | struct sctp_strseq *stseq, strseqbuf; |
5665 | 734 | struct sctp_strseq_mid *stseq_m, strseqbuf_m; |
5666 | 734 | offset += sizeof(*fwd); |
5667 | | |
5668 | 734 | SCTP_INP_READ_LOCK(stcb->sctp_ep); |
5669 | 734 | if (asoc->idata_supported) { |
5670 | 466 | num_str = fwd_sz / sizeof(struct sctp_strseq_mid); |
5671 | 466 | } else { |
5672 | 268 | num_str = fwd_sz / sizeof(struct sctp_strseq); |
5673 | 268 | } |
5674 | 8.53k | for (i = 0; i < num_str; i++) { |
5675 | 8.01k | if (asoc->idata_supported) { |
5676 | 2.42k | stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset, |
5677 | 2.42k | sizeof(struct sctp_strseq_mid), |
5678 | 2.42k | (uint8_t *)&strseqbuf_m); |
5679 | 2.42k | offset += sizeof(struct sctp_strseq_mid); |
5680 | 2.42k | if (stseq_m == NULL) { |
5681 | 0 | break; |
5682 | 0 | } |
5683 | 2.42k | sid = ntohs(stseq_m->sid); |
5684 | 2.42k | mid = ntohl(stseq_m->mid); |
5685 | 2.42k | flags = ntohs(stseq_m->flags); |
5686 | 2.42k | if (flags & PR_SCTP_UNORDERED_FLAG) { |
5687 | 745 | ordered = 0; |
5688 | 1.68k | } else { |
5689 | 1.68k | ordered = 1; |
5690 | 1.68k | } |
5691 | 5.58k | } else { |
5692 | 5.58k | stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset, |
5693 | 5.58k | sizeof(struct sctp_strseq), |
5694 | 5.58k | (uint8_t *)&strseqbuf); |
5695 | 5.58k | offset += sizeof(struct sctp_strseq); |
5696 | 5.58k | if (stseq == NULL) { |
5697 | 0 | break; |
5698 | 0 | } |
5699 | 5.58k | sid = ntohs(stseq->sid); |
5700 | 5.58k | mid = (uint32_t)ntohs(stseq->ssn); |
5701 | 5.58k | ordered = 1; |
5702 | 5.58k | } |
5703 | | /* Convert */ |
5704 | | |
5705 | | /* now process */ |
5706 | | |
5707 | | /* |
5708 | | * Ok we now look for the stream/seq on the read queue |
5709 | | * where its not all delivered. If we find it we transmute the |
5710 | | * read entry into a PDI_ABORTED. |
5711 | | */ |
5712 | 8.01k | if (sid >= asoc->streamincnt) { |
5713 | | /* screwed up streams, stop! */ |
5714 | 217 | break; |
5715 | 217 | } |
5716 | 7.80k | if ((asoc->str_of_pdapi == sid) && |
5717 | 5.98k | (asoc->ssn_of_pdapi == mid)) { |
5718 | | /* If this is the one we were partially delivering |
5719 | | * now then we no longer are. Note this will change |
5720 | | * with the reassembly re-write. |
5721 | | */ |
5722 | 1.90k | asoc->fragmented_delivery_inprogress = 0; |
5723 | 1.90k | } |
5724 | 7.80k | strm = &asoc->strmin[sid]; |
5725 | 7.80k | if (ordered) { |
5726 | 7.10k | TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, ncontrol) { |
5727 | 0 | if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) { |
5728 | 0 | sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn); |
5729 | 0 | } |
5730 | 0 | } |
5731 | 7.10k | } else { |
5732 | 696 | if (asoc->idata_supported) { |
5733 | 696 | TAILQ_FOREACH_SAFE(control, &strm->uno_inqueue, next_instrm, ncontrol) { |
5734 | 0 | if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) { |
5735 | 0 | sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn); |
5736 | 0 | } |
5737 | 0 | } |
5738 | 696 | } else { |
5739 | 0 | if (!TAILQ_EMPTY(&strm->uno_inqueue)) { |
5740 | 0 | sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn); |
5741 | 0 | } |
5742 | 0 | } |
5743 | 696 | } |
5744 | 18.3M | TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) { |
5745 | 18.3M | if ((control->sinfo_stream == sid) && |
5746 | 17.3M | (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) { |
5747 | 1.34k | control->pdapi_aborted = 1; |
5748 | 1.34k | control->end_added = 1; |
5749 | 1.34k | if (control->on_strm_q == SCTP_ON_ORDERED) { |
5750 | 0 | TAILQ_REMOVE(&strm->inqueue, control, next_instrm); |
5751 | 0 | if (asoc->size_on_all_streams >= control->length) { |
5752 | 0 | asoc->size_on_all_streams -= control->length; |
5753 | 0 | } else { |
5754 | 0 | #ifdef INVARIANTS |
5755 | 0 | panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); |
5756 | | #else |
5757 | | asoc->size_on_all_streams = 0; |
5758 | | #endif |
5759 | 0 | } |
5760 | 0 | sctp_ucount_decr(asoc->cnt_on_all_streams); |
5761 | 1.34k | } else if (control->on_strm_q == SCTP_ON_UNORDERED) { |
5762 | 0 | TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); |
5763 | 0 | #ifdef INVARIANTS |
5764 | 1.34k | } else if (control->on_strm_q) { |
5765 | 0 | panic("strm: %p ctl: %p unknown %d", |
5766 | 0 | strm, control, control->on_strm_q); |
5767 | 0 | #endif |
5768 | 0 | } |
5769 | 1.34k | control->on_strm_q = 0; |
5770 | 1.34k | sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, |
5771 | 1.34k | stcb, |
5772 | 1.34k | SCTP_PARTIAL_DELIVERY_ABORTED, |
5773 | 1.34k | (void *)control, |
5774 | 1.34k | SCTP_SO_NOT_LOCKED); |
5775 | 1.34k | break; |
5776 | 18.3M | } else if ((control->sinfo_stream == sid) && |
5777 | 17.3M | SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) { |
5778 | | /* We are past our victim SSN */ |
5779 | 166 | break; |
5780 | 166 | } |
5781 | 18.3M | } |
5782 | 7.80k | if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) { |
5783 | | /* Update the sequence number */ |
5784 | 1.12k | strm->last_mid_delivered = mid; |
5785 | 1.12k | } |
5786 | | /* now kick the stream the new way */ |
5787 | | /*sa_ignore NO_NULL_CHK*/ |
5788 | 7.80k | sctp_kick_prsctp_reorder_queue(stcb, strm); |
5789 | 7.80k | } |
5790 | 734 | SCTP_INP_READ_UNLOCK(stcb->sctp_ep); |
5791 | 734 | } |
5792 | | /* |
5793 | | * Now slide thing forward. |
5794 | | */ |
5795 | 1.22k | sctp_slide_mapping_arrays(stcb); |
5796 | 1.22k | } |