/src/usrsctp/usrsctplib/netinet/sctp_indata.c
Line | Count | Source |
1 | | /*- |
2 | | * SPDX-License-Identifier: BSD-3-Clause |
3 | | * |
4 | | * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. |
5 | | * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. |
6 | | * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. |
7 | | * |
8 | | * Redistribution and use in source and binary forms, with or without |
9 | | * modification, are permitted provided that the following conditions are met: |
10 | | * |
11 | | * a) Redistributions of source code must retain the above copyright notice, |
12 | | * this list of conditions and the following disclaimer. |
13 | | * |
14 | | * b) Redistributions in binary form must reproduce the above copyright |
15 | | * notice, this list of conditions and the following disclaimer in |
16 | | * the documentation and/or other materials provided with the distribution. |
17 | | * |
18 | | * c) Neither the name of Cisco Systems, Inc. nor the names of its |
19 | | * contributors may be used to endorse or promote products derived |
20 | | * from this software without specific prior written permission. |
21 | | * |
22 | | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
23 | | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, |
24 | | * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
25 | | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
26 | | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
27 | | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
28 | | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
29 | | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
30 | | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
31 | | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
32 | | * THE POSSIBILITY OF SUCH DAMAGE. |
33 | | */ |
34 | | |
35 | | #include <netinet/sctp_os.h> |
36 | | #if defined(__FreeBSD__) && !defined(__Userspace__) |
37 | | #include <sys/proc.h> |
38 | | #endif |
39 | | #include <netinet/sctp_var.h> |
40 | | #include <netinet/sctp_sysctl.h> |
41 | | #include <netinet/sctp_header.h> |
42 | | #include <netinet/sctp_pcb.h> |
43 | | #include <netinet/sctputil.h> |
44 | | #include <netinet/sctp_output.h> |
45 | | #include <netinet/sctp_uio.h> |
46 | | #include <netinet/sctp_auth.h> |
47 | | #include <netinet/sctp_timer.h> |
48 | | #include <netinet/sctp_asconf.h> |
49 | | #include <netinet/sctp_indata.h> |
50 | | #include <netinet/sctp_bsd_addr.h> |
51 | | #include <netinet/sctp_input.h> |
52 | | #include <netinet/sctp_crc32.h> |
53 | | #if defined(__FreeBSD__) && !defined(__Userspace__) |
54 | | #include <netinet/sctp_lock_bsd.h> |
55 | | #endif |
56 | | #if defined(_WIN32) && defined(__MINGW32__) |
57 | | #include <minmax.h> |
58 | | #endif |
59 | | /* |
60 | | * NOTES: On the outbound side of things I need to check the sack timer to |
61 | | * see if I should generate a sack into the chunk queue (if I have data to |
62 | | * send that is and will be sending it .. for bundling. |
63 | | * |
64 | | * The callback in sctp_usrreq.c will get called when the socket is read from. |
65 | | * This will cause sctp_service_queues() to get called on the top entry in |
66 | | * the list. |
67 | | */ |
68 | | static uint32_t |
69 | | sctp_add_chk_to_control(struct sctp_queued_to_read *control, |
70 | | struct sctp_stream_in *strm, |
71 | | struct sctp_tcb *stcb, |
72 | | struct sctp_association *asoc, |
73 | | struct sctp_tmit_chunk *chk, int hold_rlock); |
74 | | |
75 | | void |
76 | | sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc) |
77 | 1 | { |
78 | 1 | asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc); |
79 | 1 | } |
80 | | |
81 | | /* Calculate what the rwnd would be */ |
82 | | uint32_t |
83 | | sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc) |
84 | 1 | { |
85 | 1 | uint32_t calc = 0; |
86 | | |
87 | | /* |
88 | | * This is really set wrong with respect to a 1-2-m socket. Since |
89 | | * the sb_cc is the count that everyone as put up. When we re-write |
90 | | * sctp_soreceive then we will fix this so that ONLY this |
91 | | * associations data is taken into account. |
92 | | */ |
93 | 1 | if (stcb->sctp_socket == NULL) { |
94 | 0 | return (calc); |
95 | 0 | } |
96 | | |
97 | 1 | KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0, |
98 | 1 | ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue)); |
99 | 1 | KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0, |
100 | 1 | ("size_on_all_streams is %u", asoc->size_on_all_streams)); |
101 | 1 | if (stcb->asoc.sb_cc == 0 && |
102 | 1 | asoc->cnt_on_reasm_queue == 0 && |
103 | 1 | asoc->cnt_on_all_streams == 0) { |
104 | | /* Full rwnd granted */ |
105 | 1 | calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND); |
106 | 1 | return (calc); |
107 | 1 | } |
108 | | /* get actual space */ |
109 | 0 | calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv); |
110 | | /* |
111 | | * take out what has NOT been put on socket queue and we yet hold |
112 | | * for putting up. |
113 | | */ |
114 | 0 | calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue + |
115 | 0 | asoc->cnt_on_reasm_queue * MSIZE)); |
116 | 0 | calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams + |
117 | 0 | asoc->cnt_on_all_streams * MSIZE)); |
118 | 0 | if (calc == 0) { |
119 | | /* out of space */ |
120 | 0 | return (calc); |
121 | 0 | } |
122 | | |
123 | | /* what is the overhead of all these rwnd's */ |
124 | 0 | calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len); |
125 | | /* If the window gets too small due to ctrl-stuff, reduce it |
126 | | * to 1, even it is 0. SWS engaged |
127 | | */ |
128 | 0 | if (calc < stcb->asoc.my_rwnd_control_len) { |
129 | 0 | calc = 1; |
130 | 0 | } |
131 | 0 | return (calc); |
132 | 0 | } |
133 | | |
134 | | /* |
135 | | * Build out our readq entry based on the incoming packet. |
136 | | */ |
137 | | struct sctp_queued_to_read * |
138 | | sctp_build_readq_entry(struct sctp_tcb *stcb, |
139 | | struct sctp_nets *net, |
140 | | uint32_t tsn, uint32_t ppid, |
141 | | uint32_t context, uint16_t sid, |
142 | | uint32_t mid, uint8_t flags, |
143 | | struct mbuf *dm) |
144 | 3.25k | { |
145 | 3.25k | struct sctp_queued_to_read *read_queue_e = NULL; |
146 | | |
147 | 3.25k | sctp_alloc_a_readq(stcb, read_queue_e); |
148 | 3.25k | if (read_queue_e == NULL) { |
149 | 0 | goto failed_build; |
150 | 0 | } |
151 | 3.25k | memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read)); |
152 | 3.25k | read_queue_e->sinfo_stream = sid; |
153 | 3.25k | read_queue_e->sinfo_flags = (flags << 8); |
154 | 3.25k | read_queue_e->sinfo_ppid = ppid; |
155 | 3.25k | read_queue_e->sinfo_context = context; |
156 | 3.25k | read_queue_e->sinfo_tsn = tsn; |
157 | 3.25k | read_queue_e->sinfo_cumtsn = tsn; |
158 | 3.25k | read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb); |
159 | 3.25k | read_queue_e->mid = mid; |
160 | 3.25k | read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff; |
161 | 3.25k | TAILQ_INIT(&read_queue_e->reasm); |
162 | 3.25k | read_queue_e->whoFrom = net; |
163 | 3.25k | atomic_add_int(&net->ref_count, 1); |
164 | 3.25k | read_queue_e->data = dm; |
165 | 3.25k | read_queue_e->stcb = stcb; |
166 | 3.25k | read_queue_e->port_from = stcb->rport; |
167 | 3.25k | if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { |
168 | 0 | read_queue_e->do_not_ref_stcb = 1; |
169 | 0 | } |
170 | 3.25k | failed_build: |
171 | 3.25k | return (read_queue_e); |
172 | 3.25k | } |
173 | | |
174 | | struct mbuf * |
175 | | sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo) |
176 | 0 | { |
177 | 0 | struct sctp_extrcvinfo *seinfo; |
178 | 0 | struct sctp_sndrcvinfo *outinfo; |
179 | 0 | struct sctp_rcvinfo *rcvinfo; |
180 | 0 | struct sctp_nxtinfo *nxtinfo; |
181 | | #if defined(_WIN32) |
182 | | WSACMSGHDR *cmh; |
183 | | #else |
184 | 0 | struct cmsghdr *cmh; |
185 | 0 | #endif |
186 | 0 | struct mbuf *ret; |
187 | 0 | int len; |
188 | 0 | int use_extended; |
189 | 0 | int provide_nxt; |
190 | |
|
191 | 0 | if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && |
192 | 0 | sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && |
193 | 0 | sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { |
194 | | /* user does not want any ancillary data */ |
195 | 0 | return (NULL); |
196 | 0 | } |
197 | | |
198 | 0 | len = 0; |
199 | 0 | if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) { |
200 | 0 | len += CMSG_SPACE(sizeof(struct sctp_rcvinfo)); |
201 | 0 | } |
202 | 0 | seinfo = (struct sctp_extrcvinfo *)sinfo; |
203 | 0 | if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) && |
204 | 0 | (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) { |
205 | 0 | provide_nxt = 1; |
206 | 0 | len += CMSG_SPACE(sizeof(struct sctp_nxtinfo)); |
207 | 0 | } else { |
208 | 0 | provide_nxt = 0; |
209 | 0 | } |
210 | 0 | if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) { |
211 | 0 | if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) { |
212 | 0 | use_extended = 1; |
213 | 0 | len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo)); |
214 | 0 | } else { |
215 | 0 | use_extended = 0; |
216 | 0 | len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo)); |
217 | 0 | } |
218 | 0 | } else { |
219 | 0 | use_extended = 0; |
220 | 0 | } |
221 | |
|
222 | 0 | ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); |
223 | 0 | if (ret == NULL) { |
224 | | /* No space */ |
225 | 0 | return (ret); |
226 | 0 | } |
227 | 0 | SCTP_BUF_LEN(ret) = 0; |
228 | | |
229 | | /* We need a CMSG header followed by the struct */ |
230 | | #if defined(_WIN32) |
231 | | cmh = mtod(ret, WSACMSGHDR *); |
232 | | #else |
233 | 0 | cmh = mtod(ret, struct cmsghdr *); |
234 | 0 | #endif |
235 | | /* |
236 | | * Make sure that there is no un-initialized padding between |
237 | | * the cmsg header and cmsg data and after the cmsg data. |
238 | | */ |
239 | 0 | memset(cmh, 0, len); |
240 | 0 | if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) { |
241 | 0 | cmh->cmsg_level = IPPROTO_SCTP; |
242 | 0 | cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo)); |
243 | 0 | cmh->cmsg_type = SCTP_RCVINFO; |
244 | 0 | rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh); |
245 | 0 | rcvinfo->rcv_sid = sinfo->sinfo_stream; |
246 | 0 | rcvinfo->rcv_ssn = sinfo->sinfo_ssn; |
247 | 0 | rcvinfo->rcv_flags = sinfo->sinfo_flags; |
248 | 0 | rcvinfo->rcv_ppid = sinfo->sinfo_ppid; |
249 | 0 | rcvinfo->rcv_tsn = sinfo->sinfo_tsn; |
250 | 0 | rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn; |
251 | 0 | rcvinfo->rcv_context = sinfo->sinfo_context; |
252 | 0 | rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id; |
253 | | #if defined(_WIN32) |
254 | | cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo))); |
255 | | #else |
256 | 0 | cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo))); |
257 | 0 | #endif |
258 | 0 | SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo)); |
259 | 0 | } |
260 | 0 | if (provide_nxt) { |
261 | 0 | cmh->cmsg_level = IPPROTO_SCTP; |
262 | 0 | cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo)); |
263 | 0 | cmh->cmsg_type = SCTP_NXTINFO; |
264 | 0 | nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh); |
265 | 0 | nxtinfo->nxt_sid = seinfo->serinfo_next_stream; |
266 | 0 | nxtinfo->nxt_flags = 0; |
267 | 0 | if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) { |
268 | 0 | nxtinfo->nxt_flags |= SCTP_UNORDERED; |
269 | 0 | } |
270 | 0 | if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) { |
271 | 0 | nxtinfo->nxt_flags |= SCTP_NOTIFICATION; |
272 | 0 | } |
273 | 0 | if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) { |
274 | 0 | nxtinfo->nxt_flags |= SCTP_COMPLETE; |
275 | 0 | } |
276 | 0 | nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid; |
277 | 0 | nxtinfo->nxt_length = seinfo->serinfo_next_length; |
278 | 0 | nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid; |
279 | | #if defined(_WIN32) |
280 | | cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo))); |
281 | | #else |
282 | 0 | cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo))); |
283 | 0 | #endif |
284 | 0 | SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo)); |
285 | 0 | } |
286 | 0 | if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) { |
287 | 0 | cmh->cmsg_level = IPPROTO_SCTP; |
288 | 0 | outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh); |
289 | 0 | if (use_extended) { |
290 | 0 | cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo)); |
291 | 0 | cmh->cmsg_type = SCTP_EXTRCV; |
292 | 0 | memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo)); |
293 | 0 | SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo)); |
294 | 0 | } else { |
295 | 0 | cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); |
296 | 0 | cmh->cmsg_type = SCTP_SNDRCV; |
297 | 0 | *outinfo = *sinfo; |
298 | 0 | SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo)); |
299 | 0 | } |
300 | 0 | } |
301 | 0 | return (ret); |
302 | 0 | } |
303 | | |
304 | | static void |
305 | | sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn) |
306 | 0 | { |
307 | 0 | uint32_t gap, i; |
308 | 0 | int in_r, in_nr; |
309 | |
|
310 | 0 | if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { |
311 | 0 | return; |
312 | 0 | } |
313 | 0 | if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) { |
314 | | /* |
315 | | * This tsn is behind the cum ack and thus we don't |
316 | | * need to worry about it being moved from one to the other. |
317 | | */ |
318 | 0 | return; |
319 | 0 | } |
320 | 0 | SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn); |
321 | 0 | in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap); |
322 | 0 | in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap); |
323 | 0 | KASSERT(in_r || in_nr, ("%s: Things are really messed up now", __func__)); |
324 | 0 | if (!in_nr) { |
325 | 0 | SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); |
326 | 0 | if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { |
327 | 0 | asoc->highest_tsn_inside_nr_map = tsn; |
328 | 0 | } |
329 | 0 | } |
330 | 0 | if (in_r) { |
331 | 0 | SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap); |
332 | 0 | if (tsn == asoc->highest_tsn_inside_map) { |
333 | | /* We must back down to see what the new highest is. */ |
334 | 0 | for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) { |
335 | 0 | SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn); |
336 | 0 | if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) { |
337 | 0 | asoc->highest_tsn_inside_map = i; |
338 | 0 | break; |
339 | 0 | } |
340 | 0 | } |
341 | 0 | if (!SCTP_TSN_GE(i, asoc->mapping_array_base_tsn)) { |
342 | 0 | asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1; |
343 | 0 | } |
344 | 0 | } |
345 | 0 | } |
346 | 0 | } |
347 | | |
348 | | static int |
349 | | sctp_place_control_in_stream(struct sctp_stream_in *strm, |
350 | | struct sctp_association *asoc, |
351 | | struct sctp_queued_to_read *control) |
352 | 0 | { |
353 | 0 | struct sctp_queued_to_read *at; |
354 | 0 | struct sctp_readhead *q; |
355 | 0 | uint8_t flags, unordered; |
356 | |
|
357 | 0 | flags = (control->sinfo_flags >> 8); |
358 | 0 | unordered = flags & SCTP_DATA_UNORDERED; |
359 | 0 | if (unordered) { |
360 | 0 | q = &strm->uno_inqueue; |
361 | 0 | if (asoc->idata_supported == 0) { |
362 | 0 | if (!TAILQ_EMPTY(q)) { |
363 | | /* Only one stream can be here in old style -- abort */ |
364 | 0 | return (-1); |
365 | 0 | } |
366 | 0 | TAILQ_INSERT_TAIL(q, control, next_instrm); |
367 | 0 | control->on_strm_q = SCTP_ON_UNORDERED; |
368 | 0 | return (0); |
369 | 0 | } |
370 | 0 | } else { |
371 | 0 | q = &strm->inqueue; |
372 | 0 | } |
373 | 0 | if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { |
374 | 0 | control->end_added = 1; |
375 | 0 | control->first_frag_seen = 1; |
376 | 0 | control->last_frag_seen = 1; |
377 | 0 | } |
378 | 0 | if (TAILQ_EMPTY(q)) { |
379 | | /* Empty queue */ |
380 | 0 | TAILQ_INSERT_HEAD(q, control, next_instrm); |
381 | 0 | if (unordered) { |
382 | 0 | control->on_strm_q = SCTP_ON_UNORDERED; |
383 | 0 | } else { |
384 | 0 | control->on_strm_q = SCTP_ON_ORDERED; |
385 | 0 | } |
386 | 0 | return (0); |
387 | 0 | } else { |
388 | 0 | TAILQ_FOREACH(at, q, next_instrm) { |
389 | 0 | if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) { |
390 | | /* |
391 | | * one in queue is bigger than the |
392 | | * new one, insert before this one |
393 | | */ |
394 | 0 | TAILQ_INSERT_BEFORE(at, control, next_instrm); |
395 | 0 | if (unordered) { |
396 | 0 | control->on_strm_q = SCTP_ON_UNORDERED; |
397 | 0 | } else { |
398 | 0 | control->on_strm_q = SCTP_ON_ORDERED; |
399 | 0 | } |
400 | 0 | break; |
401 | 0 | } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) { |
402 | | /* |
403 | | * Gak, He sent me a duplicate msg |
404 | | * id number?? return -1 to abort. |
405 | | */ |
406 | 0 | return (-1); |
407 | 0 | } else { |
408 | 0 | if (TAILQ_NEXT(at, next_instrm) == NULL) { |
409 | | /* |
410 | | * We are at the end, insert |
411 | | * it after this one |
412 | | */ |
413 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { |
414 | 0 | sctp_log_strm_del(control, at, |
415 | 0 | SCTP_STR_LOG_FROM_INSERT_TL); |
416 | 0 | } |
417 | 0 | TAILQ_INSERT_AFTER(q, at, control, next_instrm); |
418 | 0 | if (unordered) { |
419 | 0 | control->on_strm_q = SCTP_ON_UNORDERED; |
420 | 0 | } else { |
421 | 0 | control->on_strm_q = SCTP_ON_ORDERED; |
422 | 0 | } |
423 | 0 | break; |
424 | 0 | } |
425 | 0 | } |
426 | 0 | } |
427 | 0 | } |
428 | 0 | return (0); |
429 | 0 | } |
430 | | |
431 | | static void |
432 | | sctp_abort_in_reasm(struct sctp_tcb *stcb, |
433 | | struct sctp_queued_to_read *control, |
434 | | struct sctp_tmit_chunk *chk, |
435 | | int *abort_flag, int opspot) |
436 | 0 | { |
437 | 0 | char msg[SCTP_DIAG_INFO_LEN]; |
438 | 0 | struct mbuf *oper; |
439 | |
|
440 | 0 | if (stcb->asoc.idata_supported) { |
441 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), |
442 | 0 | "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x", |
443 | 0 | opspot, |
444 | 0 | control->fsn_included, |
445 | 0 | chk->rec.data.tsn, |
446 | 0 | chk->rec.data.sid, |
447 | 0 | chk->rec.data.fsn, chk->rec.data.mid); |
448 | 0 | } else { |
449 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), |
450 | 0 | "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x", |
451 | 0 | opspot, |
452 | 0 | control->fsn_included, |
453 | 0 | chk->rec.data.tsn, |
454 | 0 | chk->rec.data.sid, |
455 | 0 | chk->rec.data.fsn, |
456 | 0 | (uint16_t)chk->rec.data.mid); |
457 | 0 | } |
458 | 0 | oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
459 | 0 | sctp_m_freem(chk->data); |
460 | 0 | chk->data = NULL; |
461 | 0 | sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
462 | 0 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1; |
463 | 0 | sctp_abort_an_association(stcb->sctp_ep, stcb, oper, false, SCTP_SO_NOT_LOCKED); |
464 | 0 | *abort_flag = 1; |
465 | 0 | } |
466 | | |
467 | | static void |
468 | | sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control) |
469 | 0 | { |
470 | | /* |
471 | | * The control could not be placed and must be cleaned. |
472 | | */ |
473 | 0 | struct sctp_tmit_chunk *chk, *nchk; |
474 | 0 | TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) { |
475 | 0 | TAILQ_REMOVE(&control->reasm, chk, sctp_next); |
476 | 0 | if (chk->data) |
477 | 0 | sctp_m_freem(chk->data); |
478 | 0 | chk->data = NULL; |
479 | 0 | sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
480 | 0 | } |
481 | 0 | sctp_free_remote_addr(control->whoFrom); |
482 | 0 | if (control->data) { |
483 | 0 | sctp_m_freem(control->data); |
484 | 0 | control->data = NULL; |
485 | 0 | } |
486 | 0 | sctp_free_a_readq(stcb, control); |
487 | 0 | } |
488 | | |
489 | | /* |
490 | | * Queue the chunk either right into the socket buffer if it is the next one |
491 | | * to go OR put it in the correct place in the delivery queue. If we do |
492 | | * append to the so_buf, keep doing so until we are out of order as |
493 | | * long as the control's entered are non-fragmented. |
494 | | */ |
495 | | static void |
496 | | sctp_queue_data_to_stream(struct sctp_tcb *stcb, |
497 | | struct sctp_association *asoc, |
498 | | struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm) |
499 | 0 | { |
500 | | /* |
501 | | * FIX-ME maybe? What happens when the ssn wraps? If we are getting |
502 | | * all the data in one stream this could happen quite rapidly. One |
503 | | * could use the TSN to keep track of things, but this scheme breaks |
504 | | * down in the other type of stream usage that could occur. Send a |
505 | | * single msg to stream 0, send 4Billion messages to stream 1, now |
506 | | * send a message to stream 0. You have a situation where the TSN |
507 | | * has wrapped but not in the stream. Is this worth worrying about |
508 | | * or should we just change our queue sort at the bottom to be by |
509 | | * TSN. |
510 | | * |
511 | | * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2 |
512 | | * with TSN 1? If the peer is doing some sort of funky TSN/SSN |
513 | | * assignment this could happen... and I don't see how this would be |
514 | | * a violation. So for now I am undecided an will leave the sort by |
515 | | * SSN alone. Maybe a hybrid approach is the answer |
516 | | * |
517 | | */ |
518 | 0 | struct sctp_queued_to_read *at; |
519 | 0 | int queue_needed; |
520 | 0 | uint32_t nxt_todel; |
521 | 0 | struct mbuf *op_err; |
522 | 0 | struct sctp_stream_in *strm; |
523 | 0 | char msg[SCTP_DIAG_INFO_LEN]; |
524 | |
|
525 | 0 | strm = &asoc->strmin[control->sinfo_stream]; |
526 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { |
527 | 0 | sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD); |
528 | 0 | } |
529 | 0 | if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) { |
530 | | /* The incoming sseq is behind where we last delivered? */ |
531 | 0 | SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n", |
532 | 0 | strm->last_mid_delivered, control->mid); |
533 | | /* |
534 | | * throw it in the stream so it gets cleaned up in |
535 | | * association destruction |
536 | | */ |
537 | 0 | TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm); |
538 | 0 | if (asoc->idata_supported) { |
539 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x", |
540 | 0 | strm->last_mid_delivered, control->sinfo_tsn, |
541 | 0 | control->sinfo_stream, control->mid); |
542 | 0 | } else { |
543 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", |
544 | 0 | (uint16_t)strm->last_mid_delivered, |
545 | 0 | control->sinfo_tsn, |
546 | 0 | control->sinfo_stream, |
547 | 0 | (uint16_t)control->mid); |
548 | 0 | } |
549 | 0 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
550 | 0 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2; |
551 | 0 | sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
552 | 0 | *abort_flag = 1; |
553 | 0 | return; |
554 | 0 | } |
555 | 0 | queue_needed = 1; |
556 | 0 | asoc->size_on_all_streams += control->length; |
557 | 0 | sctp_ucount_incr(asoc->cnt_on_all_streams); |
558 | 0 | nxt_todel = strm->last_mid_delivered + 1; |
559 | 0 | if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) { |
560 | | #if defined(__APPLE__) && !defined(__Userspace__) |
561 | | struct socket *so; |
562 | | |
563 | | so = SCTP_INP_SO(stcb->sctp_ep); |
564 | | atomic_add_int(&stcb->asoc.refcnt, 1); |
565 | | SCTP_TCB_UNLOCK(stcb); |
566 | | SCTP_SOCKET_LOCK(so, 1); |
567 | | SCTP_TCB_LOCK(stcb); |
568 | | atomic_subtract_int(&stcb->asoc.refcnt, 1); |
569 | | if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { |
570 | | SCTP_SOCKET_UNLOCK(so, 1); |
571 | | return; |
572 | | } |
573 | | #endif |
574 | | /* can be delivered right away? */ |
575 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { |
576 | 0 | sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL); |
577 | 0 | } |
578 | | /* EY it wont be queued if it could be delivered directly */ |
579 | 0 | queue_needed = 0; |
580 | 0 | if (asoc->size_on_all_streams >= control->length) { |
581 | 0 | asoc->size_on_all_streams -= control->length; |
582 | 0 | } else { |
583 | 0 | #ifdef INVARIANTS |
584 | 0 | panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); |
585 | | #else |
586 | | asoc->size_on_all_streams = 0; |
587 | | #endif |
588 | 0 | } |
589 | 0 | sctp_ucount_decr(asoc->cnt_on_all_streams); |
590 | 0 | strm->last_mid_delivered++; |
591 | 0 | sctp_mark_non_revokable(asoc, control->sinfo_tsn); |
592 | 0 | sctp_add_to_readq(stcb->sctp_ep, stcb, |
593 | 0 | control, |
594 | 0 | &stcb->sctp_socket->so_rcv, 1, |
595 | 0 | SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED); |
596 | 0 | TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) { |
597 | | /* all delivered */ |
598 | 0 | nxt_todel = strm->last_mid_delivered + 1; |
599 | 0 | if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) && |
600 | 0 | (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) { |
601 | 0 | if (control->on_strm_q == SCTP_ON_ORDERED) { |
602 | 0 | TAILQ_REMOVE(&strm->inqueue, control, next_instrm); |
603 | 0 | if (asoc->size_on_all_streams >= control->length) { |
604 | 0 | asoc->size_on_all_streams -= control->length; |
605 | 0 | } else { |
606 | 0 | #ifdef INVARIANTS |
607 | 0 | panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); |
608 | | #else |
609 | | asoc->size_on_all_streams = 0; |
610 | | #endif |
611 | 0 | } |
612 | 0 | sctp_ucount_decr(asoc->cnt_on_all_streams); |
613 | 0 | #ifdef INVARIANTS |
614 | 0 | } else { |
615 | 0 | panic("Huh control: %p is on_strm_q: %d", |
616 | 0 | control, control->on_strm_q); |
617 | 0 | #endif |
618 | 0 | } |
619 | 0 | control->on_strm_q = 0; |
620 | 0 | strm->last_mid_delivered++; |
621 | | /* |
622 | | * We ignore the return of deliver_data here |
623 | | * since we always can hold the chunk on the |
624 | | * d-queue. And we have a finite number that |
625 | | * can be delivered from the strq. |
626 | | */ |
627 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { |
628 | 0 | sctp_log_strm_del(control, NULL, |
629 | 0 | SCTP_STR_LOG_FROM_IMMED_DEL); |
630 | 0 | } |
631 | 0 | sctp_mark_non_revokable(asoc, control->sinfo_tsn); |
632 | 0 | sctp_add_to_readq(stcb->sctp_ep, stcb, |
633 | 0 | control, |
634 | 0 | &stcb->sctp_socket->so_rcv, 1, |
635 | 0 | SCTP_READ_LOCK_NOT_HELD, |
636 | 0 | SCTP_SO_LOCKED); |
637 | 0 | continue; |
638 | 0 | } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) { |
639 | 0 | *need_reasm = 1; |
640 | 0 | } |
641 | 0 | break; |
642 | 0 | } |
643 | | #if defined(__APPLE__) && !defined(__Userspace__) |
644 | | SCTP_SOCKET_UNLOCK(so, 1); |
645 | | #endif |
646 | 0 | } |
647 | 0 | if (queue_needed) { |
648 | | /* |
649 | | * Ok, we did not deliver this guy, find the correct place |
650 | | * to put it on the queue. |
651 | | */ |
652 | 0 | if (sctp_place_control_in_stream(strm, asoc, control)) { |
653 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), |
654 | 0 | "Queue to str MID: %u duplicate", control->mid); |
655 | 0 | sctp_clean_up_control(stcb, control); |
656 | 0 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
657 | 0 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3; |
658 | 0 | sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
659 | 0 | *abort_flag = 1; |
660 | 0 | } |
661 | 0 | } |
662 | 0 | } |
663 | | |
664 | | static void |
665 | | sctp_setup_tail_pointer(struct sctp_queued_to_read *control) |
666 | 0 | { |
667 | 0 | struct mbuf *m, *prev = NULL; |
668 | 0 | struct sctp_tcb *stcb; |
669 | |
|
670 | 0 | stcb = control->stcb; |
671 | 0 | control->held_length = 0; |
672 | 0 | control->length = 0; |
673 | 0 | m = control->data; |
674 | 0 | while (m) { |
675 | 0 | if (SCTP_BUF_LEN(m) == 0) { |
676 | | /* Skip mbufs with NO length */ |
677 | 0 | if (prev == NULL) { |
678 | | /* First one */ |
679 | 0 | control->data = sctp_m_free(m); |
680 | 0 | m = control->data; |
681 | 0 | } else { |
682 | 0 | SCTP_BUF_NEXT(prev) = sctp_m_free(m); |
683 | 0 | m = SCTP_BUF_NEXT(prev); |
684 | 0 | } |
685 | 0 | if (m == NULL) { |
686 | 0 | control->tail_mbuf = prev; |
687 | 0 | } |
688 | 0 | continue; |
689 | 0 | } |
690 | 0 | prev = m; |
691 | 0 | atomic_add_int(&control->length, SCTP_BUF_LEN(m)); |
692 | 0 | if (control->on_read_q) { |
693 | | /* |
694 | | * On read queue so we must increment the |
695 | | * SB stuff, we assume caller has done any locks of SB. |
696 | | */ |
697 | 0 | sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m); |
698 | 0 | } |
699 | 0 | m = SCTP_BUF_NEXT(m); |
700 | 0 | } |
701 | 0 | if (prev) { |
702 | 0 | control->tail_mbuf = prev; |
703 | 0 | } |
704 | 0 | } |
705 | | |
706 | | static void |
707 | | sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added) |
708 | 0 | { |
709 | 0 | struct mbuf *prev=NULL; |
710 | 0 | struct sctp_tcb *stcb; |
711 | |
|
712 | 0 | stcb = control->stcb; |
713 | 0 | if (stcb == NULL) { |
714 | 0 | #ifdef INVARIANTS |
715 | 0 | panic("Control broken"); |
716 | | #else |
717 | | return; |
718 | | #endif |
719 | 0 | } |
720 | 0 | if (control->tail_mbuf == NULL) { |
721 | | /* TSNH */ |
722 | 0 | sctp_m_freem(control->data); |
723 | 0 | control->data = m; |
724 | 0 | sctp_setup_tail_pointer(control); |
725 | 0 | return; |
726 | 0 | } |
727 | 0 | control->tail_mbuf->m_next = m; |
728 | 0 | while (m) { |
729 | 0 | if (SCTP_BUF_LEN(m) == 0) { |
730 | | /* Skip mbufs with NO length */ |
731 | 0 | if (prev == NULL) { |
732 | | /* First one */ |
733 | 0 | control->tail_mbuf->m_next = sctp_m_free(m); |
734 | 0 | m = control->tail_mbuf->m_next; |
735 | 0 | } else { |
736 | 0 | SCTP_BUF_NEXT(prev) = sctp_m_free(m); |
737 | 0 | m = SCTP_BUF_NEXT(prev); |
738 | 0 | } |
739 | 0 | if (m == NULL) { |
740 | 0 | control->tail_mbuf = prev; |
741 | 0 | } |
742 | 0 | continue; |
743 | 0 | } |
744 | 0 | prev = m; |
745 | 0 | if (control->on_read_q) { |
746 | | /* |
747 | | * On read queue so we must increment the |
748 | | * SB stuff, we assume caller has done any locks of SB. |
749 | | */ |
750 | 0 | sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m); |
751 | 0 | } |
752 | 0 | *added += SCTP_BUF_LEN(m); |
753 | 0 | atomic_add_int(&control->length, SCTP_BUF_LEN(m)); |
754 | 0 | m = SCTP_BUF_NEXT(m); |
755 | 0 | } |
756 | 0 | if (prev) { |
757 | 0 | control->tail_mbuf = prev; |
758 | 0 | } |
759 | 0 | } |
760 | | |
761 | | static void |
762 | | sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control) |
763 | 0 | { |
764 | 0 | memset(nc, 0, sizeof(struct sctp_queued_to_read)); |
765 | 0 | nc->sinfo_stream = control->sinfo_stream; |
766 | 0 | nc->mid = control->mid; |
767 | 0 | TAILQ_INIT(&nc->reasm); |
768 | 0 | nc->top_fsn = control->top_fsn; |
769 | 0 | nc->mid = control->mid; |
770 | 0 | nc->sinfo_flags = control->sinfo_flags; |
771 | 0 | nc->sinfo_ppid = control->sinfo_ppid; |
772 | 0 | nc->sinfo_context = control->sinfo_context; |
773 | 0 | nc->fsn_included = 0xffffffff; |
774 | 0 | nc->sinfo_tsn = control->sinfo_tsn; |
775 | 0 | nc->sinfo_cumtsn = control->sinfo_cumtsn; |
776 | 0 | nc->sinfo_assoc_id = control->sinfo_assoc_id; |
777 | 0 | nc->whoFrom = control->whoFrom; |
778 | 0 | atomic_add_int(&nc->whoFrom->ref_count, 1); |
779 | 0 | nc->stcb = control->stcb; |
780 | 0 | nc->port_from = control->port_from; |
781 | 0 | nc->do_not_ref_stcb = control->do_not_ref_stcb; |
782 | 0 | } |
783 | | |
784 | | static int |
785 | | sctp_handle_old_unordered_data(struct sctp_tcb *stcb, |
786 | | struct sctp_association *asoc, |
787 | | struct sctp_stream_in *strm, |
788 | | struct sctp_queued_to_read *control, |
789 | | uint32_t pd_point, |
790 | | int inp_read_lock_held) |
791 | 0 | { |
792 | | /* Special handling for the old un-ordered data chunk. |
793 | | * All the chunks/TSN's go to mid 0. So |
794 | | * we have to do the old style watching to see |
795 | | * if we have it all. If you return one, no other |
796 | | * control entries on the un-ordered queue will |
797 | | * be looked at. In theory there should be no others |
798 | | * entries in reality, unless the guy is sending both |
799 | | * unordered NDATA and unordered DATA... |
800 | | */ |
801 | 0 | struct sctp_tmit_chunk *chk, *lchk, *tchk; |
802 | 0 | uint32_t fsn; |
803 | 0 | struct sctp_queued_to_read *nc; |
804 | 0 | int cnt_added; |
805 | |
|
806 | 0 | if (control->first_frag_seen == 0) { |
807 | | /* Nothing we can do, we have not seen the first piece yet */ |
808 | 0 | return (1); |
809 | 0 | } |
810 | | /* Collapse any we can */ |
811 | 0 | cnt_added = 0; |
812 | 0 | restart: |
813 | 0 | fsn = control->fsn_included + 1; |
814 | | /* Now what can we add? */ |
815 | 0 | TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) { |
816 | 0 | if (chk->rec.data.fsn == fsn) { |
817 | | /* Ok lets add it */ |
818 | 0 | sctp_alloc_a_readq(stcb, nc); |
819 | 0 | if (nc == NULL) { |
820 | 0 | break; |
821 | 0 | } |
822 | 0 | memset(nc, 0, sizeof(struct sctp_queued_to_read)); |
823 | 0 | TAILQ_REMOVE(&control->reasm, chk, sctp_next); |
824 | 0 | sctp_add_chk_to_control(control, strm, stcb, asoc, chk, inp_read_lock_held); |
825 | 0 | fsn++; |
826 | 0 | cnt_added++; |
827 | 0 | chk = NULL; |
828 | 0 | if (control->end_added) { |
829 | | /* We are done */ |
830 | 0 | if (!TAILQ_EMPTY(&control->reasm)) { |
831 | | /* |
832 | | * Ok we have to move anything left on |
833 | | * the control queue to a new control. |
834 | | */ |
835 | 0 | sctp_build_readq_entry_from_ctl(nc, control); |
836 | 0 | tchk = TAILQ_FIRST(&control->reasm); |
837 | 0 | if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { |
838 | 0 | TAILQ_REMOVE(&control->reasm, tchk, sctp_next); |
839 | 0 | if (asoc->size_on_reasm_queue >= tchk->send_size) { |
840 | 0 | asoc->size_on_reasm_queue -= tchk->send_size; |
841 | 0 | } else { |
842 | 0 | #ifdef INVARIANTS |
843 | 0 | panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size); |
844 | | #else |
845 | | asoc->size_on_reasm_queue = 0; |
846 | | #endif |
847 | 0 | } |
848 | 0 | sctp_ucount_decr(asoc->cnt_on_reasm_queue); |
849 | 0 | nc->first_frag_seen = 1; |
850 | 0 | nc->fsn_included = tchk->rec.data.fsn; |
851 | 0 | nc->data = tchk->data; |
852 | 0 | nc->sinfo_ppid = tchk->rec.data.ppid; |
853 | 0 | nc->sinfo_tsn = tchk->rec.data.tsn; |
854 | 0 | sctp_mark_non_revokable(asoc, tchk->rec.data.tsn); |
855 | 0 | tchk->data = NULL; |
856 | 0 | sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED); |
857 | 0 | sctp_setup_tail_pointer(nc); |
858 | 0 | tchk = TAILQ_FIRST(&control->reasm); |
859 | 0 | } |
860 | | /* Spin the rest onto the queue */ |
861 | 0 | while (tchk) { |
862 | 0 | TAILQ_REMOVE(&control->reasm, tchk, sctp_next); |
863 | 0 | TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next); |
864 | 0 | tchk = TAILQ_FIRST(&control->reasm); |
865 | 0 | } |
866 | | /* Now lets add it to the queue after removing control */ |
867 | 0 | TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm); |
868 | 0 | nc->on_strm_q = SCTP_ON_UNORDERED; |
869 | 0 | if (control->on_strm_q) { |
870 | 0 | TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); |
871 | 0 | control->on_strm_q = 0; |
872 | 0 | } |
873 | 0 | } |
874 | 0 | if (control->pdapi_started) { |
875 | 0 | strm->pd_api_started = 0; |
876 | 0 | control->pdapi_started = 0; |
877 | 0 | } |
878 | 0 | if (control->on_strm_q) { |
879 | 0 | TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); |
880 | 0 | control->on_strm_q = 0; |
881 | 0 | SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); |
882 | 0 | } |
883 | 0 | if (control->on_read_q == 0) { |
884 | 0 | sctp_add_to_readq(stcb->sctp_ep, stcb, control, |
885 | 0 | &stcb->sctp_socket->so_rcv, control->end_added, |
886 | 0 | inp_read_lock_held, SCTP_SO_NOT_LOCKED); |
887 | 0 | #if defined(__Userspace__) |
888 | 0 | } else { |
889 | 0 | sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held); |
890 | 0 | #endif |
891 | 0 | } |
892 | 0 | sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); |
893 | 0 | if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) { |
894 | | /* Switch to the new guy and continue */ |
895 | 0 | control = nc; |
896 | 0 | goto restart; |
897 | 0 | } else { |
898 | 0 | if (nc->on_strm_q == 0) { |
899 | 0 | sctp_free_a_readq(stcb, nc); |
900 | 0 | } |
901 | 0 | } |
902 | 0 | return (1); |
903 | 0 | } else { |
904 | 0 | sctp_free_a_readq(stcb, nc); |
905 | 0 | } |
906 | 0 | } else { |
907 | | /* Can't add more */ |
908 | 0 | break; |
909 | 0 | } |
910 | 0 | } |
911 | 0 | if (cnt_added && strm->pd_api_started) { |
912 | 0 | #if defined(__Userspace__) |
913 | 0 | sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held); |
914 | 0 | #endif |
915 | 0 | sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); |
916 | 0 | } |
917 | 0 | if ((control->length > pd_point) && (strm->pd_api_started == 0)) { |
918 | 0 | strm->pd_api_started = 1; |
919 | 0 | control->pdapi_started = 1; |
920 | 0 | sctp_add_to_readq(stcb->sctp_ep, stcb, control, |
921 | 0 | &stcb->sctp_socket->so_rcv, control->end_added, |
922 | 0 | inp_read_lock_held, SCTP_SO_NOT_LOCKED); |
923 | 0 | sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); |
924 | 0 | return (0); |
925 | 0 | } else { |
926 | 0 | return (1); |
927 | 0 | } |
928 | 0 | } |
929 | | |
930 | | static void |
931 | | sctp_inject_old_unordered_data(struct sctp_tcb *stcb, |
932 | | struct sctp_association *asoc, |
933 | | struct sctp_queued_to_read *control, |
934 | | struct sctp_tmit_chunk *chk, |
935 | | int *abort_flag) |
936 | 0 | { |
937 | 0 | struct sctp_tmit_chunk *at; |
938 | 0 | int inserted; |
939 | | /* |
940 | | * Here we need to place the chunk into the control structure |
941 | | * sorted in the correct order. |
942 | | */ |
943 | 0 | if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { |
944 | | /* Its the very first one. */ |
945 | 0 | SCTPDBG(SCTP_DEBUG_XXX, |
946 | 0 | "chunk is a first fsn: %u becomes fsn_included\n", |
947 | 0 | chk->rec.data.fsn); |
948 | 0 | at = TAILQ_FIRST(&control->reasm); |
949 | 0 | if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) { |
950 | | /* |
951 | | * The first chunk in the reassembly is |
952 | | * a smaller TSN than this one, even though |
953 | | * this has a first, it must be from a subsequent |
954 | | * msg. |
955 | | */ |
956 | 0 | goto place_chunk; |
957 | 0 | } |
958 | 0 | if (control->first_frag_seen) { |
959 | | /* |
960 | | * In old un-ordered we can reassembly on |
961 | | * one control multiple messages. As long |
962 | | * as the next FIRST is greater then the old |
963 | | * first (TSN i.e. FSN wise) |
964 | | */ |
965 | 0 | struct mbuf *tdata; |
966 | 0 | uint32_t tmp; |
967 | |
|
968 | 0 | if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) { |
969 | | /* Easy way the start of a new guy beyond the lowest */ |
970 | 0 | goto place_chunk; |
971 | 0 | } |
972 | 0 | if ((chk->rec.data.fsn == control->fsn_included) || |
973 | 0 | (control->pdapi_started)) { |
974 | | /* |
975 | | * Ok this should not happen, if it does |
976 | | * we started the pd-api on the higher TSN (since |
977 | | * the equals part is a TSN failure it must be that). |
978 | | * |
979 | | * We are completely hosed in that case since I have |
980 | | * no way to recover. This really will only happen |
981 | | * if we can get more TSN's higher before the pd-api-point. |
982 | | */ |
983 | 0 | sctp_abort_in_reasm(stcb, control, chk, |
984 | 0 | abort_flag, |
985 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_4); |
986 | |
|
987 | 0 | return; |
988 | 0 | } |
989 | | /* |
990 | | * Ok we have two firsts and the one we just got |
991 | | * is smaller than the one we previously placed.. yuck! |
992 | | * We must swap them out. |
993 | | */ |
994 | | /* swap the mbufs */ |
995 | 0 | tdata = control->data; |
996 | 0 | control->data = chk->data; |
997 | 0 | chk->data = tdata; |
998 | | /* Save the lengths */ |
999 | 0 | chk->send_size = control->length; |
1000 | | /* Recompute length of control and tail pointer */ |
1001 | 0 | sctp_setup_tail_pointer(control); |
1002 | | /* Fix the FSN included */ |
1003 | 0 | tmp = control->fsn_included; |
1004 | 0 | control->fsn_included = chk->rec.data.fsn; |
1005 | 0 | chk->rec.data.fsn = tmp; |
1006 | | /* Fix the TSN included */ |
1007 | 0 | tmp = control->sinfo_tsn; |
1008 | 0 | control->sinfo_tsn = chk->rec.data.tsn; |
1009 | 0 | chk->rec.data.tsn = tmp; |
1010 | | /* Fix the PPID included */ |
1011 | 0 | tmp = control->sinfo_ppid; |
1012 | 0 | control->sinfo_ppid = chk->rec.data.ppid; |
1013 | 0 | chk->rec.data.ppid = tmp; |
1014 | | /* Fix tail pointer */ |
1015 | 0 | goto place_chunk; |
1016 | 0 | } |
1017 | 0 | control->first_frag_seen = 1; |
1018 | 0 | control->fsn_included = chk->rec.data.fsn; |
1019 | 0 | control->top_fsn = chk->rec.data.fsn; |
1020 | 0 | control->sinfo_tsn = chk->rec.data.tsn; |
1021 | 0 | control->sinfo_ppid = chk->rec.data.ppid; |
1022 | 0 | control->data = chk->data; |
1023 | 0 | sctp_mark_non_revokable(asoc, chk->rec.data.tsn); |
1024 | 0 | chk->data = NULL; |
1025 | 0 | sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
1026 | 0 | sctp_setup_tail_pointer(control); |
1027 | 0 | return; |
1028 | 0 | } |
1029 | 0 | place_chunk: |
1030 | 0 | inserted = 0; |
1031 | 0 | TAILQ_FOREACH(at, &control->reasm, sctp_next) { |
1032 | 0 | if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) { |
1033 | | /* |
1034 | | * This one in queue is bigger than the new one, insert |
1035 | | * the new one before at. |
1036 | | */ |
1037 | 0 | asoc->size_on_reasm_queue += chk->send_size; |
1038 | 0 | sctp_ucount_incr(asoc->cnt_on_reasm_queue); |
1039 | 0 | inserted = 1; |
1040 | 0 | TAILQ_INSERT_BEFORE(at, chk, sctp_next); |
1041 | 0 | break; |
1042 | 0 | } else if (at->rec.data.fsn == chk->rec.data.fsn) { |
1043 | | /* |
1044 | | * They sent a duplicate fsn number. This |
1045 | | * really should not happen since the FSN is |
1046 | | * a TSN and it should have been dropped earlier. |
1047 | | */ |
1048 | 0 | sctp_abort_in_reasm(stcb, control, chk, |
1049 | 0 | abort_flag, |
1050 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_5); |
1051 | 0 | return; |
1052 | 0 | } |
1053 | 0 | } |
1054 | 0 | if (inserted == 0) { |
1055 | | /* Its at the end */ |
1056 | 0 | asoc->size_on_reasm_queue += chk->send_size; |
1057 | 0 | sctp_ucount_incr(asoc->cnt_on_reasm_queue); |
1058 | 0 | control->top_fsn = chk->rec.data.fsn; |
1059 | 0 | TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next); |
1060 | 0 | } |
1061 | 0 | } |
1062 | | |
1063 | | static int |
1064 | | sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc, |
1065 | | struct sctp_stream_in *strm, int inp_read_lock_held) |
1066 | 0 | { |
1067 | | /* |
1068 | | * Given a stream, strm, see if any of |
1069 | | * the SSN's on it that are fragmented |
1070 | | * are ready to deliver. If so go ahead |
1071 | | * and place them on the read queue. In |
1072 | | * so placing if we have hit the end, then |
1073 | | * we need to remove them from the stream's queue. |
1074 | | */ |
1075 | 0 | struct sctp_queued_to_read *control, *nctl = NULL; |
1076 | 0 | uint32_t next_to_del; |
1077 | 0 | uint32_t pd_point; |
1078 | 0 | int ret = 0; |
1079 | |
|
1080 | 0 | if (stcb->sctp_socket) { |
1081 | 0 | pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT, |
1082 | 0 | stcb->sctp_ep->partial_delivery_point); |
1083 | 0 | } else { |
1084 | 0 | pd_point = stcb->sctp_ep->partial_delivery_point; |
1085 | 0 | } |
1086 | 0 | control = TAILQ_FIRST(&strm->uno_inqueue); |
1087 | |
|
1088 | 0 | if ((control != NULL) && |
1089 | 0 | (asoc->idata_supported == 0)) { |
1090 | | /* Special handling needed for "old" data format */ |
1091 | 0 | if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) { |
1092 | 0 | goto done_un; |
1093 | 0 | } |
1094 | 0 | } |
1095 | 0 | if (strm->pd_api_started) { |
1096 | | /* Can't add more */ |
1097 | 0 | return (0); |
1098 | 0 | } |
1099 | 0 | while (control) { |
1100 | 0 | SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n", |
1101 | 0 | control, control->end_added, control->mid, control->top_fsn, control->fsn_included); |
1102 | 0 | nctl = TAILQ_NEXT(control, next_instrm); |
1103 | 0 | if (control->end_added) { |
1104 | | /* We just put the last bit on */ |
1105 | 0 | if (control->on_strm_q) { |
1106 | 0 | #ifdef INVARIANTS |
1107 | 0 | if (control->on_strm_q != SCTP_ON_UNORDERED) { |
1108 | 0 | panic("Huh control: %p on_q: %d -- not unordered?", |
1109 | 0 | control, control->on_strm_q); |
1110 | 0 | } |
1111 | 0 | #endif |
1112 | 0 | SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); |
1113 | 0 | TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); |
1114 | 0 | if (asoc->size_on_all_streams >= control->length) { |
1115 | 0 | asoc->size_on_all_streams -= control->length; |
1116 | 0 | } else { |
1117 | 0 | #ifdef INVARIANTS |
1118 | 0 | panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); |
1119 | | #else |
1120 | | asoc->size_on_all_streams = 0; |
1121 | | #endif |
1122 | 0 | } |
1123 | 0 | sctp_ucount_decr(asoc->cnt_on_all_streams); |
1124 | 0 | control->on_strm_q = 0; |
1125 | 0 | } |
1126 | 0 | if (control->on_read_q == 0) { |
1127 | 0 | sctp_add_to_readq(stcb->sctp_ep, stcb, |
1128 | 0 | control, |
1129 | 0 | &stcb->sctp_socket->so_rcv, control->end_added, |
1130 | 0 | inp_read_lock_held, SCTP_SO_NOT_LOCKED); |
1131 | 0 | } |
1132 | 0 | } else { |
1133 | | /* Can we do a PD-API for this un-ordered guy? */ |
1134 | 0 | if ((control->length >= pd_point) && (strm->pd_api_started == 0)) { |
1135 | 0 | strm->pd_api_started = 1; |
1136 | 0 | control->pdapi_started = 1; |
1137 | 0 | sctp_add_to_readq(stcb->sctp_ep, stcb, |
1138 | 0 | control, |
1139 | 0 | &stcb->sctp_socket->so_rcv, control->end_added, |
1140 | 0 | inp_read_lock_held, SCTP_SO_NOT_LOCKED); |
1141 | |
|
1142 | 0 | break; |
1143 | 0 | } |
1144 | 0 | } |
1145 | 0 | control = nctl; |
1146 | 0 | } |
1147 | 0 | done_un: |
1148 | 0 | control = TAILQ_FIRST(&strm->inqueue); |
1149 | 0 | if (strm->pd_api_started) { |
1150 | | /* Can't add more */ |
1151 | 0 | return (0); |
1152 | 0 | } |
1153 | 0 | if (control == NULL) { |
1154 | 0 | return (ret); |
1155 | 0 | } |
1156 | 0 | if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) { |
1157 | | /* Ok the guy at the top was being partially delivered |
1158 | | * completed, so we remove it. Note |
1159 | | * the pd_api flag was taken off when the |
1160 | | * chunk was merged on in sctp_queue_data_for_reasm below. |
1161 | | */ |
1162 | 0 | nctl = TAILQ_NEXT(control, next_instrm); |
1163 | 0 | SCTPDBG(SCTP_DEBUG_XXX, |
1164 | 0 | "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n", |
1165 | 0 | control, control->end_added, control->mid, |
1166 | 0 | control->top_fsn, control->fsn_included, |
1167 | 0 | strm->last_mid_delivered); |
1168 | 0 | if (control->end_added) { |
1169 | 0 | if (control->on_strm_q) { |
1170 | 0 | #ifdef INVARIANTS |
1171 | 0 | if (control->on_strm_q != SCTP_ON_ORDERED) { |
1172 | 0 | panic("Huh control: %p on_q: %d -- not ordered?", |
1173 | 0 | control, control->on_strm_q); |
1174 | 0 | } |
1175 | 0 | #endif |
1176 | 0 | SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); |
1177 | 0 | TAILQ_REMOVE(&strm->inqueue, control, next_instrm); |
1178 | 0 | if (asoc->size_on_all_streams >= control->length) { |
1179 | 0 | asoc->size_on_all_streams -= control->length; |
1180 | 0 | } else { |
1181 | 0 | #ifdef INVARIANTS |
1182 | 0 | panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); |
1183 | | #else |
1184 | | asoc->size_on_all_streams = 0; |
1185 | | #endif |
1186 | 0 | } |
1187 | 0 | sctp_ucount_decr(asoc->cnt_on_all_streams); |
1188 | 0 | control->on_strm_q = 0; |
1189 | 0 | } |
1190 | 0 | if (strm->pd_api_started && control->pdapi_started) { |
1191 | 0 | control->pdapi_started = 0; |
1192 | 0 | strm->pd_api_started = 0; |
1193 | 0 | } |
1194 | 0 | if (control->on_read_q == 0) { |
1195 | 0 | sctp_add_to_readq(stcb->sctp_ep, stcb, |
1196 | 0 | control, |
1197 | 0 | &stcb->sctp_socket->so_rcv, control->end_added, |
1198 | 0 | inp_read_lock_held, SCTP_SO_NOT_LOCKED); |
1199 | 0 | } |
1200 | 0 | control = nctl; |
1201 | 0 | } |
1202 | 0 | } |
1203 | 0 | if (strm->pd_api_started) { |
1204 | | /* Can't add more must have gotten an un-ordered above being partially delivered. */ |
1205 | 0 | return (0); |
1206 | 0 | } |
1207 | 0 | deliver_more: |
1208 | 0 | next_to_del = strm->last_mid_delivered + 1; |
1209 | 0 | if (control) { |
1210 | 0 | SCTPDBG(SCTP_DEBUG_XXX, |
1211 | 0 | "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n", |
1212 | 0 | control, control->end_added, control->mid, control->top_fsn, control->fsn_included, |
1213 | 0 | next_to_del); |
1214 | 0 | nctl = TAILQ_NEXT(control, next_instrm); |
1215 | 0 | if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) && |
1216 | 0 | (control->first_frag_seen)) { |
1217 | 0 | int done; |
1218 | | |
1219 | | /* Ok we can deliver it onto the stream. */ |
1220 | 0 | if (control->end_added) { |
1221 | | /* We are done with it afterwards */ |
1222 | 0 | if (control->on_strm_q) { |
1223 | 0 | #ifdef INVARIANTS |
1224 | 0 | if (control->on_strm_q != SCTP_ON_ORDERED) { |
1225 | 0 | panic("Huh control: %p on_q: %d -- not ordered?", |
1226 | 0 | control, control->on_strm_q); |
1227 | 0 | } |
1228 | 0 | #endif |
1229 | 0 | SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); |
1230 | 0 | TAILQ_REMOVE(&strm->inqueue, control, next_instrm); |
1231 | 0 | if (asoc->size_on_all_streams >= control->length) { |
1232 | 0 | asoc->size_on_all_streams -= control->length; |
1233 | 0 | } else { |
1234 | 0 | #ifdef INVARIANTS |
1235 | 0 | panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); |
1236 | | #else |
1237 | | asoc->size_on_all_streams = 0; |
1238 | | #endif |
1239 | 0 | } |
1240 | 0 | sctp_ucount_decr(asoc->cnt_on_all_streams); |
1241 | 0 | control->on_strm_q = 0; |
1242 | 0 | } |
1243 | 0 | ret++; |
1244 | 0 | } |
1245 | 0 | if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { |
1246 | | /* A singleton now slipping through - mark it non-revokable too */ |
1247 | 0 | sctp_mark_non_revokable(asoc, control->sinfo_tsn); |
1248 | 0 | } else if (control->end_added == 0) { |
1249 | | /* Check if we can defer adding until its all there */ |
1250 | 0 | if ((control->length < pd_point) || (strm->pd_api_started)) { |
1251 | | /* Don't need it or cannot add more (one being delivered that way) */ |
1252 | 0 | goto out; |
1253 | 0 | } |
1254 | 0 | } |
1255 | 0 | done = (control->end_added) && (control->last_frag_seen); |
1256 | 0 | if (control->on_read_q == 0) { |
1257 | 0 | if (!done) { |
1258 | 0 | if (asoc->size_on_all_streams >= control->length) { |
1259 | 0 | asoc->size_on_all_streams -= control->length; |
1260 | 0 | } else { |
1261 | 0 | #ifdef INVARIANTS |
1262 | 0 | panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); |
1263 | | #else |
1264 | | asoc->size_on_all_streams = 0; |
1265 | | #endif |
1266 | 0 | } |
1267 | 0 | strm->pd_api_started = 1; |
1268 | 0 | control->pdapi_started = 1; |
1269 | 0 | } |
1270 | 0 | sctp_add_to_readq(stcb->sctp_ep, stcb, |
1271 | 0 | control, |
1272 | 0 | &stcb->sctp_socket->so_rcv, control->end_added, |
1273 | 0 | inp_read_lock_held, SCTP_SO_NOT_LOCKED); |
1274 | 0 | } |
1275 | 0 | strm->last_mid_delivered = next_to_del; |
1276 | 0 | if (done) { |
1277 | 0 | control = nctl; |
1278 | 0 | goto deliver_more; |
1279 | 0 | } |
1280 | 0 | } |
1281 | 0 | } |
1282 | 0 | out: |
1283 | 0 | return (ret); |
1284 | 0 | } |
1285 | | |
1286 | | uint32_t |
1287 | | sctp_add_chk_to_control(struct sctp_queued_to_read *control, |
1288 | | struct sctp_stream_in *strm, |
1289 | | struct sctp_tcb *stcb, struct sctp_association *asoc, |
1290 | | struct sctp_tmit_chunk *chk, int hold_rlock) |
1291 | 0 | { |
1292 | | /* |
1293 | | * Given a control and a chunk, merge the |
1294 | | * data from the chk onto the control and free |
1295 | | * up the chunk resources. |
1296 | | */ |
1297 | 0 | uint32_t added = 0; |
1298 | 0 | bool i_locked = false; |
1299 | |
|
1300 | 0 | if (control->on_read_q) { |
1301 | 0 | if (hold_rlock == 0) { |
1302 | | /* Its being pd-api'd so we must do some locks. */ |
1303 | 0 | SCTP_INP_READ_LOCK(stcb->sctp_ep); |
1304 | 0 | i_locked = true; |
1305 | 0 | } |
1306 | 0 | if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { |
1307 | 0 | goto out; |
1308 | 0 | } |
1309 | 0 | } |
1310 | 0 | if (control->data == NULL) { |
1311 | 0 | control->data = chk->data; |
1312 | 0 | sctp_setup_tail_pointer(control); |
1313 | 0 | } else { |
1314 | 0 | sctp_add_to_tail_pointer(control, chk->data, &added); |
1315 | 0 | } |
1316 | 0 | control->fsn_included = chk->rec.data.fsn; |
1317 | 0 | asoc->size_on_reasm_queue -= chk->send_size; |
1318 | 0 | sctp_ucount_decr(asoc->cnt_on_reasm_queue); |
1319 | 0 | sctp_mark_non_revokable(asoc, chk->rec.data.tsn); |
1320 | 0 | chk->data = NULL; |
1321 | 0 | if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { |
1322 | 0 | control->first_frag_seen = 1; |
1323 | 0 | control->sinfo_tsn = chk->rec.data.tsn; |
1324 | 0 | control->sinfo_ppid = chk->rec.data.ppid; |
1325 | 0 | } |
1326 | 0 | if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { |
1327 | | /* Its complete */ |
1328 | 0 | if ((control->on_strm_q) && (control->on_read_q)) { |
1329 | 0 | if (control->pdapi_started) { |
1330 | 0 | control->pdapi_started = 0; |
1331 | 0 | strm->pd_api_started = 0; |
1332 | 0 | } |
1333 | 0 | if (control->on_strm_q == SCTP_ON_UNORDERED) { |
1334 | | /* Unordered */ |
1335 | 0 | TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); |
1336 | 0 | control->on_strm_q = 0; |
1337 | 0 | } else if (control->on_strm_q == SCTP_ON_ORDERED) { |
1338 | | /* Ordered */ |
1339 | 0 | TAILQ_REMOVE(&strm->inqueue, control, next_instrm); |
1340 | | /* |
1341 | | * Don't need to decrement size_on_all_streams, |
1342 | | * since control is on the read queue. |
1343 | | */ |
1344 | 0 | sctp_ucount_decr(asoc->cnt_on_all_streams); |
1345 | 0 | control->on_strm_q = 0; |
1346 | 0 | #ifdef INVARIANTS |
1347 | 0 | } else if (control->on_strm_q) { |
1348 | 0 | panic("Unknown state on ctrl: %p on_strm_q: %d", control, |
1349 | 0 | control->on_strm_q); |
1350 | 0 | #endif |
1351 | 0 | } |
1352 | 0 | } |
1353 | 0 | control->end_added = 1; |
1354 | 0 | control->last_frag_seen = 1; |
1355 | 0 | } |
1356 | 0 | out: |
1357 | 0 | if (i_locked) { |
1358 | 0 | SCTP_INP_READ_UNLOCK(stcb->sctp_ep); |
1359 | 0 | } |
1360 | 0 | sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
1361 | 0 | return (added); |
1362 | 0 | } |
1363 | | |
1364 | | /* |
1365 | | * Dump onto the re-assembly queue, in its proper place. After dumping on the |
1366 | | * queue, see if anything can be delivered. If so pull it off (or as much as |
1367 | | * we can. If we run out of space then we must dump what we can and set the |
1368 | | * appropriate flag to say we queued what we could. |
1369 | | */ |
1370 | | static void |
1371 | | sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc, |
1372 | | struct sctp_queued_to_read *control, |
1373 | | struct sctp_tmit_chunk *chk, |
1374 | | int created_control, |
1375 | | int *abort_flag, uint32_t tsn) |
1376 | 0 | { |
1377 | 0 | uint32_t next_fsn; |
1378 | 0 | struct sctp_tmit_chunk *at, *nat; |
1379 | 0 | struct sctp_stream_in *strm; |
1380 | 0 | int do_wakeup, unordered; |
1381 | 0 | uint32_t lenadded; |
1382 | |
|
1383 | 0 | strm = &asoc->strmin[control->sinfo_stream]; |
1384 | | /* |
1385 | | * For old un-ordered data chunks. |
1386 | | */ |
1387 | 0 | if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { |
1388 | 0 | unordered = 1; |
1389 | 0 | } else { |
1390 | 0 | unordered = 0; |
1391 | 0 | } |
1392 | | /* Must be added to the stream-in queue */ |
1393 | 0 | if (created_control) { |
1394 | 0 | if ((unordered == 0) || (asoc->idata_supported)) { |
1395 | 0 | sctp_ucount_incr(asoc->cnt_on_all_streams); |
1396 | 0 | } |
1397 | 0 | if (sctp_place_control_in_stream(strm, asoc, control)) { |
1398 | | /* Duplicate SSN? */ |
1399 | 0 | sctp_abort_in_reasm(stcb, control, chk, |
1400 | 0 | abort_flag, |
1401 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_6); |
1402 | 0 | sctp_clean_up_control(stcb, control); |
1403 | 0 | return; |
1404 | 0 | } |
1405 | 0 | if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) { |
1406 | | /* Ok we created this control and now |
1407 | | * lets validate that its legal i.e. there |
1408 | | * is a B bit set, if not and we have |
1409 | | * up to the cum-ack then its invalid. |
1410 | | */ |
1411 | 0 | if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) { |
1412 | 0 | sctp_abort_in_reasm(stcb, control, chk, |
1413 | 0 | abort_flag, |
1414 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_7); |
1415 | 0 | return; |
1416 | 0 | } |
1417 | 0 | } |
1418 | 0 | } |
1419 | 0 | if ((asoc->idata_supported == 0) && (unordered == 1)) { |
1420 | 0 | sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag); |
1421 | 0 | return; |
1422 | 0 | } |
1423 | | /* |
1424 | | * Ok we must queue the chunk into the reasembly portion: |
1425 | | * o if its the first it goes to the control mbuf. |
1426 | | * o if its not first but the next in sequence it goes to the control, |
1427 | | * and each succeeding one in order also goes. |
1428 | | * o if its not in order we place it on the list in its place. |
1429 | | */ |
1430 | 0 | if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { |
1431 | | /* Its the very first one. */ |
1432 | 0 | SCTPDBG(SCTP_DEBUG_XXX, |
1433 | 0 | "chunk is a first fsn: %u becomes fsn_included\n", |
1434 | 0 | chk->rec.data.fsn); |
1435 | 0 | if (control->first_frag_seen) { |
1436 | | /* |
1437 | | * Error on senders part, they either |
1438 | | * sent us two data chunks with FIRST, |
1439 | | * or they sent two un-ordered chunks that |
1440 | | * were fragmented at the same time in the same stream. |
1441 | | */ |
1442 | 0 | sctp_abort_in_reasm(stcb, control, chk, |
1443 | 0 | abort_flag, |
1444 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_8); |
1445 | 0 | return; |
1446 | 0 | } |
1447 | 0 | control->first_frag_seen = 1; |
1448 | 0 | control->sinfo_ppid = chk->rec.data.ppid; |
1449 | 0 | control->sinfo_tsn = chk->rec.data.tsn; |
1450 | 0 | control->fsn_included = chk->rec.data.fsn; |
1451 | 0 | control->data = chk->data; |
1452 | 0 | sctp_mark_non_revokable(asoc, chk->rec.data.tsn); |
1453 | 0 | chk->data = NULL; |
1454 | 0 | sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
1455 | 0 | sctp_setup_tail_pointer(control); |
1456 | 0 | asoc->size_on_all_streams += control->length; |
1457 | 0 | } else { |
1458 | | /* Place the chunk in our list */ |
1459 | 0 | int inserted=0; |
1460 | 0 | if (control->last_frag_seen == 0) { |
1461 | | /* Still willing to raise highest FSN seen */ |
1462 | 0 | if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) { |
1463 | 0 | SCTPDBG(SCTP_DEBUG_XXX, |
1464 | 0 | "We have a new top_fsn: %u\n", |
1465 | 0 | chk->rec.data.fsn); |
1466 | 0 | control->top_fsn = chk->rec.data.fsn; |
1467 | 0 | } |
1468 | 0 | if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { |
1469 | 0 | SCTPDBG(SCTP_DEBUG_XXX, |
1470 | 0 | "The last fsn is now in place fsn: %u\n", |
1471 | 0 | chk->rec.data.fsn); |
1472 | 0 | control->last_frag_seen = 1; |
1473 | 0 | if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) { |
1474 | 0 | SCTPDBG(SCTP_DEBUG_XXX, |
1475 | 0 | "New fsn: %u is not at top_fsn: %u -- abort\n", |
1476 | 0 | chk->rec.data.fsn, |
1477 | 0 | control->top_fsn); |
1478 | 0 | sctp_abort_in_reasm(stcb, control, chk, |
1479 | 0 | abort_flag, |
1480 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_9); |
1481 | 0 | return; |
1482 | 0 | } |
1483 | 0 | } |
1484 | 0 | if (asoc->idata_supported || control->first_frag_seen) { |
1485 | | /* |
1486 | | * For IDATA we always check since we know that |
1487 | | * the first fragment is 0. For old DATA we have |
1488 | | * to receive the first before we know the first FSN |
1489 | | * (which is the TSN). |
1490 | | */ |
1491 | 0 | if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) { |
1492 | | /* We have already delivered up to this so its a dup */ |
1493 | 0 | sctp_abort_in_reasm(stcb, control, chk, |
1494 | 0 | abort_flag, |
1495 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_10); |
1496 | 0 | return; |
1497 | 0 | } |
1498 | 0 | } |
1499 | 0 | } else { |
1500 | 0 | if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { |
1501 | | /* Second last? huh? */ |
1502 | 0 | SCTPDBG(SCTP_DEBUG_XXX, |
1503 | 0 | "Duplicate last fsn: %u (top: %u) -- abort\n", |
1504 | 0 | chk->rec.data.fsn, control->top_fsn); |
1505 | 0 | sctp_abort_in_reasm(stcb, control, |
1506 | 0 | chk, abort_flag, |
1507 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_11); |
1508 | 0 | return; |
1509 | 0 | } |
1510 | 0 | if (asoc->idata_supported || control->first_frag_seen) { |
1511 | | /* |
1512 | | * For IDATA we always check since we know that |
1513 | | * the first fragment is 0. For old DATA we have |
1514 | | * to receive the first before we know the first FSN |
1515 | | * (which is the TSN). |
1516 | | */ |
1517 | |
|
1518 | 0 | if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) { |
1519 | | /* We have already delivered up to this so its a dup */ |
1520 | 0 | SCTPDBG(SCTP_DEBUG_XXX, |
1521 | 0 | "New fsn: %u is already seen in included_fsn: %u -- abort\n", |
1522 | 0 | chk->rec.data.fsn, control->fsn_included); |
1523 | 0 | sctp_abort_in_reasm(stcb, control, chk, |
1524 | 0 | abort_flag, |
1525 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_12); |
1526 | 0 | return; |
1527 | 0 | } |
1528 | 0 | } |
1529 | | /* validate not beyond top FSN if we have seen last one */ |
1530 | 0 | if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) { |
1531 | 0 | SCTPDBG(SCTP_DEBUG_XXX, |
1532 | 0 | "New fsn: %u is beyond or at top_fsn: %u -- abort\n", |
1533 | 0 | chk->rec.data.fsn, |
1534 | 0 | control->top_fsn); |
1535 | 0 | sctp_abort_in_reasm(stcb, control, chk, |
1536 | 0 | abort_flag, |
1537 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_13); |
1538 | 0 | return; |
1539 | 0 | } |
1540 | 0 | } |
1541 | | /* |
1542 | | * If we reach here, we need to place the |
1543 | | * new chunk in the reassembly for this |
1544 | | * control. |
1545 | | */ |
1546 | 0 | SCTPDBG(SCTP_DEBUG_XXX, |
1547 | 0 | "chunk is a not first fsn: %u needs to be inserted\n", |
1548 | 0 | chk->rec.data.fsn); |
1549 | 0 | TAILQ_FOREACH(at, &control->reasm, sctp_next) { |
1550 | 0 | if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) { |
1551 | 0 | if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { |
1552 | | /* Last not at the end? huh? */ |
1553 | 0 | SCTPDBG(SCTP_DEBUG_XXX, |
1554 | 0 | "Last fragment not last in list: -- abort\n"); |
1555 | 0 | sctp_abort_in_reasm(stcb, control, |
1556 | 0 | chk, abort_flag, |
1557 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_14); |
1558 | 0 | return; |
1559 | 0 | } |
1560 | | /* |
1561 | | * This one in queue is bigger than the new one, insert |
1562 | | * the new one before at. |
1563 | | */ |
1564 | 0 | SCTPDBG(SCTP_DEBUG_XXX, |
1565 | 0 | "Insert it before fsn: %u\n", |
1566 | 0 | at->rec.data.fsn); |
1567 | 0 | asoc->size_on_reasm_queue += chk->send_size; |
1568 | 0 | sctp_ucount_incr(asoc->cnt_on_reasm_queue); |
1569 | 0 | TAILQ_INSERT_BEFORE(at, chk, sctp_next); |
1570 | 0 | inserted = 1; |
1571 | 0 | break; |
1572 | 0 | } else if (at->rec.data.fsn == chk->rec.data.fsn) { |
1573 | | /* Gak, He sent me a duplicate str seq number */ |
1574 | | /* |
1575 | | * foo bar, I guess I will just free this new guy, |
1576 | | * should we abort too? FIX ME MAYBE? Or it COULD be |
1577 | | * that the SSN's have wrapped. Maybe I should |
1578 | | * compare to TSN somehow... sigh for now just blow |
1579 | | * away the chunk! |
1580 | | */ |
1581 | 0 | SCTPDBG(SCTP_DEBUG_XXX, |
1582 | 0 | "Duplicate to fsn: %u -- abort\n", |
1583 | 0 | at->rec.data.fsn); |
1584 | 0 | sctp_abort_in_reasm(stcb, control, |
1585 | 0 | chk, abort_flag, |
1586 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_15); |
1587 | 0 | return; |
1588 | 0 | } |
1589 | 0 | } |
1590 | 0 | if (inserted == 0) { |
1591 | | /* Goes on the end */ |
1592 | 0 | SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n", |
1593 | 0 | chk->rec.data.fsn); |
1594 | 0 | asoc->size_on_reasm_queue += chk->send_size; |
1595 | 0 | sctp_ucount_incr(asoc->cnt_on_reasm_queue); |
1596 | 0 | TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next); |
1597 | 0 | } |
1598 | 0 | } |
1599 | | /* |
1600 | | * Ok lets see if we can suck any up into the control |
1601 | | * structure that are in seq if it makes sense. |
1602 | | */ |
1603 | 0 | do_wakeup = 0; |
1604 | | /* |
1605 | | * If the first fragment has not been |
1606 | | * seen there is no sense in looking. |
1607 | | */ |
1608 | 0 | if (control->first_frag_seen) { |
1609 | 0 | next_fsn = control->fsn_included + 1; |
1610 | 0 | TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) { |
1611 | 0 | if (at->rec.data.fsn == next_fsn) { |
1612 | | /* We can add this one now to the control */ |
1613 | 0 | SCTPDBG(SCTP_DEBUG_XXX, |
1614 | 0 | "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n", |
1615 | 0 | control, at, |
1616 | 0 | at->rec.data.fsn, |
1617 | 0 | next_fsn, control->fsn_included); |
1618 | 0 | TAILQ_REMOVE(&control->reasm, at, sctp_next); |
1619 | 0 | lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD); |
1620 | 0 | if (control->on_read_q) { |
1621 | 0 | do_wakeup = 1; |
1622 | 0 | } else { |
1623 | | /* |
1624 | | * We only add to the size-on-all-streams |
1625 | | * if its not on the read q. The read q |
1626 | | * flag will cause a sballoc so its accounted |
1627 | | * for there. |
1628 | | */ |
1629 | 0 | asoc->size_on_all_streams += lenadded; |
1630 | 0 | } |
1631 | 0 | next_fsn++; |
1632 | 0 | if (control->end_added && control->pdapi_started) { |
1633 | 0 | if (strm->pd_api_started) { |
1634 | 0 | strm->pd_api_started = 0; |
1635 | 0 | control->pdapi_started = 0; |
1636 | 0 | } |
1637 | 0 | if (control->on_read_q == 0) { |
1638 | 0 | sctp_add_to_readq(stcb->sctp_ep, stcb, |
1639 | 0 | control, |
1640 | 0 | &stcb->sctp_socket->so_rcv, control->end_added, |
1641 | 0 | SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); |
1642 | 0 | } |
1643 | 0 | break; |
1644 | 0 | } |
1645 | 0 | } else { |
1646 | 0 | break; |
1647 | 0 | } |
1648 | 0 | } |
1649 | 0 | } |
1650 | 0 | if (do_wakeup) { |
1651 | 0 | #if defined(__Userspace__) |
1652 | 0 | sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD); |
1653 | 0 | #endif |
1654 | | /* Need to wakeup the reader */ |
1655 | 0 | sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); |
1656 | 0 | } |
1657 | 0 | } |
1658 | | |
1659 | | static struct sctp_queued_to_read * |
1660 | | sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported) |
1661 | 0 | { |
1662 | 0 | struct sctp_queued_to_read *control; |
1663 | |
|
1664 | 0 | if (ordered) { |
1665 | 0 | TAILQ_FOREACH(control, &strm->inqueue, next_instrm) { |
1666 | 0 | if (SCTP_MID_EQ(idata_supported, control->mid, mid)) { |
1667 | 0 | break; |
1668 | 0 | } |
1669 | 0 | } |
1670 | 0 | } else { |
1671 | 0 | if (idata_supported) { |
1672 | 0 | TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) { |
1673 | 0 | if (SCTP_MID_EQ(idata_supported, control->mid, mid)) { |
1674 | 0 | break; |
1675 | 0 | } |
1676 | 0 | } |
1677 | 0 | } else { |
1678 | 0 | control = TAILQ_FIRST(&strm->uno_inqueue); |
1679 | 0 | } |
1680 | 0 | } |
1681 | 0 | return (control); |
1682 | 0 | } |
1683 | | |
1684 | | static int |
1685 | | sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, |
1686 | | struct mbuf **m, int offset, int chk_length, |
1687 | | struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag, |
1688 | | int *break_flag, int last_chunk, uint8_t chk_type) |
1689 | 0 | { |
1690 | 0 | struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */ |
1691 | 0 | struct sctp_stream_in *strm; |
1692 | 0 | uint32_t tsn, fsn, gap, mid; |
1693 | 0 | struct mbuf *dmbuf; |
1694 | 0 | int the_len; |
1695 | 0 | int need_reasm_check = 0; |
1696 | 0 | uint16_t sid; |
1697 | 0 | struct mbuf *op_err; |
1698 | 0 | char msg[SCTP_DIAG_INFO_LEN]; |
1699 | 0 | struct sctp_queued_to_read *control, *ncontrol; |
1700 | 0 | uint32_t ppid; |
1701 | 0 | uint8_t chk_flags; |
1702 | 0 | struct sctp_stream_reset_list *liste; |
1703 | 0 | int ordered; |
1704 | 0 | size_t clen; |
1705 | 0 | int created_control = 0; |
1706 | |
|
1707 | 0 | if (chk_type == SCTP_IDATA) { |
1708 | 0 | struct sctp_idata_chunk *chunk, chunk_buf; |
1709 | |
|
1710 | 0 | chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset, |
1711 | 0 | sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf); |
1712 | 0 | chk_flags = chunk->ch.chunk_flags; |
1713 | 0 | clen = sizeof(struct sctp_idata_chunk); |
1714 | 0 | tsn = ntohl(chunk->dp.tsn); |
1715 | 0 | sid = ntohs(chunk->dp.sid); |
1716 | 0 | mid = ntohl(chunk->dp.mid); |
1717 | 0 | if (chk_flags & SCTP_DATA_FIRST_FRAG) { |
1718 | 0 | fsn = 0; |
1719 | 0 | ppid = chunk->dp.ppid_fsn.ppid; |
1720 | 0 | } else { |
1721 | 0 | fsn = ntohl(chunk->dp.ppid_fsn.fsn); |
1722 | 0 | ppid = 0xffffffff; /* Use as an invalid value. */ |
1723 | 0 | } |
1724 | 0 | } else { |
1725 | 0 | struct sctp_data_chunk *chunk, chunk_buf; |
1726 | |
|
1727 | 0 | chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset, |
1728 | 0 | sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf); |
1729 | 0 | chk_flags = chunk->ch.chunk_flags; |
1730 | 0 | clen = sizeof(struct sctp_data_chunk); |
1731 | 0 | tsn = ntohl(chunk->dp.tsn); |
1732 | 0 | sid = ntohs(chunk->dp.sid); |
1733 | 0 | mid = (uint32_t)(ntohs(chunk->dp.ssn)); |
1734 | 0 | fsn = tsn; |
1735 | 0 | ppid = chunk->dp.ppid; |
1736 | 0 | } |
1737 | 0 | if ((size_t)chk_length == clen) { |
1738 | | /* |
1739 | | * Need to send an abort since we had a |
1740 | | * empty data chunk. |
1741 | | */ |
1742 | 0 | op_err = sctp_generate_no_user_data_cause(tsn); |
1743 | 0 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16; |
1744 | 0 | sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
1745 | 0 | *abort_flag = 1; |
1746 | 0 | return (0); |
1747 | 0 | } |
1748 | 0 | if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) { |
1749 | 0 | asoc->send_sack = 1; |
1750 | 0 | } |
1751 | 0 | ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0); |
1752 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { |
1753 | 0 | sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS); |
1754 | 0 | } |
1755 | 0 | if (stcb == NULL) { |
1756 | 0 | return (0); |
1757 | 0 | } |
1758 | 0 | SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn); |
1759 | 0 | if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) { |
1760 | | /* It is a duplicate */ |
1761 | 0 | SCTP_STAT_INCR(sctps_recvdupdata); |
1762 | 0 | if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { |
1763 | | /* Record a dup for the next outbound sack */ |
1764 | 0 | asoc->dup_tsns[asoc->numduptsns] = tsn; |
1765 | 0 | asoc->numduptsns++; |
1766 | 0 | } |
1767 | 0 | asoc->send_sack = 1; |
1768 | 0 | return (0); |
1769 | 0 | } |
1770 | | /* Calculate the number of TSN's between the base and this TSN */ |
1771 | 0 | SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn); |
1772 | 0 | if (gap >= (SCTP_MAPPING_ARRAY << 3)) { |
1773 | | /* Can't hold the bit in the mapping at max array, toss it */ |
1774 | 0 | return (0); |
1775 | 0 | } |
1776 | 0 | if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) { |
1777 | 0 | SCTP_TCB_LOCK_ASSERT(stcb); |
1778 | 0 | if (sctp_expand_mapping_array(asoc, gap)) { |
1779 | | /* Can't expand, drop it */ |
1780 | 0 | return (0); |
1781 | 0 | } |
1782 | 0 | } |
1783 | 0 | if (SCTP_TSN_GT(tsn, *high_tsn)) { |
1784 | 0 | *high_tsn = tsn; |
1785 | 0 | } |
1786 | | /* See if we have received this one already */ |
1787 | 0 | if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) || |
1788 | 0 | SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) { |
1789 | 0 | SCTP_STAT_INCR(sctps_recvdupdata); |
1790 | 0 | if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { |
1791 | | /* Record a dup for the next outbound sack */ |
1792 | 0 | asoc->dup_tsns[asoc->numduptsns] = tsn; |
1793 | 0 | asoc->numduptsns++; |
1794 | 0 | } |
1795 | 0 | asoc->send_sack = 1; |
1796 | 0 | return (0); |
1797 | 0 | } |
1798 | | /* |
1799 | | * Check to see about the GONE flag, duplicates would cause a sack |
1800 | | * to be sent up above |
1801 | | */ |
1802 | 0 | if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || |
1803 | 0 | (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || |
1804 | 0 | (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) { |
1805 | | /* |
1806 | | * wait a minute, this guy is gone, there is no longer a |
1807 | | * receiver. Send peer an ABORT! |
1808 | | */ |
1809 | 0 | op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); |
1810 | 0 | sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
1811 | 0 | *abort_flag = 1; |
1812 | 0 | return (0); |
1813 | 0 | } |
1814 | | /* |
1815 | | * Now before going further we see if there is room. If NOT then we |
1816 | | * MAY let one through only IF this TSN is the one we are waiting |
1817 | | * for on a partial delivery API. |
1818 | | */ |
1819 | | |
1820 | | /* Is the stream valid? */ |
1821 | 0 | if (sid >= asoc->streamincnt) { |
1822 | 0 | struct sctp_error_invalid_stream *cause; |
1823 | |
|
1824 | 0 | op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream), |
1825 | 0 | 0, M_NOWAIT, 1, MT_DATA); |
1826 | 0 | if (op_err != NULL) { |
1827 | | /* add some space up front so prepend will work well */ |
1828 | 0 | SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); |
1829 | 0 | cause = mtod(op_err, struct sctp_error_invalid_stream *); |
1830 | | /* |
1831 | | * Error causes are just param's and this one has |
1832 | | * two back to back phdr, one with the error type |
1833 | | * and size, the other with the streamid and a rsvd |
1834 | | */ |
1835 | 0 | SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream); |
1836 | 0 | cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM); |
1837 | 0 | cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream)); |
1838 | 0 | cause->stream_id = htons(sid); |
1839 | 0 | cause->reserved = htons(0); |
1840 | 0 | sctp_queue_op_err(stcb, op_err); |
1841 | 0 | } |
1842 | 0 | SCTP_STAT_INCR(sctps_badsid); |
1843 | 0 | SCTP_TCB_LOCK_ASSERT(stcb); |
1844 | 0 | SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); |
1845 | 0 | if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { |
1846 | 0 | asoc->highest_tsn_inside_nr_map = tsn; |
1847 | 0 | } |
1848 | 0 | if (tsn == (asoc->cumulative_tsn + 1)) { |
1849 | | /* Update cum-ack */ |
1850 | 0 | asoc->cumulative_tsn = tsn; |
1851 | 0 | } |
1852 | 0 | return (0); |
1853 | 0 | } |
1854 | | /* |
1855 | | * If its a fragmented message, lets see if we can |
1856 | | * find the control on the reassembly queues. |
1857 | | */ |
1858 | 0 | if ((chk_type == SCTP_IDATA) && |
1859 | 0 | ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) && |
1860 | 0 | (fsn == 0)) { |
1861 | | /* |
1862 | | * The first *must* be fsn 0, and other |
1863 | | * (middle/end) pieces can *not* be fsn 0. |
1864 | | * XXX: This can happen in case of a wrap around. |
1865 | | * Ignore is for now. |
1866 | | */ |
1867 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags); |
1868 | 0 | goto err_out; |
1869 | 0 | } |
1870 | 0 | control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported); |
1871 | 0 | SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n", |
1872 | 0 | chk_flags, control); |
1873 | 0 | if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { |
1874 | | /* See if we can find the re-assembly entity */ |
1875 | 0 | if (control != NULL) { |
1876 | | /* We found something, does it belong? */ |
1877 | 0 | if (ordered && (mid != control->mid)) { |
1878 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid); |
1879 | 0 | err_out: |
1880 | 0 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
1881 | 0 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17; |
1882 | 0 | sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
1883 | 0 | *abort_flag = 1; |
1884 | 0 | return (0); |
1885 | 0 | } |
1886 | 0 | if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) { |
1887 | | /* We can't have a switched order with an unordered chunk */ |
1888 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), |
1889 | 0 | "All fragments of a user message must be ordered or unordered (TSN=%8.8x)", |
1890 | 0 | tsn); |
1891 | 0 | goto err_out; |
1892 | 0 | } |
1893 | 0 | if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) { |
1894 | | /* We can't have a switched unordered with a ordered chunk */ |
1895 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), |
1896 | 0 | "All fragments of a user message must be ordered or unordered (TSN=%8.8x)", |
1897 | 0 | tsn); |
1898 | 0 | goto err_out; |
1899 | 0 | } |
1900 | 0 | } |
1901 | 0 | } else { |
1902 | | /* Its a complete segment. Lets validate we |
1903 | | * don't have a re-assembly going on with |
1904 | | * the same Stream/Seq (for ordered) or in |
1905 | | * the same Stream for unordered. |
1906 | | */ |
1907 | 0 | if (control != NULL) { |
1908 | 0 | if (ordered || asoc->idata_supported) { |
1909 | 0 | SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n", |
1910 | 0 | chk_flags, mid); |
1911 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid); |
1912 | 0 | goto err_out; |
1913 | 0 | } else { |
1914 | 0 | if ((control->first_frag_seen) && |
1915 | 0 | (tsn == control->fsn_included + 1) && |
1916 | 0 | (control->end_added == 0)) { |
1917 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), |
1918 | 0 | "Illegal message sequence, missing end for MID: %8.8x", |
1919 | 0 | control->fsn_included); |
1920 | 0 | goto err_out; |
1921 | 0 | } else { |
1922 | 0 | control = NULL; |
1923 | 0 | } |
1924 | 0 | } |
1925 | 0 | } |
1926 | 0 | } |
1927 | | /* now do the tests */ |
1928 | 0 | if (((asoc->cnt_on_all_streams + |
1929 | 0 | asoc->cnt_on_reasm_queue + |
1930 | 0 | asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) || |
1931 | 0 | (((int)asoc->my_rwnd) <= 0)) { |
1932 | | /* |
1933 | | * When we have NO room in the rwnd we check to make sure |
1934 | | * the reader is doing its job... |
1935 | | */ |
1936 | 0 | if (SCTP_SBAVAIL(&stcb->sctp_socket->so_rcv) > 0) { |
1937 | | /* some to read, wake-up */ |
1938 | | #if defined(__APPLE__) && !defined(__Userspace__) |
1939 | | struct socket *so; |
1940 | | |
1941 | | so = SCTP_INP_SO(stcb->sctp_ep); |
1942 | | atomic_add_int(&stcb->asoc.refcnt, 1); |
1943 | | SCTP_TCB_UNLOCK(stcb); |
1944 | | SCTP_SOCKET_LOCK(so, 1); |
1945 | | SCTP_TCB_LOCK(stcb); |
1946 | | atomic_subtract_int(&stcb->asoc.refcnt, 1); |
1947 | | if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { |
1948 | | /* assoc was freed while we were unlocked */ |
1949 | | SCTP_SOCKET_UNLOCK(so, 1); |
1950 | | return (0); |
1951 | | } |
1952 | | #endif |
1953 | 0 | sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); |
1954 | | #if defined(__APPLE__) && !defined(__Userspace__) |
1955 | | SCTP_SOCKET_UNLOCK(so, 1); |
1956 | | #endif |
1957 | 0 | } |
1958 | | /* now is it in the mapping array of what we have accepted? */ |
1959 | 0 | if (chk_type == SCTP_DATA) { |
1960 | 0 | if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) && |
1961 | 0 | SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { |
1962 | | /* Nope not in the valid range dump it */ |
1963 | 0 | dump_packet: |
1964 | 0 | sctp_set_rwnd(stcb, asoc); |
1965 | 0 | if ((asoc->cnt_on_all_streams + |
1966 | 0 | asoc->cnt_on_reasm_queue + |
1967 | 0 | asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) { |
1968 | 0 | SCTP_STAT_INCR(sctps_datadropchklmt); |
1969 | 0 | } else { |
1970 | 0 | SCTP_STAT_INCR(sctps_datadroprwnd); |
1971 | 0 | } |
1972 | 0 | *break_flag = 1; |
1973 | 0 | return (0); |
1974 | 0 | } |
1975 | 0 | } else { |
1976 | 0 | if (control == NULL) { |
1977 | 0 | goto dump_packet; |
1978 | 0 | } |
1979 | 0 | if (SCTP_TSN_GT(fsn, control->top_fsn)) { |
1980 | 0 | goto dump_packet; |
1981 | 0 | } |
1982 | 0 | } |
1983 | 0 | } |
1984 | | #ifdef SCTP_ASOCLOG_OF_TSNS |
1985 | | SCTP_TCB_LOCK_ASSERT(stcb); |
1986 | | if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) { |
1987 | | asoc->tsn_in_at = 0; |
1988 | | asoc->tsn_in_wrapped = 1; |
1989 | | } |
1990 | | asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn; |
1991 | | asoc->in_tsnlog[asoc->tsn_in_at].strm = sid; |
1992 | | asoc->in_tsnlog[asoc->tsn_in_at].seq = mid; |
1993 | | asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length; |
1994 | | asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags; |
1995 | | asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb; |
1996 | | asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at; |
1997 | | asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1; |
1998 | | asoc->tsn_in_at++; |
1999 | | #endif |
2000 | | /* |
2001 | | * Before we continue lets validate that we are not being fooled by |
2002 | | * an evil attacker. We can only have Nk chunks based on our TSN |
2003 | | * spread allowed by the mapping array N * 8 bits, so there is no |
2004 | | * way our stream sequence numbers could have wrapped. We of course |
2005 | | * only validate the FIRST fragment so the bit must be set. |
2006 | | */ |
2007 | 0 | if ((chk_flags & SCTP_DATA_FIRST_FRAG) && |
2008 | 0 | (TAILQ_EMPTY(&asoc->resetHead)) && |
2009 | 0 | (chk_flags & SCTP_DATA_UNORDERED) == 0 && |
2010 | 0 | SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) { |
2011 | | /* The incoming sseq is behind where we last delivered? */ |
2012 | 0 | SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n", |
2013 | 0 | mid, asoc->strmin[sid].last_mid_delivered); |
2014 | |
|
2015 | 0 | if (asoc->idata_supported) { |
2016 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x", |
2017 | 0 | asoc->strmin[sid].last_mid_delivered, |
2018 | 0 | tsn, |
2019 | 0 | sid, |
2020 | 0 | mid); |
2021 | 0 | } else { |
2022 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", |
2023 | 0 | (uint16_t)asoc->strmin[sid].last_mid_delivered, |
2024 | 0 | tsn, |
2025 | 0 | sid, |
2026 | 0 | (uint16_t)mid); |
2027 | 0 | } |
2028 | 0 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
2029 | 0 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18; |
2030 | 0 | sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
2031 | 0 | *abort_flag = 1; |
2032 | 0 | return (0); |
2033 | 0 | } |
2034 | 0 | if (chk_type == SCTP_IDATA) { |
2035 | 0 | the_len = (chk_length - sizeof(struct sctp_idata_chunk)); |
2036 | 0 | } else { |
2037 | 0 | the_len = (chk_length - sizeof(struct sctp_data_chunk)); |
2038 | 0 | } |
2039 | 0 | if (last_chunk == 0) { |
2040 | 0 | if (chk_type == SCTP_IDATA) { |
2041 | 0 | dmbuf = SCTP_M_COPYM(*m, |
2042 | 0 | (offset + sizeof(struct sctp_idata_chunk)), |
2043 | 0 | the_len, M_NOWAIT); |
2044 | 0 | } else { |
2045 | 0 | dmbuf = SCTP_M_COPYM(*m, |
2046 | 0 | (offset + sizeof(struct sctp_data_chunk)), |
2047 | 0 | the_len, M_NOWAIT); |
2048 | 0 | } |
2049 | | #ifdef SCTP_MBUF_LOGGING |
2050 | | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { |
2051 | | sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY); |
2052 | | } |
2053 | | #endif |
2054 | 0 | } else { |
2055 | | /* We can steal the last chunk */ |
2056 | 0 | int l_len; |
2057 | 0 | dmbuf = *m; |
2058 | | /* lop off the top part */ |
2059 | 0 | if (chk_type == SCTP_IDATA) { |
2060 | 0 | m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk))); |
2061 | 0 | } else { |
2062 | 0 | m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); |
2063 | 0 | } |
2064 | 0 | if (SCTP_BUF_NEXT(dmbuf) == NULL) { |
2065 | 0 | l_len = SCTP_BUF_LEN(dmbuf); |
2066 | 0 | } else { |
2067 | | /* need to count up the size hopefully |
2068 | | * does not hit this to often :-0 |
2069 | | */ |
2070 | 0 | struct mbuf *lat; |
2071 | |
|
2072 | 0 | l_len = 0; |
2073 | 0 | for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) { |
2074 | 0 | l_len += SCTP_BUF_LEN(lat); |
2075 | 0 | } |
2076 | 0 | } |
2077 | 0 | if (l_len > the_len) { |
2078 | | /* Trim the end round bytes off too */ |
2079 | 0 | m_adj(dmbuf, -(l_len - the_len)); |
2080 | 0 | } |
2081 | 0 | } |
2082 | 0 | if (dmbuf == NULL) { |
2083 | 0 | SCTP_STAT_INCR(sctps_nomem); |
2084 | 0 | return (0); |
2085 | 0 | } |
2086 | | /* |
2087 | | * Now no matter what, we need a control, get one |
2088 | | * if we don't have one (we may have gotten it |
2089 | | * above when we found the message was fragmented |
2090 | | */ |
2091 | 0 | if (control == NULL) { |
2092 | 0 | sctp_alloc_a_readq(stcb, control); |
2093 | 0 | sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, |
2094 | 0 | ppid, |
2095 | 0 | sid, |
2096 | 0 | chk_flags, |
2097 | 0 | NULL, fsn, mid); |
2098 | 0 | if (control == NULL) { |
2099 | 0 | SCTP_STAT_INCR(sctps_nomem); |
2100 | 0 | return (0); |
2101 | 0 | } |
2102 | 0 | if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { |
2103 | 0 | struct mbuf *mm; |
2104 | |
|
2105 | 0 | control->data = dmbuf; |
2106 | 0 | control->tail_mbuf = NULL; |
2107 | 0 | for (mm = control->data; mm; mm = mm->m_next) { |
2108 | 0 | control->length += SCTP_BUF_LEN(mm); |
2109 | 0 | if (SCTP_BUF_NEXT(mm) == NULL) { |
2110 | 0 | control->tail_mbuf = mm; |
2111 | 0 | } |
2112 | 0 | } |
2113 | 0 | control->end_added = 1; |
2114 | 0 | control->last_frag_seen = 1; |
2115 | 0 | control->first_frag_seen = 1; |
2116 | 0 | control->fsn_included = fsn; |
2117 | 0 | control->top_fsn = fsn; |
2118 | 0 | } |
2119 | 0 | created_control = 1; |
2120 | 0 | } |
2121 | 0 | SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n", |
2122 | 0 | chk_flags, ordered, mid, control); |
2123 | 0 | if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && |
2124 | 0 | TAILQ_EMPTY(&asoc->resetHead) && |
2125 | 0 | ((ordered == 0) || |
2126 | 0 | (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) && |
2127 | 0 | TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) { |
2128 | | /* Candidate for express delivery */ |
2129 | | /* |
2130 | | * Its not fragmented, No PD-API is up, Nothing in the |
2131 | | * delivery queue, Its un-ordered OR ordered and the next to |
2132 | | * deliver AND nothing else is stuck on the stream queue, |
2133 | | * And there is room for it in the socket buffer. Lets just |
2134 | | * stuff it up the buffer.... |
2135 | | */ |
2136 | 0 | SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); |
2137 | 0 | if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { |
2138 | 0 | asoc->highest_tsn_inside_nr_map = tsn; |
2139 | 0 | } |
2140 | 0 | SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n", |
2141 | 0 | control, mid); |
2142 | |
|
2143 | 0 | sctp_add_to_readq(stcb->sctp_ep, stcb, |
2144 | 0 | control, &stcb->sctp_socket->so_rcv, |
2145 | 0 | 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); |
2146 | |
|
2147 | 0 | if ((chk_flags & SCTP_DATA_UNORDERED) == 0) { |
2148 | | /* for ordered, bump what we delivered */ |
2149 | 0 | asoc->strmin[sid].last_mid_delivered++; |
2150 | 0 | } |
2151 | 0 | SCTP_STAT_INCR(sctps_recvexpress); |
2152 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { |
2153 | 0 | sctp_log_strm_del_alt(stcb, tsn, mid, sid, |
2154 | 0 | SCTP_STR_LOG_FROM_EXPRS_DEL); |
2155 | 0 | } |
2156 | 0 | control = NULL; |
2157 | 0 | goto finish_express_del; |
2158 | 0 | } |
2159 | | |
2160 | | /* Now will we need a chunk too? */ |
2161 | 0 | if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { |
2162 | 0 | sctp_alloc_a_chunk(stcb, chk); |
2163 | 0 | if (chk == NULL) { |
2164 | | /* No memory so we drop the chunk */ |
2165 | 0 | SCTP_STAT_INCR(sctps_nomem); |
2166 | 0 | if (last_chunk == 0) { |
2167 | | /* we copied it, free the copy */ |
2168 | 0 | sctp_m_freem(dmbuf); |
2169 | 0 | } |
2170 | 0 | return (0); |
2171 | 0 | } |
2172 | 0 | chk->rec.data.tsn = tsn; |
2173 | 0 | chk->no_fr_allowed = 0; |
2174 | 0 | chk->rec.data.fsn = fsn; |
2175 | 0 | chk->rec.data.mid = mid; |
2176 | 0 | chk->rec.data.sid = sid; |
2177 | 0 | chk->rec.data.ppid = ppid; |
2178 | 0 | chk->rec.data.context = stcb->asoc.context; |
2179 | 0 | chk->rec.data.doing_fast_retransmit = 0; |
2180 | 0 | chk->rec.data.rcv_flags = chk_flags; |
2181 | 0 | chk->asoc = asoc; |
2182 | 0 | chk->send_size = the_len; |
2183 | 0 | chk->whoTo = net; |
2184 | 0 | SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n", |
2185 | 0 | chk, |
2186 | 0 | control, mid); |
2187 | 0 | atomic_add_int(&net->ref_count, 1); |
2188 | 0 | chk->data = dmbuf; |
2189 | 0 | } |
2190 | | /* Set the appropriate TSN mark */ |
2191 | 0 | if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { |
2192 | 0 | SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); |
2193 | 0 | if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { |
2194 | 0 | asoc->highest_tsn_inside_nr_map = tsn; |
2195 | 0 | } |
2196 | 0 | } else { |
2197 | 0 | SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); |
2198 | 0 | if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) { |
2199 | 0 | asoc->highest_tsn_inside_map = tsn; |
2200 | 0 | } |
2201 | 0 | } |
2202 | | /* Now is it complete (i.e. not fragmented)? */ |
2203 | 0 | if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { |
2204 | | /* |
2205 | | * Special check for when streams are resetting. We |
2206 | | * could be more smart about this and check the |
2207 | | * actual stream to see if it is not being reset.. |
2208 | | * that way we would not create a HOLB when amongst |
2209 | | * streams being reset and those not being reset. |
2210 | | * |
2211 | | */ |
2212 | 0 | if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && |
2213 | 0 | SCTP_TSN_GT(tsn, liste->tsn)) { |
2214 | | /* |
2215 | | * yep its past where we need to reset... go |
2216 | | * ahead and queue it. |
2217 | | */ |
2218 | 0 | if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { |
2219 | | /* first one on */ |
2220 | 0 | TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); |
2221 | 0 | } else { |
2222 | 0 | struct sctp_queued_to_read *lcontrol, *nlcontrol; |
2223 | 0 | unsigned char inserted = 0; |
2224 | 0 | TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) { |
2225 | 0 | if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) { |
2226 | 0 | continue; |
2227 | 0 | } else { |
2228 | | /* found it */ |
2229 | 0 | TAILQ_INSERT_BEFORE(lcontrol, control, next); |
2230 | 0 | inserted = 1; |
2231 | 0 | break; |
2232 | 0 | } |
2233 | 0 | } |
2234 | 0 | if (inserted == 0) { |
2235 | | /* |
2236 | | * must be put at end, use |
2237 | | * prevP (all setup from |
2238 | | * loop) to setup nextP. |
2239 | | */ |
2240 | 0 | TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); |
2241 | 0 | } |
2242 | 0 | } |
2243 | 0 | goto finish_express_del; |
2244 | 0 | } |
2245 | 0 | if (chk_flags & SCTP_DATA_UNORDERED) { |
2246 | | /* queue directly into socket buffer */ |
2247 | 0 | SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n", |
2248 | 0 | control, mid); |
2249 | 0 | sctp_mark_non_revokable(asoc, control->sinfo_tsn); |
2250 | 0 | sctp_add_to_readq(stcb->sctp_ep, stcb, |
2251 | 0 | control, |
2252 | 0 | &stcb->sctp_socket->so_rcv, 1, |
2253 | 0 | SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); |
2254 | |
|
2255 | 0 | } else { |
2256 | 0 | SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control, |
2257 | 0 | mid); |
2258 | 0 | sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); |
2259 | 0 | if (*abort_flag) { |
2260 | 0 | if (last_chunk) { |
2261 | 0 | *m = NULL; |
2262 | 0 | } |
2263 | 0 | return (0); |
2264 | 0 | } |
2265 | 0 | } |
2266 | 0 | goto finish_express_del; |
2267 | 0 | } |
2268 | | /* If we reach here its a reassembly */ |
2269 | 0 | need_reasm_check = 1; |
2270 | 0 | SCTPDBG(SCTP_DEBUG_XXX, |
2271 | 0 | "Queue data to stream for reasm control: %p MID: %u\n", |
2272 | 0 | control, mid); |
2273 | 0 | sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn); |
2274 | 0 | if (*abort_flag) { |
2275 | | /* |
2276 | | * the assoc is now gone and chk was put onto the |
2277 | | * reasm queue, which has all been freed. |
2278 | | */ |
2279 | 0 | if (last_chunk) { |
2280 | 0 | *m = NULL; |
2281 | 0 | } |
2282 | 0 | return (0); |
2283 | 0 | } |
2284 | 0 | finish_express_del: |
2285 | | /* Here we tidy up things */ |
2286 | 0 | if (tsn == (asoc->cumulative_tsn + 1)) { |
2287 | | /* Update cum-ack */ |
2288 | 0 | asoc->cumulative_tsn = tsn; |
2289 | 0 | } |
2290 | 0 | if (last_chunk) { |
2291 | 0 | *m = NULL; |
2292 | 0 | } |
2293 | 0 | if (ordered) { |
2294 | 0 | SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); |
2295 | 0 | } else { |
2296 | 0 | SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); |
2297 | 0 | } |
2298 | 0 | SCTP_STAT_INCR(sctps_recvdata); |
2299 | | /* Set it present please */ |
2300 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { |
2301 | 0 | sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN); |
2302 | 0 | } |
2303 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { |
2304 | 0 | sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, |
2305 | 0 | asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); |
2306 | 0 | } |
2307 | 0 | if (need_reasm_check) { |
2308 | 0 | (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD); |
2309 | 0 | need_reasm_check = 0; |
2310 | 0 | } |
2311 | | /* check the special flag for stream resets */ |
2312 | 0 | if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && |
2313 | 0 | SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) { |
2314 | | /* |
2315 | | * we have finished working through the backlogged TSN's now |
2316 | | * time to reset streams. 1: call reset function. 2: free |
2317 | | * pending_reply space 3: distribute any chunks in |
2318 | | * pending_reply_queue. |
2319 | | */ |
2320 | 0 | sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams); |
2321 | 0 | TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); |
2322 | 0 | sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED); |
2323 | 0 | SCTP_FREE(liste, SCTP_M_STRESET); |
2324 | | /*sa_ignore FREED_MEMORY*/ |
2325 | 0 | liste = TAILQ_FIRST(&asoc->resetHead); |
2326 | 0 | if (TAILQ_EMPTY(&asoc->resetHead)) { |
2327 | | /* All can be removed */ |
2328 | 0 | TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { |
2329 | 0 | TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); |
2330 | 0 | strm = &asoc->strmin[control->sinfo_stream]; |
2331 | 0 | sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); |
2332 | 0 | if (*abort_flag) { |
2333 | 0 | return (0); |
2334 | 0 | } |
2335 | 0 | if (need_reasm_check) { |
2336 | 0 | (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD); |
2337 | 0 | need_reasm_check = 0; |
2338 | 0 | } |
2339 | 0 | } |
2340 | 0 | } else { |
2341 | 0 | TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { |
2342 | 0 | if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) { |
2343 | 0 | break; |
2344 | 0 | } |
2345 | | /* |
2346 | | * if control->sinfo_tsn is <= liste->tsn we can |
2347 | | * process it which is the NOT of |
2348 | | * control->sinfo_tsn > liste->tsn |
2349 | | */ |
2350 | 0 | TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); |
2351 | 0 | strm = &asoc->strmin[control->sinfo_stream]; |
2352 | 0 | sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); |
2353 | 0 | if (*abort_flag) { |
2354 | 0 | return (0); |
2355 | 0 | } |
2356 | 0 | if (need_reasm_check) { |
2357 | 0 | (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD); |
2358 | 0 | need_reasm_check = 0; |
2359 | 0 | } |
2360 | 0 | } |
2361 | 0 | } |
2362 | 0 | } |
2363 | 0 | return (1); |
2364 | 0 | } |
2365 | | |
2366 | | static const int8_t sctp_map_lookup_tab[256] = { |
2367 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2368 | | 0, 1, 0, 2, 0, 1, 0, 4, |
2369 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2370 | | 0, 1, 0, 2, 0, 1, 0, 5, |
2371 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2372 | | 0, 1, 0, 2, 0, 1, 0, 4, |
2373 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2374 | | 0, 1, 0, 2, 0, 1, 0, 6, |
2375 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2376 | | 0, 1, 0, 2, 0, 1, 0, 4, |
2377 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2378 | | 0, 1, 0, 2, 0, 1, 0, 5, |
2379 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2380 | | 0, 1, 0, 2, 0, 1, 0, 4, |
2381 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2382 | | 0, 1, 0, 2, 0, 1, 0, 7, |
2383 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2384 | | 0, 1, 0, 2, 0, 1, 0, 4, |
2385 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2386 | | 0, 1, 0, 2, 0, 1, 0, 5, |
2387 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2388 | | 0, 1, 0, 2, 0, 1, 0, 4, |
2389 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2390 | | 0, 1, 0, 2, 0, 1, 0, 6, |
2391 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2392 | | 0, 1, 0, 2, 0, 1, 0, 4, |
2393 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2394 | | 0, 1, 0, 2, 0, 1, 0, 5, |
2395 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2396 | | 0, 1, 0, 2, 0, 1, 0, 4, |
2397 | | 0, 1, 0, 2, 0, 1, 0, 3, |
2398 | | 0, 1, 0, 2, 0, 1, 0, 8 |
2399 | | }; |
2400 | | |
2401 | | void |
2402 | | sctp_slide_mapping_arrays(struct sctp_tcb *stcb) |
2403 | 0 | { |
2404 | | /* |
2405 | | * Now we also need to check the mapping array in a couple of ways. |
2406 | | * 1) Did we move the cum-ack point? |
2407 | | * |
2408 | | * When you first glance at this you might think |
2409 | | * that all entries that make up the position |
2410 | | * of the cum-ack would be in the nr-mapping array |
2411 | | * only.. i.e. things up to the cum-ack are always |
2412 | | * deliverable. Thats true with one exception, when |
2413 | | * its a fragmented message we may not deliver the data |
2414 | | * until some threshold (or all of it) is in place. So |
2415 | | * we must OR the nr_mapping_array and mapping_array to |
2416 | | * get a true picture of the cum-ack. |
2417 | | */ |
2418 | 0 | struct sctp_association *asoc; |
2419 | 0 | int at; |
2420 | 0 | uint8_t val; |
2421 | 0 | int slide_from, slide_end, lgap, distance; |
2422 | 0 | uint32_t old_cumack, old_base, old_highest, highest_tsn; |
2423 | |
|
2424 | 0 | asoc = &stcb->asoc; |
2425 | |
|
2426 | 0 | old_cumack = asoc->cumulative_tsn; |
2427 | 0 | old_base = asoc->mapping_array_base_tsn; |
2428 | 0 | old_highest = asoc->highest_tsn_inside_map; |
2429 | | /* |
2430 | | * We could probably improve this a small bit by calculating the |
2431 | | * offset of the current cum-ack as the starting point. |
2432 | | */ |
2433 | 0 | at = 0; |
2434 | 0 | for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) { |
2435 | 0 | val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from]; |
2436 | 0 | if (val == 0xff) { |
2437 | 0 | at += 8; |
2438 | 0 | } else { |
2439 | | /* there is a 0 bit */ |
2440 | 0 | at += sctp_map_lookup_tab[val]; |
2441 | 0 | break; |
2442 | 0 | } |
2443 | 0 | } |
2444 | 0 | asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at-1); |
2445 | |
|
2446 | 0 | if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) && |
2447 | 0 | SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) { |
2448 | 0 | #ifdef INVARIANTS |
2449 | 0 | panic("huh, cumack 0x%x greater than high-tsn 0x%x in map", |
2450 | 0 | asoc->cumulative_tsn, asoc->highest_tsn_inside_map); |
2451 | | #else |
2452 | | SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n", |
2453 | | asoc->cumulative_tsn, asoc->highest_tsn_inside_map); |
2454 | | sctp_print_mapping_array(asoc); |
2455 | | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { |
2456 | | sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); |
2457 | | } |
2458 | | asoc->highest_tsn_inside_map = asoc->cumulative_tsn; |
2459 | | asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn; |
2460 | | #endif |
2461 | 0 | } |
2462 | 0 | if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { |
2463 | 0 | highest_tsn = asoc->highest_tsn_inside_nr_map; |
2464 | 0 | } else { |
2465 | 0 | highest_tsn = asoc->highest_tsn_inside_map; |
2466 | 0 | } |
2467 | 0 | if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) { |
2468 | | /* The complete array was completed by a single FR */ |
2469 | | /* highest becomes the cum-ack */ |
2470 | 0 | int clr; |
2471 | 0 | #ifdef INVARIANTS |
2472 | 0 | unsigned int i; |
2473 | 0 | #endif |
2474 | | |
2475 | | /* clear the array */ |
2476 | 0 | clr = ((at+7) >> 3); |
2477 | 0 | if (clr > asoc->mapping_array_size) { |
2478 | 0 | clr = asoc->mapping_array_size; |
2479 | 0 | } |
2480 | 0 | memset(asoc->mapping_array, 0, clr); |
2481 | 0 | memset(asoc->nr_mapping_array, 0, clr); |
2482 | 0 | #ifdef INVARIANTS |
2483 | 0 | for (i = 0; i < asoc->mapping_array_size; i++) { |
2484 | 0 | if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) { |
2485 | 0 | SCTP_PRINTF("Error Mapping array's not clean at clear\n"); |
2486 | 0 | sctp_print_mapping_array(asoc); |
2487 | 0 | } |
2488 | 0 | } |
2489 | 0 | #endif |
2490 | 0 | asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; |
2491 | 0 | asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn; |
2492 | 0 | } else if (at >= 8) { |
2493 | | /* we can slide the mapping array down */ |
2494 | | /* slide_from holds where we hit the first NON 0xff byte */ |
2495 | | |
2496 | | /* |
2497 | | * now calculate the ceiling of the move using our highest |
2498 | | * TSN value |
2499 | | */ |
2500 | 0 | SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn); |
2501 | 0 | slide_end = (lgap >> 3); |
2502 | 0 | if (slide_end < slide_from) { |
2503 | 0 | sctp_print_mapping_array(asoc); |
2504 | 0 | #ifdef INVARIANTS |
2505 | 0 | panic("impossible slide"); |
2506 | | #else |
2507 | | SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n", |
2508 | | lgap, slide_end, slide_from, at); |
2509 | | return; |
2510 | | #endif |
2511 | 0 | } |
2512 | 0 | if (slide_end > asoc->mapping_array_size) { |
2513 | 0 | #ifdef INVARIANTS |
2514 | 0 | panic("would overrun buffer"); |
2515 | | #else |
2516 | | SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n", |
2517 | | asoc->mapping_array_size, slide_end); |
2518 | | slide_end = asoc->mapping_array_size; |
2519 | | #endif |
2520 | 0 | } |
2521 | 0 | distance = (slide_end - slide_from) + 1; |
2522 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { |
2523 | 0 | sctp_log_map(old_base, old_cumack, old_highest, |
2524 | 0 | SCTP_MAP_PREPARE_SLIDE); |
2525 | 0 | sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end, |
2526 | 0 | (uint32_t) lgap, SCTP_MAP_SLIDE_FROM); |
2527 | 0 | } |
2528 | 0 | if (distance + slide_from > asoc->mapping_array_size || |
2529 | 0 | distance < 0) { |
2530 | | /* |
2531 | | * Here we do NOT slide forward the array so that |
2532 | | * hopefully when more data comes in to fill it up |
2533 | | * we will be able to slide it forward. Really I |
2534 | | * don't think this should happen :-0 |
2535 | | */ |
2536 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { |
2537 | 0 | sctp_log_map((uint32_t) distance, (uint32_t) slide_from, |
2538 | 0 | (uint32_t) asoc->mapping_array_size, |
2539 | 0 | SCTP_MAP_SLIDE_NONE); |
2540 | 0 | } |
2541 | 0 | } else { |
2542 | 0 | int ii; |
2543 | |
|
2544 | 0 | for (ii = 0; ii < distance; ii++) { |
2545 | 0 | asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii]; |
2546 | 0 | asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii]; |
2547 | 0 | } |
2548 | 0 | for (ii = distance; ii < asoc->mapping_array_size; ii++) { |
2549 | 0 | asoc->mapping_array[ii] = 0; |
2550 | 0 | asoc->nr_mapping_array[ii] = 0; |
2551 | 0 | } |
2552 | 0 | if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) { |
2553 | 0 | asoc->highest_tsn_inside_map += (slide_from << 3); |
2554 | 0 | } |
2555 | 0 | if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) { |
2556 | 0 | asoc->highest_tsn_inside_nr_map += (slide_from << 3); |
2557 | 0 | } |
2558 | 0 | asoc->mapping_array_base_tsn += (slide_from << 3); |
2559 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { |
2560 | 0 | sctp_log_map(asoc->mapping_array_base_tsn, |
2561 | 0 | asoc->cumulative_tsn, asoc->highest_tsn_inside_map, |
2562 | 0 | SCTP_MAP_SLIDE_RESULT); |
2563 | 0 | } |
2564 | 0 | } |
2565 | 0 | } |
2566 | 0 | } |
2567 | | |
2568 | | void |
2569 | | sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap) |
2570 | 0 | { |
2571 | 0 | struct sctp_association *asoc; |
2572 | 0 | uint32_t highest_tsn; |
2573 | 0 | int is_a_gap; |
2574 | |
|
2575 | 0 | sctp_slide_mapping_arrays(stcb); |
2576 | 0 | asoc = &stcb->asoc; |
2577 | 0 | if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { |
2578 | 0 | highest_tsn = asoc->highest_tsn_inside_nr_map; |
2579 | 0 | } else { |
2580 | 0 | highest_tsn = asoc->highest_tsn_inside_map; |
2581 | 0 | } |
2582 | | /* Is there a gap now? */ |
2583 | 0 | is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); |
2584 | | |
2585 | | /* |
2586 | | * Now we need to see if we need to queue a sack or just start the |
2587 | | * timer (if allowed). |
2588 | | */ |
2589 | 0 | if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) { |
2590 | | /* |
2591 | | * Ok special case, in SHUTDOWN-SENT case. here we |
2592 | | * maker sure SACK timer is off and instead send a |
2593 | | * SHUTDOWN and a SACK |
2594 | | */ |
2595 | 0 | if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { |
2596 | 0 | sctp_timer_stop(SCTP_TIMER_TYPE_RECV, |
2597 | 0 | stcb->sctp_ep, stcb, NULL, |
2598 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_19); |
2599 | 0 | } |
2600 | 0 | sctp_send_shutdown(stcb, |
2601 | 0 | ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination)); |
2602 | 0 | if (is_a_gap) { |
2603 | 0 | sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); |
2604 | 0 | } |
2605 | 0 | } else { |
2606 | | /* |
2607 | | * CMT DAC algorithm: increase number of packets |
2608 | | * received since last ack |
2609 | | */ |
2610 | 0 | stcb->asoc.cmt_dac_pkts_rcvd++; |
2611 | |
|
2612 | 0 | if ((stcb->asoc.send_sack == 1) || /* We need to send a SACK */ |
2613 | 0 | ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no |
2614 | | * longer is one */ |
2615 | 0 | (stcb->asoc.numduptsns) || /* we have dup's */ |
2616 | 0 | (is_a_gap) || /* is still a gap */ |
2617 | 0 | (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */ |
2618 | 0 | (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)) { /* hit limit of pkts */ |
2619 | 0 | if ((stcb->asoc.sctp_cmt_on_off > 0) && |
2620 | 0 | (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) && |
2621 | 0 | (stcb->asoc.send_sack == 0) && |
2622 | 0 | (stcb->asoc.numduptsns == 0) && |
2623 | 0 | (stcb->asoc.delayed_ack) && |
2624 | 0 | (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) { |
2625 | | /* |
2626 | | * CMT DAC algorithm: With CMT, |
2627 | | * delay acks even in the face of |
2628 | | * reordering. Therefore, if acks |
2629 | | * that do not have to be sent |
2630 | | * because of the above reasons, |
2631 | | * will be delayed. That is, acks |
2632 | | * that would have been sent due to |
2633 | | * gap reports will be delayed with |
2634 | | * DAC. Start the delayed ack timer. |
2635 | | */ |
2636 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_RECV, |
2637 | 0 | stcb->sctp_ep, stcb, NULL); |
2638 | 0 | } else { |
2639 | | /* |
2640 | | * Ok we must build a SACK since the |
2641 | | * timer is pending, we got our |
2642 | | * first packet OR there are gaps or |
2643 | | * duplicates. |
2644 | | */ |
2645 | 0 | sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, |
2646 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_20); |
2647 | 0 | sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); |
2648 | 0 | } |
2649 | 0 | } else { |
2650 | 0 | if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { |
2651 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_RECV, |
2652 | 0 | stcb->sctp_ep, stcb, NULL); |
2653 | 0 | } |
2654 | 0 | } |
2655 | 0 | } |
2656 | 0 | } |
2657 | | |
2658 | | int |
2659 | | sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, |
2660 | | struct sctp_inpcb *inp, struct sctp_tcb *stcb, |
2661 | | struct sctp_nets *net, uint32_t *high_tsn) |
2662 | 1 | { |
2663 | 1 | struct sctp_chunkhdr *ch, chunk_buf; |
2664 | 1 | struct sctp_association *asoc; |
2665 | 1 | int num_chunks = 0; /* number of control chunks processed */ |
2666 | 1 | int stop_proc = 0; |
2667 | 1 | int break_flag, last_chunk; |
2668 | 1 | int abort_flag = 0, was_a_gap; |
2669 | 1 | struct mbuf *m; |
2670 | 1 | uint32_t highest_tsn; |
2671 | 1 | uint16_t chk_length; |
2672 | | |
2673 | | /* set the rwnd */ |
2674 | 1 | sctp_set_rwnd(stcb, &stcb->asoc); |
2675 | | |
2676 | 1 | m = *mm; |
2677 | 1 | SCTP_TCB_LOCK_ASSERT(stcb); |
2678 | 1 | asoc = &stcb->asoc; |
2679 | 1 | if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { |
2680 | 0 | highest_tsn = asoc->highest_tsn_inside_nr_map; |
2681 | 1 | } else { |
2682 | 1 | highest_tsn = asoc->highest_tsn_inside_map; |
2683 | 1 | } |
2684 | 1 | was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); |
2685 | | /* |
2686 | | * setup where we got the last DATA packet from for any SACK that |
2687 | | * may need to go out. Don't bump the net. This is done ONLY when a |
2688 | | * chunk is assigned. |
2689 | | */ |
2690 | 1 | asoc->last_data_chunk_from = net; |
2691 | | |
2692 | | /*- |
2693 | | * Now before we proceed we must figure out if this is a wasted |
2694 | | * cluster... i.e. it is a small packet sent in and yet the driver |
2695 | | * underneath allocated a full cluster for it. If so we must copy it |
2696 | | * to a smaller mbuf and free up the cluster mbuf. This will help |
2697 | | * with cluster starvation. |
2698 | | */ |
2699 | 1 | if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) { |
2700 | | /* we only handle mbufs that are singletons.. not chains */ |
2701 | 1 | m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA); |
2702 | 1 | if (m) { |
2703 | | /* ok lets see if we can copy the data up */ |
2704 | 1 | caddr_t *from, *to; |
2705 | | /* get the pointers and copy */ |
2706 | 1 | to = mtod(m, caddr_t *); |
2707 | 1 | from = mtod((*mm), caddr_t *); |
2708 | 1 | memcpy(to, from, SCTP_BUF_LEN((*mm))); |
2709 | | /* copy the length and free up the old */ |
2710 | 1 | SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm)); |
2711 | 1 | sctp_m_freem(*mm); |
2712 | | /* success, back copy */ |
2713 | 1 | *mm = m; |
2714 | 1 | } else { |
2715 | | /* We are in trouble in the mbuf world .. yikes */ |
2716 | 0 | m = *mm; |
2717 | 0 | } |
2718 | 1 | } |
2719 | | /* get pointer to the first chunk header */ |
2720 | 1 | ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, |
2721 | 1 | sizeof(struct sctp_chunkhdr), |
2722 | 1 | (uint8_t *)&chunk_buf); |
2723 | 1 | if (ch == NULL) { |
2724 | 0 | return (1); |
2725 | 0 | } |
2726 | | /* |
2727 | | * process all DATA chunks... |
2728 | | */ |
2729 | 1 | *high_tsn = asoc->cumulative_tsn; |
2730 | 1 | break_flag = 0; |
2731 | 1 | asoc->data_pkts_seen++; |
2732 | 1 | while (stop_proc == 0) { |
2733 | | /* validate chunk length */ |
2734 | 1 | chk_length = ntohs(ch->chunk_length); |
2735 | 1 | if (length - *offset < chk_length) { |
2736 | | /* all done, mutulated chunk */ |
2737 | 0 | stop_proc = 1; |
2738 | 0 | continue; |
2739 | 0 | } |
2740 | 1 | if ((asoc->idata_supported == 1) && |
2741 | 1 | (ch->chunk_type == SCTP_DATA)) { |
2742 | 0 | struct mbuf *op_err; |
2743 | 0 | char msg[SCTP_DIAG_INFO_LEN]; |
2744 | |
|
2745 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated"); |
2746 | 0 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
2747 | 0 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21; |
2748 | 0 | sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
2749 | 0 | return (2); |
2750 | 0 | } |
2751 | 1 | if ((asoc->idata_supported == 0) && |
2752 | 0 | (ch->chunk_type == SCTP_IDATA)) { |
2753 | 0 | struct mbuf *op_err; |
2754 | 0 | char msg[SCTP_DIAG_INFO_LEN]; |
2755 | |
|
2756 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated"); |
2757 | 0 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
2758 | 0 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22; |
2759 | 0 | sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
2760 | 0 | return (2); |
2761 | 0 | } |
2762 | 1 | if ((ch->chunk_type == SCTP_DATA) || |
2763 | 1 | (ch->chunk_type == SCTP_IDATA)) { |
2764 | 1 | uint16_t clen; |
2765 | | |
2766 | 1 | if (ch->chunk_type == SCTP_DATA) { |
2767 | 0 | clen = sizeof(struct sctp_data_chunk); |
2768 | 1 | } else { |
2769 | 1 | clen = sizeof(struct sctp_idata_chunk); |
2770 | 1 | } |
2771 | 1 | if (chk_length < clen) { |
2772 | | /* |
2773 | | * Need to send an abort since we had a |
2774 | | * invalid data chunk. |
2775 | | */ |
2776 | 1 | struct mbuf *op_err; |
2777 | 1 | char msg[SCTP_DIAG_INFO_LEN]; |
2778 | | |
2779 | 1 | SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u", |
2780 | 1 | ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA", |
2781 | 1 | chk_length); |
2782 | 1 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
2783 | 1 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23; |
2784 | 1 | sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
2785 | 1 | return (2); |
2786 | 1 | } |
2787 | | #ifdef SCTP_AUDITING_ENABLED |
2788 | | sctp_audit_log(0xB1, 0); |
2789 | | #endif |
2790 | 0 | if (SCTP_SIZE32(chk_length) == (length - *offset)) { |
2791 | 0 | last_chunk = 1; |
2792 | 0 | } else { |
2793 | 0 | last_chunk = 0; |
2794 | 0 | } |
2795 | 0 | if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, |
2796 | 0 | chk_length, net, high_tsn, &abort_flag, &break_flag, |
2797 | 0 | last_chunk, ch->chunk_type)) { |
2798 | 0 | num_chunks++; |
2799 | 0 | } |
2800 | 0 | if (abort_flag) |
2801 | 0 | return (2); |
2802 | | |
2803 | 0 | if (break_flag) { |
2804 | | /* |
2805 | | * Set because of out of rwnd space and no |
2806 | | * drop rep space left. |
2807 | | */ |
2808 | 0 | stop_proc = 1; |
2809 | 0 | continue; |
2810 | 0 | } |
2811 | 0 | } else { |
2812 | | /* not a data chunk in the data region */ |
2813 | 0 | switch (ch->chunk_type) { |
2814 | 0 | case SCTP_INITIATION: |
2815 | 0 | case SCTP_INITIATION_ACK: |
2816 | 0 | case SCTP_SELECTIVE_ACK: |
2817 | 0 | case SCTP_NR_SELECTIVE_ACK: |
2818 | 0 | case SCTP_HEARTBEAT_REQUEST: |
2819 | 0 | case SCTP_HEARTBEAT_ACK: |
2820 | 0 | case SCTP_ABORT_ASSOCIATION: |
2821 | 0 | case SCTP_SHUTDOWN: |
2822 | 0 | case SCTP_SHUTDOWN_ACK: |
2823 | 0 | case SCTP_OPERATION_ERROR: |
2824 | 0 | case SCTP_COOKIE_ECHO: |
2825 | 0 | case SCTP_COOKIE_ACK: |
2826 | 0 | case SCTP_ECN_ECHO: |
2827 | 0 | case SCTP_ECN_CWR: |
2828 | 0 | case SCTP_SHUTDOWN_COMPLETE: |
2829 | 0 | case SCTP_AUTHENTICATION: |
2830 | 0 | case SCTP_ASCONF_ACK: |
2831 | 0 | case SCTP_PACKET_DROPPED: |
2832 | 0 | case SCTP_STREAM_RESET: |
2833 | 0 | case SCTP_FORWARD_CUM_TSN: |
2834 | 0 | case SCTP_ASCONF: |
2835 | 0 | { |
2836 | | /* |
2837 | | * Now, what do we do with KNOWN chunks that |
2838 | | * are NOT in the right place? |
2839 | | * |
2840 | | * For now, I do nothing but ignore them. We |
2841 | | * may later want to add sysctl stuff to |
2842 | | * switch out and do either an ABORT() or |
2843 | | * possibly process them. |
2844 | | */ |
2845 | 0 | struct mbuf *op_err; |
2846 | 0 | char msg[SCTP_DIAG_INFO_LEN]; |
2847 | |
|
2848 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x", |
2849 | 0 | ch->chunk_type); |
2850 | 0 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
2851 | 0 | sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
2852 | 0 | return (2); |
2853 | 0 | } |
2854 | 0 | default: |
2855 | | /* |
2856 | | * Unknown chunk type: use bit rules after |
2857 | | * checking length |
2858 | | */ |
2859 | 0 | if (chk_length < sizeof(struct sctp_chunkhdr)) { |
2860 | | /* |
2861 | | * Need to send an abort since we had a |
2862 | | * invalid chunk. |
2863 | | */ |
2864 | 0 | struct mbuf *op_err; |
2865 | 0 | char msg[SCTP_DIAG_INFO_LEN]; |
2866 | |
|
2867 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length); |
2868 | 0 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
2869 | 0 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; |
2870 | 0 | sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
2871 | 0 | return (2); |
2872 | 0 | } |
2873 | 0 | if (ch->chunk_type & 0x40) { |
2874 | | /* Add a error report to the queue */ |
2875 | 0 | struct mbuf *op_err; |
2876 | 0 | struct sctp_gen_error_cause *cause; |
2877 | |
|
2878 | 0 | op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause), |
2879 | 0 | 0, M_NOWAIT, 1, MT_DATA); |
2880 | 0 | if (op_err != NULL) { |
2881 | 0 | cause = mtod(op_err, struct sctp_gen_error_cause *); |
2882 | 0 | cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK); |
2883 | 0 | cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause))); |
2884 | 0 | SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause); |
2885 | 0 | SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT); |
2886 | 0 | if (SCTP_BUF_NEXT(op_err) != NULL) { |
2887 | 0 | sctp_queue_op_err(stcb, op_err); |
2888 | 0 | } else { |
2889 | 0 | sctp_m_freem(op_err); |
2890 | 0 | } |
2891 | 0 | } |
2892 | 0 | } |
2893 | 0 | if ((ch->chunk_type & 0x80) == 0) { |
2894 | | /* discard the rest of this packet */ |
2895 | 0 | stop_proc = 1; |
2896 | 0 | } /* else skip this bad chunk and |
2897 | | * continue... */ |
2898 | 0 | break; |
2899 | 0 | } /* switch of chunk type */ |
2900 | 0 | } |
2901 | 0 | *offset += SCTP_SIZE32(chk_length); |
2902 | 0 | if ((*offset >= length) || stop_proc) { |
2903 | | /* no more data left in the mbuf chain */ |
2904 | 0 | stop_proc = 1; |
2905 | 0 | continue; |
2906 | 0 | } |
2907 | 0 | ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, |
2908 | 0 | sizeof(struct sctp_chunkhdr), |
2909 | 0 | (uint8_t *)&chunk_buf); |
2910 | 0 | if (ch == NULL) { |
2911 | 0 | *offset = length; |
2912 | 0 | stop_proc = 1; |
2913 | 0 | continue; |
2914 | 0 | } |
2915 | 0 | } |
2916 | 0 | if (break_flag) { |
2917 | | /* |
2918 | | * we need to report rwnd overrun drops. |
2919 | | */ |
2920 | 0 | sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0); |
2921 | 0 | } |
2922 | 0 | if (num_chunks) { |
2923 | | /* |
2924 | | * Did we get data, if so update the time for auto-close and |
2925 | | * give peer credit for being alive. |
2926 | | */ |
2927 | 0 | SCTP_STAT_INCR(sctps_recvpktwithdata); |
2928 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { |
2929 | 0 | sctp_misc_ints(SCTP_THRESHOLD_CLEAR, |
2930 | 0 | stcb->asoc.overall_error_count, |
2931 | 0 | 0, |
2932 | 0 | SCTP_FROM_SCTP_INDATA, |
2933 | 0 | __LINE__); |
2934 | 0 | } |
2935 | 0 | stcb->asoc.overall_error_count = 0; |
2936 | 0 | (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); |
2937 | 0 | } |
2938 | | /* now service all of the reassm queue if needed */ |
2939 | 0 | if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) { |
2940 | | /* Assure that we ack right away */ |
2941 | 0 | stcb->asoc.send_sack = 1; |
2942 | 0 | } |
2943 | | /* Start a sack timer or QUEUE a SACK for sending */ |
2944 | 0 | sctp_sack_check(stcb, was_a_gap); |
2945 | 0 | return (0); |
2946 | 1 | } |
2947 | | |
2948 | | static int |
2949 | | sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn, |
2950 | | uint16_t frag_strt, uint16_t frag_end, int nr_sacking, |
2951 | | int *num_frs, |
2952 | | uint32_t *biggest_newly_acked_tsn, |
2953 | | uint32_t *this_sack_lowest_newack, |
2954 | | int *rto_ok) |
2955 | 0 | { |
2956 | 0 | struct sctp_tmit_chunk *tp1; |
2957 | 0 | unsigned int theTSN; |
2958 | 0 | int j, wake_him = 0, circled = 0; |
2959 | | |
2960 | | /* Recover the tp1 we last saw */ |
2961 | 0 | tp1 = *p_tp1; |
2962 | 0 | if (tp1 == NULL) { |
2963 | 0 | tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); |
2964 | 0 | } |
2965 | 0 | for (j = frag_strt; j <= frag_end; j++) { |
2966 | 0 | theTSN = j + last_tsn; |
2967 | 0 | while (tp1) { |
2968 | 0 | if (tp1->rec.data.doing_fast_retransmit) |
2969 | 0 | (*num_frs) += 1; |
2970 | | |
2971 | | /*- |
2972 | | * CMT: CUCv2 algorithm. For each TSN being |
2973 | | * processed from the sent queue, track the |
2974 | | * next expected pseudo-cumack, or |
2975 | | * rtx_pseudo_cumack, if required. Separate |
2976 | | * cumack trackers for first transmissions, |
2977 | | * and retransmissions. |
2978 | | */ |
2979 | 0 | if ((tp1->sent < SCTP_DATAGRAM_RESEND) && |
2980 | 0 | (tp1->whoTo->find_pseudo_cumack == 1) && |
2981 | 0 | (tp1->snd_count == 1)) { |
2982 | 0 | tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn; |
2983 | 0 | tp1->whoTo->find_pseudo_cumack = 0; |
2984 | 0 | } |
2985 | 0 | if ((tp1->sent < SCTP_DATAGRAM_RESEND) && |
2986 | 0 | (tp1->whoTo->find_rtx_pseudo_cumack == 1) && |
2987 | 0 | (tp1->snd_count > 1)) { |
2988 | 0 | tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn; |
2989 | 0 | tp1->whoTo->find_rtx_pseudo_cumack = 0; |
2990 | 0 | } |
2991 | 0 | if (tp1->rec.data.tsn == theTSN) { |
2992 | 0 | if (tp1->sent != SCTP_DATAGRAM_UNSENT) { |
2993 | | /*- |
2994 | | * must be held until |
2995 | | * cum-ack passes |
2996 | | */ |
2997 | 0 | if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
2998 | | /*- |
2999 | | * If it is less than RESEND, it is |
3000 | | * now no-longer in flight. |
3001 | | * Higher values may already be set |
3002 | | * via previous Gap Ack Blocks... |
3003 | | * i.e. ACKED or RESEND. |
3004 | | */ |
3005 | 0 | if (SCTP_TSN_GT(tp1->rec.data.tsn, |
3006 | 0 | *biggest_newly_acked_tsn)) { |
3007 | 0 | *biggest_newly_acked_tsn = tp1->rec.data.tsn; |
3008 | 0 | } |
3009 | | /*- |
3010 | | * CMT: SFR algo (and HTNA) - set |
3011 | | * saw_newack to 1 for dest being |
3012 | | * newly acked. update |
3013 | | * this_sack_highest_newack if |
3014 | | * appropriate. |
3015 | | */ |
3016 | 0 | if (tp1->rec.data.chunk_was_revoked == 0) |
3017 | 0 | tp1->whoTo->saw_newack = 1; |
3018 | |
|
3019 | 0 | if (SCTP_TSN_GT(tp1->rec.data.tsn, |
3020 | 0 | tp1->whoTo->this_sack_highest_newack)) { |
3021 | 0 | tp1->whoTo->this_sack_highest_newack = |
3022 | 0 | tp1->rec.data.tsn; |
3023 | 0 | } |
3024 | | /*- |
3025 | | * CMT DAC algo: also update |
3026 | | * this_sack_lowest_newack |
3027 | | */ |
3028 | 0 | if (*this_sack_lowest_newack == 0) { |
3029 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { |
3030 | 0 | sctp_log_sack(*this_sack_lowest_newack, |
3031 | 0 | last_tsn, |
3032 | 0 | tp1->rec.data.tsn, |
3033 | 0 | 0, |
3034 | 0 | 0, |
3035 | 0 | SCTP_LOG_TSN_ACKED); |
3036 | 0 | } |
3037 | 0 | *this_sack_lowest_newack = tp1->rec.data.tsn; |
3038 | 0 | } |
3039 | | /*- |
3040 | | * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp |
3041 | | * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set |
3042 | | * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be |
3043 | | * updated. Also trigger search for the next expected (rtx-)pseudo-cumack. |
3044 | | * Separate pseudo_cumack trackers for first transmissions and |
3045 | | * retransmissions. |
3046 | | */ |
3047 | 0 | if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) { |
3048 | 0 | if (tp1->rec.data.chunk_was_revoked == 0) { |
3049 | 0 | tp1->whoTo->new_pseudo_cumack = 1; |
3050 | 0 | } |
3051 | 0 | tp1->whoTo->find_pseudo_cumack = 1; |
3052 | 0 | } |
3053 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { |
3054 | 0 | sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); |
3055 | 0 | } |
3056 | 0 | if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) { |
3057 | 0 | if (tp1->rec.data.chunk_was_revoked == 0) { |
3058 | 0 | tp1->whoTo->new_pseudo_cumack = 1; |
3059 | 0 | } |
3060 | 0 | tp1->whoTo->find_rtx_pseudo_cumack = 1; |
3061 | 0 | } |
3062 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { |
3063 | 0 | sctp_log_sack(*biggest_newly_acked_tsn, |
3064 | 0 | last_tsn, |
3065 | 0 | tp1->rec.data.tsn, |
3066 | 0 | frag_strt, |
3067 | 0 | frag_end, |
3068 | 0 | SCTP_LOG_TSN_ACKED); |
3069 | 0 | } |
3070 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
3071 | 0 | sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP, |
3072 | 0 | tp1->whoTo->flight_size, |
3073 | 0 | tp1->book_size, |
3074 | 0 | (uint32_t)(uintptr_t)tp1->whoTo, |
3075 | 0 | tp1->rec.data.tsn); |
3076 | 0 | } |
3077 | 0 | sctp_flight_size_decrease(tp1); |
3078 | 0 | if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { |
3079 | 0 | (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo, |
3080 | 0 | tp1); |
3081 | 0 | } |
3082 | 0 | sctp_total_flight_decrease(stcb, tp1); |
3083 | |
|
3084 | 0 | tp1->whoTo->net_ack += tp1->send_size; |
3085 | 0 | if (tp1->snd_count < 2) { |
3086 | | /*- |
3087 | | * True non-retransmitted chunk |
3088 | | */ |
3089 | 0 | tp1->whoTo->net_ack2 += tp1->send_size; |
3090 | | |
3091 | | /*- |
3092 | | * update RTO too ? |
3093 | | */ |
3094 | 0 | if (tp1->do_rtt) { |
3095 | 0 | if (*rto_ok && |
3096 | 0 | sctp_calculate_rto(stcb, |
3097 | 0 | &stcb->asoc, |
3098 | 0 | tp1->whoTo, |
3099 | 0 | &tp1->sent_rcv_time, |
3100 | 0 | SCTP_RTT_FROM_DATA)) { |
3101 | 0 | *rto_ok = 0; |
3102 | 0 | } |
3103 | 0 | if (tp1->whoTo->rto_needed == 0) { |
3104 | 0 | tp1->whoTo->rto_needed = 1; |
3105 | 0 | } |
3106 | 0 | tp1->do_rtt = 0; |
3107 | 0 | } |
3108 | 0 | } |
3109 | 0 | } |
3110 | 0 | if (tp1->sent <= SCTP_DATAGRAM_RESEND) { |
3111 | 0 | if (SCTP_TSN_GT(tp1->rec.data.tsn, |
3112 | 0 | stcb->asoc.this_sack_highest_gap)) { |
3113 | 0 | stcb->asoc.this_sack_highest_gap = |
3114 | 0 | tp1->rec.data.tsn; |
3115 | 0 | } |
3116 | 0 | if (tp1->sent == SCTP_DATAGRAM_RESEND) { |
3117 | 0 | sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); |
3118 | | #ifdef SCTP_AUDITING_ENABLED |
3119 | | sctp_audit_log(0xB2, |
3120 | | (stcb->asoc.sent_queue_retran_cnt & 0x000000ff)); |
3121 | | #endif |
3122 | 0 | } |
3123 | 0 | } |
3124 | | /*- |
3125 | | * All chunks NOT UNSENT fall through here and are marked |
3126 | | * (leave PR-SCTP ones that are to skip alone though) |
3127 | | */ |
3128 | 0 | if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) && |
3129 | 0 | (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { |
3130 | 0 | tp1->sent = SCTP_DATAGRAM_MARKED; |
3131 | 0 | } |
3132 | 0 | if (tp1->rec.data.chunk_was_revoked) { |
3133 | | /* deflate the cwnd */ |
3134 | 0 | tp1->whoTo->cwnd -= tp1->book_size; |
3135 | 0 | tp1->rec.data.chunk_was_revoked = 0; |
3136 | 0 | } |
3137 | | /* NR Sack code here */ |
3138 | 0 | if (nr_sacking && |
3139 | 0 | (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { |
3140 | 0 | if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) { |
3141 | 0 | stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--; |
3142 | 0 | #ifdef INVARIANTS |
3143 | 0 | } else { |
3144 | 0 | panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); |
3145 | 0 | #endif |
3146 | 0 | } |
3147 | 0 | if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) && |
3148 | 0 | (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && |
3149 | 0 | TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) { |
3150 | 0 | stcb->asoc.trigger_reset = 1; |
3151 | 0 | } |
3152 | 0 | tp1->sent = SCTP_DATAGRAM_NR_ACKED; |
3153 | 0 | if (tp1->data) { |
3154 | | /* sa_ignore NO_NULL_CHK */ |
3155 | 0 | sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); |
3156 | 0 | sctp_m_freem(tp1->data); |
3157 | 0 | tp1->data = NULL; |
3158 | 0 | } |
3159 | 0 | wake_him++; |
3160 | 0 | } |
3161 | 0 | } |
3162 | 0 | break; |
3163 | 0 | } /* if (tp1->tsn == theTSN) */ |
3164 | 0 | if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) { |
3165 | 0 | break; |
3166 | 0 | } |
3167 | 0 | tp1 = TAILQ_NEXT(tp1, sctp_next); |
3168 | 0 | if ((tp1 == NULL) && (circled == 0)) { |
3169 | 0 | circled++; |
3170 | 0 | tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); |
3171 | 0 | } |
3172 | 0 | } /* end while (tp1) */ |
3173 | 0 | if (tp1 == NULL) { |
3174 | 0 | circled = 0; |
3175 | 0 | tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); |
3176 | 0 | } |
3177 | | /* In case the fragments were not in order we must reset */ |
3178 | 0 | } /* end for (j = fragStart */ |
3179 | 0 | *p_tp1 = tp1; |
3180 | 0 | return (wake_him); /* Return value only used for nr-sack */ |
3181 | 0 | } |
3182 | | |
3183 | | static int |
3184 | | sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, |
3185 | | uint32_t last_tsn, uint32_t *biggest_tsn_acked, |
3186 | | uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack, |
3187 | | int num_seg, int num_nr_seg, int *rto_ok) |
3188 | 0 | { |
3189 | 0 | struct sctp_gap_ack_block *frag, block; |
3190 | 0 | struct sctp_tmit_chunk *tp1; |
3191 | 0 | int i; |
3192 | 0 | int num_frs = 0; |
3193 | 0 | int chunk_freed; |
3194 | 0 | int non_revocable; |
3195 | 0 | uint16_t frag_strt, frag_end, prev_frag_end; |
3196 | |
|
3197 | 0 | tp1 = TAILQ_FIRST(&asoc->sent_queue); |
3198 | 0 | prev_frag_end = 0; |
3199 | 0 | chunk_freed = 0; |
3200 | |
|
3201 | 0 | for (i = 0; i < (num_seg + num_nr_seg); i++) { |
3202 | 0 | if (i == num_seg) { |
3203 | 0 | prev_frag_end = 0; |
3204 | 0 | tp1 = TAILQ_FIRST(&asoc->sent_queue); |
3205 | 0 | } |
3206 | 0 | frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, |
3207 | 0 | sizeof(struct sctp_gap_ack_block), (uint8_t *) &block); |
3208 | 0 | *offset += sizeof(block); |
3209 | 0 | if (frag == NULL) { |
3210 | 0 | return (chunk_freed); |
3211 | 0 | } |
3212 | 0 | frag_strt = ntohs(frag->start); |
3213 | 0 | frag_end = ntohs(frag->end); |
3214 | |
|
3215 | 0 | if (frag_strt > frag_end) { |
3216 | | /* This gap report is malformed, skip it. */ |
3217 | 0 | continue; |
3218 | 0 | } |
3219 | 0 | if (frag_strt <= prev_frag_end) { |
3220 | | /* This gap report is not in order, so restart. */ |
3221 | 0 | tp1 = TAILQ_FIRST(&asoc->sent_queue); |
3222 | 0 | } |
3223 | 0 | if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) { |
3224 | 0 | *biggest_tsn_acked = last_tsn + frag_end; |
3225 | 0 | } |
3226 | 0 | if (i < num_seg) { |
3227 | 0 | non_revocable = 0; |
3228 | 0 | } else { |
3229 | 0 | non_revocable = 1; |
3230 | 0 | } |
3231 | 0 | if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end, |
3232 | 0 | non_revocable, &num_frs, biggest_newly_acked_tsn, |
3233 | 0 | this_sack_lowest_newack, rto_ok)) { |
3234 | 0 | chunk_freed = 1; |
3235 | 0 | } |
3236 | 0 | prev_frag_end = frag_end; |
3237 | 0 | } |
3238 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
3239 | 0 | if (num_frs) |
3240 | 0 | sctp_log_fr(*biggest_tsn_acked, |
3241 | 0 | *biggest_newly_acked_tsn, |
3242 | 0 | last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); |
3243 | 0 | } |
3244 | 0 | return (chunk_freed); |
3245 | 0 | } |
3246 | | |
3247 | | static void |
3248 | | sctp_check_for_revoked(struct sctp_tcb *stcb, |
3249 | | struct sctp_association *asoc, uint32_t cumack, |
3250 | | uint32_t biggest_tsn_acked) |
3251 | 0 | { |
3252 | 0 | struct sctp_tmit_chunk *tp1; |
3253 | |
|
3254 | 0 | TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
3255 | 0 | if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) { |
3256 | | /* |
3257 | | * ok this guy is either ACK or MARKED. If it is |
3258 | | * ACKED it has been previously acked but not this |
3259 | | * time i.e. revoked. If it is MARKED it was ACK'ed |
3260 | | * again. |
3261 | | */ |
3262 | 0 | if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) { |
3263 | 0 | break; |
3264 | 0 | } |
3265 | 0 | if (tp1->sent == SCTP_DATAGRAM_ACKED) { |
3266 | | /* it has been revoked */ |
3267 | 0 | tp1->sent = SCTP_DATAGRAM_SENT; |
3268 | 0 | tp1->rec.data.chunk_was_revoked = 1; |
3269 | | /* We must add this stuff back in to |
3270 | | * assure timers and such get started. |
3271 | | */ |
3272 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
3273 | 0 | sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, |
3274 | 0 | tp1->whoTo->flight_size, |
3275 | 0 | tp1->book_size, |
3276 | 0 | (uint32_t)(uintptr_t)tp1->whoTo, |
3277 | 0 | tp1->rec.data.tsn); |
3278 | 0 | } |
3279 | 0 | sctp_flight_size_increase(tp1); |
3280 | 0 | sctp_total_flight_increase(stcb, tp1); |
3281 | | /* We inflate the cwnd to compensate for our |
3282 | | * artificial inflation of the flight_size. |
3283 | | */ |
3284 | 0 | tp1->whoTo->cwnd += tp1->book_size; |
3285 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { |
3286 | 0 | sctp_log_sack(asoc->last_acked_seq, |
3287 | 0 | cumack, |
3288 | 0 | tp1->rec.data.tsn, |
3289 | 0 | 0, |
3290 | 0 | 0, |
3291 | 0 | SCTP_LOG_TSN_REVOKED); |
3292 | 0 | } |
3293 | 0 | } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { |
3294 | | /* it has been re-acked in this SACK */ |
3295 | 0 | tp1->sent = SCTP_DATAGRAM_ACKED; |
3296 | 0 | } |
3297 | 0 | } |
3298 | 0 | if (tp1->sent == SCTP_DATAGRAM_UNSENT) |
3299 | 0 | break; |
3300 | 0 | } |
3301 | 0 | } |
3302 | | |
3303 | | static void |
3304 | | sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, |
3305 | | uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved) |
3306 | 0 | { |
3307 | 0 | struct sctp_tmit_chunk *tp1; |
3308 | 0 | int strike_flag = 0; |
3309 | 0 | struct timeval now; |
3310 | 0 | uint32_t sending_seq; |
3311 | 0 | struct sctp_nets *net; |
3312 | 0 | int num_dests_sacked = 0; |
3313 | | |
3314 | | /* |
3315 | | * select the sending_seq, this is either the next thing ready to be |
3316 | | * sent but not transmitted, OR, the next seq we assign. |
3317 | | */ |
3318 | 0 | tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); |
3319 | 0 | if (tp1 == NULL) { |
3320 | 0 | sending_seq = asoc->sending_seq; |
3321 | 0 | } else { |
3322 | 0 | sending_seq = tp1->rec.data.tsn; |
3323 | 0 | } |
3324 | | |
3325 | | /* CMT DAC algo: finding out if SACK is a mixed SACK */ |
3326 | 0 | if ((asoc->sctp_cmt_on_off > 0) && |
3327 | 0 | SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { |
3328 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
3329 | 0 | if (net->saw_newack) |
3330 | 0 | num_dests_sacked++; |
3331 | 0 | } |
3332 | 0 | } |
3333 | 0 | if (stcb->asoc.prsctp_supported) { |
3334 | 0 | (void)SCTP_GETTIME_TIMEVAL(&now); |
3335 | 0 | } |
3336 | 0 | TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
3337 | 0 | strike_flag = 0; |
3338 | 0 | if (tp1->no_fr_allowed) { |
3339 | | /* this one had a timeout or something */ |
3340 | 0 | continue; |
3341 | 0 | } |
3342 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
3343 | 0 | if (tp1->sent < SCTP_DATAGRAM_RESEND) |
3344 | 0 | sctp_log_fr(biggest_tsn_newly_acked, |
3345 | 0 | tp1->rec.data.tsn, |
3346 | 0 | tp1->sent, |
3347 | 0 | SCTP_FR_LOG_CHECK_STRIKE); |
3348 | 0 | } |
3349 | 0 | if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) || |
3350 | 0 | tp1->sent == SCTP_DATAGRAM_UNSENT) { |
3351 | | /* done */ |
3352 | 0 | break; |
3353 | 0 | } |
3354 | 0 | if (stcb->asoc.prsctp_supported) { |
3355 | 0 | if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { |
3356 | | /* Is it expired? */ |
3357 | 0 | #if !(defined(__FreeBSD__) && !defined(__Userspace__)) |
3358 | 0 | if (timercmp(&now, &tp1->rec.data.timetodrop, >)) { |
3359 | | #else |
3360 | | if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { |
3361 | | #endif |
3362 | | /* Yes so drop it */ |
3363 | 0 | if (tp1->data != NULL) { |
3364 | 0 | (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, |
3365 | 0 | SCTP_SO_NOT_LOCKED); |
3366 | 0 | } |
3367 | 0 | continue; |
3368 | 0 | } |
3369 | 0 | } |
3370 | 0 | } |
3371 | 0 | if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) && |
3372 | 0 | !(accum_moved && asoc->fast_retran_loss_recovery)) { |
3373 | | /* we are beyond the tsn in the sack */ |
3374 | 0 | break; |
3375 | 0 | } |
3376 | 0 | if (tp1->sent >= SCTP_DATAGRAM_RESEND) { |
3377 | | /* either a RESEND, ACKED, or MARKED */ |
3378 | | /* skip */ |
3379 | 0 | if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { |
3380 | | /* Continue strikin FWD-TSN chunks */ |
3381 | 0 | tp1->rec.data.fwd_tsn_cnt++; |
3382 | 0 | } |
3383 | 0 | continue; |
3384 | 0 | } |
3385 | | /* |
3386 | | * CMT : SFR algo (covers part of DAC and HTNA as well) |
3387 | | */ |
3388 | 0 | if (tp1->whoTo && tp1->whoTo->saw_newack == 0) { |
3389 | | /* |
3390 | | * No new acks were received for data sent to this |
3391 | | * dest. Therefore, according to the SFR algo for |
3392 | | * CMT, no data sent to this dest can be marked for |
3393 | | * FR using this SACK. |
3394 | | */ |
3395 | 0 | continue; |
3396 | 0 | } else if (tp1->whoTo && |
3397 | 0 | SCTP_TSN_GT(tp1->rec.data.tsn, |
3398 | 0 | tp1->whoTo->this_sack_highest_newack) && |
3399 | 0 | !(accum_moved && asoc->fast_retran_loss_recovery)) { |
3400 | | /* |
3401 | | * CMT: New acks were received for data sent to |
3402 | | * this dest. But no new acks were seen for data |
3403 | | * sent after tp1. Therefore, according to the SFR |
3404 | | * algo for CMT, tp1 cannot be marked for FR using |
3405 | | * this SACK. This step covers part of the DAC algo |
3406 | | * and the HTNA algo as well. |
3407 | | */ |
3408 | 0 | continue; |
3409 | 0 | } |
3410 | | /* |
3411 | | * Here we check to see if we were have already done a FR |
3412 | | * and if so we see if the biggest TSN we saw in the sack is |
3413 | | * smaller than the recovery point. If so we don't strike |
3414 | | * the tsn... otherwise we CAN strike the TSN. |
3415 | | */ |
3416 | | /* |
3417 | | * @@@ JRI: Check for CMT |
3418 | | * if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) { |
3419 | | */ |
3420 | 0 | if (accum_moved && asoc->fast_retran_loss_recovery) { |
3421 | | /* |
3422 | | * Strike the TSN if in fast-recovery and cum-ack |
3423 | | * moved. |
3424 | | */ |
3425 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
3426 | 0 | sctp_log_fr(biggest_tsn_newly_acked, |
3427 | 0 | tp1->rec.data.tsn, |
3428 | 0 | tp1->sent, |
3429 | 0 | SCTP_FR_LOG_STRIKE_CHUNK); |
3430 | 0 | } |
3431 | 0 | if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
3432 | 0 | tp1->sent++; |
3433 | 0 | } |
3434 | 0 | if ((asoc->sctp_cmt_on_off > 0) && |
3435 | 0 | SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { |
3436 | | /* |
3437 | | * CMT DAC algorithm: If SACK flag is set to |
3438 | | * 0, then lowest_newack test will not pass |
3439 | | * because it would have been set to the |
3440 | | * cumack earlier. If not already to be |
3441 | | * rtx'd, If not a mixed sack and if tp1 is |
3442 | | * not between two sacked TSNs, then mark by |
3443 | | * one more. |
3444 | | * NOTE that we are marking by one additional time since the SACK DAC flag indicates that |
3445 | | * two packets have been received after this missing TSN. |
3446 | | */ |
3447 | 0 | if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && |
3448 | 0 | SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) { |
3449 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
3450 | 0 | sctp_log_fr(16 + num_dests_sacked, |
3451 | 0 | tp1->rec.data.tsn, |
3452 | 0 | tp1->sent, |
3453 | 0 | SCTP_FR_LOG_STRIKE_CHUNK); |
3454 | 0 | } |
3455 | 0 | tp1->sent++; |
3456 | 0 | } |
3457 | 0 | } |
3458 | 0 | } else if ((tp1->rec.data.doing_fast_retransmit) && |
3459 | 0 | (asoc->sctp_cmt_on_off == 0)) { |
3460 | | /* |
3461 | | * For those that have done a FR we must take |
3462 | | * special consideration if we strike. I.e the |
3463 | | * biggest_newly_acked must be higher than the |
3464 | | * sending_seq at the time we did the FR. |
3465 | | */ |
3466 | 0 | if ( |
3467 | | #ifdef SCTP_FR_TO_ALTERNATE |
3468 | | /* |
3469 | | * If FR's go to new networks, then we must only do |
3470 | | * this for singly homed asoc's. However if the FR's |
3471 | | * go to the same network (Armando's work) then its |
3472 | | * ok to FR multiple times. |
3473 | | */ |
3474 | | (asoc->numnets < 2) |
3475 | | #else |
3476 | 0 | (1) |
3477 | 0 | #endif |
3478 | 0 | ) { |
3479 | 0 | if (SCTP_TSN_GE(biggest_tsn_newly_acked, |
3480 | 0 | tp1->rec.data.fast_retran_tsn)) { |
3481 | | /* |
3482 | | * Strike the TSN, since this ack is |
3483 | | * beyond where things were when we |
3484 | | * did a FR. |
3485 | | */ |
3486 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
3487 | 0 | sctp_log_fr(biggest_tsn_newly_acked, |
3488 | 0 | tp1->rec.data.tsn, |
3489 | 0 | tp1->sent, |
3490 | 0 | SCTP_FR_LOG_STRIKE_CHUNK); |
3491 | 0 | } |
3492 | 0 | if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
3493 | 0 | tp1->sent++; |
3494 | 0 | } |
3495 | 0 | strike_flag = 1; |
3496 | 0 | if ((asoc->sctp_cmt_on_off > 0) && |
3497 | 0 | SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { |
3498 | | /* |
3499 | | * CMT DAC algorithm: If |
3500 | | * SACK flag is set to 0, |
3501 | | * then lowest_newack test |
3502 | | * will not pass because it |
3503 | | * would have been set to |
3504 | | * the cumack earlier. If |
3505 | | * not already to be rtx'd, |
3506 | | * If not a mixed sack and |
3507 | | * if tp1 is not between two |
3508 | | * sacked TSNs, then mark by |
3509 | | * one more. |
3510 | | * NOTE that we are marking by one additional time since the SACK DAC flag indicates that |
3511 | | * two packets have been received after this missing TSN. |
3512 | | */ |
3513 | 0 | if ((tp1->sent < SCTP_DATAGRAM_RESEND) && |
3514 | 0 | (num_dests_sacked == 1) && |
3515 | 0 | SCTP_TSN_GT(this_sack_lowest_newack, |
3516 | 0 | tp1->rec.data.tsn)) { |
3517 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
3518 | 0 | sctp_log_fr(32 + num_dests_sacked, |
3519 | 0 | tp1->rec.data.tsn, |
3520 | 0 | tp1->sent, |
3521 | 0 | SCTP_FR_LOG_STRIKE_CHUNK); |
3522 | 0 | } |
3523 | 0 | if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
3524 | 0 | tp1->sent++; |
3525 | 0 | } |
3526 | 0 | } |
3527 | 0 | } |
3528 | 0 | } |
3529 | 0 | } |
3530 | | /* |
3531 | | * JRI: TODO: remove code for HTNA algo. CMT's |
3532 | | * SFR algo covers HTNA. |
3533 | | */ |
3534 | 0 | } else if (SCTP_TSN_GT(tp1->rec.data.tsn, |
3535 | 0 | biggest_tsn_newly_acked)) { |
3536 | | /* |
3537 | | * We don't strike these: This is the HTNA |
3538 | | * algorithm i.e. we don't strike If our TSN is |
3539 | | * larger than the Highest TSN Newly Acked. |
3540 | | */ |
3541 | 0 | ; |
3542 | 0 | } else { |
3543 | | /* Strike the TSN */ |
3544 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
3545 | 0 | sctp_log_fr(biggest_tsn_newly_acked, |
3546 | 0 | tp1->rec.data.tsn, |
3547 | 0 | tp1->sent, |
3548 | 0 | SCTP_FR_LOG_STRIKE_CHUNK); |
3549 | 0 | } |
3550 | 0 | if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
3551 | 0 | tp1->sent++; |
3552 | 0 | } |
3553 | 0 | if ((asoc->sctp_cmt_on_off > 0) && |
3554 | 0 | SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { |
3555 | | /* |
3556 | | * CMT DAC algorithm: If SACK flag is set to |
3557 | | * 0, then lowest_newack test will not pass |
3558 | | * because it would have been set to the |
3559 | | * cumack earlier. If not already to be |
3560 | | * rtx'd, If not a mixed sack and if tp1 is |
3561 | | * not between two sacked TSNs, then mark by |
3562 | | * one more. |
3563 | | * NOTE that we are marking by one additional time since the SACK DAC flag indicates that |
3564 | | * two packets have been received after this missing TSN. |
3565 | | */ |
3566 | 0 | if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && |
3567 | 0 | SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) { |
3568 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
3569 | 0 | sctp_log_fr(48 + num_dests_sacked, |
3570 | 0 | tp1->rec.data.tsn, |
3571 | 0 | tp1->sent, |
3572 | 0 | SCTP_FR_LOG_STRIKE_CHUNK); |
3573 | 0 | } |
3574 | 0 | tp1->sent++; |
3575 | 0 | } |
3576 | 0 | } |
3577 | 0 | } |
3578 | 0 | if (tp1->sent == SCTP_DATAGRAM_RESEND) { |
3579 | 0 | struct sctp_nets *alt; |
3580 | | |
3581 | | /* fix counts and things */ |
3582 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
3583 | 0 | sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, |
3584 | 0 | (tp1->whoTo ? (tp1->whoTo->flight_size) : 0), |
3585 | 0 | tp1->book_size, |
3586 | 0 | (uint32_t)(uintptr_t)tp1->whoTo, |
3587 | 0 | tp1->rec.data.tsn); |
3588 | 0 | } |
3589 | 0 | if (tp1->whoTo) { |
3590 | 0 | tp1->whoTo->net_ack++; |
3591 | 0 | sctp_flight_size_decrease(tp1); |
3592 | 0 | if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { |
3593 | 0 | (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo, |
3594 | 0 | tp1); |
3595 | 0 | } |
3596 | 0 | } |
3597 | |
|
3598 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { |
3599 | 0 | sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, |
3600 | 0 | asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); |
3601 | 0 | } |
3602 | | /* add back to the rwnd */ |
3603 | 0 | asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); |
3604 | | |
3605 | | /* remove from the total flight */ |
3606 | 0 | sctp_total_flight_decrease(stcb, tp1); |
3607 | |
|
3608 | 0 | if ((stcb->asoc.prsctp_supported) && |
3609 | 0 | (PR_SCTP_RTX_ENABLED(tp1->flags))) { |
3610 | | /* Has it been retransmitted tv_sec times? - we store the retran count there. */ |
3611 | 0 | if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { |
3612 | | /* Yes, so drop it */ |
3613 | 0 | if (tp1->data != NULL) { |
3614 | 0 | (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, |
3615 | 0 | SCTP_SO_NOT_LOCKED); |
3616 | 0 | } |
3617 | | /* Make sure to flag we had a FR */ |
3618 | 0 | if (tp1->whoTo != NULL) { |
3619 | 0 | tp1->whoTo->net_ack++; |
3620 | 0 | } |
3621 | 0 | continue; |
3622 | 0 | } |
3623 | 0 | } |
3624 | | /* SCTP_PRINTF("OK, we are now ready to FR this guy\n"); */ |
3625 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
3626 | 0 | sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count, |
3627 | 0 | 0, SCTP_FR_MARKED); |
3628 | 0 | } |
3629 | 0 | if (strike_flag) { |
3630 | | /* This is a subsequent FR */ |
3631 | 0 | SCTP_STAT_INCR(sctps_sendmultfastretrans); |
3632 | 0 | } |
3633 | 0 | sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); |
3634 | 0 | if (asoc->sctp_cmt_on_off > 0) { |
3635 | | /* |
3636 | | * CMT: Using RTX_SSTHRESH policy for CMT. |
3637 | | * If CMT is being used, then pick dest with |
3638 | | * largest ssthresh for any retransmission. |
3639 | | */ |
3640 | 0 | tp1->no_fr_allowed = 1; |
3641 | 0 | alt = tp1->whoTo; |
3642 | | /*sa_ignore NO_NULL_CHK*/ |
3643 | 0 | if (asoc->sctp_cmt_pf > 0) { |
3644 | | /* JRS 5/18/07 - If CMT PF is on, use the PF version of find_alt_net() */ |
3645 | 0 | alt = sctp_find_alternate_net(stcb, alt, 2); |
3646 | 0 | } else { |
3647 | | /* JRS 5/18/07 - If only CMT is on, use the CMT version of find_alt_net() */ |
3648 | | /*sa_ignore NO_NULL_CHK*/ |
3649 | 0 | alt = sctp_find_alternate_net(stcb, alt, 1); |
3650 | 0 | } |
3651 | 0 | if (alt == NULL) { |
3652 | 0 | alt = tp1->whoTo; |
3653 | 0 | } |
3654 | | /* |
3655 | | * CUCv2: If a different dest is picked for |
3656 | | * the retransmission, then new |
3657 | | * (rtx-)pseudo_cumack needs to be tracked |
3658 | | * for orig dest. Let CUCv2 track new (rtx-) |
3659 | | * pseudo-cumack always. |
3660 | | */ |
3661 | 0 | if (tp1->whoTo) { |
3662 | 0 | tp1->whoTo->find_pseudo_cumack = 1; |
3663 | 0 | tp1->whoTo->find_rtx_pseudo_cumack = 1; |
3664 | 0 | } |
3665 | 0 | } else {/* CMT is OFF */ |
3666 | | #ifdef SCTP_FR_TO_ALTERNATE |
3667 | | /* Can we find an alternate? */ |
3668 | | alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); |
3669 | | #else |
3670 | | /* |
3671 | | * default behavior is to NOT retransmit |
3672 | | * FR's to an alternate. Armando Caro's |
3673 | | * paper details why. |
3674 | | */ |
3675 | 0 | alt = tp1->whoTo; |
3676 | 0 | #endif |
3677 | 0 | } |
3678 | |
|
3679 | 0 | tp1->rec.data.doing_fast_retransmit = 1; |
3680 | | /* mark the sending seq for possible subsequent FR's */ |
3681 | | /* |
3682 | | * SCTP_PRINTF("Marking TSN for FR new value %x\n", |
3683 | | * (uint32_t)tpi->rec.data.tsn); |
3684 | | */ |
3685 | 0 | if (TAILQ_EMPTY(&asoc->send_queue)) { |
3686 | | /* |
3687 | | * If the queue of send is empty then its |
3688 | | * the next sequence number that will be |
3689 | | * assigned so we subtract one from this to |
3690 | | * get the one we last sent. |
3691 | | */ |
3692 | 0 | tp1->rec.data.fast_retran_tsn = sending_seq; |
3693 | 0 | } else { |
3694 | | /* |
3695 | | * If there are chunks on the send queue |
3696 | | * (unsent data that has made it from the |
3697 | | * stream queues but not out the door, we |
3698 | | * take the first one (which will have the |
3699 | | * lowest TSN) and subtract one to get the |
3700 | | * one we last sent. |
3701 | | */ |
3702 | 0 | struct sctp_tmit_chunk *ttt; |
3703 | |
|
3704 | 0 | ttt = TAILQ_FIRST(&asoc->send_queue); |
3705 | 0 | tp1->rec.data.fast_retran_tsn = |
3706 | 0 | ttt->rec.data.tsn; |
3707 | 0 | } |
3708 | |
|
3709 | 0 | if (tp1->do_rtt) { |
3710 | | /* |
3711 | | * this guy had a RTO calculation pending on |
3712 | | * it, cancel it |
3713 | | */ |
3714 | 0 | if ((tp1->whoTo != NULL) && |
3715 | 0 | (tp1->whoTo->rto_needed == 0)) { |
3716 | 0 | tp1->whoTo->rto_needed = 1; |
3717 | 0 | } |
3718 | 0 | tp1->do_rtt = 0; |
3719 | 0 | } |
3720 | 0 | if (alt != tp1->whoTo) { |
3721 | | /* yes, there is an alternate. */ |
3722 | 0 | sctp_free_remote_addr(tp1->whoTo); |
3723 | | /*sa_ignore FREED_MEMORY*/ |
3724 | 0 | tp1->whoTo = alt; |
3725 | 0 | atomic_add_int(&alt->ref_count, 1); |
3726 | 0 | } |
3727 | 0 | } |
3728 | 0 | } |
3729 | 0 | } |
3730 | | |
3731 | | struct sctp_tmit_chunk * |
3732 | | sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, |
3733 | | struct sctp_association *asoc) |
3734 | 0 | { |
3735 | 0 | struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; |
3736 | 0 | struct timeval now; |
3737 | 0 | int now_filled = 0; |
3738 | |
|
3739 | 0 | if (asoc->prsctp_supported == 0) { |
3740 | 0 | return (NULL); |
3741 | 0 | } |
3742 | 0 | TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { |
3743 | 0 | if (tp1->sent != SCTP_FORWARD_TSN_SKIP && |
3744 | 0 | tp1->sent != SCTP_DATAGRAM_RESEND && |
3745 | 0 | tp1->sent != SCTP_DATAGRAM_NR_ACKED) { |
3746 | | /* no chance to advance, out of here */ |
3747 | 0 | break; |
3748 | 0 | } |
3749 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { |
3750 | 0 | if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || |
3751 | 0 | (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { |
3752 | 0 | sctp_misc_ints(SCTP_FWD_TSN_CHECK, |
3753 | 0 | asoc->advanced_peer_ack_point, |
3754 | 0 | tp1->rec.data.tsn, 0, 0); |
3755 | 0 | } |
3756 | 0 | } |
3757 | 0 | if (!PR_SCTP_ENABLED(tp1->flags)) { |
3758 | | /* |
3759 | | * We can't fwd-tsn past any that are reliable aka |
3760 | | * retransmitted until the asoc fails. |
3761 | | */ |
3762 | 0 | break; |
3763 | 0 | } |
3764 | 0 | if (!now_filled) { |
3765 | 0 | (void)SCTP_GETTIME_TIMEVAL(&now); |
3766 | 0 | now_filled = 1; |
3767 | 0 | } |
3768 | | /* |
3769 | | * now we got a chunk which is marked for another |
3770 | | * retransmission to a PR-stream but has run out its chances |
3771 | | * already maybe OR has been marked to skip now. Can we skip |
3772 | | * it if its a resend? |
3773 | | */ |
3774 | 0 | if (tp1->sent == SCTP_DATAGRAM_RESEND && |
3775 | 0 | (PR_SCTP_TTL_ENABLED(tp1->flags))) { |
3776 | | /* |
3777 | | * Now is this one marked for resend and its time is |
3778 | | * now up? |
3779 | | */ |
3780 | 0 | #if !(defined(__FreeBSD__) && !defined(__Userspace__)) |
3781 | 0 | if (timercmp(&now, &tp1->rec.data.timetodrop, >)) { |
3782 | | #else |
3783 | | if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { |
3784 | | #endif |
3785 | | /* Yes so drop it */ |
3786 | 0 | if (tp1->data) { |
3787 | 0 | (void)sctp_release_pr_sctp_chunk(stcb, tp1, |
3788 | 0 | 1, SCTP_SO_NOT_LOCKED); |
3789 | 0 | } |
3790 | 0 | } else { |
3791 | | /* |
3792 | | * No, we are done when hit one for resend |
3793 | | * whos time as not expired. |
3794 | | */ |
3795 | 0 | break; |
3796 | 0 | } |
3797 | 0 | } |
3798 | | /* |
3799 | | * Ok now if this chunk is marked to drop it we can clean up |
3800 | | * the chunk, advance our peer ack point and we can check |
3801 | | * the next chunk. |
3802 | | */ |
3803 | 0 | if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || |
3804 | 0 | (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { |
3805 | | /* advance PeerAckPoint goes forward */ |
3806 | 0 | if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) { |
3807 | 0 | asoc->advanced_peer_ack_point = tp1->rec.data.tsn; |
3808 | 0 | a_adv = tp1; |
3809 | 0 | } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) { |
3810 | | /* No update but we do save the chk */ |
3811 | 0 | a_adv = tp1; |
3812 | 0 | } |
3813 | 0 | } else { |
3814 | | /* |
3815 | | * If it is still in RESEND we can advance no |
3816 | | * further |
3817 | | */ |
3818 | 0 | break; |
3819 | 0 | } |
3820 | 0 | } |
3821 | 0 | return (a_adv); |
3822 | 0 | } |
3823 | | |
3824 | | static int |
3825 | | sctp_fs_audit(struct sctp_association *asoc) |
3826 | 0 | { |
3827 | 0 | struct sctp_tmit_chunk *chk; |
3828 | 0 | int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0; |
3829 | 0 | int ret; |
3830 | | #ifndef INVARIANTS |
3831 | | int entry_flight, entry_cnt; |
3832 | | #endif |
3833 | |
|
3834 | 0 | ret = 0; |
3835 | | #ifndef INVARIANTS |
3836 | | entry_flight = asoc->total_flight; |
3837 | | entry_cnt = asoc->total_flight_count; |
3838 | | #endif |
3839 | 0 | if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt) |
3840 | 0 | return (0); |
3841 | | |
3842 | 0 | TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { |
3843 | 0 | if (chk->sent < SCTP_DATAGRAM_RESEND) { |
3844 | 0 | SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n", |
3845 | 0 | chk->rec.data.tsn, |
3846 | 0 | chk->send_size, |
3847 | 0 | chk->snd_count); |
3848 | 0 | inflight++; |
3849 | 0 | } else if (chk->sent == SCTP_DATAGRAM_RESEND) { |
3850 | 0 | resend++; |
3851 | 0 | } else if (chk->sent < SCTP_DATAGRAM_ACKED) { |
3852 | 0 | inbetween++; |
3853 | 0 | } else if (chk->sent > SCTP_DATAGRAM_ACKED) { |
3854 | 0 | above++; |
3855 | 0 | } else { |
3856 | 0 | acked++; |
3857 | 0 | } |
3858 | 0 | } |
3859 | |
|
3860 | 0 | if ((inflight > 0) || (inbetween > 0)) { |
3861 | 0 | #ifdef INVARIANTS |
3862 | 0 | panic("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d", |
3863 | 0 | inflight, inbetween, resend, above, acked); |
3864 | | #else |
3865 | | SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n", |
3866 | | entry_flight, entry_cnt); |
3867 | | SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n", |
3868 | | inflight, inbetween, resend, above, acked); |
3869 | | ret = 1; |
3870 | | #endif |
3871 | 0 | } |
3872 | 0 | return (ret); |
3873 | 0 | } |
3874 | | |
3875 | | static void |
3876 | | sctp_window_probe_recovery(struct sctp_tcb *stcb, |
3877 | | struct sctp_association *asoc, |
3878 | | struct sctp_tmit_chunk *tp1) |
3879 | 0 | { |
3880 | 0 | tp1->window_probe = 0; |
3881 | 0 | if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) { |
3882 | | /* TSN's skipped we do NOT move back. */ |
3883 | 0 | sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD, |
3884 | 0 | tp1->whoTo ? tp1->whoTo->flight_size : 0, |
3885 | 0 | tp1->book_size, |
3886 | 0 | (uint32_t)(uintptr_t)tp1->whoTo, |
3887 | 0 | tp1->rec.data.tsn); |
3888 | 0 | return; |
3889 | 0 | } |
3890 | | /* First setup this by shrinking flight */ |
3891 | 0 | if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { |
3892 | 0 | (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo, |
3893 | 0 | tp1); |
3894 | 0 | } |
3895 | 0 | sctp_flight_size_decrease(tp1); |
3896 | 0 | sctp_total_flight_decrease(stcb, tp1); |
3897 | | /* Now mark for resend */ |
3898 | 0 | tp1->sent = SCTP_DATAGRAM_RESEND; |
3899 | 0 | sctp_ucount_incr(asoc->sent_queue_retran_cnt); |
3900 | |
|
3901 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
3902 | 0 | sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, |
3903 | 0 | tp1->whoTo->flight_size, |
3904 | 0 | tp1->book_size, |
3905 | 0 | (uint32_t)(uintptr_t)tp1->whoTo, |
3906 | 0 | tp1->rec.data.tsn); |
3907 | 0 | } |
3908 | 0 | } |
3909 | | |
3910 | | void |
3911 | | sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, |
3912 | | uint32_t rwnd, int *abort_now, int ecne_seen) |
3913 | 0 | { |
3914 | 0 | struct sctp_nets *net; |
3915 | 0 | struct sctp_association *asoc; |
3916 | 0 | struct sctp_tmit_chunk *tp1, *tp2; |
3917 | 0 | uint32_t old_rwnd; |
3918 | 0 | int win_probe_recovery = 0; |
3919 | 0 | int win_probe_recovered = 0; |
3920 | 0 | int j, done_once = 0; |
3921 | 0 | int rto_ok = 1; |
3922 | 0 | uint32_t send_s; |
3923 | |
|
3924 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { |
3925 | 0 | sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, |
3926 | 0 | rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); |
3927 | 0 | } |
3928 | 0 | SCTP_TCB_LOCK_ASSERT(stcb); |
3929 | | #ifdef SCTP_ASOCLOG_OF_TSNS |
3930 | | stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; |
3931 | | stcb->asoc.cumack_log_at++; |
3932 | | if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { |
3933 | | stcb->asoc.cumack_log_at = 0; |
3934 | | } |
3935 | | #endif |
3936 | 0 | asoc = &stcb->asoc; |
3937 | 0 | old_rwnd = asoc->peers_rwnd; |
3938 | 0 | if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) { |
3939 | | /* old ack */ |
3940 | 0 | return; |
3941 | 0 | } else if (asoc->last_acked_seq == cumack) { |
3942 | | /* Window update sack */ |
3943 | 0 | asoc->peers_rwnd = sctp_sbspace_sub(rwnd, |
3944 | 0 | (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); |
3945 | 0 | if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { |
3946 | | /* SWS sender side engages */ |
3947 | 0 | asoc->peers_rwnd = 0; |
3948 | 0 | } |
3949 | 0 | if (asoc->peers_rwnd > old_rwnd) { |
3950 | 0 | goto again; |
3951 | 0 | } |
3952 | 0 | return; |
3953 | 0 | } |
3954 | | |
3955 | | /* First setup for CC stuff */ |
3956 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
3957 | 0 | if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) { |
3958 | | /* Drag along the window_tsn for cwr's */ |
3959 | 0 | net->cwr_window_tsn = cumack; |
3960 | 0 | } |
3961 | 0 | net->prev_cwnd = net->cwnd; |
3962 | 0 | net->net_ack = 0; |
3963 | 0 | net->net_ack2 = 0; |
3964 | | |
3965 | | /* |
3966 | | * CMT: Reset CUC and Fast recovery algo variables before |
3967 | | * SACK processing |
3968 | | */ |
3969 | 0 | net->new_pseudo_cumack = 0; |
3970 | 0 | net->will_exit_fast_recovery = 0; |
3971 | 0 | if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { |
3972 | 0 | (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net); |
3973 | 0 | } |
3974 | 0 | } |
3975 | 0 | if (!TAILQ_EMPTY(&asoc->sent_queue)) { |
3976 | 0 | tp1 = TAILQ_LAST(&asoc->sent_queue, |
3977 | 0 | sctpchunk_listhead); |
3978 | 0 | send_s = tp1->rec.data.tsn + 1; |
3979 | 0 | } else { |
3980 | 0 | send_s = asoc->sending_seq; |
3981 | 0 | } |
3982 | 0 | if (SCTP_TSN_GE(cumack, send_s)) { |
3983 | 0 | struct mbuf *op_err; |
3984 | 0 | char msg[SCTP_DIAG_INFO_LEN]; |
3985 | |
|
3986 | 0 | *abort_now = 1; |
3987 | | /* XXX */ |
3988 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), |
3989 | 0 | "Cum ack %8.8x greater or equal than TSN %8.8x", |
3990 | 0 | cumack, send_s); |
3991 | 0 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
3992 | 0 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; |
3993 | 0 | sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
3994 | 0 | return; |
3995 | 0 | } |
3996 | 0 | asoc->this_sack_highest_gap = cumack; |
3997 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { |
3998 | 0 | sctp_misc_ints(SCTP_THRESHOLD_CLEAR, |
3999 | 0 | stcb->asoc.overall_error_count, |
4000 | 0 | 0, |
4001 | 0 | SCTP_FROM_SCTP_INDATA, |
4002 | 0 | __LINE__); |
4003 | 0 | } |
4004 | 0 | stcb->asoc.overall_error_count = 0; |
4005 | 0 | if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) { |
4006 | | /* process the new consecutive TSN first */ |
4007 | 0 | TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { |
4008 | 0 | if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) { |
4009 | 0 | if (tp1->sent == SCTP_DATAGRAM_UNSENT) { |
4010 | 0 | SCTP_PRINTF("Warning, an unsent is now acked?\n"); |
4011 | 0 | } |
4012 | 0 | if (tp1->sent < SCTP_DATAGRAM_ACKED) { |
4013 | | /* |
4014 | | * If it is less than ACKED, it is |
4015 | | * now no-longer in flight. Higher |
4016 | | * values may occur during marking |
4017 | | */ |
4018 | 0 | if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
4019 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
4020 | 0 | sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, |
4021 | 0 | tp1->whoTo->flight_size, |
4022 | 0 | tp1->book_size, |
4023 | 0 | (uint32_t)(uintptr_t)tp1->whoTo, |
4024 | 0 | tp1->rec.data.tsn); |
4025 | 0 | } |
4026 | 0 | sctp_flight_size_decrease(tp1); |
4027 | 0 | if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { |
4028 | 0 | (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo, |
4029 | 0 | tp1); |
4030 | 0 | } |
4031 | | /* sa_ignore NO_NULL_CHK */ |
4032 | 0 | sctp_total_flight_decrease(stcb, tp1); |
4033 | 0 | } |
4034 | 0 | tp1->whoTo->net_ack += tp1->send_size; |
4035 | 0 | if (tp1->snd_count < 2) { |
4036 | | /* |
4037 | | * True non-retransmitted |
4038 | | * chunk |
4039 | | */ |
4040 | 0 | tp1->whoTo->net_ack2 += |
4041 | 0 | tp1->send_size; |
4042 | | |
4043 | | /* update RTO too? */ |
4044 | 0 | if (tp1->do_rtt) { |
4045 | 0 | if (rto_ok && |
4046 | 0 | sctp_calculate_rto(stcb, |
4047 | 0 | &stcb->asoc, |
4048 | 0 | tp1->whoTo, |
4049 | 0 | &tp1->sent_rcv_time, |
4050 | 0 | SCTP_RTT_FROM_DATA)) { |
4051 | 0 | rto_ok = 0; |
4052 | 0 | } |
4053 | 0 | if (tp1->whoTo->rto_needed == 0) { |
4054 | 0 | tp1->whoTo->rto_needed = 1; |
4055 | 0 | } |
4056 | 0 | tp1->do_rtt = 0; |
4057 | 0 | } |
4058 | 0 | } |
4059 | | /* |
4060 | | * CMT: CUCv2 algorithm. From the |
4061 | | * cumack'd TSNs, for each TSN being |
4062 | | * acked for the first time, set the |
4063 | | * following variables for the |
4064 | | * corresp destination. |
4065 | | * new_pseudo_cumack will trigger a |
4066 | | * cwnd update. |
4067 | | * find_(rtx_)pseudo_cumack will |
4068 | | * trigger search for the next |
4069 | | * expected (rtx-)pseudo-cumack. |
4070 | | */ |
4071 | 0 | tp1->whoTo->new_pseudo_cumack = 1; |
4072 | 0 | tp1->whoTo->find_pseudo_cumack = 1; |
4073 | 0 | tp1->whoTo->find_rtx_pseudo_cumack = 1; |
4074 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { |
4075 | | /* sa_ignore NO_NULL_CHK */ |
4076 | 0 | sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); |
4077 | 0 | } |
4078 | 0 | } |
4079 | 0 | if (tp1->sent == SCTP_DATAGRAM_RESEND) { |
4080 | 0 | sctp_ucount_decr(asoc->sent_queue_retran_cnt); |
4081 | 0 | } |
4082 | 0 | if (tp1->rec.data.chunk_was_revoked) { |
4083 | | /* deflate the cwnd */ |
4084 | 0 | tp1->whoTo->cwnd -= tp1->book_size; |
4085 | 0 | tp1->rec.data.chunk_was_revoked = 0; |
4086 | 0 | } |
4087 | 0 | if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { |
4088 | 0 | if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) { |
4089 | 0 | asoc->strmout[tp1->rec.data.sid].chunks_on_queues--; |
4090 | 0 | #ifdef INVARIANTS |
4091 | 0 | } else { |
4092 | 0 | panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); |
4093 | 0 | #endif |
4094 | 0 | } |
4095 | 0 | } |
4096 | 0 | if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) && |
4097 | 0 | (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && |
4098 | 0 | TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) { |
4099 | 0 | asoc->trigger_reset = 1; |
4100 | 0 | } |
4101 | 0 | TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); |
4102 | 0 | if (tp1->data) { |
4103 | | /* sa_ignore NO_NULL_CHK */ |
4104 | 0 | sctp_free_bufspace(stcb, asoc, tp1, 1); |
4105 | 0 | sctp_m_freem(tp1->data); |
4106 | 0 | tp1->data = NULL; |
4107 | 0 | } |
4108 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { |
4109 | 0 | sctp_log_sack(asoc->last_acked_seq, |
4110 | 0 | cumack, |
4111 | 0 | tp1->rec.data.tsn, |
4112 | 0 | 0, |
4113 | 0 | 0, |
4114 | 0 | SCTP_LOG_FREE_SENT); |
4115 | 0 | } |
4116 | 0 | asoc->sent_queue_cnt--; |
4117 | 0 | sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); |
4118 | 0 | } else { |
4119 | 0 | break; |
4120 | 0 | } |
4121 | 0 | } |
4122 | 0 | } |
4123 | 0 | #if defined(__Userspace__) |
4124 | 0 | if (stcb->sctp_ep->recv_callback) { |
4125 | 0 | if (stcb->sctp_socket) { |
4126 | 0 | uint32_t inqueue_bytes, sb_free_now; |
4127 | 0 | struct sctp_inpcb *inp; |
4128 | |
|
4129 | 0 | inp = stcb->sctp_ep; |
4130 | 0 | inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); |
4131 | 0 | sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv); |
4132 | | |
4133 | | /* check if the amount free in the send socket buffer crossed the threshold */ |
4134 | 0 | if (inp->send_callback && |
4135 | 0 | (((inp->send_sb_threshold > 0) && |
4136 | 0 | (sb_free_now >= inp->send_sb_threshold) && |
4137 | 0 | (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) || |
4138 | 0 | (inp->send_sb_threshold == 0))) { |
4139 | 0 | atomic_add_int(&stcb->asoc.refcnt, 1); |
4140 | 0 | SCTP_TCB_UNLOCK(stcb); |
4141 | 0 | inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info); |
4142 | 0 | SCTP_TCB_LOCK(stcb); |
4143 | 0 | atomic_subtract_int(&stcb->asoc.refcnt, 1); |
4144 | 0 | } |
4145 | 0 | } |
4146 | 0 | } else if (stcb->sctp_socket) { |
4147 | | #else |
4148 | | /* sa_ignore NO_NULL_CHK */ |
4149 | | if (stcb->sctp_socket) { |
4150 | | #endif |
4151 | | #if defined(__APPLE__) && !defined(__Userspace__) |
4152 | | struct socket *so; |
4153 | | |
4154 | | #endif |
4155 | 0 | SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); |
4156 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { |
4157 | | /* sa_ignore NO_NULL_CHK */ |
4158 | 0 | sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK); |
4159 | 0 | } |
4160 | | #if defined(__APPLE__) && !defined(__Userspace__) |
4161 | | so = SCTP_INP_SO(stcb->sctp_ep); |
4162 | | atomic_add_int(&stcb->asoc.refcnt, 1); |
4163 | | SCTP_TCB_UNLOCK(stcb); |
4164 | | SCTP_SOCKET_LOCK(so, 1); |
4165 | | SCTP_TCB_LOCK(stcb); |
4166 | | atomic_subtract_int(&stcb->asoc.refcnt, 1); |
4167 | | if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { |
4168 | | /* assoc was freed while we were unlocked */ |
4169 | | SCTP_SOCKET_UNLOCK(so, 1); |
4170 | | return; |
4171 | | } |
4172 | | #endif |
4173 | 0 | sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); |
4174 | | #if defined(__APPLE__) && !defined(__Userspace__) |
4175 | | SCTP_SOCKET_UNLOCK(so, 1); |
4176 | | #endif |
4177 | 0 | } else { |
4178 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { |
4179 | 0 | sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK); |
4180 | 0 | } |
4181 | 0 | } |
4182 | | |
4183 | | /* JRS - Use the congestion control given in the CC module */ |
4184 | 0 | if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) { |
4185 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
4186 | 0 | if (net->net_ack2 > 0) { |
4187 | | /* |
4188 | | * Karn's rule applies to clearing error count, this |
4189 | | * is optional. |
4190 | | */ |
4191 | 0 | net->error_count = 0; |
4192 | 0 | if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) { |
4193 | | /* addr came good */ |
4194 | 0 | net->dest_state |= SCTP_ADDR_REACHABLE; |
4195 | 0 | sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, |
4196 | 0 | 0, (void *)net, SCTP_SO_NOT_LOCKED); |
4197 | 0 | } |
4198 | 0 | if (net == stcb->asoc.primary_destination) { |
4199 | 0 | if (stcb->asoc.alternate) { |
4200 | | /* release the alternate, primary is good */ |
4201 | 0 | sctp_free_remote_addr(stcb->asoc.alternate); |
4202 | 0 | stcb->asoc.alternate = NULL; |
4203 | 0 | } |
4204 | 0 | } |
4205 | 0 | if (net->dest_state & SCTP_ADDR_PF) { |
4206 | 0 | net->dest_state &= ~SCTP_ADDR_PF; |
4207 | 0 | sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, |
4208 | 0 | stcb->sctp_ep, stcb, net, |
4209 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); |
4210 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); |
4211 | 0 | asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); |
4212 | | /* Done with this net */ |
4213 | 0 | net->net_ack = 0; |
4214 | 0 | } |
4215 | | /* restore any doubled timers */ |
4216 | 0 | net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; |
4217 | 0 | if (net->RTO < stcb->asoc.minrto) { |
4218 | 0 | net->RTO = stcb->asoc.minrto; |
4219 | 0 | } |
4220 | 0 | if (net->RTO > stcb->asoc.maxrto) { |
4221 | 0 | net->RTO = stcb->asoc.maxrto; |
4222 | 0 | } |
4223 | 0 | } |
4224 | 0 | } |
4225 | 0 | asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); |
4226 | 0 | } |
4227 | 0 | asoc->last_acked_seq = cumack; |
4228 | |
|
4229 | 0 | if (TAILQ_EMPTY(&asoc->sent_queue)) { |
4230 | | /* nothing left in-flight */ |
4231 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
4232 | 0 | net->flight_size = 0; |
4233 | 0 | net->partial_bytes_acked = 0; |
4234 | 0 | } |
4235 | 0 | asoc->total_flight = 0; |
4236 | 0 | asoc->total_flight_count = 0; |
4237 | 0 | } |
4238 | | |
4239 | | /* RWND update */ |
4240 | 0 | asoc->peers_rwnd = sctp_sbspace_sub(rwnd, |
4241 | 0 | (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); |
4242 | 0 | if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { |
4243 | | /* SWS sender side engages */ |
4244 | 0 | asoc->peers_rwnd = 0; |
4245 | 0 | } |
4246 | 0 | if (asoc->peers_rwnd > old_rwnd) { |
4247 | 0 | win_probe_recovery = 1; |
4248 | 0 | } |
4249 | | /* Now assure a timer where data is queued at */ |
4250 | 0 | again: |
4251 | 0 | j = 0; |
4252 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
4253 | 0 | if (win_probe_recovery && (net->window_probe)) { |
4254 | 0 | win_probe_recovered = 1; |
4255 | | /* |
4256 | | * Find first chunk that was used with window probe |
4257 | | * and clear the sent |
4258 | | */ |
4259 | | /* sa_ignore FREED_MEMORY */ |
4260 | 0 | TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
4261 | 0 | if (tp1->window_probe) { |
4262 | | /* move back to data send queue */ |
4263 | 0 | sctp_window_probe_recovery(stcb, asoc, tp1); |
4264 | 0 | break; |
4265 | 0 | } |
4266 | 0 | } |
4267 | 0 | } |
4268 | 0 | if (net->flight_size) { |
4269 | 0 | j++; |
4270 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); |
4271 | 0 | if (net->window_probe) { |
4272 | 0 | net->window_probe = 0; |
4273 | 0 | } |
4274 | 0 | } else { |
4275 | 0 | if (net->window_probe) { |
4276 | | /* In window probes we must assure a timer is still running there */ |
4277 | 0 | net->window_probe = 0; |
4278 | 0 | if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { |
4279 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); |
4280 | 0 | } |
4281 | 0 | } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { |
4282 | 0 | sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, |
4283 | 0 | stcb, net, |
4284 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); |
4285 | 0 | } |
4286 | 0 | } |
4287 | 0 | } |
4288 | 0 | if ((j == 0) && |
4289 | 0 | (!TAILQ_EMPTY(&asoc->sent_queue)) && |
4290 | 0 | (asoc->sent_queue_retran_cnt == 0) && |
4291 | 0 | (win_probe_recovered == 0) && |
4292 | 0 | (done_once == 0)) { |
4293 | | /* huh, this should not happen unless all packets |
4294 | | * are PR-SCTP and marked to skip of course. |
4295 | | */ |
4296 | 0 | if (sctp_fs_audit(asoc)) { |
4297 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
4298 | 0 | net->flight_size = 0; |
4299 | 0 | } |
4300 | 0 | asoc->total_flight = 0; |
4301 | 0 | asoc->total_flight_count = 0; |
4302 | 0 | asoc->sent_queue_retran_cnt = 0; |
4303 | 0 | TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
4304 | 0 | if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
4305 | 0 | sctp_flight_size_increase(tp1); |
4306 | 0 | sctp_total_flight_increase(stcb, tp1); |
4307 | 0 | } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { |
4308 | 0 | sctp_ucount_incr(asoc->sent_queue_retran_cnt); |
4309 | 0 | } |
4310 | 0 | } |
4311 | 0 | } |
4312 | 0 | done_once = 1; |
4313 | 0 | goto again; |
4314 | 0 | } |
4315 | | /**********************************/ |
4316 | | /* Now what about shutdown issues */ |
4317 | | /**********************************/ |
4318 | 0 | if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { |
4319 | | /* nothing left on sendqueue.. consider done */ |
4320 | | /* clean up */ |
4321 | 0 | if ((asoc->stream_queue_cnt == 1) && |
4322 | 0 | ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || |
4323 | 0 | (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && |
4324 | 0 | ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) { |
4325 | 0 | SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); |
4326 | 0 | } |
4327 | 0 | if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || |
4328 | 0 | (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && |
4329 | 0 | (asoc->stream_queue_cnt == 1) && |
4330 | 0 | (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { |
4331 | 0 | struct mbuf *op_err; |
4332 | |
|
4333 | 0 | *abort_now = 1; |
4334 | | /* XXX */ |
4335 | 0 | op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); |
4336 | 0 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28; |
4337 | 0 | sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
4338 | 0 | return; |
4339 | 0 | } |
4340 | 0 | if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && |
4341 | 0 | (asoc->stream_queue_cnt == 0)) { |
4342 | 0 | struct sctp_nets *netp; |
4343 | |
|
4344 | 0 | if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || |
4345 | 0 | (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { |
4346 | 0 | SCTP_STAT_DECR_GAUGE32(sctps_currestab); |
4347 | 0 | } |
4348 | 0 | SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); |
4349 | 0 | sctp_stop_timers_for_shutdown(stcb); |
4350 | 0 | if (asoc->alternate) { |
4351 | 0 | netp = asoc->alternate; |
4352 | 0 | } else { |
4353 | 0 | netp = asoc->primary_destination; |
4354 | 0 | } |
4355 | 0 | sctp_send_shutdown(stcb, netp); |
4356 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, |
4357 | 0 | stcb->sctp_ep, stcb, netp); |
4358 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, |
4359 | 0 | stcb->sctp_ep, stcb, NULL); |
4360 | 0 | } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) && |
4361 | 0 | (asoc->stream_queue_cnt == 0)) { |
4362 | 0 | struct sctp_nets *netp; |
4363 | |
|
4364 | 0 | SCTP_STAT_DECR_GAUGE32(sctps_currestab); |
4365 | 0 | SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT); |
4366 | 0 | sctp_stop_timers_for_shutdown(stcb); |
4367 | 0 | if (asoc->alternate) { |
4368 | 0 | netp = asoc->alternate; |
4369 | 0 | } else { |
4370 | 0 | netp = asoc->primary_destination; |
4371 | 0 | } |
4372 | 0 | sctp_send_shutdown_ack(stcb, netp); |
4373 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, |
4374 | 0 | stcb->sctp_ep, stcb, netp); |
4375 | 0 | } |
4376 | 0 | } |
4377 | | /*********************************************/ |
4378 | | /* Here we perform PR-SCTP procedures */ |
4379 | | /* (section 4.2) */ |
4380 | | /*********************************************/ |
4381 | | /* C1. update advancedPeerAckPoint */ |
4382 | 0 | if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) { |
4383 | 0 | asoc->advanced_peer_ack_point = cumack; |
4384 | 0 | } |
4385 | | /* PR-Sctp issues need to be addressed too */ |
4386 | 0 | if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { |
4387 | 0 | struct sctp_tmit_chunk *lchk; |
4388 | 0 | uint32_t old_adv_peer_ack_point; |
4389 | |
|
4390 | 0 | old_adv_peer_ack_point = asoc->advanced_peer_ack_point; |
4391 | 0 | lchk = sctp_try_advance_peer_ack_point(stcb, asoc); |
4392 | | /* C3. See if we need to send a Fwd-TSN */ |
4393 | 0 | if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) { |
4394 | | /* |
4395 | | * ISSUE with ECN, see FWD-TSN processing. |
4396 | | */ |
4397 | 0 | if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { |
4398 | 0 | send_forward_tsn(stcb, asoc); |
4399 | 0 | } else if (lchk) { |
4400 | | /* try to FR fwd-tsn's that get lost too */ |
4401 | 0 | if (lchk->rec.data.fwd_tsn_cnt >= 3) { |
4402 | 0 | send_forward_tsn(stcb, asoc); |
4403 | 0 | } |
4404 | 0 | } |
4405 | 0 | } |
4406 | 0 | for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) { |
4407 | 0 | if (lchk->whoTo != NULL) { |
4408 | 0 | break; |
4409 | 0 | } |
4410 | 0 | } |
4411 | 0 | if (lchk != NULL) { |
4412 | | /* Assure a timer is up */ |
4413 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SEND, |
4414 | 0 | stcb->sctp_ep, stcb, lchk->whoTo); |
4415 | 0 | } |
4416 | 0 | } |
4417 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { |
4418 | 0 | sctp_misc_ints(SCTP_SACK_RWND_UPDATE, |
4419 | 0 | rwnd, |
4420 | 0 | stcb->asoc.peers_rwnd, |
4421 | 0 | stcb->asoc.total_flight, |
4422 | 0 | stcb->asoc.total_output_queue_size); |
4423 | 0 | } |
4424 | 0 | } |
4425 | | |
4426 | | void |
4427 | | sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, |
4428 | | struct sctp_tcb *stcb, |
4429 | | uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup, |
4430 | | int *abort_now, uint8_t flags, |
4431 | | uint32_t cum_ack, uint32_t rwnd, int ecne_seen) |
4432 | 0 | { |
4433 | 0 | struct sctp_association *asoc; |
4434 | 0 | struct sctp_tmit_chunk *tp1, *tp2; |
4435 | 0 | uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack; |
4436 | 0 | uint16_t wake_him = 0; |
4437 | 0 | uint32_t send_s = 0; |
4438 | 0 | long j; |
4439 | 0 | int accum_moved = 0; |
4440 | 0 | int will_exit_fast_recovery = 0; |
4441 | 0 | uint32_t a_rwnd, old_rwnd; |
4442 | 0 | int win_probe_recovery = 0; |
4443 | 0 | int win_probe_recovered = 0; |
4444 | 0 | struct sctp_nets *net = NULL; |
4445 | 0 | int done_once; |
4446 | 0 | int rto_ok = 1; |
4447 | 0 | uint8_t reneged_all = 0; |
4448 | 0 | uint8_t cmt_dac_flag; |
4449 | | /* |
4450 | | * we take any chance we can to service our queues since we cannot |
4451 | | * get awoken when the socket is read from :< |
4452 | | */ |
4453 | | /* |
4454 | | * Now perform the actual SACK handling: 1) Verify that it is not an |
4455 | | * old sack, if so discard. 2) If there is nothing left in the send |
4456 | | * queue (cum-ack is equal to last acked) then you have a duplicate |
4457 | | * too, update any rwnd change and verify no timers are running. |
4458 | | * then return. 3) Process any new consecutive data i.e. cum-ack |
4459 | | * moved process these first and note that it moved. 4) Process any |
4460 | | * sack blocks. 5) Drop any acked from the queue. 6) Check for any |
4461 | | * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left, |
4462 | | * sync up flightsizes and things, stop all timers and also check |
4463 | | * for shutdown_pending state. If so then go ahead and send off the |
4464 | | * shutdown. If in shutdown recv, send off the shutdown-ack and |
4465 | | * start that timer, Ret. 9) Strike any non-acked things and do FR |
4466 | | * procedure if needed being sure to set the FR flag. 10) Do pr-sctp |
4467 | | * procedures. 11) Apply any FR penalties. 12) Assure we will SACK |
4468 | | * if in shutdown_recv state. |
4469 | | */ |
4470 | 0 | SCTP_TCB_LOCK_ASSERT(stcb); |
4471 | | /* CMT DAC algo */ |
4472 | 0 | this_sack_lowest_newack = 0; |
4473 | 0 | SCTP_STAT_INCR(sctps_slowpath_sack); |
4474 | 0 | last_tsn = cum_ack; |
4475 | 0 | cmt_dac_flag = flags & SCTP_SACK_CMT_DAC; |
4476 | | #ifdef SCTP_ASOCLOG_OF_TSNS |
4477 | | stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; |
4478 | | stcb->asoc.cumack_log_at++; |
4479 | | if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { |
4480 | | stcb->asoc.cumack_log_at = 0; |
4481 | | } |
4482 | | #endif |
4483 | 0 | a_rwnd = rwnd; |
4484 | |
|
4485 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { |
4486 | 0 | sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, |
4487 | 0 | rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); |
4488 | 0 | } |
4489 | |
|
4490 | 0 | old_rwnd = stcb->asoc.peers_rwnd; |
4491 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { |
4492 | 0 | sctp_misc_ints(SCTP_THRESHOLD_CLEAR, |
4493 | 0 | stcb->asoc.overall_error_count, |
4494 | 0 | 0, |
4495 | 0 | SCTP_FROM_SCTP_INDATA, |
4496 | 0 | __LINE__); |
4497 | 0 | } |
4498 | 0 | stcb->asoc.overall_error_count = 0; |
4499 | 0 | asoc = &stcb->asoc; |
4500 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { |
4501 | 0 | sctp_log_sack(asoc->last_acked_seq, |
4502 | 0 | cum_ack, |
4503 | 0 | 0, |
4504 | 0 | num_seg, |
4505 | 0 | num_dup, |
4506 | 0 | SCTP_LOG_NEW_SACK); |
4507 | 0 | } |
4508 | 0 | if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) { |
4509 | 0 | uint16_t i; |
4510 | 0 | uint32_t *dupdata, dblock; |
4511 | |
|
4512 | 0 | for (i = 0; i < num_dup; i++) { |
4513 | 0 | dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t), |
4514 | 0 | sizeof(uint32_t), (uint8_t *)&dblock); |
4515 | 0 | if (dupdata == NULL) { |
4516 | 0 | break; |
4517 | 0 | } |
4518 | 0 | sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); |
4519 | 0 | } |
4520 | 0 | } |
4521 | | /* reality check */ |
4522 | 0 | if (!TAILQ_EMPTY(&asoc->sent_queue)) { |
4523 | 0 | tp1 = TAILQ_LAST(&asoc->sent_queue, |
4524 | 0 | sctpchunk_listhead); |
4525 | 0 | send_s = tp1->rec.data.tsn + 1; |
4526 | 0 | } else { |
4527 | 0 | tp1 = NULL; |
4528 | 0 | send_s = asoc->sending_seq; |
4529 | 0 | } |
4530 | 0 | if (SCTP_TSN_GE(cum_ack, send_s)) { |
4531 | 0 | struct mbuf *op_err; |
4532 | 0 | char msg[SCTP_DIAG_INFO_LEN]; |
4533 | | |
4534 | | /* |
4535 | | * no way, we have not even sent this TSN out yet. |
4536 | | * Peer is hopelessly messed up with us. |
4537 | | */ |
4538 | 0 | SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n", |
4539 | 0 | cum_ack, send_s); |
4540 | 0 | if (tp1) { |
4541 | 0 | SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n", |
4542 | 0 | tp1->rec.data.tsn, (void *)tp1); |
4543 | 0 | } |
4544 | 0 | hopeless_peer: |
4545 | 0 | *abort_now = 1; |
4546 | | /* XXX */ |
4547 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), |
4548 | 0 | "Cum ack %8.8x greater or equal than TSN %8.8x", |
4549 | 0 | cum_ack, send_s); |
4550 | 0 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
4551 | 0 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29; |
4552 | 0 | sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
4553 | 0 | return; |
4554 | 0 | } |
4555 | | /**********************/ |
4556 | | /* 1) check the range */ |
4557 | | /**********************/ |
4558 | 0 | if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) { |
4559 | | /* acking something behind */ |
4560 | 0 | return; |
4561 | 0 | } |
4562 | | |
4563 | | /* update the Rwnd of the peer */ |
4564 | 0 | if (TAILQ_EMPTY(&asoc->sent_queue) && |
4565 | 0 | TAILQ_EMPTY(&asoc->send_queue) && |
4566 | 0 | (asoc->stream_queue_cnt == 0)) { |
4567 | | /* nothing left on send/sent and strmq */ |
4568 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { |
4569 | 0 | sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, |
4570 | 0 | asoc->peers_rwnd, 0, 0, a_rwnd); |
4571 | 0 | } |
4572 | 0 | asoc->peers_rwnd = a_rwnd; |
4573 | 0 | if (asoc->sent_queue_retran_cnt) { |
4574 | 0 | asoc->sent_queue_retran_cnt = 0; |
4575 | 0 | } |
4576 | 0 | if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { |
4577 | | /* SWS sender side engages */ |
4578 | 0 | asoc->peers_rwnd = 0; |
4579 | 0 | } |
4580 | | /* stop any timers */ |
4581 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
4582 | 0 | sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, |
4583 | 0 | stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); |
4584 | 0 | net->partial_bytes_acked = 0; |
4585 | 0 | net->flight_size = 0; |
4586 | 0 | } |
4587 | 0 | asoc->total_flight = 0; |
4588 | 0 | asoc->total_flight_count = 0; |
4589 | 0 | return; |
4590 | 0 | } |
4591 | | /* |
4592 | | * We init netAckSz and netAckSz2 to 0. These are used to track 2 |
4593 | | * things. The total byte count acked is tracked in netAckSz AND |
4594 | | * netAck2 is used to track the total bytes acked that are un- |
4595 | | * ambiguous and were never retransmitted. We track these on a per |
4596 | | * destination address basis. |
4597 | | */ |
4598 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
4599 | 0 | if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) { |
4600 | | /* Drag along the window_tsn for cwr's */ |
4601 | 0 | net->cwr_window_tsn = cum_ack; |
4602 | 0 | } |
4603 | 0 | net->prev_cwnd = net->cwnd; |
4604 | 0 | net->net_ack = 0; |
4605 | 0 | net->net_ack2 = 0; |
4606 | | |
4607 | | /* |
4608 | | * CMT: Reset CUC and Fast recovery algo variables before |
4609 | | * SACK processing |
4610 | | */ |
4611 | 0 | net->new_pseudo_cumack = 0; |
4612 | 0 | net->will_exit_fast_recovery = 0; |
4613 | 0 | if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { |
4614 | 0 | (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net); |
4615 | 0 | } |
4616 | | |
4617 | | /* |
4618 | | * CMT: SFR algo (and HTNA) - this_sack_highest_newack has |
4619 | | * to be greater than the cumack. Also reset saw_newack to 0 |
4620 | | * for all dests. |
4621 | | */ |
4622 | 0 | net->saw_newack = 0; |
4623 | 0 | net->this_sack_highest_newack = last_tsn; |
4624 | 0 | } |
4625 | | /* process the new consecutive TSN first */ |
4626 | 0 | TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
4627 | 0 | if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) { |
4628 | 0 | if (tp1->sent != SCTP_DATAGRAM_UNSENT) { |
4629 | 0 | accum_moved = 1; |
4630 | 0 | if (tp1->sent < SCTP_DATAGRAM_ACKED) { |
4631 | | /* |
4632 | | * If it is less than ACKED, it is |
4633 | | * now no-longer in flight. Higher |
4634 | | * values may occur during marking |
4635 | | */ |
4636 | 0 | if ((tp1->whoTo->dest_state & |
4637 | 0 | SCTP_ADDR_UNCONFIRMED) && |
4638 | 0 | (tp1->snd_count < 2)) { |
4639 | | /* |
4640 | | * If there was no retran |
4641 | | * and the address is |
4642 | | * un-confirmed and we sent |
4643 | | * there and are now |
4644 | | * sacked.. its confirmed, |
4645 | | * mark it so. |
4646 | | */ |
4647 | 0 | tp1->whoTo->dest_state &= |
4648 | 0 | ~SCTP_ADDR_UNCONFIRMED; |
4649 | 0 | } |
4650 | 0 | if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
4651 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
4652 | 0 | sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, |
4653 | 0 | tp1->whoTo->flight_size, |
4654 | 0 | tp1->book_size, |
4655 | 0 | (uint32_t)(uintptr_t)tp1->whoTo, |
4656 | 0 | tp1->rec.data.tsn); |
4657 | 0 | } |
4658 | 0 | sctp_flight_size_decrease(tp1); |
4659 | 0 | sctp_total_flight_decrease(stcb, tp1); |
4660 | 0 | if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { |
4661 | 0 | (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo, |
4662 | 0 | tp1); |
4663 | 0 | } |
4664 | 0 | } |
4665 | 0 | tp1->whoTo->net_ack += tp1->send_size; |
4666 | | |
4667 | | /* CMT SFR and DAC algos */ |
4668 | 0 | this_sack_lowest_newack = tp1->rec.data.tsn; |
4669 | 0 | tp1->whoTo->saw_newack = 1; |
4670 | |
|
4671 | 0 | if (tp1->snd_count < 2) { |
4672 | | /* |
4673 | | * True non-retransmitted |
4674 | | * chunk |
4675 | | */ |
4676 | 0 | tp1->whoTo->net_ack2 += |
4677 | 0 | tp1->send_size; |
4678 | | |
4679 | | /* update RTO too? */ |
4680 | 0 | if (tp1->do_rtt) { |
4681 | 0 | if (rto_ok && |
4682 | 0 | sctp_calculate_rto(stcb, |
4683 | 0 | &stcb->asoc, |
4684 | 0 | tp1->whoTo, |
4685 | 0 | &tp1->sent_rcv_time, |
4686 | 0 | SCTP_RTT_FROM_DATA)) { |
4687 | 0 | rto_ok = 0; |
4688 | 0 | } |
4689 | 0 | if (tp1->whoTo->rto_needed == 0) { |
4690 | 0 | tp1->whoTo->rto_needed = 1; |
4691 | 0 | } |
4692 | 0 | tp1->do_rtt = 0; |
4693 | 0 | } |
4694 | 0 | } |
4695 | | /* |
4696 | | * CMT: CUCv2 algorithm. From the |
4697 | | * cumack'd TSNs, for each TSN being |
4698 | | * acked for the first time, set the |
4699 | | * following variables for the |
4700 | | * corresp destination. |
4701 | | * new_pseudo_cumack will trigger a |
4702 | | * cwnd update. |
4703 | | * find_(rtx_)pseudo_cumack will |
4704 | | * trigger search for the next |
4705 | | * expected (rtx-)pseudo-cumack. |
4706 | | */ |
4707 | 0 | tp1->whoTo->new_pseudo_cumack = 1; |
4708 | 0 | tp1->whoTo->find_pseudo_cumack = 1; |
4709 | 0 | tp1->whoTo->find_rtx_pseudo_cumack = 1; |
4710 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { |
4711 | 0 | sctp_log_sack(asoc->last_acked_seq, |
4712 | 0 | cum_ack, |
4713 | 0 | tp1->rec.data.tsn, |
4714 | 0 | 0, |
4715 | 0 | 0, |
4716 | 0 | SCTP_LOG_TSN_ACKED); |
4717 | 0 | } |
4718 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { |
4719 | 0 | sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); |
4720 | 0 | } |
4721 | 0 | } |
4722 | 0 | if (tp1->sent == SCTP_DATAGRAM_RESEND) { |
4723 | 0 | sctp_ucount_decr(asoc->sent_queue_retran_cnt); |
4724 | | #ifdef SCTP_AUDITING_ENABLED |
4725 | | sctp_audit_log(0xB3, |
4726 | | (asoc->sent_queue_retran_cnt & 0x000000ff)); |
4727 | | #endif |
4728 | 0 | } |
4729 | 0 | if (tp1->rec.data.chunk_was_revoked) { |
4730 | | /* deflate the cwnd */ |
4731 | 0 | tp1->whoTo->cwnd -= tp1->book_size; |
4732 | 0 | tp1->rec.data.chunk_was_revoked = 0; |
4733 | 0 | } |
4734 | 0 | if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { |
4735 | 0 | tp1->sent = SCTP_DATAGRAM_ACKED; |
4736 | 0 | } |
4737 | 0 | } |
4738 | 0 | } else { |
4739 | 0 | break; |
4740 | 0 | } |
4741 | 0 | } |
4742 | 0 | biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; |
4743 | | /* always set this up to cum-ack */ |
4744 | 0 | asoc->this_sack_highest_gap = last_tsn; |
4745 | |
|
4746 | 0 | if ((num_seg > 0) || (num_nr_seg > 0)) { |
4747 | | /* |
4748 | | * thisSackHighestGap will increase while handling NEW |
4749 | | * segments this_sack_highest_newack will increase while |
4750 | | * handling NEWLY ACKED chunks. this_sack_lowest_newack is |
4751 | | * used for CMT DAC algo. saw_newack will also change. |
4752 | | */ |
4753 | 0 | if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked, |
4754 | 0 | &biggest_tsn_newly_acked, &this_sack_lowest_newack, |
4755 | 0 | num_seg, num_nr_seg, &rto_ok)) { |
4756 | 0 | wake_him++; |
4757 | 0 | } |
4758 | | /* |
4759 | | * validate the biggest_tsn_acked in the gap acks if |
4760 | | * strict adherence is wanted. |
4761 | | */ |
4762 | 0 | if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) { |
4763 | | /* |
4764 | | * peer is either confused or we are under |
4765 | | * attack. We must abort. |
4766 | | */ |
4767 | 0 | SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n", |
4768 | 0 | biggest_tsn_acked, send_s); |
4769 | 0 | goto hopeless_peer; |
4770 | 0 | } |
4771 | 0 | } |
4772 | | /*******************************************/ |
4773 | | /* cancel ALL T3-send timer if accum moved */ |
4774 | | /*******************************************/ |
4775 | 0 | if (asoc->sctp_cmt_on_off > 0) { |
4776 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
4777 | 0 | if (net->new_pseudo_cumack) |
4778 | 0 | sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, |
4779 | 0 | stcb, net, |
4780 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_31); |
4781 | 0 | } |
4782 | 0 | } else { |
4783 | 0 | if (accum_moved) { |
4784 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
4785 | 0 | sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, |
4786 | 0 | stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); |
4787 | 0 | } |
4788 | 0 | } |
4789 | 0 | } |
4790 | | /********************************************/ |
4791 | | /* drop the acked chunks from the sentqueue */ |
4792 | | /********************************************/ |
4793 | 0 | asoc->last_acked_seq = cum_ack; |
4794 | |
|
4795 | 0 | TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { |
4796 | 0 | if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) { |
4797 | 0 | break; |
4798 | 0 | } |
4799 | 0 | if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { |
4800 | 0 | if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) { |
4801 | 0 | asoc->strmout[tp1->rec.data.sid].chunks_on_queues--; |
4802 | 0 | #ifdef INVARIANTS |
4803 | 0 | } else { |
4804 | 0 | panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); |
4805 | 0 | #endif |
4806 | 0 | } |
4807 | 0 | } |
4808 | 0 | if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) && |
4809 | 0 | (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && |
4810 | 0 | TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) { |
4811 | 0 | asoc->trigger_reset = 1; |
4812 | 0 | } |
4813 | 0 | TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); |
4814 | 0 | if (PR_SCTP_ENABLED(tp1->flags)) { |
4815 | 0 | if (asoc->pr_sctp_cnt != 0) |
4816 | 0 | asoc->pr_sctp_cnt--; |
4817 | 0 | } |
4818 | 0 | asoc->sent_queue_cnt--; |
4819 | 0 | if (tp1->data) { |
4820 | | /* sa_ignore NO_NULL_CHK */ |
4821 | 0 | sctp_free_bufspace(stcb, asoc, tp1, 1); |
4822 | 0 | sctp_m_freem(tp1->data); |
4823 | 0 | tp1->data = NULL; |
4824 | 0 | if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) { |
4825 | 0 | asoc->sent_queue_cnt_removeable--; |
4826 | 0 | } |
4827 | 0 | } |
4828 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { |
4829 | 0 | sctp_log_sack(asoc->last_acked_seq, |
4830 | 0 | cum_ack, |
4831 | 0 | tp1->rec.data.tsn, |
4832 | 0 | 0, |
4833 | 0 | 0, |
4834 | 0 | SCTP_LOG_FREE_SENT); |
4835 | 0 | } |
4836 | 0 | sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); |
4837 | 0 | wake_him++; |
4838 | 0 | } |
4839 | 0 | if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) { |
4840 | 0 | #ifdef INVARIANTS |
4841 | 0 | panic("Warning flight size is positive and should be 0"); |
4842 | | #else |
4843 | | SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n", |
4844 | | asoc->total_flight); |
4845 | | #endif |
4846 | 0 | asoc->total_flight = 0; |
4847 | 0 | } |
4848 | | |
4849 | 0 | #if defined(__Userspace__) |
4850 | 0 | if (stcb->sctp_ep->recv_callback) { |
4851 | 0 | if (stcb->sctp_socket) { |
4852 | 0 | uint32_t inqueue_bytes, sb_free_now; |
4853 | 0 | struct sctp_inpcb *inp; |
4854 | |
|
4855 | 0 | inp = stcb->sctp_ep; |
4856 | 0 | inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); |
4857 | 0 | sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv); |
4858 | | |
4859 | | /* check if the amount free in the send socket buffer crossed the threshold */ |
4860 | 0 | if (inp->send_callback && |
4861 | 0 | (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) || |
4862 | 0 | (inp->send_sb_threshold == 0))) { |
4863 | 0 | atomic_add_int(&stcb->asoc.refcnt, 1); |
4864 | 0 | SCTP_TCB_UNLOCK(stcb); |
4865 | 0 | inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info); |
4866 | 0 | SCTP_TCB_LOCK(stcb); |
4867 | 0 | atomic_subtract_int(&stcb->asoc.refcnt, 1); |
4868 | 0 | } |
4869 | 0 | } |
4870 | 0 | } else if ((wake_him) && (stcb->sctp_socket)) { |
4871 | | #else |
4872 | | /* sa_ignore NO_NULL_CHK */ |
4873 | | if ((wake_him) && (stcb->sctp_socket)) { |
4874 | | #endif |
4875 | | #if defined(__APPLE__) && !defined(__Userspace__) |
4876 | | struct socket *so; |
4877 | | |
4878 | | #endif |
4879 | 0 | SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); |
4880 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { |
4881 | 0 | sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK); |
4882 | 0 | } |
4883 | | #if defined(__APPLE__) && !defined(__Userspace__) |
4884 | | so = SCTP_INP_SO(stcb->sctp_ep); |
4885 | | atomic_add_int(&stcb->asoc.refcnt, 1); |
4886 | | SCTP_TCB_UNLOCK(stcb); |
4887 | | SCTP_SOCKET_LOCK(so, 1); |
4888 | | SCTP_TCB_LOCK(stcb); |
4889 | | atomic_subtract_int(&stcb->asoc.refcnt, 1); |
4890 | | if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { |
4891 | | /* assoc was freed while we were unlocked */ |
4892 | | SCTP_SOCKET_UNLOCK(so, 1); |
4893 | | return; |
4894 | | } |
4895 | | #endif |
4896 | 0 | sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); |
4897 | | #if defined(__APPLE__) && !defined(__Userspace__) |
4898 | | SCTP_SOCKET_UNLOCK(so, 1); |
4899 | | #endif |
4900 | 0 | } else { |
4901 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { |
4902 | 0 | sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK); |
4903 | 0 | } |
4904 | 0 | } |
4905 | | |
4906 | 0 | if (asoc->fast_retran_loss_recovery && accum_moved) { |
4907 | 0 | if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) { |
4908 | | /* Setup so we will exit RFC2582 fast recovery */ |
4909 | 0 | will_exit_fast_recovery = 1; |
4910 | 0 | } |
4911 | 0 | } |
4912 | | /* |
4913 | | * Check for revoked fragments: |
4914 | | * |
4915 | | * if Previous sack - Had no frags then we can't have any revoked if |
4916 | | * Previous sack - Had frag's then - If we now have frags aka |
4917 | | * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked |
4918 | | * some of them. else - The peer revoked all ACKED fragments, since |
4919 | | * we had some before and now we have NONE. |
4920 | | */ |
4921 | |
|
4922 | 0 | if (num_seg) { |
4923 | 0 | sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked); |
4924 | 0 | asoc->saw_sack_with_frags = 1; |
4925 | 0 | } else if (asoc->saw_sack_with_frags) { |
4926 | 0 | int cnt_revoked = 0; |
4927 | | |
4928 | | /* Peer revoked all dg's marked or acked */ |
4929 | 0 | TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
4930 | 0 | if (tp1->sent == SCTP_DATAGRAM_ACKED) { |
4931 | 0 | tp1->sent = SCTP_DATAGRAM_SENT; |
4932 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
4933 | 0 | sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, |
4934 | 0 | tp1->whoTo->flight_size, |
4935 | 0 | tp1->book_size, |
4936 | 0 | (uint32_t)(uintptr_t)tp1->whoTo, |
4937 | 0 | tp1->rec.data.tsn); |
4938 | 0 | } |
4939 | 0 | sctp_flight_size_increase(tp1); |
4940 | 0 | sctp_total_flight_increase(stcb, tp1); |
4941 | 0 | tp1->rec.data.chunk_was_revoked = 1; |
4942 | | /* |
4943 | | * To ensure that this increase in |
4944 | | * flightsize, which is artificial, |
4945 | | * does not throttle the sender, we |
4946 | | * also increase the cwnd |
4947 | | * artificially. |
4948 | | */ |
4949 | 0 | tp1->whoTo->cwnd += tp1->book_size; |
4950 | 0 | cnt_revoked++; |
4951 | 0 | } |
4952 | 0 | } |
4953 | 0 | if (cnt_revoked) { |
4954 | 0 | reneged_all = 1; |
4955 | 0 | } |
4956 | 0 | asoc->saw_sack_with_frags = 0; |
4957 | 0 | } |
4958 | 0 | if (num_nr_seg > 0) |
4959 | 0 | asoc->saw_sack_with_nr_frags = 1; |
4960 | 0 | else |
4961 | 0 | asoc->saw_sack_with_nr_frags = 0; |
4962 | | |
4963 | | /* JRS - Use the congestion control given in the CC module */ |
4964 | 0 | if (ecne_seen == 0) { |
4965 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
4966 | 0 | if (net->net_ack2 > 0) { |
4967 | | /* |
4968 | | * Karn's rule applies to clearing error count, this |
4969 | | * is optional. |
4970 | | */ |
4971 | 0 | net->error_count = 0; |
4972 | 0 | if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) { |
4973 | | /* addr came good */ |
4974 | 0 | net->dest_state |= SCTP_ADDR_REACHABLE; |
4975 | 0 | sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, |
4976 | 0 | 0, (void *)net, SCTP_SO_NOT_LOCKED); |
4977 | 0 | } |
4978 | |
|
4979 | 0 | if (net == stcb->asoc.primary_destination) { |
4980 | 0 | if (stcb->asoc.alternate) { |
4981 | | /* release the alternate, primary is good */ |
4982 | 0 | sctp_free_remote_addr(stcb->asoc.alternate); |
4983 | 0 | stcb->asoc.alternate = NULL; |
4984 | 0 | } |
4985 | 0 | } |
4986 | | |
4987 | 0 | if (net->dest_state & SCTP_ADDR_PF) { |
4988 | 0 | net->dest_state &= ~SCTP_ADDR_PF; |
4989 | 0 | sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, |
4990 | 0 | stcb->sctp_ep, stcb, net, |
4991 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_33); |
4992 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); |
4993 | 0 | asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); |
4994 | | /* Done with this net */ |
4995 | 0 | net->net_ack = 0; |
4996 | 0 | } |
4997 | | /* restore any doubled timers */ |
4998 | 0 | net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; |
4999 | 0 | if (net->RTO < stcb->asoc.minrto) { |
5000 | 0 | net->RTO = stcb->asoc.minrto; |
5001 | 0 | } |
5002 | 0 | if (net->RTO > stcb->asoc.maxrto) { |
5003 | 0 | net->RTO = stcb->asoc.maxrto; |
5004 | 0 | } |
5005 | 0 | } |
5006 | 0 | } |
5007 | 0 | asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); |
5008 | 0 | } |
5009 | | |
5010 | 0 | if (TAILQ_EMPTY(&asoc->sent_queue)) { |
5011 | | /* nothing left in-flight */ |
5012 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
5013 | | /* stop all timers */ |
5014 | 0 | sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, |
5015 | 0 | stcb, net, |
5016 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_34); |
5017 | 0 | net->flight_size = 0; |
5018 | 0 | net->partial_bytes_acked = 0; |
5019 | 0 | } |
5020 | 0 | asoc->total_flight = 0; |
5021 | 0 | asoc->total_flight_count = 0; |
5022 | 0 | } |
5023 | | |
5024 | | /**********************************/ |
5025 | | /* Now what about shutdown issues */ |
5026 | | /**********************************/ |
5027 | 0 | if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { |
5028 | | /* nothing left on sendqueue.. consider done */ |
5029 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { |
5030 | 0 | sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, |
5031 | 0 | asoc->peers_rwnd, 0, 0, a_rwnd); |
5032 | 0 | } |
5033 | 0 | asoc->peers_rwnd = a_rwnd; |
5034 | 0 | if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { |
5035 | | /* SWS sender side engages */ |
5036 | 0 | asoc->peers_rwnd = 0; |
5037 | 0 | } |
5038 | | /* clean up */ |
5039 | 0 | if ((asoc->stream_queue_cnt == 1) && |
5040 | 0 | ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || |
5041 | 0 | (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && |
5042 | 0 | ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) { |
5043 | 0 | SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); |
5044 | 0 | } |
5045 | 0 | if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || |
5046 | 0 | (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && |
5047 | 0 | (asoc->stream_queue_cnt == 1) && |
5048 | 0 | (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { |
5049 | 0 | struct mbuf *op_err; |
5050 | |
|
5051 | 0 | *abort_now = 1; |
5052 | | /* XXX */ |
5053 | 0 | op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); |
5054 | 0 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35; |
5055 | 0 | sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
5056 | 0 | return; |
5057 | 0 | } |
5058 | 0 | if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && |
5059 | 0 | (asoc->stream_queue_cnt == 0)) { |
5060 | 0 | struct sctp_nets *netp; |
5061 | |
|
5062 | 0 | if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || |
5063 | 0 | (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { |
5064 | 0 | SCTP_STAT_DECR_GAUGE32(sctps_currestab); |
5065 | 0 | } |
5066 | 0 | SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); |
5067 | 0 | sctp_stop_timers_for_shutdown(stcb); |
5068 | 0 | if (asoc->alternate) { |
5069 | 0 | netp = asoc->alternate; |
5070 | 0 | } else { |
5071 | 0 | netp = asoc->primary_destination; |
5072 | 0 | } |
5073 | 0 | sctp_send_shutdown(stcb, netp); |
5074 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, |
5075 | 0 | stcb->sctp_ep, stcb, netp); |
5076 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, |
5077 | 0 | stcb->sctp_ep, stcb, NULL); |
5078 | 0 | return; |
5079 | 0 | } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) && |
5080 | 0 | (asoc->stream_queue_cnt == 0)) { |
5081 | 0 | struct sctp_nets *netp; |
5082 | |
|
5083 | 0 | SCTP_STAT_DECR_GAUGE32(sctps_currestab); |
5084 | 0 | SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT); |
5085 | 0 | sctp_stop_timers_for_shutdown(stcb); |
5086 | 0 | if (asoc->alternate) { |
5087 | 0 | netp = asoc->alternate; |
5088 | 0 | } else { |
5089 | 0 | netp = asoc->primary_destination; |
5090 | 0 | } |
5091 | 0 | sctp_send_shutdown_ack(stcb, netp); |
5092 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, |
5093 | 0 | stcb->sctp_ep, stcb, netp); |
5094 | 0 | return; |
5095 | 0 | } |
5096 | 0 | } |
5097 | | /* |
5098 | | * Now here we are going to recycle net_ack for a different use... |
5099 | | * HEADS UP. |
5100 | | */ |
5101 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
5102 | 0 | net->net_ack = 0; |
5103 | 0 | } |
5104 | | |
5105 | | /* |
5106 | | * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking |
5107 | | * to be done. Setting this_sack_lowest_newack to the cum_ack will |
5108 | | * automatically ensure that. |
5109 | | */ |
5110 | 0 | if ((asoc->sctp_cmt_on_off > 0) && |
5111 | 0 | SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && |
5112 | 0 | (cmt_dac_flag == 0)) { |
5113 | 0 | this_sack_lowest_newack = cum_ack; |
5114 | 0 | } |
5115 | 0 | if ((num_seg > 0) || (num_nr_seg > 0)) { |
5116 | 0 | sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, |
5117 | 0 | biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); |
5118 | 0 | } |
5119 | | /* JRS - Use the congestion control given in the CC module */ |
5120 | 0 | asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc); |
5121 | | |
5122 | | /* Now are we exiting loss recovery ? */ |
5123 | 0 | if (will_exit_fast_recovery) { |
5124 | | /* Ok, we must exit fast recovery */ |
5125 | 0 | asoc->fast_retran_loss_recovery = 0; |
5126 | 0 | } |
5127 | 0 | if ((asoc->sat_t3_loss_recovery) && |
5128 | 0 | SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) { |
5129 | | /* end satellite t3 loss recovery */ |
5130 | 0 | asoc->sat_t3_loss_recovery = 0; |
5131 | 0 | } |
5132 | | /* |
5133 | | * CMT Fast recovery |
5134 | | */ |
5135 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
5136 | 0 | if (net->will_exit_fast_recovery) { |
5137 | | /* Ok, we must exit fast recovery */ |
5138 | 0 | net->fast_retran_loss_recovery = 0; |
5139 | 0 | } |
5140 | 0 | } |
5141 | | |
5142 | | /* Adjust and set the new rwnd value */ |
5143 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { |
5144 | 0 | sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, |
5145 | 0 | asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd); |
5146 | 0 | } |
5147 | 0 | asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, |
5148 | 0 | (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); |
5149 | 0 | if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { |
5150 | | /* SWS sender side engages */ |
5151 | 0 | asoc->peers_rwnd = 0; |
5152 | 0 | } |
5153 | 0 | if (asoc->peers_rwnd > old_rwnd) { |
5154 | 0 | win_probe_recovery = 1; |
5155 | 0 | } |
5156 | | |
5157 | | /* |
5158 | | * Now we must setup so we have a timer up for anyone with |
5159 | | * outstanding data. |
5160 | | */ |
5161 | 0 | done_once = 0; |
5162 | 0 | again: |
5163 | 0 | j = 0; |
5164 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
5165 | 0 | if (win_probe_recovery && (net->window_probe)) { |
5166 | 0 | win_probe_recovered = 1; |
5167 | | /*- |
5168 | | * Find first chunk that was used with |
5169 | | * window probe and clear the event. Put |
5170 | | * it back into the send queue as if has |
5171 | | * not been sent. |
5172 | | */ |
5173 | 0 | TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
5174 | 0 | if (tp1->window_probe) { |
5175 | 0 | sctp_window_probe_recovery(stcb, asoc, tp1); |
5176 | 0 | break; |
5177 | 0 | } |
5178 | 0 | } |
5179 | 0 | } |
5180 | 0 | if (net->flight_size) { |
5181 | 0 | j++; |
5182 | 0 | if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { |
5183 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SEND, |
5184 | 0 | stcb->sctp_ep, stcb, net); |
5185 | 0 | } |
5186 | 0 | if (net->window_probe) { |
5187 | 0 | net->window_probe = 0; |
5188 | 0 | } |
5189 | 0 | } else { |
5190 | 0 | if (net->window_probe) { |
5191 | | /* In window probes we must assure a timer is still running there */ |
5192 | 0 | if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { |
5193 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SEND, |
5194 | 0 | stcb->sctp_ep, stcb, net); |
5195 | 0 | } |
5196 | 0 | } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { |
5197 | 0 | sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, |
5198 | 0 | stcb, net, |
5199 | 0 | SCTP_FROM_SCTP_INDATA + SCTP_LOC_36); |
5200 | 0 | } |
5201 | 0 | } |
5202 | 0 | } |
5203 | 0 | if ((j == 0) && |
5204 | 0 | (!TAILQ_EMPTY(&asoc->sent_queue)) && |
5205 | 0 | (asoc->sent_queue_retran_cnt == 0) && |
5206 | 0 | (win_probe_recovered == 0) && |
5207 | 0 | (done_once == 0)) { |
5208 | | /* huh, this should not happen unless all packets |
5209 | | * are PR-SCTP and marked to skip of course. |
5210 | | */ |
5211 | 0 | if (sctp_fs_audit(asoc)) { |
5212 | 0 | TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
5213 | 0 | net->flight_size = 0; |
5214 | 0 | } |
5215 | 0 | asoc->total_flight = 0; |
5216 | 0 | asoc->total_flight_count = 0; |
5217 | 0 | asoc->sent_queue_retran_cnt = 0; |
5218 | 0 | TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
5219 | 0 | if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
5220 | 0 | sctp_flight_size_increase(tp1); |
5221 | 0 | sctp_total_flight_increase(stcb, tp1); |
5222 | 0 | } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { |
5223 | 0 | sctp_ucount_incr(asoc->sent_queue_retran_cnt); |
5224 | 0 | } |
5225 | 0 | } |
5226 | 0 | } |
5227 | 0 | done_once = 1; |
5228 | 0 | goto again; |
5229 | 0 | } |
5230 | | /*********************************************/ |
5231 | | /* Here we perform PR-SCTP procedures */ |
5232 | | /* (section 4.2) */ |
5233 | | /*********************************************/ |
5234 | | /* C1. update advancedPeerAckPoint */ |
5235 | 0 | if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) { |
5236 | 0 | asoc->advanced_peer_ack_point = cum_ack; |
5237 | 0 | } |
5238 | | /* C2. try to further move advancedPeerAckPoint ahead */ |
5239 | 0 | if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { |
5240 | 0 | struct sctp_tmit_chunk *lchk; |
5241 | 0 | uint32_t old_adv_peer_ack_point; |
5242 | |
|
5243 | 0 | old_adv_peer_ack_point = asoc->advanced_peer_ack_point; |
5244 | 0 | lchk = sctp_try_advance_peer_ack_point(stcb, asoc); |
5245 | | /* C3. See if we need to send a Fwd-TSN */ |
5246 | 0 | if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) { |
5247 | | /* |
5248 | | * ISSUE with ECN, see FWD-TSN processing. |
5249 | | */ |
5250 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { |
5251 | 0 | sctp_misc_ints(SCTP_FWD_TSN_CHECK, |
5252 | 0 | 0xee, cum_ack, asoc->advanced_peer_ack_point, |
5253 | 0 | old_adv_peer_ack_point); |
5254 | 0 | } |
5255 | 0 | if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { |
5256 | 0 | send_forward_tsn(stcb, asoc); |
5257 | 0 | } else if (lchk) { |
5258 | | /* try to FR fwd-tsn's that get lost too */ |
5259 | 0 | if (lchk->rec.data.fwd_tsn_cnt >= 3) { |
5260 | 0 | send_forward_tsn(stcb, asoc); |
5261 | 0 | } |
5262 | 0 | } |
5263 | 0 | } |
5264 | 0 | for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) { |
5265 | 0 | if (lchk->whoTo != NULL) { |
5266 | 0 | break; |
5267 | 0 | } |
5268 | 0 | } |
5269 | 0 | if (lchk != NULL) { |
5270 | | /* Assure a timer is up */ |
5271 | 0 | sctp_timer_start(SCTP_TIMER_TYPE_SEND, |
5272 | 0 | stcb->sctp_ep, stcb, lchk->whoTo); |
5273 | 0 | } |
5274 | 0 | } |
5275 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { |
5276 | 0 | sctp_misc_ints(SCTP_SACK_RWND_UPDATE, |
5277 | 0 | a_rwnd, |
5278 | 0 | stcb->asoc.peers_rwnd, |
5279 | 0 | stcb->asoc.total_flight, |
5280 | 0 | stcb->asoc.total_output_queue_size); |
5281 | 0 | } |
5282 | 0 | } |
5283 | | |
5284 | | void |
5285 | | sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag) |
5286 | 0 | { |
5287 | | /* Copy cum-ack */ |
5288 | 0 | uint32_t cum_ack, a_rwnd; |
5289 | |
|
5290 | 0 | cum_ack = ntohl(cp->cumulative_tsn_ack); |
5291 | | /* Arrange so a_rwnd does NOT change */ |
5292 | 0 | a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; |
5293 | | |
5294 | | /* Now call the express sack handling */ |
5295 | 0 | sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0); |
5296 | 0 | } |
5297 | | |
5298 | | static void |
5299 | | sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, |
5300 | | struct sctp_stream_in *strmin) |
5301 | 0 | { |
5302 | 0 | struct sctp_queued_to_read *control, *ncontrol; |
5303 | 0 | struct sctp_association *asoc; |
5304 | 0 | uint32_t mid; |
5305 | 0 | int need_reasm_check = 0; |
5306 | |
|
5307 | 0 | KASSERT(stcb != NULL, ("stcb == NULL")); |
5308 | 0 | SCTP_TCB_LOCK_ASSERT(stcb); |
5309 | 0 | SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep); |
5310 | | |
5311 | 0 | asoc = &stcb->asoc; |
5312 | 0 | mid = strmin->last_mid_delivered; |
5313 | | /* |
5314 | | * First deliver anything prior to and including the stream no that |
5315 | | * came in. |
5316 | | */ |
5317 | 0 | TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) { |
5318 | 0 | if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) { |
5319 | | /* this is deliverable now */ |
5320 | 0 | if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { |
5321 | 0 | if (control->on_strm_q) { |
5322 | 0 | if (control->on_strm_q == SCTP_ON_ORDERED) { |
5323 | 0 | TAILQ_REMOVE(&strmin->inqueue, control, next_instrm); |
5324 | 0 | } else if (control->on_strm_q == SCTP_ON_UNORDERED) { |
5325 | 0 | TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm); |
5326 | 0 | #ifdef INVARIANTS |
5327 | 0 | } else { |
5328 | 0 | panic("strmin: %p ctl: %p unknown %d", |
5329 | 0 | strmin, control, control->on_strm_q); |
5330 | 0 | #endif |
5331 | 0 | } |
5332 | 0 | control->on_strm_q = 0; |
5333 | 0 | } |
5334 | | /* subtract pending on streams */ |
5335 | 0 | if (asoc->size_on_all_streams >= control->length) { |
5336 | 0 | asoc->size_on_all_streams -= control->length; |
5337 | 0 | } else { |
5338 | 0 | #ifdef INVARIANTS |
5339 | 0 | panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); |
5340 | | #else |
5341 | | asoc->size_on_all_streams = 0; |
5342 | | #endif |
5343 | 0 | } |
5344 | 0 | sctp_ucount_decr(asoc->cnt_on_all_streams); |
5345 | | /* deliver it to at least the delivery-q */ |
5346 | 0 | if (stcb->sctp_socket) { |
5347 | 0 | sctp_mark_non_revokable(asoc, control->sinfo_tsn); |
5348 | 0 | sctp_add_to_readq(stcb->sctp_ep, stcb, control, |
5349 | 0 | &stcb->sctp_socket->so_rcv, 1, |
5350 | 0 | SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED); |
5351 | 0 | } |
5352 | 0 | } else { |
5353 | | /* Its a fragmented message */ |
5354 | 0 | if (control->first_frag_seen) { |
5355 | | /* Make it so this is next to deliver, we restore later */ |
5356 | 0 | strmin->last_mid_delivered = control->mid - 1; |
5357 | 0 | need_reasm_check = 1; |
5358 | 0 | break; |
5359 | 0 | } |
5360 | 0 | } |
5361 | 0 | } else { |
5362 | | /* no more delivery now. */ |
5363 | 0 | break; |
5364 | 0 | } |
5365 | 0 | } |
5366 | 0 | if (need_reasm_check) { |
5367 | 0 | int ret; |
5368 | 0 | ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD); |
5369 | 0 | if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) { |
5370 | | /* Restore the next to deliver unless we are ahead */ |
5371 | 0 | strmin->last_mid_delivered = mid; |
5372 | 0 | } |
5373 | 0 | if (ret == 0) { |
5374 | | /* Left the front Partial one on */ |
5375 | 0 | return; |
5376 | 0 | } |
5377 | 0 | need_reasm_check = 0; |
5378 | 0 | } |
5379 | | /* |
5380 | | * now we must deliver things in queue the normal way if any are |
5381 | | * now ready. |
5382 | | */ |
5383 | 0 | mid = strmin->last_mid_delivered + 1; |
5384 | 0 | TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) { |
5385 | 0 | if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) { |
5386 | 0 | if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { |
5387 | | /* this is deliverable now */ |
5388 | 0 | if (control->on_strm_q) { |
5389 | 0 | if (control->on_strm_q == SCTP_ON_ORDERED) { |
5390 | 0 | TAILQ_REMOVE(&strmin->inqueue, control, next_instrm); |
5391 | 0 | } else if (control->on_strm_q == SCTP_ON_UNORDERED) { |
5392 | 0 | TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm); |
5393 | 0 | #ifdef INVARIANTS |
5394 | 0 | } else { |
5395 | 0 | panic("strmin: %p ctl: %p unknown %d", |
5396 | 0 | strmin, control, control->on_strm_q); |
5397 | 0 | #endif |
5398 | 0 | } |
5399 | 0 | control->on_strm_q = 0; |
5400 | 0 | } |
5401 | | /* subtract pending on streams */ |
5402 | 0 | if (asoc->size_on_all_streams >= control->length) { |
5403 | 0 | asoc->size_on_all_streams -= control->length; |
5404 | 0 | } else { |
5405 | 0 | #ifdef INVARIANTS |
5406 | 0 | panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); |
5407 | | #else |
5408 | | asoc->size_on_all_streams = 0; |
5409 | | #endif |
5410 | 0 | } |
5411 | 0 | sctp_ucount_decr(asoc->cnt_on_all_streams); |
5412 | | /* deliver it to at least the delivery-q */ |
5413 | 0 | strmin->last_mid_delivered = control->mid; |
5414 | 0 | if (stcb->sctp_socket) { |
5415 | 0 | sctp_mark_non_revokable(asoc, control->sinfo_tsn); |
5416 | 0 | sctp_add_to_readq(stcb->sctp_ep, stcb, control, |
5417 | 0 | &stcb->sctp_socket->so_rcv, 1, |
5418 | 0 | SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED); |
5419 | 0 | } |
5420 | 0 | mid = strmin->last_mid_delivered + 1; |
5421 | 0 | } else { |
5422 | | /* Its a fragmented message */ |
5423 | 0 | if (control->first_frag_seen) { |
5424 | | /* Make it so this is next to deliver */ |
5425 | 0 | strmin->last_mid_delivered = control->mid - 1; |
5426 | 0 | need_reasm_check = 1; |
5427 | 0 | break; |
5428 | 0 | } |
5429 | 0 | } |
5430 | 0 | } else { |
5431 | 0 | break; |
5432 | 0 | } |
5433 | 0 | } |
5434 | 0 | if (need_reasm_check) { |
5435 | 0 | (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD); |
5436 | 0 | } |
5437 | 0 | } |
5438 | | |
5439 | | static void |
5440 | | sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb, |
5441 | | struct sctp_association *asoc, struct sctp_stream_in *strm, |
5442 | | struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn) |
5443 | 0 | { |
5444 | 0 | struct sctp_tmit_chunk *chk, *nchk; |
5445 | | |
5446 | | /* |
5447 | | * For now large messages held on the stream reasm that are |
5448 | | * complete will be tossed too. We could in theory do more |
5449 | | * work to spin through and stop after dumping one msg aka |
5450 | | * seeing the start of a new msg at the head, and call the |
5451 | | * delivery function... to see if it can be delivered... But |
5452 | | * for now we just dump everything on the queue. |
5453 | | */ |
5454 | |
|
5455 | 0 | KASSERT(stcb != NULL, ("stcb == NULL")); |
5456 | 0 | SCTP_TCB_LOCK_ASSERT(stcb); |
5457 | 0 | SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep); |
5458 | | |
5459 | 0 | if (!asoc->idata_supported && !ordered && |
5460 | 0 | control->first_frag_seen && |
5461 | 0 | SCTP_TSN_GT(control->fsn_included, cumtsn)) { |
5462 | 0 | return; |
5463 | 0 | } |
5464 | 0 | TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) { |
5465 | | /* Purge hanging chunks */ |
5466 | 0 | if (!asoc->idata_supported && !ordered) { |
5467 | 0 | if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) { |
5468 | 0 | break; |
5469 | 0 | } |
5470 | 0 | } |
5471 | 0 | TAILQ_REMOVE(&control->reasm, chk, sctp_next); |
5472 | 0 | if (asoc->size_on_reasm_queue >= chk->send_size) { |
5473 | 0 | asoc->size_on_reasm_queue -= chk->send_size; |
5474 | 0 | } else { |
5475 | 0 | #ifdef INVARIANTS |
5476 | 0 | panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size); |
5477 | | #else |
5478 | | asoc->size_on_reasm_queue = 0; |
5479 | | #endif |
5480 | 0 | } |
5481 | 0 | sctp_ucount_decr(asoc->cnt_on_reasm_queue); |
5482 | 0 | if (chk->data) { |
5483 | 0 | sctp_m_freem(chk->data); |
5484 | 0 | chk->data = NULL; |
5485 | 0 | } |
5486 | 0 | sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
5487 | 0 | } |
5488 | 0 | if (!TAILQ_EMPTY(&control->reasm)) { |
5489 | 0 | KASSERT(!asoc->idata_supported, |
5490 | 0 | ("Reassembly queue not empty for I-DATA")); |
5491 | 0 | KASSERT(!ordered, |
5492 | 0 | ("Reassembly queue not empty for ordered data")); |
5493 | 0 | if (control->data) { |
5494 | 0 | sctp_m_freem(control->data); |
5495 | 0 | control->data = NULL; |
5496 | 0 | } |
5497 | 0 | control->fsn_included = 0xffffffff; |
5498 | 0 | control->first_frag_seen = 0; |
5499 | 0 | control->last_frag_seen = 0; |
5500 | 0 | if (control->on_read_q) { |
5501 | | /* |
5502 | | * We have to purge it from there, |
5503 | | * hopefully this will work :-) |
5504 | | */ |
5505 | 0 | TAILQ_REMOVE(&stcb->sctp_ep->read_queue, control, next); |
5506 | 0 | control->on_read_q = 0; |
5507 | 0 | } |
5508 | 0 | chk = TAILQ_FIRST(&control->reasm); |
5509 | 0 | if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { |
5510 | 0 | TAILQ_REMOVE(&control->reasm, chk, sctp_next); |
5511 | 0 | sctp_add_chk_to_control(control, strm, stcb, asoc, |
5512 | 0 | chk, SCTP_READ_LOCK_HELD); |
5513 | 0 | } |
5514 | 0 | sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD); |
5515 | 0 | return; |
5516 | 0 | } |
5517 | 0 | if (control->on_strm_q == SCTP_ON_ORDERED) { |
5518 | 0 | TAILQ_REMOVE(&strm->inqueue, control, next_instrm); |
5519 | 0 | if (asoc->size_on_all_streams >= control->length) { |
5520 | 0 | asoc->size_on_all_streams -= control->length; |
5521 | 0 | } else { |
5522 | 0 | #ifdef INVARIANTS |
5523 | 0 | panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); |
5524 | | #else |
5525 | | asoc->size_on_all_streams = 0; |
5526 | | #endif |
5527 | 0 | } |
5528 | 0 | sctp_ucount_decr(asoc->cnt_on_all_streams); |
5529 | 0 | control->on_strm_q = 0; |
5530 | 0 | } else if (control->on_strm_q == SCTP_ON_UNORDERED) { |
5531 | 0 | TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); |
5532 | 0 | control->on_strm_q = 0; |
5533 | 0 | #ifdef INVARIANTS |
5534 | 0 | } else if (control->on_strm_q) { |
5535 | 0 | panic("strm: %p ctl: %p unknown %d", |
5536 | 0 | strm, control, control->on_strm_q); |
5537 | 0 | #endif |
5538 | 0 | } |
5539 | 0 | control->on_strm_q = 0; |
5540 | 0 | if (control->on_read_q == 0) { |
5541 | 0 | sctp_free_remote_addr(control->whoFrom); |
5542 | 0 | if (control->data) { |
5543 | 0 | sctp_m_freem(control->data); |
5544 | 0 | control->data = NULL; |
5545 | 0 | } |
5546 | 0 | sctp_free_a_readq(stcb, control); |
5547 | 0 | } |
5548 | 0 | } |
5549 | | |
5550 | | void |
5551 | | sctp_handle_forward_tsn(struct sctp_tcb *stcb, |
5552 | | struct sctp_forward_tsn_chunk *fwd, |
5553 | | int *abort_flag, struct mbuf *m , int offset) |
5554 | 0 | { |
5555 | | /* The pr-sctp fwd tsn */ |
5556 | | /* |
5557 | | * here we will perform all the data receiver side steps for |
5558 | | * processing FwdTSN, as required in by pr-sctp draft: |
5559 | | * |
5560 | | * Assume we get FwdTSN(x): |
5561 | | * |
5562 | | * 1) update local cumTSN to x |
5563 | | * 2) try to further advance cumTSN to x + others we have |
5564 | | * 3) examine and update re-ordering queue on pr-in-streams |
5565 | | * 4) clean up re-assembly queue |
5566 | | * 5) Send a sack to report where we are. |
5567 | | */ |
5568 | 0 | struct sctp_association *asoc; |
5569 | 0 | uint32_t new_cum_tsn, gap; |
5570 | 0 | unsigned int i, fwd_sz, m_size; |
5571 | 0 | struct sctp_stream_in *strm; |
5572 | 0 | struct sctp_queued_to_read *control, *ncontrol; |
5573 | |
|
5574 | 0 | asoc = &stcb->asoc; |
5575 | 0 | if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) { |
5576 | 0 | SCTPDBG(SCTP_DEBUG_INDATA1, |
5577 | 0 | "Bad size too small/big fwd-tsn\n"); |
5578 | 0 | return; |
5579 | 0 | } |
5580 | 0 | m_size = (stcb->asoc.mapping_array_size << 3); |
5581 | | /*************************************************************/ |
5582 | | /* 1. Here we update local cumTSN and shift the bitmap array */ |
5583 | | /*************************************************************/ |
5584 | 0 | new_cum_tsn = ntohl(fwd->new_cumulative_tsn); |
5585 | |
|
5586 | 0 | if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) { |
5587 | | /* Already got there ... */ |
5588 | 0 | return; |
5589 | 0 | } |
5590 | | /* |
5591 | | * now we know the new TSN is more advanced, let's find the actual |
5592 | | * gap |
5593 | | */ |
5594 | 0 | SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn); |
5595 | 0 | asoc->cumulative_tsn = new_cum_tsn; |
5596 | 0 | if (gap >= m_size) { |
5597 | 0 | if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) { |
5598 | 0 | struct mbuf *op_err; |
5599 | 0 | char msg[SCTP_DIAG_INFO_LEN]; |
5600 | | |
5601 | | /* |
5602 | | * out of range (of single byte chunks in the rwnd I |
5603 | | * give out). This must be an attacker. |
5604 | | */ |
5605 | 0 | *abort_flag = 1; |
5606 | 0 | SCTP_SNPRINTF(msg, sizeof(msg), |
5607 | 0 | "New cum ack %8.8x too high, highest TSN %8.8x", |
5608 | 0 | new_cum_tsn, asoc->highest_tsn_inside_map); |
5609 | 0 | op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); |
5610 | 0 | stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37; |
5611 | 0 | sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); |
5612 | 0 | return; |
5613 | 0 | } |
5614 | 0 | SCTP_STAT_INCR(sctps_fwdtsn_map_over); |
5615 | |
|
5616 | 0 | memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); |
5617 | 0 | asoc->mapping_array_base_tsn = new_cum_tsn + 1; |
5618 | 0 | asoc->highest_tsn_inside_map = new_cum_tsn; |
5619 | |
|
5620 | 0 | memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size); |
5621 | 0 | asoc->highest_tsn_inside_nr_map = new_cum_tsn; |
5622 | |
|
5623 | 0 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { |
5624 | 0 | sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); |
5625 | 0 | } |
5626 | 0 | } else { |
5627 | 0 | SCTP_TCB_LOCK_ASSERT(stcb); |
5628 | 0 | for (i = 0; i <= gap; i++) { |
5629 | 0 | if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) && |
5630 | 0 | !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) { |
5631 | 0 | SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i); |
5632 | 0 | if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) { |
5633 | 0 | asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i; |
5634 | 0 | } |
5635 | 0 | } |
5636 | 0 | } |
5637 | 0 | } |
5638 | | /*************************************************************/ |
5639 | | /* 2. Clear up re-assembly queue */ |
5640 | | /*************************************************************/ |
5641 | | |
5642 | | /* This is now done as part of clearing up the stream/seq */ |
5643 | 0 | if (asoc->idata_supported == 0) { |
5644 | 0 | uint16_t sid; |
5645 | | |
5646 | | /* Flush all the un-ordered data based on cum-tsn */ |
5647 | 0 | SCTP_INP_READ_LOCK(stcb->sctp_ep); |
5648 | 0 | for (sid = 0; sid < asoc->streamincnt; sid++) { |
5649 | 0 | strm = &asoc->strmin[sid]; |
5650 | 0 | if (!TAILQ_EMPTY(&strm->uno_inqueue)) { |
5651 | 0 | sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn); |
5652 | 0 | } |
5653 | 0 | } |
5654 | 0 | SCTP_INP_READ_UNLOCK(stcb->sctp_ep); |
5655 | 0 | } |
5656 | | /*******************************************************/ |
5657 | | /* 3. Update the PR-stream re-ordering queues and fix */ |
5658 | | /* delivery issues as needed. */ |
5659 | | /*******************************************************/ |
5660 | 0 | fwd_sz -= sizeof(*fwd); |
5661 | 0 | if (m && fwd_sz) { |
5662 | | /* New method. */ |
5663 | 0 | unsigned int num_str; |
5664 | 0 | uint32_t mid; |
5665 | 0 | uint16_t sid; |
5666 | 0 | uint16_t ordered, flags; |
5667 | 0 | struct sctp_strseq *stseq, strseqbuf; |
5668 | 0 | struct sctp_strseq_mid *stseq_m, strseqbuf_m; |
5669 | 0 | offset += sizeof(*fwd); |
5670 | |
|
5671 | 0 | SCTP_INP_READ_LOCK(stcb->sctp_ep); |
5672 | 0 | if (asoc->idata_supported) { |
5673 | 0 | num_str = fwd_sz / sizeof(struct sctp_strseq_mid); |
5674 | 0 | } else { |
5675 | 0 | num_str = fwd_sz / sizeof(struct sctp_strseq); |
5676 | 0 | } |
5677 | 0 | for (i = 0; i < num_str; i++) { |
5678 | 0 | if (asoc->idata_supported) { |
5679 | 0 | stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset, |
5680 | 0 | sizeof(struct sctp_strseq_mid), |
5681 | 0 | (uint8_t *)&strseqbuf_m); |
5682 | 0 | offset += sizeof(struct sctp_strseq_mid); |
5683 | 0 | if (stseq_m == NULL) { |
5684 | 0 | break; |
5685 | 0 | } |
5686 | 0 | sid = ntohs(stseq_m->sid); |
5687 | 0 | mid = ntohl(stseq_m->mid); |
5688 | 0 | flags = ntohs(stseq_m->flags); |
5689 | 0 | if (flags & PR_SCTP_UNORDERED_FLAG) { |
5690 | 0 | ordered = 0; |
5691 | 0 | } else { |
5692 | 0 | ordered = 1; |
5693 | 0 | } |
5694 | 0 | } else { |
5695 | 0 | stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset, |
5696 | 0 | sizeof(struct sctp_strseq), |
5697 | 0 | (uint8_t *)&strseqbuf); |
5698 | 0 | offset += sizeof(struct sctp_strseq); |
5699 | 0 | if (stseq == NULL) { |
5700 | 0 | break; |
5701 | 0 | } |
5702 | 0 | sid = ntohs(stseq->sid); |
5703 | 0 | mid = (uint32_t)ntohs(stseq->ssn); |
5704 | 0 | ordered = 1; |
5705 | 0 | } |
5706 | | /* Convert */ |
5707 | | |
5708 | | /* now process */ |
5709 | | |
5710 | | /* |
5711 | | * Ok we now look for the stream/seq on the read queue |
5712 | | * where its not all delivered. If we find it we transmute the |
5713 | | * read entry into a PDI_ABORTED. |
5714 | | */ |
5715 | 0 | if (sid >= asoc->streamincnt) { |
5716 | | /* screwed up streams, stop! */ |
5717 | 0 | break; |
5718 | 0 | } |
5719 | 0 | if ((asoc->str_of_pdapi == sid) && |
5720 | 0 | (asoc->ssn_of_pdapi == mid)) { |
5721 | | /* If this is the one we were partially delivering |
5722 | | * now then we no longer are. Note this will change |
5723 | | * with the reassembly re-write. |
5724 | | */ |
5725 | 0 | asoc->fragmented_delivery_inprogress = 0; |
5726 | 0 | } |
5727 | 0 | strm = &asoc->strmin[sid]; |
5728 | 0 | if (ordered) { |
5729 | 0 | TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, ncontrol) { |
5730 | 0 | if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) { |
5731 | 0 | sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn); |
5732 | 0 | } |
5733 | 0 | } |
5734 | 0 | } else { |
5735 | 0 | if (asoc->idata_supported) { |
5736 | 0 | TAILQ_FOREACH_SAFE(control, &strm->uno_inqueue, next_instrm, ncontrol) { |
5737 | 0 | if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) { |
5738 | 0 | sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn); |
5739 | 0 | } |
5740 | 0 | } |
5741 | 0 | } else { |
5742 | 0 | if (!TAILQ_EMPTY(&strm->uno_inqueue)) { |
5743 | 0 | sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn); |
5744 | 0 | } |
5745 | 0 | } |
5746 | 0 | } |
5747 | 0 | TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) { |
5748 | 0 | if ((control->sinfo_stream == sid) && |
5749 | 0 | (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) { |
5750 | 0 | control->pdapi_aborted = 1; |
5751 | 0 | control->end_added = 1; |
5752 | 0 | if (control->on_strm_q == SCTP_ON_ORDERED) { |
5753 | 0 | TAILQ_REMOVE(&strm->inqueue, control, next_instrm); |
5754 | 0 | if (asoc->size_on_all_streams >= control->length) { |
5755 | 0 | asoc->size_on_all_streams -= control->length; |
5756 | 0 | } else { |
5757 | 0 | #ifdef INVARIANTS |
5758 | 0 | panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); |
5759 | | #else |
5760 | | asoc->size_on_all_streams = 0; |
5761 | | #endif |
5762 | 0 | } |
5763 | 0 | sctp_ucount_decr(asoc->cnt_on_all_streams); |
5764 | 0 | } else if (control->on_strm_q == SCTP_ON_UNORDERED) { |
5765 | 0 | TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); |
5766 | 0 | #ifdef INVARIANTS |
5767 | 0 | } else if (control->on_strm_q) { |
5768 | 0 | panic("strm: %p ctl: %p unknown %d", |
5769 | 0 | strm, control, control->on_strm_q); |
5770 | 0 | #endif |
5771 | 0 | } |
5772 | 0 | control->on_strm_q = 0; |
5773 | 0 | sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, |
5774 | 0 | stcb, |
5775 | 0 | SCTP_PARTIAL_DELIVERY_ABORTED, |
5776 | 0 | (void *)control, |
5777 | 0 | SCTP_SO_NOT_LOCKED); |
5778 | 0 | break; |
5779 | 0 | } else if ((control->sinfo_stream == sid) && |
5780 | 0 | SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) { |
5781 | | /* We are past our victim SSN */ |
5782 | 0 | break; |
5783 | 0 | } |
5784 | 0 | } |
5785 | 0 | if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) { |
5786 | | /* Update the sequence number */ |
5787 | 0 | strm->last_mid_delivered = mid; |
5788 | 0 | } |
5789 | | /* now kick the stream the new way */ |
5790 | | /*sa_ignore NO_NULL_CHK*/ |
5791 | 0 | sctp_kick_prsctp_reorder_queue(stcb, strm); |
5792 | 0 | } |
5793 | 0 | SCTP_INP_READ_UNLOCK(stcb->sctp_ep); |
5794 | 0 | } |
5795 | | /* |
5796 | | * Now slide thing forward. |
5797 | | */ |
5798 | 0 | sctp_slide_mapping_arrays(stcb); |
5799 | 0 | } |