Coverage Report

Created: 2022-08-24 06:02

/src/usrsctp/usrsctplib/netinet/sctp_indata.c
Line
Count
Source (jump to first uncovered line)
1
/*-
2
 * SPDX-License-Identifier: BSD-3-Clause
3
 *
4
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7
 *
8
 * Redistribution and use in source and binary forms, with or without
9
 * modification, are permitted provided that the following conditions are met:
10
 *
11
 * a) Redistributions of source code must retain the above copyright notice,
12
 *    this list of conditions and the following disclaimer.
13
 *
14
 * b) Redistributions in binary form must reproduce the above copyright
15
 *    notice, this list of conditions and the following disclaimer in
16
 *    the documentation and/or other materials provided with the distribution.
17
 *
18
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19
 *    contributors may be used to endorse or promote products derived
20
 *    from this software without specific prior written permission.
21
 *
22
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32
 * THE POSSIBILITY OF SUCH DAMAGE.
33
 */
34
35
#if defined(__FreeBSD__) && !defined(__Userspace__)
36
#include <sys/cdefs.h>
37
__FBSDID("$FreeBSD$");
38
#endif
39
40
#include <netinet/sctp_os.h>
41
#if defined(__FreeBSD__) && !defined(__Userspace__)
42
#include <sys/proc.h>
43
#endif
44
#include <netinet/sctp_var.h>
45
#include <netinet/sctp_sysctl.h>
46
#include <netinet/sctp_header.h>
47
#include <netinet/sctp_pcb.h>
48
#include <netinet/sctputil.h>
49
#include <netinet/sctp_output.h>
50
#include <netinet/sctp_uio.h>
51
#include <netinet/sctp_auth.h>
52
#include <netinet/sctp_timer.h>
53
#include <netinet/sctp_asconf.h>
54
#include <netinet/sctp_indata.h>
55
#include <netinet/sctp_bsd_addr.h>
56
#include <netinet/sctp_input.h>
57
#include <netinet/sctp_crc32.h>
58
#if defined(__FreeBSD__) && !defined(__Userspace__)
59
#include <netinet/sctp_lock_bsd.h>
60
#endif
61
/*
62
 * NOTES: On the outbound side of things I need to check the sack timer to
63
 * see if I should generate a sack into the chunk queue (if I have data to
64
 * send that is and will be sending it .. for bundling.
65
 *
66
 * The callback in sctp_usrreq.c will get called when the socket is read from.
67
 * This will cause sctp_service_queues() to get called on the top entry in
68
 * the list.
69
 */
70
static uint32_t
71
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
72
      struct sctp_stream_in *strm,
73
      struct sctp_tcb *stcb,
74
      struct sctp_association *asoc,
75
      struct sctp_tmit_chunk *chk, int hold_rlock);
76
77
void
78
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
79
19.5k
{
80
19.5k
  asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
81
19.5k
}
82
83
/* Calculate what the rwnd would be */
84
uint32_t
85
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
86
26.9k
{
87
26.9k
  uint32_t calc = 0;
88
89
  /*
90
   * This is really set wrong with respect to a 1-2-m socket. Since
91
   * the sb_cc is the count that everyone as put up. When we re-write
92
   * sctp_soreceive then we will fix this so that ONLY this
93
   * associations data is taken into account.
94
   */
95
26.9k
  if (stcb->sctp_socket == NULL) {
96
0
    return (calc);
97
0
  }
98
99
26.9k
  KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
100
26.9k
          ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
101
26.9k
  KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
102
26.9k
          ("size_on_all_streams is %u", asoc->size_on_all_streams));
103
26.9k
  if (stcb->asoc.sb_cc == 0 &&
104
26.9k
      asoc->cnt_on_reasm_queue == 0 &&
105
26.9k
      asoc->cnt_on_all_streams == 0) {
106
    /* Full rwnd granted */
107
8.58k
    calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
108
8.58k
    return (calc);
109
8.58k
  }
110
  /* get actual space */
111
18.4k
  calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
112
  /*
113
   * take out what has NOT been put on socket queue and we yet hold
114
   * for putting up.
115
   */
116
18.4k
  calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
117
18.4k
                                           asoc->cnt_on_reasm_queue * MSIZE));
118
18.4k
  calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
119
18.4k
                                           asoc->cnt_on_all_streams * MSIZE));
120
18.4k
  if (calc == 0) {
121
    /* out of space */
122
812
    return (calc);
123
812
  }
124
125
  /* what is the overhead of all these rwnd's */
126
17.6k
  calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
127
  /* If the window gets too small due to ctrl-stuff, reduce it
128
   * to 1, even it is 0. SWS engaged
129
   */
130
17.6k
  if (calc < stcb->asoc.my_rwnd_control_len) {
131
7.99k
    calc = 1;
132
7.99k
  }
133
17.6k
  return (calc);
134
18.4k
}
135
136
/*
137
 * Build out our readq entry based on the incoming packet.
138
 */
139
struct sctp_queued_to_read *
140
sctp_build_readq_entry(struct sctp_tcb *stcb,
141
    struct sctp_nets *net,
142
    uint32_t tsn, uint32_t ppid,
143
    uint32_t context, uint16_t sid,
144
    uint32_t mid, uint8_t flags,
145
    struct mbuf *dm)
146
632k
{
147
632k
  struct sctp_queued_to_read *read_queue_e = NULL;
148
149
632k
  sctp_alloc_a_readq(stcb, read_queue_e);
150
632k
  if (read_queue_e == NULL) {
151
0
    goto failed_build;
152
0
  }
153
632k
  memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
154
632k
  read_queue_e->sinfo_stream = sid;
155
632k
  read_queue_e->sinfo_flags = (flags << 8);
156
632k
  read_queue_e->sinfo_ppid = ppid;
157
632k
  read_queue_e->sinfo_context = context;
158
632k
  read_queue_e->sinfo_tsn = tsn;
159
632k
  read_queue_e->sinfo_cumtsn = tsn;
160
632k
  read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
161
632k
  read_queue_e->mid = mid;
162
632k
  read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
163
632k
  TAILQ_INIT(&read_queue_e->reasm);
164
632k
  read_queue_e->whoFrom = net;
165
632k
  atomic_add_int(&net->ref_count, 1);
166
632k
  read_queue_e->data = dm;
167
632k
  read_queue_e->stcb = stcb;
168
632k
  read_queue_e->port_from = stcb->rport;
169
632k
  if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
170
0
    read_queue_e->do_not_ref_stcb = 1;
171
0
  }
172
632k
failed_build:
173
632k
  return (read_queue_e);
174
632k
}
175
176
struct mbuf *
177
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
178
0
{
179
0
  struct sctp_extrcvinfo *seinfo;
180
0
  struct sctp_sndrcvinfo *outinfo;
181
0
  struct sctp_rcvinfo *rcvinfo;
182
0
  struct sctp_nxtinfo *nxtinfo;
183
#if defined(_WIN32)
184
  WSACMSGHDR *cmh;
185
#else
186
0
  struct cmsghdr *cmh;
187
0
#endif
188
0
  struct mbuf *ret;
189
0
  int len;
190
0
  int use_extended;
191
0
  int provide_nxt;
192
193
0
  if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
194
0
      sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
195
0
      sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
196
    /* user does not want any ancillary data */
197
0
    return (NULL);
198
0
  }
199
200
0
  len = 0;
201
0
  if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
202
0
    len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
203
0
  }
204
0
  seinfo = (struct sctp_extrcvinfo *)sinfo;
205
0
  if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
206
0
      (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
207
0
    provide_nxt = 1;
208
0
    len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
209
0
  } else {
210
0
    provide_nxt = 0;
211
0
  }
212
0
  if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
213
0
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
214
0
      use_extended = 1;
215
0
      len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
216
0
    } else {
217
0
      use_extended = 0;
218
0
      len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
219
0
    }
220
0
  } else {
221
0
    use_extended = 0;
222
0
  }
223
224
0
  ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
225
0
  if (ret == NULL) {
226
    /* No space */
227
0
    return (ret);
228
0
  }
229
0
  SCTP_BUF_LEN(ret) = 0;
230
231
  /* We need a CMSG header followed by the struct */
232
#if defined(_WIN32)
233
  cmh = mtod(ret, WSACMSGHDR *);
234
#else
235
0
  cmh = mtod(ret, struct cmsghdr *);
236
0
#endif
237
  /*
238
   * Make sure that there is no un-initialized padding between
239
   * the cmsg header and cmsg data and after the cmsg data.
240
   */
241
0
  memset(cmh, 0, len);
242
0
  if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
243
0
    cmh->cmsg_level = IPPROTO_SCTP;
244
0
    cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
245
0
    cmh->cmsg_type = SCTP_RCVINFO;
246
0
    rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
247
0
    rcvinfo->rcv_sid = sinfo->sinfo_stream;
248
0
    rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
249
0
    rcvinfo->rcv_flags = sinfo->sinfo_flags;
250
0
    rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
251
0
    rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
252
0
    rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
253
0
    rcvinfo->rcv_context = sinfo->sinfo_context;
254
0
    rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
255
#if defined(_WIN32)
256
    cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
257
#else
258
0
    cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
259
0
#endif
260
0
    SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
261
0
  }
262
0
  if (provide_nxt) {
263
0
    cmh->cmsg_level = IPPROTO_SCTP;
264
0
    cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
265
0
    cmh->cmsg_type = SCTP_NXTINFO;
266
0
    nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
267
0
    nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
268
0
    nxtinfo->nxt_flags = 0;
269
0
    if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
270
0
      nxtinfo->nxt_flags |= SCTP_UNORDERED;
271
0
    }
272
0
    if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
273
0
      nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
274
0
    }
275
0
    if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
276
0
      nxtinfo->nxt_flags |= SCTP_COMPLETE;
277
0
    }
278
0
    nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
279
0
    nxtinfo->nxt_length = seinfo->serinfo_next_length;
280
0
    nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
281
#if defined(_WIN32)
282
    cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
283
#else
284
0
    cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
285
0
#endif
286
0
    SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
287
0
  }
288
0
  if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
289
0
    cmh->cmsg_level = IPPROTO_SCTP;
290
0
    outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
291
0
    if (use_extended) {
292
0
      cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
293
0
      cmh->cmsg_type = SCTP_EXTRCV;
294
0
      memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
295
0
      SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
296
0
    } else {
297
0
      cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
298
0
      cmh->cmsg_type = SCTP_SNDRCV;
299
0
      *outinfo = *sinfo;
300
0
      SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
301
0
    }
302
0
  }
303
0
  return (ret);
304
0
}
305
306
static void
307
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
308
2.38k
{
309
2.38k
  uint32_t gap, i;
310
2.38k
  int in_r, in_nr;
311
312
2.38k
  if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
313
0
    return;
314
0
  }
315
2.38k
  if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
316
    /*
317
     * This tsn is behind the cum ack and thus we don't
318
     * need to worry about it being moved from one to the other.
319
     */
320
8
    return;
321
8
  }
322
2.37k
  SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
323
2.37k
  in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
324
2.37k
  in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
325
2.37k
  KASSERT(in_r || in_nr, ("%s: Things are really messed up now", __func__));
326
2.37k
  if (!in_nr) {
327
2.30k
    SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
328
2.30k
    if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
329
1.62k
      asoc->highest_tsn_inside_nr_map = tsn;
330
1.62k
    }
331
2.30k
  }
332
2.37k
  if (in_r) {
333
2.30k
    SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
334
2.30k
    if (tsn == asoc->highest_tsn_inside_map) {
335
      /* We must back down to see what the new highest is. */
336
1.60M
      for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
337
1.60M
        SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
338
1.60M
        if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
339
249
          asoc->highest_tsn_inside_map = i;
340
249
          break;
341
249
        }
342
1.60M
      }
343
1.41k
      if (!SCTP_TSN_GE(i, asoc->mapping_array_base_tsn)) {
344
1.16k
        asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
345
1.16k
      }
346
1.41k
    }
347
2.30k
  }
348
2.37k
}
349
350
static int
351
sctp_place_control_in_stream(struct sctp_stream_in *strm,
352
           struct sctp_association *asoc,
353
           struct sctp_queued_to_read *control)
354
3.74k
{
355
3.74k
  struct sctp_queued_to_read *at;
356
3.74k
  struct sctp_readhead *q;
357
3.74k
  uint8_t flags, unordered;
358
359
3.74k
  flags = (control->sinfo_flags >> 8);
360
3.74k
  unordered = flags & SCTP_DATA_UNORDERED;
361
3.74k
  if (unordered) {
362
1.40k
    q = &strm->uno_inqueue;
363
1.40k
    if (asoc->idata_supported == 0) {
364
820
      if (!TAILQ_EMPTY(q)) {
365
        /* Only one stream can be here in old style  -- abort */
366
3
        return (-1);
367
3
      }
368
817
      TAILQ_INSERT_TAIL(q, control, next_instrm);
369
817
      control->on_strm_q = SCTP_ON_UNORDERED;
370
817
      return (0);
371
820
    }
372
2.34k
  } else {
373
2.34k
    q = &strm->inqueue;
374
2.34k
  }
375
2.92k
  if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
376
659
    control->end_added = 1;
377
659
    control->first_frag_seen = 1;
378
659
    control->last_frag_seen = 1;
379
659
  }
380
2.92k
  if (TAILQ_EMPTY(q)) {
381
    /* Empty queue */
382
1.90k
    TAILQ_INSERT_HEAD(q, control, next_instrm);
383
1.90k
    if (unordered) {
384
229
      control->on_strm_q = SCTP_ON_UNORDERED;
385
1.67k
    } else {
386
1.67k
      control->on_strm_q = SCTP_ON_ORDERED;
387
1.67k
    }
388
1.90k
    return (0);
389
1.90k
  } else {
390
2.45k
    TAILQ_FOREACH(at, q, next_instrm) {
391
2.45k
      if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
392
        /*
393
         * one in queue is bigger than the
394
         * new one, insert before this one
395
         */
396
638
        TAILQ_INSERT_BEFORE(at, control, next_instrm);
397
638
        if (unordered) {
398
269
          control->on_strm_q = SCTP_ON_UNORDERED;
399
369
        } else {
400
369
          control->on_strm_q = SCTP_ON_ORDERED;
401
369
        }
402
638
        break;
403
1.81k
      } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
404
        /*
405
         * Gak, He sent me a duplicate msg
406
         * id number?? return -1 to abort.
407
         */
408
1
        return (-1);
409
1.81k
      } else {
410
1.81k
        if (TAILQ_NEXT(at, next_instrm) == NULL) {
411
          /*
412
           * We are at the end, insert
413
           * it after this one
414
           */
415
379
          if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
416
0
            sctp_log_strm_del(control, at,
417
0
                  SCTP_STR_LOG_FROM_INSERT_TL);
418
0
          }
419
379
          TAILQ_INSERT_AFTER(q, at, control, next_instrm);
420
379
          if (unordered) {
421
83
            control->on_strm_q = SCTP_ON_UNORDERED;
422
296
          } else {
423
296
            control->on_strm_q = SCTP_ON_ORDERED;
424
296
          }
425
379
          break;
426
379
        }
427
1.81k
      }
428
2.45k
    }
429
1.01k
  }
430
1.01k
  return (0);
431
2.92k
}
432
433
static void
434
sctp_abort_in_reasm(struct sctp_tcb *stcb,
435
                    struct sctp_queued_to_read *control,
436
                    struct sctp_tmit_chunk *chk,
437
                    int *abort_flag, int opspot)
438
273
{
439
273
  char msg[SCTP_DIAG_INFO_LEN];
440
273
  struct mbuf *oper;
441
442
273
  if (stcb->asoc.idata_supported) {
443
216
    SCTP_SNPRINTF(msg, sizeof(msg),
444
216
                  "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
445
216
                  opspot,
446
216
                  control->fsn_included,
447
216
                  chk->rec.data.tsn,
448
216
                  chk->rec.data.sid,
449
216
                  chk->rec.data.fsn, chk->rec.data.mid);
450
216
  } else {
451
57
    SCTP_SNPRINTF(msg, sizeof(msg),
452
57
                  "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
453
57
                  opspot,
454
57
                  control->fsn_included,
455
57
                  chk->rec.data.tsn,
456
57
                  chk->rec.data.sid,
457
57
                  chk->rec.data.fsn,
458
57
                  (uint16_t)chk->rec.data.mid);
459
57
  }
460
273
  oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
461
273
  sctp_m_freem(chk->data);
462
273
  chk->data = NULL;
463
273
  sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
464
273
  stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
465
273
  sctp_abort_an_association(stcb->sctp_ep, stcb, oper, false, SCTP_SO_NOT_LOCKED);
466
273
  *abort_flag = 1;
467
273
}
468
469
static void
470
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
471
4
{
472
  /*
473
   * The control could not be placed and must be cleaned.
474
   */
475
4
  struct sctp_tmit_chunk *chk, *nchk;
476
4
  TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
477
0
    TAILQ_REMOVE(&control->reasm, chk, sctp_next);
478
0
    if (chk->data)
479
0
      sctp_m_freem(chk->data);
480
0
    chk->data = NULL;
481
0
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
482
0
  }
483
4
  sctp_free_remote_addr(control->whoFrom);
484
4
  if (control->data) {
485
4
    sctp_m_freem(control->data);
486
4
    control->data = NULL;
487
4
  }
488
4
  sctp_free_a_readq(stcb, control);
489
4
}
490
491
/*
492
 * Queue the chunk either right into the socket buffer if it is the next one
493
 * to go OR put it in the correct place in the delivery queue.  If we do
494
 * append to the so_buf, keep doing so until we are out of order as
495
 * long as the control's entered are non-fragmented.
496
 */
497
static void
498
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
499
    struct sctp_association *asoc,
500
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
501
1.10k
{
502
  /*
503
   * FIX-ME maybe? What happens when the ssn wraps? If we are getting
504
   * all the data in one stream this could happen quite rapidly. One
505
   * could use the TSN to keep track of things, but this scheme breaks
506
   * down in the other type of stream usage that could occur. Send a
507
   * single msg to stream 0, send 4Billion messages to stream 1, now
508
   * send a message to stream 0. You have a situation where the TSN
509
   * has wrapped but not in the stream. Is this worth worrying about
510
   * or should we just change our queue sort at the bottom to be by
511
   * TSN.
512
   *
513
   * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
514
   * with TSN 1? If the peer is doing some sort of funky TSN/SSN
515
   * assignment this could happen... and I don't see how this would be
516
   * a violation. So for now I am undecided an will leave the sort by
517
   * SSN alone. Maybe a hybrid approach is the answer
518
   *
519
   */
520
1.10k
  struct sctp_queued_to_read *at;
521
1.10k
  int queue_needed;
522
1.10k
  uint32_t nxt_todel;
523
1.10k
  struct mbuf *op_err;
524
1.10k
  struct sctp_stream_in *strm;
525
1.10k
  char msg[SCTP_DIAG_INFO_LEN];
526
527
1.10k
  strm = &asoc->strmin[control->sinfo_stream];
528
1.10k
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
529
0
    sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
530
0
  }
531
1.10k
  if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
532
    /* The incoming sseq is behind where we last delivered? */
533
87
    SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
534
87
      strm->last_mid_delivered, control->mid);
535
    /*
536
     * throw it in the stream so it gets cleaned up in
537
     * association destruction
538
     */
539
87
    TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
540
87
    if (asoc->idata_supported) {
541
28
      SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
542
28
                    strm->last_mid_delivered, control->sinfo_tsn,
543
28
                    control->sinfo_stream, control->mid);
544
59
    } else {
545
59
      SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
546
59
                    (uint16_t)strm->last_mid_delivered,
547
59
                    control->sinfo_tsn,
548
59
                    control->sinfo_stream,
549
59
                    (uint16_t)control->mid);
550
59
    }
551
87
    op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
552
87
    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
553
87
    sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
554
87
    *abort_flag = 1;
555
87
    return;
556
87
  }
557
1.02k
  queue_needed = 1;
558
1.02k
  asoc->size_on_all_streams += control->length;
559
1.02k
  sctp_ucount_incr(asoc->cnt_on_all_streams);
560
1.02k
  nxt_todel = strm->last_mid_delivered + 1;
561
1.02k
  if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
562
#if defined(__APPLE__) && !defined(__Userspace__)
563
    struct socket *so;
564
565
    so = SCTP_INP_SO(stcb->sctp_ep);
566
    atomic_add_int(&stcb->asoc.refcnt, 1);
567
    SCTP_TCB_UNLOCK(stcb);
568
    SCTP_SOCKET_LOCK(so, 1);
569
    SCTP_TCB_LOCK(stcb);
570
    atomic_subtract_int(&stcb->asoc.refcnt, 1);
571
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
572
      SCTP_SOCKET_UNLOCK(so, 1);
573
      return;
574
    }
575
#endif
576
    /* can be delivered right away? */
577
283
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
578
0
      sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
579
0
    }
580
    /* EY it wont be queued if it could be delivered directly */
581
283
    queue_needed = 0;
582
283
    if (asoc->size_on_all_streams >= control->length) {
583
283
      asoc->size_on_all_streams -= control->length;
584
283
    } else {
585
0
#ifdef INVARIANTS
586
0
      panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
587
#else
588
      asoc->size_on_all_streams = 0;
589
#endif
590
0
    }
591
283
    sctp_ucount_decr(asoc->cnt_on_all_streams);
592
283
    strm->last_mid_delivered++;
593
283
    sctp_mark_non_revokable(asoc, control->sinfo_tsn);
594
283
    sctp_add_to_readq(stcb->sctp_ep, stcb,
595
283
                      control,
596
283
                      &stcb->sctp_socket->so_rcv, 1,
597
283
                      SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
598
283
    TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
599
      /* all delivered */
600
110
      nxt_todel = strm->last_mid_delivered + 1;
601
110
      if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
602
110
          (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
603
29
        if (control->on_strm_q == SCTP_ON_ORDERED) {
604
29
          TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
605
29
          if (asoc->size_on_all_streams >= control->length) {
606
29
            asoc->size_on_all_streams -= control->length;
607
29
          } else {
608
0
#ifdef INVARIANTS
609
0
            panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
610
#else
611
            asoc->size_on_all_streams = 0;
612
#endif
613
0
          }
614
29
          sctp_ucount_decr(asoc->cnt_on_all_streams);
615
29
#ifdef INVARIANTS
616
29
        } else {
617
0
          panic("Huh control: %p is on_strm_q: %d",
618
0
                control, control->on_strm_q);
619
0
#endif
620
0
        }
621
29
        control->on_strm_q = 0;
622
29
        strm->last_mid_delivered++;
623
        /*
624
         * We ignore the return of deliver_data here
625
         * since we always can hold the chunk on the
626
         * d-queue. And we have a finite number that
627
         * can be delivered from the strq.
628
         */
629
29
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
630
0
          sctp_log_strm_del(control, NULL,
631
0
                SCTP_STR_LOG_FROM_IMMED_DEL);
632
0
        }
633
29
        sctp_mark_non_revokable(asoc, control->sinfo_tsn);
634
29
        sctp_add_to_readq(stcb->sctp_ep, stcb,
635
29
                          control,
636
29
                          &stcb->sctp_socket->so_rcv, 1,
637
29
                          SCTP_READ_LOCK_NOT_HELD,
638
29
                          SCTP_SO_LOCKED);
639
29
        continue;
640
81
      } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
641
11
        *need_reasm = 1;
642
11
      }
643
81
      break;
644
110
    }
645
#if defined(__APPLE__) && !defined(__Userspace__)
646
    SCTP_SOCKET_UNLOCK(so, 1);
647
#endif
648
283
  }
649
1.02k
  if (queue_needed) {
650
    /*
651
     * Ok, we did not deliver this guy, find the correct place
652
     * to put it on the queue.
653
     */
654
739
    if (sctp_place_control_in_stream(strm, asoc, control)) {
655
4
      SCTP_SNPRINTF(msg, sizeof(msg),
656
4
                    "Queue to str MID: %u duplicate", control->mid);
657
4
      sctp_clean_up_control(stcb, control);
658
4
      op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
659
4
      stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
660
4
      sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
661
4
      *abort_flag = 1;
662
4
    }
663
739
  }
664
1.02k
}
665
666
static void
667
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
668
1.60k
{
669
1.60k
  struct mbuf *m, *prev = NULL;
670
1.60k
  struct sctp_tcb *stcb;
671
672
1.60k
  stcb = control->stcb;
673
1.60k
  control->held_length = 0;
674
1.60k
  control->length = 0;
675
1.60k
  m = control->data;
676
3.81k
  while (m) {
677
2.21k
    if (SCTP_BUF_LEN(m) == 0) {
678
      /* Skip mbufs with NO length */
679
252
      if (prev == NULL) {
680
        /* First one */
681
252
        control->data = sctp_m_free(m);
682
252
        m = control->data;
683
252
      } else {
684
0
        SCTP_BUF_NEXT(prev) = sctp_m_free(m);
685
0
        m = SCTP_BUF_NEXT(prev);
686
0
      }
687
252
      if (m == NULL) {
688
0
        control->tail_mbuf = prev;
689
0
      }
690
252
      continue;
691
252
    }
692
1.96k
    prev = m;
693
1.96k
    atomic_add_int(&control->length, SCTP_BUF_LEN(m));
694
1.96k
    if (control->on_read_q) {
695
      /*
696
       * On read queue so we must increment the
697
       * SB stuff, we assume caller has done any locks of SB.
698
       */
699
0
      sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
700
0
    }
701
1.96k
    m = SCTP_BUF_NEXT(m);
702
1.96k
  }
703
1.60k
  if (prev) {
704
1.60k
    control->tail_mbuf = prev;
705
1.60k
  }
706
1.60k
}
707
708
static void
709
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
710
542
{
711
542
  struct mbuf *prev=NULL;
712
542
  struct sctp_tcb *stcb;
713
714
542
  stcb = control->stcb;
715
542
  if (stcb == NULL) {
716
0
#ifdef INVARIANTS
717
0
    panic("Control broken");
718
#else
719
    return;
720
#endif
721
0
  }
722
542
  if (control->tail_mbuf == NULL) {
723
    /* TSNH */
724
0
    sctp_m_freem(control->data);
725
0
    control->data = m;
726
0
    sctp_setup_tail_pointer(control);
727
0
    return;
728
0
  }
729
542
  control->tail_mbuf->m_next = m;
730
1.30k
  while (m) {
731
764
    if (SCTP_BUF_LEN(m) == 0) {
732
      /* Skip mbufs with NO length */
733
39
      if (prev == NULL) {
734
        /* First one */
735
39
        control->tail_mbuf->m_next = sctp_m_free(m);
736
39
        m = control->tail_mbuf->m_next;
737
39
      } else {
738
0
        SCTP_BUF_NEXT(prev) = sctp_m_free(m);
739
0
        m = SCTP_BUF_NEXT(prev);
740
0
      }
741
39
      if (m == NULL) {
742
0
        control->tail_mbuf = prev;
743
0
      }
744
39
      continue;
745
39
    }
746
725
    prev = m;
747
725
    if (control->on_read_q) {
748
      /*
749
       * On read queue so we must increment the
750
       * SB stuff, we assume caller has done any locks of SB.
751
       */
752
0
      sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
753
0
    }
754
725
    *added += SCTP_BUF_LEN(m);
755
725
    atomic_add_int(&control->length, SCTP_BUF_LEN(m));
756
725
    m = SCTP_BUF_NEXT(m);
757
725
  }
758
542
  if (prev) {
759
542
    control->tail_mbuf = prev;
760
542
  }
761
542
}
762
763
static void
764
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
765
157
{
766
157
  memset(nc, 0, sizeof(struct sctp_queued_to_read));
767
157
  nc->sinfo_stream = control->sinfo_stream;
768
157
  nc->mid = control->mid;
769
157
  TAILQ_INIT(&nc->reasm);
770
157
  nc->top_fsn = control->top_fsn;
771
157
  nc->mid = control->mid;
772
157
  nc->sinfo_flags = control->sinfo_flags;
773
157
  nc->sinfo_ppid = control->sinfo_ppid;
774
157
  nc->sinfo_context = control->sinfo_context;
775
157
  nc->fsn_included = 0xffffffff;
776
157
  nc->sinfo_tsn = control->sinfo_tsn;
777
157
  nc->sinfo_cumtsn = control->sinfo_cumtsn;
778
157
  nc->sinfo_assoc_id = control->sinfo_assoc_id;
779
157
  nc->whoFrom = control->whoFrom;
780
157
  atomic_add_int(&nc->whoFrom->ref_count, 1);
781
157
  nc->stcb = control->stcb;
782
157
  nc->port_from = control->port_from;
783
157
  nc->do_not_ref_stcb = control->do_not_ref_stcb;
784
157
}
785
786
static void
787
sctp_reset_a_control(struct sctp_queued_to_read *control,
788
                     struct sctp_inpcb *inp, uint32_t tsn)
789
0
{
790
0
  control->fsn_included = tsn;
791
0
  if (control->on_read_q) {
792
    /*
793
     * We have to purge it from there,
794
     * hopefully this will work :-)
795
     */
796
0
    TAILQ_REMOVE(&inp->read_queue, control, next);
797
0
    control->on_read_q = 0;
798
0
  }
799
0
}
800
801
static int
802
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
803
                               struct sctp_association *asoc,
804
                               struct sctp_stream_in *strm,
805
                               struct sctp_queued_to_read *control,
806
                               uint32_t pd_point,
807
                               int inp_read_lock_held)
808
2.37k
{
809
  /* Special handling for the old un-ordered data chunk.
810
   * All the chunks/TSN's go to mid 0. So
811
   * we have to do the old style watching to see
812
   * if we have it all. If you return one, no other
813
   * control entries on the un-ordered queue will
814
   * be looked at. In theory there should be no others
815
   * entries in reality, unless the guy is sending both
816
   * unordered NDATA and unordered DATA...
817
   */
818
2.37k
  struct sctp_tmit_chunk *chk, *lchk, *tchk;
819
2.37k
  uint32_t fsn;
820
2.37k
  struct sctp_queued_to_read *nc;
821
2.37k
  int cnt_added;
822
823
2.37k
  if (control->first_frag_seen == 0) {
824
    /* Nothing we can do, we have not seen the first piece yet */
825
834
    return (1);
826
834
  }
827
  /* Collapse any we can */
828
1.53k
  cnt_added = 0;
829
1.62k
restart:
830
1.62k
  fsn = control->fsn_included + 1;
831
  /* Now what can we add? */
832
1.62k
  TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
833
1.30k
    if (chk->rec.data.fsn == fsn) {
834
      /* Ok lets add it */
835
487
      sctp_alloc_a_readq(stcb, nc);
836
487
      if (nc == NULL) {
837
0
        break;
838
0
      }
839
487
      memset(nc, 0, sizeof(struct sctp_queued_to_read));
840
487
      TAILQ_REMOVE(&control->reasm, chk, sctp_next);
841
487
      sctp_add_chk_to_control(control, strm, stcb, asoc, chk, inp_read_lock_held);
842
487
      fsn++;
843
487
      cnt_added++;
844
487
      chk = NULL;
845
487
      if (control->end_added) {
846
        /* We are done */
847
201
        if (!TAILQ_EMPTY(&control->reasm)) {
848
          /*
849
           * Ok we have to move anything left on
850
           * the control queue to a new control.
851
           */
852
157
          sctp_build_readq_entry_from_ctl(nc, control);
853
157
          tchk = TAILQ_FIRST(&control->reasm);
854
157
          if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
855
133
            TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
856
133
            if (asoc->size_on_reasm_queue >= tchk->send_size) {
857
133
              asoc->size_on_reasm_queue -= tchk->send_size;
858
133
            } else {
859
0
#ifdef INVARIANTS
860
0
            panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
861
#else
862
            asoc->size_on_reasm_queue = 0;
863
#endif
864
0
            }
865
133
            sctp_ucount_decr(asoc->cnt_on_reasm_queue);
866
133
            nc->first_frag_seen = 1;
867
133
            nc->fsn_included = tchk->rec.data.fsn;
868
133
            nc->data = tchk->data;
869
133
            nc->sinfo_ppid = tchk->rec.data.ppid;
870
133
            nc->sinfo_tsn = tchk->rec.data.tsn;
871
133
            sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
872
133
            tchk->data = NULL;
873
133
            sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
874
133
            sctp_setup_tail_pointer(nc);
875
133
            tchk = TAILQ_FIRST(&control->reasm);
876
133
          }
877
          /* Spin the rest onto the queue */
878
621
          while (tchk) {
879
464
            TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
880
464
            TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
881
464
            tchk = TAILQ_FIRST(&control->reasm);
882
464
          }
883
          /* Now lets add it to the queue after removing control */
884
157
          TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
885
157
          nc->on_strm_q = SCTP_ON_UNORDERED;
886
157
          if (control->on_strm_q) {
887
157
            TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
888
157
            control->on_strm_q = 0;
889
157
          }
890
157
        }
891
201
        if (control->pdapi_started) {
892
0
          strm->pd_api_started = 0;
893
0
          control->pdapi_started = 0;
894
0
        }
895
201
        if (control->on_strm_q) {
896
44
          TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
897
44
          control->on_strm_q = 0;
898
44
          SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
899
44
        }
900
201
        if (control->on_read_q == 0) {
901
201
          sctp_add_to_readq(stcb->sctp_ep, stcb, control,
902
201
                &stcb->sctp_socket->so_rcv, control->end_added,
903
201
                inp_read_lock_held, SCTP_SO_NOT_LOCKED);
904
201
#if defined(__Userspace__)
905
201
        } else {
906
0
          sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
907
0
#endif
908
0
        }
909
201
        sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
910
201
        if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
911
          /* Switch to the new guy and continue */
912
93
          control = nc;
913
93
          goto restart;
914
108
        } else {
915
108
          if (nc->on_strm_q == 0) {
916
44
            sctp_free_a_readq(stcb, nc);
917
44
          }
918
108
        }
919
108
        return (1);
920
286
      } else {
921
286
        sctp_free_a_readq(stcb, nc);
922
286
      }
923
822
    } else {
924
      /* Can't add more */
925
822
      break;
926
822
    }
927
1.30k
  }
928
1.42k
  if (cnt_added && strm->pd_api_started) {
929
0
#if defined(__Userspace__)
930
0
    sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
931
0
#endif
932
0
    sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
933
0
  }
934
1.42k
  if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
935
0
    strm->pd_api_started = 1;
936
0
    control->pdapi_started = 1;
937
0
    sctp_add_to_readq(stcb->sctp_ep, stcb, control,
938
0
                      &stcb->sctp_socket->so_rcv, control->end_added,
939
0
                      inp_read_lock_held, SCTP_SO_NOT_LOCKED);
940
0
    sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
941
0
    return (0);
942
1.42k
  } else {
943
1.42k
    return (1);
944
1.42k
  }
945
1.42k
}
946
947
static void
948
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
949
                               struct sctp_association *asoc,
950
                               struct sctp_queued_to_read *control,
951
                               struct sctp_tmit_chunk *chk,
952
                               int *abort_flag)
953
2.21k
{
954
2.21k
  struct sctp_tmit_chunk *at;
955
2.21k
  int inserted;
956
  /*
957
   * Here we need to place the chunk into the control structure
958
   * sorted in the correct order.
959
   */
960
2.21k
  if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
961
    /* Its the very first one. */
962
1.40k
    SCTPDBG(SCTP_DEBUG_XXX,
963
1.40k
      "chunk is a first fsn: %u becomes fsn_included\n",
964
1.40k
      chk->rec.data.fsn);
965
1.40k
    at = TAILQ_FIRST(&control->reasm);
966
1.40k
    if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
967
      /*
968
       * The first chunk in the reassembly is
969
       * a smaller TSN than this one, even though
970
       * this has a first, it must be from a subsequent
971
       * msg.
972
       */
973
460
      goto place_chunk;
974
460
    }
975
942
    if (control->first_frag_seen) {
976
      /*
977
       * In old un-ordered we can reassembly on
978
       * one control multiple messages. As long
979
       * as the next FIRST is greater then the old
980
       * first (TSN i.e. FSN wise)
981
       */
982
381
      struct mbuf *tdata;
983
381
      uint32_t tmp;
984
985
381
      if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
986
        /* Easy way the start of a new guy beyond the lowest */
987
192
        goto place_chunk;
988
192
      }
989
189
      if ((chk->rec.data.fsn == control->fsn_included) ||
990
189
          (control->pdapi_started)) {
991
        /*
992
         * Ok this should not happen, if it does
993
         * we started the pd-api on the higher TSN (since
994
         * the equals part is a TSN failure it must be that).
995
         *
996
         * We are completely hosed in that case since I have
997
         * no way to recover. This really will only happen
998
         * if we can get more TSN's higher before the pd-api-point.
999
         */
1000
0
        sctp_abort_in_reasm(stcb, control, chk,
1001
0
                abort_flag,
1002
0
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
1003
1004
0
        return;
1005
0
      }
1006
      /*
1007
       * Ok we have two firsts and the one we just got
1008
       * is smaller than the one we previously placed.. yuck!
1009
       * We must swap them out.
1010
       */
1011
      /* swap the mbufs */
1012
189
      tdata = control->data;
1013
189
      control->data = chk->data;
1014
189
      chk->data = tdata;
1015
      /* Save the lengths */
1016
189
      chk->send_size = control->length;
1017
      /* Recompute length of control and tail pointer */
1018
189
      sctp_setup_tail_pointer(control);
1019
      /* Fix the FSN included */
1020
189
      tmp = control->fsn_included;
1021
189
      control->fsn_included = chk->rec.data.fsn;
1022
189
      chk->rec.data.fsn = tmp;
1023
      /* Fix the TSN included */
1024
189
      tmp = control->sinfo_tsn;
1025
189
      control->sinfo_tsn = chk->rec.data.tsn;
1026
189
      chk->rec.data.tsn = tmp;
1027
      /* Fix the PPID included */
1028
189
      tmp = control->sinfo_ppid;
1029
189
      control->sinfo_ppid = chk->rec.data.ppid;
1030
189
      chk->rec.data.ppid = tmp;
1031
      /* Fix tail pointer */
1032
189
      goto place_chunk;
1033
189
    }
1034
561
    control->first_frag_seen = 1;
1035
561
    control->fsn_included = chk->rec.data.fsn;
1036
561
    control->top_fsn = chk->rec.data.fsn;
1037
561
    control->sinfo_tsn = chk->rec.data.tsn;
1038
561
    control->sinfo_ppid = chk->rec.data.ppid;
1039
561
    control->data = chk->data;
1040
561
    sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1041
561
    chk->data = NULL;
1042
561
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1043
561
    sctp_setup_tail_pointer(control);
1044
561
    return;
1045
561
  }
1046
1.65k
place_chunk:
1047
1.65k
  inserted = 0;
1048
3.89k
  TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1049
3.89k
    if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1050
      /*
1051
       * This one in queue is bigger than the new one, insert
1052
       * the new one before at.
1053
       */
1054
892
      asoc->size_on_reasm_queue += chk->send_size;
1055
892
      sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1056
892
      inserted = 1;
1057
892
      TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1058
892
      break;
1059
3.00k
    } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1060
      /*
1061
       * They sent a duplicate fsn number. This
1062
       * really should not happen since the FSN is
1063
       * a TSN and it should have been dropped earlier.
1064
       */
1065
0
      sctp_abort_in_reasm(stcb, control, chk,
1066
0
                          abort_flag,
1067
0
                          SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1068
0
      return;
1069
0
    }
1070
3.89k
  }
1071
1.65k
  if (inserted == 0) {
1072
    /* Its at the end */
1073
760
    asoc->size_on_reasm_queue += chk->send_size;
1074
760
    sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1075
760
    control->top_fsn = chk->rec.data.fsn;
1076
760
    TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1077
760
  }
1078
1.65k
}
1079
1080
static int
1081
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1082
                         struct sctp_stream_in *strm, int inp_read_lock_held)
1083
5.14k
{
1084
  /*
1085
   * Given a stream, strm, see if any of
1086
   * the SSN's on it that are fragmented
1087
   * are ready to deliver. If so go ahead
1088
   * and place them on the read queue. In
1089
   * so placing if we have hit the end, then
1090
   * we need to remove them from the stream's queue.
1091
   */
1092
5.14k
  struct sctp_queued_to_read *control, *nctl = NULL;
1093
5.14k
  uint32_t next_to_del;
1094
5.14k
  uint32_t pd_point;
1095
5.14k
  int ret = 0;
1096
1097
5.14k
  if (stcb->sctp_socket) {
1098
5.14k
    pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1099
5.14k
             stcb->sctp_ep->partial_delivery_point);
1100
5.14k
  } else {
1101
0
    pd_point = stcb->sctp_ep->partial_delivery_point;
1102
0
  }
1103
5.14k
  control = TAILQ_FIRST(&strm->uno_inqueue);
1104
1105
5.14k
  if ((control != NULL) &&
1106
5.14k
      (asoc->idata_supported == 0)) {
1107
    /* Special handling needed for "old" data format */
1108
2.37k
    if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1109
2.37k
      goto done_un;
1110
2.37k
    }
1111
2.37k
  }
1112
2.77k
  if (strm->pd_api_started) {
1113
    /* Can't add more */
1114
0
    return (0);
1115
0
  }
1116
5.12k
  while (control) {
1117
2.34k
    SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1118
2.34k
      control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1119
2.34k
    nctl = TAILQ_NEXT(control, next_instrm);
1120
2.34k
    if (control->end_added) {
1121
      /* We just put the last bit on */
1122
26
      if (control->on_strm_q) {
1123
26
#ifdef INVARIANTS
1124
26
        if (control->on_strm_q != SCTP_ON_UNORDERED) {
1125
0
          panic("Huh control: %p on_q: %d -- not unordered?",
1126
0
                control, control->on_strm_q);
1127
0
        }
1128
26
#endif
1129
26
        SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1130
26
        TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1131
26
        if (asoc->size_on_all_streams >= control->length) {
1132
26
          asoc->size_on_all_streams -= control->length;
1133
26
        } else {
1134
0
#ifdef INVARIANTS
1135
0
          panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1136
#else
1137
          asoc->size_on_all_streams = 0;
1138
#endif
1139
0
        }
1140
26
        sctp_ucount_decr(asoc->cnt_on_all_streams);
1141
26
        control->on_strm_q = 0;
1142
26
      }
1143
26
      if (control->on_read_q == 0) {
1144
26
        sctp_add_to_readq(stcb->sctp_ep, stcb,
1145
26
              control,
1146
26
              &stcb->sctp_socket->so_rcv, control->end_added,
1147
26
              inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1148
26
      }
1149
2.32k
    } else {
1150
      /* Can we do a PD-API for this un-ordered guy? */
1151
2.32k
      if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1152
0
        strm->pd_api_started = 1;
1153
0
        control->pdapi_started = 1;
1154
0
        sctp_add_to_readq(stcb->sctp_ep, stcb,
1155
0
              control,
1156
0
              &stcb->sctp_socket->so_rcv, control->end_added,
1157
0
              inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1158
1159
0
        break;
1160
0
      }
1161
2.32k
    }
1162
2.34k
    control = nctl;
1163
2.34k
  }
1164
5.14k
done_un:
1165
5.14k
  control = TAILQ_FIRST(&strm->inqueue);
1166
5.14k
  if (strm->pd_api_started) {
1167
    /* Can't add more */
1168
0
    return (0);
1169
0
  }
1170
5.14k
  if (control == NULL) {
1171
2.27k
    return (ret);
1172
2.27k
  }
1173
2.86k
  if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1174
    /* Ok the guy at the top was being partially delivered
1175
     * completed, so we remove it. Note
1176
     * the pd_api flag was taken off when the
1177
     * chunk was merged on in sctp_queue_data_for_reasm below.
1178
     */
1179
246
    nctl = TAILQ_NEXT(control, next_instrm);
1180
246
    SCTPDBG(SCTP_DEBUG_XXX,
1181
246
      "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1182
246
      control, control->end_added, control->mid,
1183
246
      control->top_fsn, control->fsn_included,
1184
246
      strm->last_mid_delivered);
1185
246
    if (control->end_added) {
1186
22
      if (control->on_strm_q) {
1187
22
#ifdef INVARIANTS
1188
22
        if (control->on_strm_q != SCTP_ON_ORDERED) {
1189
0
          panic("Huh control: %p on_q: %d -- not ordered?",
1190
0
                control, control->on_strm_q);
1191
0
        }
1192
22
#endif
1193
22
        SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1194
22
        TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1195
22
        if (asoc->size_on_all_streams >= control->length) {
1196
22
          asoc->size_on_all_streams -= control->length;
1197
22
        } else {
1198
0
#ifdef INVARIANTS
1199
0
          panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1200
#else
1201
          asoc->size_on_all_streams = 0;
1202
#endif
1203
0
        }
1204
22
        sctp_ucount_decr(asoc->cnt_on_all_streams);
1205
22
        control->on_strm_q = 0;
1206
22
      }
1207
22
      if (strm->pd_api_started && control->pdapi_started) {
1208
0
        control->pdapi_started = 0;
1209
0
        strm->pd_api_started = 0;
1210
0
      }
1211
22
      if (control->on_read_q == 0) {
1212
22
        sctp_add_to_readq(stcb->sctp_ep, stcb,
1213
22
              control,
1214
22
              &stcb->sctp_socket->so_rcv, control->end_added,
1215
22
              inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1216
22
      }
1217
22
      control = nctl;
1218
22
    }
1219
246
  }
1220
2.86k
  if (strm->pd_api_started) {
1221
    /* Can't add more must have gotten an un-ordered above being partially delivered. */
1222
0
    return (0);
1223
0
  }
1224
2.87k
deliver_more:
1225
2.87k
  next_to_del = strm->last_mid_delivered + 1;
1226
2.87k
  if (control) {
1227
2.86k
    SCTPDBG(SCTP_DEBUG_XXX,
1228
2.86k
      "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1229
2.86k
      control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1230
2.86k
      next_to_del);
1231
2.86k
    nctl = TAILQ_NEXT(control, next_instrm);
1232
2.86k
    if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1233
2.86k
        (control->first_frag_seen)) {
1234
403
      int done;
1235
1236
      /* Ok we can deliver it onto the stream. */
1237
403
      if (control->end_added) {
1238
        /* We are done with it afterwards */
1239
9
        if (control->on_strm_q) {
1240
9
#ifdef INVARIANTS
1241
9
          if (control->on_strm_q != SCTP_ON_ORDERED) {
1242
0
            panic("Huh control: %p on_q: %d -- not ordered?",
1243
0
                  control, control->on_strm_q);
1244
0
          }
1245
9
#endif
1246
9
          SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1247
9
          TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1248
9
          if (asoc->size_on_all_streams >= control->length) {
1249
9
            asoc->size_on_all_streams -= control->length;
1250
9
          } else {
1251
0
#ifdef INVARIANTS
1252
0
            panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1253
#else
1254
            asoc->size_on_all_streams = 0;
1255
#endif
1256
0
          }
1257
9
          sctp_ucount_decr(asoc->cnt_on_all_streams);
1258
9
          control->on_strm_q = 0;
1259
9
        }
1260
9
        ret++;
1261
9
      }
1262
403
      if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1263
        /* A singleton now slipping through - mark it non-revokable too */
1264
3
        sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1265
400
      } else if (control->end_added == 0) {
1266
        /* Check if we can defer adding until its all there */
1267
394
        if ((control->length < pd_point) || (strm->pd_api_started)) {
1268
          /* Don't need it or cannot add more (one being delivered that way) */
1269
394
          goto out;
1270
394
        }
1271
394
      }
1272
9
      done = (control->end_added) && (control->last_frag_seen);
1273
9
      if (control->on_read_q == 0) {
1274
9
        if (!done) {
1275
0
          if (asoc->size_on_all_streams >= control->length) {
1276
0
            asoc->size_on_all_streams -= control->length;
1277
0
          } else {
1278
0
#ifdef INVARIANTS
1279
0
            panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1280
#else
1281
            asoc->size_on_all_streams = 0;
1282
#endif
1283
0
          }
1284
0
          strm->pd_api_started = 1;
1285
0
          control->pdapi_started = 1;
1286
0
        }
1287
9
        sctp_add_to_readq(stcb->sctp_ep, stcb,
1288
9
              control,
1289
9
              &stcb->sctp_socket->so_rcv, control->end_added,
1290
9
              inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1291
9
      }
1292
9
      strm->last_mid_delivered = next_to_del;
1293
9
      if (done) {
1294
9
        control = nctl;
1295
9
        goto deliver_more;
1296
9
      }
1297
9
    }
1298
2.86k
  }
1299
2.86k
out:
1300
2.86k
  return (ret);
1301
2.87k
}
1302
1303
uint32_t
1304
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1305
      struct sctp_stream_in *strm,
1306
      struct sctp_tcb *stcb, struct sctp_association *asoc,
1307
      struct sctp_tmit_chunk *chk, int hold_rlock)
1308
542
{
1309
  /*
1310
   * Given a control and a chunk, merge the
1311
   * data from the chk onto the control and free
1312
   * up the chunk resources.
1313
   */
1314
542
  uint32_t added=0;
1315
542
  int i_locked = 0;
1316
1317
542
  if (control->on_read_q && (hold_rlock == 0)) {
1318
    /*
1319
     * Its being pd-api'd so we must
1320
     * do some locks.
1321
     */
1322
0
    SCTP_INP_READ_LOCK(stcb->sctp_ep);
1323
0
    i_locked = 1;
1324
0
  }
1325
542
  if (control->data == NULL) {
1326
0
    control->data = chk->data;
1327
0
    sctp_setup_tail_pointer(control);
1328
542
  } else {
1329
542
    sctp_add_to_tail_pointer(control, chk->data, &added);
1330
542
  }
1331
542
  control->fsn_included = chk->rec.data.fsn;
1332
542
  asoc->size_on_reasm_queue -= chk->send_size;
1333
542
  sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1334
542
  sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1335
542
  chk->data = NULL;
1336
542
  if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1337
203
    control->first_frag_seen = 1;
1338
203
    control->sinfo_tsn = chk->rec.data.tsn;
1339
203
    control->sinfo_ppid = chk->rec.data.ppid;
1340
203
  }
1341
542
  if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1342
    /* Its complete */
1343
233
    if ((control->on_strm_q) && (control->on_read_q)) {
1344
0
      if (control->pdapi_started) {
1345
0
        control->pdapi_started = 0;
1346
0
        strm->pd_api_started = 0;
1347
0
      }
1348
0
      if (control->on_strm_q == SCTP_ON_UNORDERED) {
1349
        /* Unordered */
1350
0
        TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1351
0
        control->on_strm_q = 0;
1352
0
      } else if (control->on_strm_q == SCTP_ON_ORDERED) {
1353
        /* Ordered */
1354
0
        TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1355
        /*
1356
         * Don't need to decrement size_on_all_streams,
1357
         * since control is on the read queue.
1358
         */
1359
0
        sctp_ucount_decr(asoc->cnt_on_all_streams);
1360
0
        control->on_strm_q = 0;
1361
0
#ifdef INVARIANTS
1362
0
      } else if (control->on_strm_q) {
1363
0
        panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1364
0
              control->on_strm_q);
1365
0
#endif
1366
0
      }
1367
0
    }
1368
233
    control->end_added = 1;
1369
233
    control->last_frag_seen = 1;
1370
233
  }
1371
542
  if (i_locked) {
1372
0
    SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1373
0
  }
1374
542
  sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1375
542
  return (added);
1376
542
}
1377
1378
/*
1379
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
1380
 * queue, see if anthing can be delivered. If so pull it off (or as much as
1381
 * we can. If we run out of space then we must dump what we can and set the
1382
 * appropriate flag to say we queued what we could.
1383
 */
1384
static void
1385
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1386
        struct sctp_queued_to_read *control,
1387
        struct sctp_tmit_chunk *chk,
1388
        int created_control,
1389
        int *abort_flag, uint32_t tsn)
1390
5.40k
{
1391
5.40k
  uint32_t next_fsn;
1392
5.40k
  struct sctp_tmit_chunk *at, *nat;
1393
5.40k
  struct sctp_stream_in *strm;
1394
5.40k
  int do_wakeup, unordered;
1395
5.40k
  uint32_t lenadded;
1396
1397
5.40k
  strm = &asoc->strmin[control->sinfo_stream];
1398
  /*
1399
   * For old un-ordered data chunks.
1400
   */
1401
5.40k
  if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1402
2.88k
    unordered = 1;
1403
2.88k
  } else {
1404
2.51k
    unordered = 0;
1405
2.51k
  }
1406
  /* Must be added to the stream-in queue */
1407
5.40k
  if (created_control) {
1408
3.00k
    if ((unordered == 0) || (asoc->idata_supported)) {
1409
2.26k
      sctp_ucount_incr(asoc->cnt_on_all_streams);
1410
2.26k
    }
1411
3.00k
    if (sctp_place_control_in_stream(strm, asoc, control)) {
1412
      /* Duplicate SSN? */
1413
0
      sctp_abort_in_reasm(stcb, control, chk,
1414
0
              abort_flag,
1415
0
              SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1416
0
      sctp_clean_up_control(stcb, control);
1417
0
      return;
1418
0
    }
1419
3.00k
    if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1420
      /* Ok we created this control and now
1421
       * lets validate that its legal i.e. there
1422
       * is a B bit set, if not and we have
1423
       * up to the cum-ack then its invalid.
1424
       */
1425
69
      if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1426
3
        sctp_abort_in_reasm(stcb, control, chk,
1427
3
                            abort_flag,
1428
3
                            SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1429
3
        return;
1430
3
      }
1431
69
    }
1432
3.00k
  }
1433
5.40k
  if ((asoc->idata_supported == 0) && (unordered == 1)) {
1434
2.21k
    sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1435
2.21k
    return;
1436
2.21k
  }
1437
  /*
1438
   * Ok we must queue the chunk into the reasembly portion:
1439
   *  o if its the first it goes to the control mbuf.
1440
   *  o if its not first but the next in sequence it goes to the control,
1441
   *    and each succeeding one in order also goes.
1442
   *  o if its not in order we place it on the list in its place.
1443
   */
1444
3.18k
  if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1445
    /* Its the very first one. */
1446
725
    SCTPDBG(SCTP_DEBUG_XXX,
1447
725
      "chunk is a first fsn: %u becomes fsn_included\n",
1448
725
      chk->rec.data.fsn);
1449
725
    if (control->first_frag_seen) {
1450
      /*
1451
       * Error on senders part, they either
1452
       * sent us two data chunks with FIRST,
1453
       * or they sent two un-ordered chunks that
1454
       * were fragmented at the same time in the same stream.
1455
       */
1456
5
      sctp_abort_in_reasm(stcb, control, chk,
1457
5
                          abort_flag,
1458
5
                          SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1459
5
      return;
1460
5
    }
1461
720
    control->first_frag_seen = 1;
1462
720
    control->sinfo_ppid = chk->rec.data.ppid;
1463
720
    control->sinfo_tsn = chk->rec.data.tsn;
1464
720
    control->fsn_included = chk->rec.data.fsn;
1465
720
    control->data = chk->data;
1466
720
    sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1467
720
    chk->data = NULL;
1468
720
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1469
720
    sctp_setup_tail_pointer(control);
1470
720
    asoc->size_on_all_streams += control->length;
1471
2.46k
  } else {
1472
    /* Place the chunk in our list */
1473
2.46k
    int inserted=0;
1474
2.46k
    if (control->last_frag_seen == 0) {
1475
      /* Still willing to raise highest FSN seen */
1476
2.19k
      if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1477
288
        SCTPDBG(SCTP_DEBUG_XXX,
1478
288
          "We have a new top_fsn: %u\n",
1479
288
          chk->rec.data.fsn);
1480
288
        control->top_fsn = chk->rec.data.fsn;
1481
288
      }
1482
2.19k
      if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1483
681
        SCTPDBG(SCTP_DEBUG_XXX,
1484
681
          "The last fsn is now in place fsn: %u\n",
1485
681
          chk->rec.data.fsn);
1486
681
        control->last_frag_seen = 1;
1487
681
        if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
1488
63
          SCTPDBG(SCTP_DEBUG_XXX,
1489
63
            "New fsn: %u is not at top_fsn: %u -- abort\n",
1490
63
            chk->rec.data.fsn,
1491
63
            control->top_fsn);
1492
63
          sctp_abort_in_reasm(stcb, control, chk,
1493
63
                  abort_flag,
1494
63
                  SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1495
63
          return;
1496
63
        }
1497
681
      }
1498
2.13k
      if (asoc->idata_supported || control->first_frag_seen) {
1499
        /*
1500
         * For IDATA we always check since we know that
1501
         * the first fragment is 0. For old DATA we have
1502
         * to receive the first before we know the first FSN
1503
         * (which is the TSN).
1504
         */
1505
1.23k
        if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1506
          /* We have already delivered up to this so its a dup */
1507
87
          sctp_abort_in_reasm(stcb, control, chk,
1508
87
                  abort_flag,
1509
87
                  SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1510
87
          return;
1511
87
        }
1512
1.23k
      }
1513
2.13k
    } else {
1514
267
      if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1515
        /* Second last? huh? */
1516
1
        SCTPDBG(SCTP_DEBUG_XXX,
1517
1
          "Duplicate last fsn: %u (top: %u) -- abort\n",
1518
1
          chk->rec.data.fsn, control->top_fsn);
1519
1
        sctp_abort_in_reasm(stcb, control,
1520
1
                chk, abort_flag,
1521
1
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1522
1
        return;
1523
1
      }
1524
266
      if (asoc->idata_supported || control->first_frag_seen) {
1525
        /*
1526
         * For IDATA we always check since we know that
1527
         * the first fragment is 0. For old DATA we have
1528
         * to receive the first before we know the first FSN
1529
         * (which is the TSN).
1530
         */
1531
1532
182
        if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1533
          /* We have already delivered up to this so its a dup */
1534
38
          SCTPDBG(SCTP_DEBUG_XXX,
1535
38
            "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1536
38
            chk->rec.data.fsn, control->fsn_included);
1537
38
          sctp_abort_in_reasm(stcb, control, chk,
1538
38
                  abort_flag,
1539
38
                  SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1540
38
          return;
1541
38
        }
1542
182
      }
1543
      /* validate not beyond top FSN if we have seen last one */
1544
228
      if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1545
73
        SCTPDBG(SCTP_DEBUG_XXX,
1546
73
          "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1547
73
          chk->rec.data.fsn,
1548
73
          control->top_fsn);
1549
73
        sctp_abort_in_reasm(stcb, control, chk,
1550
73
                abort_flag,
1551
73
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1552
73
        return;
1553
73
      }
1554
228
    }
1555
    /*
1556
     * If we reach here, we need to place the
1557
     * new chunk in the reassembly for this
1558
     * control.
1559
     */
1560
2.20k
    SCTPDBG(SCTP_DEBUG_XXX,
1561
2.20k
      "chunk is a not first fsn: %u needs to be inserted\n",
1562
2.20k
      chk->rec.data.fsn);
1563
2.20k
    TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1564
1.33k
      if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1565
393
        if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1566
          /* Last not at the end? huh? */
1567
1
          SCTPDBG(SCTP_DEBUG_XXX,
1568
1
                  "Last fragment not last in list: -- abort\n");
1569
1
          sctp_abort_in_reasm(stcb, control,
1570
1
                              chk, abort_flag,
1571
1
                              SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1572
1
          return;
1573
1
        }
1574
        /*
1575
         * This one in queue is bigger than the new one, insert
1576
         * the new one before at.
1577
         */
1578
392
        SCTPDBG(SCTP_DEBUG_XXX,
1579
392
          "Insert it before fsn: %u\n",
1580
392
          at->rec.data.fsn);
1581
392
        asoc->size_on_reasm_queue += chk->send_size;
1582
392
        sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1583
392
        TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1584
392
        inserted = 1;
1585
392
        break;
1586
942
      } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1587
        /* Gak, He sent me a duplicate str seq number */
1588
        /*
1589
         * foo bar, I guess I will just free this new guy,
1590
         * should we abort too? FIX ME MAYBE? Or it COULD be
1591
         * that the SSN's have wrapped. Maybe I should
1592
         * compare to TSN somehow... sigh for now just blow
1593
         * away the chunk!
1594
         */
1595
2
        SCTPDBG(SCTP_DEBUG_XXX,
1596
2
          "Duplicate to fsn: %u -- abort\n",
1597
2
          at->rec.data.fsn);
1598
2
        sctp_abort_in_reasm(stcb, control,
1599
2
                chk, abort_flag,
1600
2
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1601
2
        return;
1602
2
      }
1603
1.33k
    }
1604
2.19k
    if (inserted == 0) {
1605
      /* Goes on the end */
1606
1.80k
      SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1607
1.80k
        chk->rec.data.fsn);
1608
1.80k
      asoc->size_on_reasm_queue += chk->send_size;
1609
1.80k
      sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1610
1.80k
      TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1611
1.80k
    }
1612
2.19k
  }
1613
  /*
1614
   * Ok lets see if we can suck any up into the control
1615
   * structure that are in seq if it makes sense.
1616
   */
1617
2.91k
  do_wakeup = 0;
1618
  /*
1619
   * If the first fragment has not been
1620
   * seen there is no sense in looking.
1621
   */
1622
2.91k
  if (control->first_frag_seen) {
1623
974
    next_fsn = control->fsn_included + 1;
1624
974
    TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1625
296
      if (at->rec.data.fsn == next_fsn) {
1626
        /* We can add this one now to the control */
1627
55
        SCTPDBG(SCTP_DEBUG_XXX,
1628
55
          "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1629
55
          control, at,
1630
55
          at->rec.data.fsn,
1631
55
          next_fsn, control->fsn_included);
1632
55
        TAILQ_REMOVE(&control->reasm, at, sctp_next);
1633
55
        lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1634
55
        if (control->on_read_q) {
1635
0
          do_wakeup = 1;
1636
55
        } else {
1637
          /*
1638
           * We only add to the size-on-all-streams
1639
           * if its not on the read q. The read q
1640
           * flag will cause a sballoc so its accounted
1641
           * for there.
1642
           */
1643
55
          asoc->size_on_all_streams += lenadded;
1644
55
        }
1645
55
        next_fsn++;
1646
55
        if (control->end_added && control->pdapi_started) {
1647
0
          if (strm->pd_api_started) {
1648
0
            strm->pd_api_started = 0;
1649
0
            control->pdapi_started = 0;
1650
0
          }
1651
0
          if (control->on_read_q == 0) {
1652
0
            sctp_add_to_readq(stcb->sctp_ep, stcb,
1653
0
                  control,
1654
0
                  &stcb->sctp_socket->so_rcv, control->end_added,
1655
0
                  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1656
0
          }
1657
0
          break;
1658
0
        }
1659
241
      } else {
1660
241
        break;
1661
241
      }
1662
296
    }
1663
974
  }
1664
2.91k
  if (do_wakeup) {
1665
0
#if defined(__Userspace__)
1666
0
    sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD);
1667
0
#endif
1668
    /* Need to wakeup the reader */
1669
0
    sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1670
0
  }
1671
2.91k
}
1672
1673
static struct sctp_queued_to_read *
1674
sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1675
7.25k
{
1676
7.25k
  struct sctp_queued_to_read *control;
1677
1678
7.25k
  if (ordered) {
1679
3.72k
    TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1680
2.68k
      if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1681
836
        break;
1682
836
      }
1683
2.68k
    }
1684
3.72k
  } else {
1685
3.53k
    if (idata_supported) {
1686
1.76k
      TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1687
1.76k
        if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1688
91
          break;
1689
91
        }
1690
1.76k
      }
1691
2.83k
    } else {
1692
2.83k
      control = TAILQ_FIRST(&strm->uno_inqueue);
1693
2.83k
    }
1694
3.53k
  }
1695
7.25k
  return (control);
1696
7.25k
}
1697
1698
static int
1699
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1700
        struct mbuf **m, int offset,  int chk_length,
1701
        struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1702
        int *break_flag, int last_chunk, uint8_t chk_type)
1703
14.8k
{
1704
14.8k
  struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
1705
14.8k
  struct sctp_stream_in *strm;
1706
14.8k
  uint32_t tsn, fsn, gap, mid;
1707
14.8k
  struct mbuf *dmbuf;
1708
14.8k
  int the_len;
1709
14.8k
  int need_reasm_check = 0;
1710
14.8k
  uint16_t sid;
1711
14.8k
  struct mbuf *op_err;
1712
14.8k
  char msg[SCTP_DIAG_INFO_LEN];
1713
14.8k
  struct sctp_queued_to_read *control, *ncontrol;
1714
14.8k
  uint32_t ppid;
1715
14.8k
  uint8_t chk_flags;
1716
14.8k
  struct sctp_stream_reset_list *liste;
1717
14.8k
  int ordered;
1718
14.8k
  size_t clen;
1719
14.8k
  int created_control = 0;
1720
1721
14.8k
  if (chk_type == SCTP_IDATA) {
1722
4.26k
    struct sctp_idata_chunk *chunk, chunk_buf;
1723
1724
4.26k
    chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1725
4.26k
                                                     sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1726
4.26k
    chk_flags = chunk->ch.chunk_flags;
1727
4.26k
    clen = sizeof(struct sctp_idata_chunk);
1728
4.26k
    tsn = ntohl(chunk->dp.tsn);
1729
4.26k
    sid = ntohs(chunk->dp.sid);
1730
4.26k
    mid = ntohl(chunk->dp.mid);
1731
4.26k
    if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1732
2.68k
      fsn = 0;
1733
2.68k
      ppid = chunk->dp.ppid_fsn.ppid;
1734
2.68k
    } else {
1735
1.58k
      fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1736
1.58k
      ppid = 0xffffffff; /* Use as an invalid value. */
1737
1.58k
    }
1738
10.5k
  } else {
1739
10.5k
    struct sctp_data_chunk *chunk, chunk_buf;
1740
1741
10.5k
    chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1742
10.5k
                                                    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1743
10.5k
    chk_flags = chunk->ch.chunk_flags;
1744
10.5k
    clen = sizeof(struct sctp_data_chunk);
1745
10.5k
    tsn = ntohl(chunk->dp.tsn);
1746
10.5k
    sid = ntohs(chunk->dp.sid);
1747
10.5k
    mid = (uint32_t)(ntohs(chunk->dp.ssn));
1748
10.5k
    fsn = tsn;
1749
10.5k
    ppid = chunk->dp.ppid;
1750
10.5k
  }
1751
14.8k
  if ((size_t)chk_length == clen) {
1752
    /*
1753
     * Need to send an abort since we had a
1754
     * empty data chunk.
1755
     */
1756
7
    op_err = sctp_generate_no_user_data_cause(tsn);
1757
7
    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1758
7
    sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
1759
7
    *abort_flag = 1;
1760
7
    return (0);
1761
7
  }
1762
14.8k
  if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1763
4.12k
    asoc->send_sack = 1;
1764
4.12k
  }
1765
14.8k
  ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1766
14.8k
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1767
0
    sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1768
0
  }
1769
14.8k
  if (stcb == NULL) {
1770
0
    return (0);
1771
0
  }
1772
14.8k
  SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1773
14.8k
  if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1774
    /* It is a duplicate */
1775
2.44k
    SCTP_STAT_INCR(sctps_recvdupdata);
1776
2.44k
    if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1777
      /* Record a dup for the next outbound sack */
1778
1.76k
      asoc->dup_tsns[asoc->numduptsns] = tsn;
1779
1.76k
      asoc->numduptsns++;
1780
1.76k
    }
1781
2.44k
    asoc->send_sack = 1;
1782
2.44k
    return (0);
1783
2.44k
  }
1784
  /* Calculate the number of TSN's between the base and this TSN */
1785
12.3k
  SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1786
12.3k
  if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1787
    /* Can't hold the bit in the mapping at max array, toss it */
1788
2.17k
    return (0);
1789
2.17k
  }
1790
10.2k
  if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1791
2.58k
    SCTP_TCB_LOCK_ASSERT(stcb);
1792
2.58k
    if (sctp_expand_mapping_array(asoc, gap)) {
1793
      /* Can't expand, drop it */
1794
0
      return (0);
1795
0
    }
1796
2.58k
  }
1797
10.2k
  if (SCTP_TSN_GT(tsn, *high_tsn)) {
1798
3.91k
    *high_tsn = tsn;
1799
3.91k
  }
1800
  /* See if we have received this one already */
1801
10.2k
  if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1802
10.2k
      SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1803
2.50k
    SCTP_STAT_INCR(sctps_recvdupdata);
1804
2.50k
    if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1805
      /* Record a dup for the next outbound sack */
1806
821
      asoc->dup_tsns[asoc->numduptsns] = tsn;
1807
821
      asoc->numduptsns++;
1808
821
    }
1809
2.50k
    asoc->send_sack = 1;
1810
2.50k
    return (0);
1811
2.50k
  }
1812
  /*
1813
   * Check to see about the GONE flag, duplicates would cause a sack
1814
   * to be sent up above
1815
   */
1816
7.69k
  if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1817
7.69k
       (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1818
7.69k
       (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1819
    /*
1820
     * wait a minute, this guy is gone, there is no longer a
1821
     * receiver. Send peer an ABORT!
1822
     */
1823
0
    op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1824
0
    sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
1825
0
    *abort_flag = 1;
1826
0
    return (0);
1827
0
  }
1828
  /*
1829
   * Now before going further we see if there is room. If NOT then we
1830
   * MAY let one through only IF this TSN is the one we are waiting
1831
   * for on a partial delivery API.
1832
   */
1833
1834
  /* Is the stream valid? */
1835
7.69k
  if (sid >= asoc->streamincnt) {
1836
436
    struct sctp_error_invalid_stream *cause;
1837
1838
436
    op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1839
436
                                   0, M_NOWAIT, 1, MT_DATA);
1840
436
    if (op_err != NULL) {
1841
      /* add some space up front so prepend will work well */
1842
436
      SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1843
436
      cause = mtod(op_err, struct sctp_error_invalid_stream *);
1844
      /*
1845
       * Error causes are just param's and this one has
1846
       * two back to back phdr, one with the error type
1847
       * and size, the other with the streamid and a rsvd
1848
       */
1849
436
      SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1850
436
      cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1851
436
      cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1852
436
      cause->stream_id = htons(sid);
1853
436
      cause->reserved = htons(0);
1854
436
      sctp_queue_op_err(stcb, op_err);
1855
436
    }
1856
436
    SCTP_STAT_INCR(sctps_badsid);
1857
436
    SCTP_TCB_LOCK_ASSERT(stcb);
1858
436
    SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1859
436
    if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1860
308
      asoc->highest_tsn_inside_nr_map = tsn;
1861
308
    }
1862
436
    if (tsn == (asoc->cumulative_tsn + 1)) {
1863
      /* Update cum-ack */
1864
35
      asoc->cumulative_tsn = tsn;
1865
35
    }
1866
436
    return (0);
1867
436
  }
1868
  /*
1869
   * If its a fragmented message, lets see if we can
1870
   * find the control on the reassembly queues.
1871
   */
1872
7.25k
  if ((chk_type == SCTP_IDATA) &&
1873
7.25k
      ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1874
7.25k
      (fsn == 0)) {
1875
    /*
1876
     *  The first *must* be fsn 0, and other
1877
     *  (middle/end) pieces can *not* be fsn 0.
1878
     * XXX: This can happen in case of a wrap around.
1879
     *      Ignore is for now.
1880
     */
1881
1
    SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
1882
1
    goto err_out;
1883
1
  }
1884
7.25k
  control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1885
7.25k
  SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1886
7.25k
    chk_flags, control);
1887
7.25k
  if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1888
    /* See if we can find the re-assembly entity */
1889
5.43k
    if (control != NULL) {
1890
      /* We found something, does it belong? */
1891
2.40k
      if (ordered && (mid != control->mid)) {
1892
0
        SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1893
4
      err_out:
1894
4
        op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1895
4
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1896
4
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
1897
4
        *abort_flag = 1;
1898
4
        return (0);
1899
0
      }
1900
2.40k
      if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1901
        /* We can't have a switched order with an unordered chunk */
1902
0
        SCTP_SNPRINTF(msg, sizeof(msg),
1903
0
                      "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1904
0
                      tsn);
1905
0
        goto err_out;
1906
0
      }
1907
2.40k
      if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1908
        /* We can't have a switched unordered with a ordered chunk */
1909
0
        SCTP_SNPRINTF(msg, sizeof(msg),
1910
0
                     "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1911
0
                     tsn);
1912
0
        goto err_out;
1913
0
      }
1914
2.40k
    }
1915
5.43k
  } else {
1916
    /* Its a complete segment. Lets validate we
1917
     * don't have a re-assembly going on with
1918
     * the same Stream/Seq (for ordered) or in
1919
     * the same Stream for unordered.
1920
     */
1921
1.81k
    if (control != NULL) {
1922
65
      if (ordered || asoc->idata_supported) {
1923
2
        SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1924
2
          chk_flags, mid);
1925
2
        SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1926
2
        goto err_out;
1927
63
      } else {
1928
63
        if ((tsn == control->fsn_included + 1) &&
1929
63
            (control->end_added == 0)) {
1930
1
          SCTP_SNPRINTF(msg, sizeof(msg),
1931
1
                        "Illegal message sequence, missing end for MID: %8.8x",
1932
1
                        control->fsn_included);
1933
1
          goto err_out;
1934
62
        } else {
1935
62
          control = NULL;
1936
62
        }
1937
63
      }
1938
65
    }
1939
1.81k
  }
1940
  /* now do the tests */
1941
7.25k
  if (((asoc->cnt_on_all_streams +
1942
7.25k
        asoc->cnt_on_reasm_queue +
1943
7.25k
        asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1944
7.25k
      (((int)asoc->my_rwnd) <= 0)) {
1945
    /*
1946
     * When we have NO room in the rwnd we check to make sure
1947
     * the reader is doing its job...
1948
     */
1949
73
    if (SCTP_SBAVAIL(&stcb->sctp_socket->so_rcv) > 0) {
1950
      /* some to read, wake-up */
1951
#if defined(__APPLE__) && !defined(__Userspace__)
1952
      struct socket *so;
1953
1954
      so = SCTP_INP_SO(stcb->sctp_ep);
1955
      atomic_add_int(&stcb->asoc.refcnt, 1);
1956
      SCTP_TCB_UNLOCK(stcb);
1957
      SCTP_SOCKET_LOCK(so, 1);
1958
      SCTP_TCB_LOCK(stcb);
1959
      atomic_subtract_int(&stcb->asoc.refcnt, 1);
1960
      if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1961
        /* assoc was freed while we were unlocked */
1962
        SCTP_SOCKET_UNLOCK(so, 1);
1963
        return (0);
1964
      }
1965
#endif
1966
73
      sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1967
#if defined(__APPLE__) && !defined(__Userspace__)
1968
      SCTP_SOCKET_UNLOCK(so, 1);
1969
#endif
1970
73
    }
1971
    /* now is it in the mapping array of what we have accepted? */
1972
73
    if (chk_type == SCTP_DATA) {
1973
71
      if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1974
71
          SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1975
        /* Nope not in the valid range dump it */
1976
15
      dump_packet:
1977
15
        sctp_set_rwnd(stcb, asoc);
1978
15
        if ((asoc->cnt_on_all_streams +
1979
15
             asoc->cnt_on_reasm_queue +
1980
15
             asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1981
0
          SCTP_STAT_INCR(sctps_datadropchklmt);
1982
15
        } else {
1983
15
          SCTP_STAT_INCR(sctps_datadroprwnd);
1984
15
        }
1985
15
        *break_flag = 1;
1986
15
        return (0);
1987
13
      }
1988
71
    } else {
1989
2
      if (control == NULL) {
1990
2
        goto dump_packet;
1991
2
      }
1992
0
      if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1993
0
        goto dump_packet;
1994
0
      }
1995
0
    }
1996
73
  }
1997
#ifdef SCTP_ASOCLOG_OF_TSNS
1998
  SCTP_TCB_LOCK_ASSERT(stcb);
1999
  if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
2000
    asoc->tsn_in_at = 0;
2001
    asoc->tsn_in_wrapped = 1;
2002
  }
2003
  asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
2004
  asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
2005
  asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
2006
  asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
2007
  asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
2008
  asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
2009
  asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
2010
  asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
2011
  asoc->tsn_in_at++;
2012
#endif
2013
  /*
2014
   * Before we continue lets validate that we are not being fooled by
2015
   * an evil attacker. We can only have Nk chunks based on our TSN
2016
   * spread allowed by the mapping array N * 8 bits, so there is no
2017
   * way our stream sequence numbers could have wrapped. We of course
2018
   * only validate the FIRST fragment so the bit must be set.
2019
   */
2020
7.23k
  if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
2021
7.23k
      (TAILQ_EMPTY(&asoc->resetHead)) &&
2022
7.23k
      (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2023
7.23k
      SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2024
    /* The incoming sseq is behind where we last delivered? */
2025
60
    SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2026
60
      mid, asoc->strmin[sid].last_mid_delivered);
2027
2028
60
    if (asoc->idata_supported) {
2029
38
      SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2030
38
                    asoc->strmin[sid].last_mid_delivered,
2031
38
                    tsn,
2032
38
                    sid,
2033
38
                    mid);
2034
38
    } else {
2035
22
      SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2036
22
                    (uint16_t)asoc->strmin[sid].last_mid_delivered,
2037
22
                    tsn,
2038
22
                    sid,
2039
22
                    (uint16_t)mid);
2040
22
    }
2041
60
    op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2042
60
    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2043
60
    sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2044
60
    *abort_flag = 1;
2045
60
    return (0);
2046
60
  }
2047
7.17k
  if (chk_type == SCTP_IDATA) {
2048
2.05k
    the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2049
5.12k
  } else {
2050
5.12k
    the_len = (chk_length - sizeof(struct sctp_data_chunk));
2051
5.12k
  }
2052
7.17k
  if (last_chunk == 0) {
2053
7.04k
    if (chk_type == SCTP_IDATA) {
2054
2.00k
      dmbuf = SCTP_M_COPYM(*m,
2055
2.00k
               (offset + sizeof(struct sctp_idata_chunk)),
2056
2.00k
               the_len, M_NOWAIT);
2057
5.03k
    } else {
2058
5.03k
      dmbuf = SCTP_M_COPYM(*m,
2059
5.03k
               (offset + sizeof(struct sctp_data_chunk)),
2060
5.03k
               the_len, M_NOWAIT);
2061
5.03k
    }
2062
#ifdef SCTP_MBUF_LOGGING
2063
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2064
      sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2065
    }
2066
#endif
2067
7.04k
  } else {
2068
    /* We can steal the last chunk */
2069
138
    int l_len;
2070
138
    dmbuf = *m;
2071
    /* lop off the top part */
2072
138
    if (chk_type == SCTP_IDATA) {
2073
50
      m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2074
88
    } else {
2075
88
      m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2076
88
    }
2077
138
    if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2078
43
      l_len = SCTP_BUF_LEN(dmbuf);
2079
95
    } else {
2080
      /* need to count up the size hopefully
2081
       * does not hit this to often :-0
2082
       */
2083
95
      struct mbuf *lat;
2084
2085
95
      l_len = 0;
2086
750
      for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2087
655
        l_len += SCTP_BUF_LEN(lat);
2088
655
      }
2089
95
    }
2090
138
    if (l_len > the_len) {
2091
      /* Trim the end round bytes off  too */
2092
130
      m_adj(dmbuf, -(l_len - the_len));
2093
130
    }
2094
138
  }
2095
7.17k
  if (dmbuf == NULL) {
2096
0
    SCTP_STAT_INCR(sctps_nomem);
2097
0
    return (0);
2098
0
  }
2099
  /*
2100
   * Now no matter what, we need a control, get one
2101
   * if we don't have one (we may have gotten it
2102
   * above when we found the message was fragmented
2103
   */
2104
7.17k
  if (control == NULL) {
2105
4.77k
    sctp_alloc_a_readq(stcb, control);
2106
4.77k
    sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2107
4.77k
             ppid,
2108
4.77k
             sid,
2109
4.77k
             chk_flags,
2110
4.77k
             NULL, fsn, mid);
2111
4.77k
    if (control == NULL) {
2112
0
      SCTP_STAT_INCR(sctps_nomem);
2113
0
      return (0);
2114
0
    }
2115
4.77k
    if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2116
1.77k
      struct mbuf *mm;
2117
2118
1.77k
      control->data = dmbuf;
2119
1.77k
      control->tail_mbuf = NULL;
2120
4.05k
      for (mm = control->data; mm; mm = mm->m_next) {
2121
2.28k
        control->length += SCTP_BUF_LEN(mm);
2122
2.28k
        if (SCTP_BUF_NEXT(mm) == NULL) {
2123
1.77k
          control->tail_mbuf = mm;
2124
1.77k
        }
2125
2.28k
      }
2126
1.77k
      control->end_added = 1;
2127
1.77k
      control->last_frag_seen = 1;
2128
1.77k
      control->first_frag_seen = 1;
2129
1.77k
      control->fsn_included = fsn;
2130
1.77k
      control->top_fsn = fsn;
2131
1.77k
    }
2132
4.77k
    created_control = 1;
2133
4.77k
  }
2134
7.17k
  SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2135
7.17k
    chk_flags, ordered, mid, control);
2136
7.17k
  if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2137
7.17k
      TAILQ_EMPTY(&asoc->resetHead) &&
2138
7.17k
      ((ordered == 0) ||
2139
928
       (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2140
617
        TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2141
    /* Candidate for express delivery */
2142
    /*
2143
     * Its not fragmented, No PD-API is up, Nothing in the
2144
     * delivery queue, Its un-ordered OR ordered and the next to
2145
     * deliver AND nothing else is stuck on the stream queue,
2146
     * And there is room for it in the socket buffer. Lets just
2147
     * stuff it up the buffer....
2148
     */
2149
348
    SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2150
348
    if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2151
195
      asoc->highest_tsn_inside_nr_map = tsn;
2152
195
    }
2153
348
    SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2154
348
      control, mid);
2155
2156
348
    sctp_add_to_readq(stcb->sctp_ep, stcb,
2157
348
                      control, &stcb->sctp_socket->so_rcv,
2158
348
                      1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2159
2160
348
    if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2161
      /* for ordered, bump what we delivered */
2162
37
      asoc->strmin[sid].last_mid_delivered++;
2163
37
    }
2164
348
    SCTP_STAT_INCR(sctps_recvexpress);
2165
348
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2166
0
      sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2167
0
                SCTP_STR_LOG_FROM_EXPRS_DEL);
2168
0
    }
2169
348
    control = NULL;
2170
348
    goto finish_express_del;
2171
348
  }
2172
2173
  /* Now will we need a chunk too? */
2174
6.83k
  if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2175
5.40k
    sctp_alloc_a_chunk(stcb, chk);
2176
5.40k
    if (chk == NULL) {
2177
      /* No memory so we drop the chunk */
2178
0
      SCTP_STAT_INCR(sctps_nomem);
2179
0
      if (last_chunk == 0) {
2180
        /* we copied it, free the copy */
2181
0
        sctp_m_freem(dmbuf);
2182
0
      }
2183
0
      return (0);
2184
0
    }
2185
5.40k
    chk->rec.data.tsn = tsn;
2186
5.40k
    chk->no_fr_allowed = 0;
2187
5.40k
    chk->rec.data.fsn = fsn;
2188
5.40k
    chk->rec.data.mid = mid;
2189
5.40k
    chk->rec.data.sid = sid;
2190
5.40k
    chk->rec.data.ppid = ppid;
2191
5.40k
    chk->rec.data.context = stcb->asoc.context;
2192
5.40k
    chk->rec.data.doing_fast_retransmit = 0;
2193
5.40k
    chk->rec.data.rcv_flags = chk_flags;
2194
5.40k
    chk->asoc = asoc;
2195
5.40k
    chk->send_size = the_len;
2196
5.40k
    chk->whoTo = net;
2197
5.40k
    SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2198
5.40k
      chk,
2199
5.40k
      control, mid);
2200
5.40k
    atomic_add_int(&net->ref_count, 1);
2201
5.40k
    chk->data = dmbuf;
2202
5.40k
  }
2203
  /* Set the appropriate TSN mark */
2204
6.83k
  if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2205
0
    SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2206
0
    if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2207
0
      asoc->highest_tsn_inside_nr_map = tsn;
2208
0
    }
2209
6.83k
  } else {
2210
6.83k
    SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2211
6.83k
    if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2212
3.96k
      asoc->highest_tsn_inside_map = tsn;
2213
3.96k
    }
2214
6.83k
  }
2215
  /* Now is it complete (i.e. not fragmented)? */
2216
6.83k
  if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2217
    /*
2218
     * Special check for when streams are resetting. We
2219
     * could be more smart about this and check the
2220
     * actual stream to see if it is not being reset..
2221
     * that way we would not create a HOLB when amongst
2222
     * streams being reset and those not being reset.
2223
     *
2224
     */
2225
1.42k
    if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2226
1.42k
        SCTP_TSN_GT(tsn, liste->tsn)) {
2227
      /*
2228
       * yep its past where we need to reset... go
2229
       * ahead and queue it.
2230
       */
2231
481
      if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2232
        /* first one on */
2233
168
        TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2234
313
      } else {
2235
313
        struct sctp_queued_to_read *lcontrol, *nlcontrol;
2236
313
        unsigned char inserted = 0;
2237
1.19k
        TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2238
1.19k
          if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2239
964
            continue;
2240
964
          } else {
2241
            /* found it */
2242
232
            TAILQ_INSERT_BEFORE(lcontrol, control, next);
2243
232
            inserted = 1;
2244
232
            break;
2245
232
          }
2246
1.19k
        }
2247
313
        if (inserted == 0) {
2248
          /*
2249
           * must be put at end, use
2250
           * prevP (all setup from
2251
           * loop) to setup nextP.
2252
           */
2253
81
          TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2254
81
        }
2255
313
      }
2256
481
      goto finish_express_del;
2257
481
    }
2258
946
    if (chk_flags & SCTP_DATA_UNORDERED) {
2259
      /* queue directly into socket buffer */
2260
111
      SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2261
111
        control, mid);
2262
111
      sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2263
111
      sctp_add_to_readq(stcb->sctp_ep, stcb,
2264
111
                        control,
2265
111
                        &stcb->sctp_socket->so_rcv, 1,
2266
111
                        SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2267
2268
835
    } else {
2269
835
      SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2270
835
        mid);
2271
835
      sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2272
835
      if (*abort_flag) {
2273
49
        if (last_chunk) {
2274
2
          *m = NULL;
2275
2
        }
2276
49
        return (0);
2277
49
      }
2278
835
    }
2279
897
    goto finish_express_del;
2280
946
  }
2281
  /* If we reach here its a reassembly */
2282
5.40k
  need_reasm_check = 1;
2283
5.40k
  SCTPDBG(SCTP_DEBUG_XXX,
2284
5.40k
    "Queue data to stream for reasm control: %p MID: %u\n",
2285
5.40k
    control, mid);
2286
5.40k
  sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2287
5.40k
  if (*abort_flag) {
2288
    /*
2289
     * the assoc is now gone and chk was put onto the
2290
     * reasm queue, which has all been freed.
2291
     */
2292
273
    if (last_chunk) {
2293
7
      *m = NULL;
2294
7
    }
2295
273
    return (0);
2296
273
  }
2297
6.85k
finish_express_del:
2298
  /* Here we tidy up things */
2299
6.85k
  if (tsn == (asoc->cumulative_tsn + 1)) {
2300
    /* Update cum-ack */
2301
196
    asoc->cumulative_tsn = tsn;
2302
196
  }
2303
6.85k
  if (last_chunk) {
2304
129
    *m = NULL;
2305
129
  }
2306
6.85k
  if (ordered) {
2307
3.37k
    SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2308
3.48k
  } else {
2309
3.48k
    SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2310
3.48k
  }
2311
6.85k
  SCTP_STAT_INCR(sctps_recvdata);
2312
  /* Set it present please */
2313
6.85k
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2314
0
    sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2315
0
  }
2316
6.85k
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2317
0
    sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2318
0
           asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2319
0
  }
2320
6.85k
  if (need_reasm_check) {
2321
5.13k
    (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2322
5.13k
    need_reasm_check = 0;
2323
5.13k
  }
2324
  /* check the special flag for stream resets */
2325
6.85k
  if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2326
6.85k
      SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2327
    /*
2328
     * we have finished working through the backlogged TSN's now
2329
     * time to reset streams. 1: call reset function. 2: free
2330
     * pending_reply space 3: distribute any chunks in
2331
     * pending_reply_queue.
2332
     */
2333
253
    sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2334
253
    TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2335
253
    sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2336
253
    SCTP_FREE(liste, SCTP_M_STRESET);
2337
    /*sa_ignore FREED_MEMORY*/
2338
253
    liste = TAILQ_FIRST(&asoc->resetHead);
2339
253
    if (TAILQ_EMPTY(&asoc->resetHead)) {
2340
      /* All can be removed */
2341
152
      TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2342
152
        TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2343
152
        strm = &asoc->strmin[control->sinfo_stream];
2344
152
        sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2345
152
        if (*abort_flag) {
2346
9
          return (0);
2347
9
        }
2348
143
        if (need_reasm_check) {
2349
4
          (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2350
4
          need_reasm_check = 0;
2351
4
        }
2352
143
      }
2353
139
    } else {
2354
153
      TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2355
153
        if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2356
31
          break;
2357
31
        }
2358
        /*
2359
         * if control->sinfo_tsn is <= liste->tsn we can
2360
         * process it which is the NOT of
2361
         * control->sinfo_tsn > liste->tsn
2362
         */
2363
122
        TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2364
122
        strm = &asoc->strmin[control->sinfo_stream];
2365
122
        sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2366
122
        if (*abort_flag) {
2367
33
          return (0);
2368
33
        }
2369
89
        if (need_reasm_check) {
2370
1
          (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2371
1
          need_reasm_check = 0;
2372
1
        }
2373
89
      }
2374
139
    }
2375
253
  }
2376
6.81k
  return (1);
2377
6.85k
}
2378
2379
static const int8_t sctp_map_lookup_tab[256] = {
2380
  0, 1, 0, 2, 0, 1, 0, 3,
2381
  0, 1, 0, 2, 0, 1, 0, 4,
2382
  0, 1, 0, 2, 0, 1, 0, 3,
2383
  0, 1, 0, 2, 0, 1, 0, 5,
2384
  0, 1, 0, 2, 0, 1, 0, 3,
2385
  0, 1, 0, 2, 0, 1, 0, 4,
2386
  0, 1, 0, 2, 0, 1, 0, 3,
2387
  0, 1, 0, 2, 0, 1, 0, 6,
2388
  0, 1, 0, 2, 0, 1, 0, 3,
2389
  0, 1, 0, 2, 0, 1, 0, 4,
2390
  0, 1, 0, 2, 0, 1, 0, 3,
2391
  0, 1, 0, 2, 0, 1, 0, 5,
2392
  0, 1, 0, 2, 0, 1, 0, 3,
2393
  0, 1, 0, 2, 0, 1, 0, 4,
2394
  0, 1, 0, 2, 0, 1, 0, 3,
2395
  0, 1, 0, 2, 0, 1, 0, 7,
2396
  0, 1, 0, 2, 0, 1, 0, 3,
2397
  0, 1, 0, 2, 0, 1, 0, 4,
2398
  0, 1, 0, 2, 0, 1, 0, 3,
2399
  0, 1, 0, 2, 0, 1, 0, 5,
2400
  0, 1, 0, 2, 0, 1, 0, 3,
2401
  0, 1, 0, 2, 0, 1, 0, 4,
2402
  0, 1, 0, 2, 0, 1, 0, 3,
2403
  0, 1, 0, 2, 0, 1, 0, 6,
2404
  0, 1, 0, 2, 0, 1, 0, 3,
2405
  0, 1, 0, 2, 0, 1, 0, 4,
2406
  0, 1, 0, 2, 0, 1, 0, 3,
2407
  0, 1, 0, 2, 0, 1, 0, 5,
2408
  0, 1, 0, 2, 0, 1, 0, 3,
2409
  0, 1, 0, 2, 0, 1, 0, 4,
2410
  0, 1, 0, 2, 0, 1, 0, 3,
2411
  0, 1, 0, 2, 0, 1, 0, 8
2412
};
2413
2414
void
2415
sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2416
20.9k
{
2417
  /*
2418
   * Now we also need to check the mapping array in a couple of ways.
2419
   * 1) Did we move the cum-ack point?
2420
   *
2421
   * When you first glance at this you might think
2422
   * that all entries that make up the position
2423
   * of the cum-ack would be in the nr-mapping array
2424
   * only.. i.e. things up to the cum-ack are always
2425
   * deliverable. Thats true with one exception, when
2426
   * its a fragmented message we may not deliver the data
2427
   * until some threshold (or all of it) is in place. So
2428
   * we must OR the nr_mapping_array and mapping_array to
2429
   * get a true picture of the cum-ack.
2430
   */
2431
20.9k
  struct sctp_association *asoc;
2432
20.9k
  int at;
2433
20.9k
  uint8_t val;
2434
20.9k
  int slide_from, slide_end, lgap, distance;
2435
20.9k
  uint32_t old_cumack, old_base, old_highest, highest_tsn;
2436
2437
20.9k
  asoc = &stcb->asoc;
2438
2439
20.9k
  old_cumack = asoc->cumulative_tsn;
2440
20.9k
  old_base = asoc->mapping_array_base_tsn;
2441
20.9k
  old_highest = asoc->highest_tsn_inside_map;
2442
  /*
2443
   * We could probably improve this a small bit by calculating the
2444
   * offset of the current cum-ack as the starting point.
2445
   */
2446
20.9k
  at = 0;
2447
21.6k
  for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2448
21.6k
    val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2449
21.6k
    if (val == 0xff) {
2450
729
      at += 8;
2451
20.8k
    } else {
2452
      /* there is a 0 bit */
2453
20.8k
      at += sctp_map_lookup_tab[val];
2454
20.8k
      break;
2455
20.8k
    }
2456
21.6k
  }
2457
20.9k
  asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at-1);
2458
2459
20.9k
  if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2460
20.9k
            SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2461
0
#ifdef INVARIANTS
2462
0
    panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2463
0
          asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2464
#else
2465
    SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2466
          asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2467
    sctp_print_mapping_array(asoc);
2468
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2469
      sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2470
    }
2471
    asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2472
    asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2473
#endif
2474
0
  }
2475
20.9k
  if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2476
4.04k
    highest_tsn = asoc->highest_tsn_inside_nr_map;
2477
16.8k
  } else {
2478
16.8k
    highest_tsn = asoc->highest_tsn_inside_map;
2479
16.8k
  }
2480
20.9k
  if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2481
    /* The complete array was completed by a single FR */
2482
    /* highest becomes the cum-ack */
2483
125
    int clr;
2484
125
#ifdef INVARIANTS
2485
125
    unsigned int i;
2486
125
#endif
2487
2488
    /* clear the array */
2489
125
    clr = ((at+7) >> 3);
2490
125
    if (clr > asoc->mapping_array_size) {
2491
0
      clr = asoc->mapping_array_size;
2492
0
    }
2493
125
    memset(asoc->mapping_array, 0, clr);
2494
125
    memset(asoc->nr_mapping_array, 0, clr);
2495
125
#ifdef INVARIANTS
2496
2.12k
    for (i = 0; i < asoc->mapping_array_size; i++) {
2497
2.00k
      if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2498
0
        SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2499
0
        sctp_print_mapping_array(asoc);
2500
0
      }
2501
2.00k
    }
2502
125
#endif
2503
125
    asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2504
125
    asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2505
20.7k
  } else if (at >= 8) {
2506
    /* we can slide the mapping array down */
2507
    /* slide_from holds where we hit the first NON 0xff byte */
2508
2509
    /*
2510
     * now calculate the ceiling of the move using our highest
2511
     * TSN value
2512
     */
2513
29
    SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2514
29
    slide_end = (lgap >> 3);
2515
29
    if (slide_end < slide_from) {
2516
0
      sctp_print_mapping_array(asoc);
2517
0
#ifdef INVARIANTS
2518
0
      panic("impossible slide");
2519
#else
2520
      SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2521
                  lgap, slide_end, slide_from, at);
2522
      return;
2523
#endif
2524
0
    }
2525
29
    if (slide_end > asoc->mapping_array_size) {
2526
0
#ifdef INVARIANTS
2527
0
      panic("would overrun buffer");
2528
#else
2529
      SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2530
                  asoc->mapping_array_size, slide_end);
2531
      slide_end = asoc->mapping_array_size;
2532
#endif
2533
0
    }
2534
29
    distance = (slide_end - slide_from) + 1;
2535
29
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2536
0
      sctp_log_map(old_base, old_cumack, old_highest,
2537
0
             SCTP_MAP_PREPARE_SLIDE);
2538
0
      sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2539
0
             (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2540
0
    }
2541
29
    if (distance + slide_from > asoc->mapping_array_size ||
2542
29
        distance < 0) {
2543
      /*
2544
       * Here we do NOT slide forward the array so that
2545
       * hopefully when more data comes in to fill it up
2546
       * we will be able to slide it forward. Really I
2547
       * don't think this should happen :-0
2548
       */
2549
0
      if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2550
0
        sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2551
0
               (uint32_t) asoc->mapping_array_size,
2552
0
               SCTP_MAP_SLIDE_NONE);
2553
0
      }
2554
29
    } else {
2555
29
      int ii;
2556
2557
3.68k
      for (ii = 0; ii < distance; ii++) {
2558
3.65k
        asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2559
3.65k
        asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2560
3.65k
      }
2561
1.09k
      for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2562
1.06k
        asoc->mapping_array[ii] = 0;
2563
1.06k
        asoc->nr_mapping_array[ii] = 0;
2564
1.06k
      }
2565
29
      if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2566
11
        asoc->highest_tsn_inside_map += (slide_from << 3);
2567
11
      }
2568
29
      if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2569
0
        asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2570
0
      }
2571
29
      asoc->mapping_array_base_tsn += (slide_from << 3);
2572
29
      if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2573
0
        sctp_log_map(asoc->mapping_array_base_tsn,
2574
0
               asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2575
0
               SCTP_MAP_SLIDE_RESULT);
2576
0
      }
2577
29
    }
2578
29
  }
2579
20.9k
}
2580
2581
void
2582
sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2583
6.03k
{
2584
6.03k
  struct sctp_association *asoc;
2585
6.03k
  uint32_t highest_tsn;
2586
6.03k
  int is_a_gap;
2587
2588
6.03k
  sctp_slide_mapping_arrays(stcb);
2589
6.03k
  asoc = &stcb->asoc;
2590
6.03k
  if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2591
1.01k
    highest_tsn = asoc->highest_tsn_inside_nr_map;
2592
5.02k
  } else {
2593
5.02k
    highest_tsn = asoc->highest_tsn_inside_map;
2594
5.02k
  }
2595
  /* Is there a gap now? */
2596
6.03k
  is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2597
2598
  /*
2599
   * Now we need to see if we need to queue a sack or just start the
2600
   * timer (if allowed).
2601
   */
2602
6.03k
  if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2603
    /*
2604
     * Ok special case, in SHUTDOWN-SENT case. here we
2605
     * maker sure SACK timer is off and instead send a
2606
     * SHUTDOWN and a SACK
2607
     */
2608
0
    if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2609
0
      sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2610
0
                      stcb->sctp_ep, stcb, NULL,
2611
0
                      SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2612
0
    }
2613
0
    sctp_send_shutdown(stcb,
2614
0
                       ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2615
0
    if (is_a_gap) {
2616
0
      sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2617
0
    }
2618
6.03k
  } else {
2619
    /*
2620
     * CMT DAC algorithm: increase number of packets
2621
     * received since last ack
2622
     */
2623
6.03k
    stcb->asoc.cmt_dac_pkts_rcvd++;
2624
2625
6.03k
    if ((stcb->asoc.send_sack == 1) ||      /* We need to send a SACK */
2626
6.03k
        ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2627
                                             * longer is one */
2628
6.03k
        (stcb->asoc.numduptsns) ||          /* we have dup's */
2629
6.03k
        (is_a_gap) ||                       /* is still a gap */
2630
6.03k
        (stcb->asoc.delayed_ack == 0) ||    /* Delayed sack disabled */
2631
6.03k
        (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)) { /* hit limit of pkts */
2632
5.89k
      if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2633
5.89k
          (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2634
5.89k
          (stcb->asoc.send_sack == 0) &&
2635
5.89k
          (stcb->asoc.numduptsns == 0) &&
2636
5.89k
          (stcb->asoc.delayed_ack) &&
2637
5.89k
          (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2638
        /*
2639
         * CMT DAC algorithm: With CMT,
2640
         * delay acks even in the face of
2641
         * reordering. Therefore, if acks
2642
         * that do not have to be sent
2643
         * because of the above reasons,
2644
         * will be delayed. That is, acks
2645
         * that would have been sent due to
2646
         * gap reports will be delayed with
2647
         * DAC. Start the delayed ack timer.
2648
         */
2649
0
        sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2650
0
                         stcb->sctp_ep, stcb, NULL);
2651
5.89k
      } else {
2652
        /*
2653
         * Ok we must build a SACK since the
2654
         * timer is pending, we got our
2655
         * first packet OR there are gaps or
2656
         * duplicates.
2657
         */
2658
5.89k
        sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2659
5.89k
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
2660
5.89k
        sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2661
5.89k
      }
2662
5.89k
    } else {
2663
145
      if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2664
145
        sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2665
145
                         stcb->sctp_ep, stcb, NULL);
2666
145
      }
2667
145
    }
2668
6.03k
  }
2669
6.03k
}
2670
2671
int
2672
sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2673
                  struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2674
                  struct sctp_nets *net, uint32_t *high_tsn)
2675
6.02k
{
2676
6.02k
  struct sctp_chunkhdr *ch, chunk_buf;
2677
6.02k
  struct sctp_association *asoc;
2678
6.02k
  int num_chunks = 0; /* number of control chunks processed */
2679
6.02k
  int stop_proc = 0;
2680
6.02k
  int break_flag, last_chunk;
2681
6.02k
  int abort_flag = 0, was_a_gap;
2682
6.02k
  struct mbuf *m;
2683
6.02k
  uint32_t highest_tsn;
2684
6.02k
  uint16_t chk_length;
2685
2686
  /* set the rwnd */
2687
6.02k
  sctp_set_rwnd(stcb, &stcb->asoc);
2688
2689
6.02k
  m = *mm;
2690
6.02k
  SCTP_TCB_LOCK_ASSERT(stcb);
2691
6.02k
  asoc = &stcb->asoc;
2692
6.02k
  if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2693
78
    highest_tsn = asoc->highest_tsn_inside_nr_map;
2694
5.94k
  } else {
2695
5.94k
    highest_tsn = asoc->highest_tsn_inside_map;
2696
5.94k
  }
2697
6.02k
  was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2698
  /*
2699
   * setup where we got the last DATA packet from for any SACK that
2700
   * may need to go out. Don't bump the net. This is done ONLY when a
2701
   * chunk is assigned.
2702
   */
2703
6.02k
  asoc->last_data_chunk_from = net;
2704
2705
  /*-
2706
   * Now before we proceed we must figure out if this is a wasted
2707
   * cluster... i.e. it is a small packet sent in and yet the driver
2708
   * underneath allocated a full cluster for it. If so we must copy it
2709
   * to a smaller mbuf and free up the cluster mbuf. This will help
2710
   * with cluster starvation.
2711
   */
2712
6.02k
  if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2713
    /* we only handle mbufs that are singletons.. not chains */
2714
2.51k
    m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2715
2.51k
    if (m) {
2716
      /* ok lets see if we can copy the data up */
2717
2.51k
      caddr_t *from, *to;
2718
      /* get the pointers and copy */
2719
2.51k
      to = mtod(m, caddr_t *);
2720
2.51k
      from = mtod((*mm), caddr_t *);
2721
2.51k
      memcpy(to, from, SCTP_BUF_LEN((*mm)));
2722
      /* copy the length and free up the old */
2723
2.51k
      SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2724
2.51k
      sctp_m_freem(*mm);
2725
      /* success, back copy */
2726
2.51k
      *mm = m;
2727
2.51k
    } else {
2728
      /* We are in trouble in the mbuf world .. yikes */
2729
0
      m = *mm;
2730
0
    }
2731
2.51k
  }
2732
  /* get pointer to the first chunk header */
2733
6.02k
  ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2734
6.02k
                                             sizeof(struct sctp_chunkhdr),
2735
6.02k
                                             (uint8_t *)&chunk_buf);
2736
6.02k
  if (ch == NULL) {
2737
0
    return (1);
2738
0
  }
2739
  /*
2740
   * process all DATA chunks...
2741
   */
2742
6.02k
  *high_tsn = asoc->cumulative_tsn;
2743
6.02k
  break_flag = 0;
2744
6.02k
  asoc->data_pkts_seen++;
2745
23.1k
  while (stop_proc == 0) {
2746
    /* validate chunk length */
2747
17.8k
    chk_length = ntohs(ch->chunk_length);
2748
17.8k
    if (length - *offset < chk_length) {
2749
      /* all done, mutulated chunk */
2750
562
      stop_proc = 1;
2751
562
      continue;
2752
562
    }
2753
17.3k
    if ((asoc->idata_supported == 1) &&
2754
17.3k
        (ch->chunk_type == SCTP_DATA)) {
2755
197
      struct mbuf *op_err;
2756
197
      char msg[SCTP_DIAG_INFO_LEN];
2757
2758
197
      SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2759
197
      op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2760
197
      stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2761
197
      sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2762
197
      return (2);
2763
197
    }
2764
17.1k
    if ((asoc->idata_supported == 0) &&
2765
17.1k
        (ch->chunk_type == SCTP_IDATA)) {
2766
8
      struct mbuf *op_err;
2767
8
      char msg[SCTP_DIAG_INFO_LEN];
2768
2769
8
      SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2770
8
      op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2771
8
      stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2772
8
      sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2773
8
      return (2);
2774
8
    }
2775
17.1k
    if ((ch->chunk_type == SCTP_DATA) ||
2776
17.1k
        (ch->chunk_type == SCTP_IDATA)) {
2777
14.9k
      uint16_t clen;
2778
2779
14.9k
      if (ch->chunk_type == SCTP_DATA) {
2780
10.6k
        clen = sizeof(struct sctp_data_chunk);
2781
10.6k
      } else {
2782
4.27k
        clen = sizeof(struct sctp_idata_chunk);
2783
4.27k
      }
2784
14.9k
      if (chk_length < clen) {
2785
        /*
2786
         * Need to send an abort since we had a
2787
         * invalid data chunk.
2788
         */
2789
88
        struct mbuf *op_err;
2790
88
        char msg[SCTP_DIAG_INFO_LEN];
2791
2792
88
        SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
2793
88
                      ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2794
88
                      chk_length);
2795
88
        op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2796
88
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2797
88
        sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2798
88
        return (2);
2799
88
      }
2800
#ifdef SCTP_AUDITING_ENABLED
2801
      sctp_audit_log(0xB1, 0);
2802
#endif
2803
14.8k
      if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2804
2.69k
        last_chunk = 1;
2805
12.1k
      } else {
2806
12.1k
        last_chunk = 0;
2807
12.1k
      }
2808
14.8k
      if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2809
14.8k
                  chk_length, net, high_tsn, &abort_flag, &break_flag,
2810
14.8k
                  last_chunk, ch->chunk_type)) {
2811
6.81k
        num_chunks++;
2812
6.81k
      }
2813
14.8k
      if (abort_flag)
2814
435
        return (2);
2815
2816
14.3k
      if (break_flag) {
2817
        /*
2818
         * Set because of out of rwnd space and no
2819
         * drop rep space left.
2820
         */
2821
15
        stop_proc = 1;
2822
15
        continue;
2823
15
      }
2824
14.3k
    } else {
2825
      /* not a data chunk in the data region */
2826
2.20k
      switch (ch->chunk_type) {
2827
7
      case SCTP_INITIATION:
2828
8
      case SCTP_INITIATION_ACK:
2829
10
      case SCTP_SELECTIVE_ACK:
2830
13
      case SCTP_NR_SELECTIVE_ACK:
2831
25
      case SCTP_HEARTBEAT_REQUEST:
2832
26
      case SCTP_HEARTBEAT_ACK:
2833
29
      case SCTP_ABORT_ASSOCIATION:
2834
31
      case SCTP_SHUTDOWN:
2835
34
      case SCTP_SHUTDOWN_ACK:
2836
45
      case SCTP_OPERATION_ERROR:
2837
46
      case SCTP_COOKIE_ECHO:
2838
49
      case SCTP_COOKIE_ACK:
2839
51
      case SCTP_ECN_ECHO:
2840
53
      case SCTP_ECN_CWR:
2841
55
      case SCTP_SHUTDOWN_COMPLETE:
2842
57
      case SCTP_AUTHENTICATION:
2843
60
      case SCTP_ASCONF_ACK:
2844
61
      case SCTP_PACKET_DROPPED:
2845
62
      case SCTP_STREAM_RESET:
2846
65
      case SCTP_FORWARD_CUM_TSN:
2847
66
      case SCTP_ASCONF:
2848
66
      {
2849
        /*
2850
         * Now, what do we do with KNOWN chunks that
2851
         * are NOT in the right place?
2852
         *
2853
         * For now, I do nothing but ignore them. We
2854
         * may later want to add sysctl stuff to
2855
         * switch out and do either an ABORT() or
2856
         * possibly process them.
2857
         */
2858
66
        struct mbuf *op_err;
2859
66
        char msg[SCTP_DIAG_INFO_LEN];
2860
2861
66
        SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2862
66
                      ch->chunk_type);
2863
66
        op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2864
66
        sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2865
66
        return (2);
2866
65
      }
2867
2.13k
      default:
2868
        /*
2869
         * Unknown chunk type: use bit rules after
2870
         * checking length
2871
         */
2872
2.13k
        if (chk_length < sizeof(struct sctp_chunkhdr)) {
2873
          /*
2874
           * Need to send an abort since we had a
2875
           * invalid chunk.
2876
           */
2877
16
          struct mbuf *op_err;
2878
16
          char msg[SCTP_DIAG_INFO_LEN];
2879
2880
16
          SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
2881
16
          op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2882
16
          stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
2883
16
          sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2884
16
          return (2);
2885
16
        }
2886
2.12k
        if (ch->chunk_type & 0x40) {
2887
          /* Add a error report to the queue */
2888
1.78k
          struct mbuf *op_err;
2889
1.78k
          struct sctp_gen_error_cause *cause;
2890
2891
1.78k
          op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2892
1.78k
                                         0, M_NOWAIT, 1, MT_DATA);
2893
1.78k
          if (op_err != NULL) {
2894
1.78k
            cause  = mtod(op_err, struct sctp_gen_error_cause *);
2895
1.78k
            cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2896
1.78k
            cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2897
1.78k
            SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2898
1.78k
            SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2899
1.78k
            if (SCTP_BUF_NEXT(op_err) != NULL) {
2900
1.78k
              sctp_queue_op_err(stcb, op_err);
2901
1.78k
            } else {
2902
0
              sctp_m_freem(op_err);
2903
0
            }
2904
1.78k
          }
2905
1.78k
        }
2906
2.12k
        if ((ch->chunk_type & 0x80) == 0) {
2907
          /* discard the rest of this packet */
2908
17
          stop_proc = 1;
2909
17
        }  /* else skip this bad chunk and
2910
           * continue... */
2911
2.12k
        break;
2912
2.20k
      } /* switch of chunk type */
2913
2.20k
    }
2914
16.5k
    *offset += SCTP_SIZE32(chk_length);
2915
16.5k
    if ((*offset >= length) || stop_proc) {
2916
      /* no more data left in the mbuf chain */
2917
4.54k
      stop_proc = 1;
2918
4.54k
      continue;
2919
4.54k
    }
2920
11.9k
    ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2921
11.9k
                                               sizeof(struct sctp_chunkhdr),
2922
11.9k
                                               (uint8_t *)&chunk_buf);
2923
11.9k
    if (ch == NULL) {
2924
97
      *offset = length;
2925
97
      stop_proc = 1;
2926
97
      continue;
2927
97
    }
2928
11.9k
  }
2929
5.21k
  if (break_flag) {
2930
    /*
2931
     * we need to report rwnd overrun drops.
2932
     */
2933
15
    sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2934
15
  }
2935
5.21k
  if (num_chunks) {
2936
    /*
2937
     * Did we get data, if so update the time for auto-close and
2938
     * give peer credit for being alive.
2939
     */
2940
2.01k
    SCTP_STAT_INCR(sctps_recvpktwithdata);
2941
2.01k
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2942
0
      sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2943
0
               stcb->asoc.overall_error_count,
2944
0
               0,
2945
0
               SCTP_FROM_SCTP_INDATA,
2946
0
               __LINE__);
2947
0
    }
2948
2.01k
    stcb->asoc.overall_error_count = 0;
2949
2.01k
    (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2950
2.01k
  }
2951
  /* now service all of the reassm queue if needed */
2952
5.21k
  if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2953
    /* Assure that we ack right away */
2954
0
    stcb->asoc.send_sack = 1;
2955
0
  }
2956
  /* Start a sack timer or QUEUE a SACK for sending */
2957
5.21k
  sctp_sack_check(stcb, was_a_gap);
2958
5.21k
  return (0);
2959
6.02k
}
2960
2961
static int
2962
sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2963
         uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2964
         int *num_frs,
2965
         uint32_t *biggest_newly_acked_tsn,
2966
         uint32_t  *this_sack_lowest_newack,
2967
         int *rto_ok)
2968
0
{
2969
0
  struct sctp_tmit_chunk *tp1;
2970
0
  unsigned int theTSN;
2971
0
  int j, wake_him = 0, circled = 0;
2972
2973
  /* Recover the tp1 we last saw */
2974
0
  tp1 = *p_tp1;
2975
0
  if (tp1 == NULL) {
2976
0
    tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2977
0
  }
2978
0
  for (j = frag_strt; j <= frag_end; j++) {
2979
0
    theTSN = j + last_tsn;
2980
0
    while (tp1) {
2981
0
      if (tp1->rec.data.doing_fast_retransmit)
2982
0
        (*num_frs) += 1;
2983
2984
      /*-
2985
       * CMT: CUCv2 algorithm. For each TSN being
2986
       * processed from the sent queue, track the
2987
       * next expected pseudo-cumack, or
2988
       * rtx_pseudo_cumack, if required. Separate
2989
       * cumack trackers for first transmissions,
2990
       * and retransmissions.
2991
       */
2992
0
      if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2993
0
          (tp1->whoTo->find_pseudo_cumack == 1) &&
2994
0
          (tp1->snd_count == 1)) {
2995
0
        tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2996
0
        tp1->whoTo->find_pseudo_cumack = 0;
2997
0
      }
2998
0
      if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2999
0
          (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
3000
0
          (tp1->snd_count > 1)) {
3001
0
        tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
3002
0
        tp1->whoTo->find_rtx_pseudo_cumack = 0;
3003
0
      }
3004
0
      if (tp1->rec.data.tsn == theTSN) {
3005
0
        if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3006
          /*-
3007
           * must be held until
3008
           * cum-ack passes
3009
           */
3010
0
          if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3011
            /*-
3012
             * If it is less than RESEND, it is
3013
             * now no-longer in flight.
3014
             * Higher values may already be set
3015
             * via previous Gap Ack Blocks...
3016
             * i.e. ACKED or RESEND.
3017
             */
3018
0
            if (SCTP_TSN_GT(tp1->rec.data.tsn,
3019
0
                            *biggest_newly_acked_tsn)) {
3020
0
              *biggest_newly_acked_tsn = tp1->rec.data.tsn;
3021
0
            }
3022
            /*-
3023
             * CMT: SFR algo (and HTNA) - set
3024
             * saw_newack to 1 for dest being
3025
             * newly acked. update
3026
             * this_sack_highest_newack if
3027
             * appropriate.
3028
             */
3029
0
            if (tp1->rec.data.chunk_was_revoked == 0)
3030
0
              tp1->whoTo->saw_newack = 1;
3031
3032
0
            if (SCTP_TSN_GT(tp1->rec.data.tsn,
3033
0
                            tp1->whoTo->this_sack_highest_newack)) {
3034
0
              tp1->whoTo->this_sack_highest_newack =
3035
0
                tp1->rec.data.tsn;
3036
0
            }
3037
            /*-
3038
             * CMT DAC algo: also update
3039
             * this_sack_lowest_newack
3040
             */
3041
0
            if (*this_sack_lowest_newack == 0) {
3042
0
              if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3043
0
                sctp_log_sack(*this_sack_lowest_newack,
3044
0
                        last_tsn,
3045
0
                        tp1->rec.data.tsn,
3046
0
                        0,
3047
0
                        0,
3048
0
                        SCTP_LOG_TSN_ACKED);
3049
0
              }
3050
0
              *this_sack_lowest_newack = tp1->rec.data.tsn;
3051
0
            }
3052
            /*-
3053
             * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3054
             * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3055
             * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3056
             * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3057
             * Separate pseudo_cumack trackers for first transmissions and
3058
             * retransmissions.
3059
             */
3060
0
            if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3061
0
              if (tp1->rec.data.chunk_was_revoked == 0) {
3062
0
                tp1->whoTo->new_pseudo_cumack = 1;
3063
0
              }
3064
0
              tp1->whoTo->find_pseudo_cumack = 1;
3065
0
            }
3066
0
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3067
0
              sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3068
0
            }
3069
0
            if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3070
0
              if (tp1->rec.data.chunk_was_revoked == 0) {
3071
0
                tp1->whoTo->new_pseudo_cumack = 1;
3072
0
              }
3073
0
              tp1->whoTo->find_rtx_pseudo_cumack = 1;
3074
0
            }
3075
0
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3076
0
              sctp_log_sack(*biggest_newly_acked_tsn,
3077
0
                      last_tsn,
3078
0
                      tp1->rec.data.tsn,
3079
0
                      frag_strt,
3080
0
                      frag_end,
3081
0
                      SCTP_LOG_TSN_ACKED);
3082
0
            }
3083
0
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3084
0
              sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3085
0
                       tp1->whoTo->flight_size,
3086
0
                       tp1->book_size,
3087
0
                       (uint32_t)(uintptr_t)tp1->whoTo,
3088
0
                       tp1->rec.data.tsn);
3089
0
            }
3090
0
            sctp_flight_size_decrease(tp1);
3091
0
            if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3092
0
              (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3093
0
                                 tp1);
3094
0
            }
3095
0
            sctp_total_flight_decrease(stcb, tp1);
3096
3097
0
            tp1->whoTo->net_ack += tp1->send_size;
3098
0
            if (tp1->snd_count < 2) {
3099
              /*-
3100
               * True non-retransmitted chunk
3101
               */
3102
0
              tp1->whoTo->net_ack2 += tp1->send_size;
3103
3104
              /*-
3105
               * update RTO too ?
3106
               */
3107
0
              if (tp1->do_rtt) {
3108
0
                if (*rto_ok &&
3109
0
                    sctp_calculate_rto(stcb,
3110
0
                                       &stcb->asoc,
3111
0
                                       tp1->whoTo,
3112
0
                                       &tp1->sent_rcv_time,
3113
0
                                       SCTP_RTT_FROM_DATA)) {
3114
0
                  *rto_ok = 0;
3115
0
                }
3116
0
                if (tp1->whoTo->rto_needed == 0) {
3117
0
                  tp1->whoTo->rto_needed = 1;
3118
0
                }
3119
0
                tp1->do_rtt = 0;
3120
0
              }
3121
0
            }
3122
0
          }
3123
0
          if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3124
0
            if (SCTP_TSN_GT(tp1->rec.data.tsn,
3125
0
                            stcb->asoc.this_sack_highest_gap)) {
3126
0
              stcb->asoc.this_sack_highest_gap =
3127
0
                tp1->rec.data.tsn;
3128
0
            }
3129
0
            if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3130
0
              sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3131
#ifdef SCTP_AUDITING_ENABLED
3132
              sctp_audit_log(0xB2,
3133
                       (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3134
#endif
3135
0
            }
3136
0
          }
3137
          /*-
3138
           * All chunks NOT UNSENT fall through here and are marked
3139
           * (leave PR-SCTP ones that are to skip alone though)
3140
           */
3141
0
          if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3142
0
              (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3143
0
            tp1->sent = SCTP_DATAGRAM_MARKED;
3144
0
          }
3145
0
          if (tp1->rec.data.chunk_was_revoked) {
3146
            /* deflate the cwnd */
3147
0
            tp1->whoTo->cwnd -= tp1->book_size;
3148
0
            tp1->rec.data.chunk_was_revoked = 0;
3149
0
          }
3150
          /* NR Sack code here */
3151
0
          if (nr_sacking &&
3152
0
              (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3153
0
            if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3154
0
              stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3155
0
#ifdef INVARIANTS
3156
0
            } else {
3157
0
              panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3158
0
#endif
3159
0
            }
3160
0
            if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3161
0
                (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3162
0
                TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3163
0
              stcb->asoc.trigger_reset = 1;
3164
0
            }
3165
0
            tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3166
0
            if (tp1->data) {
3167
              /* sa_ignore NO_NULL_CHK */
3168
0
              sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3169
0
              sctp_m_freem(tp1->data);
3170
0
              tp1->data = NULL;
3171
0
            }
3172
0
            wake_him++;
3173
0
          }
3174
0
        }
3175
0
        break;
3176
0
      } /* if (tp1->tsn == theTSN) */
3177
0
      if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3178
0
        break;
3179
0
      }
3180
0
      tp1 = TAILQ_NEXT(tp1, sctp_next);
3181
0
      if ((tp1 == NULL) && (circled == 0)) {
3182
0
        circled++;
3183
0
        tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3184
0
      }
3185
0
    } /* end while (tp1) */
3186
0
    if (tp1 == NULL) {
3187
0
      circled = 0;
3188
0
      tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3189
0
    }
3190
    /* In case the fragments were not in order we must reset */
3191
0
  } /* end for (j = fragStart */
3192
0
  *p_tp1 = tp1;
3193
0
  return (wake_him); /* Return value only used for nr-sack */
3194
0
}
3195
3196
static int
3197
sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3198
    uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3199
    uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3200
    int num_seg, int num_nr_seg, int *rto_ok)
3201
0
{
3202
0
  struct sctp_gap_ack_block *frag, block;
3203
0
  struct sctp_tmit_chunk *tp1;
3204
0
  int i;
3205
0
  int num_frs = 0;
3206
0
  int chunk_freed;
3207
0
  int non_revocable;
3208
0
  uint16_t frag_strt, frag_end, prev_frag_end;
3209
3210
0
  tp1 = TAILQ_FIRST(&asoc->sent_queue);
3211
0
  prev_frag_end = 0;
3212
0
  chunk_freed = 0;
3213
3214
0
  for (i = 0; i < (num_seg + num_nr_seg); i++) {
3215
0
    if (i == num_seg) {
3216
0
      prev_frag_end = 0;
3217
0
      tp1 = TAILQ_FIRST(&asoc->sent_queue);
3218
0
    }
3219
0
    frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3220
0
                                                      sizeof(struct sctp_gap_ack_block), (uint8_t *) &block);
3221
0
    *offset += sizeof(block);
3222
0
    if (frag == NULL) {
3223
0
      return (chunk_freed);
3224
0
    }
3225
0
    frag_strt = ntohs(frag->start);
3226
0
    frag_end = ntohs(frag->end);
3227
3228
0
    if (frag_strt > frag_end) {
3229
      /* This gap report is malformed, skip it. */
3230
0
      continue;
3231
0
    }
3232
0
    if (frag_strt <= prev_frag_end) {
3233
      /* This gap report is not in order, so restart. */
3234
0
       tp1 = TAILQ_FIRST(&asoc->sent_queue);
3235
0
    }
3236
0
    if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3237
0
      *biggest_tsn_acked = last_tsn + frag_end;
3238
0
    }
3239
0
    if (i < num_seg) {
3240
0
      non_revocable = 0;
3241
0
    } else {
3242
0
      non_revocable = 1;
3243
0
    }
3244
0
    if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3245
0
                                   non_revocable, &num_frs, biggest_newly_acked_tsn,
3246
0
                                   this_sack_lowest_newack, rto_ok)) {
3247
0
      chunk_freed = 1;
3248
0
    }
3249
0
    prev_frag_end = frag_end;
3250
0
  }
3251
0
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3252
0
    if (num_frs)
3253
0
      sctp_log_fr(*biggest_tsn_acked,
3254
0
                  *biggest_newly_acked_tsn,
3255
0
                  last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3256
0
  }
3257
0
  return (chunk_freed);
3258
0
}
3259
3260
static void
3261
sctp_check_for_revoked(struct sctp_tcb *stcb,
3262
           struct sctp_association *asoc, uint32_t cumack,
3263
           uint32_t biggest_tsn_acked)
3264
0
{
3265
0
  struct sctp_tmit_chunk *tp1;
3266
3267
0
  TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3268
0
    if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3269
      /*
3270
       * ok this guy is either ACK or MARKED. If it is
3271
       * ACKED it has been previously acked but not this
3272
       * time i.e. revoked.  If it is MARKED it was ACK'ed
3273
       * again.
3274
       */
3275
0
      if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3276
0
        break;
3277
0
      }
3278
0
      if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3279
        /* it has been revoked */
3280
0
        tp1->sent = SCTP_DATAGRAM_SENT;
3281
0
        tp1->rec.data.chunk_was_revoked = 1;
3282
        /* We must add this stuff back in to
3283
         * assure timers and such get started.
3284
         */
3285
0
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3286
0
          sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3287
0
                   tp1->whoTo->flight_size,
3288
0
                   tp1->book_size,
3289
0
                   (uint32_t)(uintptr_t)tp1->whoTo,
3290
0
                   tp1->rec.data.tsn);
3291
0
        }
3292
0
        sctp_flight_size_increase(tp1);
3293
0
        sctp_total_flight_increase(stcb, tp1);
3294
        /* We inflate the cwnd to compensate for our
3295
         * artificial inflation of the flight_size.
3296
         */
3297
0
        tp1->whoTo->cwnd += tp1->book_size;
3298
0
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3299
0
          sctp_log_sack(asoc->last_acked_seq,
3300
0
                  cumack,
3301
0
                  tp1->rec.data.tsn,
3302
0
                  0,
3303
0
                  0,
3304
0
                  SCTP_LOG_TSN_REVOKED);
3305
0
        }
3306
0
      } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3307
        /* it has been re-acked in this SACK */
3308
0
        tp1->sent = SCTP_DATAGRAM_ACKED;
3309
0
      }
3310
0
    }
3311
0
    if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3312
0
      break;
3313
0
  }
3314
0
}
3315
3316
static void
3317
sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3318
         uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3319
0
{
3320
0
  struct sctp_tmit_chunk *tp1;
3321
0
  int strike_flag = 0;
3322
0
  struct timeval now;
3323
0
  uint32_t sending_seq;
3324
0
  struct sctp_nets *net;
3325
0
  int num_dests_sacked = 0;
3326
3327
  /*
3328
   * select the sending_seq, this is either the next thing ready to be
3329
   * sent but not transmitted, OR, the next seq we assign.
3330
   */
3331
0
  tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3332
0
  if (tp1 == NULL) {
3333
0
    sending_seq = asoc->sending_seq;
3334
0
  } else {
3335
0
    sending_seq = tp1->rec.data.tsn;
3336
0
  }
3337
3338
  /* CMT DAC algo: finding out if SACK is a mixed SACK */
3339
0
  if ((asoc->sctp_cmt_on_off > 0) &&
3340
0
      SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3341
0
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3342
0
      if (net->saw_newack)
3343
0
        num_dests_sacked++;
3344
0
    }
3345
0
  }
3346
0
  if (stcb->asoc.prsctp_supported) {
3347
0
    (void)SCTP_GETTIME_TIMEVAL(&now);
3348
0
  }
3349
0
  TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3350
0
    strike_flag = 0;
3351
0
    if (tp1->no_fr_allowed) {
3352
      /* this one had a timeout or something */
3353
0
      continue;
3354
0
    }
3355
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3356
0
      if (tp1->sent < SCTP_DATAGRAM_RESEND)
3357
0
        sctp_log_fr(biggest_tsn_newly_acked,
3358
0
              tp1->rec.data.tsn,
3359
0
              tp1->sent,
3360
0
              SCTP_FR_LOG_CHECK_STRIKE);
3361
0
    }
3362
0
    if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3363
0
        tp1->sent == SCTP_DATAGRAM_UNSENT) {
3364
      /* done */
3365
0
      break;
3366
0
    }
3367
0
    if (stcb->asoc.prsctp_supported) {
3368
0
      if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3369
        /* Is it expired? */
3370
0
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
3371
0
        if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
3372
#else
3373
        if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3374
#endif
3375
          /* Yes so drop it */
3376
0
          if (tp1->data != NULL) {
3377
0
            (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3378
0
                     SCTP_SO_NOT_LOCKED);
3379
0
          }
3380
0
          continue;
3381
0
        }
3382
0
      }
3383
0
    }
3384
0
    if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3385
0
                    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3386
      /* we are beyond the tsn in the sack  */
3387
0
      break;
3388
0
    }
3389
0
    if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3390
      /* either a RESEND, ACKED, or MARKED */
3391
      /* skip */
3392
0
      if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3393
        /* Continue strikin FWD-TSN chunks */
3394
0
        tp1->rec.data.fwd_tsn_cnt++;
3395
0
      }
3396
0
      continue;
3397
0
    }
3398
    /*
3399
     * CMT : SFR algo (covers part of DAC and HTNA as well)
3400
     */
3401
0
    if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3402
      /*
3403
       * No new acks were received for data sent to this
3404
       * dest. Therefore, according to the SFR algo for
3405
       * CMT, no data sent to this dest can be marked for
3406
       * FR using this SACK.
3407
       */
3408
0
      continue;
3409
0
    } else if (tp1->whoTo &&
3410
0
               SCTP_TSN_GT(tp1->rec.data.tsn,
3411
0
                           tp1->whoTo->this_sack_highest_newack) &&
3412
0
               !(accum_moved && asoc->fast_retran_loss_recovery)) {
3413
      /*
3414
       * CMT: New acks were received for data sent to
3415
       * this dest. But no new acks were seen for data
3416
       * sent after tp1. Therefore, according to the SFR
3417
       * algo for CMT, tp1 cannot be marked for FR using
3418
       * this SACK. This step covers part of the DAC algo
3419
       * and the HTNA algo as well.
3420
       */
3421
0
      continue;
3422
0
    }
3423
    /*
3424
     * Here we check to see if we were have already done a FR
3425
     * and if so we see if the biggest TSN we saw in the sack is
3426
     * smaller than the recovery point. If so we don't strike
3427
     * the tsn... otherwise we CAN strike the TSN.
3428
     */
3429
    /*
3430
     * @@@ JRI: Check for CMT
3431
     * if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
3432
     */
3433
0
    if (accum_moved && asoc->fast_retran_loss_recovery) {
3434
      /*
3435
       * Strike the TSN if in fast-recovery and cum-ack
3436
       * moved.
3437
       */
3438
0
      if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3439
0
        sctp_log_fr(biggest_tsn_newly_acked,
3440
0
              tp1->rec.data.tsn,
3441
0
              tp1->sent,
3442
0
              SCTP_FR_LOG_STRIKE_CHUNK);
3443
0
      }
3444
0
      if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3445
0
        tp1->sent++;
3446
0
      }
3447
0
      if ((asoc->sctp_cmt_on_off > 0) &&
3448
0
          SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3449
        /*
3450
         * CMT DAC algorithm: If SACK flag is set to
3451
         * 0, then lowest_newack test will not pass
3452
         * because it would have been set to the
3453
         * cumack earlier. If not already to be
3454
         * rtx'd, If not a mixed sack and if tp1 is
3455
         * not between two sacked TSNs, then mark by
3456
         * one more.
3457
         * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
3458
         * two packets have been received after this missing TSN.
3459
         */
3460
0
        if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3461
0
            SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3462
0
          if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3463
0
            sctp_log_fr(16 + num_dests_sacked,
3464
0
                  tp1->rec.data.tsn,
3465
0
                  tp1->sent,
3466
0
                  SCTP_FR_LOG_STRIKE_CHUNK);
3467
0
          }
3468
0
          tp1->sent++;
3469
0
        }
3470
0
      }
3471
0
    } else if ((tp1->rec.data.doing_fast_retransmit) &&
3472
0
               (asoc->sctp_cmt_on_off == 0)) {
3473
      /*
3474
       * For those that have done a FR we must take
3475
       * special consideration if we strike. I.e the
3476
       * biggest_newly_acked must be higher than the
3477
       * sending_seq at the time we did the FR.
3478
       */
3479
0
      if (
3480
#ifdef SCTP_FR_TO_ALTERNATE
3481
        /*
3482
         * If FR's go to new networks, then we must only do
3483
         * this for singly homed asoc's. However if the FR's
3484
         * go to the same network (Armando's work) then its
3485
         * ok to FR multiple times.
3486
         */
3487
        (asoc->numnets < 2)
3488
#else
3489
0
        (1)
3490
0
#endif
3491
0
        ) {
3492
0
        if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3493
0
                        tp1->rec.data.fast_retran_tsn)) {
3494
          /*
3495
           * Strike the TSN, since this ack is
3496
           * beyond where things were when we
3497
           * did a FR.
3498
           */
3499
0
          if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3500
0
            sctp_log_fr(biggest_tsn_newly_acked,
3501
0
                  tp1->rec.data.tsn,
3502
0
                  tp1->sent,
3503
0
                  SCTP_FR_LOG_STRIKE_CHUNK);
3504
0
          }
3505
0
          if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3506
0
            tp1->sent++;
3507
0
          }
3508
0
          strike_flag = 1;
3509
0
          if ((asoc->sctp_cmt_on_off > 0) &&
3510
0
              SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3511
            /*
3512
             * CMT DAC algorithm: If
3513
             * SACK flag is set to 0,
3514
             * then lowest_newack test
3515
             * will not pass because it
3516
             * would have been set to
3517
             * the cumack earlier. If
3518
             * not already to be rtx'd,
3519
             * If not a mixed sack and
3520
             * if tp1 is not between two
3521
             * sacked TSNs, then mark by
3522
             * one more.
3523
             * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
3524
             * two packets have been received after this missing TSN.
3525
             */
3526
0
            if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3527
0
                (num_dests_sacked == 1) &&
3528
0
                SCTP_TSN_GT(this_sack_lowest_newack,
3529
0
                            tp1->rec.data.tsn)) {
3530
0
              if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3531
0
                sctp_log_fr(32 + num_dests_sacked,
3532
0
                      tp1->rec.data.tsn,
3533
0
                      tp1->sent,
3534
0
                      SCTP_FR_LOG_STRIKE_CHUNK);
3535
0
              }
3536
0
              if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3537
0
                tp1->sent++;
3538
0
              }
3539
0
            }
3540
0
          }
3541
0
        }
3542
0
      }
3543
      /*
3544
       * JRI: TODO: remove code for HTNA algo. CMT's
3545
       * SFR algo covers HTNA.
3546
       */
3547
0
    } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3548
0
                           biggest_tsn_newly_acked)) {
3549
      /*
3550
       * We don't strike these: This is the  HTNA
3551
       * algorithm i.e. we don't strike If our TSN is
3552
       * larger than the Highest TSN Newly Acked.
3553
       */
3554
0
      ;
3555
0
    } else {
3556
      /* Strike the TSN */
3557
0
      if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3558
0
        sctp_log_fr(biggest_tsn_newly_acked,
3559
0
              tp1->rec.data.tsn,
3560
0
              tp1->sent,
3561
0
              SCTP_FR_LOG_STRIKE_CHUNK);
3562
0
      }
3563
0
      if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3564
0
        tp1->sent++;
3565
0
      }
3566
0
      if ((asoc->sctp_cmt_on_off > 0) &&
3567
0
          SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3568
        /*
3569
         * CMT DAC algorithm: If SACK flag is set to
3570
         * 0, then lowest_newack test will not pass
3571
         * because it would have been set to the
3572
         * cumack earlier. If not already to be
3573
         * rtx'd, If not a mixed sack and if tp1 is
3574
         * not between two sacked TSNs, then mark by
3575
         * one more.
3576
         * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
3577
         * two packets have been received after this missing TSN.
3578
         */
3579
0
        if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3580
0
            SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3581
0
          if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3582
0
            sctp_log_fr(48 + num_dests_sacked,
3583
0
                  tp1->rec.data.tsn,
3584
0
                  tp1->sent,
3585
0
                  SCTP_FR_LOG_STRIKE_CHUNK);
3586
0
          }
3587
0
          tp1->sent++;
3588
0
        }
3589
0
      }
3590
0
    }
3591
0
    if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3592
0
      struct sctp_nets *alt;
3593
3594
      /* fix counts and things */
3595
0
      if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3596
0
        sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3597
0
                 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3598
0
                 tp1->book_size,
3599
0
                 (uint32_t)(uintptr_t)tp1->whoTo,
3600
0
                 tp1->rec.data.tsn);
3601
0
      }
3602
0
      if (tp1->whoTo) {
3603
0
        tp1->whoTo->net_ack++;
3604
0
        sctp_flight_size_decrease(tp1);
3605
0
        if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3606
0
          (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3607
0
                             tp1);
3608
0
        }
3609
0
      }
3610
3611
0
      if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3612
0
        sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3613
0
                asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3614
0
      }
3615
      /* add back to the rwnd */
3616
0
      asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3617
3618
      /* remove from the total flight */
3619
0
      sctp_total_flight_decrease(stcb, tp1);
3620
3621
0
      if ((stcb->asoc.prsctp_supported) &&
3622
0
          (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3623
        /* Has it been retransmitted tv_sec times? - we store the retran count there. */
3624
0
        if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3625
          /* Yes, so drop it */
3626
0
          if (tp1->data != NULL) {
3627
0
            (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3628
0
                     SCTP_SO_NOT_LOCKED);
3629
0
          }
3630
          /* Make sure to flag we had a FR */
3631
0
          if (tp1->whoTo != NULL) {
3632
0
            tp1->whoTo->net_ack++;
3633
0
          }
3634
0
          continue;
3635
0
        }
3636
0
      }
3637
      /* SCTP_PRINTF("OK, we are now ready to FR this guy\n"); */
3638
0
      if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3639
0
        sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3640
0
              0, SCTP_FR_MARKED);
3641
0
      }
3642
0
      if (strike_flag) {
3643
        /* This is a subsequent FR */
3644
0
        SCTP_STAT_INCR(sctps_sendmultfastretrans);
3645
0
      }
3646
0
      sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3647
0
      if (asoc->sctp_cmt_on_off > 0) {
3648
        /*
3649
         * CMT: Using RTX_SSTHRESH policy for CMT.
3650
         * If CMT is being used, then pick dest with
3651
         * largest ssthresh for any retransmission.
3652
         */
3653
0
        tp1->no_fr_allowed = 1;
3654
0
        alt = tp1->whoTo;
3655
        /*sa_ignore NO_NULL_CHK*/
3656
0
        if (asoc->sctp_cmt_pf > 0) {
3657
          /* JRS 5/18/07 - If CMT PF is on, use the PF version of find_alt_net() */
3658
0
          alt = sctp_find_alternate_net(stcb, alt, 2);
3659
0
        } else {
3660
          /* JRS 5/18/07 - If only CMT is on, use the CMT version of find_alt_net() */
3661
                                        /*sa_ignore NO_NULL_CHK*/
3662
0
          alt = sctp_find_alternate_net(stcb, alt, 1);
3663
0
        }
3664
0
        if (alt == NULL) {
3665
0
          alt = tp1->whoTo;
3666
0
        }
3667
        /*
3668
         * CUCv2: If a different dest is picked for
3669
         * the retransmission, then new
3670
         * (rtx-)pseudo_cumack needs to be tracked
3671
         * for orig dest. Let CUCv2 track new (rtx-)
3672
         * pseudo-cumack always.
3673
         */
3674
0
        if (tp1->whoTo) {
3675
0
          tp1->whoTo->find_pseudo_cumack = 1;
3676
0
          tp1->whoTo->find_rtx_pseudo_cumack = 1;
3677
0
        }
3678
0
      } else {/* CMT is OFF */
3679
#ifdef SCTP_FR_TO_ALTERNATE
3680
        /* Can we find an alternate? */
3681
        alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3682
#else
3683
        /*
3684
         * default behavior is to NOT retransmit
3685
         * FR's to an alternate. Armando Caro's
3686
         * paper details why.
3687
         */
3688
0
        alt = tp1->whoTo;
3689
0
#endif
3690
0
      }
3691
3692
0
      tp1->rec.data.doing_fast_retransmit = 1;
3693
      /* mark the sending seq for possible subsequent FR's */
3694
      /*
3695
       * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3696
       * (uint32_t)tpi->rec.data.tsn);
3697
       */
3698
0
      if (TAILQ_EMPTY(&asoc->send_queue)) {
3699
        /*
3700
         * If the queue of send is empty then its
3701
         * the next sequence number that will be
3702
         * assigned so we subtract one from this to
3703
         * get the one we last sent.
3704
         */
3705
0
        tp1->rec.data.fast_retran_tsn = sending_seq;
3706
0
      } else {
3707
        /*
3708
         * If there are chunks on the send queue
3709
         * (unsent data that has made it from the
3710
         * stream queues but not out the door, we
3711
         * take the first one (which will have the
3712
         * lowest TSN) and subtract one to get the
3713
         * one we last sent.
3714
         */
3715
0
        struct sctp_tmit_chunk *ttt;
3716
3717
0
        ttt = TAILQ_FIRST(&asoc->send_queue);
3718
0
        tp1->rec.data.fast_retran_tsn =
3719
0
          ttt->rec.data.tsn;
3720
0
      }
3721
3722
0
      if (tp1->do_rtt) {
3723
        /*
3724
         * this guy had a RTO calculation pending on
3725
         * it, cancel it
3726
         */
3727
0
        if ((tp1->whoTo != NULL) &&
3728
0
            (tp1->whoTo->rto_needed == 0)) {
3729
0
          tp1->whoTo->rto_needed = 1;
3730
0
        }
3731
0
        tp1->do_rtt = 0;
3732
0
      }
3733
0
      if (alt != tp1->whoTo) {
3734
        /* yes, there is an alternate. */
3735
0
        sctp_free_remote_addr(tp1->whoTo);
3736
        /*sa_ignore FREED_MEMORY*/
3737
0
        tp1->whoTo = alt;
3738
0
        atomic_add_int(&alt->ref_count, 1);
3739
0
      }
3740
0
    }
3741
0
  }
3742
0
}
3743
3744
struct sctp_tmit_chunk *
3745
sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3746
    struct sctp_association *asoc)
3747
0
{
3748
0
  struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3749
0
  struct timeval now;
3750
0
  int now_filled = 0;
3751
3752
0
  if (asoc->prsctp_supported == 0) {
3753
0
    return (NULL);
3754
0
  }
3755
0
  TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3756
0
    if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3757
0
        tp1->sent != SCTP_DATAGRAM_RESEND &&
3758
0
        tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3759
      /* no chance to advance, out of here */
3760
0
      break;
3761
0
    }
3762
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3763
0
      if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3764
0
          (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3765
0
        sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3766
0
                 asoc->advanced_peer_ack_point,
3767
0
                 tp1->rec.data.tsn, 0, 0);
3768
0
      }
3769
0
    }
3770
0
    if (!PR_SCTP_ENABLED(tp1->flags)) {
3771
      /*
3772
       * We can't fwd-tsn past any that are reliable aka
3773
       * retransmitted until the asoc fails.
3774
       */
3775
0
      break;
3776
0
    }
3777
0
    if (!now_filled) {
3778
0
      (void)SCTP_GETTIME_TIMEVAL(&now);
3779
0
      now_filled = 1;
3780
0
    }
3781
    /*
3782
     * now we got a chunk which is marked for another
3783
     * retransmission to a PR-stream but has run out its chances
3784
     * already maybe OR has been marked to skip now. Can we skip
3785
     * it if its a resend?
3786
     */
3787
0
    if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3788
0
        (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3789
      /*
3790
       * Now is this one marked for resend and its time is
3791
       * now up?
3792
       */
3793
0
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
3794
0
      if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
3795
#else
3796
      if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3797
#endif
3798
        /* Yes so drop it */
3799
0
        if (tp1->data) {
3800
0
          (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3801
0
              1, SCTP_SO_NOT_LOCKED);
3802
0
        }
3803
0
      } else {
3804
        /*
3805
         * No, we are done when hit one for resend
3806
         * whos time as not expired.
3807
         */
3808
0
        break;
3809
0
      }
3810
0
    }
3811
    /*
3812
     * Ok now if this chunk is marked to drop it we can clean up
3813
     * the chunk, advance our peer ack point and we can check
3814
     * the next chunk.
3815
     */
3816
0
    if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3817
0
        (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3818
      /* advance PeerAckPoint goes forward */
3819
0
      if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3820
0
        asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3821
0
        a_adv = tp1;
3822
0
      } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3823
        /* No update but we do save the chk */
3824
0
        a_adv = tp1;
3825
0
      }
3826
0
    } else {
3827
      /*
3828
       * If it is still in RESEND we can advance no
3829
       * further
3830
       */
3831
0
      break;
3832
0
    }
3833
0
  }
3834
0
  return (a_adv);
3835
0
}
3836
3837
static int
3838
sctp_fs_audit(struct sctp_association *asoc)
3839
0
{
3840
0
  struct sctp_tmit_chunk *chk;
3841
0
  int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3842
0
  int ret;
3843
#ifndef INVARIANTS
3844
  int entry_flight, entry_cnt;
3845
#endif
3846
3847
0
  ret = 0;
3848
#ifndef INVARIANTS
3849
  entry_flight = asoc->total_flight;
3850
  entry_cnt = asoc->total_flight_count;
3851
#endif
3852
0
  if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3853
0
    return (0);
3854
3855
0
  TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3856
0
    if (chk->sent < SCTP_DATAGRAM_RESEND) {
3857
0
      SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3858
0
                  chk->rec.data.tsn,
3859
0
                  chk->send_size,
3860
0
                  chk->snd_count);
3861
0
      inflight++;
3862
0
    } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3863
0
      resend++;
3864
0
    } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3865
0
      inbetween++;
3866
0
    } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3867
0
      above++;
3868
0
    } else {
3869
0
      acked++;
3870
0
    }
3871
0
  }
3872
3873
0
  if ((inflight > 0) || (inbetween > 0)) {
3874
0
#ifdef INVARIANTS
3875
0
    panic("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d",
3876
0
          inflight, inbetween, resend, above, acked);
3877
#else
3878
    SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3879
                entry_flight, entry_cnt);
3880
    SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3881
                inflight, inbetween, resend, above, acked);
3882
    ret = 1;
3883
#endif
3884
0
  }
3885
0
  return (ret);
3886
0
}
3887
3888
static void
3889
sctp_window_probe_recovery(struct sctp_tcb *stcb,
3890
                           struct sctp_association *asoc,
3891
                           struct sctp_tmit_chunk *tp1)
3892
0
{
3893
0
  tp1->window_probe = 0;
3894
0
  if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3895
    /* TSN's skipped we do NOT move back. */
3896
0
    sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3897
0
             tp1->whoTo ? tp1->whoTo->flight_size : 0,
3898
0
             tp1->book_size,
3899
0
             (uint32_t)(uintptr_t)tp1->whoTo,
3900
0
             tp1->rec.data.tsn);
3901
0
    return;
3902
0
  }
3903
  /* First setup this by shrinking flight */
3904
0
  if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3905
0
    (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3906
0
                       tp1);
3907
0
  }
3908
0
  sctp_flight_size_decrease(tp1);
3909
0
  sctp_total_flight_decrease(stcb, tp1);
3910
  /* Now mark for resend */
3911
0
  tp1->sent = SCTP_DATAGRAM_RESEND;
3912
0
  sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3913
3914
0
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3915
0
    sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3916
0
             tp1->whoTo->flight_size,
3917
0
             tp1->book_size,
3918
0
             (uint32_t)(uintptr_t)tp1->whoTo,
3919
0
             tp1->rec.data.tsn);
3920
0
  }
3921
0
}
3922
3923
void
3924
sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3925
                         uint32_t rwnd, int *abort_now, int ecne_seen)
3926
613
{
3927
613
  struct sctp_nets *net;
3928
613
  struct sctp_association *asoc;
3929
613
  struct sctp_tmit_chunk *tp1, *tp2;
3930
613
  uint32_t old_rwnd;
3931
613
  int win_probe_recovery = 0;
3932
613
  int win_probe_recovered = 0;
3933
613
  int j, done_once = 0;
3934
613
  int rto_ok = 1;
3935
613
  uint32_t send_s;
3936
3937
613
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3938
0
    sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3939
0
                   rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3940
0
  }
3941
613
  SCTP_TCB_LOCK_ASSERT(stcb);
3942
#ifdef SCTP_ASOCLOG_OF_TSNS
3943
  stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3944
  stcb->asoc.cumack_log_at++;
3945
  if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3946
    stcb->asoc.cumack_log_at = 0;
3947
  }
3948
#endif
3949
613
  asoc = &stcb->asoc;
3950
613
  old_rwnd = asoc->peers_rwnd;
3951
613
  if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3952
    /* old ack */
3953
458
    return;
3954
458
  } else if (asoc->last_acked_seq == cumack) {
3955
    /* Window update sack */
3956
0
    asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3957
0
                (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3958
0
    if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3959
      /* SWS sender side engages */
3960
0
      asoc->peers_rwnd = 0;
3961
0
    }
3962
0
    if (asoc->peers_rwnd > old_rwnd) {
3963
0
      goto again;
3964
0
    }
3965
0
    return;
3966
0
  }
3967
3968
  /* First setup for CC stuff */
3969
408
  TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3970
408
    if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3971
      /* Drag along the window_tsn for cwr's */
3972
408
      net->cwr_window_tsn = cumack;
3973
408
    }
3974
408
    net->prev_cwnd = net->cwnd;
3975
408
    net->net_ack = 0;
3976
408
    net->net_ack2 = 0;
3977
3978
    /*
3979
     * CMT: Reset CUC and Fast recovery algo variables before
3980
     * SACK processing
3981
     */
3982
408
    net->new_pseudo_cumack = 0;
3983
408
    net->will_exit_fast_recovery = 0;
3984
408
    if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3985
0
      (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
3986
0
    }
3987
408
  }
3988
155
  if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3989
71
    tp1 = TAILQ_LAST(&asoc->sent_queue,
3990
71
         sctpchunk_listhead);
3991
71
    send_s = tp1->rec.data.tsn + 1;
3992
84
  } else {
3993
84
    send_s = asoc->sending_seq;
3994
84
  }
3995
155
  if (SCTP_TSN_GE(cumack, send_s)) {
3996
155
    struct mbuf *op_err;
3997
155
    char msg[SCTP_DIAG_INFO_LEN];
3998
3999
155
    *abort_now = 1;
4000
    /* XXX */
4001
155
    SCTP_SNPRINTF(msg, sizeof(msg),
4002
155
                  "Cum ack %8.8x greater or equal than TSN %8.8x",
4003
155
                  cumack, send_s);
4004
155
    op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4005
155
    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4006
155
    sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4007
155
    return;
4008
155
  }
4009
0
  asoc->this_sack_highest_gap = cumack;
4010
0
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4011
0
    sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4012
0
             stcb->asoc.overall_error_count,
4013
0
             0,
4014
0
             SCTP_FROM_SCTP_INDATA,
4015
0
             __LINE__);
4016
0
  }
4017
0
  stcb->asoc.overall_error_count = 0;
4018
0
  if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4019
    /* process the new consecutive TSN first */
4020
0
    TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4021
0
      if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4022
0
        if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4023
0
          SCTP_PRINTF("Warning, an unsent is now acked?\n");
4024
0
        }
4025
0
        if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4026
          /*
4027
           * If it is less than ACKED, it is
4028
           * now no-longer in flight. Higher
4029
           * values may occur during marking
4030
           */
4031
0
          if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4032
0
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4033
0
              sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4034
0
                       tp1->whoTo->flight_size,
4035
0
                       tp1->book_size,
4036
0
                       (uint32_t)(uintptr_t)tp1->whoTo,
4037
0
                       tp1->rec.data.tsn);
4038
0
            }
4039
0
            sctp_flight_size_decrease(tp1);
4040
0
            if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4041
0
              (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
4042
0
                                 tp1);
4043
0
            }
4044
            /* sa_ignore NO_NULL_CHK */
4045
0
            sctp_total_flight_decrease(stcb, tp1);
4046
0
          }
4047
0
          tp1->whoTo->net_ack += tp1->send_size;
4048
0
          if (tp1->snd_count < 2) {
4049
            /*
4050
             * True non-retransmitted
4051
             * chunk
4052
             */
4053
0
            tp1->whoTo->net_ack2 +=
4054
0
              tp1->send_size;
4055
4056
            /* update RTO too? */
4057
0
            if (tp1->do_rtt) {
4058
0
              if (rto_ok &&
4059
0
                  sctp_calculate_rto(stcb,
4060
0
                         &stcb->asoc,
4061
0
                         tp1->whoTo,
4062
0
                         &tp1->sent_rcv_time,
4063
0
                         SCTP_RTT_FROM_DATA)) {
4064
0
                rto_ok = 0;
4065
0
              }
4066
0
              if (tp1->whoTo->rto_needed == 0) {
4067
0
                tp1->whoTo->rto_needed = 1;
4068
0
              }
4069
0
              tp1->do_rtt = 0;
4070
0
            }
4071
0
          }
4072
          /*
4073
           * CMT: CUCv2 algorithm. From the
4074
           * cumack'd TSNs, for each TSN being
4075
           * acked for the first time, set the
4076
           * following variables for the
4077
           * corresp destination.
4078
           * new_pseudo_cumack will trigger a
4079
           * cwnd update.
4080
           * find_(rtx_)pseudo_cumack will
4081
           * trigger search for the next
4082
           * expected (rtx-)pseudo-cumack.
4083
           */
4084
0
          tp1->whoTo->new_pseudo_cumack = 1;
4085
0
          tp1->whoTo->find_pseudo_cumack = 1;
4086
0
          tp1->whoTo->find_rtx_pseudo_cumack = 1;
4087
0
          if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4088
            /* sa_ignore NO_NULL_CHK */
4089
0
            sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4090
0
          }
4091
0
        }
4092
0
        if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4093
0
          sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4094
0
        }
4095
0
        if (tp1->rec.data.chunk_was_revoked) {
4096
          /* deflate the cwnd */
4097
0
          tp1->whoTo->cwnd -= tp1->book_size;
4098
0
          tp1->rec.data.chunk_was_revoked = 0;
4099
0
        }
4100
0
        if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4101
0
          if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4102
0
            asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4103
0
#ifdef INVARIANTS
4104
0
          } else {
4105
0
            panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4106
0
#endif
4107
0
          }
4108
0
        }
4109
0
        if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4110
0
            (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4111
0
            TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4112
0
          asoc->trigger_reset = 1;
4113
0
        }
4114
0
        TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4115
0
        if (tp1->data) {
4116
          /* sa_ignore NO_NULL_CHK */
4117
0
          sctp_free_bufspace(stcb, asoc, tp1, 1);
4118
0
          sctp_m_freem(tp1->data);
4119
0
          tp1->data = NULL;
4120
0
        }
4121
0
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4122
0
          sctp_log_sack(asoc->last_acked_seq,
4123
0
                  cumack,
4124
0
                  tp1->rec.data.tsn,
4125
0
                  0,
4126
0
                  0,
4127
0
                  SCTP_LOG_FREE_SENT);
4128
0
        }
4129
0
        asoc->sent_queue_cnt--;
4130
0
        sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4131
0
      } else {
4132
0
        break;
4133
0
      }
4134
0
    }
4135
0
  }
4136
0
#if defined(__Userspace__)
4137
0
  if (stcb->sctp_ep->recv_callback) {
4138
0
    if (stcb->sctp_socket) {
4139
0
      uint32_t inqueue_bytes, sb_free_now;
4140
0
      struct sctp_inpcb *inp;
4141
4142
0
      inp = stcb->sctp_ep;
4143
0
      inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
4144
0
      sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
4145
4146
      /* check if the amount free in the send socket buffer crossed the threshold */
4147
0
      if (inp->send_callback &&
4148
0
          (((inp->send_sb_threshold > 0) &&
4149
0
            (sb_free_now >= inp->send_sb_threshold) &&
4150
0
            (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
4151
0
           (inp->send_sb_threshold == 0))) {
4152
0
        atomic_add_int(&stcb->asoc.refcnt, 1);
4153
0
        SCTP_TCB_UNLOCK(stcb);
4154
0
        inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info);
4155
0
        SCTP_TCB_LOCK(stcb);
4156
0
        atomic_subtract_int(&stcb->asoc.refcnt, 1);
4157
0
      }
4158
0
    }
4159
0
  } else if (stcb->sctp_socket) {
4160
#else
4161
  /* sa_ignore NO_NULL_CHK */
4162
  if (stcb->sctp_socket) {
4163
#endif
4164
#if defined(__APPLE__) && !defined(__Userspace__)
4165
    struct socket *so;
4166
4167
#endif
4168
0
    SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4169
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4170
      /* sa_ignore NO_NULL_CHK */
4171
0
      sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4172
0
    }
4173
#if defined(__APPLE__) && !defined(__Userspace__)
4174
    so = SCTP_INP_SO(stcb->sctp_ep);
4175
    atomic_add_int(&stcb->asoc.refcnt, 1);
4176
    SCTP_TCB_UNLOCK(stcb);
4177
    SCTP_SOCKET_LOCK(so, 1);
4178
    SCTP_TCB_LOCK(stcb);
4179
    atomic_subtract_int(&stcb->asoc.refcnt, 1);
4180
    if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4181
      /* assoc was freed while we were unlocked */
4182
      SCTP_SOCKET_UNLOCK(so, 1);
4183
      return;
4184
    }
4185
#endif
4186
0
    sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4187
#if defined(__APPLE__) && !defined(__Userspace__)
4188
    SCTP_SOCKET_UNLOCK(so, 1);
4189
#endif
4190
0
  } else {
4191
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4192
0
      sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4193
0
    }
4194
0
  }
4195
4196
  /* JRS - Use the congestion control given in the CC module */
4197
0
  if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4198
0
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4199
0
      if (net->net_ack2 > 0) {
4200
        /*
4201
         * Karn's rule applies to clearing error count, this
4202
         * is optional.
4203
         */
4204
0
        net->error_count = 0;
4205
0
        if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
4206
          /* addr came good */
4207
0
          net->dest_state |= SCTP_ADDR_REACHABLE;
4208
0
          sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4209
0
                          0, (void *)net, SCTP_SO_NOT_LOCKED);
4210
0
        }
4211
0
        if (net == stcb->asoc.primary_destination) {
4212
0
          if (stcb->asoc.alternate) {
4213
            /* release the alternate, primary is good */
4214
0
            sctp_free_remote_addr(stcb->asoc.alternate);
4215
0
            stcb->asoc.alternate = NULL;
4216
0
          }
4217
0
        }
4218
0
        if (net->dest_state & SCTP_ADDR_PF) {
4219
0
          net->dest_state &= ~SCTP_ADDR_PF;
4220
0
          sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4221
0
                          stcb->sctp_ep, stcb, net,
4222
0
                          SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4223
0
          sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4224
0
          asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4225
          /* Done with this net */
4226
0
          net->net_ack = 0;
4227
0
        }
4228
        /* restore any doubled timers */
4229
0
        net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4230
0
        if (net->RTO < stcb->asoc.minrto) {
4231
0
          net->RTO = stcb->asoc.minrto;
4232
0
        }
4233
0
        if (net->RTO > stcb->asoc.maxrto) {
4234
0
          net->RTO = stcb->asoc.maxrto;
4235
0
        }
4236
0
      }
4237
0
    }
4238
0
    asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4239
0
  }
4240
0
  asoc->last_acked_seq = cumack;
4241
4242
0
  if (TAILQ_EMPTY(&asoc->sent_queue)) {
4243
    /* nothing left in-flight */
4244
0
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4245
0
      net->flight_size = 0;
4246
0
      net->partial_bytes_acked = 0;
4247
0
    }
4248
0
    asoc->total_flight = 0;
4249
0
    asoc->total_flight_count = 0;
4250
0
  }
4251
4252
  /* RWND update */
4253
0
  asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4254
0
              (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4255
0
  if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4256
    /* SWS sender side engages */
4257
0
    asoc->peers_rwnd = 0;
4258
0
  }
4259
0
  if (asoc->peers_rwnd > old_rwnd) {
4260
0
    win_probe_recovery = 1;
4261
0
  }
4262
  /* Now assure a timer where data is queued at */
4263
0
again:
4264
0
  j = 0;
4265
0
  TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4266
0
    if (win_probe_recovery && (net->window_probe)) {
4267
0
      win_probe_recovered = 1;
4268
      /*
4269
       * Find first chunk that was used with window probe
4270
       * and clear the sent
4271
       */
4272
      /* sa_ignore FREED_MEMORY */
4273
0
      TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4274
0
        if (tp1->window_probe) {
4275
          /* move back to data send queue */
4276
0
          sctp_window_probe_recovery(stcb, asoc, tp1);
4277
0
          break;
4278
0
        }
4279
0
      }
4280
0
    }
4281
0
    if (net->flight_size) {
4282
0
      j++;
4283
0
      sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4284
0
      if (net->window_probe) {
4285
0
        net->window_probe = 0;
4286
0
      }
4287
0
    } else {
4288
0
      if (net->window_probe) {
4289
        /* In window probes we must assure a timer is still running there */
4290
0
        net->window_probe = 0;
4291
0
        if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4292
0
          sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4293
0
        }
4294
0
      } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4295
0
        sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4296
0
                        stcb, net,
4297
0
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4298
0
      }
4299
0
    }
4300
0
  }
4301
0
  if ((j == 0) &&
4302
0
      (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4303
0
      (asoc->sent_queue_retran_cnt == 0) &&
4304
0
      (win_probe_recovered == 0) &&
4305
0
      (done_once == 0)) {
4306
    /* huh, this should not happen unless all packets
4307
     * are PR-SCTP and marked to skip of course.
4308
     */
4309
0
    if (sctp_fs_audit(asoc)) {
4310
0
      TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4311
0
        net->flight_size = 0;
4312
0
      }
4313
0
      asoc->total_flight = 0;
4314
0
      asoc->total_flight_count = 0;
4315
0
      asoc->sent_queue_retran_cnt = 0;
4316
0
      TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4317
0
        if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4318
0
          sctp_flight_size_increase(tp1);
4319
0
          sctp_total_flight_increase(stcb, tp1);
4320
0
        } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4321
0
          sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4322
0
        }
4323
0
      }
4324
0
    }
4325
0
    done_once = 1;
4326
0
    goto again;
4327
0
  }
4328
  /**********************************/
4329
  /* Now what about shutdown issues */
4330
  /**********************************/
4331
0
  if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4332
    /* nothing left on sendqueue.. consider done */
4333
    /* clean up */
4334
0
    if ((asoc->stream_queue_cnt == 1) &&
4335
0
        ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4336
0
         (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4337
0
        ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
4338
0
      SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4339
0
    }
4340
0
    if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4341
0
         (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4342
0
        (asoc->stream_queue_cnt == 1) &&
4343
0
        (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4344
0
      struct mbuf *op_err;
4345
4346
0
      *abort_now = 1;
4347
      /* XXX */
4348
0
      op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4349
0
      stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
4350
0
      sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4351
0
      return;
4352
0
    }
4353
0
    if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4354
0
        (asoc->stream_queue_cnt == 0)) {
4355
0
      struct sctp_nets *netp;
4356
4357
0
      if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4358
0
          (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4359
0
        SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4360
0
      }
4361
0
      SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4362
0
      sctp_stop_timers_for_shutdown(stcb);
4363
0
      if (asoc->alternate) {
4364
0
        netp = asoc->alternate;
4365
0
      } else {
4366
0
        netp = asoc->primary_destination;
4367
0
      }
4368
0
      sctp_send_shutdown(stcb, netp);
4369
0
      sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4370
0
           stcb->sctp_ep, stcb, netp);
4371
0
      sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4372
0
           stcb->sctp_ep, stcb, NULL);
4373
0
    } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4374
0
         (asoc->stream_queue_cnt == 0)) {
4375
0
      struct sctp_nets *netp;
4376
4377
0
      SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4378
0
      SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4379
0
      sctp_stop_timers_for_shutdown(stcb);
4380
0
      if (asoc->alternate) {
4381
0
        netp = asoc->alternate;
4382
0
      } else {
4383
0
        netp = asoc->primary_destination;
4384
0
      }
4385
0
      sctp_send_shutdown_ack(stcb, netp);
4386
0
      sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4387
0
           stcb->sctp_ep, stcb, netp);
4388
0
    }
4389
0
  }
4390
  /*********************************************/
4391
  /* Here we perform PR-SCTP procedures        */
4392
  /* (section 4.2)                             */
4393
  /*********************************************/
4394
  /* C1. update advancedPeerAckPoint */
4395
0
  if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4396
0
    asoc->advanced_peer_ack_point = cumack;
4397
0
  }
4398
  /* PR-Sctp issues need to be addressed too */
4399
0
  if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4400
0
    struct sctp_tmit_chunk *lchk;
4401
0
    uint32_t old_adv_peer_ack_point;
4402
4403
0
    old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4404
0
    lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4405
    /* C3. See if we need to send a Fwd-TSN */
4406
0
    if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4407
      /*
4408
       * ISSUE with ECN, see FWD-TSN processing.
4409
       */
4410
0
      if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4411
0
        send_forward_tsn(stcb, asoc);
4412
0
      } else if (lchk) {
4413
        /* try to FR fwd-tsn's that get lost too */
4414
0
        if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4415
0
          send_forward_tsn(stcb, asoc);
4416
0
        }
4417
0
      }
4418
0
    }
4419
0
    for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4420
0
      if (lchk->whoTo != NULL) {
4421
0
        break;
4422
0
      }
4423
0
    }
4424
0
    if (lchk != NULL) {
4425
      /* Assure a timer is up */
4426
0
      sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4427
0
                       stcb->sctp_ep, stcb, lchk->whoTo);
4428
0
    }
4429
0
  }
4430
0
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4431
0
    sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4432
0
             rwnd,
4433
0
             stcb->asoc.peers_rwnd,
4434
0
             stcb->asoc.total_flight,
4435
0
             stcb->asoc.total_output_queue_size);
4436
0
  }
4437
0
}
4438
4439
void
4440
sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4441
                 struct sctp_tcb *stcb,
4442
                 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4443
                 int *abort_now, uint8_t flags,
4444
                 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4445
790
{
4446
790
  struct sctp_association *asoc;
4447
790
  struct sctp_tmit_chunk *tp1, *tp2;
4448
790
  uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4449
790
  uint16_t wake_him = 0;
4450
790
  uint32_t send_s = 0;
4451
790
  long j;
4452
790
  int accum_moved = 0;
4453
790
  int will_exit_fast_recovery = 0;
4454
790
  uint32_t a_rwnd, old_rwnd;
4455
790
  int win_probe_recovery = 0;
4456
790
  int win_probe_recovered = 0;
4457
790
  struct sctp_nets *net = NULL;
4458
790
  int done_once;
4459
790
  int rto_ok = 1;
4460
790
  uint8_t reneged_all = 0;
4461
790
  uint8_t cmt_dac_flag;
4462
  /*
4463
   * we take any chance we can to service our queues since we cannot
4464
   * get awoken when the socket is read from :<
4465
   */
4466
  /*
4467
   * Now perform the actual SACK handling: 1) Verify that it is not an
4468
   * old sack, if so discard. 2) If there is nothing left in the send
4469
   * queue (cum-ack is equal to last acked) then you have a duplicate
4470
   * too, update any rwnd change and verify no timers are running.
4471
   * then return. 3) Process any new consecutive data i.e. cum-ack
4472
   * moved process these first and note that it moved. 4) Process any
4473
   * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4474
   * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4475
   * sync up flightsizes and things, stop all timers and also check
4476
   * for shutdown_pending state. If so then go ahead and send off the
4477
   * shutdown. If in shutdown recv, send off the shutdown-ack and
4478
   * start that timer, Ret. 9) Strike any non-acked things and do FR
4479
   * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4480
   * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4481
   * if in shutdown_recv state.
4482
   */
4483
790
  SCTP_TCB_LOCK_ASSERT(stcb);
4484
  /* CMT DAC algo */
4485
790
  this_sack_lowest_newack = 0;
4486
790
  SCTP_STAT_INCR(sctps_slowpath_sack);
4487
790
  last_tsn = cum_ack;
4488
790
  cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4489
#ifdef SCTP_ASOCLOG_OF_TSNS
4490
  stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4491
  stcb->asoc.cumack_log_at++;
4492
  if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4493
    stcb->asoc.cumack_log_at = 0;
4494
  }
4495
#endif
4496
790
  a_rwnd = rwnd;
4497
4498
790
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4499
0
    sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4500
0
                   rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4501
0
  }
4502
4503
790
  old_rwnd = stcb->asoc.peers_rwnd;
4504
790
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4505
0
    sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4506
0
                   stcb->asoc.overall_error_count,
4507
0
                   0,
4508
0
                   SCTP_FROM_SCTP_INDATA,
4509
0
                   __LINE__);
4510
0
  }
4511
790
  stcb->asoc.overall_error_count = 0;
4512
790
  asoc = &stcb->asoc;
4513
790
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4514
0
    sctp_log_sack(asoc->last_acked_seq,
4515
0
                  cum_ack,
4516
0
                  0,
4517
0
                  num_seg,
4518
0
                  num_dup,
4519
0
                  SCTP_LOG_NEW_SACK);
4520
0
  }
4521
790
  if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4522
0
    uint16_t i;
4523
0
    uint32_t *dupdata, dblock;
4524
4525
0
    for (i = 0; i < num_dup; i++) {
4526
0
      dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4527
0
                                          sizeof(uint32_t), (uint8_t *)&dblock);
4528
0
      if (dupdata == NULL) {
4529
0
        break;
4530
0
      }
4531
0
      sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4532
0
    }
4533
0
  }
4534
  /* reality check */
4535
790
  if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4536
220
    tp1 = TAILQ_LAST(&asoc->sent_queue,
4537
220
         sctpchunk_listhead);
4538
220
    send_s = tp1->rec.data.tsn + 1;
4539
570
  } else {
4540
570
    tp1 = NULL;
4541
570
    send_s = asoc->sending_seq;
4542
570
  }
4543
790
  if (SCTP_TSN_GE(cum_ack, send_s)) {
4544
65
    struct mbuf *op_err;
4545
65
    char msg[SCTP_DIAG_INFO_LEN];
4546
4547
    /*
4548
     * no way, we have not even sent this TSN out yet.
4549
     * Peer is hopelessly messed up with us.
4550
     */
4551
65
    SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4552
65
          cum_ack, send_s);
4553
65
    if (tp1) {
4554
1
      SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4555
1
            tp1->rec.data.tsn, (void *)tp1);
4556
1
    }
4557
65
  hopeless_peer:
4558
65
    *abort_now = 1;
4559
    /* XXX */
4560
65
    SCTP_SNPRINTF(msg, sizeof(msg),
4561
65
                  "Cum ack %8.8x greater or equal than TSN %8.8x",
4562
65
                  cum_ack, send_s);
4563
65
    op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4564
65
    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29;
4565
65
    sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4566
65
    return;
4567
65
  }
4568
  /**********************/
4569
  /* 1) check the range */
4570
  /**********************/
4571
725
  if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4572
    /* acking something behind */
4573
725
    return;
4574
725
  }
4575
4576
  /* update the Rwnd of the peer */
4577
0
  if (TAILQ_EMPTY(&asoc->sent_queue) &&
4578
0
      TAILQ_EMPTY(&asoc->send_queue) &&
4579
0
      (asoc->stream_queue_cnt == 0)) {
4580
    /* nothing left on send/sent and strmq */
4581
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4582
0
      sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4583
0
                        asoc->peers_rwnd, 0, 0, a_rwnd);
4584
0
    }
4585
0
    asoc->peers_rwnd = a_rwnd;
4586
0
    if (asoc->sent_queue_retran_cnt) {
4587
0
      asoc->sent_queue_retran_cnt = 0;
4588
0
    }
4589
0
    if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4590
      /* SWS sender side engages */
4591
0
      asoc->peers_rwnd = 0;
4592
0
    }
4593
    /* stop any timers */
4594
0
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4595
0
      sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4596
0
                      stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4597
0
      net->partial_bytes_acked = 0;
4598
0
      net->flight_size = 0;
4599
0
    }
4600
0
    asoc->total_flight = 0;
4601
0
    asoc->total_flight_count = 0;
4602
0
    return;
4603
0
  }
4604
  /*
4605
   * We init netAckSz and netAckSz2 to 0. These are used to track 2
4606
   * things. The total byte count acked is tracked in netAckSz AND
4607
   * netAck2 is used to track the total bytes acked that are un-
4608
   * ambiguous and were never retransmitted. We track these on a per
4609
   * destination address basis.
4610
   */
4611
0
  TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4612
0
    if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4613
      /* Drag along the window_tsn for cwr's */
4614
0
      net->cwr_window_tsn = cum_ack;
4615
0
    }
4616
0
    net->prev_cwnd = net->cwnd;
4617
0
    net->net_ack = 0;
4618
0
    net->net_ack2 = 0;
4619
4620
    /*
4621
     * CMT: Reset CUC and Fast recovery algo variables before
4622
     * SACK processing
4623
     */
4624
0
    net->new_pseudo_cumack = 0;
4625
0
    net->will_exit_fast_recovery = 0;
4626
0
    if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4627
0
      (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
4628
0
    }
4629
4630
    /*
4631
     * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4632
     * to be greater than the cumack. Also reset saw_newack to 0
4633
     * for all dests.
4634
     */
4635
0
    net->saw_newack = 0;
4636
0
    net->this_sack_highest_newack = last_tsn;
4637
0
  }
4638
  /* process the new consecutive TSN first */
4639
0
  TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4640
0
    if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4641
0
      if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4642
0
        accum_moved = 1;
4643
0
        if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4644
          /*
4645
           * If it is less than ACKED, it is
4646
           * now no-longer in flight. Higher
4647
           * values may occur during marking
4648
           */
4649
0
          if ((tp1->whoTo->dest_state &
4650
0
               SCTP_ADDR_UNCONFIRMED) &&
4651
0
              (tp1->snd_count < 2)) {
4652
            /*
4653
             * If there was no retran
4654
             * and the address is
4655
             * un-confirmed and we sent
4656
             * there and are now
4657
             * sacked.. its confirmed,
4658
             * mark it so.
4659
             */
4660
0
            tp1->whoTo->dest_state &=
4661
0
              ~SCTP_ADDR_UNCONFIRMED;
4662
0
          }
4663
0
          if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4664
0
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4665
0
              sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4666
0
                             tp1->whoTo->flight_size,
4667
0
                             tp1->book_size,
4668
0
                             (uint32_t)(uintptr_t)tp1->whoTo,
4669
0
                             tp1->rec.data.tsn);
4670
0
            }
4671
0
            sctp_flight_size_decrease(tp1);
4672
0
            sctp_total_flight_decrease(stcb, tp1);
4673
0
            if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4674
0
              (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
4675
0
                                 tp1);
4676
0
            }
4677
0
          }
4678
0
          tp1->whoTo->net_ack += tp1->send_size;
4679
4680
          /* CMT SFR and DAC algos */
4681
0
          this_sack_lowest_newack = tp1->rec.data.tsn;
4682
0
          tp1->whoTo->saw_newack = 1;
4683
4684
0
          if (tp1->snd_count < 2) {
4685
            /*
4686
             * True non-retransmitted
4687
             * chunk
4688
             */
4689
0
            tp1->whoTo->net_ack2 +=
4690
0
              tp1->send_size;
4691
4692
            /* update RTO too? */
4693
0
            if (tp1->do_rtt) {
4694
0
              if (rto_ok &&
4695
0
                  sctp_calculate_rto(stcb,
4696
0
                         &stcb->asoc,
4697
0
                         tp1->whoTo,
4698
0
                         &tp1->sent_rcv_time,
4699
0
                         SCTP_RTT_FROM_DATA)) {
4700
0
                rto_ok = 0;
4701
0
              }
4702
0
              if (tp1->whoTo->rto_needed == 0) {
4703
0
                tp1->whoTo->rto_needed = 1;
4704
0
              }
4705
0
              tp1->do_rtt = 0;
4706
0
            }
4707
0
          }
4708
          /*
4709
           * CMT: CUCv2 algorithm. From the
4710
           * cumack'd TSNs, for each TSN being
4711
           * acked for the first time, set the
4712
           * following variables for the
4713
           * corresp destination.
4714
           * new_pseudo_cumack will trigger a
4715
           * cwnd update.
4716
           * find_(rtx_)pseudo_cumack will
4717
           * trigger search for the next
4718
           * expected (rtx-)pseudo-cumack.
4719
           */
4720
0
          tp1->whoTo->new_pseudo_cumack = 1;
4721
0
          tp1->whoTo->find_pseudo_cumack = 1;
4722
0
          tp1->whoTo->find_rtx_pseudo_cumack = 1;
4723
0
          if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4724
0
            sctp_log_sack(asoc->last_acked_seq,
4725
0
                          cum_ack,
4726
0
                          tp1->rec.data.tsn,
4727
0
                          0,
4728
0
                          0,
4729
0
                          SCTP_LOG_TSN_ACKED);
4730
0
          }
4731
0
          if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4732
0
            sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4733
0
          }
4734
0
        }
4735
0
        if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4736
0
          sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4737
#ifdef SCTP_AUDITING_ENABLED
4738
          sctp_audit_log(0xB3,
4739
                         (asoc->sent_queue_retran_cnt & 0x000000ff));
4740
#endif
4741
0
        }
4742
0
        if (tp1->rec.data.chunk_was_revoked) {
4743
          /* deflate the cwnd */
4744
0
          tp1->whoTo->cwnd -= tp1->book_size;
4745
0
          tp1->rec.data.chunk_was_revoked = 0;
4746
0
        }
4747
0
        if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4748
0
          tp1->sent = SCTP_DATAGRAM_ACKED;
4749
0
        }
4750
0
      }
4751
0
    } else {
4752
0
      break;
4753
0
    }
4754
0
  }
4755
0
  biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4756
  /* always set this up to cum-ack */
4757
0
  asoc->this_sack_highest_gap = last_tsn;
4758
4759
0
  if ((num_seg > 0) || (num_nr_seg > 0)) {
4760
    /*
4761
     * thisSackHighestGap will increase while handling NEW
4762
     * segments this_sack_highest_newack will increase while
4763
     * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4764
     * used for CMT DAC algo. saw_newack will also change.
4765
     */
4766
0
    if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4767
0
      &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4768
0
      num_seg, num_nr_seg, &rto_ok)) {
4769
0
      wake_him++;
4770
0
    }
4771
    /*
4772
     * validate the biggest_tsn_acked in the gap acks if
4773
     * strict adherence is wanted.
4774
     */
4775
0
    if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4776
      /*
4777
       * peer is either confused or we are under
4778
       * attack. We must abort.
4779
       */
4780
0
      SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4781
0
            biggest_tsn_acked, send_s);
4782
0
      goto hopeless_peer;
4783
0
    }
4784
0
  }
4785
  /*******************************************/
4786
  /* cancel ALL T3-send timer if accum moved */
4787
  /*******************************************/
4788
0
  if (asoc->sctp_cmt_on_off > 0) {
4789
0
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4790
0
      if (net->new_pseudo_cumack)
4791
0
        sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4792
0
                        stcb, net,
4793
0
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4794
0
    }
4795
0
  } else {
4796
0
    if (accum_moved) {
4797
0
      TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4798
0
        sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4799
0
                        stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4800
0
      }
4801
0
    }
4802
0
  }
4803
  /********************************************/
4804
  /* drop the acked chunks from the sentqueue */
4805
  /********************************************/
4806
0
  asoc->last_acked_seq = cum_ack;
4807
4808
0
  TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4809
0
    if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4810
0
      break;
4811
0
    }
4812
0
    if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4813
0
      if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4814
0
        asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4815
0
#ifdef INVARIANTS
4816
0
      } else {
4817
0
        panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4818
0
#endif
4819
0
      }
4820
0
    }
4821
0
    if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4822
0
        (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4823
0
        TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4824
0
      asoc->trigger_reset = 1;
4825
0
    }
4826
0
    TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4827
0
    if (PR_SCTP_ENABLED(tp1->flags)) {
4828
0
      if (asoc->pr_sctp_cnt != 0)
4829
0
        asoc->pr_sctp_cnt--;
4830
0
    }
4831
0
    asoc->sent_queue_cnt--;
4832
0
    if (tp1->data) {
4833
      /* sa_ignore NO_NULL_CHK */
4834
0
      sctp_free_bufspace(stcb, asoc, tp1, 1);
4835
0
      sctp_m_freem(tp1->data);
4836
0
      tp1->data = NULL;
4837
0
      if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4838
0
        asoc->sent_queue_cnt_removeable--;
4839
0
      }
4840
0
    }
4841
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4842
0
      sctp_log_sack(asoc->last_acked_seq,
4843
0
                    cum_ack,
4844
0
                    tp1->rec.data.tsn,
4845
0
                    0,
4846
0
                    0,
4847
0
                    SCTP_LOG_FREE_SENT);
4848
0
    }
4849
0
    sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4850
0
    wake_him++;
4851
0
  }
4852
0
  if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4853
0
#ifdef INVARIANTS
4854
0
    panic("Warning flight size is positive and should be 0");
4855
#else
4856
    SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4857
                asoc->total_flight);
4858
#endif
4859
0
    asoc->total_flight = 0;
4860
0
  }
4861
4862
0
#if defined(__Userspace__)
4863
0
  if (stcb->sctp_ep->recv_callback) {
4864
0
    if (stcb->sctp_socket) {
4865
0
      uint32_t inqueue_bytes, sb_free_now;
4866
0
      struct sctp_inpcb *inp;
4867
4868
0
      inp = stcb->sctp_ep;
4869
0
      inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
4870
0
      sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
4871
4872
      /* check if the amount free in the send socket buffer crossed the threshold */
4873
0
      if (inp->send_callback &&
4874
0
         (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) ||
4875
0
          (inp->send_sb_threshold == 0))) {
4876
0
        atomic_add_int(&stcb->asoc.refcnt, 1);
4877
0
        SCTP_TCB_UNLOCK(stcb);
4878
0
        inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info);
4879
0
        SCTP_TCB_LOCK(stcb);
4880
0
        atomic_subtract_int(&stcb->asoc.refcnt, 1);
4881
0
      }
4882
0
    }
4883
0
  } else if ((wake_him) && (stcb->sctp_socket)) {
4884
#else
4885
  /* sa_ignore NO_NULL_CHK */
4886
  if ((wake_him) && (stcb->sctp_socket)) {
4887
#endif
4888
#if defined(__APPLE__) && !defined(__Userspace__)
4889
    struct socket *so;
4890
4891
#endif
4892
0
    SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4893
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4894
0
      sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4895
0
    }
4896
#if defined(__APPLE__) && !defined(__Userspace__)
4897
    so = SCTP_INP_SO(stcb->sctp_ep);
4898
    atomic_add_int(&stcb->asoc.refcnt, 1);
4899
    SCTP_TCB_UNLOCK(stcb);
4900
    SCTP_SOCKET_LOCK(so, 1);
4901
    SCTP_TCB_LOCK(stcb);
4902
    atomic_subtract_int(&stcb->asoc.refcnt, 1);
4903
    if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4904
      /* assoc was freed while we were unlocked */
4905
      SCTP_SOCKET_UNLOCK(so, 1);
4906
      return;
4907
    }
4908
#endif
4909
0
    sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4910
#if defined(__APPLE__) && !defined(__Userspace__)
4911
    SCTP_SOCKET_UNLOCK(so, 1);
4912
#endif
4913
0
  } else {
4914
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4915
0
      sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4916
0
    }
4917
0
  }
4918
4919
0
  if (asoc->fast_retran_loss_recovery && accum_moved) {
4920
0
    if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4921
      /* Setup so we will exit RFC2582 fast recovery */
4922
0
      will_exit_fast_recovery = 1;
4923
0
    }
4924
0
  }
4925
  /*
4926
   * Check for revoked fragments:
4927
   *
4928
   * if Previous sack - Had no frags then we can't have any revoked if
4929
   * Previous sack - Had frag's then - If we now have frags aka
4930
   * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4931
   * some of them. else - The peer revoked all ACKED fragments, since
4932
   * we had some before and now we have NONE.
4933
   */
4934
4935
0
  if (num_seg) {
4936
0
    sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4937
0
    asoc->saw_sack_with_frags = 1;
4938
0
  } else if (asoc->saw_sack_with_frags) {
4939
0
    int cnt_revoked = 0;
4940
4941
    /* Peer revoked all dg's marked or acked */
4942
0
    TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4943
0
      if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4944
0
        tp1->sent = SCTP_DATAGRAM_SENT;
4945
0
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4946
0
          sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4947
0
                         tp1->whoTo->flight_size,
4948
0
                         tp1->book_size,
4949
0
                         (uint32_t)(uintptr_t)tp1->whoTo,
4950
0
                         tp1->rec.data.tsn);
4951
0
        }
4952
0
        sctp_flight_size_increase(tp1);
4953
0
        sctp_total_flight_increase(stcb, tp1);
4954
0
        tp1->rec.data.chunk_was_revoked = 1;
4955
        /*
4956
         * To ensure that this increase in
4957
         * flightsize, which is artificial,
4958
         * does not throttle the sender, we
4959
         * also increase the cwnd
4960
         * artificially.
4961
         */
4962
0
        tp1->whoTo->cwnd += tp1->book_size;
4963
0
        cnt_revoked++;
4964
0
      }
4965
0
    }
4966
0
    if (cnt_revoked) {
4967
0
      reneged_all = 1;
4968
0
    }
4969
0
    asoc->saw_sack_with_frags = 0;
4970
0
  }
4971
0
  if (num_nr_seg > 0)
4972
0
    asoc->saw_sack_with_nr_frags = 1;
4973
0
  else
4974
0
    asoc->saw_sack_with_nr_frags = 0;
4975
4976
  /* JRS - Use the congestion control given in the CC module */
4977
0
  if (ecne_seen == 0) {
4978
0
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4979
0
      if (net->net_ack2 > 0) {
4980
        /*
4981
         * Karn's rule applies to clearing error count, this
4982
         * is optional.
4983
         */
4984
0
        net->error_count = 0;
4985
0
        if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
4986
          /* addr came good */
4987
0
          net->dest_state |= SCTP_ADDR_REACHABLE;
4988
0
          sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4989
0
                          0, (void *)net, SCTP_SO_NOT_LOCKED);
4990
0
        }
4991
4992
0
        if (net == stcb->asoc.primary_destination) {
4993
0
          if (stcb->asoc.alternate) {
4994
            /* release the alternate, primary is good */
4995
0
            sctp_free_remote_addr(stcb->asoc.alternate);
4996
0
            stcb->asoc.alternate = NULL;
4997
0
          }
4998
0
        }
4999
5000
0
        if (net->dest_state & SCTP_ADDR_PF) {
5001
0
          net->dest_state &= ~SCTP_ADDR_PF;
5002
0
          sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
5003
0
                          stcb->sctp_ep, stcb, net,
5004
0
                          SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5005
0
          sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
5006
0
          asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
5007
          /* Done with this net */
5008
0
          net->net_ack = 0;
5009
0
        }
5010
        /* restore any doubled timers */
5011
0
        net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
5012
0
        if (net->RTO < stcb->asoc.minrto) {
5013
0
          net->RTO = stcb->asoc.minrto;
5014
0
        }
5015
0
        if (net->RTO > stcb->asoc.maxrto) {
5016
0
          net->RTO = stcb->asoc.maxrto;
5017
0
        }
5018
0
      }
5019
0
    }
5020
0
    asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5021
0
  }
5022
5023
0
  if (TAILQ_EMPTY(&asoc->sent_queue)) {
5024
    /* nothing left in-flight */
5025
0
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5026
      /* stop all timers */
5027
0
      sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5028
0
                      stcb, net,
5029
0
                      SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
5030
0
      net->flight_size = 0;
5031
0
      net->partial_bytes_acked = 0;
5032
0
    }
5033
0
    asoc->total_flight = 0;
5034
0
    asoc->total_flight_count = 0;
5035
0
  }
5036
5037
  /**********************************/
5038
  /* Now what about shutdown issues */
5039
  /**********************************/
5040
0
  if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5041
    /* nothing left on sendqueue.. consider done */
5042
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5043
0
      sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5044
0
                        asoc->peers_rwnd, 0, 0, a_rwnd);
5045
0
    }
5046
0
    asoc->peers_rwnd = a_rwnd;
5047
0
    if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5048
      /* SWS sender side engages */
5049
0
      asoc->peers_rwnd = 0;
5050
0
    }
5051
    /* clean up */
5052
0
    if ((asoc->stream_queue_cnt == 1) &&
5053
0
        ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5054
0
         (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5055
0
        ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
5056
0
      SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
5057
0
    }
5058
0
    if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5059
0
         (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5060
0
        (asoc->stream_queue_cnt == 1) &&
5061
0
        (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5062
0
      struct mbuf *op_err;
5063
5064
0
      *abort_now = 1;
5065
      /* XXX */
5066
0
      op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5067
0
      stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
5068
0
      sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
5069
0
      return;
5070
0
    }
5071
0
    if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5072
0
        (asoc->stream_queue_cnt == 0)) {
5073
0
      struct sctp_nets *netp;
5074
5075
0
      if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5076
0
          (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5077
0
        SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5078
0
      }
5079
0
      SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5080
0
      sctp_stop_timers_for_shutdown(stcb);
5081
0
      if (asoc->alternate) {
5082
0
        netp = asoc->alternate;
5083
0
      } else {
5084
0
        netp = asoc->primary_destination;
5085
0
      }
5086
0
      sctp_send_shutdown(stcb, netp);
5087
0
      sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5088
0
           stcb->sctp_ep, stcb, netp);
5089
0
      sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5090
0
           stcb->sctp_ep, stcb, NULL);
5091
0
      return;
5092
0
    } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5093
0
         (asoc->stream_queue_cnt == 0)) {
5094
0
      struct sctp_nets *netp;
5095
5096
0
      SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5097
0
      SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5098
0
      sctp_stop_timers_for_shutdown(stcb);
5099
0
      if (asoc->alternate) {
5100
0
        netp = asoc->alternate;
5101
0
      } else {
5102
0
        netp = asoc->primary_destination;
5103
0
      }
5104
0
      sctp_send_shutdown_ack(stcb, netp);
5105
0
      sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5106
0
                       stcb->sctp_ep, stcb, netp);
5107
0
      return;
5108
0
    }
5109
0
  }
5110
  /*
5111
   * Now here we are going to recycle net_ack for a different use...
5112
   * HEADS UP.
5113
   */
5114
0
  TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5115
0
    net->net_ack = 0;
5116
0
  }
5117
5118
  /*
5119
   * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5120
   * to be done. Setting this_sack_lowest_newack to the cum_ack will
5121
   * automatically ensure that.
5122
   */
5123
0
  if ((asoc->sctp_cmt_on_off > 0) &&
5124
0
      SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5125
0
      (cmt_dac_flag == 0)) {
5126
0
    this_sack_lowest_newack = cum_ack;
5127
0
  }
5128
0
  if ((num_seg > 0) || (num_nr_seg > 0)) {
5129
0
    sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5130
0
                               biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5131
0
  }
5132
  /* JRS - Use the congestion control given in the CC module */
5133
0
  asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5134
5135
  /* Now are we exiting loss recovery ? */
5136
0
  if (will_exit_fast_recovery) {
5137
    /* Ok, we must exit fast recovery */
5138
0
    asoc->fast_retran_loss_recovery = 0;
5139
0
  }
5140
0
  if ((asoc->sat_t3_loss_recovery) &&
5141
0
      SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5142
    /* end satellite t3 loss recovery */
5143
0
    asoc->sat_t3_loss_recovery = 0;
5144
0
  }
5145
  /*
5146
   * CMT Fast recovery
5147
   */
5148
0
  TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5149
0
    if (net->will_exit_fast_recovery) {
5150
      /* Ok, we must exit fast recovery */
5151
0
      net->fast_retran_loss_recovery = 0;
5152
0
    }
5153
0
  }
5154
5155
  /* Adjust and set the new rwnd value */
5156
0
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5157
0
    sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5158
0
                      asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5159
0
  }
5160
0
  asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5161
0
                                      (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5162
0
  if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5163
    /* SWS sender side engages */
5164
0
    asoc->peers_rwnd = 0;
5165
0
  }
5166
0
  if (asoc->peers_rwnd > old_rwnd) {
5167
0
    win_probe_recovery = 1;
5168
0
  }
5169
5170
  /*
5171
   * Now we must setup so we have a timer up for anyone with
5172
   * outstanding data.
5173
   */
5174
0
  done_once = 0;
5175
0
again:
5176
0
  j = 0;
5177
0
  TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5178
0
    if (win_probe_recovery && (net->window_probe)) {
5179
0
      win_probe_recovered = 1;
5180
      /*-
5181
       * Find first chunk that was used with
5182
       * window probe and clear the event. Put
5183
       * it back into the send queue as if has
5184
       * not been sent.
5185
       */
5186
0
      TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5187
0
        if (tp1->window_probe) {
5188
0
          sctp_window_probe_recovery(stcb, asoc, tp1);
5189
0
          break;
5190
0
        }
5191
0
      }
5192
0
    }
5193
0
    if (net->flight_size) {
5194
0
      j++;
5195
0
      if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5196
0
        sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5197
0
                         stcb->sctp_ep, stcb, net);
5198
0
      }
5199
0
      if (net->window_probe) {
5200
0
        net->window_probe = 0;
5201
0
      }
5202
0
    } else {
5203
0
      if (net->window_probe) {
5204
        /* In window probes we must assure a timer is still running there */
5205
0
        if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5206
0
          sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5207
0
                           stcb->sctp_ep, stcb, net);
5208
0
        }
5209
0
      } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5210
0
        sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5211
0
                        stcb, net,
5212
0
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_36);
5213
0
      }
5214
0
    }
5215
0
  }
5216
0
  if ((j == 0) &&
5217
0
      (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5218
0
      (asoc->sent_queue_retran_cnt == 0) &&
5219
0
      (win_probe_recovered == 0) &&
5220
0
      (done_once == 0)) {
5221
    /* huh, this should not happen unless all packets
5222
     * are PR-SCTP and marked to skip of course.
5223
     */
5224
0
    if (sctp_fs_audit(asoc)) {
5225
0
      TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5226
0
        net->flight_size = 0;
5227
0
      }
5228
0
      asoc->total_flight = 0;
5229
0
      asoc->total_flight_count = 0;
5230
0
      asoc->sent_queue_retran_cnt = 0;
5231
0
      TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5232
0
        if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5233
0
          sctp_flight_size_increase(tp1);
5234
0
          sctp_total_flight_increase(stcb, tp1);
5235
0
        } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5236
0
          sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5237
0
        }
5238
0
      }
5239
0
    }
5240
0
    done_once = 1;
5241
0
    goto again;
5242
0
  }
5243
  /*********************************************/
5244
  /* Here we perform PR-SCTP procedures        */
5245
  /* (section 4.2)                             */
5246
  /*********************************************/
5247
  /* C1. update advancedPeerAckPoint */
5248
0
  if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5249
0
    asoc->advanced_peer_ack_point = cum_ack;
5250
0
  }
5251
  /* C2. try to further move advancedPeerAckPoint ahead */
5252
0
  if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5253
0
    struct sctp_tmit_chunk *lchk;
5254
0
    uint32_t old_adv_peer_ack_point;
5255
5256
0
    old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5257
0
    lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5258
    /* C3. See if we need to send a Fwd-TSN */
5259
0
    if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5260
      /*
5261
       * ISSUE with ECN, see FWD-TSN processing.
5262
       */
5263
0
      if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5264
0
        sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5265
0
                       0xee, cum_ack, asoc->advanced_peer_ack_point,
5266
0
                       old_adv_peer_ack_point);
5267
0
      }
5268
0
      if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5269
0
        send_forward_tsn(stcb, asoc);
5270
0
      } else if (lchk) {
5271
        /* try to FR fwd-tsn's that get lost too */
5272
0
        if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5273
0
          send_forward_tsn(stcb, asoc);
5274
0
        }
5275
0
      }
5276
0
    }
5277
0
    for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5278
0
      if (lchk->whoTo != NULL) {
5279
0
        break;
5280
0
      }
5281
0
    }
5282
0
    if (lchk != NULL) {
5283
      /* Assure a timer is up */
5284
0
      sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5285
0
                       stcb->sctp_ep, stcb, lchk->whoTo);
5286
0
    }
5287
0
  }
5288
0
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5289
0
    sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5290
0
                   a_rwnd,
5291
0
                   stcb->asoc.peers_rwnd,
5292
0
                   stcb->asoc.total_flight,
5293
0
                   stcb->asoc.total_output_queue_size);
5294
0
  }
5295
0
}
5296
5297
void
5298
sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5299
595
{
5300
  /* Copy cum-ack */
5301
595
  uint32_t cum_ack, a_rwnd;
5302
5303
595
  cum_ack = ntohl(cp->cumulative_tsn_ack);
5304
  /* Arrange so a_rwnd does NOT change */
5305
595
  a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5306
5307
  /* Now call the express sack handling */
5308
595
  sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5309
595
}
5310
5311
static void
5312
sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5313
             struct sctp_stream_in *strmin)
5314
5.74k
{
5315
5.74k
  struct sctp_queued_to_read *control, *ncontrol;
5316
5.74k
  struct sctp_association *asoc;
5317
5.74k
  uint32_t mid;
5318
5.74k
  int need_reasm_check = 0;
5319
5320
5.74k
  asoc = &stcb->asoc;
5321
5.74k
  mid = strmin->last_mid_delivered;
5322
  /*
5323
   * First deliver anything prior to and including the stream no that
5324
   * came in.
5325
   */
5326
5.74k
  TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5327
0
    if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5328
      /* this is deliverable now */
5329
0
      if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5330
0
        if (control->on_strm_q) {
5331
0
          if (control->on_strm_q == SCTP_ON_ORDERED) {
5332
0
            TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5333
0
          } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5334
0
            TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5335
0
#ifdef INVARIANTS
5336
0
          } else {
5337
0
            panic("strmin: %p ctl: %p unknown %d",
5338
0
                  strmin, control, control->on_strm_q);
5339
0
#endif
5340
0
          }
5341
0
          control->on_strm_q = 0;
5342
0
        }
5343
        /* subtract pending on streams */
5344
0
        if (asoc->size_on_all_streams >= control->length) {
5345
0
          asoc->size_on_all_streams -= control->length;
5346
0
        } else {
5347
0
#ifdef INVARIANTS
5348
0
          panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5349
#else
5350
          asoc->size_on_all_streams = 0;
5351
#endif
5352
0
        }
5353
0
        sctp_ucount_decr(asoc->cnt_on_all_streams);
5354
        /* deliver it to at least the delivery-q */
5355
0
        if (stcb->sctp_socket) {
5356
0
          sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5357
0
          sctp_add_to_readq(stcb->sctp_ep, stcb,
5358
0
                control,
5359
0
                &stcb->sctp_socket->so_rcv,
5360
0
                1, SCTP_READ_LOCK_HELD,
5361
0
                SCTP_SO_NOT_LOCKED);
5362
0
        }
5363
0
      } else {
5364
        /* Its a fragmented message */
5365
0
        if (control->first_frag_seen) {
5366
          /* Make it so this is next to deliver, we restore later */
5367
0
          strmin->last_mid_delivered = control->mid - 1;
5368
0
          need_reasm_check = 1;
5369
0
          break;
5370
0
        }
5371
0
      }
5372
0
    } else {
5373
      /* no more delivery now. */
5374
0
      break;
5375
0
    }
5376
0
  }
5377
5.74k
  if (need_reasm_check) {
5378
0
    int ret;
5379
0
    ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5380
0
    if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5381
      /* Restore the next to deliver unless we are ahead */
5382
0
      strmin->last_mid_delivered = mid;
5383
0
    }
5384
0
    if (ret == 0) {
5385
      /* Left the front Partial one on */
5386
0
      return;
5387
0
    }
5388
0
    need_reasm_check = 0;
5389
0
  }
5390
  /*
5391
   * now we must deliver things in queue the normal way  if any are
5392
   * now ready.
5393
   */
5394
5.74k
  mid = strmin->last_mid_delivered + 1;
5395
5.74k
  TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5396
0
    if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5397
0
      if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5398
        /* this is deliverable now */
5399
0
        if (control->on_strm_q) {
5400
0
          if (control->on_strm_q == SCTP_ON_ORDERED) {
5401
0
            TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5402
0
          } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5403
0
            TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5404
0
#ifdef INVARIANTS
5405
0
          } else {
5406
0
            panic("strmin: %p ctl: %p unknown %d",
5407
0
                  strmin, control, control->on_strm_q);
5408
0
#endif
5409
0
          }
5410
0
          control->on_strm_q = 0;
5411
0
        }
5412
        /* subtract pending on streams */
5413
0
        if (asoc->size_on_all_streams >= control->length) {
5414
0
          asoc->size_on_all_streams -= control->length;
5415
0
        } else {
5416
0
#ifdef INVARIANTS
5417
0
          panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5418
#else
5419
          asoc->size_on_all_streams = 0;
5420
#endif
5421
0
        }
5422
0
        sctp_ucount_decr(asoc->cnt_on_all_streams);
5423
        /* deliver it to at least the delivery-q */
5424
0
        strmin->last_mid_delivered = control->mid;
5425
0
        if (stcb->sctp_socket) {
5426
0
          sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5427
0
          sctp_add_to_readq(stcb->sctp_ep, stcb,
5428
0
                control,
5429
0
                &stcb->sctp_socket->so_rcv, 1,
5430
0
                SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5431
0
        }
5432
0
        mid = strmin->last_mid_delivered + 1;
5433
0
      } else {
5434
        /* Its a fragmented message */
5435
0
        if (control->first_frag_seen) {
5436
          /* Make it so this is next to deliver */
5437
0
          strmin->last_mid_delivered = control->mid - 1;
5438
0
          need_reasm_check = 1;
5439
0
          break;
5440
0
        }
5441
0
      }
5442
0
    } else {
5443
0
      break;
5444
0
    }
5445
0
  }
5446
5.74k
  if (need_reasm_check) {
5447
0
    (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5448
0
  }
5449
5.74k
}
5450
5451
static void
5452
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5453
  struct sctp_association *asoc, struct sctp_stream_in *strm,
5454
  struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
5455
0
{
5456
0
  struct sctp_tmit_chunk *chk, *nchk;
5457
5458
  /*
5459
   * For now large messages held on the stream reasm that are
5460
   * complete will be tossed too. We could in theory do more
5461
   * work to spin through and stop after dumping one msg aka
5462
   * seeing the start of a new msg at the head, and call the
5463
   * delivery function... to see if it can be delivered... But
5464
   * for now we just dump everything on the queue.
5465
   */
5466
0
  if (!asoc->idata_supported && !ordered &&
5467
0
      control->first_frag_seen &&
5468
0
      SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5469
0
    return;
5470
0
  }
5471
0
  TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5472
    /* Purge hanging chunks */
5473
0
    if (!asoc->idata_supported && !ordered) {
5474
0
      if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5475
0
        break;
5476
0
      }
5477
0
    }
5478
0
    TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5479
0
    if (asoc->size_on_reasm_queue >= chk->send_size) {
5480
0
      asoc->size_on_reasm_queue -= chk->send_size;
5481
0
    } else {
5482
0
#ifdef INVARIANTS
5483
0
      panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5484
#else
5485
      asoc->size_on_reasm_queue = 0;
5486
#endif
5487
0
    }
5488
0
    sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5489
0
    if (chk->data) {
5490
0
      sctp_m_freem(chk->data);
5491
0
      chk->data = NULL;
5492
0
    }
5493
0
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5494
0
  }
5495
0
  if (!TAILQ_EMPTY(&control->reasm)) {
5496
    /* This has to be old data, unordered */
5497
0
    if (control->data) {
5498
0
      sctp_m_freem(control->data);
5499
0
      control->data = NULL;
5500
0
    }
5501
0
    sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5502
0
    chk = TAILQ_FIRST(&control->reasm);
5503
0
    if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5504
0
      TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5505
0
      sctp_add_chk_to_control(control, strm, stcb, asoc,
5506
0
            chk, SCTP_READ_LOCK_HELD);
5507
0
    }
5508
0
    sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5509
0
    return;
5510
0
  }
5511
0
  if (control->on_strm_q == SCTP_ON_ORDERED) {
5512
0
    TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5513
0
    if (asoc->size_on_all_streams >= control->length) {
5514
0
      asoc->size_on_all_streams -= control->length;
5515
0
    } else {
5516
0
#ifdef INVARIANTS
5517
0
      panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5518
#else
5519
      asoc->size_on_all_streams = 0;
5520
#endif
5521
0
    }
5522
0
    sctp_ucount_decr(asoc->cnt_on_all_streams);
5523
0
    control->on_strm_q = 0;
5524
0
  } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5525
0
    TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5526
0
    control->on_strm_q = 0;
5527
0
#ifdef INVARIANTS
5528
0
  } else if (control->on_strm_q) {
5529
0
    panic("strm: %p ctl: %p unknown %d",
5530
0
        strm, control, control->on_strm_q);
5531
0
#endif
5532
0
  }
5533
0
  control->on_strm_q = 0;
5534
0
  if (control->on_read_q == 0) {
5535
0
    sctp_free_remote_addr(control->whoFrom);
5536
0
    if (control->data) {
5537
0
      sctp_m_freem(control->data);
5538
0
      control->data = NULL;
5539
0
    }
5540
0
    sctp_free_a_readq(stcb, control);
5541
0
  }
5542
0
}
5543
5544
void
5545
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5546
                        struct sctp_forward_tsn_chunk *fwd,
5547
                        int *abort_flag, struct mbuf *m , int offset)
5548
3.48k
{
5549
  /* The pr-sctp fwd tsn */
5550
  /*
5551
   * here we will perform all the data receiver side steps for
5552
   * processing FwdTSN, as required in by pr-sctp draft:
5553
   *
5554
   * Assume we get FwdTSN(x):
5555
   *
5556
   * 1) update local cumTSN to x
5557
   * 2) try to further advance cumTSN to x + others we have
5558
   * 3) examine and update re-ordering queue on pr-in-streams
5559
   * 4) clean up re-assembly queue
5560
   * 5) Send a sack to report where we are.
5561
   */
5562
3.48k
  struct sctp_association *asoc;
5563
3.48k
  uint32_t new_cum_tsn, gap;
5564
3.48k
  unsigned int i, fwd_sz, m_size;
5565
3.48k
  uint32_t str_seq;
5566
3.48k
  struct sctp_stream_in *strm;
5567
3.48k
  struct sctp_queued_to_read *control, *ncontrol, *sv;
5568
5569
3.48k
  asoc = &stcb->asoc;
5570
3.48k
  if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5571
0
    SCTPDBG(SCTP_DEBUG_INDATA1,
5572
0
      "Bad size too small/big fwd-tsn\n");
5573
0
    return;
5574
0
  }
5575
3.48k
  m_size = (stcb->asoc.mapping_array_size << 3);
5576
  /*************************************************************/
5577
  /* 1. Here we update local cumTSN and shift the bitmap array */
5578
  /*************************************************************/
5579
3.48k
  new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5580
5581
3.48k
  if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5582
    /* Already got there ... */
5583
2.01k
    return;
5584
2.01k
  }
5585
  /*
5586
   * now we know the new TSN is more advanced, let's find the actual
5587
   * gap
5588
   */
5589
1.47k
  SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5590
1.47k
  asoc->cumulative_tsn = new_cum_tsn;
5591
1.47k
  if (gap >= m_size) {
5592
1.05k
    if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5593
132
      struct mbuf *op_err;
5594
132
      char msg[SCTP_DIAG_INFO_LEN];
5595
5596
      /*
5597
       * out of range (of single byte chunks in the rwnd I
5598
       * give out). This must be an attacker.
5599
       */
5600
132
      *abort_flag = 1;
5601
132
      SCTP_SNPRINTF(msg, sizeof(msg),
5602
132
                    "New cum ack %8.8x too high, highest TSN %8.8x",
5603
132
                    new_cum_tsn, asoc->highest_tsn_inside_map);
5604
132
      op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5605
132
      stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37;
5606
132
      sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
5607
132
      return;
5608
132
    }
5609
918
    SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5610
5611
918
    memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5612
918
    asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5613
918
    asoc->highest_tsn_inside_map = new_cum_tsn;
5614
5615
918
    memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5616
918
    asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5617
5618
918
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5619
0
      sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5620
0
    }
5621
918
  } else {
5622
424
    SCTP_TCB_LOCK_ASSERT(stcb);
5623
7.27k
    for (i = 0; i <= gap; i++) {
5624
6.85k
      if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5625
6.85k
          !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5626
6.77k
        SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5627
6.77k
        if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5628
6.68k
          asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5629
6.68k
        }
5630
6.77k
      }
5631
6.85k
    }
5632
424
  }
5633
  /*************************************************************/
5634
  /* 2. Clear up re-assembly queue                             */
5635
  /*************************************************************/
5636
5637
  /* This is now done as part of clearing up the stream/seq */
5638
1.34k
  if (asoc->idata_supported == 0) {
5639
821
    uint16_t sid;
5640
5641
    /* Flush all the un-ordered data based on cum-tsn */
5642
821
    SCTP_INP_READ_LOCK(stcb->sctp_ep);
5643
144k
    for (sid = 0; sid < asoc->streamincnt; sid++) {
5644
143k
      strm = &asoc->strmin[sid];
5645
143k
      if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5646
0
        sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
5647
0
      }
5648
143k
    }
5649
821
    SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5650
821
  }
5651
  /*******************************************************/
5652
  /* 3. Update the PR-stream re-ordering queues and fix  */
5653
  /*    delivery issues as needed.                       */
5654
  /*******************************************************/
5655
1.34k
  fwd_sz -= sizeof(*fwd);
5656
1.34k
  if (m && fwd_sz) {
5657
    /* New method. */
5658
790
    unsigned int num_str;
5659
790
    uint32_t mid;
5660
790
    uint16_t sid;
5661
790
    uint16_t ordered, flags;
5662
790
    struct sctp_strseq *stseq, strseqbuf;
5663
790
    struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5664
790
    offset += sizeof(*fwd);
5665
5666
790
    SCTP_INP_READ_LOCK(stcb->sctp_ep);
5667
790
    if (asoc->idata_supported) {
5668
388
      num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5669
402
    } else {
5670
402
      num_str = fwd_sz / sizeof(struct sctp_strseq);
5671
402
    }
5672
6.53k
    for (i = 0; i < num_str; i++) {
5673
5.93k
      if (asoc->idata_supported) {
5674
3.14k
        stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5675
3.14k
                      sizeof(struct sctp_strseq_mid),
5676
3.14k
                      (uint8_t *)&strseqbuf_m);
5677
3.14k
        offset += sizeof(struct sctp_strseq_mid);
5678
3.14k
        if (stseq_m == NULL) {
5679
0
          break;
5680
0
        }
5681
3.14k
        sid = ntohs(stseq_m->sid);
5682
3.14k
        mid = ntohl(stseq_m->mid);
5683
3.14k
        flags = ntohs(stseq_m->flags);
5684
3.14k
        if (flags & PR_SCTP_UNORDERED_FLAG) {
5685
1.37k
          ordered = 0;
5686
1.76k
        } else {
5687
1.76k
          ordered = 1;
5688
1.76k
        }
5689
3.14k
      } else {
5690
2.78k
        stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5691
2.78k
                      sizeof(struct sctp_strseq),
5692
2.78k
                      (uint8_t *)&strseqbuf);
5693
2.78k
        offset += sizeof(struct sctp_strseq);
5694
2.78k
        if (stseq == NULL) {
5695
0
          break;
5696
0
        }
5697
2.78k
        sid = ntohs(stseq->sid);
5698
2.78k
        mid = (uint32_t)ntohs(stseq->ssn);
5699
2.78k
        ordered = 1;
5700
2.78k
      }
5701
      /* Convert */
5702
5703
      /* now process */
5704
5705
      /*
5706
       * Ok we now look for the stream/seq on the read queue
5707
       * where its not all delivered. If we find it we transmute the
5708
       * read entry into a PDI_ABORTED.
5709
       */
5710
5.93k
      if (sid >= asoc->streamincnt) {
5711
        /* screwed up streams, stop!  */
5712
187
        break;
5713
187
      }
5714
5.74k
      if ((asoc->str_of_pdapi == sid) &&
5715
5.74k
          (asoc->ssn_of_pdapi == mid)) {
5716
        /* If this is the one we were partially delivering
5717
         * now then we no longer are. Note this will change
5718
         * with the reassembly re-write.
5719
         */
5720
1.38k
        asoc->fragmented_delivery_inprogress = 0;
5721
1.38k
      }
5722
5.74k
      strm = &asoc->strmin[sid];
5723
5.74k
      if (ordered) {
5724
4.39k
        TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, ncontrol) {
5725
0
          if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5726
0
            sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5727
0
          }
5728
0
        }
5729
4.39k
      } else {
5730
1.35k
        if (asoc->idata_supported) {
5731
1.35k
          TAILQ_FOREACH_SAFE(control, &strm->uno_inqueue, next_instrm, ncontrol) {
5732
0
            if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5733
0
              sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5734
0
            }
5735
0
          }
5736
1.35k
        } else {
5737
0
          if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5738
0
            sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
5739
0
          }
5740
0
        }
5741
1.35k
      }
5742
7.02k
      TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5743
7.02k
        if ((control->sinfo_stream == sid) &&
5744
7.02k
            (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5745
1.02k
          str_seq = (sid << 16) | (0x0000ffff & mid);
5746
1.02k
          control->pdapi_aborted = 1;
5747
1.02k
          sv = stcb->asoc.control_pdapi;
5748
1.02k
          control->end_added = 1;
5749
1.02k
          if (control->on_strm_q == SCTP_ON_ORDERED) {
5750
0
            TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5751
0
            if (asoc->size_on_all_streams >= control->length) {
5752
0
              asoc->size_on_all_streams -= control->length;
5753
0
            } else {
5754
0
#ifdef INVARIANTS
5755
0
              panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5756
#else
5757
              asoc->size_on_all_streams = 0;
5758
#endif
5759
0
            }
5760
0
            sctp_ucount_decr(asoc->cnt_on_all_streams);
5761
1.02k
          } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5762
0
            TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5763
0
#ifdef INVARIANTS
5764
1.02k
          } else if (control->on_strm_q) {
5765
0
            panic("strm: %p ctl: %p unknown %d",
5766
0
                  strm, control, control->on_strm_q);
5767
0
#endif
5768
0
          }
5769
1.02k
          control->on_strm_q = 0;
5770
1.02k
          stcb->asoc.control_pdapi = control;
5771
1.02k
          sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5772
1.02k
                          stcb,
5773
1.02k
                          SCTP_PARTIAL_DELIVERY_ABORTED,
5774
1.02k
                          (void *)&str_seq,
5775
1.02k
              SCTP_SO_NOT_LOCKED);
5776
1.02k
          stcb->asoc.control_pdapi = sv;
5777
1.02k
          break;
5778
6.00k
        } else if ((control->sinfo_stream == sid) &&
5779
6.00k
             SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5780
          /* We are past our victim SSN */
5781
151
          break;
5782
151
        }
5783
7.02k
      }
5784
5.74k
      if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5785
        /* Update the sequence number */
5786
1.27k
        strm->last_mid_delivered = mid;
5787
1.27k
      }
5788
      /* now kick the stream the new way */
5789
      /*sa_ignore NO_NULL_CHK*/
5790
5.74k
      sctp_kick_prsctp_reorder_queue(stcb, strm);
5791
5.74k
    }
5792
790
    SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5793
790
  }
5794
  /*
5795
   * Now slide thing forward.
5796
   */
5797
1.34k
  sctp_slide_mapping_arrays(stcb);
5798
1.34k
}