Coverage Report

Created: 2026-02-14 06:43

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/usrsctp/usrsctplib/netinet/sctp_indata.c
Line
Count
Source
1
/*-
2
 * SPDX-License-Identifier: BSD-3-Clause
3
 *
4
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7
 *
8
 * Redistribution and use in source and binary forms, with or without
9
 * modification, are permitted provided that the following conditions are met:
10
 *
11
 * a) Redistributions of source code must retain the above copyright notice,
12
 *    this list of conditions and the following disclaimer.
13
 *
14
 * b) Redistributions in binary form must reproduce the above copyright
15
 *    notice, this list of conditions and the following disclaimer in
16
 *    the documentation and/or other materials provided with the distribution.
17
 *
18
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19
 *    contributors may be used to endorse or promote products derived
20
 *    from this software without specific prior written permission.
21
 *
22
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32
 * THE POSSIBILITY OF SUCH DAMAGE.
33
 */
34
35
#include <netinet/sctp_os.h>
36
#if defined(__FreeBSD__) && !defined(__Userspace__)
37
#include <sys/proc.h>
38
#endif
39
#include <netinet/sctp_var.h>
40
#include <netinet/sctp_sysctl.h>
41
#include <netinet/sctp_header.h>
42
#include <netinet/sctp_pcb.h>
43
#include <netinet/sctputil.h>
44
#include <netinet/sctp_output.h>
45
#include <netinet/sctp_uio.h>
46
#include <netinet/sctp_auth.h>
47
#include <netinet/sctp_timer.h>
48
#include <netinet/sctp_asconf.h>
49
#include <netinet/sctp_indata.h>
50
#include <netinet/sctp_bsd_addr.h>
51
#include <netinet/sctp_input.h>
52
#include <netinet/sctp_crc32.h>
53
#if defined(__FreeBSD__) && !defined(__Userspace__)
54
#include <netinet/sctp_lock_bsd.h>
55
#endif
56
#if defined(_WIN32) && defined(__MINGW32__)
57
#include <minmax.h>
58
#endif
59
/*
60
 * NOTES: On the outbound side of things I need to check the sack timer to
61
 * see if I should generate a sack into the chunk queue (if I have data to
62
 * send that is and will be sending it .. for bundling.
63
 *
64
 * The callback in sctp_usrreq.c will get called when the socket is read from.
65
 * This will cause sctp_service_queues() to get called on the top entry in
66
 * the list.
67
 */
68
static uint32_t
69
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
70
      struct sctp_stream_in *strm,
71
      struct sctp_tcb *stcb,
72
      struct sctp_association *asoc,
73
      struct sctp_tmit_chunk *chk, int hold_rlock);
74
75
void
76
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
77
12.9k
{
78
12.9k
  asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
79
12.9k
}
80
81
/* Calculate what the rwnd would be */
82
uint32_t
83
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
84
17.8k
{
85
17.8k
  uint32_t calc = 0;
86
87
  /*
88
   * This is really set wrong with respect to a 1-2-m socket. Since
89
   * the sb_cc is the count that everyone as put up. When we re-write
90
   * sctp_soreceive then we will fix this so that ONLY this
91
   * associations data is taken into account.
92
   */
93
17.8k
  if (stcb->sctp_socket == NULL) {
94
0
    return (calc);
95
0
  }
96
97
17.8k
  KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
98
17.8k
          ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
99
17.8k
  KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
100
17.8k
          ("size_on_all_streams is %u", asoc->size_on_all_streams));
101
17.8k
  if (stcb->asoc.sb_cc == 0 &&
102
8.18k
      asoc->cnt_on_reasm_queue == 0 &&
103
7.92k
      asoc->cnt_on_all_streams == 0) {
104
    /* Full rwnd granted */
105
7.84k
    calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
106
7.84k
    return (calc);
107
7.84k
  }
108
  /* get actual space */
109
10.0k
  calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
110
  /*
111
   * take out what has NOT been put on socket queue and we yet hold
112
   * for putting up.
113
   */
114
10.0k
  calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
115
10.0k
                                           asoc->cnt_on_reasm_queue * MSIZE));
116
10.0k
  calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
117
10.0k
                                           asoc->cnt_on_all_streams * MSIZE));
118
10.0k
  if (calc == 0) {
119
    /* out of space */
120
290
    return (calc);
121
290
  }
122
123
  /* what is the overhead of all these rwnd's */
124
9.75k
  calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
125
  /* If the window gets too small due to ctrl-stuff, reduce it
126
   * to 1, even it is 0. SWS engaged
127
   */
128
9.75k
  if (calc < stcb->asoc.my_rwnd_control_len) {
129
3.96k
    calc = 1;
130
3.96k
  }
131
9.75k
  return (calc);
132
10.0k
}
133
134
/*
135
 * Build out our readq entry based on the incoming packet.
136
 */
137
struct sctp_queued_to_read *
138
sctp_build_readq_entry(struct sctp_tcb *stcb,
139
    struct sctp_nets *net,
140
    uint32_t tsn, uint32_t ppid,
141
    uint32_t context, uint16_t sid,
142
    uint32_t mid, uint8_t flags,
143
    struct mbuf *dm)
144
438k
{
145
438k
  struct sctp_queued_to_read *read_queue_e = NULL;
146
147
438k
  sctp_alloc_a_readq(stcb, read_queue_e);
148
438k
  if (read_queue_e == NULL) {
149
0
    goto failed_build;
150
0
  }
151
438k
  memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
152
438k
  read_queue_e->sinfo_stream = sid;
153
438k
  read_queue_e->sinfo_flags = (flags << 8);
154
438k
  read_queue_e->sinfo_ppid = ppid;
155
438k
  read_queue_e->sinfo_context = context;
156
438k
  read_queue_e->sinfo_tsn = tsn;
157
438k
  read_queue_e->sinfo_cumtsn = tsn;
158
438k
  read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
159
438k
  read_queue_e->mid = mid;
160
438k
  read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
161
438k
  TAILQ_INIT(&read_queue_e->reasm);
162
438k
  read_queue_e->whoFrom = net;
163
438k
  atomic_add_int(&net->ref_count, 1);
164
438k
  read_queue_e->data = dm;
165
438k
  read_queue_e->stcb = stcb;
166
438k
  read_queue_e->port_from = stcb->rport;
167
438k
  if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
168
0
    read_queue_e->do_not_ref_stcb = 1;
169
0
  }
170
438k
failed_build:
171
438k
  return (read_queue_e);
172
438k
}
173
174
struct mbuf *
175
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
176
0
{
177
0
  struct sctp_extrcvinfo *seinfo;
178
0
  struct sctp_sndrcvinfo *outinfo;
179
0
  struct sctp_rcvinfo *rcvinfo;
180
0
  struct sctp_nxtinfo *nxtinfo;
181
#if defined(_WIN32)
182
  WSACMSGHDR *cmh;
183
#else
184
0
  struct cmsghdr *cmh;
185
0
#endif
186
0
  struct mbuf *ret;
187
0
  int len;
188
0
  int use_extended;
189
0
  int provide_nxt;
190
191
0
  if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
192
0
      sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
193
0
      sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
194
    /* user does not want any ancillary data */
195
0
    return (NULL);
196
0
  }
197
198
0
  len = 0;
199
0
  if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
200
0
    len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
201
0
  }
202
0
  seinfo = (struct sctp_extrcvinfo *)sinfo;
203
0
  if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
204
0
      (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
205
0
    provide_nxt = 1;
206
0
    len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
207
0
  } else {
208
0
    provide_nxt = 0;
209
0
  }
210
0
  if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
211
0
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
212
0
      use_extended = 1;
213
0
      len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
214
0
    } else {
215
0
      use_extended = 0;
216
0
      len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
217
0
    }
218
0
  } else {
219
0
    use_extended = 0;
220
0
  }
221
222
0
  ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
223
0
  if (ret == NULL) {
224
    /* No space */
225
0
    return (ret);
226
0
  }
227
0
  SCTP_BUF_LEN(ret) = 0;
228
229
  /* We need a CMSG header followed by the struct */
230
#if defined(_WIN32)
231
  cmh = mtod(ret, WSACMSGHDR *);
232
#else
233
0
  cmh = mtod(ret, struct cmsghdr *);
234
0
#endif
235
  /*
236
   * Make sure that there is no un-initialized padding between
237
   * the cmsg header and cmsg data and after the cmsg data.
238
   */
239
0
  memset(cmh, 0, len);
240
0
  if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
241
0
    cmh->cmsg_level = IPPROTO_SCTP;
242
0
    cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
243
0
    cmh->cmsg_type = SCTP_RCVINFO;
244
0
    rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
245
0
    rcvinfo->rcv_sid = sinfo->sinfo_stream;
246
0
    rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
247
0
    rcvinfo->rcv_flags = sinfo->sinfo_flags;
248
0
    rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
249
0
    rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
250
0
    rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
251
0
    rcvinfo->rcv_context = sinfo->sinfo_context;
252
0
    rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
253
#if defined(_WIN32)
254
    cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
255
#else
256
0
    cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
257
0
#endif
258
0
    SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
259
0
  }
260
0
  if (provide_nxt) {
261
0
    cmh->cmsg_level = IPPROTO_SCTP;
262
0
    cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
263
0
    cmh->cmsg_type = SCTP_NXTINFO;
264
0
    nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
265
0
    nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
266
0
    nxtinfo->nxt_flags = 0;
267
0
    if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
268
0
      nxtinfo->nxt_flags |= SCTP_UNORDERED;
269
0
    }
270
0
    if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
271
0
      nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
272
0
    }
273
0
    if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
274
0
      nxtinfo->nxt_flags |= SCTP_COMPLETE;
275
0
    }
276
0
    nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
277
0
    nxtinfo->nxt_length = seinfo->serinfo_next_length;
278
0
    nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
279
#if defined(_WIN32)
280
    cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
281
#else
282
0
    cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
283
0
#endif
284
0
    SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
285
0
  }
286
0
  if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
287
0
    cmh->cmsg_level = IPPROTO_SCTP;
288
0
    outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
289
0
    if (use_extended) {
290
0
      cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
291
0
      cmh->cmsg_type = SCTP_EXTRCV;
292
0
      memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
293
0
      SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
294
0
    } else {
295
0
      cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
296
0
      cmh->cmsg_type = SCTP_SNDRCV;
297
0
      *outinfo = *sinfo;
298
0
      SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
299
0
    }
300
0
  }
301
0
  return (ret);
302
0
}
303
304
static void
305
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
306
1.85k
{
307
1.85k
  uint32_t gap, i;
308
1.85k
  int in_r, in_nr;
309
310
1.85k
  if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
311
0
    return;
312
0
  }
313
1.85k
  if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
314
    /*
315
     * This tsn is behind the cum ack and thus we don't
316
     * need to worry about it being moved from one to the other.
317
     */
318
8
    return;
319
8
  }
320
1.85k
  SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
321
1.85k
  in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
322
1.85k
  in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
323
1.85k
  KASSERT(in_r || in_nr, ("%s: Things are really messed up now", __func__));
324
1.85k
  if (!in_nr) {
325
1.79k
    SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
326
1.79k
    if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
327
1.04k
      asoc->highest_tsn_inside_nr_map = tsn;
328
1.04k
    }
329
1.79k
  }
330
1.85k
  if (in_r) {
331
1.79k
    SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
332
1.79k
    if (tsn == asoc->highest_tsn_inside_map) {
333
      /* We must back down to see what the new highest is. */
334
1.01M
      for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
335
1.01M
        SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
336
1.01M
        if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
337
138
          asoc->highest_tsn_inside_map = i;
338
138
          break;
339
138
        }
340
1.01M
      }
341
900
      if (!SCTP_TSN_GE(i, asoc->mapping_array_base_tsn)) {
342
762
        asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
343
762
      }
344
900
    }
345
1.79k
  }
346
1.85k
}
347
348
static int
349
sctp_place_control_in_stream(struct sctp_stream_in *strm,
350
           struct sctp_association *asoc,
351
           struct sctp_queued_to_read *control)
352
2.77k
{
353
2.77k
  struct sctp_queued_to_read *at;
354
2.77k
  struct sctp_readhead *q;
355
2.77k
  uint8_t flags, unordered;
356
357
2.77k
  flags = (control->sinfo_flags >> 8);
358
2.77k
  unordered = flags & SCTP_DATA_UNORDERED;
359
2.77k
  if (unordered) {
360
1.14k
    q = &strm->uno_inqueue;
361
1.14k
    if (asoc->idata_supported == 0) {
362
495
      if (!TAILQ_EMPTY(q)) {
363
        /* Only one stream can be here in old style  -- abort */
364
4
        return (-1);
365
4
      }
366
495
      TAILQ_INSERT_TAIL(q, control, next_instrm);
367
491
      control->on_strm_q = SCTP_ON_UNORDERED;
368
491
      return (0);
369
495
    }
370
1.63k
  } else {
371
1.63k
    q = &strm->inqueue;
372
1.63k
  }
373
2.27k
  if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
374
545
    control->end_added = 1;
375
545
    control->first_frag_seen = 1;
376
545
    control->last_frag_seen = 1;
377
545
  }
378
2.27k
  if (TAILQ_EMPTY(q)) {
379
    /* Empty queue */
380
1.21k
    TAILQ_INSERT_HEAD(q, control, next_instrm);
381
1.21k
    if (unordered) {
382
193
      control->on_strm_q = SCTP_ON_UNORDERED;
383
1.02k
    } else {
384
1.02k
      control->on_strm_q = SCTP_ON_ORDERED;
385
1.02k
    }
386
1.21k
    return (0);
387
1.21k
  } else {
388
2.94k
    TAILQ_FOREACH(at, q, next_instrm) {
389
2.94k
      if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
390
        /*
391
         * one in queue is bigger than the
392
         * new one, insert before this one
393
         */
394
756
        TAILQ_INSERT_BEFORE(at, control, next_instrm);
395
756
        if (unordered) {
396
377
          control->on_strm_q = SCTP_ON_UNORDERED;
397
379
        } else {
398
379
          control->on_strm_q = SCTP_ON_ORDERED;
399
379
        }
400
756
        break;
401
2.18k
      } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
402
        /*
403
         * Gak, He sent me a duplicate msg
404
         * id number?? return -1 to abort.
405
         */
406
3
        return (-1);
407
2.18k
      } else {
408
2.18k
        if (TAILQ_NEXT(at, next_instrm) == NULL) {
409
          /*
410
           * We are at the end, insert
411
           * it after this one
412
           */
413
300
          if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
414
0
            sctp_log_strm_del(control, at,
415
0
                  SCTP_STR_LOG_FROM_INSERT_TL);
416
0
          }
417
300
          TAILQ_INSERT_AFTER(q, at, control, next_instrm);
418
300
          if (unordered) {
419
76
            control->on_strm_q = SCTP_ON_UNORDERED;
420
224
          } else {
421
224
            control->on_strm_q = SCTP_ON_ORDERED;
422
224
          }
423
300
          break;
424
300
        }
425
2.18k
      }
426
2.94k
    }
427
1.05k
  }
428
1.05k
  return (0);
429
2.27k
}
430
431
static void
432
sctp_abort_in_reasm(struct sctp_tcb *stcb,
433
                    struct sctp_queued_to_read *control,
434
                    struct sctp_tmit_chunk *chk,
435
                    int *abort_flag, int opspot)
436
197
{
437
197
  char msg[SCTP_DIAG_INFO_LEN];
438
197
  struct mbuf *oper;
439
440
197
  if (stcb->asoc.idata_supported) {
441
162
    SCTP_SNPRINTF(msg, sizeof(msg),
442
162
                  "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
443
162
                  opspot,
444
162
                  control->fsn_included,
445
162
                  chk->rec.data.tsn,
446
162
                  chk->rec.data.sid,
447
162
                  chk->rec.data.fsn, chk->rec.data.mid);
448
162
  } else {
449
35
    SCTP_SNPRINTF(msg, sizeof(msg),
450
35
                  "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
451
35
                  opspot,
452
35
                  control->fsn_included,
453
35
                  chk->rec.data.tsn,
454
35
                  chk->rec.data.sid,
455
35
                  chk->rec.data.fsn,
456
35
                  (uint16_t)chk->rec.data.mid);
457
35
  }
458
197
  oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
459
197
  sctp_m_freem(chk->data);
460
197
  chk->data = NULL;
461
197
  sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
462
197
  stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
463
197
  sctp_abort_an_association(stcb->sctp_ep, stcb, oper, false, SCTP_SO_NOT_LOCKED);
464
197
  *abort_flag = 1;
465
197
}
466
467
static void
468
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
469
7
{
470
  /*
471
   * The control could not be placed and must be cleaned.
472
   */
473
7
  struct sctp_tmit_chunk *chk, *nchk;
474
7
  TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
475
0
    TAILQ_REMOVE(&control->reasm, chk, sctp_next);
476
0
    if (chk->data)
477
0
      sctp_m_freem(chk->data);
478
0
    chk->data = NULL;
479
0
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
480
0
  }
481
7
  sctp_free_remote_addr(control->whoFrom);
482
7
  if (control->data) {
483
7
    sctp_m_freem(control->data);
484
7
    control->data = NULL;
485
7
  }
486
7
  sctp_free_a_readq(stcb, control);
487
7
}
488
489
/*
490
 * Queue the chunk either right into the socket buffer if it is the next one
491
 * to go OR put it in the correct place in the delivery queue.  If we do
492
 * append to the so_buf, keep doing so until we are out of order as
493
 * long as the control's entered are non-fragmented.
494
 */
495
static void
496
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
497
    struct sctp_association *asoc,
498
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
499
784
{
500
  /*
501
   * FIX-ME maybe? What happens when the ssn wraps? If we are getting
502
   * all the data in one stream this could happen quite rapidly. One
503
   * could use the TSN to keep track of things, but this scheme breaks
504
   * down in the other type of stream usage that could occur. Send a
505
   * single msg to stream 0, send 4Billion messages to stream 1, now
506
   * send a message to stream 0. You have a situation where the TSN
507
   * has wrapped but not in the stream. Is this worth worrying about
508
   * or should we just change our queue sort at the bottom to be by
509
   * TSN.
510
   *
511
   * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
512
   * with TSN 1? If the peer is doing some sort of funky TSN/SSN
513
   * assignment this could happen... and I don't see how this would be
514
   * a violation. So for now I am undecided an will leave the sort by
515
   * SSN alone. Maybe a hybrid approach is the answer
516
   *
517
   */
518
784
  struct sctp_queued_to_read *at;
519
784
  int queue_needed;
520
784
  uint32_t nxt_todel;
521
784
  struct mbuf *op_err;
522
784
  struct sctp_stream_in *strm;
523
784
  char msg[SCTP_DIAG_INFO_LEN];
524
525
784
  strm = &asoc->strmin[control->sinfo_stream];
526
784
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
527
0
    sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
528
0
  }
529
784
  if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
530
    /* The incoming sseq is behind where we last delivered? */
531
58
    SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
532
58
      strm->last_mid_delivered, control->mid);
533
    /*
534
     * throw it in the stream so it gets cleaned up in
535
     * association destruction
536
     */
537
58
    TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
538
58
    if (asoc->idata_supported) {
539
28
      SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
540
28
                    strm->last_mid_delivered, control->sinfo_tsn,
541
28
                    control->sinfo_stream, control->mid);
542
30
    } else {
543
30
      SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
544
30
                    (uint16_t)strm->last_mid_delivered,
545
30
                    control->sinfo_tsn,
546
30
                    control->sinfo_stream,
547
30
                    (uint16_t)control->mid);
548
30
    }
549
58
    op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
550
58
    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
551
58
    sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
552
58
    *abort_flag = 1;
553
58
    return;
554
58
  }
555
726
  queue_needed = 1;
556
726
  asoc->size_on_all_streams += control->length;
557
726
  sctp_ucount_incr(asoc->cnt_on_all_streams);
558
726
  nxt_todel = strm->last_mid_delivered + 1;
559
726
  if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
560
#if defined(__APPLE__) && !defined(__Userspace__)
561
    struct socket *so;
562
563
    so = SCTP_INP_SO(stcb->sctp_ep);
564
    atomic_add_int(&stcb->asoc.refcnt, 1);
565
    SCTP_TCB_UNLOCK(stcb);
566
    SCTP_SOCKET_LOCK(so, 1);
567
    SCTP_TCB_LOCK(stcb);
568
    atomic_subtract_int(&stcb->asoc.refcnt, 1);
569
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
570
      SCTP_SOCKET_UNLOCK(so, 1);
571
      return;
572
    }
573
#endif
574
    /* can be delivered right away? */
575
147
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
576
0
      sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
577
0
    }
578
    /* EY it wont be queued if it could be delivered directly */
579
147
    queue_needed = 0;
580
147
    if (asoc->size_on_all_streams >= control->length) {
581
147
      asoc->size_on_all_streams -= control->length;
582
147
    } else {
583
0
#ifdef INVARIANTS
584
0
      panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
585
#else
586
      asoc->size_on_all_streams = 0;
587
#endif
588
0
    }
589
147
    sctp_ucount_decr(asoc->cnt_on_all_streams);
590
147
    strm->last_mid_delivered++;
591
147
    sctp_mark_non_revokable(asoc, control->sinfo_tsn);
592
147
    sctp_add_to_readq(stcb->sctp_ep, stcb,
593
147
                      control,
594
147
                      &stcb->sctp_socket->so_rcv, 1,
595
147
                      SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
596
147
    TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
597
      /* all delivered */
598
57
      nxt_todel = strm->last_mid_delivered + 1;
599
57
      if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
600
12
          (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
601
7
        if (control->on_strm_q == SCTP_ON_ORDERED) {
602
7
          TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
603
7
          if (asoc->size_on_all_streams >= control->length) {
604
7
            asoc->size_on_all_streams -= control->length;
605
7
          } else {
606
0
#ifdef INVARIANTS
607
0
            panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
608
#else
609
            asoc->size_on_all_streams = 0;
610
#endif
611
0
          }
612
7
          sctp_ucount_decr(asoc->cnt_on_all_streams);
613
7
#ifdef INVARIANTS
614
7
        } else {
615
0
          panic("Huh control: %p is on_strm_q: %d",
616
0
                control, control->on_strm_q);
617
0
#endif
618
0
        }
619
7
        control->on_strm_q = 0;
620
7
        strm->last_mid_delivered++;
621
        /*
622
         * We ignore the return of deliver_data here
623
         * since we always can hold the chunk on the
624
         * d-queue. And we have a finite number that
625
         * can be delivered from the strq.
626
         */
627
7
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
628
0
          sctp_log_strm_del(control, NULL,
629
0
                SCTP_STR_LOG_FROM_IMMED_DEL);
630
0
        }
631
7
        sctp_mark_non_revokable(asoc, control->sinfo_tsn);
632
7
        sctp_add_to_readq(stcb->sctp_ep, stcb,
633
7
                          control,
634
7
                          &stcb->sctp_socket->so_rcv, 1,
635
7
                          SCTP_READ_LOCK_NOT_HELD,
636
7
                          SCTP_SO_LOCKED);
637
7
        continue;
638
50
      } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
639
5
        *need_reasm = 1;
640
5
      }
641
50
      break;
642
57
    }
643
#if defined(__APPLE__) && !defined(__Userspace__)
644
    SCTP_SOCKET_UNLOCK(so, 1);
645
#endif
646
147
  }
647
726
  if (queue_needed) {
648
    /*
649
     * Ok, we did not deliver this guy, find the correct place
650
     * to put it on the queue.
651
     */
652
579
    if (sctp_place_control_in_stream(strm, asoc, control)) {
653
7
      SCTP_SNPRINTF(msg, sizeof(msg),
654
7
                    "Queue to str MID: %u duplicate", control->mid);
655
7
      sctp_clean_up_control(stcb, control);
656
7
      op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
657
7
      stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
658
7
      sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
659
7
      *abort_flag = 1;
660
7
    }
661
579
  }
662
726
}
663
664
static void
665
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
666
1.36k
{
667
1.36k
  struct mbuf *m, *prev = NULL;
668
1.36k
  struct sctp_tcb *stcb;
669
670
1.36k
  stcb = control->stcb;
671
1.36k
  control->held_length = 0;
672
1.36k
  control->length = 0;
673
1.36k
  m = control->data;
674
3.30k
  while (m) {
675
1.93k
    if (SCTP_BUF_LEN(m) == 0) {
676
      /* Skip mbufs with NO length */
677
378
      if (prev == NULL) {
678
        /* First one */
679
378
        control->data = sctp_m_free(m);
680
378
        m = control->data;
681
378
      } else {
682
0
        SCTP_BUF_NEXT(prev) = sctp_m_free(m);
683
0
        m = SCTP_BUF_NEXT(prev);
684
0
      }
685
378
      if (m == NULL) {
686
0
        control->tail_mbuf = prev;
687
0
      }
688
378
      continue;
689
378
    }
690
1.55k
    prev = m;
691
1.55k
    atomic_add_int(&control->length, SCTP_BUF_LEN(m));
692
1.55k
    if (control->on_read_q) {
693
      /*
694
       * On read queue so we must increment the
695
       * SB stuff, we assume caller has done any locks of SB.
696
       */
697
0
      sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
698
0
    }
699
1.55k
    m = SCTP_BUF_NEXT(m);
700
1.55k
  }
701
1.36k
  if (prev) {
702
1.36k
    control->tail_mbuf = prev;
703
1.36k
  }
704
1.36k
}
705
706
static void
707
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
708
363
{
709
363
  struct mbuf *prev=NULL;
710
363
  struct sctp_tcb *stcb;
711
712
363
  stcb = control->stcb;
713
363
  if (stcb == NULL) {
714
0
#ifdef INVARIANTS
715
0
    panic("Control broken");
716
#else
717
    return;
718
#endif
719
0
  }
720
363
  if (control->tail_mbuf == NULL) {
721
    /* TSNH */
722
0
    sctp_m_freem(control->data);
723
0
    control->data = m;
724
0
    sctp_setup_tail_pointer(control);
725
0
    return;
726
0
  }
727
363
  control->tail_mbuf->m_next = m;
728
853
  while (m) {
729
490
    if (SCTP_BUF_LEN(m) == 0) {
730
      /* Skip mbufs with NO length */
731
88
      if (prev == NULL) {
732
        /* First one */
733
88
        control->tail_mbuf->m_next = sctp_m_free(m);
734
88
        m = control->tail_mbuf->m_next;
735
88
      } else {
736
0
        SCTP_BUF_NEXT(prev) = sctp_m_free(m);
737
0
        m = SCTP_BUF_NEXT(prev);
738
0
      }
739
88
      if (m == NULL) {
740
0
        control->tail_mbuf = prev;
741
0
      }
742
88
      continue;
743
88
    }
744
402
    prev = m;
745
402
    if (control->on_read_q) {
746
      /*
747
       * On read queue so we must increment the
748
       * SB stuff, we assume caller has done any locks of SB.
749
       */
750
0
      sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
751
0
    }
752
402
    *added += SCTP_BUF_LEN(m);
753
402
    atomic_add_int(&control->length, SCTP_BUF_LEN(m));
754
402
    m = SCTP_BUF_NEXT(m);
755
402
  }
756
363
  if (prev) {
757
363
    control->tail_mbuf = prev;
758
363
  }
759
363
}
760
761
static void
762
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
763
113
{
764
113
  memset(nc, 0, sizeof(struct sctp_queued_to_read));
765
113
  nc->sinfo_stream = control->sinfo_stream;
766
113
  nc->mid = control->mid;
767
113
  TAILQ_INIT(&nc->reasm);
768
113
  nc->top_fsn = control->top_fsn;
769
113
  nc->mid = control->mid;
770
113
  nc->sinfo_flags = control->sinfo_flags;
771
113
  nc->sinfo_ppid = control->sinfo_ppid;
772
113
  nc->sinfo_context = control->sinfo_context;
773
113
  nc->fsn_included = 0xffffffff;
774
113
  nc->sinfo_tsn = control->sinfo_tsn;
775
113
  nc->sinfo_cumtsn = control->sinfo_cumtsn;
776
113
  nc->sinfo_assoc_id = control->sinfo_assoc_id;
777
113
  nc->whoFrom = control->whoFrom;
778
113
  atomic_add_int(&nc->whoFrom->ref_count, 1);
779
113
  nc->stcb = control->stcb;
780
113
  nc->port_from = control->port_from;
781
113
  nc->do_not_ref_stcb = control->do_not_ref_stcb;
782
113
}
783
784
/*
 * Collapse in-sequence fragments from control->reasm into 'control' for the
 * pre-I-DATA (RFC 4960) unordered case, where every fragment carries MID 0
 * and ordering is by FSN (== TSN) only.  Completed messages are pushed to
 * the socket read queue; any fragments belonging to a *following* message
 * are migrated onto a freshly built control ('nc') which replaces 'control'
 * on the stream's unordered queue.
 *
 * Returns 1 when the caller should stop looking at the unordered queue
 * (nothing more can be merged, or a complete message was delivered),
 * 0 when a partial-delivery (PD-API) hand-off to the read queue was
 * started and normal processing may continue.
 */
static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
                               struct sctp_association *asoc,
                               struct sctp_stream_in *strm,
                               struct sctp_queued_to_read *control,
                               uint32_t pd_point,
                               int inp_read_lock_held)
{
  /* Special handling for the old un-ordered data chunk.
   * All the chunks/TSN's go to mid 0. So
   * we have to do the old style watching to see
   * if we have it all. If you return one, no other
   * control entries on the un-ordered queue will
   * be looked at. In theory there should be no others
   * entries in reality, unless the guy is sending both
   * unordered NDATA and unordered DATA...
   */
  struct sctp_tmit_chunk *chk, *lchk, *tchk;
  uint32_t fsn;
  struct sctp_queued_to_read *nc;
  int cnt_added;

  if (control->first_frag_seen == 0) {
    /* Nothing we can do, we have not seen the first piece yet */
    return (1);
  }
  /* Collapse any we can */
  cnt_added = 0;
restart:
  /* The next mergeable fragment must have exactly this FSN. */
  fsn = control->fsn_included + 1;
  /* Now what can we add? */
  TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
    if (chk->rec.data.fsn == fsn) {
      /* Ok lets add it */
      /* Pre-allocate 'nc' now: if the message completes below we may
       * need it to carry the leftover fragments of the next message. */
      sctp_alloc_a_readq(stcb, nc);
      if (nc == NULL) {
        break;
      }
      memset(nc, 0, sizeof(struct sctp_queued_to_read));
      TAILQ_REMOVE(&control->reasm, chk, sctp_next);
      sctp_add_chk_to_control(control, strm, stcb, asoc, chk, inp_read_lock_held);
      fsn++;
      cnt_added++;
      chk = NULL;
      if (control->end_added) {
        /* We are done */
        if (!TAILQ_EMPTY(&control->reasm)) {
          /*
           * Ok we have to move anything left on
           * the control queue to a new control.
           */
          sctp_build_readq_entry_from_ctl(nc, control);
          tchk = TAILQ_FIRST(&control->reasm);
          if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
            /* The leftover run starts with a FIRST fragment: seed nc's
             * data directly from it and take it off the reasm books. */
            TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
            if (asoc->size_on_reasm_queue >= tchk->send_size) {
              asoc->size_on_reasm_queue -= tchk->send_size;
            } else {
#ifdef INVARIANTS
            panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
            asoc->size_on_reasm_queue = 0;
#endif
            }
            sctp_ucount_decr(asoc->cnt_on_reasm_queue);
            nc->first_frag_seen = 1;
            nc->fsn_included = tchk->rec.data.fsn;
            nc->data = tchk->data;
            nc->sinfo_ppid = tchk->rec.data.ppid;
            nc->sinfo_tsn = tchk->rec.data.tsn;
            sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
            tchk->data = NULL;
            sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
            sctp_setup_tail_pointer(nc);
            tchk = TAILQ_FIRST(&control->reasm);
          }
          /* Spin the rest onto the queue */
          while (tchk) {
            TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
            TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
            tchk = TAILQ_FIRST(&control->reasm);
          }
          /* Now lets add it to the queue after removing control */
          TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
          nc->on_strm_q = SCTP_ON_UNORDERED;
          if (control->on_strm_q) {
            TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
            control->on_strm_q = 0;
          }
        }
        if (control->pdapi_started) {
          /* Message completed: clear the partial-delivery state. */
          strm->pd_api_started = 0;
          control->pdapi_started = 0;
        }
        if (control->on_strm_q) {
          TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
          control->on_strm_q = 0;
          SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
        }
        if (control->on_read_q == 0) {
          sctp_add_to_readq(stcb->sctp_ep, stcb, control,
                &stcb->sctp_socket->so_rcv, control->end_added,
                inp_read_lock_held, SCTP_SO_NOT_LOCKED);
#if defined(__Userspace__)
        } else {
          /* Already on the read queue (PD-API case): just notify. */
          sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
#endif
        }
        sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
        if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
          /* Switch to the new guy and continue */
          control = nc;
          goto restart;
        } else {
          if (nc->on_strm_q == 0) {
            /* nc was never queued on the stream; release it. */
            sctp_free_a_readq(stcb, nc);
          }
        }
        return (1);
      } else {
        /* Message not complete; the speculative nc is not needed. */
        sctp_free_a_readq(stcb, nc);
      }
    } else {
      /* Can't add more */
      break;
    }
  }
  if (cnt_added && strm->pd_api_started) {
    /* We grew a message that is already being partially delivered. */
#if defined(__Userspace__)
    sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
#endif
    sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
  }
  if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
    /* Enough data accumulated: start partial delivery to the reader. */
    strm->pd_api_started = 1;
    control->pdapi_started = 1;
    sctp_add_to_readq(stcb->sctp_ep, stcb, control,
                      &stcb->sctp_socket->so_rcv, control->end_added,
                      inp_read_lock_held, SCTP_SO_NOT_LOCKED);
    sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
    return (0);
  } else {
    return (1);
  }
}
929
930
/*
 * Place one fragment 'chk' of an old-style (pre-I-DATA) unordered message
 * into 'control': either merge a FIRST fragment directly into the control's
 * data (possibly swapping with an already-present FIRST that has a higher
 * FSN), or insert the fragment into control->reasm sorted by FSN.
 * Protocol violations (duplicate FSN, un-recoverable FIRST collisions)
 * abort the association via sctp_abort_in_reasm(), which sets *abort_flag.
 */
static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
                               struct sctp_association *asoc,
                               struct sctp_queued_to_read *control,
                               struct sctp_tmit_chunk *chk,
                               int *abort_flag)
{
  struct sctp_tmit_chunk *at;
  int inserted;
  /*
   * Here we need to place the chunk into the control structure
   * sorted in the correct order.
   */
  if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
    /* Its the very first one. */
    SCTPDBG(SCTP_DEBUG_XXX,
      "chunk is a first fsn: %u becomes fsn_included\n",
      chk->rec.data.fsn);
    at = TAILQ_FIRST(&control->reasm);
    if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
      /*
       * The first chunk in the reassembly is
       * a smaller TSN than this one, even though
       * this has a first, it must be from a subsequent
       * msg.
       */
      goto place_chunk;
    }
    if (control->first_frag_seen) {
      /*
       * In old un-ordered we can reassembly on
       * one control multiple messages. As long
       * as the next FIRST is greater then the old
       * first (TSN i.e. FSN wise)
       */
      struct mbuf *tdata;
      uint32_t tmp;

      if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
        /* Easy way the start of a new guy beyond the lowest */
        goto place_chunk;
      }
      if ((chk->rec.data.fsn == control->fsn_included) ||
          (control->pdapi_started)) {
        /*
         * Ok this should not happen, if it does
         * we started the pd-api on the higher TSN (since
         * the equals part is a TSN failure it must be that).
         *
         * We are completely hosed in that case since I have
         * no way to recover. This really will only happen
         * if we can get more TSN's higher before the pd-api-point.
         */
        sctp_abort_in_reasm(stcb, control, chk,
                abort_flag,
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);

        return;
      }
      /*
       * Ok we have two firsts and the one we just got
       * is smaller than the one we previously placed.. yuck!
       * We must swap them out.
       */
      /* swap the mbufs */
      tdata = control->data;
      control->data = chk->data;
      chk->data = tdata;
      /* Save the lengths */
      chk->send_size = control->length;
      /* Recompute length of control and tail pointer */
      sctp_setup_tail_pointer(control);
      /* Fix the FSN included */
      tmp = control->fsn_included;
      control->fsn_included = chk->rec.data.fsn;
      chk->rec.data.fsn = tmp;
      /* Fix the TSN included */
      tmp = control->sinfo_tsn;
      control->sinfo_tsn = chk->rec.data.tsn;
      chk->rec.data.tsn = tmp;
      /* Fix the PPID included */
      tmp = control->sinfo_ppid;
      control->sinfo_ppid = chk->rec.data.ppid;
      chk->rec.data.ppid = tmp;
      /* Fix tail pointer */
      /* chk now carries the old (higher-FSN) first; queue it below. */
      goto place_chunk;
    }
    /* First FIRST fragment seen: it becomes the control's data. */
    control->first_frag_seen = 1;
    control->fsn_included = chk->rec.data.fsn;
    control->top_fsn = chk->rec.data.fsn;
    control->sinfo_tsn = chk->rec.data.tsn;
    control->sinfo_ppid = chk->rec.data.ppid;
    control->data = chk->data;
    sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
    chk->data = NULL;
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    sctp_setup_tail_pointer(control);
    return;
  }
place_chunk:
  /* Sorted insert of chk into control->reasm by FSN (serial arithmetic). */
  inserted = 0;
  TAILQ_FOREACH(at, &control->reasm, sctp_next) {
    if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
      /*
       * This one in queue is bigger than the new one, insert
       * the new one before at.
       */
      asoc->size_on_reasm_queue += chk->send_size;
      sctp_ucount_incr(asoc->cnt_on_reasm_queue);
      inserted = 1;
      TAILQ_INSERT_BEFORE(at, chk, sctp_next);
      break;
    } else if (at->rec.data.fsn == chk->rec.data.fsn) {
      /*
       * They sent a duplicate fsn number. This
       * really should not happen since the FSN is
       * a TSN and it should have been dropped earlier.
       */
      sctp_abort_in_reasm(stcb, control, chk,
                          abort_flag,
                          SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
      return;
    }
  }
  if (inserted == 0) {
    /* Its at the end */
    asoc->size_on_reasm_queue += chk->send_size;
    sctp_ucount_incr(asoc->cnt_on_reasm_queue);
    control->top_fsn = chk->rec.data.fsn;
    TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
  }
}
1062
1063
/*
 * Walk the stream's unordered and ordered reassembly queues and deliver
 * everything that is ready onto the socket read queue.  Handles three
 * phases: (1) old-format unordered data via sctp_handle_old_unordered_data,
 * (2) the remaining unordered controls, (3) ordered controls in MID order
 * starting at last_mid_delivered + 1, with partial delivery (PD-API)
 * kicked off when a still-incomplete message reaches pd_point bytes.
 *
 * Returns the number of complete ordered messages delivered (ret), or 0
 * when delivery had to stop because a PD-API is in progress.
 */
static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
                         struct sctp_stream_in *strm, int inp_read_lock_held)
{
  /*
   * Given a stream, strm, see if any of
   * the SSN's on it that are fragmented
   * are ready to deliver. If so go ahead
   * and place them on the read queue. In
   * so placing if we have hit the end, then
   * we need to remove them from the stream's queue.
   */
  struct sctp_queued_to_read *control, *nctl = NULL;
  uint32_t next_to_del;
  uint32_t pd_point;
  int ret = 0;

  /* PD-API threshold: bounded by a fraction of the receive buffer. */
  if (stcb->sctp_socket) {
    pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
             stcb->sctp_ep->partial_delivery_point);
  } else {
    pd_point = stcb->sctp_ep->partial_delivery_point;
  }
  control = TAILQ_FIRST(&strm->uno_inqueue);

  if ((control != NULL) &&
      (asoc->idata_supported == 0)) {
    /* Special handling needed for "old" data format */
    if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
      goto done_un;
    }
  }
  if (strm->pd_api_started) {
    /* Can't add more */
    return (0);
  }
  /* Phase 2: deliver any complete unordered messages (I-DATA path). */
  while (control) {
    SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
      control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
    nctl = TAILQ_NEXT(control, next_instrm);
    if (control->end_added) {
      /* We just put the last bit on */
      if (control->on_strm_q) {
#ifdef INVARIANTS
        if (control->on_strm_q != SCTP_ON_UNORDERED) {
          panic("Huh control: %p on_q: %d -- not unordered?",
                control, control->on_strm_q);
        }
#endif
        SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
        TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
        if (asoc->size_on_all_streams >= control->length) {
          asoc->size_on_all_streams -= control->length;
        } else {
#ifdef INVARIANTS
          panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
          asoc->size_on_all_streams = 0;
#endif
        }
        sctp_ucount_decr(asoc->cnt_on_all_streams);
        control->on_strm_q = 0;
      }
      if (control->on_read_q == 0) {
        sctp_add_to_readq(stcb->sctp_ep, stcb,
              control,
              &stcb->sctp_socket->so_rcv, control->end_added,
              inp_read_lock_held, SCTP_SO_NOT_LOCKED);
      }
    } else {
      /* Can we do a PD-API for this un-ordered guy? */
      if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
        strm->pd_api_started = 1;
        control->pdapi_started = 1;
        sctp_add_to_readq(stcb->sctp_ep, stcb,
              control,
              &stcb->sctp_socket->so_rcv, control->end_added,
              inp_read_lock_held, SCTP_SO_NOT_LOCKED);

        break;
      }
    }
    control = nctl;
  }
done_un:
  /* Phase 3: the ordered queue. */
  control = TAILQ_FIRST(&strm->inqueue);
  if (strm->pd_api_started) {
    /* Can't add more */
    return (0);
  }
  if (control == NULL) {
    return (ret);
  }
  if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
    /* Ok the guy at the top was being partially delivered
     * completed, so we remove it. Note
     * the pd_api flag was taken off when the
     * chunk was merged on in sctp_queue_data_for_reasm below.
     */
    nctl = TAILQ_NEXT(control, next_instrm);
    SCTPDBG(SCTP_DEBUG_XXX,
      "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
      control, control->end_added, control->mid,
      control->top_fsn, control->fsn_included,
      strm->last_mid_delivered);
    if (control->end_added) {
      if (control->on_strm_q) {
#ifdef INVARIANTS
        if (control->on_strm_q != SCTP_ON_ORDERED) {
          panic("Huh control: %p on_q: %d -- not ordered?",
                control, control->on_strm_q);
        }
#endif
        SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
        TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
        if (asoc->size_on_all_streams >= control->length) {
          asoc->size_on_all_streams -= control->length;
        } else {
#ifdef INVARIANTS
          panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
          asoc->size_on_all_streams = 0;
#endif
        }
        sctp_ucount_decr(asoc->cnt_on_all_streams);
        control->on_strm_q = 0;
      }
      if (strm->pd_api_started && control->pdapi_started) {
        control->pdapi_started = 0;
        strm->pd_api_started = 0;
      }
      if (control->on_read_q == 0) {
        sctp_add_to_readq(stcb->sctp_ep, stcb,
              control,
              &stcb->sctp_socket->so_rcv, control->end_added,
              inp_read_lock_held, SCTP_SO_NOT_LOCKED);
      }
      /* Advance to the next ordered control. */
      control = nctl;
    }
  }
  if (strm->pd_api_started) {
    /* Can't add more must have gotten an un-ordered above being partially delivered. */
    return (0);
  }
deliver_more:
  /* Deliver consecutive MIDs as long as each message is ready. */
  next_to_del = strm->last_mid_delivered + 1;
  if (control) {
    SCTPDBG(SCTP_DEBUG_XXX,
      "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
      control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
      next_to_del);
    nctl = TAILQ_NEXT(control, next_instrm);
    if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
        (control->first_frag_seen)) {
      int done;

      /* Ok we can deliver it onto the stream. */
      if (control->end_added) {
        /* We are done with it afterwards */
        if (control->on_strm_q) {
#ifdef INVARIANTS
          if (control->on_strm_q != SCTP_ON_ORDERED) {
            panic("Huh control: %p on_q: %d -- not ordered?",
                  control, control->on_strm_q);
          }
#endif
          SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
          TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
          if (asoc->size_on_all_streams >= control->length) {
            asoc->size_on_all_streams -= control->length;
          } else {
#ifdef INVARIANTS
            panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
            asoc->size_on_all_streams = 0;
#endif
          }
          sctp_ucount_decr(asoc->cnt_on_all_streams);
          control->on_strm_q = 0;
        }
        ret++;
      }
      if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
        /* A singleton now slipping through - mark it non-revokable too */
        sctp_mark_non_revokable(asoc, control->sinfo_tsn);
      } else if (control->end_added == 0) {
        /* Check if we can defer adding until its all there */
        if ((control->length < pd_point) || (strm->pd_api_started)) {
          /* Don't need it or cannot add more (one being delivered that way) */
          goto out;
        }
      }
      done = (control->end_added) && (control->last_frag_seen);
      if (control->on_read_q == 0) {
        if (!done) {
          /* Starting a PD-API: move accounting to the read queue. */
          if (asoc->size_on_all_streams >= control->length) {
            asoc->size_on_all_streams -= control->length;
          } else {
#ifdef INVARIANTS
            panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
            asoc->size_on_all_streams = 0;
#endif
          }
          strm->pd_api_started = 1;
          control->pdapi_started = 1;
        }
        sctp_add_to_readq(stcb->sctp_ep, stcb,
              control,
              &stcb->sctp_socket->so_rcv, control->end_added,
              inp_read_lock_held, SCTP_SO_NOT_LOCKED);
      }
      strm->last_mid_delivered = next_to_del;
      if (done) {
        control = nctl;
        goto deliver_more;
      }
    }
  }
out:
  return (ret);
}
1285
1286
/*
 * Merge the data of fragment 'chk' onto 'control', updating reassembly
 * accounting, and free the chunk.  When the control is already on the
 * socket read queue (PD-API in progress) the INP read lock is taken
 * here unless the caller already holds it (hold_rlock != 0).
 *
 * Returns the number of bytes appended to control->data (0 when the
 * chunk's mbufs became the control's data, or when the socket can no
 * longer read and the merge was skipped).
 */
uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
      struct sctp_stream_in *strm,
      struct sctp_tcb *stcb, struct sctp_association *asoc,
      struct sctp_tmit_chunk *chk, int hold_rlock)
{
  /*
   * Given a control and a chunk, merge the
   * data from the chk onto the control and free
   * up the chunk resources.
   */
  uint32_t added = 0;
  bool i_locked = false;

  if (control->on_read_q) {
    if (hold_rlock == 0) {
      /* Its being pd-api'd so we must do some locks. */
      SCTP_INP_READ_LOCK(stcb->sctp_ep);
      i_locked = true;
    }
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
      /* Reader is gone: skip the merge, still free the chunk below. */
      goto out;
    }
  }
  if (control->data == NULL) {
    control->data = chk->data;
    sctp_setup_tail_pointer(control);
  } else {
    sctp_add_to_tail_pointer(control, chk->data, &added);
  }
  control->fsn_included = chk->rec.data.fsn;
  /* The fragment leaves the reassembly queue accounting. */
  asoc->size_on_reasm_queue -= chk->send_size;
  sctp_ucount_decr(asoc->cnt_on_reasm_queue);
  sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
  chk->data = NULL;
  if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
    control->first_frag_seen = 1;
    control->sinfo_tsn = chk->rec.data.tsn;
    control->sinfo_ppid = chk->rec.data.ppid;
  }
  if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
    /* Its complete */
    if ((control->on_strm_q) && (control->on_read_q)) {
      if (control->pdapi_started) {
        control->pdapi_started = 0;
        strm->pd_api_started = 0;
      }
      if (control->on_strm_q == SCTP_ON_UNORDERED) {
        /* Unordered */
        TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
        control->on_strm_q = 0;
      } else if (control->on_strm_q == SCTP_ON_ORDERED) {
        /* Ordered */
        TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
        /*
         * Don't need to decrement size_on_all_streams,
         * since control is on the read queue.
         */
        sctp_ucount_decr(asoc->cnt_on_all_streams);
        control->on_strm_q = 0;
#ifdef INVARIANTS
      } else if (control->on_strm_q) {
        panic("Unknown state on ctrl: %p on_strm_q: %d", control,
              control->on_strm_q);
#endif
      }
    }
    control->end_added = 1;
    control->last_frag_seen = 1;
  }
out:
  if (i_locked) {
    SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
  }
  /* The chunk's mbufs were either transferred or never touched; free it. */
  sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
  return (added);
}
1363
1364
/*
1365
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
1366
 * queue, see if anything can be delivered. If so pull it off (or as much as
1367
 * we can. If we run out of space then we must dump what we can and set the
1368
 * appropriate flag to say we queued what we could.
1369
 */
1370
static void
1371
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1372
        struct sctp_queued_to_read *control,
1373
        struct sctp_tmit_chunk *chk,
1374
        int created_control,
1375
        int *abort_flag, uint32_t tsn)
1376
3.73k
{
1377
3.73k
  uint32_t next_fsn;
1378
3.73k
  struct sctp_tmit_chunk *at, *nat;
1379
3.73k
  struct sctp_stream_in *strm;
1380
3.73k
  int do_wakeup, unordered;
1381
3.73k
  uint32_t lenadded;
1382
1383
3.73k
  strm = &asoc->strmin[control->sinfo_stream];
1384
  /*
1385
   * For old un-ordered data chunks.
1386
   */
1387
3.73k
  if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1388
2.21k
    unordered = 1;
1389
2.21k
  } else {
1390
1.51k
    unordered = 0;
1391
1.51k
  }
1392
  /* Must be added to the stream-in queue */
1393
3.73k
  if (created_control) {
1394
2.19k
    if ((unordered == 0) || (asoc->idata_supported)) {
1395
1.73k
      sctp_ucount_incr(asoc->cnt_on_all_streams);
1396
1.73k
    }
1397
2.19k
    if (sctp_place_control_in_stream(strm, asoc, control)) {
1398
      /* Duplicate SSN? */
1399
0
      sctp_abort_in_reasm(stcb, control, chk,
1400
0
              abort_flag,
1401
0
              SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1402
0
      sctp_clean_up_control(stcb, control);
1403
0
      return;
1404
0
    }
1405
2.19k
    if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1406
      /* Ok we created this control and now
1407
       * lets validate that its legal i.e. there
1408
       * is a B bit set, if not and we have
1409
       * up to the cum-ack then its invalid.
1410
       */
1411
25
      if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1412
3
        sctp_abort_in_reasm(stcb, control, chk,
1413
3
                            abort_flag,
1414
3
                            SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1415
3
        return;
1416
3
      }
1417
25
    }
1418
2.19k
  }
1419
3.72k
  if ((asoc->idata_supported == 0) && (unordered == 1)) {
1420
1.49k
    sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1421
1.49k
    return;
1422
1.49k
  }
1423
  /*
1424
   * Ok we must queue the chunk into the reasembly portion:
1425
   *  o if its the first it goes to the control mbuf.
1426
   *  o if its not first but the next in sequence it goes to the control,
1427
   *    and each succeeding one in order also goes.
1428
   *  o if its not in order we place it on the list in its place.
1429
   */
1430
2.23k
  if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1431
    /* Its the very first one. */
1432
832
    SCTPDBG(SCTP_DEBUG_XXX,
1433
832
      "chunk is a first fsn: %u becomes fsn_included\n",
1434
832
      chk->rec.data.fsn);
1435
832
    if (control->first_frag_seen) {
1436
      /*
1437
       * Error on senders part, they either
1438
       * sent us two data chunks with FIRST,
1439
       * or they sent two un-ordered chunks that
1440
       * were fragmented at the same time in the same stream.
1441
       */
1442
3
      sctp_abort_in_reasm(stcb, control, chk,
1443
3
                          abort_flag,
1444
3
                          SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1445
3
      return;
1446
3
    }
1447
829
    control->first_frag_seen = 1;
1448
829
    control->sinfo_ppid = chk->rec.data.ppid;
1449
829
    control->sinfo_tsn = chk->rec.data.tsn;
1450
829
    control->fsn_included = chk->rec.data.fsn;
1451
829
    control->data = chk->data;
1452
829
    sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1453
829
    chk->data = NULL;
1454
829
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1455
829
    sctp_setup_tail_pointer(control);
1456
829
    asoc->size_on_all_streams += control->length;
1457
1.40k
  } else {
1458
    /* Place the chunk in our list */
1459
1.40k
    int inserted=0;
1460
1.40k
    if (control->last_frag_seen == 0) {
1461
      /* Still willing to raise highest FSN seen */
1462
1.23k
      if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1463
153
        SCTPDBG(SCTP_DEBUG_XXX,
1464
153
          "We have a new top_fsn: %u\n",
1465
153
          chk->rec.data.fsn);
1466
153
        control->top_fsn = chk->rec.data.fsn;
1467
153
      }
1468
1.23k
      if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1469
455
        SCTPDBG(SCTP_DEBUG_XXX,
1470
455
          "The last fsn is now in place fsn: %u\n",
1471
455
          chk->rec.data.fsn);
1472
455
        control->last_frag_seen = 1;
1473
455
        if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
1474
47
          SCTPDBG(SCTP_DEBUG_XXX,
1475
47
            "New fsn: %u is not at top_fsn: %u -- abort\n",
1476
47
            chk->rec.data.fsn,
1477
47
            control->top_fsn);
1478
47
          sctp_abort_in_reasm(stcb, control, chk,
1479
47
                  abort_flag,
1480
47
                  SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1481
47
          return;
1482
47
        }
1483
455
      }
1484
1.18k
      if (asoc->idata_supported || control->first_frag_seen) {
1485
        /*
1486
         * For IDATA we always check since we know that
1487
         * the first fragment is 0. For old DATA we have
1488
         * to receive the first before we know the first FSN
1489
         * (which is the TSN).
1490
         */
1491
729
        if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1492
          /* We have already delivered up to this so its a dup */
1493
68
          sctp_abort_in_reasm(stcb, control, chk,
1494
68
                  abort_flag,
1495
68
                  SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1496
68
          return;
1497
68
        }
1498
729
      }
1499
1.18k
    } else {
1500
169
      if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1501
        /* Second last? huh? */
1502
2
        SCTPDBG(SCTP_DEBUG_XXX,
1503
2
          "Duplicate last fsn: %u (top: %u) -- abort\n",
1504
2
          chk->rec.data.fsn, control->top_fsn);
1505
2
        sctp_abort_in_reasm(stcb, control,
1506
2
                chk, abort_flag,
1507
2
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1508
2
        return;
1509
2
      }
1510
167
      if (asoc->idata_supported || control->first_frag_seen) {
1511
        /*
1512
         * For IDATA we always check since we know that
1513
         * the first fragment is 0. For old DATA we have
1514
         * to receive the first before we know the first FSN
1515
         * (which is the TSN).
1516
         */
1517
1518
134
        if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1519
          /* We have already delivered up to this so its a dup */
1520
25
          SCTPDBG(SCTP_DEBUG_XXX,
1521
25
            "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1522
25
            chk->rec.data.fsn, control->fsn_included);
1523
25
          sctp_abort_in_reasm(stcb, control, chk,
1524
25
                  abort_flag,
1525
25
                  SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1526
25
          return;
1527
25
        }
1528
134
      }
1529
      /* validate not beyond top FSN if we have seen last one */
1530
142
      if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1531
46
        SCTPDBG(SCTP_DEBUG_XXX,
1532
46
          "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1533
46
          chk->rec.data.fsn,
1534
46
          control->top_fsn);
1535
46
        sctp_abort_in_reasm(stcb, control, chk,
1536
46
                abort_flag,
1537
46
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1538
46
        return;
1539
46
      }
1540
142
    }
1541
    /*
1542
     * If we reach here, we need to place the
1543
     * new chunk in the reassembly for this
1544
     * control.
1545
     */
1546
1.21k
    SCTPDBG(SCTP_DEBUG_XXX,
1547
1.21k
      "chunk is a not first fsn: %u needs to be inserted\n",
1548
1.21k
      chk->rec.data.fsn);
1549
1.21k
    TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1550
484
      if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1551
186
        if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1552
          /* Last not at the end? huh? */
1553
1
          SCTPDBG(SCTP_DEBUG_XXX,
1554
1
                  "Last fragment not last in list: -- abort\n");
1555
1
          sctp_abort_in_reasm(stcb, control,
1556
1
                              chk, abort_flag,
1557
1
                              SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1558
1
          return;
1559
1
        }
1560
        /*
1561
         * This one in queue is bigger than the new one, insert
1562
         * the new one before at.
1563
         */
1564
185
        SCTPDBG(SCTP_DEBUG_XXX,
1565
185
          "Insert it before fsn: %u\n",
1566
185
          at->rec.data.fsn);
1567
185
        asoc->size_on_reasm_queue += chk->send_size;
1568
185
        sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1569
185
        TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1570
185
        inserted = 1;
1571
185
        break;
1572
298
      } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1573
        /* Gak, He sent me a duplicate str seq number */
1574
        /*
1575
         * foo bar, I guess I will just free this new guy,
1576
         * should we abort too? FIX ME MAYBE? Or it COULD be
1577
         * that the SSN's have wrapped. Maybe I should
1578
         * compare to TSN somehow... sigh for now just blow
1579
         * away the chunk!
1580
         */
1581
2
        SCTPDBG(SCTP_DEBUG_XXX,
1582
2
          "Duplicate to fsn: %u -- abort\n",
1583
2
          at->rec.data.fsn);
1584
2
        sctp_abort_in_reasm(stcb, control,
1585
2
                chk, abort_flag,
1586
2
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1587
2
        return;
1588
2
      }
1589
484
    }
1590
1.20k
    if (inserted == 0) {
1591
      /* Goes on the end */
1592
1.02k
      SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1593
1.02k
        chk->rec.data.fsn);
1594
1.02k
      asoc->size_on_reasm_queue += chk->send_size;
1595
1.02k
      sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1596
1.02k
      TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1597
1.02k
    }
1598
1.20k
  }
1599
  /*
1600
   * Ok lets see if we can suck any up into the control
1601
   * structure that are in seq if it makes sense.
1602
   */
1603
2.03k
  do_wakeup = 0;
1604
  /*
1605
   * If the first fragment has not been
1606
   * seen there is no sense in looking.
1607
   */
1608
2.03k
  if (control->first_frag_seen) {
1609
969
    next_fsn = control->fsn_included + 1;
1610
969
    TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1611
180
      if (at->rec.data.fsn == next_fsn) {
1612
        /* We can add this one now to the control */
1613
62
        SCTPDBG(SCTP_DEBUG_XXX,
1614
62
          "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1615
62
          control, at,
1616
62
          at->rec.data.fsn,
1617
62
          next_fsn, control->fsn_included);
1618
62
        TAILQ_REMOVE(&control->reasm, at, sctp_next);
1619
62
        lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1620
62
        if (control->on_read_q) {
1621
0
          do_wakeup = 1;
1622
62
        } else {
1623
          /*
1624
           * We only add to the size-on-all-streams
1625
           * if its not on the read q. The read q
1626
           * flag will cause a sballoc so its accounted
1627
           * for there.
1628
           */
1629
62
          asoc->size_on_all_streams += lenadded;
1630
62
        }
1631
62
        next_fsn++;
1632
62
        if (control->end_added && control->pdapi_started) {
1633
0
          if (strm->pd_api_started) {
1634
0
            strm->pd_api_started = 0;
1635
0
            control->pdapi_started = 0;
1636
0
          }
1637
0
          if (control->on_read_q == 0) {
1638
0
            sctp_add_to_readq(stcb->sctp_ep, stcb,
1639
0
                  control,
1640
0
                  &stcb->sctp_socket->so_rcv, control->end_added,
1641
0
                  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1642
0
          }
1643
0
          break;
1644
0
        }
1645
118
      } else {
1646
118
        break;
1647
118
      }
1648
180
    }
1649
969
  }
1650
2.03k
  if (do_wakeup) {
1651
0
#if defined(__Userspace__)
1652
0
    sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD);
1653
0
#endif
1654
    /* Need to wakeup the reader */
1655
0
    sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1656
0
  }
1657
2.03k
}
1658
1659
static struct sctp_queued_to_read *
1660
sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1661
4.99k
{
1662
4.99k
  struct sctp_queued_to_read *control;
1663
1664
4.99k
  if (ordered) {
1665
2.45k
    TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1666
2.45k
      if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1667
429
        break;
1668
429
      }
1669
2.45k
    }
1670
2.59k
  } else {
1671
2.59k
    if (idata_supported) {
1672
2.50k
      TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1673
2.50k
        if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1674
74
          break;
1675
74
        }
1676
2.50k
      }
1677
1.85k
    } else {
1678
1.85k
      control = TAILQ_FIRST(&strm->uno_inqueue);
1679
1.85k
    }
1680
2.59k
  }
1681
4.99k
  return (control);
1682
4.99k
}
1683
1684
/*
 * Process one received DATA or I-DATA chunk for the association.
 *
 * Parses the chunk header at 'offset' in *m, validates it (non-empty,
 * non-duplicate TSN, in-window gap, valid stream id, consistent
 * fragmentation/ordering state), and either delivers it to the socket
 * read queue, queues it for in-stream reordering, or hands it to the
 * reassembly machinery.
 *
 * Returns 1 if the chunk was accepted and marked present; 0 if it was
 * dropped (duplicate, out of window, no memory, rwnd full) or if the
 * association was aborted (in which case *abort_flag is set).
 * Side effects: may set *high_tsn, *break_flag (stop processing this
 * packet), *abort_flag, and sets *m = NULL when the caller's mbuf
 * chain was consumed (last_chunk case) so the caller must not free it.
 */
static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
        struct mbuf **m, int offset,  int chk_length,
        struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
        int *break_flag, int last_chunk, uint8_t chk_type)
{
  struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
  struct sctp_stream_in *strm;
  uint32_t tsn, fsn, gap, mid;
  struct mbuf *dmbuf;
  int the_len;
  int need_reasm_check = 0;
  uint16_t sid;
  struct mbuf *op_err;
  char msg[SCTP_DIAG_INFO_LEN];
  struct sctp_queued_to_read *control, *ncontrol;
  uint32_t ppid;
  uint8_t chk_flags;
  struct sctp_stream_reset_list *liste;
  int ordered;
  size_t clen;
  int created_control = 0;

  /* Pull the fixed chunk header fields; I-DATA and DATA differ in layout. */
  if (chk_type == SCTP_IDATA) {
    struct sctp_idata_chunk *chunk, chunk_buf;

    chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
                                                     sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
    chk_flags = chunk->ch.chunk_flags;
    clen = sizeof(struct sctp_idata_chunk);
    tsn = ntohl(chunk->dp.tsn);
    sid = ntohs(chunk->dp.sid);
    mid = ntohl(chunk->dp.mid);
    if (chk_flags & SCTP_DATA_FIRST_FRAG) {
      /* First fragment carries the PPID; its FSN is implicitly 0. */
      fsn = 0;
      ppid = chunk->dp.ppid_fsn.ppid;
    } else {
      fsn = ntohl(chunk->dp.ppid_fsn.fsn);
      ppid = 0xffffffff; /* Use as an invalid value. */
    }
  } else {
    struct sctp_data_chunk *chunk, chunk_buf;

    chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
                                                    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
    chk_flags = chunk->ch.chunk_flags;
    clen = sizeof(struct sctp_data_chunk);
    tsn = ntohl(chunk->dp.tsn);
    sid = ntohs(chunk->dp.sid);
    /* Old DATA has a 16-bit SSN; widen it into the MID slot. */
    mid = (uint32_t)(ntohs(chunk->dp.ssn));
    /* For old DATA the TSN doubles as the fragment sequence number. */
    fsn = tsn;
    ppid = chunk->dp.ppid;
  }
  if ((size_t)chk_length == clen) {
    /*
     * Need to send an abort since we had a
     * empty data chunk.
     */
    op_err = sctp_generate_no_user_data_cause(tsn);
    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
    sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
    *abort_flag = 1;
    return (0);
  }
  if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
    asoc->send_sack = 1;
  }
  ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
    sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
  }
  if (stcb == NULL) {
    return (0);
  }
  SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
  if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
    /* It is a duplicate */
    SCTP_STAT_INCR(sctps_recvdupdata);
    if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
      /* Record a dup for the next outbound sack */
      asoc->dup_tsns[asoc->numduptsns] = tsn;
      asoc->numduptsns++;
    }
    asoc->send_sack = 1;
    return (0);
  }
  /* Calculate the number of TSN's between the base and this TSN */
  SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
  if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
    /* Can't hold the bit in the mapping at max array, toss it */
    return (0);
  }
  if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
    SCTP_TCB_LOCK_ASSERT(stcb);
    if (sctp_expand_mapping_array(asoc, gap)) {
      /* Can't expand, drop it */
      return (0);
    }
  }
  if (SCTP_TSN_GT(tsn, *high_tsn)) {
    *high_tsn = tsn;
  }
  /* See if we have received this one already */
  if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
      SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
    SCTP_STAT_INCR(sctps_recvdupdata);
    if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
      /* Record a dup for the next outbound sack */
      asoc->dup_tsns[asoc->numduptsns] = tsn;
      asoc->numduptsns++;
    }
    asoc->send_sack = 1;
    return (0);
  }
  /*
   * Check to see about the GONE flag, duplicates would cause a sack
   * to be sent up above
   */
  if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
       (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
       (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
    /*
     * wait a minute, this guy is gone, there is no longer a
     * receiver. Send peer an ABORT!
     */
    op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
    sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
    *abort_flag = 1;
    return (0);
  }
  /*
   * Now before going further we see if there is room. If NOT then we
   * MAY let one through only IF this TSN is the one we are waiting
   * for on a partial delivery API.
   */

  /* Is the stream valid? */
  if (sid >= asoc->streamincnt) {
    struct sctp_error_invalid_stream *cause;

    op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
                                   0, M_NOWAIT, 1, MT_DATA);
    if (op_err != NULL) {
      /* add some space up front so prepend will work well */
      SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
      cause = mtod(op_err, struct sctp_error_invalid_stream *);
      /*
       * Error causes are just param's and this one has
       * two back to back phdr, one with the error type
       * and size, the other with the streamid and a rsvd
       */
      SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
      cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
      cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
      cause->stream_id = htons(sid);
      cause->reserved = htons(0);
      sctp_queue_op_err(stcb, op_err);
    }
    SCTP_STAT_INCR(sctps_badsid);
    SCTP_TCB_LOCK_ASSERT(stcb);
    /* Mark the TSN received anyway so the peer does not retransmit it. */
    SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
    if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
      asoc->highest_tsn_inside_nr_map = tsn;
    }
    if (tsn == (asoc->cumulative_tsn + 1)) {
      /* Update cum-ack */
      asoc->cumulative_tsn = tsn;
    }
    return (0);
  }
  /*
   * If its a fragmented message, lets see if we can
   * find the control on the reassembly queues.
   */
  if ((chk_type == SCTP_IDATA) &&
      ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
      (fsn == 0)) {
    /*
     *  The first *must* be fsn 0, and other
     *  (middle/end) pieces can *not* be fsn 0.
     * XXX: This can happen in case of a wrap around.
     *      Ignore is for now.
     */
    SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
    goto err_out;
  }
  control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
  SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
    chk_flags, control);
  if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
    /* See if we can find the re-assembly entity */
    if (control != NULL) {
      /* We found something, does it belong? */
      if (ordered && (mid != control->mid)) {
        SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
      err_out:
        /* Shared protocol-violation abort path; msg is already filled in. */
        op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return (0);
      }
      if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
        /* We can't have a switched order with an unordered chunk */
        SCTP_SNPRINTF(msg, sizeof(msg),
                      "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
                      tsn);
        goto err_out;
      }
      if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
        /* We can't have a switched unordered with a ordered chunk */
        SCTP_SNPRINTF(msg, sizeof(msg),
                     "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
                     tsn);
        goto err_out;
      }
    }
  } else {
    /* Its a complete segment. Lets validate we
     * don't have a re-assembly going on with
     * the same Stream/Seq (for ordered) or in
     * the same Stream for unordered.
     */
    if (control != NULL) {
      if (ordered || asoc->idata_supported) {
        SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
          chk_flags, mid);
        SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
        goto err_out;
      } else {
        if ((control->first_frag_seen) &&
            (tsn == control->fsn_included + 1) &&
            (control->end_added == 0)) {
          SCTP_SNPRINTF(msg, sizeof(msg),
                        "Illegal message sequence, missing end for MID: %8.8x",
                        control->fsn_included);
          goto err_out;
        } else {
          /* Unrelated old-DATA reassembly; this complete chunk gets its own control. */
          control = NULL;
        }
      }
    }
  }
  /* now do the tests */
  if (((asoc->cnt_on_all_streams +
        asoc->cnt_on_reasm_queue +
        asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
      (((int)asoc->my_rwnd) <= 0)) {
    /*
     * When we have NO room in the rwnd we check to make sure
     * the reader is doing its job...
     */
    if (SCTP_SBAVAIL(&stcb->sctp_socket->so_rcv) > 0) {
      /* some to read, wake-up */
#if defined(__APPLE__) && !defined(__Userspace__)
      struct socket *so;

      so = SCTP_INP_SO(stcb->sctp_ep);
      atomic_add_int(&stcb->asoc.refcnt, 1);
      SCTP_TCB_UNLOCK(stcb);
      SCTP_SOCKET_LOCK(so, 1);
      SCTP_TCB_LOCK(stcb);
      atomic_subtract_int(&stcb->asoc.refcnt, 1);
      if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
        /* assoc was freed while we were unlocked */
        SCTP_SOCKET_UNLOCK(so, 1);
        return (0);
      }
#endif
      sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) && !defined(__Userspace__)
      SCTP_SOCKET_UNLOCK(so, 1);
#endif
    }
    /* now is it in the mapping array of what we have accepted? */
    if (chk_type == SCTP_DATA) {
      if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
          SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
        /* Nope not in the valid range dump it */
      dump_packet:
        sctp_set_rwnd(stcb, asoc);
        if ((asoc->cnt_on_all_streams +
             asoc->cnt_on_reasm_queue +
             asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
          SCTP_STAT_INCR(sctps_datadropchklmt);
        } else {
          SCTP_STAT_INCR(sctps_datadroprwnd);
        }
        *break_flag = 1;
        return (0);
      }
    } else {
      /* I-DATA: only admit fragments of an already-started message. */
      if (control == NULL) {
        goto dump_packet;
      }
      if (SCTP_TSN_GT(fsn, control->top_fsn)) {
        goto dump_packet;
      }
    }
  }
#ifdef SCTP_ASOCLOG_OF_TSNS
  /*
   * NOTE(review): this debug-only section assigns 'chunk_flags' but the
   * local variable is named 'chk_flags' -- looks like it would not
   * compile with SCTP_ASOCLOG_OF_TSNS defined; confirm against a build
   * with that option enabled.
   */
  SCTP_TCB_LOCK_ASSERT(stcb);
  if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
    asoc->tsn_in_at = 0;
    asoc->tsn_in_wrapped = 1;
  }
  asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
  asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
  asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
  asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
  asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
  asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
  asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
  asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
  asoc->tsn_in_at++;
#endif
  /*
   * Before we continue lets validate that we are not being fooled by
   * an evil attacker. We can only have Nk chunks based on our TSN
   * spread allowed by the mapping array N * 8 bits, so there is no
   * way our stream sequence numbers could have wrapped. We of course
   * only validate the FIRST fragment so the bit must be set.
   */
  if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
      (TAILQ_EMPTY(&asoc->resetHead)) &&
      (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
      SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
    /* The incoming sseq is behind where we last delivered? */
    SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
      mid, asoc->strmin[sid].last_mid_delivered);

    if (asoc->idata_supported) {
      SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
                    asoc->strmin[sid].last_mid_delivered,
                    tsn,
                    sid,
                    mid);
    } else {
      SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
                    (uint16_t)asoc->strmin[sid].last_mid_delivered,
                    tsn,
                    sid,
                    (uint16_t)mid);
    }
    op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
    sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
    *abort_flag = 1;
    return (0);
  }
  if (chk_type == SCTP_IDATA) {
    the_len = (chk_length - sizeof(struct sctp_idata_chunk));
  } else {
    the_len = (chk_length - sizeof(struct sctp_data_chunk));
  }
  /* Obtain the payload: copy it out, or steal the chain for the last chunk. */
  if (last_chunk == 0) {
    if (chk_type == SCTP_IDATA) {
      dmbuf = SCTP_M_COPYM(*m,
               (offset + sizeof(struct sctp_idata_chunk)),
               the_len, M_NOWAIT);
    } else {
      dmbuf = SCTP_M_COPYM(*m,
               (offset + sizeof(struct sctp_data_chunk)),
               the_len, M_NOWAIT);
    }
#ifdef SCTP_MBUF_LOGGING
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
      sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
    }
#endif
  } else {
    /* We can steal the last chunk */
    int l_len;
    dmbuf = *m;
    /* lop off the top part */
    if (chk_type == SCTP_IDATA) {
      m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
    } else {
      m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
    }
    if (SCTP_BUF_NEXT(dmbuf) == NULL) {
      l_len = SCTP_BUF_LEN(dmbuf);
    } else {
      /* need to count up the size hopefully
       * does not hit this to often :-0
       */
      struct mbuf *lat;

      l_len = 0;
      for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
        l_len += SCTP_BUF_LEN(lat);
      }
    }
    if (l_len > the_len) {
      /* Trim the end round bytes off  too */
      m_adj(dmbuf, -(l_len - the_len));
    }
  }
  if (dmbuf == NULL) {
    SCTP_STAT_INCR(sctps_nomem);
    return (0);
  }
  /*
   * Now no matter what, we need a control, get one
   * if we don't have one (we may have gotten it
   * above when we found the message was fragmented
   */
  if (control == NULL) {
    sctp_alloc_a_readq(stcb, control);
    sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
             ppid,
             sid,
             chk_flags,
             NULL, fsn, mid);
    if (control == NULL) {
      SCTP_STAT_INCR(sctps_nomem);
      return (0);
    }
    if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
      struct mbuf *mm;

      /* Unfragmented: attach the payload and mark the message complete. */
      control->data = dmbuf;
      control->tail_mbuf = NULL;
      for (mm = control->data; mm; mm = mm->m_next) {
        control->length += SCTP_BUF_LEN(mm);
        if (SCTP_BUF_NEXT(mm) == NULL) {
          control->tail_mbuf = mm;
        }
      }
      control->end_added = 1;
      control->last_frag_seen = 1;
      control->first_frag_seen = 1;
      control->fsn_included = fsn;
      control->top_fsn = fsn;
    }
    created_control = 1;
  }
  SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
    chk_flags, ordered, mid, control);
  if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
      TAILQ_EMPTY(&asoc->resetHead) &&
      ((ordered == 0) ||
       (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
        TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
    /* Candidate for express delivery */
    /*
     * Its not fragmented, No PD-API is up, Nothing in the
     * delivery queue, Its un-ordered OR ordered and the next to
     * deliver AND nothing else is stuck on the stream queue,
     * And there is room for it in the socket buffer. Lets just
     * stuff it up the buffer....
     */
    SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
    if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
      asoc->highest_tsn_inside_nr_map = tsn;
    }
    SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
      control, mid);

    sctp_add_to_readq(stcb->sctp_ep, stcb,
                      control, &stcb->sctp_socket->so_rcv,
                      1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);

    if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
      /* for ordered, bump what we delivered */
      asoc->strmin[sid].last_mid_delivered++;
    }
    SCTP_STAT_INCR(sctps_recvexpress);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
      sctp_log_strm_del_alt(stcb, tsn, mid, sid,
                SCTP_STR_LOG_FROM_EXPRS_DEL);
    }
    /* Ownership of control moved to the read queue. */
    control = NULL;
    goto finish_express_del;
  }

  /* Now will we need a chunk too? */
  if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
    sctp_alloc_a_chunk(stcb, chk);
    if (chk == NULL) {
      /* No memory so we drop the chunk */
      SCTP_STAT_INCR(sctps_nomem);
      if (last_chunk == 0) {
        /* we copied it, free the copy */
        sctp_m_freem(dmbuf);
      }
      return (0);
    }
    chk->rec.data.tsn = tsn;
    chk->no_fr_allowed = 0;
    chk->rec.data.fsn = fsn;
    chk->rec.data.mid = mid;
    chk->rec.data.sid = sid;
    chk->rec.data.ppid = ppid;
    chk->rec.data.context = stcb->asoc.context;
    chk->rec.data.doing_fast_retransmit = 0;
    chk->rec.data.rcv_flags = chk_flags;
    chk->asoc = asoc;
    chk->send_size = the_len;
    chk->whoTo = net;
    SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
      chk,
      control, mid);
    /* chk holds a reference on the net until it is freed. */
    atomic_add_int(&net->ref_count, 1);
    chk->data = dmbuf;
  }
  /* Set the appropriate TSN mark */
  if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
    SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
    if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
      asoc->highest_tsn_inside_nr_map = tsn;
    }
  } else {
    SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
    if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
      asoc->highest_tsn_inside_map = tsn;
    }
  }
  /* Now is it complete (i.e. not fragmented)? */
  if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
    /*
     * Special check for when streams are resetting. We
     * could be more smart about this and check the
     * actual stream to see if it is not being reset..
     * that way we would not create a HOLB when amongst
     * streams being reset and those not being reset.
     *
     */
    if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
        SCTP_TSN_GT(tsn, liste->tsn)) {
      /*
       * yep its past where we need to reset... go
       * ahead and queue it.
       */
      if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
        /* first one on */
        TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
      } else {
        struct sctp_queued_to_read *lcontrol, *nlcontrol;
        unsigned char inserted = 0;
        /* Keep pending_reply_queue sorted by TSN. */
        TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
          if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
            continue;
          } else {
            /* found it */
            TAILQ_INSERT_BEFORE(lcontrol, control, next);
            inserted = 1;
            break;
          }
        }
        if (inserted == 0) {
          /*
           * must be put at end, use
           * prevP (all setup from
           * loop) to setup nextP.
           */
          TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
        }
      }
      goto finish_express_del;
    }
    if (chk_flags & SCTP_DATA_UNORDERED) {
      /* queue directly into socket buffer */
      SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
        control, mid);
      sctp_mark_non_revokable(asoc, control->sinfo_tsn);
      sctp_add_to_readq(stcb->sctp_ep, stcb,
                        control,
                        &stcb->sctp_socket->so_rcv, 1,
                        SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);

    } else {
      SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
        mid);
      sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
      if (*abort_flag) {
        if (last_chunk) {
          *m = NULL;
        }
        return (0);
      }
    }
    goto finish_express_del;
  }
  /* If we reach here its a reassembly */
  need_reasm_check = 1;
  SCTPDBG(SCTP_DEBUG_XXX,
    "Queue data to stream for reasm control: %p MID: %u\n",
    control, mid);
  sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
  if (*abort_flag) {
    /*
     * the assoc is now gone and chk was put onto the
     * reasm queue, which has all been freed.
     */
    if (last_chunk) {
      *m = NULL;
    }
    return (0);
  }
finish_express_del:
  /* Here we tidy up things */
  if (tsn == (asoc->cumulative_tsn + 1)) {
    /* Update cum-ack */
    asoc->cumulative_tsn = tsn;
  }
  if (last_chunk) {
    /* We consumed the caller's mbuf chain above; don't let it be freed. */
    *m = NULL;
  }
  if (ordered) {
    SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
  } else {
    SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
  }
  SCTP_STAT_INCR(sctps_recvdata);
  /* Set it present please */
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
    sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
  }
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
    sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
           asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
  }
  if (need_reasm_check) {
    (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
    need_reasm_check = 0;
  }
  /* check the special flag for stream resets */
  if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
      SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
    /*
     * we have finished working through the backlogged TSN's now
     * time to reset streams. 1: call reset function. 2: free
     * pending_reply space 3: distribute any chunks in
     * pending_reply_queue.
     */
    sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
    TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
    sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
    SCTP_FREE(liste, SCTP_M_STRESET);
    /*sa_ignore FREED_MEMORY*/
    liste = TAILQ_FIRST(&asoc->resetHead);
    if (TAILQ_EMPTY(&asoc->resetHead)) {
      /* All can be removed */
      TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
        TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
        strm = &asoc->strmin[control->sinfo_stream];
        sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
        if (*abort_flag) {
          return (0);
        }
        if (need_reasm_check) {
          (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
          need_reasm_check = 0;
        }
      }
    } else {
      /* Another reset is pending; only release chunks up to its TSN. */
      TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
        if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
          break;
        }
        /*
         * if control->sinfo_tsn is <= liste->tsn we can
         * process it which is the NOT of
         * control->sinfo_tsn > liste->tsn
         */
        TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
        strm = &asoc->strmin[control->sinfo_stream];
        sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
        if (*abort_flag) {
          return (0);
        }
        if (need_reasm_check) {
          (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
          need_reasm_check = 0;
        }
      }
    }
  }
  return (1);
}
2365
2366
/*
 * For each possible byte value v, sctp_map_lookup_tab[v] is the number of
 * consecutive one-bits in v starting at the least significant bit (the
 * "trailing ones" count), e.g. tab[0x01] == 1, tab[0x03] == 2, tab[0xff] == 8.
 * sctp_slide_mapping_arrays() uses it on the first mapping-array byte that is
 * not 0xff to finish counting contiguously received TSNs.
 */
static const int8_t sctp_map_lookup_tab[256] = {
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 4,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 5,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 4,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 6,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 4,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 5,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 4,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 7,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 4,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 5,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 4,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 6,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 4,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 5,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 4,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 8
};
2400
2401
/*
 * Recompute the cumulative TSN from the (nr_)mapping arrays and, when the
 * leading bytes of both arrays are fully acked, either clear the arrays
 * (everything up to the highest TSN is in) or slide their contents down so
 * the base TSN advances past the fully-acked prefix.
 */
void
sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
{
  /*
   * Now we also need to check the mapping array in a couple of ways.
   * 1) Did we move the cum-ack point?
   *
   * When you first glance at this you might think
   * that all entries that make up the position
   * of the cum-ack would be in the nr-mapping array
   * only.. i.e. things up to the cum-ack are always
   * deliverable. Thats true with one exception, when
   * its a fragmented message we may not deliver the data
   * until some threshold (or all of it) is in place. So
   * we must OR the nr_mapping_array and mapping_array to
   * get a true picture of the cum-ack.
   */
  struct sctp_association *asoc;
  int at;
  uint8_t val;
  int slide_from, slide_end, lgap, distance;
  uint32_t old_cumack, old_base, old_highest, highest_tsn;

  asoc = &stcb->asoc;

  /* Snapshot the pre-slide state; only used for SCTP_MAP_LOGGING below. */
  old_cumack = asoc->cumulative_tsn;
  old_base = asoc->mapping_array_base_tsn;
  old_highest = asoc->highest_tsn_inside_map;
  /*
   * We could probably improve this a small bit by calculating the
   * offset of the current cum-ack as the starting point.
   */
  /*
   * Count contiguously-received TSNs from the array base: 8 per all-ones
   * byte, plus the trailing-ones count of the first byte with a hole.
   * slide_from is left pointing at that first non-0xff byte.
   */
  at = 0;
  for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
    val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
    if (val == 0xff) {
      at += 8;
    } else {
      /* there is a 0 bit */
      at += sctp_map_lookup_tab[val];
      break;
    }
  }
  /* The new cum-ack is the TSN just before the first missing bit. */
  asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at-1);

  /* Sanity: the cum-ack must never exceed both highest-TSN trackers. */
  if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
            SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
#ifdef INVARIANTS
    panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
          asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
#else
    /* Non-INVARIANTS build: log, then repair by clamping both trackers. */
    SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
          asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
    sctp_print_mapping_array(asoc);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
      sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
    }
    asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
    asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
#endif
  }
  /* highest_tsn = max of the renegable and non-renegable trackers. */
  if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
    highest_tsn = asoc->highest_tsn_inside_nr_map;
  } else {
    highest_tsn = asoc->highest_tsn_inside_map;
  }
  if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
    /* The complete array was completed by a single FR */
    /* highest becomes the cum-ack */
    int clr;
#ifdef INVARIANTS
    unsigned int i;
#endif

    /* clear the array */
    clr = ((at+7) >> 3);
    if (clr > asoc->mapping_array_size) {
      clr = asoc->mapping_array_size;
    }
    memset(asoc->mapping_array, 0, clr);
    memset(asoc->nr_mapping_array, 0, clr);
#ifdef INVARIANTS
    /* Verify that clearing `clr` bytes really left both arrays empty. */
    for (i = 0; i < asoc->mapping_array_size; i++) {
      if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
        SCTP_PRINTF("Error Mapping array's not clean at clear\n");
        sctp_print_mapping_array(asoc);
      }
    }
#endif
    /* Restart the map at the TSN after the (now fully acked) cum-ack. */
    asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
    asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
  } else if (at >= 8) {
    /* we can slide the mapping array down */
    /* slide_from holds where we hit the first NON 0xff byte */

    /*
     * now calculate the ceiling of the move using our highest
     * TSN value
     */
    SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
    slide_end = (lgap >> 3);
    if (slide_end < slide_from) {
      sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
      panic("impossible slide");
#else
      SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
                  lgap, slide_end, slide_from, at);
      return;
#endif
    }
    if (slide_end > asoc->mapping_array_size) {
#ifdef INVARIANTS
      panic("would overrun buffer");
#else
      SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
                  asoc->mapping_array_size, slide_end);
      slide_end = asoc->mapping_array_size;
#endif
    }
    /* Number of bytes that must be kept (copied down to offset 0). */
    distance = (slide_end - slide_from) + 1;
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
      sctp_log_map(old_base, old_cumack, old_highest,
             SCTP_MAP_PREPARE_SLIDE);
      sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
             (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
    }
    if (distance + slide_from > asoc->mapping_array_size ||
        distance < 0) {
      /*
       * Here we do NOT slide forward the array so that
       * hopefully when more data comes in to fill it up
       * we will be able to slide it forward. Really I
       * don't think this should happen :-0
       */
      if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
        sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
               (uint32_t) asoc->mapping_array_size,
               SCTP_MAP_SLIDE_NONE);
      }
    } else {
      int ii;

      /* Shift the live bytes down, then zero the vacated tail. */
      for (ii = 0; ii < distance; ii++) {
        asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
        asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
      }
      for (ii = distance; ii < asoc->mapping_array_size; ii++) {
        asoc->mapping_array[ii] = 0;
        asoc->nr_mapping_array[ii] = 0;
      }
      /*
       * If a tracker sat exactly one TSN below the base (i.e. the
       * map was empty from its point of view), advance it in step
       * with the base so that relationship is preserved.
       */
      if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
        asoc->highest_tsn_inside_map += (slide_from << 3);
      }
      if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
        asoc->highest_tsn_inside_nr_map += (slide_from << 3);
      }
      /* Each slid byte advances the base by 8 TSNs. */
      asoc->mapping_array_base_tsn += (slide_from << 3);
      if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
        sctp_log_map(asoc->mapping_array_base_tsn,
               asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
               SCTP_MAP_SLIDE_RESULT);
      }
    }
  }
}
2567
2568
void
2569
sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2570
3.53k
{
2571
3.53k
  struct sctp_association *asoc;
2572
3.53k
  uint32_t highest_tsn;
2573
3.53k
  int is_a_gap;
2574
2575
3.53k
  sctp_slide_mapping_arrays(stcb);
2576
3.53k
  asoc = &stcb->asoc;
2577
3.53k
  if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2578
635
    highest_tsn = asoc->highest_tsn_inside_nr_map;
2579
2.90k
  } else {
2580
2.90k
    highest_tsn = asoc->highest_tsn_inside_map;
2581
2.90k
  }
2582
  /* Is there a gap now? */
2583
3.53k
  is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2584
2585
  /*
2586
   * Now we need to see if we need to queue a sack or just start the
2587
   * timer (if allowed).
2588
   */
2589
3.53k
  if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2590
    /*
2591
     * Ok special case, in SHUTDOWN-SENT case. here we
2592
     * maker sure SACK timer is off and instead send a
2593
     * SHUTDOWN and a SACK
2594
     */
2595
0
    if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2596
0
      sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2597
0
                      stcb->sctp_ep, stcb, NULL,
2598
0
                      SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2599
0
    }
2600
0
    sctp_send_shutdown(stcb,
2601
0
                       ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2602
0
    if (is_a_gap) {
2603
0
      sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2604
0
    }
2605
3.53k
  } else {
2606
    /*
2607
     * CMT DAC algorithm: increase number of packets
2608
     * received since last ack
2609
     */
2610
3.53k
    stcb->asoc.cmt_dac_pkts_rcvd++;
2611
2612
3.53k
    if ((stcb->asoc.send_sack == 1) ||      /* We need to send a SACK */
2613
154
        ((was_a_gap) && (is_a_gap == 0)) ||  /* was a gap, but no
2614
                                             * longer is one */
2615
154
        (stcb->asoc.numduptsns) ||          /* we have dup's */
2616
154
        (is_a_gap) ||                       /* is still a gap */
2617
65
        (stcb->asoc.delayed_ack == 0) ||    /* Delayed sack disabled */
2618
3.47k
        (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)) { /* hit limit of pkts */
2619
3.47k
      if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2620
0
          (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2621
0
          (stcb->asoc.send_sack == 0) &&
2622
0
          (stcb->asoc.numduptsns == 0) &&
2623
0
          (stcb->asoc.delayed_ack) &&
2624
0
          (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2625
        /*
2626
         * CMT DAC algorithm: With CMT,
2627
         * delay acks even in the face of
2628
         * reordering. Therefore, if acks
2629
         * that do not have to be sent
2630
         * because of the above reasons,
2631
         * will be delayed. That is, acks
2632
         * that would have been sent due to
2633
         * gap reports will be delayed with
2634
         * DAC. Start the delayed ack timer.
2635
         */
2636
0
        sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2637
0
                         stcb->sctp_ep, stcb, NULL);
2638
3.47k
      } else {
2639
        /*
2640
         * Ok we must build a SACK since the
2641
         * timer is pending, we got our
2642
         * first packet OR there are gaps or
2643
         * duplicates.
2644
         */
2645
3.47k
        sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2646
3.47k
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
2647
3.47k
        sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2648
3.47k
      }
2649
3.47k
    } else {
2650
65
      if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2651
65
        sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2652
65
                         stcb->sctp_ep, stcb, NULL);
2653
65
      }
2654
65
    }
2655
3.53k
  }
2656
3.53k
}
2657
2658
/*
 * Walk the DATA-chunk region of a received packet starting at *offset and
 * hand each DATA / I-DATA chunk to sctp_process_a_data_chunk().  Any
 * known non-data chunk type appearing here is a protocol violation and
 * aborts the association; unknown types follow the standard bit rules
 * (0x40: queue an "unrecognized chunk" error, 0x80 clear: stop processing).
 * On the way out, SACK bookkeeping is kicked via sctp_sack_check().
 *
 * Returns 0 on normal completion, 1 if the first chunk header cannot be
 * read, and 2 if the association was aborted while processing.
 * *high_tsn is seeded with the cum-ack and passed to
 * sctp_process_a_data_chunk() for updating.
 */
int
sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
                  struct sctp_inpcb *inp, struct sctp_tcb *stcb,
                  struct sctp_nets *net, uint32_t *high_tsn)
{
  struct sctp_chunkhdr *ch, chunk_buf;
  struct sctp_association *asoc;
  int num_chunks = 0; /* number of data chunks accepted by sctp_process_a_data_chunk() */
  int stop_proc = 0;
  int break_flag, last_chunk;
  int abort_flag = 0, was_a_gap;
  struct mbuf *m;
  uint32_t highest_tsn;
  uint16_t chk_length;

  /* set the rwnd */
  sctp_set_rwnd(stcb, &stcb->asoc);

  m = *mm;
  SCTP_TCB_LOCK_ASSERT(stcb);
  asoc = &stcb->asoc;
  /* Record whether a gap exists before processing, for sctp_sack_check(). */
  if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
    highest_tsn = asoc->highest_tsn_inside_nr_map;
  } else {
    highest_tsn = asoc->highest_tsn_inside_map;
  }
  was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
  /*
   * setup where we got the last DATA packet from for any SACK that
   * may need to go out. Don't bump the net. This is done ONLY when a
   * chunk is assigned.
   */
  asoc->last_data_chunk_from = net;

  /*-
   * Now before we proceed we must figure out if this is a wasted
   * cluster... i.e. it is a small packet sent in and yet the driver
   * underneath allocated a full cluster for it. If so we must copy it
   * to a smaller mbuf and free up the cluster mbuf. This will help
   * with cluster starvation.
   */
  if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
    /* we only handle mbufs that are singletons.. not chains */
    m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
    if (m) {
      /* ok lets see if we can copy the data up */
      caddr_t *from, *to;
      /* get the pointers and copy */
      to = mtod(m, caddr_t *);
      from = mtod((*mm), caddr_t *);
      memcpy(to, from, SCTP_BUF_LEN((*mm)));
      /* copy the length and free up the old */
      SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
      sctp_m_freem(*mm);
      /* success, back copy */
      *mm = m;
    } else {
      /* We are in trouble in the mbuf world .. yikes */
      m = *mm;
    }
  }
  /* get pointer to the first chunk header */
  ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
                                             sizeof(struct sctp_chunkhdr),
                                             (uint8_t *)&chunk_buf);
  if (ch == NULL) {
    return (1);
  }
  /*
   * process all DATA chunks...
   */
  *high_tsn = asoc->cumulative_tsn;
  break_flag = 0;
  asoc->data_pkts_seen++;
  while (stop_proc == 0) {
    /* validate chunk length */
    chk_length = ntohs(ch->chunk_length);
    if (length - *offset < chk_length) {
      /* all done, mutilated chunk */
      stop_proc = 1;
      continue;
    }
    /* DATA when I-DATA was negotiated is a protocol violation: abort. */
    if ((asoc->idata_supported == 1) &&
        (ch->chunk_type == SCTP_DATA)) {
      struct mbuf *op_err;
      char msg[SCTP_DIAG_INFO_LEN];

      SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
      op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
      stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
      sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
      return (2);
    }
    /* Likewise I-DATA when only DATA was negotiated: abort. */
    if ((asoc->idata_supported == 0) &&
        (ch->chunk_type == SCTP_IDATA)) {
      struct mbuf *op_err;
      char msg[SCTP_DIAG_INFO_LEN];

      SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
      op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
      stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
      sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
      return (2);
    }
    if ((ch->chunk_type == SCTP_DATA) ||
        (ch->chunk_type == SCTP_IDATA)) {
      uint16_t clen;

      /* Minimum length differs between DATA and I-DATA headers. */
      if (ch->chunk_type == SCTP_DATA) {
        clen = sizeof(struct sctp_data_chunk);
      } else {
        clen = sizeof(struct sctp_idata_chunk);
      }
      if (chk_length < clen) {
        /*
         * Need to send an abort since we had a
         * invalid data chunk.
         */
        struct mbuf *op_err;
        char msg[SCTP_DIAG_INFO_LEN];

        SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
                      ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
                      chk_length);
        op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
        sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
        return (2);
      }
#ifdef SCTP_AUDITING_ENABLED
      sctp_audit_log(0xB1, 0);
#endif
      /* last_chunk: this chunk's padded length reaches the packet end. */
      if (SCTP_SIZE32(chk_length) == (length - *offset)) {
        last_chunk = 1;
      } else {
        last_chunk = 0;
      }
      if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
                  chk_length, net, high_tsn, &abort_flag, &break_flag,
                  last_chunk, ch->chunk_type)) {
        num_chunks++;
      }
      if (abort_flag)
        return (2);

      if (break_flag) {
        /*
         * Set because of out of rwnd space and no
         * drop rep space left.
         */
        stop_proc = 1;
        continue;
      }
    } else {
      /* not a data chunk in the data region */
      switch (ch->chunk_type) {
      case SCTP_INITIATION:
      case SCTP_INITIATION_ACK:
      case SCTP_SELECTIVE_ACK:
      case SCTP_NR_SELECTIVE_ACK:
      case SCTP_HEARTBEAT_REQUEST:
      case SCTP_HEARTBEAT_ACK:
      case SCTP_ABORT_ASSOCIATION:
      case SCTP_SHUTDOWN:
      case SCTP_SHUTDOWN_ACK:
      case SCTP_OPERATION_ERROR:
      case SCTP_COOKIE_ECHO:
      case SCTP_COOKIE_ACK:
      case SCTP_ECN_ECHO:
      case SCTP_ECN_CWR:
      case SCTP_SHUTDOWN_COMPLETE:
      case SCTP_AUTHENTICATION:
      case SCTP_ASCONF_ACK:
      case SCTP_PACKET_DROPPED:
      case SCTP_STREAM_RESET:
      case SCTP_FORWARD_CUM_TSN:
      case SCTP_ASCONF:
      {
        /*
         * Now, what do we do with KNOWN chunks that
         * are NOT in the right place?
         *
         * For now, I do nothing but ignore them. We
         * may later want to add sysctl stuff to
         * switch out and do either an ABORT() or
         * possibly process them.
         */
        struct mbuf *op_err;
        char msg[SCTP_DIAG_INFO_LEN];

        SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
                      ch->chunk_type);
        op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
        sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
        return (2);
      }
      default:
        /*
         * Unknown chunk type: use bit rules after
         * checking length
         */
        if (chk_length < sizeof(struct sctp_chunkhdr)) {
          /*
           * Need to send an abort since we had a
           * invalid chunk.
           */
          struct mbuf *op_err;
          char msg[SCTP_DIAG_INFO_LEN];

          SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
          op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
          stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
          sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
          return (2);
        }
        /* 0x40 bit set: report the unrecognized chunk to the peer. */
        if (ch->chunk_type & 0x40) {
          /* Add a error report to the queue */
          struct mbuf *op_err;
          struct sctp_gen_error_cause *cause;

          op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
                                         0, M_NOWAIT, 1, MT_DATA);
          if (op_err != NULL) {
            cause  = mtod(op_err, struct sctp_gen_error_cause *);
            cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
            cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
            SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
            SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
            if (SCTP_BUF_NEXT(op_err) != NULL) {
              sctp_queue_op_err(stcb, op_err);
            } else {
              sctp_m_freem(op_err);
            }
          }
        }
        /* 0x80 bit clear: stop processing the rest of the packet. */
        if ((ch->chunk_type & 0x80) == 0) {
          /* discard the rest of this packet */
          stop_proc = 1;
        }  /* else skip this bad chunk and
           * continue... */
        break;
      } /* switch of chunk type */
    }
    /* Advance to the next chunk (lengths are padded to 4 bytes). */
    *offset += SCTP_SIZE32(chk_length);
    if ((*offset >= length) || stop_proc) {
      /* no more data left in the mbuf chain */
      stop_proc = 1;
      continue;
    }
    ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
                                               sizeof(struct sctp_chunkhdr),
                                               (uint8_t *)&chunk_buf);
    if (ch == NULL) {
      *offset = length;
      stop_proc = 1;
      continue;
    }
  }
  if (break_flag) {
    /*
     * we need to report rwnd overrun drops.
     */
    sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
  }
  if (num_chunks) {
    /*
     * Did we get data, if so update the time for auto-close and
     * give peer credit for being alive.
     */
    SCTP_STAT_INCR(sctps_recvpktwithdata);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
      sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
               stcb->asoc.overall_error_count,
               0,
               SCTP_FROM_SCTP_INDATA,
               __LINE__);
    }
    stcb->asoc.overall_error_count = 0;
    (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
  }
  /* now service all of the reassm queue if needed */
  if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
    /* Assure that we ack right away */
    stcb->asoc.send_sack = 1;
  }
  /* Start a sack timer or QUEUE a SACK for sending */
  sctp_sack_check(stcb, was_a_gap);
  return (0);
}
2947
2948
static int
2949
sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2950
         uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2951
         int *num_frs,
2952
         uint32_t *biggest_newly_acked_tsn,
2953
         uint32_t  *this_sack_lowest_newack,
2954
         int *rto_ok)
2955
0
{
2956
0
  struct sctp_tmit_chunk *tp1;
2957
0
  unsigned int theTSN;
2958
0
  int j, wake_him = 0, circled = 0;
2959
2960
  /* Recover the tp1 we last saw */
2961
0
  tp1 = *p_tp1;
2962
0
  if (tp1 == NULL) {
2963
0
    tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2964
0
  }
2965
0
  for (j = frag_strt; j <= frag_end; j++) {
2966
0
    theTSN = j + last_tsn;
2967
0
    while (tp1) {
2968
0
      if (tp1->rec.data.doing_fast_retransmit)
2969
0
        (*num_frs) += 1;
2970
2971
      /*-
2972
       * CMT: CUCv2 algorithm. For each TSN being
2973
       * processed from the sent queue, track the
2974
       * next expected pseudo-cumack, or
2975
       * rtx_pseudo_cumack, if required. Separate
2976
       * cumack trackers for first transmissions,
2977
       * and retransmissions.
2978
       */
2979
0
      if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2980
0
          (tp1->whoTo->find_pseudo_cumack == 1) &&
2981
0
          (tp1->snd_count == 1)) {
2982
0
        tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2983
0
        tp1->whoTo->find_pseudo_cumack = 0;
2984
0
      }
2985
0
      if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2986
0
          (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2987
0
          (tp1->snd_count > 1)) {
2988
0
        tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2989
0
        tp1->whoTo->find_rtx_pseudo_cumack = 0;
2990
0
      }
2991
0
      if (tp1->rec.data.tsn == theTSN) {
2992
0
        if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2993
          /*-
2994
           * must be held until
2995
           * cum-ack passes
2996
           */
2997
0
          if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2998
            /*-
2999
             * If it is less than RESEND, it is
3000
             * now no-longer in flight.
3001
             * Higher values may already be set
3002
             * via previous Gap Ack Blocks...
3003
             * i.e. ACKED or RESEND.
3004
             */
3005
0
            if (SCTP_TSN_GT(tp1->rec.data.tsn,
3006
0
                            *biggest_newly_acked_tsn)) {
3007
0
              *biggest_newly_acked_tsn = tp1->rec.data.tsn;
3008
0
            }
3009
            /*-
3010
             * CMT: SFR algo (and HTNA) - set
3011
             * saw_newack to 1 for dest being
3012
             * newly acked. update
3013
             * this_sack_highest_newack if
3014
             * appropriate.
3015
             */
3016
0
            if (tp1->rec.data.chunk_was_revoked == 0)
3017
0
              tp1->whoTo->saw_newack = 1;
3018
3019
0
            if (SCTP_TSN_GT(tp1->rec.data.tsn,
3020
0
                            tp1->whoTo->this_sack_highest_newack)) {
3021
0
              tp1->whoTo->this_sack_highest_newack =
3022
0
                tp1->rec.data.tsn;
3023
0
            }
3024
            /*-
3025
             * CMT DAC algo: also update
3026
             * this_sack_lowest_newack
3027
             */
3028
0
            if (*this_sack_lowest_newack == 0) {
3029
0
              if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3030
0
                sctp_log_sack(*this_sack_lowest_newack,
3031
0
                        last_tsn,
3032
0
                        tp1->rec.data.tsn,
3033
0
                        0,
3034
0
                        0,
3035
0
                        SCTP_LOG_TSN_ACKED);
3036
0
              }
3037
0
              *this_sack_lowest_newack = tp1->rec.data.tsn;
3038
0
            }
3039
            /*-
3040
             * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3041
             * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3042
             * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3043
             * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3044
             * Separate pseudo_cumack trackers for first transmissions and
3045
             * retransmissions.
3046
             */
3047
0
            if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3048
0
              if (tp1->rec.data.chunk_was_revoked == 0) {
3049
0
                tp1->whoTo->new_pseudo_cumack = 1;
3050
0
              }
3051
0
              tp1->whoTo->find_pseudo_cumack = 1;
3052
0
            }
3053
0
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3054
0
              sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3055
0
            }
3056
0
            if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3057
0
              if (tp1->rec.data.chunk_was_revoked == 0) {
3058
0
                tp1->whoTo->new_pseudo_cumack = 1;
3059
0
              }
3060
0
              tp1->whoTo->find_rtx_pseudo_cumack = 1;
3061
0
            }
3062
0
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3063
0
              sctp_log_sack(*biggest_newly_acked_tsn,
3064
0
                      last_tsn,
3065
0
                      tp1->rec.data.tsn,
3066
0
                      frag_strt,
3067
0
                      frag_end,
3068
0
                      SCTP_LOG_TSN_ACKED);
3069
0
            }
3070
0
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3071
0
              sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3072
0
                       tp1->whoTo->flight_size,
3073
0
                       tp1->book_size,
3074
0
                       (uint32_t)(uintptr_t)tp1->whoTo,
3075
0
                       tp1->rec.data.tsn);
3076
0
            }
3077
0
            sctp_flight_size_decrease(tp1);
3078
0
            if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3079
0
              (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3080
0
                                 tp1);
3081
0
            }
3082
0
            sctp_total_flight_decrease(stcb, tp1);
3083
3084
0
            tp1->whoTo->net_ack += tp1->send_size;
3085
0
            if (tp1->snd_count < 2) {
3086
              /*-
3087
               * True non-retransmitted chunk
3088
               */
3089
0
              tp1->whoTo->net_ack2 += tp1->send_size;
3090
3091
              /*-
3092
               * update RTO too ?
3093
               */
3094
0
              if (tp1->do_rtt) {
3095
0
                if (*rto_ok &&
3096
0
                    sctp_calculate_rto(stcb,
3097
0
                                       &stcb->asoc,
3098
0
                                       tp1->whoTo,
3099
0
                                       &tp1->sent_rcv_time,
3100
0
                                       SCTP_RTT_FROM_DATA)) {
3101
0
                  *rto_ok = 0;
3102
0
                }
3103
0
                if (tp1->whoTo->rto_needed == 0) {
3104
0
                  tp1->whoTo->rto_needed = 1;
3105
0
                }
3106
0
                tp1->do_rtt = 0;
3107
0
              }
3108
0
            }
3109
0
          }
3110
0
          if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3111
0
            if (SCTP_TSN_GT(tp1->rec.data.tsn,
3112
0
                            stcb->asoc.this_sack_highest_gap)) {
3113
0
              stcb->asoc.this_sack_highest_gap =
3114
0
                tp1->rec.data.tsn;
3115
0
            }
3116
0
            if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3117
0
              sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3118
#ifdef SCTP_AUDITING_ENABLED
3119
              sctp_audit_log(0xB2,
3120
                       (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3121
#endif
3122
0
            }
3123
0
          }
3124
          /*-
3125
           * All chunks NOT UNSENT fall through here and are marked
3126
           * (leave PR-SCTP ones that are to skip alone though)
3127
           */
3128
0
          if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3129
0
              (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3130
0
            tp1->sent = SCTP_DATAGRAM_MARKED;
3131
0
          }
3132
0
          if (tp1->rec.data.chunk_was_revoked) {
3133
            /* deflate the cwnd */
3134
0
            tp1->whoTo->cwnd -= tp1->book_size;
3135
0
            tp1->rec.data.chunk_was_revoked = 0;
3136
0
          }
3137
          /* NR Sack code here */
3138
0
          if (nr_sacking &&
3139
0
              (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3140
0
            if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3141
0
              stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3142
0
#ifdef INVARIANTS
3143
0
            } else {
3144
0
              panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3145
0
#endif
3146
0
            }
3147
0
            if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3148
0
                (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3149
0
                TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3150
0
              stcb->asoc.trigger_reset = 1;
3151
0
            }
3152
0
            tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3153
0
            if (tp1->data) {
3154
              /* sa_ignore NO_NULL_CHK */
3155
0
              sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3156
0
              sctp_m_freem(tp1->data);
3157
0
              tp1->data = NULL;
3158
0
            }
3159
0
            wake_him++;
3160
0
          }
3161
0
        }
3162
0
        break;
3163
0
      } /* if (tp1->tsn == theTSN) */
3164
0
      if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3165
0
        break;
3166
0
      }
3167
0
      tp1 = TAILQ_NEXT(tp1, sctp_next);
3168
0
      if ((tp1 == NULL) && (circled == 0)) {
3169
0
        circled++;
3170
0
        tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3171
0
      }
3172
0
    } /* end while (tp1) */
3173
0
    if (tp1 == NULL) {
3174
0
      circled = 0;
3175
0
      tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3176
0
    }
3177
    /* In case the fragments were not in order we must reset */
3178
0
  } /* end for (j = fragStart */
3179
0
  *p_tp1 = tp1;
3180
0
  return (wake_him); /* Return value only used for nr-sack */
3181
0
}
3182
3183
static int
3184
sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3185
    uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3186
    uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3187
    int num_seg, int num_nr_seg, int *rto_ok)
3188
0
{
3189
0
  struct sctp_gap_ack_block *frag, block;
3190
0
  struct sctp_tmit_chunk *tp1;
3191
0
  int i;
3192
0
  int num_frs = 0;
3193
0
  int chunk_freed;
3194
0
  int non_revocable;
3195
0
  uint16_t frag_strt, frag_end, prev_frag_end;
3196
3197
0
  tp1 = TAILQ_FIRST(&asoc->sent_queue);
3198
0
  prev_frag_end = 0;
3199
0
  chunk_freed = 0;
3200
3201
0
  for (i = 0; i < (num_seg + num_nr_seg); i++) {
3202
0
    if (i == num_seg) {
3203
0
      prev_frag_end = 0;
3204
0
      tp1 = TAILQ_FIRST(&asoc->sent_queue);
3205
0
    }
3206
0
    frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3207
0
                                                      sizeof(struct sctp_gap_ack_block), (uint8_t *) &block);
3208
0
    *offset += sizeof(block);
3209
0
    if (frag == NULL) {
3210
0
      return (chunk_freed);
3211
0
    }
3212
0
    frag_strt = ntohs(frag->start);
3213
0
    frag_end = ntohs(frag->end);
3214
3215
0
    if (frag_strt > frag_end) {
3216
      /* This gap report is malformed, skip it. */
3217
0
      continue;
3218
0
    }
3219
0
    if (frag_strt <= prev_frag_end) {
3220
      /* This gap report is not in order, so restart. */
3221
0
       tp1 = TAILQ_FIRST(&asoc->sent_queue);
3222
0
    }
3223
0
    if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3224
0
      *biggest_tsn_acked = last_tsn + frag_end;
3225
0
    }
3226
0
    if (i < num_seg) {
3227
0
      non_revocable = 0;
3228
0
    } else {
3229
0
      non_revocable = 1;
3230
0
    }
3231
0
    if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3232
0
                                   non_revocable, &num_frs, biggest_newly_acked_tsn,
3233
0
                                   this_sack_lowest_newack, rto_ok)) {
3234
0
      chunk_freed = 1;
3235
0
    }
3236
0
    prev_frag_end = frag_end;
3237
0
  }
3238
0
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3239
0
    if (num_frs)
3240
0
      sctp_log_fr(*biggest_tsn_acked,
3241
0
                  *biggest_newly_acked_tsn,
3242
0
                  last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3243
0
  }
3244
0
  return (chunk_freed);
3245
0
}
3246
3247
static void
3248
sctp_check_for_revoked(struct sctp_tcb *stcb,
3249
           struct sctp_association *asoc, uint32_t cumack,
3250
           uint32_t biggest_tsn_acked)
3251
0
{
3252
0
  struct sctp_tmit_chunk *tp1;
3253
3254
0
  TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3255
0
    if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3256
      /*
3257
       * ok this guy is either ACK or MARKED. If it is
3258
       * ACKED it has been previously acked but not this
3259
       * time i.e. revoked.  If it is MARKED it was ACK'ed
3260
       * again.
3261
       */
3262
0
      if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3263
0
        break;
3264
0
      }
3265
0
      if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3266
        /* it has been revoked */
3267
0
        tp1->sent = SCTP_DATAGRAM_SENT;
3268
0
        tp1->rec.data.chunk_was_revoked = 1;
3269
        /* We must add this stuff back in to
3270
         * assure timers and such get started.
3271
         */
3272
0
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3273
0
          sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3274
0
                   tp1->whoTo->flight_size,
3275
0
                   tp1->book_size,
3276
0
                   (uint32_t)(uintptr_t)tp1->whoTo,
3277
0
                   tp1->rec.data.tsn);
3278
0
        }
3279
0
        sctp_flight_size_increase(tp1);
3280
0
        sctp_total_flight_increase(stcb, tp1);
3281
        /* We inflate the cwnd to compensate for our
3282
         * artificial inflation of the flight_size.
3283
         */
3284
0
        tp1->whoTo->cwnd += tp1->book_size;
3285
0
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3286
0
          sctp_log_sack(asoc->last_acked_seq,
3287
0
                  cumack,
3288
0
                  tp1->rec.data.tsn,
3289
0
                  0,
3290
0
                  0,
3291
0
                  SCTP_LOG_TSN_REVOKED);
3292
0
        }
3293
0
      } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3294
        /* it has been re-acked in this SACK */
3295
0
        tp1->sent = SCTP_DATAGRAM_ACKED;
3296
0
      }
3297
0
    }
3298
0
    if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3299
0
      break;
3300
0
  }
3301
0
}
3302
3303
/*
 * Strike (increment the miss count of) chunks on the sent queue that the
 * current SACK's gap reports passed over, and move any chunk that reaches
 * SCTP_DATAGRAM_RESEND into fast-retransmit state: fix up flight-size and
 * rwnd accounting, pick an alternate destination where policy allows, and
 * record the fast-retran TSN used to gate subsequent strikes.
 * Implements the plain FR rules plus the CMT SFR/DAC/HTNA refinements and
 * PR-SCTP lifetime/retransmission-limit drops.
 */
static void
sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
			   uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
{
	struct sctp_tmit_chunk *tp1;
	int strike_flag = 0;
	struct timeval now;
	uint32_t sending_seq;
	struct sctp_nets *net;
	int num_dests_sacked = 0;

	/*
	 * select the sending_seq, this is either the next thing ready to be
	 * sent but not transmitted, OR, the next seq we assign.
	 */
	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
	if (tp1 == NULL) {
		sending_seq = asoc->sending_seq;
	} else {
		sending_seq = tp1->rec.data.tsn;
	}

	/* CMT DAC algo: finding out if SACK is a mixed SACK */
	if ((asoc->sctp_cmt_on_off > 0) &&
	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			if (net->saw_newack)
				num_dests_sacked++;
		}
	}
	if (stcb->asoc.prsctp_supported) {
		/* Fetch the clock once; used for PR-SCTP TTL expiry below. */
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
		strike_flag = 0;
		if (tp1->no_fr_allowed) {
			/* this one had a timeout or something */
			continue;
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND)
				sctp_log_fr(biggest_tsn_newly_acked,
					    tp1->rec.data.tsn,
					    tp1->sent,
					    SCTP_FR_LOG_CHECK_STRIKE);
		}
		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
			/* done */
			break;
		}
		if (stcb->asoc.prsctp_supported) {
			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
				/* Is it expired? */
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
				if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
#else
				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
#endif
					/* Yes so drop it */
					if (tp1->data != NULL) {
						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
										 SCTP_SO_NOT_LOCKED);
					}
					continue;
				}
			}
		}
		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
			/* we are beyond the tsn in the sack  */
			break;
		}
		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
			/* either a RESEND, ACKED, or MARKED */
			/* skip */
			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
				/* Continue strikin FWD-TSN chunks */
				tp1->rec.data.fwd_tsn_cnt++;
			}
			continue;
		}
		/*
		 * CMT : SFR algo (covers part of DAC and HTNA as well)
		 */
		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
			/*
			 * No new acks were received for data sent to this
			 * dest. Therefore, according to the SFR algo for
			 * CMT, no data sent to this dest can be marked for
			 * FR using this SACK.
			 */
			continue;
		} else if (tp1->whoTo &&
			   SCTP_TSN_GT(tp1->rec.data.tsn,
				       tp1->whoTo->this_sack_highest_newack) &&
			   !(accum_moved && asoc->fast_retran_loss_recovery)) {
			/*
			 * CMT: New acks were received for data sent to
			 * this dest. But no new acks were seen for data
			 * sent after tp1. Therefore, according to the SFR
			 * algo for CMT, tp1 cannot be marked for FR using
			 * this SACK. This step covers part of the DAC algo
			 * and the HTNA algo as well.
			 */
			continue;
		}
		/*
		 * Here we check to see if we were have already done a FR
		 * and if so we see if the biggest TSN we saw in the sack is
		 * smaller than the recovery point. If so we don't strike
		 * the tsn... otherwise we CAN strike the TSN.
		 */
		/*
		 * @@@ JRI: Check for CMT
		 * if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
		 */
		if (accum_moved && asoc->fast_retran_loss_recovery) {
			/*
			 * Strike the TSN if in fast-recovery and cum-ack
			 * moved.
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
				sctp_log_fr(biggest_tsn_newly_acked,
					    tp1->rec.data.tsn,
					    tp1->sent,
					    SCTP_FR_LOG_STRIKE_CHUNK);
			}
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				tp1->sent++;
			}
			if ((asoc->sctp_cmt_on_off > 0) &&
			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
				/*
				 * CMT DAC algorithm: If SACK flag is set to
				 * 0, then lowest_newack test will not pass
				 * because it would have been set to the
				 * cumack earlier. If not already to be
				 * rtx'd, If not a mixed sack and if tp1 is
				 * not between two sacked TSNs, then mark by
				 * one more.
				 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
				 * two packets have been received after this missing TSN.
				 */
				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
						sctp_log_fr(16 + num_dests_sacked,
							    tp1->rec.data.tsn,
							    tp1->sent,
							    SCTP_FR_LOG_STRIKE_CHUNK);
					}
					tp1->sent++;
				}
			}
		} else if ((tp1->rec.data.doing_fast_retransmit) &&
			   (asoc->sctp_cmt_on_off == 0)) {
			/*
			 * For those that have done a FR we must take
			 * special consideration if we strike. I.e the
			 * biggest_newly_acked must be higher than the
			 * sending_seq at the time we did the FR.
			 */
			if (
#ifdef SCTP_FR_TO_ALTERNATE
				/*
				 * If FR's go to new networks, then we must only do
				 * this for singly homed asoc's. However if the FR's
				 * go to the same network (Armando's work) then its
				 * ok to FR multiple times.
				 */
				(asoc->numnets < 2)
#else
				(1)
#endif
				) {
				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
						tp1->rec.data.fast_retran_tsn)) {
					/*
					 * Strike the TSN, since this ack is
					 * beyond where things were when we
					 * did a FR.
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
						sctp_log_fr(biggest_tsn_newly_acked,
							    tp1->rec.data.tsn,
							    tp1->sent,
							    SCTP_FR_LOG_STRIKE_CHUNK);
					}
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
						tp1->sent++;
					}
					strike_flag = 1;
					if ((asoc->sctp_cmt_on_off > 0) &&
					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
						/*
						 * CMT DAC algorithm: If
						 * SACK flag is set to 0,
						 * then lowest_newack test
						 * will not pass because it
						 * would have been set to
						 * the cumack earlier. If
						 * not already to be rtx'd,
						 * If not a mixed sack and
						 * if tp1 is not between two
						 * sacked TSNs, then mark by
						 * one more.
						 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
						 * two packets have been received after this missing TSN.
						 */
						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
						    (num_dests_sacked == 1) &&
						    SCTP_TSN_GT(this_sack_lowest_newack,
								tp1->rec.data.tsn)) {
							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
								sctp_log_fr(32 + num_dests_sacked,
									    tp1->rec.data.tsn,
									    tp1->sent,
									    SCTP_FR_LOG_STRIKE_CHUNK);
							}
							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
								tp1->sent++;
							}
						}
					}
				}
			}
			/*
			 * JRI: TODO: remove code for HTNA algo. CMT's
			 * SFR algo covers HTNA.
			 */
		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
				       biggest_tsn_newly_acked)) {
			/*
			 * We don't strike these: This is the  HTNA
			 * algorithm i.e. we don't strike If our TSN is
			 * larger than the Highest TSN Newly Acked.
			 */
			;
		} else {
			/* Strike the TSN */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
				sctp_log_fr(biggest_tsn_newly_acked,
					    tp1->rec.data.tsn,
					    tp1->sent,
					    SCTP_FR_LOG_STRIKE_CHUNK);
			}
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				tp1->sent++;
			}
			if ((asoc->sctp_cmt_on_off > 0) &&
			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
				/*
				 * CMT DAC algorithm: If SACK flag is set to
				 * 0, then lowest_newack test will not pass
				 * because it would have been set to the
				 * cumack earlier. If not already to be
				 * rtx'd, If not a mixed sack and if tp1 is
				 * not between two sacked TSNs, then mark by
				 * one more.
				 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
				 * two packets have been received after this missing TSN.
				 */
				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
						sctp_log_fr(48 + num_dests_sacked,
							    tp1->rec.data.tsn,
							    tp1->sent,
							    SCTP_FR_LOG_STRIKE_CHUNK);
					}
					tp1->sent++;
				}
			}
		}
		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
			/* This chunk just crossed the strike threshold:
			 * move it into fast-retransmit state.
			 */
			struct sctp_nets *alt;

			/* fix counts and things */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
					       (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
					       tp1->book_size,
					       (uint32_t)(uintptr_t)tp1->whoTo,
					       tp1->rec.data.tsn);
			}
			if (tp1->whoTo) {
				tp1->whoTo->net_ack++;
				sctp_flight_size_decrease(tp1);
				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
										     tp1);
				}
			}

			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
					      asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
			}
			/* add back to the rwnd */
			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));

			/* remove from the total flight */
			sctp_total_flight_decrease(stcb, tp1);

			if ((stcb->asoc.prsctp_supported) &&
			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
				/* Has it been retransmitted tv_sec times? - we store the retran count there. */
				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
					/* Yes, so drop it */
					if (tp1->data != NULL) {
						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
										 SCTP_SO_NOT_LOCKED);
					}
					/* Make sure to flag we had a FR */
					if (tp1->whoTo != NULL) {
						tp1->whoTo->net_ack++;
					}
					continue;
				}
			}
			/* SCTP_PRINTF("OK, we are now ready to FR this guy\n"); */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
					    0, SCTP_FR_MARKED);
			}
			if (strike_flag) {
				/* This is a subsequent FR */
				SCTP_STAT_INCR(sctps_sendmultfastretrans);
			}
			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			if (asoc->sctp_cmt_on_off > 0) {
				/*
				 * CMT: Using RTX_SSTHRESH policy for CMT.
				 * If CMT is being used, then pick dest with
				 * largest ssthresh for any retransmission.
				 */
				tp1->no_fr_allowed = 1;
				alt = tp1->whoTo;
				/*sa_ignore NO_NULL_CHK*/
				if (asoc->sctp_cmt_pf > 0) {
					/* JRS 5/18/07 - If CMT PF is on, use the PF version of find_alt_net() */
					alt = sctp_find_alternate_net(stcb, alt, 2);
				} else {
					/* JRS 5/18/07 - If only CMT is on, use the CMT version of find_alt_net() */
					/*sa_ignore NO_NULL_CHK*/
					alt = sctp_find_alternate_net(stcb, alt, 1);
				}
				if (alt == NULL) {
					alt = tp1->whoTo;
				}
				/*
				 * CUCv2: If a different dest is picked for
				 * the retransmission, then new
				 * (rtx-)pseudo_cumack needs to be tracked
				 * for orig dest. Let CUCv2 track new (rtx-)
				 * pseudo-cumack always.
				 */
				if (tp1->whoTo) {
					tp1->whoTo->find_pseudo_cumack = 1;
					tp1->whoTo->find_rtx_pseudo_cumack = 1;
				}
			} else {/* CMT is OFF */
#ifdef SCTP_FR_TO_ALTERNATE
				/* Can we find an alternate? */
				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
#else
				/*
				 * default behavior is to NOT retransmit
				 * FR's to an alternate. Armando Caro's
				 * paper details why.
				 */
				alt = tp1->whoTo;
#endif
			}

			tp1->rec.data.doing_fast_retransmit = 1;
			/* mark the sending seq for possible subsequent FR's */
			/*
			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
			 * (uint32_t)tpi->rec.data.tsn);
			 */
			if (TAILQ_EMPTY(&asoc->send_queue)) {
				/*
				 * If the queue of send is empty then its
				 * the next sequence number that will be
				 * assigned so we subtract one from this to
				 * get the one we last sent.
				 */
				tp1->rec.data.fast_retran_tsn = sending_seq;
			} else {
				/*
				 * If there are chunks on the send queue
				 * (unsent data that has made it from the
				 * stream queues but not out the door, we
				 * take the first one (which will have the
				 * lowest TSN) and subtract one to get the
				 * one we last sent.
				 */
				struct sctp_tmit_chunk *ttt;

				ttt = TAILQ_FIRST(&asoc->send_queue);
				tp1->rec.data.fast_retran_tsn =
					ttt->rec.data.tsn;
			}

			if (tp1->do_rtt) {
				/*
				 * this guy had a RTO calculation pending on
				 * it, cancel it
				 */
				if ((tp1->whoTo != NULL) &&
				    (tp1->whoTo->rto_needed == 0)) {
					tp1->whoTo->rto_needed = 1;
				}
				tp1->do_rtt = 0;
			}
			if (alt != tp1->whoTo) {
				/* yes, there is an alternate. */
				sctp_free_remote_addr(tp1->whoTo);
				/*sa_ignore FREED_MEMORY*/
				tp1->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			}
		}
	}
}
3730
3731
struct sctp_tmit_chunk *
3732
sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3733
    struct sctp_association *asoc)
3734
0
{
3735
0
  struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3736
0
  struct timeval now;
3737
0
  int now_filled = 0;
3738
3739
0
  if (asoc->prsctp_supported == 0) {
3740
0
    return (NULL);
3741
0
  }
3742
0
  TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3743
0
    if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3744
0
        tp1->sent != SCTP_DATAGRAM_RESEND &&
3745
0
        tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3746
      /* no chance to advance, out of here */
3747
0
      break;
3748
0
    }
3749
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3750
0
      if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3751
0
          (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3752
0
        sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3753
0
                 asoc->advanced_peer_ack_point,
3754
0
                 tp1->rec.data.tsn, 0, 0);
3755
0
      }
3756
0
    }
3757
0
    if (!PR_SCTP_ENABLED(tp1->flags)) {
3758
      /*
3759
       * We can't fwd-tsn past any that are reliable aka
3760
       * retransmitted until the asoc fails.
3761
       */
3762
0
      break;
3763
0
    }
3764
0
    if (!now_filled) {
3765
0
      (void)SCTP_GETTIME_TIMEVAL(&now);
3766
0
      now_filled = 1;
3767
0
    }
3768
    /*
3769
     * now we got a chunk which is marked for another
3770
     * retransmission to a PR-stream but has run out its chances
3771
     * already maybe OR has been marked to skip now. Can we skip
3772
     * it if its a resend?
3773
     */
3774
0
    if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3775
0
        (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3776
      /*
3777
       * Now is this one marked for resend and its time is
3778
       * now up?
3779
       */
3780
0
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
3781
0
      if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
3782
#else
3783
      if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3784
#endif
3785
        /* Yes so drop it */
3786
0
        if (tp1->data) {
3787
0
          (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3788
0
              1, SCTP_SO_NOT_LOCKED);
3789
0
        }
3790
0
      } else {
3791
        /*
3792
         * No, we are done when hit one for resend
3793
         * whos time as not expired.
3794
         */
3795
0
        break;
3796
0
      }
3797
0
    }
3798
    /*
3799
     * Ok now if this chunk is marked to drop it we can clean up
3800
     * the chunk, advance our peer ack point and we can check
3801
     * the next chunk.
3802
     */
3803
0
    if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3804
0
        (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3805
      /* advance PeerAckPoint goes forward */
3806
0
      if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3807
0
        asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3808
0
        a_adv = tp1;
3809
0
      } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3810
        /* No update but we do save the chk */
3811
0
        a_adv = tp1;
3812
0
      }
3813
0
    } else {
3814
      /*
3815
       * If it is still in RESEND we can advance no
3816
       * further
3817
       */
3818
0
      break;
3819
0
    }
3820
0
  }
3821
0
  return (a_adv);
3822
0
}
3823
3824
static int
3825
sctp_fs_audit(struct sctp_association *asoc)
3826
0
{
3827
0
  struct sctp_tmit_chunk *chk;
3828
0
  int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3829
0
  int ret;
3830
#ifndef INVARIANTS
3831
  int entry_flight, entry_cnt;
3832
#endif
3833
3834
0
  ret = 0;
3835
#ifndef INVARIANTS
3836
  entry_flight = asoc->total_flight;
3837
  entry_cnt = asoc->total_flight_count;
3838
#endif
3839
0
  if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3840
0
    return (0);
3841
3842
0
  TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3843
0
    if (chk->sent < SCTP_DATAGRAM_RESEND) {
3844
0
      SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3845
0
                  chk->rec.data.tsn,
3846
0
                  chk->send_size,
3847
0
                  chk->snd_count);
3848
0
      inflight++;
3849
0
    } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3850
0
      resend++;
3851
0
    } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3852
0
      inbetween++;
3853
0
    } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3854
0
      above++;
3855
0
    } else {
3856
0
      acked++;
3857
0
    }
3858
0
  }
3859
3860
0
  if ((inflight > 0) || (inbetween > 0)) {
3861
0
#ifdef INVARIANTS
3862
0
    panic("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d",
3863
0
          inflight, inbetween, resend, above, acked);
3864
#else
3865
    SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3866
                entry_flight, entry_cnt);
3867
    SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3868
                inflight, inbetween, resend, above, acked);
3869
    ret = 1;
3870
#endif
3871
0
  }
3872
0
  return (ret);
3873
0
}
3874
3875
static void
3876
sctp_window_probe_recovery(struct sctp_tcb *stcb,
3877
                           struct sctp_association *asoc,
3878
                           struct sctp_tmit_chunk *tp1)
3879
0
{
3880
0
  tp1->window_probe = 0;
3881
0
  if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3882
    /* TSN's skipped we do NOT move back. */
3883
0
    sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3884
0
             tp1->whoTo ? tp1->whoTo->flight_size : 0,
3885
0
             tp1->book_size,
3886
0
             (uint32_t)(uintptr_t)tp1->whoTo,
3887
0
             tp1->rec.data.tsn);
3888
0
    return;
3889
0
  }
3890
  /* First setup this by shrinking flight */
3891
0
  if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3892
0
    (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3893
0
                       tp1);
3894
0
  }
3895
0
  sctp_flight_size_decrease(tp1);
3896
0
  sctp_total_flight_decrease(stcb, tp1);
3897
  /* Now mark for resend */
3898
0
  tp1->sent = SCTP_DATAGRAM_RESEND;
3899
0
  sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3900
3901
0
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3902
0
    sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3903
0
             tp1->whoTo->flight_size,
3904
0
             tp1->book_size,
3905
0
             (uint32_t)(uintptr_t)tp1->whoTo,
3906
0
             tp1->rec.data.tsn);
3907
0
  }
3908
0
}
3909
3910
/*
 * Fast-path handler for a SACK that carries only a cumulative ack
 * (no gap-ack or duplicate-TSN blocks).
 *
 * stcb      - association control block; its TCB lock MUST be held
 *             (asserted below via SCTP_TCB_LOCK_ASSERT).
 * cumack    - cumulative TSN ack reported by the peer.
 * rwnd      - peer's advertised receiver window from the SACK.
 * abort_now - out-parameter; set to 1 when the SACK is so broken
 *             (acking a TSN we never sent) that the association is
 *             aborted and the caller must stop processing this packet.
 * ecne_seen - nonzero when an ECN-Echo was seen in the same packet;
 *             suppresses the congestion-window update below.
 *
 * Side effects: frees cum-acked chunks from the sent queue, updates
 * flight sizes, peer rwnd, RTO, and congestion state, starts/stops
 * T3-send timers, may wake the sending socket, may send SHUTDOWN /
 * SHUTDOWN-ACK, and may send a FORWARD-TSN for PR-SCTP.
 */
void
sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
                         uint32_t rwnd, int *abort_now, int ecne_seen)
{
	struct sctp_nets *net;
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *tp1, *tp2;
	uint32_t old_rwnd;
	int win_probe_recovery = 0;
	int win_probe_recovered = 0;
	int j, done_once = 0;
	/* Only the first qualifying chunk per SACK feeds the RTO estimator. */
	int rto_ok = 1;
	uint32_t send_s;

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
		               rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_ASOCLOG_OF_TSNS
	/* Debug-only circular log of received cum-acks. */
	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
	stcb->asoc.cumack_log_at++;
	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
		stcb->asoc.cumack_log_at = 0;
	}
#endif
	asoc = &stcb->asoc;
	old_rwnd = asoc->peers_rwnd;
	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
		/* old ack */
		return;
	} else if (asoc->last_acked_seq == cumack) {
		/*
		 * Window update sack: cum-ack did not move, so only refresh
		 * the peer's rwnd (discounted by bytes still in flight plus
		 * the configured per-chunk overhead).
		 */
		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
		            (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
			/* SWS sender side engages */
			asoc->peers_rwnd = 0;
		}
		if (asoc->peers_rwnd > old_rwnd) {
			/* Window opened: re-check timers / window probes below. */
			goto again;
		}
		return;
	}

	/* First setup for CC stuff */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
			/* Drag along the window_tsn for cwr's */
			net->cwr_window_tsn = cumack;
		}
		net->prev_cwnd = net->cwnd;
		net->net_ack = 0;
		net->net_ack2 = 0;

		/*
		 * CMT: Reset CUC and Fast recovery algo variables before
		 * SACK processing
		 */
		net->new_pseudo_cumack = 0;
		net->will_exit_fast_recovery = 0;
		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
		}
	}
	/*
	 * Sanity bound: highest TSN we could legitimately be acked for is
	 * one past the last chunk ever placed on the sent queue (or the
	 * next sequence number if that queue is empty).
	 */
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		tp1 = TAILQ_LAST(&asoc->sent_queue,
		     sctpchunk_listhead);
		send_s = tp1->rec.data.tsn + 1;
	} else {
		send_s = asoc->sending_seq;
	}
	if (SCTP_TSN_GE(cumack, send_s)) {
		/* Peer acked data we never sent: protocol violation, abort. */
		struct mbuf *op_err;
		char msg[SCTP_DIAG_INFO_LEN];

		*abort_now = 1;
		/* XXX */
		SCTP_SNPRINTF(msg, sizeof(msg),
		              "Cum ack %8.8x greater or equal than TSN %8.8x",
		              cumack, send_s);
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
		return;
	}
	asoc->this_sack_highest_gap = cumack;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		         stcb->asoc.overall_error_count,
		         0,
		         SCTP_FROM_SCTP_INDATA,
		         __LINE__);
	}
	/* Any forward movement of the cum-ack clears the error counter. */
	stcb->asoc.overall_error_count = 0;
	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
		/* process the new consecutive TSN first */
		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
					SCTP_PRINTF("Warning, an unsent is now acked?\n");
				}
				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
					/*
					 * If it is less than ACKED, it is
					 * now no-longer in flight. Higher
					 * values may occur during marking
					 */
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
							         tp1->whoTo->flight_size,
							         tp1->book_size,
							         (uint32_t)(uintptr_t)tp1->whoTo,
							         tp1->rec.data.tsn);
						}
						sctp_flight_size_decrease(tp1);
						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
							                   tp1);
						}
						/* sa_ignore NO_NULL_CHK */
						sctp_total_flight_decrease(stcb, tp1);
					}
					/* net_ack: total bytes acked on this destination. */
					tp1->whoTo->net_ack += tp1->send_size;
					if (tp1->snd_count < 2) {
						/*
						 * True non-retransmitted
						 * chunk
						 */
						tp1->whoTo->net_ack2 +=
						  tp1->send_size;

						/* update RTO too? */
						if (tp1->do_rtt) {
							if (rto_ok &&
							    sctp_calculate_rto(stcb,
							           &stcb->asoc,
							           tp1->whoTo,
							           &tp1->sent_rcv_time,
							           SCTP_RTT_FROM_DATA)) {
								/* One RTT sample per SACK. */
								rto_ok = 0;
							}
							if (tp1->whoTo->rto_needed == 0) {
								tp1->whoTo->rto_needed = 1;
							}
							tp1->do_rtt = 0;
						}
					}
					/*
					 * CMT: CUCv2 algorithm. From the
					 * cumack'd TSNs, for each TSN being
					 * acked for the first time, set the
					 * following variables for the
					 * corresp destination.
					 * new_pseudo_cumack will trigger a
					 * cwnd update.
					 * find_(rtx_)pseudo_cumack will
					 * trigger search for the next
					 * expected (rtx-)pseudo-cumack.
					 */
					tp1->whoTo->new_pseudo_cumack = 1;
					tp1->whoTo->find_pseudo_cumack = 1;
					tp1->whoTo->find_rtx_pseudo_cumack = 1;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
						/* sa_ignore NO_NULL_CHK */
						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
					}
				}
				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
				}
				if (tp1->rec.data.chunk_was_revoked) {
					/* deflate the cwnd */
					tp1->whoTo->cwnd -= tp1->book_size;
					tp1->rec.data.chunk_was_revoked = 0;
				}
				/*
				 * NR_ACKED chunks were already counted out of
				 * chunks_on_queues when they were nr-acked.
				 */
				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
					} else {
						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
#endif
					}
				}
				/* Stream drained while a reset is pending: fire the reset. */
				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
					asoc->trigger_reset = 1;
				}
				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
				if (tp1->data) {
					/* sa_ignore NO_NULL_CHK */
					sctp_free_bufspace(stcb, asoc, tp1, 1);
					sctp_m_freem(tp1->data);
					tp1->data = NULL;
				}
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
					sctp_log_sack(asoc->last_acked_seq,
					        cumack,
					        tp1->rec.data.tsn,
					        0,
					        0,
					        SCTP_LOG_FREE_SENT);
				}
				asoc->sent_queue_cnt--;
				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
			} else {
				/* Queue is TSN-ordered; nothing further is covered. */
				break;
			}
		}
	}
#if defined(__Userspace__)
	/*
	 * Userspace stack: freed send-buffer space may need to be reported
	 * through the application's send callback instead of (or before)
	 * the socket wakeup path below.
	 */
	if (stcb->sctp_ep->recv_callback) {
		if (stcb->sctp_socket) {
			uint32_t inqueue_bytes, sb_free_now;
			struct sctp_inpcb *inp;

			inp = stcb->sctp_ep;
			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
			sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);

			/* check if the amount free in the send socket buffer crossed the threshold */
			if (inp->send_callback &&
			    (((inp->send_sb_threshold > 0) &&
			      (sb_free_now >= inp->send_sb_threshold) &&
			      (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
			     (inp->send_sb_threshold == 0))) {
				/*
				 * Drop the TCB lock around the user callback;
				 * the refcount keeps the association alive.
				 */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
		}
	} else if (stcb->sctp_socket) {
#else
	/* sa_ignore NO_NULL_CHK */
	if (stcb->sctp_socket) {
#endif
	/* NOTE: this brace-body is shared by both preprocessor arms above. */
#if defined(__APPLE__) && !defined(__Userspace__)
		struct socket *so;

#endif
		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
			/* sa_ignore NO_NULL_CHK */
			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
		}
#if defined(__APPLE__) && !defined(__Userspace__)
		/* Apple needs the socket lock; take it with the TCB unlocked. */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
		}
	}

	/* JRS - Use the congestion control given in the CC module */
	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			if (net->net_ack2 > 0) {
				/*
				 * Karn's rule applies to clearing error count, this
				 * is optional.
				 */
				net->error_count = 0;
				if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
					/* addr came good */
					net->dest_state |= SCTP_ADDR_REACHABLE;
					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
					                0, (void *)net, SCTP_SO_NOT_LOCKED);
				}
				if (net == stcb->asoc.primary_destination) {
					if (stcb->asoc.alternate) {
						/* release the alternate, primary is good */
						sctp_free_remote_addr(stcb->asoc.alternate);
						stcb->asoc.alternate = NULL;
					}
				}
				if (net->dest_state & SCTP_ADDR_PF) {
					/* Leave potentially-failed state; restart heartbeat. */
					net->dest_state &= ~SCTP_ADDR_PF;
					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
					                stcb->sctp_ep, stcb, net,
					                SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
					/* Done with this net */
					net->net_ack = 0;
				}
				/* restore any doubled timers */
				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
				if (net->RTO < stcb->asoc.minrto) {
					net->RTO = stcb->asoc.minrto;
				}
				if (net->RTO > stcb->asoc.maxrto) {
					net->RTO = stcb->asoc.maxrto;
				}
			}
		}
		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
	}
	asoc->last_acked_seq = cumack;

	if (TAILQ_EMPTY(&asoc->sent_queue)) {
		/* nothing left in-flight */
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			net->flight_size = 0;
			net->partial_bytes_acked = 0;
		}
		asoc->total_flight = 0;
		asoc->total_flight_count = 0;
	}

	/* RWND update */
	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
	            (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
		/* SWS sender side engages */
		asoc->peers_rwnd = 0;
	}
	if (asoc->peers_rwnd > old_rwnd) {
		win_probe_recovery = 1;
	}
	/* Now assure a timer where data is queued at */
again:
	j = 0;
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (win_probe_recovery && (net->window_probe)) {
			win_probe_recovered = 1;
			/*
			 * Find first chunk that was used with window probe
			 * and clear the sent
			 */
			/* sa_ignore FREED_MEMORY */
			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
				if (tp1->window_probe) {
					/* move back to data send queue */
					sctp_window_probe_recovery(stcb, asoc, tp1);
					break;
				}
			}
		}
		if (net->flight_size) {
			/* j counts destinations that still have data in flight. */
			j++;
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
			if (net->window_probe) {
				net->window_probe = 0;
			}
		} else {
			if (net->window_probe) {
				/* In window probes we must assure a timer is still running there */
				net->window_probe = 0;
				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
				}
			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				                stcb, net,
				                SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
			}
		}
	}
	if ((j == 0) &&
	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
	    (asoc->sent_queue_retran_cnt == 0) &&
	    (win_probe_recovered == 0) &&
	    (done_once == 0)) {
		/* huh, this should not happen unless all packets
		 * are PR-SCTP and marked to skip of course.
		 */
		if (sctp_fs_audit(asoc)) {
			/* Flight accounting drifted: rebuild it from the queue. */
			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
				net->flight_size = 0;
			}
			asoc->total_flight = 0;
			asoc->total_flight_count = 0;
			asoc->sent_queue_retran_cnt = 0;
			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
					sctp_flight_size_increase(tp1);
					sctp_total_flight_increase(stcb, tp1);
				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
				}
			}
		}
		/* done_once guards against looping forever through "again". */
		done_once = 1;
		goto again;
	}
	/**********************************/
	/* Now what about shutdown issues */
	/**********************************/
	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
		/* nothing left on sendqueue.. consider done */
		/* clean up */
		if ((asoc->stream_queue_cnt == 1) &&
		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
		     (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
			/* A partially-sent user message blocks a clean shutdown. */
			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
		}
		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
		     (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
		    (asoc->stream_queue_cnt == 1) &&
		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
			struct mbuf *op_err;

			*abort_now = 1;
			/* XXX */
			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
			return;
		}
		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
		    (asoc->stream_queue_cnt == 0)) {
			struct sctp_nets *netp;

			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
			}
			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
			sctp_stop_timers_for_shutdown(stcb);
			if (asoc->alternate) {
				netp = asoc->alternate;
			} else {
				netp = asoc->primary_destination;
			}
			sctp_send_shutdown(stcb, netp);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
			     stcb->sctp_ep, stcb, netp);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			     stcb->sctp_ep, stcb, NULL);
		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
		     (asoc->stream_queue_cnt == 0)) {
			struct sctp_nets *netp;

			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
			sctp_stop_timers_for_shutdown(stcb);
			if (asoc->alternate) {
				netp = asoc->alternate;
			} else {
				netp = asoc->primary_destination;
			}
			sctp_send_shutdown_ack(stcb, netp);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
			     stcb->sctp_ep, stcb, netp);
		}
	}
	/*********************************************/
	/* Here we perform PR-SCTP procedures        */
	/* (section 4.2)                             */
	/*********************************************/
	/* C1. update advancedPeerAckPoint */
	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
		asoc->advanced_peer_ack_point = cumack;
	}
	/* PR-Sctp issues need to be addressed too */
	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
		struct sctp_tmit_chunk *lchk;
		uint32_t old_adv_peer_ack_point;

		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
		/* C3. See if we need to send a Fwd-TSN */
		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
			/*
			 * ISSUE with ECN, see FWD-TSN processing.
			 */
			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
				send_forward_tsn(stcb, asoc);
			} else if (lchk) {
				/* try to FR fwd-tsn's that get lost too */
				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
					send_forward_tsn(stcb, asoc);
				}
			}
		}
		/* Skip chunks with no destination before (re)arming the timer. */
		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
			if (lchk->whoTo != NULL) {
				break;
			}
		}
		if (lchk != NULL) {
			/* Assure a timer is up */
			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
			                 stcb->sctp_ep, stcb, lchk->whoTo);
		}
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
		         rwnd,
		         stcb->asoc.peers_rwnd,
		         stcb->asoc.total_flight,
		         stcb->asoc.total_output_queue_size);
	}
}
4425
4426
void
4427
sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4428
                 struct sctp_tcb *stcb,
4429
                 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4430
                 int *abort_now, uint8_t flags,
4431
                 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4432
677
{
4433
677
  struct sctp_association *asoc;
4434
677
  struct sctp_tmit_chunk *tp1, *tp2;
4435
677
  uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4436
677
  uint16_t wake_him = 0;
4437
677
  uint32_t send_s = 0;
4438
677
  long j;
4439
677
  int accum_moved = 0;
4440
677
  int will_exit_fast_recovery = 0;
4441
677
  uint32_t a_rwnd, old_rwnd;
4442
677
  int win_probe_recovery = 0;
4443
677
  int win_probe_recovered = 0;
4444
677
  struct sctp_nets *net = NULL;
4445
677
  int done_once;
4446
677
  int rto_ok = 1;
4447
677
  uint8_t reneged_all = 0;
4448
677
  uint8_t cmt_dac_flag;
4449
  /*
4450
   * we take any chance we can to service our queues since we cannot
4451
   * get awoken when the socket is read from :<
4452
   */
4453
  /*
4454
   * Now perform the actual SACK handling: 1) Verify that it is not an
4455
   * old sack, if so discard. 2) If there is nothing left in the send
4456
   * queue (cum-ack is equal to last acked) then you have a duplicate
4457
   * too, update any rwnd change and verify no timers are running.
4458
   * then return. 3) Process any new consecutive data i.e. cum-ack
4459
   * moved process these first and note that it moved. 4) Process any
4460
   * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4461
   * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4462
   * sync up flightsizes and things, stop all timers and also check
4463
   * for shutdown_pending state. If so then go ahead and send off the
4464
   * shutdown. If in shutdown recv, send off the shutdown-ack and
4465
   * start that timer, Ret. 9) Strike any non-acked things and do FR
4466
   * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4467
   * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4468
   * if in shutdown_recv state.
4469
   */
4470
677
  SCTP_TCB_LOCK_ASSERT(stcb);
4471
  /* CMT DAC algo */
4472
677
  this_sack_lowest_newack = 0;
4473
677
  SCTP_STAT_INCR(sctps_slowpath_sack);
4474
677
  last_tsn = cum_ack;
4475
677
  cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4476
#ifdef SCTP_ASOCLOG_OF_TSNS
4477
  stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4478
  stcb->asoc.cumack_log_at++;
4479
  if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4480
    stcb->asoc.cumack_log_at = 0;
4481
  }
4482
#endif
4483
677
  a_rwnd = rwnd;
4484
4485
677
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4486
0
    sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4487
0
                   rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4488
0
  }
4489
4490
677
  old_rwnd = stcb->asoc.peers_rwnd;
4491
677
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4492
0
    sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4493
0
                   stcb->asoc.overall_error_count,
4494
0
                   0,
4495
0
                   SCTP_FROM_SCTP_INDATA,
4496
0
                   __LINE__);
4497
0
  }
4498
677
  stcb->asoc.overall_error_count = 0;
4499
677
  asoc = &stcb->asoc;
4500
677
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4501
0
    sctp_log_sack(asoc->last_acked_seq,
4502
0
                  cum_ack,
4503
0
                  0,
4504
0
                  num_seg,
4505
0
                  num_dup,
4506
0
                  SCTP_LOG_NEW_SACK);
4507
0
  }
4508
677
  if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4509
0
    uint16_t i;
4510
0
    uint32_t *dupdata, dblock;
4511
4512
0
    for (i = 0; i < num_dup; i++) {
4513
0
      dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4514
0
                                          sizeof(uint32_t), (uint8_t *)&dblock);
4515
0
      if (dupdata == NULL) {
4516
0
        break;
4517
0
      }
4518
0
      sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4519
0
    }
4520
0
  }
4521
  /* reality check */
4522
677
  if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4523
181
    tp1 = TAILQ_LAST(&asoc->sent_queue,
4524
181
         sctpchunk_listhead);
4525
181
    send_s = tp1->rec.data.tsn + 1;
4526
496
  } else {
4527
496
    tp1 = NULL;
4528
496
    send_s = asoc->sending_seq;
4529
496
  }
4530
677
  if (SCTP_TSN_GE(cum_ack, send_s)) {
4531
39
    struct mbuf *op_err;
4532
39
    char msg[SCTP_DIAG_INFO_LEN];
4533
4534
    /*
4535
     * no way, we have not even sent this TSN out yet.
4536
     * Peer is hopelessly messed up with us.
4537
     */
4538
39
    SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4539
39
          cum_ack, send_s);
4540
39
    if (tp1) {
4541
0
      SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4542
0
            tp1->rec.data.tsn, (void *)tp1);
4543
0
    }
4544
39
  hopeless_peer:
4545
39
    *abort_now = 1;
4546
    /* XXX */
4547
39
    SCTP_SNPRINTF(msg, sizeof(msg),
4548
39
                  "Cum ack %8.8x greater or equal than TSN %8.8x",
4549
39
                  cum_ack, send_s);
4550
39
    op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4551
39
    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29;
4552
39
    sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4553
39
    return;
4554
39
  }
4555
  /**********************/
4556
  /* 1) check the range */
4557
  /**********************/
4558
638
  if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4559
    /* acking something behind */
4560
638
    return;
4561
638
  }
4562
4563
  /* update the Rwnd of the peer */
4564
0
  if (TAILQ_EMPTY(&asoc->sent_queue) &&
4565
0
      TAILQ_EMPTY(&asoc->send_queue) &&
4566
0
      (asoc->stream_queue_cnt == 0)) {
4567
    /* nothing left on send/sent and strmq */
4568
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4569
0
      sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4570
0
                        asoc->peers_rwnd, 0, 0, a_rwnd);
4571
0
    }
4572
0
    asoc->peers_rwnd = a_rwnd;
4573
0
    if (asoc->sent_queue_retran_cnt) {
4574
0
      asoc->sent_queue_retran_cnt = 0;
4575
0
    }
4576
0
    if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4577
      /* SWS sender side engages */
4578
0
      asoc->peers_rwnd = 0;
4579
0
    }
4580
    /* stop any timers */
4581
0
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4582
0
      sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4583
0
                      stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4584
0
      net->partial_bytes_acked = 0;
4585
0
      net->flight_size = 0;
4586
0
    }
4587
0
    asoc->total_flight = 0;
4588
0
    asoc->total_flight_count = 0;
4589
0
    return;
4590
0
  }
4591
  /*
4592
   * We init netAckSz and netAckSz2 to 0. These are used to track 2
4593
   * things. The total byte count acked is tracked in netAckSz AND
4594
   * netAck2 is used to track the total bytes acked that are un-
4595
   * ambiguous and were never retransmitted. We track these on a per
4596
   * destination address basis.
4597
   */
4598
0
  TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4599
0
    if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4600
      /* Drag along the window_tsn for cwr's */
4601
0
      net->cwr_window_tsn = cum_ack;
4602
0
    }
4603
0
    net->prev_cwnd = net->cwnd;
4604
0
    net->net_ack = 0;
4605
0
    net->net_ack2 = 0;
4606
4607
    /*
4608
     * CMT: Reset CUC and Fast recovery algo variables before
4609
     * SACK processing
4610
     */
4611
0
    net->new_pseudo_cumack = 0;
4612
0
    net->will_exit_fast_recovery = 0;
4613
0
    if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4614
0
      (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
4615
0
    }
4616
4617
    /*
4618
     * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4619
     * to be greater than the cumack. Also reset saw_newack to 0
4620
     * for all dests.
4621
     */
4622
0
    net->saw_newack = 0;
4623
0
    net->this_sack_highest_newack = last_tsn;
4624
0
  }
4625
  /* process the new consecutive TSN first */
4626
0
  TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4627
0
    if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4628
0
      if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4629
0
        accum_moved = 1;
4630
0
        if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4631
          /*
4632
           * If it is less than ACKED, it is
4633
           * now no-longer in flight. Higher
4634
           * values may occur during marking
4635
           */
4636
0
          if ((tp1->whoTo->dest_state &
4637
0
               SCTP_ADDR_UNCONFIRMED) &&
4638
0
              (tp1->snd_count < 2)) {
4639
            /*
4640
             * If there was no retran
4641
             * and the address is
4642
             * un-confirmed and we sent
4643
             * there and are now
4644
             * sacked.. its confirmed,
4645
             * mark it so.
4646
             */
4647
0
            tp1->whoTo->dest_state &=
4648
0
              ~SCTP_ADDR_UNCONFIRMED;
4649
0
          }
4650
0
          if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4651
0
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4652
0
              sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4653
0
                             tp1->whoTo->flight_size,
4654
0
                             tp1->book_size,
4655
0
                             (uint32_t)(uintptr_t)tp1->whoTo,
4656
0
                             tp1->rec.data.tsn);
4657
0
            }
4658
0
            sctp_flight_size_decrease(tp1);
4659
0
            sctp_total_flight_decrease(stcb, tp1);
4660
0
            if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4661
0
              (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
4662
0
                                 tp1);
4663
0
            }
4664
0
          }
4665
0
          tp1->whoTo->net_ack += tp1->send_size;
4666
4667
          /* CMT SFR and DAC algos */
4668
0
          this_sack_lowest_newack = tp1->rec.data.tsn;
4669
0
          tp1->whoTo->saw_newack = 1;
4670
4671
0
          if (tp1->snd_count < 2) {
4672
            /*
4673
             * True non-retransmitted
4674
             * chunk
4675
             */
4676
0
            tp1->whoTo->net_ack2 +=
4677
0
              tp1->send_size;
4678
4679
            /* update RTO too? */
4680
0
            if (tp1->do_rtt) {
4681
0
              if (rto_ok &&
4682
0
                  sctp_calculate_rto(stcb,
4683
0
                         &stcb->asoc,
4684
0
                         tp1->whoTo,
4685
0
                         &tp1->sent_rcv_time,
4686
0
                         SCTP_RTT_FROM_DATA)) {
4687
0
                rto_ok = 0;
4688
0
              }
4689
0
              if (tp1->whoTo->rto_needed == 0) {
4690
0
                tp1->whoTo->rto_needed = 1;
4691
0
              }
4692
0
              tp1->do_rtt = 0;
4693
0
            }
4694
0
          }
4695
          /*
4696
           * CMT: CUCv2 algorithm. From the
4697
           * cumack'd TSNs, for each TSN being
4698
           * acked for the first time, set the
4699
           * following variables for the
4700
           * corresp destination.
4701
           * new_pseudo_cumack will trigger a
4702
           * cwnd update.
4703
           * find_(rtx_)pseudo_cumack will
4704
           * trigger search for the next
4705
           * expected (rtx-)pseudo-cumack.
4706
           */
4707
0
          tp1->whoTo->new_pseudo_cumack = 1;
4708
0
          tp1->whoTo->find_pseudo_cumack = 1;
4709
0
          tp1->whoTo->find_rtx_pseudo_cumack = 1;
4710
0
          if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4711
0
            sctp_log_sack(asoc->last_acked_seq,
4712
0
                          cum_ack,
4713
0
                          tp1->rec.data.tsn,
4714
0
                          0,
4715
0
                          0,
4716
0
                          SCTP_LOG_TSN_ACKED);
4717
0
          }
4718
0
          if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4719
0
            sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4720
0
          }
4721
0
        }
4722
0
        if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4723
0
          sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4724
#ifdef SCTP_AUDITING_ENABLED
4725
          sctp_audit_log(0xB3,
4726
                         (asoc->sent_queue_retran_cnt & 0x000000ff));
4727
#endif
4728
0
        }
4729
0
        if (tp1->rec.data.chunk_was_revoked) {
4730
          /* deflate the cwnd */
4731
0
          tp1->whoTo->cwnd -= tp1->book_size;
4732
0
          tp1->rec.data.chunk_was_revoked = 0;
4733
0
        }
4734
0
        if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4735
0
          tp1->sent = SCTP_DATAGRAM_ACKED;
4736
0
        }
4737
0
      }
4738
0
    } else {
4739
0
      break;
4740
0
    }
4741
0
  }
4742
0
  biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4743
  /* always set this up to cum-ack */
4744
0
  asoc->this_sack_highest_gap = last_tsn;
4745
4746
0
  if ((num_seg > 0) || (num_nr_seg > 0)) {
4747
    /*
4748
     * thisSackHighestGap will increase while handling NEW
4749
     * segments this_sack_highest_newack will increase while
4750
     * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4751
     * used for CMT DAC algo. saw_newack will also change.
4752
     */
4753
0
    if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4754
0
      &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4755
0
      num_seg, num_nr_seg, &rto_ok)) {
4756
0
      wake_him++;
4757
0
    }
4758
    /*
4759
     * validate the biggest_tsn_acked in the gap acks if
4760
     * strict adherence is wanted.
4761
     */
4762
0
    if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4763
      /*
4764
       * peer is either confused or we are under
4765
       * attack. We must abort.
4766
       */
4767
0
      SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4768
0
            biggest_tsn_acked, send_s);
4769
0
      goto hopeless_peer;
4770
0
    }
4771
0
  }
4772
  /*******************************************/
4773
  /* cancel ALL T3-send timer if accum moved */
4774
  /*******************************************/
4775
0
  if (asoc->sctp_cmt_on_off > 0) {
4776
0
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4777
0
      if (net->new_pseudo_cumack)
4778
0
        sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4779
0
                        stcb, net,
4780
0
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4781
0
    }
4782
0
  } else {
4783
0
    if (accum_moved) {
4784
0
      TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4785
0
        sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4786
0
                        stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4787
0
      }
4788
0
    }
4789
0
  }
4790
  /********************************************/
4791
  /* drop the acked chunks from the sentqueue */
4792
  /********************************************/
4793
0
  asoc->last_acked_seq = cum_ack;
4794
4795
0
  TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4796
0
    if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4797
0
      break;
4798
0
    }
4799
0
    if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4800
0
      if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4801
0
        asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4802
0
#ifdef INVARIANTS
4803
0
      } else {
4804
0
        panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4805
0
#endif
4806
0
      }
4807
0
    }
4808
0
    if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4809
0
        (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4810
0
        TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4811
0
      asoc->trigger_reset = 1;
4812
0
    }
4813
0
    TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4814
0
    if (PR_SCTP_ENABLED(tp1->flags)) {
4815
0
      if (asoc->pr_sctp_cnt != 0)
4816
0
        asoc->pr_sctp_cnt--;
4817
0
    }
4818
0
    asoc->sent_queue_cnt--;
4819
0
    if (tp1->data) {
4820
      /* sa_ignore NO_NULL_CHK */
4821
0
      sctp_free_bufspace(stcb, asoc, tp1, 1);
4822
0
      sctp_m_freem(tp1->data);
4823
0
      tp1->data = NULL;
4824
0
      if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4825
0
        asoc->sent_queue_cnt_removeable--;
4826
0
      }
4827
0
    }
4828
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4829
0
      sctp_log_sack(asoc->last_acked_seq,
4830
0
                    cum_ack,
4831
0
                    tp1->rec.data.tsn,
4832
0
                    0,
4833
0
                    0,
4834
0
                    SCTP_LOG_FREE_SENT);
4835
0
    }
4836
0
    sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4837
0
    wake_him++;
4838
0
  }
4839
0
  if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4840
0
#ifdef INVARIANTS
4841
0
    panic("Warning flight size is positive and should be 0");
4842
#else
4843
    SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4844
                asoc->total_flight);
4845
#endif
4846
0
    asoc->total_flight = 0;
4847
0
  }
4848
4849
0
#if defined(__Userspace__)
4850
0
  if (stcb->sctp_ep->recv_callback) {
4851
0
    if (stcb->sctp_socket) {
4852
0
      uint32_t inqueue_bytes, sb_free_now;
4853
0
      struct sctp_inpcb *inp;
4854
4855
0
      inp = stcb->sctp_ep;
4856
0
      inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
4857
0
      sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
4858
4859
      /* check if the amount free in the send socket buffer crossed the threshold */
4860
0
      if (inp->send_callback &&
4861
0
         (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) ||
4862
0
          (inp->send_sb_threshold == 0))) {
4863
0
        atomic_add_int(&stcb->asoc.refcnt, 1);
4864
0
        SCTP_TCB_UNLOCK(stcb);
4865
0
        inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info);
4866
0
        SCTP_TCB_LOCK(stcb);
4867
0
        atomic_subtract_int(&stcb->asoc.refcnt, 1);
4868
0
      }
4869
0
    }
4870
0
  } else if ((wake_him) && (stcb->sctp_socket)) {
4871
#else
4872
  /* sa_ignore NO_NULL_CHK */
4873
  if ((wake_him) && (stcb->sctp_socket)) {
4874
#endif
4875
#if defined(__APPLE__) && !defined(__Userspace__)
4876
    struct socket *so;
4877
4878
#endif
4879
0
    SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4880
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4881
0
      sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4882
0
    }
4883
#if defined(__APPLE__) && !defined(__Userspace__)
4884
    so = SCTP_INP_SO(stcb->sctp_ep);
4885
    atomic_add_int(&stcb->asoc.refcnt, 1);
4886
    SCTP_TCB_UNLOCK(stcb);
4887
    SCTP_SOCKET_LOCK(so, 1);
4888
    SCTP_TCB_LOCK(stcb);
4889
    atomic_subtract_int(&stcb->asoc.refcnt, 1);
4890
    if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4891
      /* assoc was freed while we were unlocked */
4892
      SCTP_SOCKET_UNLOCK(so, 1);
4893
      return;
4894
    }
4895
#endif
4896
0
    sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4897
#if defined(__APPLE__) && !defined(__Userspace__)
4898
    SCTP_SOCKET_UNLOCK(so, 1);
4899
#endif
4900
0
  } else {
4901
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4902
0
      sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4903
0
    }
4904
0
  }
4905
4906
0
  if (asoc->fast_retran_loss_recovery && accum_moved) {
4907
0
    if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4908
      /* Setup so we will exit RFC2582 fast recovery */
4909
0
      will_exit_fast_recovery = 1;
4910
0
    }
4911
0
  }
4912
  /*
4913
   * Check for revoked fragments:
4914
   *
4915
   * if Previous sack - Had no frags then we can't have any revoked if
4916
   * Previous sack - Had frag's then - If we now have frags aka
4917
   * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4918
   * some of them. else - The peer revoked all ACKED fragments, since
4919
   * we had some before and now we have NONE.
4920
   */
4921
4922
0
  if (num_seg) {
4923
0
    sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4924
0
    asoc->saw_sack_with_frags = 1;
4925
0
  } else if (asoc->saw_sack_with_frags) {
4926
0
    int cnt_revoked = 0;
4927
4928
    /* Peer revoked all dg's marked or acked */
4929
0
    TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4930
0
      if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4931
0
        tp1->sent = SCTP_DATAGRAM_SENT;
4932
0
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4933
0
          sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4934
0
                         tp1->whoTo->flight_size,
4935
0
                         tp1->book_size,
4936
0
                         (uint32_t)(uintptr_t)tp1->whoTo,
4937
0
                         tp1->rec.data.tsn);
4938
0
        }
4939
0
        sctp_flight_size_increase(tp1);
4940
0
        sctp_total_flight_increase(stcb, tp1);
4941
0
        tp1->rec.data.chunk_was_revoked = 1;
4942
        /*
4943
         * To ensure that this increase in
4944
         * flightsize, which is artificial,
4945
         * does not throttle the sender, we
4946
         * also increase the cwnd
4947
         * artificially.
4948
         */
4949
0
        tp1->whoTo->cwnd += tp1->book_size;
4950
0
        cnt_revoked++;
4951
0
      }
4952
0
    }
4953
0
    if (cnt_revoked) {
4954
0
      reneged_all = 1;
4955
0
    }
4956
0
    asoc->saw_sack_with_frags = 0;
4957
0
  }
4958
0
  if (num_nr_seg > 0)
4959
0
    asoc->saw_sack_with_nr_frags = 1;
4960
0
  else
4961
0
    asoc->saw_sack_with_nr_frags = 0;
4962
4963
  /* JRS - Use the congestion control given in the CC module */
4964
0
  if (ecne_seen == 0) {
4965
0
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4966
0
      if (net->net_ack2 > 0) {
4967
        /*
4968
         * Karn's rule applies to clearing error count, this
4969
         * is optional.
4970
         */
4971
0
        net->error_count = 0;
4972
0
        if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
4973
          /* addr came good */
4974
0
          net->dest_state |= SCTP_ADDR_REACHABLE;
4975
0
          sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4976
0
                          0, (void *)net, SCTP_SO_NOT_LOCKED);
4977
0
        }
4978
4979
0
        if (net == stcb->asoc.primary_destination) {
4980
0
          if (stcb->asoc.alternate) {
4981
            /* release the alternate, primary is good */
4982
0
            sctp_free_remote_addr(stcb->asoc.alternate);
4983
0
            stcb->asoc.alternate = NULL;
4984
0
          }
4985
0
        }
4986
4987
0
        if (net->dest_state & SCTP_ADDR_PF) {
4988
0
          net->dest_state &= ~SCTP_ADDR_PF;
4989
0
          sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4990
0
                          stcb->sctp_ep, stcb, net,
4991
0
                          SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
4992
0
          sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4993
0
          asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4994
          /* Done with this net */
4995
0
          net->net_ack = 0;
4996
0
        }
4997
        /* restore any doubled timers */
4998
0
        net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4999
0
        if (net->RTO < stcb->asoc.minrto) {
5000
0
          net->RTO = stcb->asoc.minrto;
5001
0
        }
5002
0
        if (net->RTO > stcb->asoc.maxrto) {
5003
0
          net->RTO = stcb->asoc.maxrto;
5004
0
        }
5005
0
      }
5006
0
    }
5007
0
    asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5008
0
  }
5009
5010
0
  if (TAILQ_EMPTY(&asoc->sent_queue)) {
5011
    /* nothing left in-flight */
5012
0
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5013
      /* stop all timers */
5014
0
      sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5015
0
                      stcb, net,
5016
0
                      SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
5017
0
      net->flight_size = 0;
5018
0
      net->partial_bytes_acked = 0;
5019
0
    }
5020
0
    asoc->total_flight = 0;
5021
0
    asoc->total_flight_count = 0;
5022
0
  }
5023
5024
  /**********************************/
5025
  /* Now what about shutdown issues */
5026
  /**********************************/
5027
0
  if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5028
    /* nothing left on sendqueue.. consider done */
5029
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5030
0
      sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5031
0
                        asoc->peers_rwnd, 0, 0, a_rwnd);
5032
0
    }
5033
0
    asoc->peers_rwnd = a_rwnd;
5034
0
    if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5035
      /* SWS sender side engages */
5036
0
      asoc->peers_rwnd = 0;
5037
0
    }
5038
    /* clean up */
5039
0
    if ((asoc->stream_queue_cnt == 1) &&
5040
0
        ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5041
0
         (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5042
0
        ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
5043
0
      SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
5044
0
    }
5045
0
    if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5046
0
         (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5047
0
        (asoc->stream_queue_cnt == 1) &&
5048
0
        (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5049
0
      struct mbuf *op_err;
5050
5051
0
      *abort_now = 1;
5052
      /* XXX */
5053
0
      op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5054
0
      stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
5055
0
      sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
5056
0
      return;
5057
0
    }
5058
0
    if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5059
0
        (asoc->stream_queue_cnt == 0)) {
5060
0
      struct sctp_nets *netp;
5061
5062
0
      if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5063
0
          (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5064
0
        SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5065
0
      }
5066
0
      SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5067
0
      sctp_stop_timers_for_shutdown(stcb);
5068
0
      if (asoc->alternate) {
5069
0
        netp = asoc->alternate;
5070
0
      } else {
5071
0
        netp = asoc->primary_destination;
5072
0
      }
5073
0
      sctp_send_shutdown(stcb, netp);
5074
0
      sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5075
0
           stcb->sctp_ep, stcb, netp);
5076
0
      sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5077
0
           stcb->sctp_ep, stcb, NULL);
5078
0
      return;
5079
0
    } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5080
0
         (asoc->stream_queue_cnt == 0)) {
5081
0
      struct sctp_nets *netp;
5082
5083
0
      SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5084
0
      SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5085
0
      sctp_stop_timers_for_shutdown(stcb);
5086
0
      if (asoc->alternate) {
5087
0
        netp = asoc->alternate;
5088
0
      } else {
5089
0
        netp = asoc->primary_destination;
5090
0
      }
5091
0
      sctp_send_shutdown_ack(stcb, netp);
5092
0
      sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5093
0
                       stcb->sctp_ep, stcb, netp);
5094
0
      return;
5095
0
    }
5096
0
  }
5097
  /*
5098
   * Now here we are going to recycle net_ack for a different use...
5099
   * HEADS UP.
5100
   */
5101
0
  TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5102
0
    net->net_ack = 0;
5103
0
  }
5104
5105
  /*
5106
   * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5107
   * to be done. Setting this_sack_lowest_newack to the cum_ack will
5108
   * automatically ensure that.
5109
   */
5110
0
  if ((asoc->sctp_cmt_on_off > 0) &&
5111
0
      SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5112
0
      (cmt_dac_flag == 0)) {
5113
0
    this_sack_lowest_newack = cum_ack;
5114
0
  }
5115
0
  if ((num_seg > 0) || (num_nr_seg > 0)) {
5116
0
    sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5117
0
                               biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5118
0
  }
5119
  /* JRS - Use the congestion control given in the CC module */
5120
0
  asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5121
5122
  /* Now are we exiting loss recovery ? */
5123
0
  if (will_exit_fast_recovery) {
5124
    /* Ok, we must exit fast recovery */
5125
0
    asoc->fast_retran_loss_recovery = 0;
5126
0
  }
5127
0
  if ((asoc->sat_t3_loss_recovery) &&
5128
0
      SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5129
    /* end satellite t3 loss recovery */
5130
0
    asoc->sat_t3_loss_recovery = 0;
5131
0
  }
5132
  /*
5133
   * CMT Fast recovery
5134
   */
5135
0
  TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5136
0
    if (net->will_exit_fast_recovery) {
5137
      /* Ok, we must exit fast recovery */
5138
0
      net->fast_retran_loss_recovery = 0;
5139
0
    }
5140
0
  }
5141
5142
  /* Adjust and set the new rwnd value */
5143
0
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5144
0
    sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5145
0
                      asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5146
0
  }
5147
0
  asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5148
0
                                      (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5149
0
  if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5150
    /* SWS sender side engages */
5151
0
    asoc->peers_rwnd = 0;
5152
0
  }
5153
0
  if (asoc->peers_rwnd > old_rwnd) {
5154
0
    win_probe_recovery = 1;
5155
0
  }
5156
5157
  /*
5158
   * Now we must setup so we have a timer up for anyone with
5159
   * outstanding data.
5160
   */
5161
0
  done_once = 0;
5162
0
again:
5163
0
  j = 0;
5164
0
  TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5165
0
    if (win_probe_recovery && (net->window_probe)) {
5166
0
      win_probe_recovered = 1;
5167
      /*-
5168
       * Find first chunk that was used with
5169
       * window probe and clear the event. Put
5170
       * it back into the send queue as if has
5171
       * not been sent.
5172
       */
5173
0
      TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5174
0
        if (tp1->window_probe) {
5175
0
          sctp_window_probe_recovery(stcb, asoc, tp1);
5176
0
          break;
5177
0
        }
5178
0
      }
5179
0
    }
5180
0
    if (net->flight_size) {
5181
0
      j++;
5182
0
      if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5183
0
        sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5184
0
                         stcb->sctp_ep, stcb, net);
5185
0
      }
5186
0
      if (net->window_probe) {
5187
0
        net->window_probe = 0;
5188
0
      }
5189
0
    } else {
5190
0
      if (net->window_probe) {
5191
        /* In window probes we must assure a timer is still running there */
5192
0
        if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5193
0
          sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5194
0
                           stcb->sctp_ep, stcb, net);
5195
0
        }
5196
0
      } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5197
0
        sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5198
0
                        stcb, net,
5199
0
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_36);
5200
0
      }
5201
0
    }
5202
0
  }
5203
0
  if ((j == 0) &&
5204
0
      (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5205
0
      (asoc->sent_queue_retran_cnt == 0) &&
5206
0
      (win_probe_recovered == 0) &&
5207
0
      (done_once == 0)) {
5208
    /* huh, this should not happen unless all packets
5209
     * are PR-SCTP and marked to skip of course.
5210
     */
5211
0
    if (sctp_fs_audit(asoc)) {
5212
0
      TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5213
0
        net->flight_size = 0;
5214
0
      }
5215
0
      asoc->total_flight = 0;
5216
0
      asoc->total_flight_count = 0;
5217
0
      asoc->sent_queue_retran_cnt = 0;
5218
0
      TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5219
0
        if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5220
0
          sctp_flight_size_increase(tp1);
5221
0
          sctp_total_flight_increase(stcb, tp1);
5222
0
        } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5223
0
          sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5224
0
        }
5225
0
      }
5226
0
    }
5227
0
    done_once = 1;
5228
0
    goto again;
5229
0
  }
5230
  /*********************************************/
5231
  /* Here we perform PR-SCTP procedures        */
5232
  /* (section 4.2)                             */
5233
  /*********************************************/
5234
  /* C1. update advancedPeerAckPoint */
5235
0
  if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5236
0
    asoc->advanced_peer_ack_point = cum_ack;
5237
0
  }
5238
  /* C2. try to further move advancedPeerAckPoint ahead */
5239
0
  if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5240
0
    struct sctp_tmit_chunk *lchk;
5241
0
    uint32_t old_adv_peer_ack_point;
5242
5243
0
    old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5244
0
    lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5245
    /* C3. See if we need to send a Fwd-TSN */
5246
0
    if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5247
      /*
5248
       * ISSUE with ECN, see FWD-TSN processing.
5249
       */
5250
0
      if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5251
0
        sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5252
0
                       0xee, cum_ack, asoc->advanced_peer_ack_point,
5253
0
                       old_adv_peer_ack_point);
5254
0
      }
5255
0
      if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5256
0
        send_forward_tsn(stcb, asoc);
5257
0
      } else if (lchk) {
5258
        /* try to FR fwd-tsn's that get lost too */
5259
0
        if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5260
0
          send_forward_tsn(stcb, asoc);
5261
0
        }
5262
0
      }
5263
0
    }
5264
0
    for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5265
0
      if (lchk->whoTo != NULL) {
5266
0
        break;
5267
0
      }
5268
0
    }
5269
0
    if (lchk != NULL) {
5270
      /* Assure a timer is up */
5271
0
      sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5272
0
                       stcb->sctp_ep, stcb, lchk->whoTo);
5273
0
    }
5274
0
  }
5275
0
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5276
0
    sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5277
0
                   a_rwnd,
5278
0
                   stcb->asoc.peers_rwnd,
5279
0
                   stcb->asoc.total_flight,
5280
0
                   stcb->asoc.total_output_queue_size);
5281
0
  }
5282
0
}
5283
5284
void
5285
sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5286
706
{
5287
  /* Copy cum-ack */
5288
706
  uint32_t cum_ack, a_rwnd;
5289
5290
706
  cum_ack = ntohl(cp->cumulative_tsn_ack);
5291
  /* Arrange so a_rwnd does NOT change */
5292
706
  a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5293
5294
  /* Now call the express sack handling */
5295
706
  sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5296
706
}
5297
5298
/*
 * PR-SCTP helper: after a FORWARD-TSN moved the cumulative ack point,
 * flush everything on stream 'strmin' that is now deliverable.  Two
 * passes over the ordered in-queue: first everything at or below the
 * stream's last delivered MID, then anything that has become the next
 * in-order message.  Caller must hold the TCB lock and the INP read
 * lock (asserted below).
 */
static void
sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
                               struct sctp_stream_in *strmin)
{
  struct sctp_queued_to_read *control, *ncontrol;
  struct sctp_association *asoc;
  uint32_t mid;
  int need_reasm_check = 0;

  KASSERT(stcb != NULL, ("stcb == NULL"));
  SCTP_TCB_LOCK_ASSERT(stcb);
  SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep);

  asoc = &stcb->asoc;
  /* Remember where delivery stood so it can be restored after the
   * reassembly check below. */
  mid = strmin->last_mid_delivered;
  /*
   * First deliver anything prior to and including the stream no that
   * came in.
   */
  TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
    if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
      /* this is deliverable now */
      if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
        /* Unlink from whichever stream queue holds it before
         * handing it to the socket. */
        if (control->on_strm_q) {
          if (control->on_strm_q == SCTP_ON_ORDERED) {
            TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
          } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
            TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
          } else {
            panic("strmin: %p ctl: %p unknown %d",
                  strmin, control, control->on_strm_q);
#endif
          }
          control->on_strm_q = 0;
        }
        /* subtract pending on streams */
        if (asoc->size_on_all_streams >= control->length) {
          asoc->size_on_all_streams -= control->length;
        } else {
#ifdef INVARIANTS
          panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
          asoc->size_on_all_streams = 0;
#endif
        }
        sctp_ucount_decr(asoc->cnt_on_all_streams);
        /* deliver it to at least the delivery-q */
        if (stcb->sctp_socket) {
          sctp_mark_non_revokable(asoc, control->sinfo_tsn);
          sctp_add_to_readq(stcb->sctp_ep, stcb, control,
                            &stcb->sctp_socket->so_rcv, 1,
                            SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
        }
      } else {
        /* Its a fragmented message */
        if (control->first_frag_seen) {
          /* Make it so this is next to deliver, we restore later */
          strmin->last_mid_delivered = control->mid - 1;
          need_reasm_check = 1;
          break;
        }
      }
    } else {
      /* no more delivery now. */
      break;
    }
  }
  if (need_reasm_check) {
    int ret;
    /* Try to complete delivery of the partial message that was put
     * at the front of the line above. */
    ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
    if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
      /* Restore the next to deliver unless we are ahead */
      strmin->last_mid_delivered = mid;
    }
    if (ret == 0) {
      /* Left the front Partial one on */
      return;
    }
    need_reasm_check = 0;
  }
  /*
   * now we must deliver things in queue the normal way  if any are
   * now ready.
   */
  mid = strmin->last_mid_delivered + 1;
  TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
    if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
      if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
        /* this is deliverable now */
        if (control->on_strm_q) {
          if (control->on_strm_q == SCTP_ON_ORDERED) {
            TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
          } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
            TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
          } else {
            panic("strmin: %p ctl: %p unknown %d",
                  strmin, control, control->on_strm_q);
#endif
          }
          control->on_strm_q = 0;
        }
        /* subtract pending on streams */
        if (asoc->size_on_all_streams >= control->length) {
          asoc->size_on_all_streams -= control->length;
        } else {
#ifdef INVARIANTS
          panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
          asoc->size_on_all_streams = 0;
#endif
        }
        sctp_ucount_decr(asoc->cnt_on_all_streams);
        /* deliver it to at least the delivery-q */
        strmin->last_mid_delivered = control->mid;
        if (stcb->sctp_socket) {
          sctp_mark_non_revokable(asoc, control->sinfo_tsn);
          sctp_add_to_readq(stcb->sctp_ep, stcb, control,
                            &stcb->sctp_socket->so_rcv, 1,
                            SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
        }
        /* Advance to the next expected in-order MID. */
        mid = strmin->last_mid_delivered + 1;
      } else {
        /* Its a fragmented message */
        if (control->first_frag_seen) {
          /* Make it so this is next to deliver */
          strmin->last_mid_delivered = control->mid - 1;
          need_reasm_check = 1;
          break;
        }
      }
    } else {
      break;
    }
  }
  if (need_reasm_check) {
    (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
  }
}
5438
5439
static void
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
                              struct sctp_association *asoc, struct sctp_stream_in *strm,
                              struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
{
  struct sctp_tmit_chunk *chk, *nchk;

  /*
   * Flush the reassembly queue of a single in-progress message
   * (`control`) that a FORWARD-TSN up to `cumtsn` has rendered
   * (partially) undeliverable.
   *
   * For now large messages held on the stream reasm that are
   * complete will be tossed too. We could in theory do more
   * work to spin through and stop after dumping one msg aka
   * seeing the start of a new msg at the head, and call the
   * delivery function... to see if it can be delivered... But
   * for now we just dump everything on the queue.
   *
   * Caller must hold the TCB lock and the endpoint's read lock
   * (asserted below); `ordered` is non-zero for ordered data,
   * `cumtsn` is the new cumulative TSN from the FORWARD-TSN chunk.
   */

  KASSERT(stcb != NULL, ("stcb == NULL"));
  SCTP_TCB_LOCK_ASSERT(stcb);
  SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep);

  /*
   * Legacy (non I-DATA) unordered message whose delivered-so-far
   * fragments already extend beyond cumtsn: nothing here is covered
   * by the FORWARD-TSN, so leave the queue alone.
   */
  if (!asoc->idata_supported && !ordered &&
      control->first_frag_seen &&
      SCTP_TSN_GT(control->fsn_included, cumtsn)) {
    return;
  }
  TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
    /* Purge hanging chunks */
    if (!asoc->idata_supported && !ordered) {
      /*
       * For legacy unordered data only drop fragments at or
       * below cumtsn; the queue is TSN-ordered, so stop at the
       * first fragment past it.
       */
      if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
        break;
      }
    }
    TAILQ_REMOVE(&control->reasm, chk, sctp_next);
    /* Keep the reassembly byte accounting from underflowing. */
    if (asoc->size_on_reasm_queue >= chk->send_size) {
      asoc->size_on_reasm_queue -= chk->send_size;
    } else {
#ifdef INVARIANTS
      panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
#else
      asoc->size_on_reasm_queue = 0;
#endif
    }
    sctp_ucount_decr(asoc->cnt_on_reasm_queue);
    if (chk->data) {
      sctp_m_freem(chk->data);
      chk->data = NULL;
    }
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
  }
  if (!TAILQ_EMPTY(&control->reasm)) {
    /*
     * Partial flush: some fragments (TSNs above cumtsn) survived.
     * Only possible for legacy unordered data (see the break above).
     * Reset `control` so the surviving fragments form a fresh
     * partial message, then retry delivery.
     */
    KASSERT(!asoc->idata_supported,
        ("Reassembly queue not empty for I-DATA"));
    KASSERT(!ordered,
        ("Reassembly queue not empty for ordered data"));
    if (control->data) {
      sctp_m_freem(control->data);
      control->data = NULL;
    }
    /* 0xffffffff marks "no fragment included yet" (wraps to -1). */
    control->fsn_included = 0xffffffff;
    control->first_frag_seen = 0;
    control->last_frag_seen = 0;
    if (control->on_read_q) {
      /*
       * We have to purge it from there,
       * hopefully this will work :-)
       */
      TAILQ_REMOVE(&stcb->sctp_ep->read_queue, control, next);
      control->on_read_q = 0;
    }
    chk = TAILQ_FIRST(&control->reasm);
    if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
      /*
       * The surviving head is a first fragment: fold it into
       * `control` so delivery can restart from it.
       */
      TAILQ_REMOVE(&control->reasm, chk, sctp_next);
      sctp_add_chk_to_control(control, strm, stcb, asoc,
                              chk, SCTP_READ_LOCK_HELD);
    }
    sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
    return;
  }
  /*
   * Full flush: everything queued for this message is gone.
   * Unlink `control` from whichever stream queue holds it and
   * fix the stream accounting.
   */
  if (control->on_strm_q == SCTP_ON_ORDERED) {
    TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
    if (asoc->size_on_all_streams >= control->length) {
      asoc->size_on_all_streams -= control->length;
    } else {
#ifdef INVARIANTS
      panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
      asoc->size_on_all_streams = 0;
#endif
    }
    sctp_ucount_decr(asoc->cnt_on_all_streams);
    control->on_strm_q = 0;
  } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
    TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
    control->on_strm_q = 0;
#ifdef INVARIANTS
  } else if (control->on_strm_q) {
    panic("strm: %p ctl: %p unknown %d",
        strm, control, control->on_strm_q);
#endif
  }
  control->on_strm_q = 0;
  /*
   * If the application can no longer see `control` via the read
   * queue, nothing else references it: release it completely.
   */
  if (control->on_read_q == 0) {
    sctp_free_remote_addr(control->whoFrom);
    if (control->data) {
      sctp_m_freem(control->data);
      control->data = NULL;
    }
    sctp_free_a_readq(stcb, control);
  }
}
5549
5550
void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
                        struct sctp_forward_tsn_chunk *fwd,
                        int *abort_flag, struct mbuf *m , int offset)
{
  /* The pr-sctp fwd tsn */
  /*
   * here we will perform all the data receiver side steps for
   * processing FwdTSN, as required in by pr-sctp draft:
   *
   * Assume we get FwdTSN(x):
   *
   * 1) update local cumTSN to x
   * 2) try to further advance cumTSN to x + others we have
   * 3) examine and update re-ordering queue on pr-in-streams
   * 4) clean up re-assembly queue
   * 5) Send a sack to report where we are.
   *
   * Parameters:
   *   stcb       - association control block (TCB lock held; asserted
   *                on the slow path below).
   *   fwd        - the FORWARD-TSN / I-FORWARD-TSN chunk header.
   *   abort_flag - set to 1 (out) if the chunk is judged malicious and
   *                the association is aborted.
   *   m, offset  - mbuf chain and offset of the chunk, used to read the
   *                per-stream sid/ssn (or sid/mid/flags) entries that
   *                follow the fixed header.
   */
  struct sctp_association *asoc;
  uint32_t new_cum_tsn, gap;
  unsigned int i, fwd_sz, m_size;
  struct sctp_stream_in *strm;
  struct sctp_queued_to_read *control, *ncontrol;

  asoc = &stcb->asoc;
  /* Reject chunks shorter than the fixed FORWARD-TSN header. */
  if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
    SCTPDBG(SCTP_DEBUG_INDATA1,
      "Bad size too small/big fwd-tsn\n");
    return;
  }
  /* Mapping-array capacity in bits (bytes << 3). */
  m_size = (stcb->asoc.mapping_array_size << 3);
  /*************************************************************/
  /* 1. Here we update local cumTSN and shift the bitmap array */
  /*************************************************************/
  new_cum_tsn = ntohl(fwd->new_cumulative_tsn);

  if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
    /* Already got there ... */
    return;
  }
  /*
   * now we know the new TSN is more advanced, let's find the actual
   * gap
   */
  SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
  asoc->cumulative_tsn = new_cum_tsn;
  if (gap >= m_size) {
    /*
     * The jump lands beyond the mapping array. A legitimate jump is
     * bounded by the receive window we advertised; anything larger
     * must be an attacker, so abort the association.
     */
    if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
      struct mbuf *op_err;
      char msg[SCTP_DIAG_INFO_LEN];

      /*
       * out of range (of single byte chunks in the rwnd I
       * give out). This must be an attacker.
       */
      *abort_flag = 1;
      SCTP_SNPRINTF(msg, sizeof(msg),
                    "New cum ack %8.8x too high, highest TSN %8.8x",
                    new_cum_tsn, asoc->highest_tsn_inside_map);
      op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
      stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37;
      sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
      return;
    }
    SCTP_STAT_INCR(sctps_fwdtsn_map_over);

    /* Jump past the whole window: restart both maps at new_cum_tsn+1. */
    memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
    asoc->mapping_array_base_tsn = new_cum_tsn + 1;
    asoc->highest_tsn_inside_map = new_cum_tsn;

    memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
    asoc->highest_tsn_inside_nr_map = new_cum_tsn;

    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
      sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
    }
  } else {
    SCTP_TCB_LOCK_ASSERT(stcb);
    /*
     * Jump stays within the window: mark every TSN up to the gap as
     * received (in the nr-map) unless already present in either map.
     */
    for (i = 0; i <= gap; i++) {
      if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
          !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
        SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
        if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
          asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
        }
      }
    }
  }
  /*************************************************************/
  /* 2. Clear up re-assembly queue                             */
  /*************************************************************/

  /* This is now done as part of clearing up the stream/seq */
  if (asoc->idata_supported == 0) {
    uint16_t sid;

    /* Flush all the un-ordered data based on cum-tsn */
    SCTP_INP_READ_LOCK(stcb->sctp_ep);
    for (sid = 0; sid < asoc->streamincnt; sid++) {
      strm = &asoc->strmin[sid];
      if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
        sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
      }
    }
    SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
  }
  /*******************************************************/
  /* 3. Update the PR-stream re-ordering queues and fix  */
  /*    delivery issues as needed.                       */
  /*******************************************************/
  fwd_sz -= sizeof(*fwd);
  if (m && fwd_sz) {
    /* New method. */
    unsigned int num_str;
    uint32_t mid;
    uint16_t sid;
    uint16_t ordered, flags;
    struct sctp_strseq *stseq, strseqbuf;
    struct sctp_strseq_mid *stseq_m, strseqbuf_m;
    offset += sizeof(*fwd);

    SCTP_INP_READ_LOCK(stcb->sctp_ep);
    /*
     * The chunk trailer is an array of per-stream entries; the entry
     * layout differs between I-FORWARD-TSN (sid/flags/mid) and the
     * classic FORWARD-TSN (sid/ssn).
     */
    if (asoc->idata_supported) {
      num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
    } else {
      num_str = fwd_sz / sizeof(struct sctp_strseq);
    }
    for (i = 0; i < num_str; i++) {
      if (asoc->idata_supported) {
        stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
                      sizeof(struct sctp_strseq_mid),
                      (uint8_t *)&strseqbuf_m);
        offset += sizeof(struct sctp_strseq_mid);
        if (stseq_m == NULL) {
          /* Truncated chunk: stop parsing entries. */
          break;
        }
        sid = ntohs(stseq_m->sid);
        mid = ntohl(stseq_m->mid);
        flags = ntohs(stseq_m->flags);
        if (flags & PR_SCTP_UNORDERED_FLAG) {
          ordered = 0;
        } else {
          ordered = 1;
        }
      } else {
        stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
                      sizeof(struct sctp_strseq),
                      (uint8_t *)&strseqbuf);
        offset += sizeof(struct sctp_strseq);
        if (stseq == NULL) {
          /* Truncated chunk: stop parsing entries. */
          break;
        }
        sid = ntohs(stseq->sid);
        /* Classic FORWARD-TSN carries a 16-bit SSN; widen it to mid. */
        mid = (uint32_t)ntohs(stseq->ssn);
        ordered = 1;
      }
      /* Convert */

      /* now process */

      /*
       * Ok we now look for the stream/seq on the read queue
       * where its not all delivered. If we find it we transmute the
       * read entry into a PDI_ABORTED.
       */
      if (sid >= asoc->streamincnt) {
        /* screwed up streams, stop!  */
        break;
      }
      if ((asoc->str_of_pdapi == sid) &&
          (asoc->ssn_of_pdapi == mid)) {
        /* If this is the one we were partially delivering
         * now then we no longer are. Note this will change
         * with the reassembly re-write.
         */
        asoc->fragmented_delivery_inprogress = 0;
      }
      strm = &asoc->strmin[sid];
      /*
       * Flush every queued message on this stream whose mid is
       * covered (<= the advertised mid) by this entry.
       */
      if (ordered) {
        TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, ncontrol) {
          if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
            sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
          }
        }
      } else {
        if (asoc->idata_supported) {
          TAILQ_FOREACH_SAFE(control, &strm->uno_inqueue, next_instrm, ncontrol) {
            if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
              sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
            }
          }
        } else {
          /*
           * Legacy unordered data has no per-message mid to
           * compare; flush based on cum-tsn from the head entry.
           */
          if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
            sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
          }
        }
      }
      /*
       * If a matching partially-delivered message sits on the read
       * queue, convert it into an aborted partial delivery and
       * notify the ULP.
       */
      TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
        if ((control->sinfo_stream == sid) &&
            (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
          control->pdapi_aborted = 1;
          control->end_added = 1;
          if (control->on_strm_q == SCTP_ON_ORDERED) {
            TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
            if (asoc->size_on_all_streams >= control->length) {
              asoc->size_on_all_streams -= control->length;
            } else {
#ifdef INVARIANTS
              panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
              asoc->size_on_all_streams = 0;
#endif
            }
            sctp_ucount_decr(asoc->cnt_on_all_streams);
          } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
            TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
          } else if (control->on_strm_q) {
            panic("strm: %p ctl: %p unknown %d",
                  strm, control, control->on_strm_q);
#endif
          }
          control->on_strm_q = 0;
          sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
                          stcb,
                          SCTP_PARTIAL_DELIVERY_ABORTED,
                          (void *)control,
                          SCTP_SO_NOT_LOCKED);
          break;
        } else if ((control->sinfo_stream == sid) &&
             SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
          /* We are past our victim SSN */
          break;
        }
      }
      if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
        /* Update the sequence number */
        strm->last_mid_delivered = mid;
      }
      /* now kick the stream the new way */
      /*sa_ignore NO_NULL_CHK*/
      sctp_kick_prsctp_reorder_queue(stcb, strm);
    }
    SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
  }
  /*
   * Now slide thing forward.
   */
  sctp_slide_mapping_arrays(stcb);
}