Coverage Report

Created: 2026-01-17 07:01

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/usrsctp/usrsctplib/netinet/sctp_indata.c
Line
Count
Source
1
/*-
2
 * SPDX-License-Identifier: BSD-3-Clause
3
 *
4
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7
 *
8
 * Redistribution and use in source and binary forms, with or without
9
 * modification, are permitted provided that the following conditions are met:
10
 *
11
 * a) Redistributions of source code must retain the above copyright notice,
12
 *    this list of conditions and the following disclaimer.
13
 *
14
 * b) Redistributions in binary form must reproduce the above copyright
15
 *    notice, this list of conditions and the following disclaimer in
16
 *    the documentation and/or other materials provided with the distribution.
17
 *
18
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19
 *    contributors may be used to endorse or promote products derived
20
 *    from this software without specific prior written permission.
21
 *
22
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32
 * THE POSSIBILITY OF SUCH DAMAGE.
33
 */
34
35
#include <netinet/sctp_os.h>
36
#if defined(__FreeBSD__) && !defined(__Userspace__)
37
#include <sys/proc.h>
38
#endif
39
#include <netinet/sctp_var.h>
40
#include <netinet/sctp_sysctl.h>
41
#include <netinet/sctp_header.h>
42
#include <netinet/sctp_pcb.h>
43
#include <netinet/sctputil.h>
44
#include <netinet/sctp_output.h>
45
#include <netinet/sctp_uio.h>
46
#include <netinet/sctp_auth.h>
47
#include <netinet/sctp_timer.h>
48
#include <netinet/sctp_asconf.h>
49
#include <netinet/sctp_indata.h>
50
#include <netinet/sctp_bsd_addr.h>
51
#include <netinet/sctp_input.h>
52
#include <netinet/sctp_crc32.h>
53
#if defined(__FreeBSD__) && !defined(__Userspace__)
54
#include <netinet/sctp_lock_bsd.h>
55
#endif
56
#if defined(_WIN32) && defined(__MINGW32__)
57
#include <minmax.h>
58
#endif
59
/*
60
 * NOTES: On the outbound side of things I need to check the sack timer to
61
 * see if I should generate a sack into the chunk queue (if I have data to
62
 * send that is and will be sending it .. for bundling.
63
 *
64
 * The callback in sctp_usrreq.c will get called when the socket is read from.
65
 * This will cause sctp_service_queues() to get called on the top entry in
66
 * the list.
67
 */
68
static uint32_t
69
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
70
      struct sctp_stream_in *strm,
71
      struct sctp_tcb *stcb,
72
      struct sctp_association *asoc,
73
      struct sctp_tmit_chunk *chk, int hold_rlock);
74
75
void
76
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
77
15.6k
{
78
15.6k
  asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
79
15.6k
}
80
81
/* Calculate what the rwnd would be */
82
uint32_t
83
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
84
22.8k
{
85
22.8k
  uint32_t calc = 0;
86
87
  /*
88
   * This is really set wrong with respect to a 1-2-m socket. Since
89
   * the sb_cc is the count that everyone as put up. When we re-write
90
   * sctp_soreceive then we will fix this so that ONLY this
91
   * associations data is taken into account.
92
   */
93
22.8k
  if (stcb->sctp_socket == NULL) {
94
0
    return (calc);
95
0
  }
96
97
22.8k
  KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
98
22.8k
          ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
99
22.8k
  KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
100
22.8k
          ("size_on_all_streams is %u", asoc->size_on_all_streams));
101
22.8k
  if (stcb->asoc.sb_cc == 0 &&
102
9.61k
      asoc->cnt_on_reasm_queue == 0 &&
103
9.26k
      asoc->cnt_on_all_streams == 0) {
104
    /* Full rwnd granted */
105
9.14k
    calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
106
9.14k
    return (calc);
107
9.14k
  }
108
  /* get actual space */
109
13.7k
  calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
110
  /*
111
   * take out what has NOT been put on socket queue and we yet hold
112
   * for putting up.
113
   */
114
13.7k
  calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
115
13.7k
                                           asoc->cnt_on_reasm_queue * MSIZE));
116
13.7k
  calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
117
13.7k
                                           asoc->cnt_on_all_streams * MSIZE));
118
13.7k
  if (calc == 0) {
119
    /* out of space */
120
636
    return (calc);
121
636
  }
122
123
  /* what is the overhead of all these rwnd's */
124
13.0k
  calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
125
  /* If the window gets too small due to ctrl-stuff, reduce it
126
   * to 1, even it is 0. SWS engaged
127
   */
128
13.0k
  if (calc < stcb->asoc.my_rwnd_control_len) {
129
5.80k
    calc = 1;
130
5.80k
  }
131
13.0k
  return (calc);
132
13.7k
}
133
134
/*
135
 * Build out our readq entry based on the incoming packet.
136
 */
137
struct sctp_queued_to_read *
138
sctp_build_readq_entry(struct sctp_tcb *stcb,
139
    struct sctp_nets *net,
140
    uint32_t tsn, uint32_t ppid,
141
    uint32_t context, uint16_t sid,
142
    uint32_t mid, uint8_t flags,
143
    struct mbuf *dm)
144
558k
{
145
558k
  struct sctp_queued_to_read *read_queue_e = NULL;
146
147
558k
  sctp_alloc_a_readq(stcb, read_queue_e);
148
558k
  if (read_queue_e == NULL) {
149
0
    goto failed_build;
150
0
  }
151
558k
  memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
152
558k
  read_queue_e->sinfo_stream = sid;
153
558k
  read_queue_e->sinfo_flags = (flags << 8);
154
558k
  read_queue_e->sinfo_ppid = ppid;
155
558k
  read_queue_e->sinfo_context = context;
156
558k
  read_queue_e->sinfo_tsn = tsn;
157
558k
  read_queue_e->sinfo_cumtsn = tsn;
158
558k
  read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
159
558k
  read_queue_e->mid = mid;
160
558k
  read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
161
558k
  TAILQ_INIT(&read_queue_e->reasm);
162
558k
  read_queue_e->whoFrom = net;
163
558k
  atomic_add_int(&net->ref_count, 1);
164
558k
  read_queue_e->data = dm;
165
558k
  read_queue_e->stcb = stcb;
166
558k
  read_queue_e->port_from = stcb->rport;
167
558k
  if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
168
0
    read_queue_e->do_not_ref_stcb = 1;
169
0
  }
170
558k
failed_build:
171
558k
  return (read_queue_e);
172
558k
}
173
174
struct mbuf *
175
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
176
0
{
177
0
  struct sctp_extrcvinfo *seinfo;
178
0
  struct sctp_sndrcvinfo *outinfo;
179
0
  struct sctp_rcvinfo *rcvinfo;
180
0
  struct sctp_nxtinfo *nxtinfo;
181
#if defined(_WIN32)
182
  WSACMSGHDR *cmh;
183
#else
184
0
  struct cmsghdr *cmh;
185
0
#endif
186
0
  struct mbuf *ret;
187
0
  int len;
188
0
  int use_extended;
189
0
  int provide_nxt;
190
191
0
  if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
192
0
      sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
193
0
      sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
194
    /* user does not want any ancillary data */
195
0
    return (NULL);
196
0
  }
197
198
0
  len = 0;
199
0
  if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
200
0
    len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
201
0
  }
202
0
  seinfo = (struct sctp_extrcvinfo *)sinfo;
203
0
  if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
204
0
      (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
205
0
    provide_nxt = 1;
206
0
    len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
207
0
  } else {
208
0
    provide_nxt = 0;
209
0
  }
210
0
  if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
211
0
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
212
0
      use_extended = 1;
213
0
      len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
214
0
    } else {
215
0
      use_extended = 0;
216
0
      len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
217
0
    }
218
0
  } else {
219
0
    use_extended = 0;
220
0
  }
221
222
0
  ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
223
0
  if (ret == NULL) {
224
    /* No space */
225
0
    return (ret);
226
0
  }
227
0
  SCTP_BUF_LEN(ret) = 0;
228
229
  /* We need a CMSG header followed by the struct */
230
#if defined(_WIN32)
231
  cmh = mtod(ret, WSACMSGHDR *);
232
#else
233
0
  cmh = mtod(ret, struct cmsghdr *);
234
0
#endif
235
  /*
236
   * Make sure that there is no un-initialized padding between
237
   * the cmsg header and cmsg data and after the cmsg data.
238
   */
239
0
  memset(cmh, 0, len);
240
0
  if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
241
0
    cmh->cmsg_level = IPPROTO_SCTP;
242
0
    cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
243
0
    cmh->cmsg_type = SCTP_RCVINFO;
244
0
    rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
245
0
    rcvinfo->rcv_sid = sinfo->sinfo_stream;
246
0
    rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
247
0
    rcvinfo->rcv_flags = sinfo->sinfo_flags;
248
0
    rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
249
0
    rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
250
0
    rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
251
0
    rcvinfo->rcv_context = sinfo->sinfo_context;
252
0
    rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
253
#if defined(_WIN32)
254
    cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
255
#else
256
0
    cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
257
0
#endif
258
0
    SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
259
0
  }
260
0
  if (provide_nxt) {
261
0
    cmh->cmsg_level = IPPROTO_SCTP;
262
0
    cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
263
0
    cmh->cmsg_type = SCTP_NXTINFO;
264
0
    nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
265
0
    nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
266
0
    nxtinfo->nxt_flags = 0;
267
0
    if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
268
0
      nxtinfo->nxt_flags |= SCTP_UNORDERED;
269
0
    }
270
0
    if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
271
0
      nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
272
0
    }
273
0
    if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
274
0
      nxtinfo->nxt_flags |= SCTP_COMPLETE;
275
0
    }
276
0
    nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
277
0
    nxtinfo->nxt_length = seinfo->serinfo_next_length;
278
0
    nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
279
#if defined(_WIN32)
280
    cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
281
#else
282
0
    cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
283
0
#endif
284
0
    SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
285
0
  }
286
0
  if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
287
0
    cmh->cmsg_level = IPPROTO_SCTP;
288
0
    outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
289
0
    if (use_extended) {
290
0
      cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
291
0
      cmh->cmsg_type = SCTP_EXTRCV;
292
0
      memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
293
0
      SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
294
0
    } else {
295
0
      cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
296
0
      cmh->cmsg_type = SCTP_SNDRCV;
297
0
      *outinfo = *sinfo;
298
0
      SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
299
0
    }
300
0
  }
301
0
  return (ret);
302
0
}
303
304
static void
305
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
306
2.02k
{
307
2.02k
  uint32_t gap, i;
308
2.02k
  int in_r, in_nr;
309
310
2.02k
  if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
311
0
    return;
312
0
  }
313
2.02k
  if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
314
    /*
315
     * This tsn is behind the cum ack and thus we don't
316
     * need to worry about it being moved from one to the other.
317
     */
318
13
    return;
319
13
  }
320
2.00k
  SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
321
2.00k
  in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
322
2.00k
  in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
323
2.00k
  KASSERT(in_r || in_nr, ("%s: Things are really messed up now", __func__));
324
2.00k
  if (!in_nr) {
325
1.96k
    SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
326
1.96k
    if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
327
1.23k
      asoc->highest_tsn_inside_nr_map = tsn;
328
1.23k
    }
329
1.96k
  }
330
2.00k
  if (in_r) {
331
1.96k
    SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
332
1.96k
    if (tsn == asoc->highest_tsn_inside_map) {
333
      /* We must back down to see what the new highest is. */
334
1.38M
      for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
335
1.38M
        SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
336
1.38M
        if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
337
171
          asoc->highest_tsn_inside_map = i;
338
171
          break;
339
171
        }
340
1.38M
      }
341
1.06k
      if (!SCTP_TSN_GE(i, asoc->mapping_array_base_tsn)) {
342
889
        asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
343
889
      }
344
1.06k
    }
345
1.96k
  }
346
2.00k
}
347
348
static int
349
sctp_place_control_in_stream(struct sctp_stream_in *strm,
350
           struct sctp_association *asoc,
351
           struct sctp_queued_to_read *control)
352
3.28k
{
353
3.28k
  struct sctp_queued_to_read *at;
354
3.28k
  struct sctp_readhead *q;
355
3.28k
  uint8_t flags, unordered;
356
357
3.28k
  flags = (control->sinfo_flags >> 8);
358
3.28k
  unordered = flags & SCTP_DATA_UNORDERED;
359
3.28k
  if (unordered) {
360
1.25k
    q = &strm->uno_inqueue;
361
1.25k
    if (asoc->idata_supported == 0) {
362
620
      if (!TAILQ_EMPTY(q)) {
363
        /* Only one stream can be here in old style  -- abort */
364
4
        return (-1);
365
4
      }
366
620
      TAILQ_INSERT_TAIL(q, control, next_instrm);
367
616
      control->on_strm_q = SCTP_ON_UNORDERED;
368
616
      return (0);
369
620
    }
370
2.02k
  } else {
371
2.02k
    q = &strm->inqueue;
372
2.02k
  }
373
2.66k
  if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
374
639
    control->end_added = 1;
375
639
    control->first_frag_seen = 1;
376
639
    control->last_frag_seen = 1;
377
639
  }
378
2.66k
  if (TAILQ_EMPTY(q)) {
379
    /* Empty queue */
380
1.58k
    TAILQ_INSERT_HEAD(q, control, next_instrm);
381
1.58k
    if (unordered) {
382
255
      control->on_strm_q = SCTP_ON_UNORDERED;
383
1.32k
    } else {
384
1.32k
      control->on_strm_q = SCTP_ON_ORDERED;
385
1.32k
    }
386
1.58k
    return (0);
387
1.58k
  } else {
388
2.78k
    TAILQ_FOREACH(at, q, next_instrm) {
389
2.78k
      if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
390
        /*
391
         * one in queue is bigger than the
392
         * new one, insert before this one
393
         */
394
721
        TAILQ_INSERT_BEFORE(at, control, next_instrm);
395
721
        if (unordered) {
396
311
          control->on_strm_q = SCTP_ON_UNORDERED;
397
410
        } else {
398
410
          control->on_strm_q = SCTP_ON_ORDERED;
399
410
        }
400
721
        break;
401
2.06k
      } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
402
        /*
403
         * Gak, He sent me a duplicate msg
404
         * id number?? return -1 to abort.
405
         */
406
3
        return (-1);
407
2.05k
      } else {
408
2.05k
        if (TAILQ_NEXT(at, next_instrm) == NULL) {
409
          /*
410
           * We are at the end, insert
411
           * it after this one
412
           */
413
353
          if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
414
0
            sctp_log_strm_del(control, at,
415
0
                  SCTP_STR_LOG_FROM_INSERT_TL);
416
0
          }
417
353
          TAILQ_INSERT_AFTER(q, at, control, next_instrm);
418
353
          if (unordered) {
419
72
            control->on_strm_q = SCTP_ON_UNORDERED;
420
281
          } else {
421
281
            control->on_strm_q = SCTP_ON_ORDERED;
422
281
          }
423
353
          break;
424
353
        }
425
2.05k
      }
426
2.78k
    }
427
1.07k
  }
428
1.07k
  return (0);
429
2.66k
}
430
431
static void
432
sctp_abort_in_reasm(struct sctp_tcb *stcb,
433
                    struct sctp_queued_to_read *control,
434
                    struct sctp_tmit_chunk *chk,
435
                    int *abort_flag, int opspot)
436
261
{
437
261
  char msg[SCTP_DIAG_INFO_LEN];
438
261
  struct mbuf *oper;
439
440
261
  if (stcb->asoc.idata_supported) {
441
223
    SCTP_SNPRINTF(msg, sizeof(msg),
442
223
                  "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
443
223
                  opspot,
444
223
                  control->fsn_included,
445
223
                  chk->rec.data.tsn,
446
223
                  chk->rec.data.sid,
447
223
                  chk->rec.data.fsn, chk->rec.data.mid);
448
223
  } else {
449
38
    SCTP_SNPRINTF(msg, sizeof(msg),
450
38
                  "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
451
38
                  opspot,
452
38
                  control->fsn_included,
453
38
                  chk->rec.data.tsn,
454
38
                  chk->rec.data.sid,
455
38
                  chk->rec.data.fsn,
456
38
                  (uint16_t)chk->rec.data.mid);
457
38
  }
458
261
  oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
459
261
  sctp_m_freem(chk->data);
460
261
  chk->data = NULL;
461
261
  sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
462
261
  stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
463
261
  sctp_abort_an_association(stcb->sctp_ep, stcb, oper, false, SCTP_SO_NOT_LOCKED);
464
261
  *abort_flag = 1;
465
261
}
466
467
static void
468
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
469
7
{
470
  /*
471
   * The control could not be placed and must be cleaned.
472
   */
473
7
  struct sctp_tmit_chunk *chk, *nchk;
474
7
  TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
475
0
    TAILQ_REMOVE(&control->reasm, chk, sctp_next);
476
0
    if (chk->data)
477
0
      sctp_m_freem(chk->data);
478
0
    chk->data = NULL;
479
0
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
480
0
  }
481
7
  sctp_free_remote_addr(control->whoFrom);
482
7
  if (control->data) {
483
7
    sctp_m_freem(control->data);
484
7
    control->data = NULL;
485
7
  }
486
7
  sctp_free_a_readq(stcb, control);
487
7
}
488
489
/*
490
 * Queue the chunk either right into the socket buffer if it is the next one
491
 * to go OR put it in the correct place in the delivery queue.  If we do
492
 * append to the so_buf, keep doing so until we are out of order as
493
 * long as the control's entered are non-fragmented.
494
 */
495
static void
496
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
497
    struct sctp_association *asoc,
498
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
499
886
{
500
  /*
501
   * FIX-ME maybe? What happens when the ssn wraps? If we are getting
502
   * all the data in one stream this could happen quite rapidly. One
503
   * could use the TSN to keep track of things, but this scheme breaks
504
   * down in the other type of stream usage that could occur. Send a
505
   * single msg to stream 0, send 4Billion messages to stream 1, now
506
   * send a message to stream 0. You have a situation where the TSN
507
   * has wrapped but not in the stream. Is this worth worrying about
508
   * or should we just change our queue sort at the bottom to be by
509
   * TSN.
510
   *
511
   * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
512
   * with TSN 1? If the peer is doing some sort of funky TSN/SSN
513
   * assignment this could happen... and I don't see how this would be
514
   * a violation. So for now I am undecided an will leave the sort by
515
   * SSN alone. Maybe a hybrid approach is the answer
516
   *
517
   */
518
886
  struct sctp_queued_to_read *at;
519
886
  int queue_needed;
520
886
  uint32_t nxt_todel;
521
886
  struct mbuf *op_err;
522
886
  struct sctp_stream_in *strm;
523
886
  char msg[SCTP_DIAG_INFO_LEN];
524
525
886
  strm = &asoc->strmin[control->sinfo_stream];
526
886
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
527
0
    sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
528
0
  }
529
886
  if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
530
    /* The incoming sseq is behind where we last delivered? */
531
74
    SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
532
74
      strm->last_mid_delivered, control->mid);
533
    /*
534
     * throw it in the stream so it gets cleaned up in
535
     * association destruction
536
     */
537
74
    TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
538
74
    if (asoc->idata_supported) {
539
26
      SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
540
26
                    strm->last_mid_delivered, control->sinfo_tsn,
541
26
                    control->sinfo_stream, control->mid);
542
48
    } else {
543
48
      SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
544
48
                    (uint16_t)strm->last_mid_delivered,
545
48
                    control->sinfo_tsn,
546
48
                    control->sinfo_stream,
547
48
                    (uint16_t)control->mid);
548
48
    }
549
74
    op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
550
74
    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
551
74
    sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
552
74
    *abort_flag = 1;
553
74
    return;
554
74
  }
555
812
  queue_needed = 1;
556
812
  asoc->size_on_all_streams += control->length;
557
812
  sctp_ucount_incr(asoc->cnt_on_all_streams);
558
812
  nxt_todel = strm->last_mid_delivered + 1;
559
812
  if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
560
#if defined(__APPLE__) && !defined(__Userspace__)
561
    struct socket *so;
562
563
    so = SCTP_INP_SO(stcb->sctp_ep);
564
    atomic_add_int(&stcb->asoc.refcnt, 1);
565
    SCTP_TCB_UNLOCK(stcb);
566
    SCTP_SOCKET_LOCK(so, 1);
567
    SCTP_TCB_LOCK(stcb);
568
    atomic_subtract_int(&stcb->asoc.refcnt, 1);
569
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
570
      SCTP_SOCKET_UNLOCK(so, 1);
571
      return;
572
    }
573
#endif
574
    /* can be delivered right away? */
575
152
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
576
0
      sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
577
0
    }
578
    /* EY it wont be queued if it could be delivered directly */
579
152
    queue_needed = 0;
580
152
    if (asoc->size_on_all_streams >= control->length) {
581
152
      asoc->size_on_all_streams -= control->length;
582
152
    } else {
583
0
#ifdef INVARIANTS
584
0
      panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
585
#else
586
      asoc->size_on_all_streams = 0;
587
#endif
588
0
    }
589
152
    sctp_ucount_decr(asoc->cnt_on_all_streams);
590
152
    strm->last_mid_delivered++;
591
152
    sctp_mark_non_revokable(asoc, control->sinfo_tsn);
592
152
    sctp_add_to_readq(stcb->sctp_ep, stcb,
593
152
                      control,
594
152
                      &stcb->sctp_socket->so_rcv, 1,
595
152
                      SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
596
152
    TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
597
      /* all delivered */
598
63
      nxt_todel = strm->last_mid_delivered + 1;
599
63
      if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
600
22
          (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
601
14
        if (control->on_strm_q == SCTP_ON_ORDERED) {
602
14
          TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
603
14
          if (asoc->size_on_all_streams >= control->length) {
604
14
            asoc->size_on_all_streams -= control->length;
605
14
          } else {
606
0
#ifdef INVARIANTS
607
0
            panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
608
#else
609
            asoc->size_on_all_streams = 0;
610
#endif
611
0
          }
612
14
          sctp_ucount_decr(asoc->cnt_on_all_streams);
613
14
#ifdef INVARIANTS
614
14
        } else {
615
0
          panic("Huh control: %p is on_strm_q: %d",
616
0
                control, control->on_strm_q);
617
0
#endif
618
0
        }
619
14
        control->on_strm_q = 0;
620
14
        strm->last_mid_delivered++;
621
        /*
622
         * We ignore the return of deliver_data here
623
         * since we always can hold the chunk on the
624
         * d-queue. And we have a finite number that
625
         * can be delivered from the strq.
626
         */
627
14
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
628
0
          sctp_log_strm_del(control, NULL,
629
0
                SCTP_STR_LOG_FROM_IMMED_DEL);
630
0
        }
631
14
        sctp_mark_non_revokable(asoc, control->sinfo_tsn);
632
14
        sctp_add_to_readq(stcb->sctp_ep, stcb,
633
14
                          control,
634
14
                          &stcb->sctp_socket->so_rcv, 1,
635
14
                          SCTP_READ_LOCK_NOT_HELD,
636
14
                          SCTP_SO_LOCKED);
637
14
        continue;
638
49
      } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
639
8
        *need_reasm = 1;
640
8
      }
641
49
      break;
642
63
    }
643
#if defined(__APPLE__) && !defined(__Userspace__)
644
    SCTP_SOCKET_UNLOCK(so, 1);
645
#endif
646
152
  }
647
812
  if (queue_needed) {
648
    /*
649
     * Ok, we did not deliver this guy, find the correct place
650
     * to put it on the queue.
651
     */
652
660
    if (sctp_place_control_in_stream(strm, asoc, control)) {
653
7
      SCTP_SNPRINTF(msg, sizeof(msg),
654
7
                    "Queue to str MID: %u duplicate", control->mid);
655
7
      sctp_clean_up_control(stcb, control);
656
7
      op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
657
7
      stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
658
7
      sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
659
7
      *abort_flag = 1;
660
7
    }
661
660
  }
662
812
}
663
664
static void
665
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
666
1.56k
{
667
1.56k
  struct mbuf *m, *prev = NULL;
668
1.56k
  struct sctp_tcb *stcb;
669
670
1.56k
  stcb = control->stcb;
671
1.56k
  control->held_length = 0;
672
1.56k
  control->length = 0;
673
1.56k
  m = control->data;
674
3.76k
  while (m) {
675
2.19k
    if (SCTP_BUF_LEN(m) == 0) {
676
      /* Skip mbufs with NO length */
677
408
      if (prev == NULL) {
678
        /* First one */
679
408
        control->data = sctp_m_free(m);
680
408
        m = control->data;
681
408
      } else {
682
0
        SCTP_BUF_NEXT(prev) = sctp_m_free(m);
683
0
        m = SCTP_BUF_NEXT(prev);
684
0
      }
685
408
      if (m == NULL) {
686
0
        control->tail_mbuf = prev;
687
0
      }
688
408
      continue;
689
408
    }
690
1.79k
    prev = m;
691
1.79k
    atomic_add_int(&control->length, SCTP_BUF_LEN(m));
692
1.79k
    if (control->on_read_q) {
693
      /*
694
       * On read queue so we must increment the
695
       * SB stuff, we assume caller has done any locks of SB.
696
       */
697
0
      sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
698
0
    }
699
1.79k
    m = SCTP_BUF_NEXT(m);
700
1.79k
  }
701
1.56k
  if (prev) {
702
1.56k
    control->tail_mbuf = prev;
703
1.56k
  }
704
1.56k
}
705
706
static void
707
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
708
365
{
709
365
  struct mbuf *prev=NULL;
710
365
  struct sctp_tcb *stcb;
711
712
365
  stcb = control->stcb;
713
365
  if (stcb == NULL) {
714
0
#ifdef INVARIANTS
715
0
    panic("Control broken");
716
#else
717
    return;
718
#endif
719
0
  }
720
365
  if (control->tail_mbuf == NULL) {
721
    /* TSNH */
722
0
    sctp_m_freem(control->data);
723
0
    control->data = m;
724
0
    sctp_setup_tail_pointer(control);
725
0
    return;
726
0
  }
727
365
  control->tail_mbuf->m_next = m;
728
883
  while (m) {
729
518
    if (SCTP_BUF_LEN(m) == 0) {
730
      /* Skip mbufs with NO length */
731
133
      if (prev == NULL) {
732
        /* First one */
733
133
        control->tail_mbuf->m_next = sctp_m_free(m);
734
133
        m = control->tail_mbuf->m_next;
735
133
      } else {
736
0
        SCTP_BUF_NEXT(prev) = sctp_m_free(m);
737
0
        m = SCTP_BUF_NEXT(prev);
738
0
      }
739
133
      if (m == NULL) {
740
0
        control->tail_mbuf = prev;
741
0
      }
742
133
      continue;
743
133
    }
744
385
    prev = m;
745
385
    if (control->on_read_q) {
746
      /*
747
       * On read queue so we must increment the
748
       * SB stuff, we assume caller has done any locks of SB.
749
       */
750
0
      sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
751
0
    }
752
385
    *added += SCTP_BUF_LEN(m);
753
385
    atomic_add_int(&control->length, SCTP_BUF_LEN(m));
754
385
    m = SCTP_BUF_NEXT(m);
755
385
  }
756
365
  if (prev) {
757
365
    control->tail_mbuf = prev;
758
365
  }
759
365
}
760
761
static void
762
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
763
122
{
764
122
  memset(nc, 0, sizeof(struct sctp_queued_to_read));
765
122
  nc->sinfo_stream = control->sinfo_stream;
766
122
  nc->mid = control->mid;
767
122
  TAILQ_INIT(&nc->reasm);
768
122
  nc->top_fsn = control->top_fsn;
769
122
  nc->mid = control->mid;
770
122
  nc->sinfo_flags = control->sinfo_flags;
771
122
  nc->sinfo_ppid = control->sinfo_ppid;
772
122
  nc->sinfo_context = control->sinfo_context;
773
122
  nc->fsn_included = 0xffffffff;
774
122
  nc->sinfo_tsn = control->sinfo_tsn;
775
122
  nc->sinfo_cumtsn = control->sinfo_cumtsn;
776
122
  nc->sinfo_assoc_id = control->sinfo_assoc_id;
777
122
  nc->whoFrom = control->whoFrom;
778
122
  atomic_add_int(&nc->whoFrom->ref_count, 1);
779
122
  nc->stcb = control->stcb;
780
122
  nc->port_from = control->port_from;
781
122
  nc->do_not_ref_stcb = control->do_not_ref_stcb;
782
122
}
783
784
static int
785
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
786
                               struct sctp_association *asoc,
787
                               struct sctp_stream_in *strm,
788
                               struct sctp_queued_to_read *control,
789
                               uint32_t pd_point,
790
                               int inp_read_lock_held)
791
2.13k
{
792
  /* Special handling for the old un-ordered data chunk.
793
   * All the chunks/TSN's go to mid 0. So
794
   * we have to do the old style watching to see
795
   * if we have it all. If you return one, no other
796
   * control entries on the un-ordered queue will
797
   * be looked at. In theory there should be no others
798
   * entries in reality, unless the guy is sending both
799
   * unordered NDATA and unordered DATA...
800
   */
801
2.13k
  struct sctp_tmit_chunk *chk, *lchk, *tchk;
802
2.13k
  uint32_t fsn;
803
2.13k
  struct sctp_queued_to_read *nc;
804
2.13k
  int cnt_added;
805
806
2.13k
  if (control->first_frag_seen == 0) {
807
    /* Nothing we can do, we have not seen the first piece yet */
808
905
    return (1);
809
905
  }
810
  /* Collapse any we can */
811
1.22k
  cnt_added = 0;
812
1.30k
restart:
813
1.30k
  fsn = control->fsn_included + 1;
814
  /* Now what can we add? */
815
1.30k
  TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
816
1.05k
    if (chk->rec.data.fsn == fsn) {
817
      /* Ok lets add it */
818
304
      sctp_alloc_a_readq(stcb, nc);
819
304
      if (nc == NULL) {
820
0
        break;
821
0
      }
822
304
      memset(nc, 0, sizeof(struct sctp_queued_to_read));
823
304
      TAILQ_REMOVE(&control->reasm, chk, sctp_next);
824
304
      sctp_add_chk_to_control(control, strm, stcb, asoc, chk, inp_read_lock_held);
825
304
      fsn++;
826
304
      cnt_added++;
827
304
      chk = NULL;
828
304
      if (control->end_added) {
829
        /* We are done */
830
145
        if (!TAILQ_EMPTY(&control->reasm)) {
831
          /*
832
           * Ok we have to move anything left on
833
           * the control queue to a new control.
834
           */
835
122
          sctp_build_readq_entry_from_ctl(nc, control);
836
122
          tchk = TAILQ_FIRST(&control->reasm);
837
122
          if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
838
105
            TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
839
105
            if (asoc->size_on_reasm_queue >= tchk->send_size) {
840
105
              asoc->size_on_reasm_queue -= tchk->send_size;
841
105
            } else {
842
0
#ifdef INVARIANTS
843
0
            panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
844
#else
845
            asoc->size_on_reasm_queue = 0;
846
#endif
847
0
            }
848
105
            sctp_ucount_decr(asoc->cnt_on_reasm_queue);
849
105
            nc->first_frag_seen = 1;
850
105
            nc->fsn_included = tchk->rec.data.fsn;
851
105
            nc->data = tchk->data;
852
105
            nc->sinfo_ppid = tchk->rec.data.ppid;
853
105
            nc->sinfo_tsn = tchk->rec.data.tsn;
854
105
            sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
855
105
            tchk->data = NULL;
856
105
            sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
857
105
            sctp_setup_tail_pointer(nc);
858
105
            tchk = TAILQ_FIRST(&control->reasm);
859
105
          }
860
          /* Spin the rest onto the queue */
861
588
          while (tchk) {
862
466
            TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
863
466
            TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
864
466
            tchk = TAILQ_FIRST(&control->reasm);
865
466
          }
866
          /* Now lets add it to the queue after removing control */
867
122
          TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
868
122
          nc->on_strm_q = SCTP_ON_UNORDERED;
869
122
          if (control->on_strm_q) {
870
122
            TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
871
122
            control->on_strm_q = 0;
872
122
          }
873
122
        }
874
145
        if (control->pdapi_started) {
875
0
          strm->pd_api_started = 0;
876
0
          control->pdapi_started = 0;
877
0
        }
878
145
        if (control->on_strm_q) {
879
23
          TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
880
23
          control->on_strm_q = 0;
881
23
          SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
882
23
        }
883
145
        if (control->on_read_q == 0) {
884
145
          sctp_add_to_readq(stcb->sctp_ep, stcb, control,
885
145
                &stcb->sctp_socket->so_rcv, control->end_added,
886
145
                inp_read_lock_held, SCTP_SO_NOT_LOCKED);
887
145
#if defined(__Userspace__)
888
145
        } else {
889
0
          sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
890
0
#endif
891
0
        }
892
145
        sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
893
145
        if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
894
          /* Switch to the new guy and continue */
895
78
          control = nc;
896
78
          goto restart;
897
78
        } else {
898
67
          if (nc->on_strm_q == 0) {
899
23
            sctp_free_a_readq(stcb, nc);
900
23
          }
901
67
        }
902
67
        return (1);
903
159
      } else {
904
159
        sctp_free_a_readq(stcb, nc);
905
159
      }
906
746
    } else {
907
      /* Can't add more */
908
746
      break;
909
746
    }
910
1.05k
  }
911
1.16k
  if (cnt_added && strm->pd_api_started) {
912
0
#if defined(__Userspace__)
913
0
    sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
914
0
#endif
915
0
    sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
916
0
  }
917
1.16k
  if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
918
0
    strm->pd_api_started = 1;
919
0
    control->pdapi_started = 1;
920
0
    sctp_add_to_readq(stcb->sctp_ep, stcb, control,
921
0
                      &stcb->sctp_socket->so_rcv, control->end_added,
922
0
                      inp_read_lock_held, SCTP_SO_NOT_LOCKED);
923
0
    sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
924
0
    return (0);
925
1.16k
  } else {
926
1.16k
    return (1);
927
1.16k
  }
928
1.16k
}
929
930
static void
931
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
932
                               struct sctp_association *asoc,
933
                               struct sctp_queued_to_read *control,
934
                               struct sctp_tmit_chunk *chk,
935
                               int *abort_flag)
936
2.03k
{
937
2.03k
  struct sctp_tmit_chunk *at;
938
2.03k
  int inserted;
939
  /*
940
   * Here we need to place the chunk into the control structure
941
   * sorted in the correct order.
942
   */
943
2.03k
  if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
944
    /* Its the very first one. */
945
1.19k
    SCTPDBG(SCTP_DEBUG_XXX,
946
1.19k
      "chunk is a first fsn: %u becomes fsn_included\n",
947
1.19k
      chk->rec.data.fsn);
948
1.19k
    at = TAILQ_FIRST(&control->reasm);
949
1.19k
    if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
950
      /*
951
       * The first chunk in the reassembly is
952
       * a smaller TSN than this one, even though
953
       * this has a first, it must be from a subsequent
954
       * msg.
955
       */
956
482
      goto place_chunk;
957
482
    }
958
717
    if (control->first_frag_seen) {
959
      /*
960
       * In old un-ordered we can reassembly on
961
       * one control multiple messages. As long
962
       * as the next FIRST is greater then the old
963
       * first (TSN i.e. FSN wise)
964
       */
965
302
      struct mbuf *tdata;
966
302
      uint32_t tmp;
967
968
302
      if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
969
        /* Easy way the start of a new guy beyond the lowest */
970
144
        goto place_chunk;
971
144
      }
972
158
      if ((chk->rec.data.fsn == control->fsn_included) ||
973
158
          (control->pdapi_started)) {
974
        /*
975
         * Ok this should not happen, if it does
976
         * we started the pd-api on the higher TSN (since
977
         * the equals part is a TSN failure it must be that).
978
         *
979
         * We are completely hosed in that case since I have
980
         * no way to recover. This really will only happen
981
         * if we can get more TSN's higher before the pd-api-point.
982
         */
983
0
        sctp_abort_in_reasm(stcb, control, chk,
984
0
                abort_flag,
985
0
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
986
987
0
        return;
988
0
      }
989
      /*
990
       * Ok we have two firsts and the one we just got
991
       * is smaller than the one we previously placed.. yuck!
992
       * We must swap them out.
993
       */
994
      /* swap the mbufs */
995
158
      tdata = control->data;
996
158
      control->data = chk->data;
997
158
      chk->data = tdata;
998
      /* Save the lengths */
999
158
      chk->send_size = control->length;
1000
      /* Recompute length of control and tail pointer */
1001
158
      sctp_setup_tail_pointer(control);
1002
      /* Fix the FSN included */
1003
158
      tmp = control->fsn_included;
1004
158
      control->fsn_included = chk->rec.data.fsn;
1005
158
      chk->rec.data.fsn = tmp;
1006
      /* Fix the TSN included */
1007
158
      tmp = control->sinfo_tsn;
1008
158
      control->sinfo_tsn = chk->rec.data.tsn;
1009
158
      chk->rec.data.tsn = tmp;
1010
      /* Fix the PPID included */
1011
158
      tmp = control->sinfo_ppid;
1012
158
      control->sinfo_ppid = chk->rec.data.ppid;
1013
158
      chk->rec.data.ppid = tmp;
1014
      /* Fix tail pointer */
1015
158
      goto place_chunk;
1016
158
    }
1017
415
    control->first_frag_seen = 1;
1018
415
    control->fsn_included = chk->rec.data.fsn;
1019
415
    control->top_fsn = chk->rec.data.fsn;
1020
415
    control->sinfo_tsn = chk->rec.data.tsn;
1021
415
    control->sinfo_ppid = chk->rec.data.ppid;
1022
415
    control->data = chk->data;
1023
415
    sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1024
415
    chk->data = NULL;
1025
415
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1026
415
    sctp_setup_tail_pointer(control);
1027
415
    return;
1028
415
  }
1029
1.62k
place_chunk:
1030
1.62k
  inserted = 0;
1031
5.18k
  TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1032
5.18k
    if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1033
      /*
1034
       * This one in queue is bigger than the new one, insert
1035
       * the new one before at.
1036
       */
1037
914
      asoc->size_on_reasm_queue += chk->send_size;
1038
914
      sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1039
914
      inserted = 1;
1040
914
      TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1041
914
      break;
1042
4.26k
    } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1043
      /*
1044
       * They sent a duplicate fsn number. This
1045
       * really should not happen since the FSN is
1046
       * a TSN and it should have been dropped earlier.
1047
       */
1048
0
      sctp_abort_in_reasm(stcb, control, chk,
1049
0
                          abort_flag,
1050
0
                          SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1051
0
      return;
1052
0
    }
1053
5.18k
  }
1054
1.62k
  if (inserted == 0) {
1055
    /* Its at the end */
1056
708
    asoc->size_on_reasm_queue += chk->send_size;
1057
708
    sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1058
708
    control->top_fsn = chk->rec.data.fsn;
1059
708
    TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1060
708
  }
1061
1.62k
}
1062
1063
static int
1064
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1065
                         struct sctp_stream_in *strm, int inp_read_lock_held)
1066
4.41k
{
1067
  /*
1068
   * Given a stream, strm, see if any of
1069
   * the SSN's on it that are fragmented
1070
   * are ready to deliver. If so go ahead
1071
   * and place them on the read queue. In
1072
   * so placing if we have hit the end, then
1073
   * we need to remove them from the stream's queue.
1074
   */
1075
4.41k
  struct sctp_queued_to_read *control, *nctl = NULL;
1076
4.41k
  uint32_t next_to_del;
1077
4.41k
  uint32_t pd_point;
1078
4.41k
  int ret = 0;
1079
1080
4.41k
  if (stcb->sctp_socket) {
1081
4.41k
    pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1082
4.41k
             stcb->sctp_ep->partial_delivery_point);
1083
4.41k
  } else {
1084
0
    pd_point = stcb->sctp_ep->partial_delivery_point;
1085
0
  }
1086
4.41k
  control = TAILQ_FIRST(&strm->uno_inqueue);
1087
1088
4.41k
  if ((control != NULL) &&
1089
3.03k
      (asoc->idata_supported == 0)) {
1090
    /* Special handling needed for "old" data format */
1091
2.13k
    if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1092
2.13k
      goto done_un;
1093
2.13k
    }
1094
2.13k
  }
1095
2.28k
  if (strm->pd_api_started) {
1096
    /* Can't add more */
1097
0
    return (0);
1098
0
  }
1099
5.77k
  while (control) {
1100
3.49k
    SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1101
3.49k
      control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1102
3.49k
    nctl = TAILQ_NEXT(control, next_instrm);
1103
3.49k
    if (control->end_added) {
1104
      /* We just put the last bit on */
1105
25
      if (control->on_strm_q) {
1106
25
#ifdef INVARIANTS
1107
25
        if (control->on_strm_q != SCTP_ON_UNORDERED) {
1108
0
          panic("Huh control: %p on_q: %d -- not unordered?",
1109
0
                control, control->on_strm_q);
1110
0
        }
1111
25
#endif
1112
25
        SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1113
25
        TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1114
25
        if (asoc->size_on_all_streams >= control->length) {
1115
25
          asoc->size_on_all_streams -= control->length;
1116
25
        } else {
1117
0
#ifdef INVARIANTS
1118
0
          panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1119
#else
1120
          asoc->size_on_all_streams = 0;
1121
#endif
1122
0
        }
1123
25
        sctp_ucount_decr(asoc->cnt_on_all_streams);
1124
25
        control->on_strm_q = 0;
1125
25
      }
1126
25
      if (control->on_read_q == 0) {
1127
25
        sctp_add_to_readq(stcb->sctp_ep, stcb,
1128
25
              control,
1129
25
              &stcb->sctp_socket->so_rcv, control->end_added,
1130
25
              inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1131
25
      }
1132
3.46k
    } else {
1133
      /* Can we do a PD-API for this un-ordered guy? */
1134
3.46k
      if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1135
0
        strm->pd_api_started = 1;
1136
0
        control->pdapi_started = 1;
1137
0
        sctp_add_to_readq(stcb->sctp_ep, stcb,
1138
0
              control,
1139
0
              &stcb->sctp_socket->so_rcv, control->end_added,
1140
0
              inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1141
1142
0
        break;
1143
0
      }
1144
3.46k
    }
1145
3.49k
    control = nctl;
1146
3.49k
  }
1147
4.41k
done_un:
1148
4.41k
  control = TAILQ_FIRST(&strm->inqueue);
1149
4.41k
  if (strm->pd_api_started) {
1150
    /* Can't add more */
1151
0
    return (0);
1152
0
  }
1153
4.41k
  if (control == NULL) {
1154
1.84k
    return (ret);
1155
1.84k
  }
1156
2.57k
  if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1157
    /* Ok the guy at the top was being partially delivered
1158
     * completed, so we remove it. Note
1159
     * the pd_api flag was taken off when the
1160
     * chunk was merged on in sctp_queue_data_for_reasm below.
1161
     */
1162
220
    nctl = TAILQ_NEXT(control, next_instrm);
1163
220
    SCTPDBG(SCTP_DEBUG_XXX,
1164
220
      "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1165
220
      control, control->end_added, control->mid,
1166
220
      control->top_fsn, control->fsn_included,
1167
220
      strm->last_mid_delivered);
1168
220
    if (control->end_added) {
1169
20
      if (control->on_strm_q) {
1170
20
#ifdef INVARIANTS
1171
20
        if (control->on_strm_q != SCTP_ON_ORDERED) {
1172
0
          panic("Huh control: %p on_q: %d -- not ordered?",
1173
0
                control, control->on_strm_q);
1174
0
        }
1175
20
#endif
1176
20
        SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1177
20
        TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1178
20
        if (asoc->size_on_all_streams >= control->length) {
1179
20
          asoc->size_on_all_streams -= control->length;
1180
20
        } else {
1181
0
#ifdef INVARIANTS
1182
0
          panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1183
#else
1184
          asoc->size_on_all_streams = 0;
1185
#endif
1186
0
        }
1187
20
        sctp_ucount_decr(asoc->cnt_on_all_streams);
1188
20
        control->on_strm_q = 0;
1189
20
      }
1190
20
      if (strm->pd_api_started && control->pdapi_started) {
1191
0
        control->pdapi_started = 0;
1192
0
        strm->pd_api_started = 0;
1193
0
      }
1194
20
      if (control->on_read_q == 0) {
1195
20
        sctp_add_to_readq(stcb->sctp_ep, stcb,
1196
20
              control,
1197
20
              &stcb->sctp_socket->so_rcv, control->end_added,
1198
20
              inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1199
20
      }
1200
20
      control = nctl;
1201
20
    }
1202
220
  }
1203
2.57k
  if (strm->pd_api_started) {
1204
    /* Can't add more must have gotten an un-ordered above being partially delivered. */
1205
0
    return (0);
1206
0
  }
1207
2.58k
deliver_more:
1208
2.58k
  next_to_del = strm->last_mid_delivered + 1;
1209
2.58k
  if (control) {
1210
2.57k
    SCTPDBG(SCTP_DEBUG_XXX,
1211
2.57k
      "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1212
2.57k
      control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1213
2.57k
      next_to_del);
1214
2.57k
    nctl = TAILQ_NEXT(control, next_instrm);
1215
2.57k
    if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1216
1.20k
        (control->first_frag_seen)) {
1217
538
      int done;
1218
1219
      /* Ok we can deliver it onto the stream. */
1220
538
      if (control->end_added) {
1221
        /* We are done with it afterwards */
1222
11
        if (control->on_strm_q) {
1223
11
#ifdef INVARIANTS
1224
11
          if (control->on_strm_q != SCTP_ON_ORDERED) {
1225
0
            panic("Huh control: %p on_q: %d -- not ordered?",
1226
0
                  control, control->on_strm_q);
1227
0
          }
1228
11
#endif
1229
11
          SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1230
11
          TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1231
11
          if (asoc->size_on_all_streams >= control->length) {
1232
11
            asoc->size_on_all_streams -= control->length;
1233
11
          } else {
1234
0
#ifdef INVARIANTS
1235
0
            panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1236
#else
1237
            asoc->size_on_all_streams = 0;
1238
#endif
1239
0
          }
1240
11
          sctp_ucount_decr(asoc->cnt_on_all_streams);
1241
11
          control->on_strm_q = 0;
1242
11
        }
1243
11
        ret++;
1244
11
      }
1245
538
      if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1246
        /* A singleton now slipping through - mark it non-revokable too */
1247
3
        sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1248
535
      } else if (control->end_added == 0) {
1249
        /* Check if we can defer adding until its all there */
1250
527
        if ((control->length < pd_point) || (strm->pd_api_started)) {
1251
          /* Don't need it or cannot add more (one being delivered that way) */
1252
527
          goto out;
1253
527
        }
1254
527
      }
1255
11
      done = (control->end_added) && (control->last_frag_seen);
1256
11
      if (control->on_read_q == 0) {
1257
11
        if (!done) {
1258
0
          if (asoc->size_on_all_streams >= control->length) {
1259
0
            asoc->size_on_all_streams -= control->length;
1260
0
          } else {
1261
0
#ifdef INVARIANTS
1262
0
            panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1263
#else
1264
            asoc->size_on_all_streams = 0;
1265
#endif
1266
0
          }
1267
0
          strm->pd_api_started = 1;
1268
0
          control->pdapi_started = 1;
1269
0
        }
1270
11
        sctp_add_to_readq(stcb->sctp_ep, stcb,
1271
11
              control,
1272
11
              &stcb->sctp_socket->so_rcv, control->end_added,
1273
11
              inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1274
11
      }
1275
11
      strm->last_mid_delivered = next_to_del;
1276
11
      if (done) {
1277
11
        control = nctl;
1278
11
        goto deliver_more;
1279
11
      }
1280
11
    }
1281
2.57k
  }
1282
2.57k
out:
1283
2.57k
  return (ret);
1284
2.58k
}
1285
1286
uint32_t
1287
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1288
      struct sctp_stream_in *strm,
1289
      struct sctp_tcb *stcb, struct sctp_association *asoc,
1290
      struct sctp_tmit_chunk *chk, int hold_rlock)
1291
365
{
1292
  /*
1293
   * Given a control and a chunk, merge the
1294
   * data from the chk onto the control and free
1295
   * up the chunk resources.
1296
   */
1297
365
  uint32_t added = 0;
1298
365
  bool i_locked = false;
1299
1300
365
  if (control->on_read_q) {
1301
0
    if (hold_rlock == 0) {
1302
      /* Its being pd-api'd so we must do some locks. */
1303
0
      SCTP_INP_READ_LOCK(stcb->sctp_ep);
1304
0
      i_locked = true;
1305
0
    }
1306
0
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
1307
0
      goto out;
1308
0
    }
1309
0
  }
1310
365
  if (control->data == NULL) {
1311
0
    control->data = chk->data;
1312
0
    sctp_setup_tail_pointer(control);
1313
365
  } else {
1314
365
    sctp_add_to_tail_pointer(control, chk->data, &added);
1315
365
  }
1316
365
  control->fsn_included = chk->rec.data.fsn;
1317
365
  asoc->size_on_reasm_queue -= chk->send_size;
1318
365
  sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1319
365
  sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1320
365
  chk->data = NULL;
1321
365
  if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1322
112
    control->first_frag_seen = 1;
1323
112
    control->sinfo_tsn = chk->rec.data.tsn;
1324
112
    control->sinfo_ppid = chk->rec.data.ppid;
1325
112
  }
1326
365
  if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1327
    /* Its complete */
1328
178
    if ((control->on_strm_q) && (control->on_read_q)) {
1329
0
      if (control->pdapi_started) {
1330
0
        control->pdapi_started = 0;
1331
0
        strm->pd_api_started = 0;
1332
0
      }
1333
0
      if (control->on_strm_q == SCTP_ON_UNORDERED) {
1334
        /* Unordered */
1335
0
        TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1336
0
        control->on_strm_q = 0;
1337
0
      } else if (control->on_strm_q == SCTP_ON_ORDERED) {
1338
        /* Ordered */
1339
0
        TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1340
        /*
1341
         * Don't need to decrement size_on_all_streams,
1342
         * since control is on the read queue.
1343
         */
1344
0
        sctp_ucount_decr(asoc->cnt_on_all_streams);
1345
0
        control->on_strm_q = 0;
1346
0
#ifdef INVARIANTS
1347
0
      } else if (control->on_strm_q) {
1348
0
        panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1349
0
              control->on_strm_q);
1350
0
#endif
1351
0
      }
1352
0
    }
1353
178
    control->end_added = 1;
1354
178
    control->last_frag_seen = 1;
1355
178
  }
1356
365
out:
1357
365
  if (i_locked) {
1358
0
    SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1359
0
  }
1360
365
  sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1361
365
  return (added);
1362
365
}
1363
1364
/*
1365
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
1366
 * queue, see if anything can be delivered. If so pull it off (or as much as
1367
 * we can. If we run out of space then we must dump what we can and set the
1368
 * appropriate flag to say we queued what we could.
1369
 */
1370
static void
1371
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1372
        struct sctp_queued_to_read *control,
1373
        struct sctp_tmit_chunk *chk,
1374
        int created_control,
1375
        int *abort_flag, uint32_t tsn)
1376
4.67k
{
1377
4.67k
  uint32_t next_fsn;
1378
4.67k
  struct sctp_tmit_chunk *at, *nat;
1379
4.67k
  struct sctp_stream_in *strm;
1380
4.67k
  int do_wakeup, unordered;
1381
4.67k
  uint32_t lenadded;
1382
1383
4.67k
  strm = &asoc->strmin[control->sinfo_stream];
1384
  /*
1385
   * For old un-ordered data chunks.
1386
   */
1387
4.67k
  if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1388
2.76k
    unordered = 1;
1389
2.76k
  } else {
1390
1.91k
    unordered = 0;
1391
1.91k
  }
1392
  /* Must be added to the stream-in queue */
1393
4.67k
  if (created_control) {
1394
2.62k
    if ((unordered == 0) || (asoc->idata_supported)) {
1395
2.02k
      sctp_ucount_incr(asoc->cnt_on_all_streams);
1396
2.02k
    }
1397
2.62k
    if (sctp_place_control_in_stream(strm, asoc, control)) {
1398
      /* Duplicate SSN? */
1399
0
      sctp_abort_in_reasm(stcb, control, chk,
1400
0
              abort_flag,
1401
0
              SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1402
0
      sctp_clean_up_control(stcb, control);
1403
0
      return;
1404
0
    }
1405
2.62k
    if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1406
      /* Ok we created this control and now
1407
       * lets validate that its legal i.e. there
1408
       * is a B bit set, if not and we have
1409
       * up to the cum-ack then its invalid.
1410
       */
1411
31
      if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1412
3
        sctp_abort_in_reasm(stcb, control, chk,
1413
3
                            abort_flag,
1414
3
                            SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1415
3
        return;
1416
3
      }
1417
31
    }
1418
2.62k
  }
1419
4.66k
  if ((asoc->idata_supported == 0) && (unordered == 1)) {
1420
2.03k
    sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1421
2.03k
    return;
1422
2.03k
  }
1423
  /*
1424
   * Ok we must queue the chunk into the reasembly portion:
1425
   *  o if its the first it goes to the control mbuf.
1426
   *  o if its not first but the next in sequence it goes to the control,
1427
   *    and each succeeding one in order also goes.
1428
   *  o if its not in order we place it on the list in its place.
1429
   */
1430
2.63k
  if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1431
    /* Its the very first one. */
1432
892
    SCTPDBG(SCTP_DEBUG_XXX,
1433
892
      "chunk is a first fsn: %u becomes fsn_included\n",
1434
892
      chk->rec.data.fsn);
1435
892
    if (control->first_frag_seen) {
1436
      /*
1437
       * Error on senders part, they either
1438
       * sent us two data chunks with FIRST,
1439
       * or they sent two un-ordered chunks that
1440
       * were fragmented at the same time in the same stream.
1441
       */
1442
2
      sctp_abort_in_reasm(stcb, control, chk,
1443
2
                          abort_flag,
1444
2
                          SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1445
2
      return;
1446
2
    }
1447
890
    control->first_frag_seen = 1;
1448
890
    control->sinfo_ppid = chk->rec.data.ppid;
1449
890
    control->sinfo_tsn = chk->rec.data.tsn;
1450
890
    control->fsn_included = chk->rec.data.fsn;
1451
890
    control->data = chk->data;
1452
890
    sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1453
890
    chk->data = NULL;
1454
890
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1455
890
    sctp_setup_tail_pointer(control);
1456
890
    asoc->size_on_all_streams += control->length;
1457
1.73k
  } else {
1458
    /* Place the chunk in our list */
1459
1.73k
    int inserted=0;
1460
1.73k
    if (control->last_frag_seen == 0) {
1461
      /* Still willing to raise highest FSN seen */
1462
1.53k
      if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1463
164
        SCTPDBG(SCTP_DEBUG_XXX,
1464
164
          "We have a new top_fsn: %u\n",
1465
164
          chk->rec.data.fsn);
1466
164
        control->top_fsn = chk->rec.data.fsn;
1467
164
      }
1468
1.53k
      if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1469
595
        SCTPDBG(SCTP_DEBUG_XXX,
1470
595
          "The last fsn is now in place fsn: %u\n",
1471
595
          chk->rec.data.fsn);
1472
595
        control->last_frag_seen = 1;
1473
595
        if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
1474
79
          SCTPDBG(SCTP_DEBUG_XXX,
1475
79
            "New fsn: %u is not at top_fsn: %u -- abort\n",
1476
79
            chk->rec.data.fsn,
1477
79
            control->top_fsn);
1478
79
          sctp_abort_in_reasm(stcb, control, chk,
1479
79
                  abort_flag,
1480
79
                  SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1481
79
          return;
1482
79
        }
1483
595
      }
1484
1.45k
      if (asoc->idata_supported || control->first_frag_seen) {
1485
        /*
1486
         * For IDATA we always check since we know that
1487
         * the first fragment is 0. For old DATA we have
1488
         * to receive the first before we know the first FSN
1489
         * (which is the TSN).
1490
         */
1491
918
        if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1492
          /* We have already delivered up to this so its a dup */
1493
79
          sctp_abort_in_reasm(stcb, control, chk,
1494
79
                  abort_flag,
1495
79
                  SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1496
79
          return;
1497
79
        }
1498
918
      }
1499
1.45k
    } else {
1500
205
      if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1501
        /* Second last? huh? */
1502
1
        SCTPDBG(SCTP_DEBUG_XXX,
1503
1
          "Duplicate last fsn: %u (top: %u) -- abort\n",
1504
1
          chk->rec.data.fsn, control->top_fsn);
1505
1
        sctp_abort_in_reasm(stcb, control,
1506
1
                chk, abort_flag,
1507
1
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1508
1
        return;
1509
1
      }
1510
204
      if (asoc->idata_supported || control->first_frag_seen) {
1511
        /*
1512
         * For IDATA we always check since we know that
1513
         * the first fragment is 0. For old DATA we have
1514
         * to receive the first before we know the first FSN
1515
         * (which is the TSN).
1516
         */
1517
1518
163
        if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1519
          /* We have already delivered up to this so its a dup */
1520
39
          SCTPDBG(SCTP_DEBUG_XXX,
1521
39
            "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1522
39
            chk->rec.data.fsn, control->fsn_included);
1523
39
          sctp_abort_in_reasm(stcb, control, chk,
1524
39
                  abort_flag,
1525
39
                  SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1526
39
          return;
1527
39
        }
1528
163
      }
1529
      /* validate not beyond top FSN if we have seen last one */
1530
165
      if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1531
54
        SCTPDBG(SCTP_DEBUG_XXX,
1532
54
          "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1533
54
          chk->rec.data.fsn,
1534
54
          control->top_fsn);
1535
54
        sctp_abort_in_reasm(stcb, control, chk,
1536
54
                abort_flag,
1537
54
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1538
54
        return;
1539
54
      }
1540
165
    }
1541
    /*
1542
     * If we reach here, we need to place the
1543
     * new chunk in the reassembly for this
1544
     * control.
1545
     */
1546
1.48k
    SCTPDBG(SCTP_DEBUG_XXX,
1547
1.48k
      "chunk is a not first fsn: %u needs to be inserted\n",
1548
1.48k
      chk->rec.data.fsn);
1549
1.48k
    TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1550
492
      if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1551
213
        if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1552
          /* Last not at the end? huh? */
1553
1
          SCTPDBG(SCTP_DEBUG_XXX,
1554
1
                  "Last fragment not last in list: -- abort\n");
1555
1
          sctp_abort_in_reasm(stcb, control,
1556
1
                              chk, abort_flag,
1557
1
                              SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1558
1
          return;
1559
1
        }
1560
        /*
1561
         * This one in queue is bigger than the new one, insert
1562
         * the new one before at.
1563
         */
1564
212
        SCTPDBG(SCTP_DEBUG_XXX,
1565
212
          "Insert it before fsn: %u\n",
1566
212
          at->rec.data.fsn);
1567
212
        asoc->size_on_reasm_queue += chk->send_size;
1568
212
        sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1569
212
        TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1570
212
        inserted = 1;
1571
212
        break;
1572
279
      } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1573
        /* Gak, He sent me a duplicate str seq number */
1574
        /*
1575
         * foo bar, I guess I will just free this new guy,
1576
         * should we abort too? FIX ME MAYBE? Or it COULD be
1577
         * that the SSN's have wrapped. Maybe I should
1578
         * compare to TSN somehow... sigh for now just blow
1579
         * away the chunk!
1580
         */
1581
3
        SCTPDBG(SCTP_DEBUG_XXX,
1582
3
          "Duplicate to fsn: %u -- abort\n",
1583
3
          at->rec.data.fsn);
1584
3
        sctp_abort_in_reasm(stcb, control,
1585
3
                chk, abort_flag,
1586
3
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1587
3
        return;
1588
3
      }
1589
492
    }
1590
1.48k
    if (inserted == 0) {
1591
      /* Goes on the end */
1592
1.27k
      SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1593
1.27k
        chk->rec.data.fsn);
1594
1.27k
      asoc->size_on_reasm_queue += chk->send_size;
1595
1.27k
      sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1596
1.27k
      TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1597
1.27k
    }
1598
1.48k
  }
1599
  /*
1600
   * Ok lets see if we can suck any up into the control
1601
   * structure that are in seq if it makes sense.
1602
   */
1603
2.37k
  do_wakeup = 0;
1604
  /*
1605
   * If the first fragment has not been
1606
   * seen there is no sense in looking.
1607
   */
1608
2.37k
  if (control->first_frag_seen) {
1609
1.05k
    next_fsn = control->fsn_included + 1;
1610
1.05k
    TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1611
214
      if (at->rec.data.fsn == next_fsn) {
1612
        /* We can add this one now to the control */
1613
61
        SCTPDBG(SCTP_DEBUG_XXX,
1614
61
          "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1615
61
          control, at,
1616
61
          at->rec.data.fsn,
1617
61
          next_fsn, control->fsn_included);
1618
61
        TAILQ_REMOVE(&control->reasm, at, sctp_next);
1619
61
        lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1620
61
        if (control->on_read_q) {
1621
0
          do_wakeup = 1;
1622
61
        } else {
1623
          /*
1624
           * We only add to the size-on-all-streams
1625
           * if its not on the read q. The read q
1626
           * flag will cause a sballoc so its accounted
1627
           * for there.
1628
           */
1629
61
          asoc->size_on_all_streams += lenadded;
1630
61
        }
1631
61
        next_fsn++;
1632
61
        if (control->end_added && control->pdapi_started) {
1633
0
          if (strm->pd_api_started) {
1634
0
            strm->pd_api_started = 0;
1635
0
            control->pdapi_started = 0;
1636
0
          }
1637
0
          if (control->on_read_q == 0) {
1638
0
            sctp_add_to_readq(stcb->sctp_ep, stcb,
1639
0
                  control,
1640
0
                  &stcb->sctp_socket->so_rcv, control->end_added,
1641
0
                  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1642
0
          }
1643
0
          break;
1644
0
        }
1645
153
      } else {
1646
153
        break;
1647
153
      }
1648
214
    }
1649
1.05k
  }
1650
2.37k
  if (do_wakeup) {
1651
0
#if defined(__Userspace__)
1652
0
    sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD);
1653
0
#endif
1654
    /* Need to wakeup the reader */
1655
0
    sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1656
0
  }
1657
2.37k
}
1658
1659
static struct sctp_queued_to_read *
1660
sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1661
6.20k
{
1662
6.20k
  struct sctp_queued_to_read *control;
1663
1664
6.20k
  if (ordered) {
1665
2.99k
    TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1666
2.99k
      if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1667
529
        break;
1668
529
      }
1669
2.99k
    }
1670
3.22k
  } else {
1671
3.22k
    if (idata_supported) {
1672
1.71k
      TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1673
1.71k
        if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1674
84
          break;
1675
84
        }
1676
1.71k
      }
1677
2.46k
    } else {
1678
2.46k
      control = TAILQ_FIRST(&strm->uno_inqueue);
1679
2.46k
    }
1680
3.22k
  }
1681
6.20k
  return (control);
1682
6.20k
}
1683
1684
static int
1685
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1686
        struct mbuf **m, int offset,  int chk_length,
1687
        struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1688
        int *break_flag, int last_chunk, uint8_t chk_type)
1689
11.7k
{
1690
11.7k
  struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
1691
11.7k
  struct sctp_stream_in *strm;
1692
11.7k
  uint32_t tsn, fsn, gap, mid;
1693
11.7k
  struct mbuf *dmbuf;
1694
11.7k
  int the_len;
1695
11.7k
  int need_reasm_check = 0;
1696
11.7k
  uint16_t sid;
1697
11.7k
  struct mbuf *op_err;
1698
11.7k
  char msg[SCTP_DIAG_INFO_LEN];
1699
11.7k
  struct sctp_queued_to_read *control, *ncontrol;
1700
11.7k
  uint32_t ppid;
1701
11.7k
  uint8_t chk_flags;
1702
11.7k
  struct sctp_stream_reset_list *liste;
1703
11.7k
  int ordered;
1704
11.7k
  size_t clen;
1705
11.7k
  int created_control = 0;
1706
1707
11.7k
  if (chk_type == SCTP_IDATA) {
1708
3.98k
    struct sctp_idata_chunk *chunk, chunk_buf;
1709
1710
3.98k
    chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1711
3.98k
                                                     sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1712
3.98k
    chk_flags = chunk->ch.chunk_flags;
1713
3.98k
    clen = sizeof(struct sctp_idata_chunk);
1714
3.98k
    tsn = ntohl(chunk->dp.tsn);
1715
3.98k
    sid = ntohs(chunk->dp.sid);
1716
3.98k
    mid = ntohl(chunk->dp.mid);
1717
3.98k
    if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1718
2.33k
      fsn = 0;
1719
2.33k
      ppid = chunk->dp.ppid_fsn.ppid;
1720
2.33k
    } else {
1721
1.64k
      fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1722
1.64k
      ppid = 0xffffffff; /* Use as an invalid value. */
1723
1.64k
    }
1724
7.76k
  } else {
1725
7.76k
    struct sctp_data_chunk *chunk, chunk_buf;
1726
1727
7.76k
    chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1728
7.76k
                                                    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1729
7.76k
    chk_flags = chunk->ch.chunk_flags;
1730
7.76k
    clen = sizeof(struct sctp_data_chunk);
1731
7.76k
    tsn = ntohl(chunk->dp.tsn);
1732
7.76k
    sid = ntohs(chunk->dp.sid);
1733
7.76k
    mid = (uint32_t)(ntohs(chunk->dp.ssn));
1734
7.76k
    fsn = tsn;
1735
7.76k
    ppid = chunk->dp.ppid;
1736
7.76k
  }
1737
11.7k
  if ((size_t)chk_length == clen) {
1738
    /*
1739
     * Need to send an abort since we had a
1740
     * empty data chunk.
1741
     */
1742
7
    op_err = sctp_generate_no_user_data_cause(tsn);
1743
7
    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1744
7
    sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
1745
7
    *abort_flag = 1;
1746
7
    return (0);
1747
7
  }
1748
11.7k
  if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1749
3.34k
    asoc->send_sack = 1;
1750
3.34k
  }
1751
11.7k
  ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1752
11.7k
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1753
0
    sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1754
0
  }
1755
11.7k
  if (stcb == NULL) {
1756
0
    return (0);
1757
0
  }
1758
11.7k
  SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1759
11.7k
  if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1760
    /* It is a duplicate */
1761
2.03k
    SCTP_STAT_INCR(sctps_recvdupdata);
1762
2.03k
    if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1763
      /* Record a dup for the next outbound sack */
1764
1.52k
      asoc->dup_tsns[asoc->numduptsns] = tsn;
1765
1.52k
      asoc->numduptsns++;
1766
1.52k
    }
1767
2.03k
    asoc->send_sack = 1;
1768
2.03k
    return (0);
1769
2.03k
  }
1770
  /* Calculate the number of TSN's between the base and this TSN */
1771
9.70k
  SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1772
9.70k
  if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1773
    /* Can't hold the bit in the mapping at max array, toss it */
1774
1.58k
    return (0);
1775
1.58k
  }
1776
8.12k
  if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1777
2.07k
    SCTP_TCB_LOCK_ASSERT(stcb);
1778
2.07k
    if (sctp_expand_mapping_array(asoc, gap)) {
1779
      /* Can't expand, drop it */
1780
0
      return (0);
1781
0
    }
1782
2.07k
  }
1783
8.12k
  if (SCTP_TSN_GT(tsn, *high_tsn)) {
1784
3.26k
    *high_tsn = tsn;
1785
3.26k
  }
1786
  /* See if we have received this one already */
1787
8.12k
  if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1788
7.48k
      SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1789
1.62k
    SCTP_STAT_INCR(sctps_recvdupdata);
1790
1.62k
    if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1791
      /* Record a dup for the next outbound sack */
1792
787
      asoc->dup_tsns[asoc->numduptsns] = tsn;
1793
787
      asoc->numduptsns++;
1794
787
    }
1795
1.62k
    asoc->send_sack = 1;
1796
1.62k
    return (0);
1797
1.62k
  }
1798
  /*
1799
   * Check to see about the GONE flag, duplicates would cause a sack
1800
   * to be sent up above
1801
   */
1802
6.49k
  if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1803
6.49k
       (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1804
6.49k
       (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1805
    /*
1806
     * wait a minute, this guy is gone, there is no longer a
1807
     * receiver. Send peer an ABORT!
1808
     */
1809
0
    op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1810
0
    sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
1811
0
    *abort_flag = 1;
1812
0
    return (0);
1813
0
  }
1814
  /*
1815
   * Now before going further we see if there is room. If NOT then we
1816
   * MAY let one through only IF this TSN is the one we are waiting
1817
   * for on a partial delivery API.
1818
   */
1819
1820
  /* Is the stream valid? */
1821
6.49k
  if (sid >= asoc->streamincnt) {
1822
286
    struct sctp_error_invalid_stream *cause;
1823
1824
286
    op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1825
286
                                   0, M_NOWAIT, 1, MT_DATA);
1826
286
    if (op_err != NULL) {
1827
      /* add some space up front so prepend will work well */
1828
286
      SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1829
286
      cause = mtod(op_err, struct sctp_error_invalid_stream *);
1830
      /*
1831
       * Error causes are just param's and this one has
1832
       * two back to back phdr, one with the error type
1833
       * and size, the other with the streamid and a rsvd
1834
       */
1835
286
      SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1836
286
      cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1837
286
      cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1838
286
      cause->stream_id = htons(sid);
1839
286
      cause->reserved = htons(0);
1840
286
      sctp_queue_op_err(stcb, op_err);
1841
286
    }
1842
286
    SCTP_STAT_INCR(sctps_badsid);
1843
286
    SCTP_TCB_LOCK_ASSERT(stcb);
1844
286
    SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1845
286
    if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1846
224
      asoc->highest_tsn_inside_nr_map = tsn;
1847
224
    }
1848
286
    if (tsn == (asoc->cumulative_tsn + 1)) {
1849
      /* Update cum-ack */
1850
14
      asoc->cumulative_tsn = tsn;
1851
14
    }
1852
286
    return (0);
1853
286
  }
1854
  /*
1855
   * If its a fragmented message, lets see if we can
1856
   * find the control on the reassembly queues.
1857
   */
1858
6.20k
  if ((chk_type == SCTP_IDATA) &&
1859
2.09k
      ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1860
1.04k
      (fsn == 0)) {
1861
    /*
1862
     *  The first *must* be fsn 0, and other
1863
     *  (middle/end) pieces can *not* be fsn 0.
1864
     * XXX: This can happen in case of a wrap around.
1865
     *      Ignore is for now.
1866
     */
1867
1
    SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
1868
1
    goto err_out;
1869
1
  }
1870
6.20k
  control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1871
6.20k
  SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1872
6.20k
    chk_flags, control);
1873
6.20k
  if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1874
    /* See if we can find the re-assembly entity */
1875
4.70k
    if (control != NULL) {
1876
      /* We found something, does it belong? */
1877
2.05k
      if (ordered && (mid != control->mid)) {
1878
0
        SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1879
6
      err_out:
1880
6
        op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1881
6
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1882
6
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
1883
6
        *abort_flag = 1;
1884
6
        return (0);
1885
0
      }
1886
2.05k
      if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1887
        /* We can't have a switched order with an unordered chunk */
1888
0
        SCTP_SNPRINTF(msg, sizeof(msg),
1889
0
                      "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1890
0
                      tsn);
1891
0
        goto err_out;
1892
0
      }
1893
2.05k
      if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1894
        /* We can't have a switched unordered with a ordered chunk */
1895
0
        SCTP_SNPRINTF(msg, sizeof(msg),
1896
0
                     "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1897
0
                     tsn);
1898
0
        goto err_out;
1899
0
      }
1900
2.05k
    }
1901
4.70k
  } else {
1902
    /* Its a complete segment. Lets validate we
1903
     * don't have a re-assembly going on with
1904
     * the same Stream/Seq (for ordered) or in
1905
     * the same Stream for unordered.
1906
     */
1907
1.50k
    if (control != NULL) {
1908
105
      if (ordered || asoc->idata_supported) {
1909
3
        SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1910
3
          chk_flags, mid);
1911
3
        SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1912
3
        goto err_out;
1913
102
      } else {
1914
102
        if ((control->first_frag_seen) &&
1915
76
            (tsn == control->fsn_included + 1) &&
1916
3
            (control->end_added == 0)) {
1917
2
          SCTP_SNPRINTF(msg, sizeof(msg),
1918
2
                        "Illegal message sequence, missing end for MID: %8.8x",
1919
2
                        control->fsn_included);
1920
2
          goto err_out;
1921
100
        } else {
1922
100
          control = NULL;
1923
100
        }
1924
102
      }
1925
105
    }
1926
1.50k
  }
1927
  /* now do the tests */
1928
6.20k
  if (((asoc->cnt_on_all_streams +
1929
6.20k
        asoc->cnt_on_reasm_queue +
1930
6.20k
        asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1931
6.20k
      (((int)asoc->my_rwnd) <= 0)) {
1932
    /*
1933
     * When we have NO room in the rwnd we check to make sure
1934
     * the reader is doing its job...
1935
     */
1936
90
    if (SCTP_SBAVAIL(&stcb->sctp_socket->so_rcv) > 0) {
1937
      /* some to read, wake-up */
1938
#if defined(__APPLE__) && !defined(__Userspace__)
1939
      struct socket *so;
1940
1941
      so = SCTP_INP_SO(stcb->sctp_ep);
1942
      atomic_add_int(&stcb->asoc.refcnt, 1);
1943
      SCTP_TCB_UNLOCK(stcb);
1944
      SCTP_SOCKET_LOCK(so, 1);
1945
      SCTP_TCB_LOCK(stcb);
1946
      atomic_subtract_int(&stcb->asoc.refcnt, 1);
1947
      if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1948
        /* assoc was freed while we were unlocked */
1949
        SCTP_SOCKET_UNLOCK(so, 1);
1950
        return (0);
1951
      }
1952
#endif
1953
90
      sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1954
#if defined(__APPLE__) && !defined(__Userspace__)
1955
      SCTP_SOCKET_UNLOCK(so, 1);
1956
#endif
1957
90
    }
1958
    /* now is it in the mapping array of what we have accepted? */
1959
90
    if (chk_type == SCTP_DATA) {
1960
89
      if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1961
44
          SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1962
        /* Nope not in the valid range dump it */
1963
12
      dump_packet:
1964
12
        sctp_set_rwnd(stcb, asoc);
1965
12
        if ((asoc->cnt_on_all_streams +
1966
12
             asoc->cnt_on_reasm_queue +
1967
12
             asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1968
0
          SCTP_STAT_INCR(sctps_datadropchklmt);
1969
12
        } else {
1970
12
          SCTP_STAT_INCR(sctps_datadroprwnd);
1971
12
        }
1972
12
        *break_flag = 1;
1973
12
        return (0);
1974
11
      }
1975
89
    } else {
1976
1
      if (control == NULL) {
1977
1
        goto dump_packet;
1978
1
      }
1979
0
      if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1980
0
        goto dump_packet;
1981
0
      }
1982
0
    }
1983
90
  }
1984
#ifdef SCTP_ASOCLOG_OF_TSNS
1985
  SCTP_TCB_LOCK_ASSERT(stcb);
1986
  if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1987
    asoc->tsn_in_at = 0;
1988
    asoc->tsn_in_wrapped = 1;
1989
  }
1990
  asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1991
  asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1992
  asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1993
  asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1994
  asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1995
  asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1996
  asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1997
  asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1998
  asoc->tsn_in_at++;
1999
#endif
2000
  /*
2001
   * Before we continue lets validate that we are not being fooled by
2002
   * an evil attacker. We can only have Nk chunks based on our TSN
2003
   * spread allowed by the mapping array N * 8 bits, so there is no
2004
   * way our stream sequence numbers could have wrapped. We of course
2005
   * only validate the FIRST fragment so the bit must be set.
2006
   */
2007
6.19k
  if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
2008
3.61k
      (TAILQ_EMPTY(&asoc->resetHead)) &&
2009
2.84k
      (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2010
1.02k
      SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2011
    /* The incoming sseq is behind where we last delivered? */
2012
58
    SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2013
58
      mid, asoc->strmin[sid].last_mid_delivered);
2014
2015
58
    if (asoc->idata_supported) {
2016
36
      SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2017
36
                    asoc->strmin[sid].last_mid_delivered,
2018
36
                    tsn,
2019
36
                    sid,
2020
36
                    mid);
2021
36
    } else {
2022
22
      SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2023
22
                    (uint16_t)asoc->strmin[sid].last_mid_delivered,
2024
22
                    tsn,
2025
22
                    sid,
2026
22
                    (uint16_t)mid);
2027
22
    }
2028
58
    op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2029
58
    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2030
58
    sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2031
58
    *abort_flag = 1;
2032
58
    return (0);
2033
58
  }
2034
6.13k
  if (chk_type == SCTP_IDATA) {
2035
2.05k
    the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2036
4.08k
  } else {
2037
4.08k
    the_len = (chk_length - sizeof(struct sctp_data_chunk));
2038
4.08k
  }
2039
6.13k
  if (last_chunk == 0) {
2040
5.97k
    if (chk_type == SCTP_IDATA) {
2041
2.02k
      dmbuf = SCTP_M_COPYM(*m,
2042
2.02k
               (offset + sizeof(struct sctp_idata_chunk)),
2043
2.02k
               the_len, M_NOWAIT);
2044
3.95k
    } else {
2045
3.95k
      dmbuf = SCTP_M_COPYM(*m,
2046
3.95k
               (offset + sizeof(struct sctp_data_chunk)),
2047
3.95k
               the_len, M_NOWAIT);
2048
3.95k
    }
2049
#ifdef SCTP_MBUF_LOGGING
2050
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2051
      sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2052
    }
2053
#endif
2054
5.97k
  } else {
2055
    /* We can steal the last chunk */
2056
158
    int l_len;
2057
158
    dmbuf = *m;
2058
    /* lop off the top part */
2059
158
    if (chk_type == SCTP_IDATA) {
2060
32
      m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2061
126
    } else {
2062
126
      m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2063
126
    }
2064
158
    if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2065
57
      l_len = SCTP_BUF_LEN(dmbuf);
2066
101
    } else {
2067
      /* need to count up the size hopefully
2068
       * does not hit this to often :-0
2069
       */
2070
101
      struct mbuf *lat;
2071
2072
101
      l_len = 0;
2073
1.04k
      for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2074
943
        l_len += SCTP_BUF_LEN(lat);
2075
943
      }
2076
101
    }
2077
158
    if (l_len > the_len) {
2078
      /* Trim the end round bytes off  too */
2079
121
      m_adj(dmbuf, -(l_len - the_len));
2080
121
    }
2081
158
  }
2082
6.13k
  if (dmbuf == NULL) {
2083
0
    SCTP_STAT_INCR(sctps_nomem);
2084
0
    return (0);
2085
0
  }
2086
  /*
2087
   * Now no matter what, we need a control, get one
2088
   * if we don't have one (we may have gotten it
2089
   * above when we found the message was fragmented
2090
   */
2091
6.13k
  if (control == NULL) {
2092
4.08k
    sctp_alloc_a_readq(stcb, control);
2093
4.08k
    sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2094
4.08k
             ppid,
2095
4.08k
             sid,
2096
4.08k
             chk_flags,
2097
4.08k
             NULL, fsn, mid);
2098
4.08k
    if (control == NULL) {
2099
0
      SCTP_STAT_INCR(sctps_nomem);
2100
0
      return (0);
2101
0
    }
2102
4.08k
    if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2103
1.46k
      struct mbuf *mm;
2104
2105
1.46k
      control->data = dmbuf;
2106
1.46k
      control->tail_mbuf = NULL;
2107
3.37k
      for (mm = control->data; mm; mm = mm->m_next) {
2108
1.91k
        control->length += SCTP_BUF_LEN(mm);
2109
1.91k
        if (SCTP_BUF_NEXT(mm) == NULL) {
2110
1.46k
          control->tail_mbuf = mm;
2111
1.46k
        }
2112
1.91k
      }
2113
1.46k
      control->end_added = 1;
2114
1.46k
      control->last_frag_seen = 1;
2115
1.46k
      control->first_frag_seen = 1;
2116
1.46k
      control->fsn_included = fsn;
2117
1.46k
      control->top_fsn = fsn;
2118
1.46k
    }
2119
4.08k
    created_control = 1;
2120
4.08k
  }
2121
6.13k
  SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2122
6.13k
    chk_flags, ordered, mid, control);
2123
6.13k
  if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2124
6.13k
      TAILQ_EMPTY(&asoc->resetHead) &&
2125
770
      ((ordered == 0) ||
2126
482
       (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2127
482
        TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2128
    /* Candidate for express delivery */
2129
    /*
2130
     * Its not fragmented, No PD-API is up, Nothing in the
2131
     * delivery queue, Its un-ordered OR ordered and the next to
2132
     * deliver AND nothing else is stuck on the stream queue,
2133
     * And there is room for it in the socket buffer. Lets just
2134
     * stuff it up the buffer....
2135
     */
2136
318
    SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2137
318
    if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2138
158
      asoc->highest_tsn_inside_nr_map = tsn;
2139
158
    }
2140
318
    SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2141
318
      control, mid);
2142
2143
318
    sctp_add_to_readq(stcb->sctp_ep, stcb,
2144
318
                      control, &stcb->sctp_socket->so_rcv,
2145
318
                      1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2146
2147
318
    if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2148
      /* for ordered, bump what we delivered */
2149
30
      asoc->strmin[sid].last_mid_delivered++;
2150
30
    }
2151
318
    SCTP_STAT_INCR(sctps_recvexpress);
2152
318
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2153
0
      sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2154
0
                SCTP_STR_LOG_FROM_EXPRS_DEL);
2155
0
    }
2156
318
    control = NULL;
2157
318
    goto finish_express_del;
2158
318
  }
2159
2160
  /* Now will we need a chunk too? */
2161
5.81k
  if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2162
4.67k
    sctp_alloc_a_chunk(stcb, chk);
2163
4.67k
    if (chk == NULL) {
2164
      /* No memory so we drop the chunk */
2165
0
      SCTP_STAT_INCR(sctps_nomem);
2166
0
      if (last_chunk == 0) {
2167
        /* we copied it, free the copy */
2168
0
        sctp_m_freem(dmbuf);
2169
0
      }
2170
0
      return (0);
2171
0
    }
2172
4.67k
    chk->rec.data.tsn = tsn;
2173
4.67k
    chk->no_fr_allowed = 0;
2174
4.67k
    chk->rec.data.fsn = fsn;
2175
4.67k
    chk->rec.data.mid = mid;
2176
4.67k
    chk->rec.data.sid = sid;
2177
4.67k
    chk->rec.data.ppid = ppid;
2178
4.67k
    chk->rec.data.context = stcb->asoc.context;
2179
4.67k
    chk->rec.data.doing_fast_retransmit = 0;
2180
4.67k
    chk->rec.data.rcv_flags = chk_flags;
2181
4.67k
    chk->asoc = asoc;
2182
4.67k
    chk->send_size = the_len;
2183
4.67k
    chk->whoTo = net;
2184
4.67k
    SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2185
4.67k
      chk,
2186
4.67k
      control, mid);
2187
4.67k
    atomic_add_int(&net->ref_count, 1);
2188
4.67k
    chk->data = dmbuf;
2189
4.67k
  }
2190
  /* Set the appropriate TSN mark */
2191
5.81k
  if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2192
0
    SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2193
0
    if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2194
0
      asoc->highest_tsn_inside_nr_map = tsn;
2195
0
    }
2196
5.81k
  } else {
2197
5.81k
    SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2198
5.81k
    if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2199
3.28k
      asoc->highest_tsn_inside_map = tsn;
2200
3.28k
    }
2201
5.81k
  }
2202
  /* Now is it complete (i.e. not fragmented)? */
2203
5.81k
  if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2204
    /*
2205
     * Special check for when streams are resetting. We
2206
     * could be more smart about this and check the
2207
     * actual stream to see if it is not being reset..
2208
     * that way we would not create a HOLB when amongst
2209
     * streams being reset and those not being reset.
2210
     *
2211
     */
2212
1.14k
    if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2213
692
        SCTP_TSN_GT(tsn, liste->tsn)) {
2214
      /*
2215
       * yep its past where we need to reset... go
2216
       * ahead and queue it.
2217
       */
2218
352
      if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2219
        /* first one on */
2220
125
        TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2221
227
      } else {
2222
227
        struct sctp_queued_to_read *lcontrol, *nlcontrol;
2223
227
        unsigned char inserted = 0;
2224
713
        TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2225
713
          if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2226
576
            continue;
2227
576
          } else {
2228
            /* found it */
2229
137
            TAILQ_INSERT_BEFORE(lcontrol, control, next);
2230
137
            inserted = 1;
2231
137
            break;
2232
137
          }
2233
713
        }
2234
227
        if (inserted == 0) {
2235
          /*
2236
           * must be put at end, use
2237
           * prevP (all setup from
2238
           * loop) to setup nextP.
2239
           */
2240
90
          TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2241
90
        }
2242
227
      }
2243
352
      goto finish_express_del;
2244
352
    }
2245
792
    if (chk_flags & SCTP_DATA_UNORDERED) {
2246
      /* queue directly into socket buffer */
2247
76
      SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2248
76
        control, mid);
2249
76
      sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2250
76
      sctp_add_to_readq(stcb->sctp_ep, stcb,
2251
76
                        control,
2252
76
                        &stcb->sctp_socket->so_rcv, 1,
2253
76
                        SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2254
2255
716
    } else {
2256
716
      SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2257
716
        mid);
2258
716
      sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2259
716
      if (*abort_flag) {
2260
48
        if (last_chunk) {
2261
6
          *m = NULL;
2262
6
        }
2263
48
        return (0);
2264
48
      }
2265
716
    }
2266
744
    goto finish_express_del;
2267
792
  }
2268
  /* If we reach here its a reassembly */
2269
4.67k
  need_reasm_check = 1;
2270
4.67k
  SCTPDBG(SCTP_DEBUG_XXX,
2271
4.67k
    "Queue data to stream for reasm control: %p MID: %u\n",
2272
4.67k
    control, mid);
2273
4.67k
  sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2274
4.67k
  if (*abort_flag) {
2275
    /*
2276
     * the assoc is now gone and chk was put onto the
2277
     * reasm queue, which has all been freed.
2278
     */
2279
261
    if (last_chunk) {
2280
6
      *m = NULL;
2281
6
    }
2282
261
    return (0);
2283
261
  }
2284
5.82k
finish_express_del:
2285
  /* Here we tidy up things */
2286
5.82k
  if (tsn == (asoc->cumulative_tsn + 1)) {
2287
    /* Update cum-ack */
2288
124
    asoc->cumulative_tsn = tsn;
2289
124
  }
2290
5.82k
  if (last_chunk) {
2291
146
    *m = NULL;
2292
146
  }
2293
5.82k
  if (ordered) {
2294
2.67k
    SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2295
3.15k
  } else {
2296
3.15k
    SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2297
3.15k
  }
2298
5.82k
  SCTP_STAT_INCR(sctps_recvdata);
2299
  /* Set it present please */
2300
5.82k
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2301
0
    sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2302
0
  }
2303
5.82k
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2304
0
    sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2305
0
           asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2306
0
  }
2307
5.82k
  if (need_reasm_check) {
2308
4.41k
    (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2309
4.41k
    need_reasm_check = 0;
2310
4.41k
  }
2311
  /* check the special flag for stream resets */
2312
5.82k
  if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2313
783
      SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2314
    /*
2315
     * we have finished working through the backlogged TSN's now
2316
     * time to reset streams. 1: call reset function. 2: free
2317
     * pending_reply space 3: distribute any chunks in
2318
     * pending_reply_queue.
2319
     */
2320
145
    sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2321
145
    TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2322
145
    sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2323
145
    SCTP_FREE(liste, SCTP_M_STRESET);
2324
    /*sa_ignore FREED_MEMORY*/
2325
145
    liste = TAILQ_FIRST(&asoc->resetHead);
2326
145
    if (TAILQ_EMPTY(&asoc->resetHead)) {
2327
      /* All can be removed */
2328
80
      TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2329
80
        TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2330
80
        strm = &asoc->strmin[control->sinfo_stream];
2331
80
        sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2332
80
        if (*abort_flag) {
2333
14
          return (0);
2334
14
        }
2335
66
        if (need_reasm_check) {
2336
1
          (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2337
1
          need_reasm_check = 0;
2338
1
        }
2339
66
      }
2340
87
    } else {
2341
128
      TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2342
128
        if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2343
38
          break;
2344
38
        }
2345
        /*
2346
         * if control->sinfo_tsn is <= liste->tsn we can
2347
         * process it which is the NOT of
2348
         * control->sinfo_tsn > liste->tsn
2349
         */
2350
128
        TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2351
90
        strm = &asoc->strmin[control->sinfo_stream];
2352
90
        sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2353
90
        if (*abort_flag) {
2354
19
          return (0);
2355
19
        }
2356
71
        if (need_reasm_check) {
2357
2
          (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2358
2
          need_reasm_check = 0;
2359
2
        }
2360
71
      }
2361
87
    }
2362
145
  }
2363
5.79k
  return (1);
2364
5.82k
}
2365
2366
static const int8_t sctp_map_lookup_tab[256] = {
2367
  0, 1, 0, 2, 0, 1, 0, 3,
2368
  0, 1, 0, 2, 0, 1, 0, 4,
2369
  0, 1, 0, 2, 0, 1, 0, 3,
2370
  0, 1, 0, 2, 0, 1, 0, 5,
2371
  0, 1, 0, 2, 0, 1, 0, 3,
2372
  0, 1, 0, 2, 0, 1, 0, 4,
2373
  0, 1, 0, 2, 0, 1, 0, 3,
2374
  0, 1, 0, 2, 0, 1, 0, 6,
2375
  0, 1, 0, 2, 0, 1, 0, 3,
2376
  0, 1, 0, 2, 0, 1, 0, 4,
2377
  0, 1, 0, 2, 0, 1, 0, 3,
2378
  0, 1, 0, 2, 0, 1, 0, 5,
2379
  0, 1, 0, 2, 0, 1, 0, 3,
2380
  0, 1, 0, 2, 0, 1, 0, 4,
2381
  0, 1, 0, 2, 0, 1, 0, 3,
2382
  0, 1, 0, 2, 0, 1, 0, 7,
2383
  0, 1, 0, 2, 0, 1, 0, 3,
2384
  0, 1, 0, 2, 0, 1, 0, 4,
2385
  0, 1, 0, 2, 0, 1, 0, 3,
2386
  0, 1, 0, 2, 0, 1, 0, 5,
2387
  0, 1, 0, 2, 0, 1, 0, 3,
2388
  0, 1, 0, 2, 0, 1, 0, 4,
2389
  0, 1, 0, 2, 0, 1, 0, 3,
2390
  0, 1, 0, 2, 0, 1, 0, 6,
2391
  0, 1, 0, 2, 0, 1, 0, 3,
2392
  0, 1, 0, 2, 0, 1, 0, 4,
2393
  0, 1, 0, 2, 0, 1, 0, 3,
2394
  0, 1, 0, 2, 0, 1, 0, 5,
2395
  0, 1, 0, 2, 0, 1, 0, 3,
2396
  0, 1, 0, 2, 0, 1, 0, 4,
2397
  0, 1, 0, 2, 0, 1, 0, 3,
2398
  0, 1, 0, 2, 0, 1, 0, 8
2399
};
2400
2401
void
2402
sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2403
16.7k
{
2404
  /*
2405
   * Now we also need to check the mapping array in a couple of ways.
2406
   * 1) Did we move the cum-ack point?
2407
   *
2408
   * When you first glance at this you might think
2409
   * that all entries that make up the position
2410
   * of the cum-ack would be in the nr-mapping array
2411
   * only.. i.e. things up to the cum-ack are always
2412
   * deliverable. Thats true with one exception, when
2413
   * its a fragmented message we may not deliver the data
2414
   * until some threshold (or all of it) is in place. So
2415
   * we must OR the nr_mapping_array and mapping_array to
2416
   * get a true picture of the cum-ack.
2417
   */
2418
16.7k
  struct sctp_association *asoc;
2419
16.7k
  int at;
2420
16.7k
  uint8_t val;
2421
16.7k
  int slide_from, slide_end, lgap, distance;
2422
16.7k
  uint32_t old_cumack, old_base, old_highest, highest_tsn;
2423
2424
16.7k
  asoc = &stcb->asoc;
2425
2426
16.7k
  old_cumack = asoc->cumulative_tsn;
2427
16.7k
  old_base = asoc->mapping_array_base_tsn;
2428
16.7k
  old_highest = asoc->highest_tsn_inside_map;
2429
  /*
2430
   * We could probably improve this a small bit by calculating the
2431
   * offset of the current cum-ack as the starting point.
2432
   */
2433
16.7k
  at = 0;
2434
18.0k
  for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2435
18.0k
    val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2436
18.0k
    if (val == 0xff) {
2437
1.29k
      at += 8;
2438
16.7k
    } else {
2439
      /* there is a 0 bit */
2440
16.7k
      at += sctp_map_lookup_tab[val];
2441
16.7k
      break;
2442
16.7k
    }
2443
18.0k
  }
2444
16.7k
  asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at-1);
2445
2446
16.7k
  if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2447
4.28k
            SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2448
0
#ifdef INVARIANTS
2449
0
    panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2450
0
          asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2451
#else
2452
    SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2453
          asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2454
    sctp_print_mapping_array(asoc);
2455
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2456
      sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2457
    }
2458
    asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2459
    asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2460
#endif
2461
0
  }
2462
16.7k
  if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2463
5.65k
    highest_tsn = asoc->highest_tsn_inside_nr_map;
2464
11.0k
  } else {
2465
11.0k
    highest_tsn = asoc->highest_tsn_inside_map;
2466
11.0k
  }
2467
16.7k
  if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2468
    /* The complete array was completed by a single FR */
2469
    /* highest becomes the cum-ack */
2470
214
    int clr;
2471
214
#ifdef INVARIANTS
2472
214
    unsigned int i;
2473
214
#endif
2474
2475
    /* clear the array */
2476
214
    clr = ((at+7) >> 3);
2477
214
    if (clr > asoc->mapping_array_size) {
2478
0
      clr = asoc->mapping_array_size;
2479
0
    }
2480
214
    memset(asoc->mapping_array, 0, clr);
2481
214
    memset(asoc->nr_mapping_array, 0, clr);
2482
214
#ifdef INVARIANTS
2483
3.63k
    for (i = 0; i < asoc->mapping_array_size; i++) {
2484
3.42k
      if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2485
0
        SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2486
0
        sctp_print_mapping_array(asoc);
2487
0
      }
2488
3.42k
    }
2489
214
#endif
2490
214
    asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2491
214
    asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2492
16.5k
  } else if (at >= 8) {
2493
    /* we can slide the mapping array down */
2494
    /* slide_from holds where we hit the first NON 0xff byte */
2495
2496
    /*
2497
     * now calculate the ceiling of the move using our highest
2498
     * TSN value
2499
     */
2500
14
    SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2501
14
    slide_end = (lgap >> 3);
2502
14
    if (slide_end < slide_from) {
2503
0
      sctp_print_mapping_array(asoc);
2504
0
#ifdef INVARIANTS
2505
0
      panic("impossible slide");
2506
#else
2507
      SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2508
                  lgap, slide_end, slide_from, at);
2509
      return;
2510
#endif
2511
0
    }
2512
14
    if (slide_end > asoc->mapping_array_size) {
2513
0
#ifdef INVARIANTS
2514
0
      panic("would overrun buffer");
2515
#else
2516
      SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2517
                  asoc->mapping_array_size, slide_end);
2518
      slide_end = asoc->mapping_array_size;
2519
#endif
2520
0
    }
2521
14
    distance = (slide_end - slide_from) + 1;
2522
14
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2523
0
      sctp_log_map(old_base, old_cumack, old_highest,
2524
0
             SCTP_MAP_PREPARE_SLIDE);
2525
0
      sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2526
0
             (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2527
0
    }
2528
14
    if (distance + slide_from > asoc->mapping_array_size ||
2529
14
        distance < 0) {
2530
      /*
2531
       * Here we do NOT slide forward the array so that
2532
       * hopefully when more data comes in to fill it up
2533
       * we will be able to slide it forward. Really I
2534
       * don't think this should happen :-0
2535
       */
2536
0
      if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2537
0
        sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2538
0
               (uint32_t) asoc->mapping_array_size,
2539
0
               SCTP_MAP_SLIDE_NONE);
2540
0
      }
2541
14
    } else {
2542
14
      int ii;
2543
2544
1.06k
      for (ii = 0; ii < distance; ii++) {
2545
1.05k
        asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2546
1.05k
        asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2547
1.05k
      }
2548
339
      for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2549
325
        asoc->mapping_array[ii] = 0;
2550
325
        asoc->nr_mapping_array[ii] = 0;
2551
325
      }
2552
14
      if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2553
9
        asoc->highest_tsn_inside_map += (slide_from << 3);
2554
9
      }
2555
14
      if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2556
0
        asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2557
0
      }
2558
14
      asoc->mapping_array_base_tsn += (slide_from << 3);
2559
14
      if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2560
0
        sctp_log_map(asoc->mapping_array_base_tsn,
2561
0
               asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2562
0
               SCTP_MAP_SLIDE_RESULT);
2563
0
      }
2564
14
    }
2565
14
  }
2566
16.7k
}
2567
2568
void
2569
sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2570
4.62k
{
2571
4.62k
  struct sctp_association *asoc;
2572
4.62k
  uint32_t highest_tsn;
2573
4.62k
  int is_a_gap;
2574
2575
4.62k
  sctp_slide_mapping_arrays(stcb);
2576
4.62k
  asoc = &stcb->asoc;
2577
4.62k
  if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2578
796
    highest_tsn = asoc->highest_tsn_inside_nr_map;
2579
3.83k
  } else {
2580
3.83k
    highest_tsn = asoc->highest_tsn_inside_map;
2581
3.83k
  }
2582
  /* Is there a gap now? */
2583
4.62k
  is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2584
2585
  /*
2586
   * Now we need to see if we need to queue a sack or just start the
2587
   * timer (if allowed).
2588
   */
2589
4.62k
  if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2590
    /*
2591
     * Ok special case, in SHUTDOWN-SENT case. here we
2592
     * maker sure SACK timer is off and instead send a
2593
     * SHUTDOWN and a SACK
2594
     */
2595
0
    if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2596
0
      sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2597
0
                      stcb->sctp_ep, stcb, NULL,
2598
0
                      SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2599
0
    }
2600
0
    sctp_send_shutdown(stcb,
2601
0
                       ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2602
0
    if (is_a_gap) {
2603
0
      sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2604
0
    }
2605
4.62k
  } else {
2606
    /*
2607
     * CMT DAC algorithm: increase number of packets
2608
     * received since last ack
2609
     */
2610
4.62k
    stcb->asoc.cmt_dac_pkts_rcvd++;
2611
2612
4.62k
    if ((stcb->asoc.send_sack == 1) ||      /* We need to send a SACK */
2613
211
        ((was_a_gap) && (is_a_gap == 0)) ||  /* was a gap, but no
2614
                                             * longer is one */
2615
211
        (stcb->asoc.numduptsns) ||          /* we have dup's */
2616
211
        (is_a_gap) ||                       /* is still a gap */
2617
92
        (stcb->asoc.delayed_ack == 0) ||    /* Delayed sack disabled */
2618
4.53k
        (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)) { /* hit limit of pkts */
2619
4.53k
      if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2620
0
          (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2621
0
          (stcb->asoc.send_sack == 0) &&
2622
0
          (stcb->asoc.numduptsns == 0) &&
2623
0
          (stcb->asoc.delayed_ack) &&
2624
0
          (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2625
        /*
2626
         * CMT DAC algorithm: With CMT,
2627
         * delay acks even in the face of
2628
         * reordering. Therefore, if acks
2629
         * that do not have to be sent
2630
         * because of the above reasons,
2631
         * will be delayed. That is, acks
2632
         * that would have been sent due to
2633
         * gap reports will be delayed with
2634
         * DAC. Start the delayed ack timer.
2635
         */
2636
0
        sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2637
0
                         stcb->sctp_ep, stcb, NULL);
2638
4.53k
      } else {
2639
        /*
2640
         * Ok we must build a SACK since the
2641
         * timer is pending, we got our
2642
         * first packet OR there are gaps or
2643
         * duplicates.
2644
         */
2645
4.53k
        sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2646
4.53k
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
2647
4.53k
        sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2648
4.53k
      }
2649
4.53k
    } else {
2650
92
      if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2651
92
        sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2652
92
                         stcb->sctp_ep, stcb, NULL);
2653
92
      }
2654
92
    }
2655
4.62k
  }
2656
4.62k
}
2657
2658
int
2659
sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2660
                  struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2661
                  struct sctp_nets *net, uint32_t *high_tsn)
2662
4.67k
{
2663
4.67k
  struct sctp_chunkhdr *ch, chunk_buf;
2664
4.67k
  struct sctp_association *asoc;
2665
4.67k
  int num_chunks = 0; /* number of control chunks processed */
2666
4.67k
  int stop_proc = 0;
2667
4.67k
  int break_flag, last_chunk;
2668
4.67k
  int abort_flag = 0, was_a_gap;
2669
4.67k
  struct mbuf *m;
2670
4.67k
  uint32_t highest_tsn;
2671
4.67k
  uint16_t chk_length;
2672
2673
  /* set the rwnd */
2674
4.67k
  sctp_set_rwnd(stcb, &stcb->asoc);
2675
2676
4.67k
  m = *mm;
2677
4.67k
  SCTP_TCB_LOCK_ASSERT(stcb);
2678
4.67k
  asoc = &stcb->asoc;
2679
4.67k
  if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2680
60
    highest_tsn = asoc->highest_tsn_inside_nr_map;
2681
4.61k
  } else {
2682
4.61k
    highest_tsn = asoc->highest_tsn_inside_map;
2683
4.61k
  }
2684
4.67k
  was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2685
  /*
2686
   * setup where we got the last DATA packet from for any SACK that
2687
   * may need to go out. Don't bump the net. This is done ONLY when a
2688
   * chunk is assigned.
2689
   */
2690
4.67k
  asoc->last_data_chunk_from = net;
2691
2692
  /*-
2693
   * Now before we proceed we must figure out if this is a wasted
2694
   * cluster... i.e. it is a small packet sent in and yet the driver
2695
   * underneath allocated a full cluster for it. If so we must copy it
2696
   * to a smaller mbuf and free up the cluster mbuf. This will help
2697
   * with cluster starvation.
2698
   */
2699
4.67k
  if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2700
    /* we only handle mbufs that are singletons.. not chains */
2701
2.04k
    m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2702
2.04k
    if (m) {
2703
      /* ok lets see if we can copy the data up */
2704
2.04k
      caddr_t *from, *to;
2705
      /* get the pointers and copy */
2706
2.04k
      to = mtod(m, caddr_t *);
2707
2.04k
      from = mtod((*mm), caddr_t *);
2708
2.04k
      memcpy(to, from, SCTP_BUF_LEN((*mm)));
2709
      /* copy the length and free up the old */
2710
2.04k
      SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2711
2.04k
      sctp_m_freem(*mm);
2712
      /* success, back copy */
2713
2.04k
      *mm = m;
2714
2.04k
    } else {
2715
      /* We are in trouble in the mbuf world .. yikes */
2716
0
      m = *mm;
2717
0
    }
2718
2.04k
  }
2719
  /* get pointer to the first chunk header */
2720
4.67k
  ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2721
4.67k
                                             sizeof(struct sctp_chunkhdr),
2722
4.67k
                                             (uint8_t *)&chunk_buf);
2723
4.67k
  if (ch == NULL) {
2724
0
    return (1);
2725
0
  }
2726
  /*
2727
   * process all DATA chunks...
2728
   */
2729
4.67k
  *high_tsn = asoc->cumulative_tsn;
2730
4.67k
  break_flag = 0;
2731
4.67k
  asoc->data_pkts_seen++;
2732
19.2k
  while (stop_proc == 0) {
2733
    /* validate chunk length */
2734
15.4k
    chk_length = ntohs(ch->chunk_length);
2735
15.4k
    if (length - *offset < chk_length) {
2736
      /* all done, mutulated chunk */
2737
651
      stop_proc = 1;
2738
651
      continue;
2739
651
    }
2740
14.7k
    if ((asoc->idata_supported == 1) &&
2741
6.46k
        (ch->chunk_type == SCTP_DATA)) {
2742
226
      struct mbuf *op_err;
2743
226
      char msg[SCTP_DIAG_INFO_LEN];
2744
2745
226
      SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2746
226
      op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2747
226
      stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2748
226
      sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2749
226
      return (2);
2750
226
    }
2751
14.5k
    if ((asoc->idata_supported == 0) &&
2752
8.31k
        (ch->chunk_type == SCTP_IDATA)) {
2753
3
      struct mbuf *op_err;
2754
3
      char msg[SCTP_DIAG_INFO_LEN];
2755
2756
3
      SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2757
3
      op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2758
3
      stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2759
3
      sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2760
3
      return (2);
2761
3
    }
2762
14.5k
    if ((ch->chunk_type == SCTP_DATA) ||
2763
11.8k
        (ch->chunk_type == SCTP_IDATA)) {
2764
11.8k
      uint16_t clen;
2765
2766
11.8k
      if (ch->chunk_type == SCTP_DATA) {
2767
7.86k
        clen = sizeof(struct sctp_data_chunk);
2768
7.86k
      } else {
2769
3.99k
        clen = sizeof(struct sctp_idata_chunk);
2770
3.99k
      }
2771
11.8k
      if (chk_length < clen) {
2772
        /*
2773
         * Need to send an abort since we had a
2774
         * invalid data chunk.
2775
         */
2776
111
        struct mbuf *op_err;
2777
111
        char msg[SCTP_DIAG_INFO_LEN];
2778
2779
111
        SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
2780
111
                      ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2781
111
                      chk_length);
2782
111
        op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2783
111
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2784
111
        sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2785
111
        return (2);
2786
111
      }
2787
#ifdef SCTP_AUDITING_ENABLED
2788
      sctp_audit_log(0xB1, 0);
2789
#endif
2790
11.7k
      if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2791
1.87k
        last_chunk = 1;
2792
9.87k
      } else {
2793
9.87k
        last_chunk = 0;
2794
9.87k
      }
2795
11.7k
      if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2796
11.7k
                  chk_length, net, high_tsn, &abort_flag, &break_flag,
2797
11.7k
                  last_chunk, ch->chunk_type)) {
2798
5.79k
        num_chunks++;
2799
5.79k
      }
2800
11.7k
      if (abort_flag)
2801
413
        return (2);
2802
2803
11.3k
      if (break_flag) {
2804
        /*
2805
         * Set because of out of rwnd space and no
2806
         * drop rep space left.
2807
         */
2808
12
        stop_proc = 1;
2809
12
        continue;
2810
12
      }
2811
11.3k
    } else {
2812
      /* not a data chunk in the data region */
2813
2.68k
      switch (ch->chunk_type) {
2814
3
      case SCTP_INITIATION:
2815
5
      case SCTP_INITIATION_ACK:
2816
9
      case SCTP_SELECTIVE_ACK:
2817
10
      case SCTP_NR_SELECTIVE_ACK:
2818
19
      case SCTP_HEARTBEAT_REQUEST:
2819
20
      case SCTP_HEARTBEAT_ACK:
2820
21
      case SCTP_ABORT_ASSOCIATION:
2821
23
      case SCTP_SHUTDOWN:
2822
25
      case SCTP_SHUTDOWN_ACK:
2823
37
      case SCTP_OPERATION_ERROR:
2824
38
      case SCTP_COOKIE_ECHO:
2825
40
      case SCTP_COOKIE_ACK:
2826
42
      case SCTP_ECN_ECHO:
2827
43
      case SCTP_ECN_CWR:
2828
45
      case SCTP_SHUTDOWN_COMPLETE:
2829
46
      case SCTP_AUTHENTICATION:
2830
49
      case SCTP_ASCONF_ACK:
2831
50
      case SCTP_PACKET_DROPPED:
2832
52
      case SCTP_STREAM_RESET:
2833
57
      case SCTP_FORWARD_CUM_TSN:
2834
60
      case SCTP_ASCONF:
2835
60
      {
2836
        /*
2837
         * Now, what do we do with KNOWN chunks that
2838
         * are NOT in the right place?
2839
         *
2840
         * For now, I do nothing but ignore them. We
2841
         * may later want to add sysctl stuff to
2842
         * switch out and do either an ABORT() or
2843
         * possibly process them.
2844
         */
2845
60
        struct mbuf *op_err;
2846
60
        char msg[SCTP_DIAG_INFO_LEN];
2847
2848
60
        SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2849
60
                      ch->chunk_type);
2850
60
        op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2851
60
        sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2852
60
        return (2);
2853
57
      }
2854
2.62k
      default:
2855
        /*
2856
         * Unknown chunk type: use bit rules after
2857
         * checking length
2858
         */
2859
2.62k
        if (chk_length < sizeof(struct sctp_chunkhdr)) {
2860
          /*
2861
           * Need to send an abort since we had a
2862
           * invalid chunk.
2863
           */
2864
19
          struct mbuf *op_err;
2865
19
          char msg[SCTP_DIAG_INFO_LEN];
2866
2867
19
          SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
2868
19
          op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2869
19
          stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
2870
19
          sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2871
19
          return (2);
2872
19
        }
2873
2.60k
        if (ch->chunk_type & 0x40) {
2874
          /* Add a error report to the queue */
2875
2.34k
          struct mbuf *op_err;
2876
2.34k
          struct sctp_gen_error_cause *cause;
2877
2878
2.34k
          op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2879
2.34k
                                         0, M_NOWAIT, 1, MT_DATA);
2880
2.34k
          if (op_err != NULL) {
2881
2.34k
            cause  = mtod(op_err, struct sctp_gen_error_cause *);
2882
2.34k
            cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2883
2.34k
            cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2884
2.34k
            SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2885
2.34k
            SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2886
2.34k
            if (SCTP_BUF_NEXT(op_err) != NULL) {
2887
2.34k
              sctp_queue_op_err(stcb, op_err);
2888
2.34k
            } else {
2889
0
              sctp_m_freem(op_err);
2890
0
            }
2891
2.34k
          }
2892
2.34k
        }
2893
2.60k
        if ((ch->chunk_type & 0x80) == 0) {
2894
          /* discard the rest of this packet */
2895
19
          stop_proc = 1;
2896
19
        }  /* else skip this bad chunk and
2897
           * continue... */
2898
2.60k
        break;
2899
2.68k
      } /* switch of chunk type */
2900
2.68k
    }
2901
13.9k
    *offset += SCTP_SIZE32(chk_length);
2902
13.9k
    if ((*offset >= length) || stop_proc) {
2903
      /* no more data left in the mbuf chain */
2904
3.05k
      stop_proc = 1;
2905
3.05k
      continue;
2906
3.05k
    }
2907
10.8k
    ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2908
10.8k
                                               sizeof(struct sctp_chunkhdr),
2909
10.8k
                                               (uint8_t *)&chunk_buf);
2910
10.8k
    if (ch == NULL) {
2911
124
      *offset = length;
2912
124
      stop_proc = 1;
2913
124
      continue;
2914
124
    }
2915
10.8k
  }
2916
3.84k
  if (break_flag) {
2917
    /*
2918
     * we need to report rwnd overrun drops.
2919
     */
2920
12
    sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2921
12
  }
2922
3.84k
  if (num_chunks) {
2923
    /*
2924
     * Did we get data, if so update the time for auto-close and
2925
     * give peer credit for being alive.
2926
     */
2927
1.48k
    SCTP_STAT_INCR(sctps_recvpktwithdata);
2928
1.48k
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2929
0
      sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2930
0
               stcb->asoc.overall_error_count,
2931
0
               0,
2932
0
               SCTP_FROM_SCTP_INDATA,
2933
0
               __LINE__);
2934
0
    }
2935
1.48k
    stcb->asoc.overall_error_count = 0;
2936
1.48k
    (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2937
1.48k
  }
2938
  /* now service all of the reassm queue if needed */
2939
3.84k
  if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2940
    /* Assure that we ack right away */
2941
0
    stcb->asoc.send_sack = 1;
2942
0
  }
2943
  /* Start a sack timer or QUEUE a SACK for sending */
2944
3.84k
  sctp_sack_check(stcb, was_a_gap);
2945
3.84k
  return (0);
2946
4.67k
}
2947
2948
static int
2949
sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2950
         uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2951
         int *num_frs,
2952
         uint32_t *biggest_newly_acked_tsn,
2953
         uint32_t  *this_sack_lowest_newack,
2954
         int *rto_ok)
2955
0
{
2956
0
  struct sctp_tmit_chunk *tp1;
2957
0
  unsigned int theTSN;
2958
0
  int j, wake_him = 0, circled = 0;
2959
2960
  /* Recover the tp1 we last saw */
2961
0
  tp1 = *p_tp1;
2962
0
  if (tp1 == NULL) {
2963
0
    tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2964
0
  }
2965
0
  for (j = frag_strt; j <= frag_end; j++) {
2966
0
    theTSN = j + last_tsn;
2967
0
    while (tp1) {
2968
0
      if (tp1->rec.data.doing_fast_retransmit)
2969
0
        (*num_frs) += 1;
2970
2971
      /*-
2972
       * CMT: CUCv2 algorithm. For each TSN being
2973
       * processed from the sent queue, track the
2974
       * next expected pseudo-cumack, or
2975
       * rtx_pseudo_cumack, if required. Separate
2976
       * cumack trackers for first transmissions,
2977
       * and retransmissions.
2978
       */
2979
0
      if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2980
0
          (tp1->whoTo->find_pseudo_cumack == 1) &&
2981
0
          (tp1->snd_count == 1)) {
2982
0
        tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2983
0
        tp1->whoTo->find_pseudo_cumack = 0;
2984
0
      }
2985
0
      if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2986
0
          (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2987
0
          (tp1->snd_count > 1)) {
2988
0
        tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2989
0
        tp1->whoTo->find_rtx_pseudo_cumack = 0;
2990
0
      }
2991
0
      if (tp1->rec.data.tsn == theTSN) {
2992
0
        if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2993
          /*-
2994
           * must be held until
2995
           * cum-ack passes
2996
           */
2997
0
          if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2998
            /*-
2999
             * If it is less than RESEND, it is
3000
             * now no-longer in flight.
3001
             * Higher values may already be set
3002
             * via previous Gap Ack Blocks...
3003
             * i.e. ACKED or RESEND.
3004
             */
3005
0
            if (SCTP_TSN_GT(tp1->rec.data.tsn,
3006
0
                            *biggest_newly_acked_tsn)) {
3007
0
              *biggest_newly_acked_tsn = tp1->rec.data.tsn;
3008
0
            }
3009
            /*-
3010
             * CMT: SFR algo (and HTNA) - set
3011
             * saw_newack to 1 for dest being
3012
             * newly acked. update
3013
             * this_sack_highest_newack if
3014
             * appropriate.
3015
             */
3016
0
            if (tp1->rec.data.chunk_was_revoked == 0)
3017
0
              tp1->whoTo->saw_newack = 1;
3018
3019
0
            if (SCTP_TSN_GT(tp1->rec.data.tsn,
3020
0
                            tp1->whoTo->this_sack_highest_newack)) {
3021
0
              tp1->whoTo->this_sack_highest_newack =
3022
0
                tp1->rec.data.tsn;
3023
0
            }
3024
            /*-
3025
             * CMT DAC algo: also update
3026
             * this_sack_lowest_newack
3027
             */
3028
0
            if (*this_sack_lowest_newack == 0) {
3029
0
              if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3030
0
                sctp_log_sack(*this_sack_lowest_newack,
3031
0
                        last_tsn,
3032
0
                        tp1->rec.data.tsn,
3033
0
                        0,
3034
0
                        0,
3035
0
                        SCTP_LOG_TSN_ACKED);
3036
0
              }
3037
0
              *this_sack_lowest_newack = tp1->rec.data.tsn;
3038
0
            }
3039
            /*-
3040
             * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3041
             * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3042
             * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3043
             * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3044
             * Separate pseudo_cumack trackers for first transmissions and
3045
             * retransmissions.
3046
             */
3047
0
            if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3048
0
              if (tp1->rec.data.chunk_was_revoked == 0) {
3049
0
                tp1->whoTo->new_pseudo_cumack = 1;
3050
0
              }
3051
0
              tp1->whoTo->find_pseudo_cumack = 1;
3052
0
            }
3053
0
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3054
0
              sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3055
0
            }
3056
0
            if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3057
0
              if (tp1->rec.data.chunk_was_revoked == 0) {
3058
0
                tp1->whoTo->new_pseudo_cumack = 1;
3059
0
              }
3060
0
              tp1->whoTo->find_rtx_pseudo_cumack = 1;
3061
0
            }
3062
0
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3063
0
              sctp_log_sack(*biggest_newly_acked_tsn,
3064
0
                      last_tsn,
3065
0
                      tp1->rec.data.tsn,
3066
0
                      frag_strt,
3067
0
                      frag_end,
3068
0
                      SCTP_LOG_TSN_ACKED);
3069
0
            }
3070
0
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3071
0
              sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3072
0
                       tp1->whoTo->flight_size,
3073
0
                       tp1->book_size,
3074
0
                       (uint32_t)(uintptr_t)tp1->whoTo,
3075
0
                       tp1->rec.data.tsn);
3076
0
            }
3077
0
            sctp_flight_size_decrease(tp1);
3078
0
            if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3079
0
              (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3080
0
                                 tp1);
3081
0
            }
3082
0
            sctp_total_flight_decrease(stcb, tp1);
3083
3084
0
            tp1->whoTo->net_ack += tp1->send_size;
3085
0
            if (tp1->snd_count < 2) {
3086
              /*-
3087
               * True non-retransmitted chunk
3088
               */
3089
0
              tp1->whoTo->net_ack2 += tp1->send_size;
3090
3091
              /*-
3092
               * update RTO too ?
3093
               */
3094
0
              if (tp1->do_rtt) {
3095
0
                if (*rto_ok &&
3096
0
                    sctp_calculate_rto(stcb,
3097
0
                                       &stcb->asoc,
3098
0
                                       tp1->whoTo,
3099
0
                                       &tp1->sent_rcv_time,
3100
0
                                       SCTP_RTT_FROM_DATA)) {
3101
0
                  *rto_ok = 0;
3102
0
                }
3103
0
                if (tp1->whoTo->rto_needed == 0) {
3104
0
                  tp1->whoTo->rto_needed = 1;
3105
0
                }
3106
0
                tp1->do_rtt = 0;
3107
0
              }
3108
0
            }
3109
0
          }
3110
0
          if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3111
0
            if (SCTP_TSN_GT(tp1->rec.data.tsn,
3112
0
                            stcb->asoc.this_sack_highest_gap)) {
3113
0
              stcb->asoc.this_sack_highest_gap =
3114
0
                tp1->rec.data.tsn;
3115
0
            }
3116
0
            if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3117
0
              sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3118
#ifdef SCTP_AUDITING_ENABLED
3119
              sctp_audit_log(0xB2,
3120
                       (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3121
#endif
3122
0
            }
3123
0
          }
3124
          /*-
3125
           * All chunks NOT UNSENT fall through here and are marked
3126
           * (leave PR-SCTP ones that are to skip alone though)
3127
           */
3128
0
          if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3129
0
              (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3130
0
            tp1->sent = SCTP_DATAGRAM_MARKED;
3131
0
          }
3132
0
          if (tp1->rec.data.chunk_was_revoked) {
3133
            /* deflate the cwnd */
3134
0
            tp1->whoTo->cwnd -= tp1->book_size;
3135
0
            tp1->rec.data.chunk_was_revoked = 0;
3136
0
          }
3137
          /* NR Sack code here */
3138
0
          if (nr_sacking &&
3139
0
              (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3140
0
            if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3141
0
              stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3142
0
#ifdef INVARIANTS
3143
0
            } else {
3144
0
              panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3145
0
#endif
3146
0
            }
3147
0
            if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3148
0
                (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3149
0
                TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3150
0
              stcb->asoc.trigger_reset = 1;
3151
0
            }
3152
0
            tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3153
0
            if (tp1->data) {
3154
              /* sa_ignore NO_NULL_CHK */
3155
0
              sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3156
0
              sctp_m_freem(tp1->data);
3157
0
              tp1->data = NULL;
3158
0
            }
3159
0
            wake_him++;
3160
0
          }
3161
0
        }
3162
0
        break;
3163
0
      } /* if (tp1->tsn == theTSN) */
3164
0
      if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3165
0
        break;
3166
0
      }
3167
0
      tp1 = TAILQ_NEXT(tp1, sctp_next);
3168
0
      if ((tp1 == NULL) && (circled == 0)) {
3169
0
        circled++;
3170
0
        tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3171
0
      }
3172
0
    } /* end while (tp1) */
3173
0
    if (tp1 == NULL) {
3174
0
      circled = 0;
3175
0
      tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3176
0
    }
3177
    /* In case the fragments were not in order we must reset */
3178
0
  } /* end for (j = fragStart */
3179
0
  *p_tp1 = tp1;
3180
0
  return (wake_him); /* Return value only used for nr-sack */
3181
0
}
3182
3183
static int
3184
sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3185
    uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3186
    uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3187
    int num_seg, int num_nr_seg, int *rto_ok)
3188
0
{
3189
0
  struct sctp_gap_ack_block *frag, block;
3190
0
  struct sctp_tmit_chunk *tp1;
3191
0
  int i;
3192
0
  int num_frs = 0;
3193
0
  int chunk_freed;
3194
0
  int non_revocable;
3195
0
  uint16_t frag_strt, frag_end, prev_frag_end;
3196
3197
0
  tp1 = TAILQ_FIRST(&asoc->sent_queue);
3198
0
  prev_frag_end = 0;
3199
0
  chunk_freed = 0;
3200
3201
0
  for (i = 0; i < (num_seg + num_nr_seg); i++) {
3202
0
    if (i == num_seg) {
3203
0
      prev_frag_end = 0;
3204
0
      tp1 = TAILQ_FIRST(&asoc->sent_queue);
3205
0
    }
3206
0
    frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3207
0
                                                      sizeof(struct sctp_gap_ack_block), (uint8_t *) &block);
3208
0
    *offset += sizeof(block);
3209
0
    if (frag == NULL) {
3210
0
      return (chunk_freed);
3211
0
    }
3212
0
    frag_strt = ntohs(frag->start);
3213
0
    frag_end = ntohs(frag->end);
3214
3215
0
    if (frag_strt > frag_end) {
3216
      /* This gap report is malformed, skip it. */
3217
0
      continue;
3218
0
    }
3219
0
    if (frag_strt <= prev_frag_end) {
3220
      /* This gap report is not in order, so restart. */
3221
0
       tp1 = TAILQ_FIRST(&asoc->sent_queue);
3222
0
    }
3223
0
    if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3224
0
      *biggest_tsn_acked = last_tsn + frag_end;
3225
0
    }
3226
0
    if (i < num_seg) {
3227
0
      non_revocable = 0;
3228
0
    } else {
3229
0
      non_revocable = 1;
3230
0
    }
3231
0
    if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3232
0
                                   non_revocable, &num_frs, biggest_newly_acked_tsn,
3233
0
                                   this_sack_lowest_newack, rto_ok)) {
3234
0
      chunk_freed = 1;
3235
0
    }
3236
0
    prev_frag_end = frag_end;
3237
0
  }
3238
0
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3239
0
    if (num_frs)
3240
0
      sctp_log_fr(*biggest_tsn_acked,
3241
0
                  *biggest_newly_acked_tsn,
3242
0
                  last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3243
0
  }
3244
0
  return (chunk_freed);
3245
0
}
3246
3247
static void
3248
sctp_check_for_revoked(struct sctp_tcb *stcb,
3249
           struct sctp_association *asoc, uint32_t cumack,
3250
           uint32_t biggest_tsn_acked)
3251
0
{
3252
0
  struct sctp_tmit_chunk *tp1;
3253
3254
0
  TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3255
0
    if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3256
      /*
3257
       * ok this guy is either ACK or MARKED. If it is
3258
       * ACKED it has been previously acked but not this
3259
       * time i.e. revoked.  If it is MARKED it was ACK'ed
3260
       * again.
3261
       */
3262
0
      if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3263
0
        break;
3264
0
      }
3265
0
      if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3266
        /* it has been revoked */
3267
0
        tp1->sent = SCTP_DATAGRAM_SENT;
3268
0
        tp1->rec.data.chunk_was_revoked = 1;
3269
        /* We must add this stuff back in to
3270
         * assure timers and such get started.
3271
         */
3272
0
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3273
0
          sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3274
0
                   tp1->whoTo->flight_size,
3275
0
                   tp1->book_size,
3276
0
                   (uint32_t)(uintptr_t)tp1->whoTo,
3277
0
                   tp1->rec.data.tsn);
3278
0
        }
3279
0
        sctp_flight_size_increase(tp1);
3280
0
        sctp_total_flight_increase(stcb, tp1);
3281
        /* We inflate the cwnd to compensate for our
3282
         * artificial inflation of the flight_size.
3283
         */
3284
0
        tp1->whoTo->cwnd += tp1->book_size;
3285
0
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3286
0
          sctp_log_sack(asoc->last_acked_seq,
3287
0
                  cumack,
3288
0
                  tp1->rec.data.tsn,
3289
0
                  0,
3290
0
                  0,
3291
0
                  SCTP_LOG_TSN_REVOKED);
3292
0
        }
3293
0
      } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3294
        /* it has been re-acked in this SACK */
3295
0
        tp1->sent = SCTP_DATAGRAM_ACKED;
3296
0
      }
3297
0
    }
3298
0
    if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3299
0
      break;
3300
0
  }
3301
0
}
3302
3303
static void
3304
sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3305
         uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3306
0
{
3307
0
  struct sctp_tmit_chunk *tp1;
3308
0
  int strike_flag = 0;
3309
0
  struct timeval now;
3310
0
  uint32_t sending_seq;
3311
0
  struct sctp_nets *net;
3312
0
  int num_dests_sacked = 0;
3313
3314
  /*
3315
   * select the sending_seq, this is either the next thing ready to be
3316
   * sent but not transmitted, OR, the next seq we assign.
3317
   */
3318
0
  tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3319
0
  if (tp1 == NULL) {
3320
0
    sending_seq = asoc->sending_seq;
3321
0
  } else {
3322
0
    sending_seq = tp1->rec.data.tsn;
3323
0
  }
3324
3325
  /* CMT DAC algo: finding out if SACK is a mixed SACK */
3326
0
  if ((asoc->sctp_cmt_on_off > 0) &&
3327
0
      SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3328
0
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3329
0
      if (net->saw_newack)
3330
0
        num_dests_sacked++;
3331
0
    }
3332
0
  }
3333
0
  if (stcb->asoc.prsctp_supported) {
3334
0
    (void)SCTP_GETTIME_TIMEVAL(&now);
3335
0
  }
3336
0
  TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3337
0
    strike_flag = 0;
3338
0
    if (tp1->no_fr_allowed) {
3339
      /* this one had a timeout or something */
3340
0
      continue;
3341
0
    }
3342
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3343
0
      if (tp1->sent < SCTP_DATAGRAM_RESEND)
3344
0
        sctp_log_fr(biggest_tsn_newly_acked,
3345
0
              tp1->rec.data.tsn,
3346
0
              tp1->sent,
3347
0
              SCTP_FR_LOG_CHECK_STRIKE);
3348
0
    }
3349
0
    if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3350
0
        tp1->sent == SCTP_DATAGRAM_UNSENT) {
3351
      /* done */
3352
0
      break;
3353
0
    }
3354
0
    if (stcb->asoc.prsctp_supported) {
3355
0
      if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3356
        /* Is it expired? */
3357
0
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
3358
0
        if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
3359
#else
3360
        if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3361
#endif
3362
          /* Yes so drop it */
3363
0
          if (tp1->data != NULL) {
3364
0
            (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3365
0
                     SCTP_SO_NOT_LOCKED);
3366
0
          }
3367
0
          continue;
3368
0
        }
3369
0
      }
3370
0
    }
3371
0
    if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3372
0
                    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3373
      /* we are beyond the tsn in the sack  */
3374
0
      break;
3375
0
    }
3376
0
    if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3377
      /* either a RESEND, ACKED, or MARKED */
3378
      /* skip */
3379
0
      if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3380
        /* Continue strikin FWD-TSN chunks */
3381
0
        tp1->rec.data.fwd_tsn_cnt++;
3382
0
      }
3383
0
      continue;
3384
0
    }
3385
    /*
3386
     * CMT : SFR algo (covers part of DAC and HTNA as well)
3387
     */
3388
0
    if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3389
      /*
3390
       * No new acks were received for data sent to this
3391
       * dest. Therefore, according to the SFR algo for
3392
       * CMT, no data sent to this dest can be marked for
3393
       * FR using this SACK.
3394
       */
3395
0
      continue;
3396
0
    } else if (tp1->whoTo &&
3397
0
               SCTP_TSN_GT(tp1->rec.data.tsn,
3398
0
                           tp1->whoTo->this_sack_highest_newack) &&
3399
0
               !(accum_moved && asoc->fast_retran_loss_recovery)) {
3400
      /*
3401
       * CMT: New acks were received for data sent to
3402
       * this dest. But no new acks were seen for data
3403
       * sent after tp1. Therefore, according to the SFR
3404
       * algo for CMT, tp1 cannot be marked for FR using
3405
       * this SACK. This step covers part of the DAC algo
3406
       * and the HTNA algo as well.
3407
       */
3408
0
      continue;
3409
0
    }
3410
    /*
3411
     * Here we check to see if we were have already done a FR
3412
     * and if so we see if the biggest TSN we saw in the sack is
3413
     * smaller than the recovery point. If so we don't strike
3414
     * the tsn... otherwise we CAN strike the TSN.
3415
     */
3416
    /*
3417
     * @@@ JRI: Check for CMT
3418
     * if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
3419
     */
3420
0
    if (accum_moved && asoc->fast_retran_loss_recovery) {
3421
      /*
3422
       * Strike the TSN if in fast-recovery and cum-ack
3423
       * moved.
3424
       */
3425
0
      if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3426
0
        sctp_log_fr(biggest_tsn_newly_acked,
3427
0
              tp1->rec.data.tsn,
3428
0
              tp1->sent,
3429
0
              SCTP_FR_LOG_STRIKE_CHUNK);
3430
0
      }
3431
0
      if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3432
0
        tp1->sent++;
3433
0
      }
3434
0
      if ((asoc->sctp_cmt_on_off > 0) &&
3435
0
          SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3436
        /*
3437
         * CMT DAC algorithm: If SACK flag is set to
3438
         * 0, then lowest_newack test will not pass
3439
         * because it would have been set to the
3440
         * cumack earlier. If not already to be
3441
         * rtx'd, If not a mixed sack and if tp1 is
3442
         * not between two sacked TSNs, then mark by
3443
         * one more.
3444
         * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
3445
         * two packets have been received after this missing TSN.
3446
         */
3447
0
        if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3448
0
            SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3449
0
          if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3450
0
            sctp_log_fr(16 + num_dests_sacked,
3451
0
                  tp1->rec.data.tsn,
3452
0
                  tp1->sent,
3453
0
                  SCTP_FR_LOG_STRIKE_CHUNK);
3454
0
          }
3455
0
          tp1->sent++;
3456
0
        }
3457
0
      }
3458
0
    } else if ((tp1->rec.data.doing_fast_retransmit) &&
3459
0
               (asoc->sctp_cmt_on_off == 0)) {
3460
      /*
3461
       * For those that have done a FR we must take
3462
       * special consideration if we strike. I.e the
3463
       * biggest_newly_acked must be higher than the
3464
       * sending_seq at the time we did the FR.
3465
       */
3466
0
      if (
3467
#ifdef SCTP_FR_TO_ALTERNATE
3468
        /*
3469
         * If FR's go to new networks, then we must only do
3470
         * this for singly homed asoc's. However if the FR's
3471
         * go to the same network (Armando's work) then its
3472
         * ok to FR multiple times.
3473
         */
3474
        (asoc->numnets < 2)
3475
#else
3476
0
        (1)
3477
0
#endif
3478
0
        ) {
3479
0
        if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3480
0
                        tp1->rec.data.fast_retran_tsn)) {
3481
          /*
3482
           * Strike the TSN, since this ack is
3483
           * beyond where things were when we
3484
           * did a FR.
3485
           */
3486
0
          if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3487
0
            sctp_log_fr(biggest_tsn_newly_acked,
3488
0
                  tp1->rec.data.tsn,
3489
0
                  tp1->sent,
3490
0
                  SCTP_FR_LOG_STRIKE_CHUNK);
3491
0
          }
3492
0
          if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3493
0
            tp1->sent++;
3494
0
          }
3495
0
          strike_flag = 1;
3496
0
          if ((asoc->sctp_cmt_on_off > 0) &&
3497
0
              SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3498
            /*
3499
             * CMT DAC algorithm: If
3500
             * SACK flag is set to 0,
3501
             * then lowest_newack test
3502
             * will not pass because it
3503
             * would have been set to
3504
             * the cumack earlier. If
3505
             * not already to be rtx'd,
3506
             * If not a mixed sack and
3507
             * if tp1 is not between two
3508
             * sacked TSNs, then mark by
3509
             * one more.
3510
             * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
3511
             * two packets have been received after this missing TSN.
3512
             */
3513
0
            if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3514
0
                (num_dests_sacked == 1) &&
3515
0
                SCTP_TSN_GT(this_sack_lowest_newack,
3516
0
                            tp1->rec.data.tsn)) {
3517
0
              if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3518
0
                sctp_log_fr(32 + num_dests_sacked,
3519
0
                      tp1->rec.data.tsn,
3520
0
                      tp1->sent,
3521
0
                      SCTP_FR_LOG_STRIKE_CHUNK);
3522
0
              }
3523
0
              if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3524
0
                tp1->sent++;
3525
0
              }
3526
0
            }
3527
0
          }
3528
0
        }
3529
0
      }
3530
      /*
3531
       * JRI: TODO: remove code for HTNA algo. CMT's
3532
       * SFR algo covers HTNA.
3533
       */
3534
0
    } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3535
0
                           biggest_tsn_newly_acked)) {
3536
      /*
3537
       * We don't strike these: This is the  HTNA
3538
       * algorithm i.e. we don't strike If our TSN is
3539
       * larger than the Highest TSN Newly Acked.
3540
       */
3541
0
      ;
3542
0
    } else {
3543
      /* Strike the TSN */
3544
0
      if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3545
0
        sctp_log_fr(biggest_tsn_newly_acked,
3546
0
              tp1->rec.data.tsn,
3547
0
              tp1->sent,
3548
0
              SCTP_FR_LOG_STRIKE_CHUNK);
3549
0
      }
3550
0
      if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3551
0
        tp1->sent++;
3552
0
      }
3553
0
      if ((asoc->sctp_cmt_on_off > 0) &&
3554
0
          SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3555
        /*
3556
         * CMT DAC algorithm: If SACK flag is set to
3557
         * 0, then lowest_newack test will not pass
3558
         * because it would have been set to the
3559
         * cumack earlier. If not already to be
3560
         * rtx'd, If not a mixed sack and if tp1 is
3561
         * not between two sacked TSNs, then mark by
3562
         * one more.
3563
         * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
3564
         * two packets have been received after this missing TSN.
3565
         */
3566
0
        if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3567
0
            SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3568
0
          if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3569
0
            sctp_log_fr(48 + num_dests_sacked,
3570
0
                  tp1->rec.data.tsn,
3571
0
                  tp1->sent,
3572
0
                  SCTP_FR_LOG_STRIKE_CHUNK);
3573
0
          }
3574
0
          tp1->sent++;
3575
0
        }
3576
0
      }
3577
0
    }
3578
0
    if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3579
0
      struct sctp_nets *alt;
3580
3581
      /* fix counts and things */
3582
0
      if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3583
0
        sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3584
0
                 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3585
0
                 tp1->book_size,
3586
0
                 (uint32_t)(uintptr_t)tp1->whoTo,
3587
0
                 tp1->rec.data.tsn);
3588
0
      }
3589
0
      if (tp1->whoTo) {
3590
0
        tp1->whoTo->net_ack++;
3591
0
        sctp_flight_size_decrease(tp1);
3592
0
        if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3593
0
          (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3594
0
                             tp1);
3595
0
        }
3596
0
      }
3597
3598
0
      if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3599
0
        sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3600
0
                asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3601
0
      }
3602
      /* add back to the rwnd */
3603
0
      asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3604
3605
      /* remove from the total flight */
3606
0
      sctp_total_flight_decrease(stcb, tp1);
3607
3608
0
      if ((stcb->asoc.prsctp_supported) &&
3609
0
          (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3610
        /* Has it been retransmitted tv_sec times? - we store the retran count there. */
3611
0
        if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3612
          /* Yes, so drop it */
3613
0
          if (tp1->data != NULL) {
3614
0
            (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3615
0
                     SCTP_SO_NOT_LOCKED);
3616
0
          }
3617
          /* Make sure to flag we had a FR */
3618
0
          if (tp1->whoTo != NULL) {
3619
0
            tp1->whoTo->net_ack++;
3620
0
          }
3621
0
          continue;
3622
0
        }
3623
0
      }
3624
      /* SCTP_PRINTF("OK, we are now ready to FR this guy\n"); */
3625
0
      if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3626
0
        sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3627
0
              0, SCTP_FR_MARKED);
3628
0
      }
3629
0
      if (strike_flag) {
3630
        /* This is a subsequent FR */
3631
0
        SCTP_STAT_INCR(sctps_sendmultfastretrans);
3632
0
      }
3633
0
      sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3634
0
      if (asoc->sctp_cmt_on_off > 0) {
3635
        /*
3636
         * CMT: Using RTX_SSTHRESH policy for CMT.
3637
         * If CMT is being used, then pick dest with
3638
         * largest ssthresh for any retransmission.
3639
         */
3640
0
        tp1->no_fr_allowed = 1;
3641
0
        alt = tp1->whoTo;
3642
        /*sa_ignore NO_NULL_CHK*/
3643
0
        if (asoc->sctp_cmt_pf > 0) {
3644
          /* JRS 5/18/07 - If CMT PF is on, use the PF version of find_alt_net() */
3645
0
          alt = sctp_find_alternate_net(stcb, alt, 2);
3646
0
        } else {
3647
          /* JRS 5/18/07 - If only CMT is on, use the CMT version of find_alt_net() */
3648
                                        /*sa_ignore NO_NULL_CHK*/
3649
0
          alt = sctp_find_alternate_net(stcb, alt, 1);
3650
0
        }
3651
0
        if (alt == NULL) {
3652
0
          alt = tp1->whoTo;
3653
0
        }
3654
        /*
3655
         * CUCv2: If a different dest is picked for
3656
         * the retransmission, then new
3657
         * (rtx-)pseudo_cumack needs to be tracked
3658
         * for orig dest. Let CUCv2 track new (rtx-)
3659
         * pseudo-cumack always.
3660
         */
3661
0
        if (tp1->whoTo) {
3662
0
          tp1->whoTo->find_pseudo_cumack = 1;
3663
0
          tp1->whoTo->find_rtx_pseudo_cumack = 1;
3664
0
        }
3665
0
      } else {/* CMT is OFF */
3666
#ifdef SCTP_FR_TO_ALTERNATE
3667
        /* Can we find an alternate? */
3668
        alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3669
#else
3670
        /*
3671
         * default behavior is to NOT retransmit
3672
         * FR's to an alternate. Armando Caro's
3673
         * paper details why.
3674
         */
3675
0
        alt = tp1->whoTo;
3676
0
#endif
3677
0
      }
3678
3679
0
      tp1->rec.data.doing_fast_retransmit = 1;
3680
      /* mark the sending seq for possible subsequent FR's */
3681
      /*
3682
       * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3683
       * (uint32_t)tpi->rec.data.tsn);
3684
       */
3685
0
      if (TAILQ_EMPTY(&asoc->send_queue)) {
3686
        /*
3687
         * If the queue of send is empty then its
3688
         * the next sequence number that will be
3689
         * assigned so we subtract one from this to
3690
         * get the one we last sent.
3691
         */
3692
0
        tp1->rec.data.fast_retran_tsn = sending_seq;
3693
0
      } else {
3694
        /*
3695
         * If there are chunks on the send queue
3696
         * (unsent data that has made it from the
3697
         * stream queues but not out the door, we
3698
         * take the first one (which will have the
3699
         * lowest TSN) and subtract one to get the
3700
         * one we last sent.
3701
         */
3702
0
        struct sctp_tmit_chunk *ttt;
3703
3704
0
        ttt = TAILQ_FIRST(&asoc->send_queue);
3705
0
        tp1->rec.data.fast_retran_tsn =
3706
0
          ttt->rec.data.tsn;
3707
0
      }
3708
3709
0
      if (tp1->do_rtt) {
3710
        /*
3711
         * this guy had a RTO calculation pending on
3712
         * it, cancel it
3713
         */
3714
0
        if ((tp1->whoTo != NULL) &&
3715
0
            (tp1->whoTo->rto_needed == 0)) {
3716
0
          tp1->whoTo->rto_needed = 1;
3717
0
        }
3718
0
        tp1->do_rtt = 0;
3719
0
      }
3720
0
      if (alt != tp1->whoTo) {
3721
        /* yes, there is an alternate. */
3722
0
        sctp_free_remote_addr(tp1->whoTo);
3723
        /*sa_ignore FREED_MEMORY*/
3724
0
        tp1->whoTo = alt;
3725
0
        atomic_add_int(&alt->ref_count, 1);
3726
0
      }
3727
0
    }
3728
0
  }
3729
0
}
3730
3731
struct sctp_tmit_chunk *
3732
sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3733
    struct sctp_association *asoc)
3734
0
{
3735
0
  struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3736
0
  struct timeval now;
3737
0
  int now_filled = 0;
3738
3739
0
  if (asoc->prsctp_supported == 0) {
3740
0
    return (NULL);
3741
0
  }
3742
0
  TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3743
0
    if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3744
0
        tp1->sent != SCTP_DATAGRAM_RESEND &&
3745
0
        tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3746
      /* no chance to advance, out of here */
3747
0
      break;
3748
0
    }
3749
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3750
0
      if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3751
0
          (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3752
0
        sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3753
0
                 asoc->advanced_peer_ack_point,
3754
0
                 tp1->rec.data.tsn, 0, 0);
3755
0
      }
3756
0
    }
3757
0
    if (!PR_SCTP_ENABLED(tp1->flags)) {
3758
      /*
3759
       * We can't fwd-tsn past any that are reliable aka
3760
       * retransmitted until the asoc fails.
3761
       */
3762
0
      break;
3763
0
    }
3764
0
    if (!now_filled) {
3765
0
      (void)SCTP_GETTIME_TIMEVAL(&now);
3766
0
      now_filled = 1;
3767
0
    }
3768
    /*
3769
     * now we got a chunk which is marked for another
3770
     * retransmission to a PR-stream but has run out its chances
3771
     * already maybe OR has been marked to skip now. Can we skip
3772
     * it if its a resend?
3773
     */
3774
0
    if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3775
0
        (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3776
      /*
3777
       * Now is this one marked for resend and its time is
3778
       * now up?
3779
       */
3780
0
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
3781
0
      if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
3782
#else
3783
      if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3784
#endif
3785
        /* Yes so drop it */
3786
0
        if (tp1->data) {
3787
0
          (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3788
0
              1, SCTP_SO_NOT_LOCKED);
3789
0
        }
3790
0
      } else {
3791
        /*
3792
         * No, we are done when hit one for resend
3793
         * whos time as not expired.
3794
         */
3795
0
        break;
3796
0
      }
3797
0
    }
3798
    /*
3799
     * Ok now if this chunk is marked to drop it we can clean up
3800
     * the chunk, advance our peer ack point and we can check
3801
     * the next chunk.
3802
     */
3803
0
    if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3804
0
        (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3805
      /* advance PeerAckPoint goes forward */
3806
0
      if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3807
0
        asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3808
0
        a_adv = tp1;
3809
0
      } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3810
        /* No update but we do save the chk */
3811
0
        a_adv = tp1;
3812
0
      }
3813
0
    } else {
3814
      /*
3815
       * If it is still in RESEND we can advance no
3816
       * further
3817
       */
3818
0
      break;
3819
0
    }
3820
0
  }
3821
0
  return (a_adv);
3822
0
}
3823
3824
static int
3825
sctp_fs_audit(struct sctp_association *asoc)
3826
0
{
3827
0
  struct sctp_tmit_chunk *chk;
3828
0
  int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3829
0
  int ret;
3830
#ifndef INVARIANTS
3831
  int entry_flight, entry_cnt;
3832
#endif
3833
3834
0
  ret = 0;
3835
#ifndef INVARIANTS
3836
  entry_flight = asoc->total_flight;
3837
  entry_cnt = asoc->total_flight_count;
3838
#endif
3839
0
  if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3840
0
    return (0);
3841
3842
0
  TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3843
0
    if (chk->sent < SCTP_DATAGRAM_RESEND) {
3844
0
      SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3845
0
                  chk->rec.data.tsn,
3846
0
                  chk->send_size,
3847
0
                  chk->snd_count);
3848
0
      inflight++;
3849
0
    } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3850
0
      resend++;
3851
0
    } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3852
0
      inbetween++;
3853
0
    } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3854
0
      above++;
3855
0
    } else {
3856
0
      acked++;
3857
0
    }
3858
0
  }
3859
3860
0
  if ((inflight > 0) || (inbetween > 0)) {
3861
0
#ifdef INVARIANTS
3862
0
    panic("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d",
3863
0
          inflight, inbetween, resend, above, acked);
3864
#else
3865
    SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3866
                entry_flight, entry_cnt);
3867
    SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3868
                inflight, inbetween, resend, above, acked);
3869
    ret = 1;
3870
#endif
3871
0
  }
3872
0
  return (ret);
3873
0
}
3874
3875
static void
3876
sctp_window_probe_recovery(struct sctp_tcb *stcb,
3877
                           struct sctp_association *asoc,
3878
                           struct sctp_tmit_chunk *tp1)
3879
0
{
3880
0
  tp1->window_probe = 0;
3881
0
  if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3882
    /* TSN's skipped we do NOT move back. */
3883
0
    sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3884
0
             tp1->whoTo ? tp1->whoTo->flight_size : 0,
3885
0
             tp1->book_size,
3886
0
             (uint32_t)(uintptr_t)tp1->whoTo,
3887
0
             tp1->rec.data.tsn);
3888
0
    return;
3889
0
  }
3890
  /* First setup this by shrinking flight */
3891
0
  if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3892
0
    (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3893
0
                       tp1);
3894
0
  }
3895
0
  sctp_flight_size_decrease(tp1);
3896
0
  sctp_total_flight_decrease(stcb, tp1);
3897
  /* Now mark for resend */
3898
0
  tp1->sent = SCTP_DATAGRAM_RESEND;
3899
0
  sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3900
3901
0
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3902
0
    sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3903
0
             tp1->whoTo->flight_size,
3904
0
             tp1->book_size,
3905
0
             (uint32_t)(uintptr_t)tp1->whoTo,
3906
0
             tp1->rec.data.tsn);
3907
0
  }
3908
0
}
3909
3910
void
3911
sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3912
                         uint32_t rwnd, int *abort_now, int ecne_seen)
3913
778
{
3914
778
  struct sctp_nets *net;
3915
778
  struct sctp_association *asoc;
3916
778
  struct sctp_tmit_chunk *tp1, *tp2;
3917
778
  uint32_t old_rwnd;
3918
778
  int win_probe_recovery = 0;
3919
778
  int win_probe_recovered = 0;
3920
778
  int j, done_once = 0;
3921
778
  int rto_ok = 1;
3922
778
  uint32_t send_s;
3923
3924
778
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3925
0
    sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3926
0
                   rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3927
0
  }
3928
778
  SCTP_TCB_LOCK_ASSERT(stcb);
3929
#ifdef SCTP_ASOCLOG_OF_TSNS
3930
  stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3931
  stcb->asoc.cumack_log_at++;
3932
  if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3933
    stcb->asoc.cumack_log_at = 0;
3934
  }
3935
#endif
3936
778
  asoc = &stcb->asoc;
3937
778
  old_rwnd = asoc->peers_rwnd;
3938
778
  if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3939
    /* old ack */
3940
662
    return;
3941
662
  } else if (asoc->last_acked_seq == cumack) {
3942
    /* Window update sack */
3943
0
    asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3944
0
                (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3945
0
    if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3946
      /* SWS sender side engages */
3947
0
      asoc->peers_rwnd = 0;
3948
0
    }
3949
0
    if (asoc->peers_rwnd > old_rwnd) {
3950
0
      goto again;
3951
0
    }
3952
0
    return;
3953
0
  }
3954
3955
  /* First setup for CC stuff */
3956
778
  TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3957
340
    if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3958
      /* Drag along the window_tsn for cwr's */
3959
340
      net->cwr_window_tsn = cumack;
3960
340
    }
3961
340
    net->prev_cwnd = net->cwnd;
3962
340
    net->net_ack = 0;
3963
340
    net->net_ack2 = 0;
3964
3965
    /*
3966
     * CMT: Reset CUC and Fast recovery algo variables before
3967
     * SACK processing
3968
     */
3969
340
    net->new_pseudo_cumack = 0;
3970
340
    net->will_exit_fast_recovery = 0;
3971
340
    if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3972
0
      (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
3973
0
    }
3974
340
  }
3975
116
  if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3976
56
    tp1 = TAILQ_LAST(&asoc->sent_queue,
3977
56
         sctpchunk_listhead);
3978
56
    send_s = tp1->rec.data.tsn + 1;
3979
60
  } else {
3980
60
    send_s = asoc->sending_seq;
3981
60
  }
3982
116
  if (SCTP_TSN_GE(cumack, send_s)) {
3983
116
    struct mbuf *op_err;
3984
116
    char msg[SCTP_DIAG_INFO_LEN];
3985
3986
116
    *abort_now = 1;
3987
    /* XXX */
3988
116
    SCTP_SNPRINTF(msg, sizeof(msg),
3989
116
                  "Cum ack %8.8x greater or equal than TSN %8.8x",
3990
116
                  cumack, send_s);
3991
116
    op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3992
116
    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3993
116
    sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
3994
116
    return;
3995
116
  }
3996
0
  asoc->this_sack_highest_gap = cumack;
3997
0
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3998
0
    sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3999
0
             stcb->asoc.overall_error_count,
4000
0
             0,
4001
0
             SCTP_FROM_SCTP_INDATA,
4002
0
             __LINE__);
4003
0
  }
4004
0
  stcb->asoc.overall_error_count = 0;
4005
0
  if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4006
    /* process the new consecutive TSN first */
4007
0
    TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4008
0
      if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4009
0
        if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4010
0
          SCTP_PRINTF("Warning, an unsent is now acked?\n");
4011
0
        }
4012
0
        if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4013
          /*
4014
           * If it is less than ACKED, it is
4015
           * now no-longer in flight. Higher
4016
           * values may occur during marking
4017
           */
4018
0
          if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4019
0
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4020
0
              sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4021
0
                       tp1->whoTo->flight_size,
4022
0
                       tp1->book_size,
4023
0
                       (uint32_t)(uintptr_t)tp1->whoTo,
4024
0
                       tp1->rec.data.tsn);
4025
0
            }
4026
0
            sctp_flight_size_decrease(tp1);
4027
0
            if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4028
0
              (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
4029
0
                                 tp1);
4030
0
            }
4031
            /* sa_ignore NO_NULL_CHK */
4032
0
            sctp_total_flight_decrease(stcb, tp1);
4033
0
          }
4034
0
          tp1->whoTo->net_ack += tp1->send_size;
4035
0
          if (tp1->snd_count < 2) {
4036
            /*
4037
             * True non-retransmitted
4038
             * chunk
4039
             */
4040
0
            tp1->whoTo->net_ack2 +=
4041
0
              tp1->send_size;
4042
4043
            /* update RTO too? */
4044
0
            if (tp1->do_rtt) {
4045
0
              if (rto_ok &&
4046
0
                  sctp_calculate_rto(stcb,
4047
0
                         &stcb->asoc,
4048
0
                         tp1->whoTo,
4049
0
                         &tp1->sent_rcv_time,
4050
0
                         SCTP_RTT_FROM_DATA)) {
4051
0
                rto_ok = 0;
4052
0
              }
4053
0
              if (tp1->whoTo->rto_needed == 0) {
4054
0
                tp1->whoTo->rto_needed = 1;
4055
0
              }
4056
0
              tp1->do_rtt = 0;
4057
0
            }
4058
0
          }
4059
          /*
4060
           * CMT: CUCv2 algorithm. From the
4061
           * cumack'd TSNs, for each TSN being
4062
           * acked for the first time, set the
4063
           * following variables for the
4064
           * corresp destination.
4065
           * new_pseudo_cumack will trigger a
4066
           * cwnd update.
4067
           * find_(rtx_)pseudo_cumack will
4068
           * trigger search for the next
4069
           * expected (rtx-)pseudo-cumack.
4070
           */
4071
0
          tp1->whoTo->new_pseudo_cumack = 1;
4072
0
          tp1->whoTo->find_pseudo_cumack = 1;
4073
0
          tp1->whoTo->find_rtx_pseudo_cumack = 1;
4074
0
          if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4075
            /* sa_ignore NO_NULL_CHK */
4076
0
            sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4077
0
          }
4078
0
        }
4079
0
        if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4080
0
          sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4081
0
        }
4082
0
        if (tp1->rec.data.chunk_was_revoked) {
4083
          /* deflate the cwnd */
4084
0
          tp1->whoTo->cwnd -= tp1->book_size;
4085
0
          tp1->rec.data.chunk_was_revoked = 0;
4086
0
        }
4087
0
        if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4088
0
          if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4089
0
            asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4090
0
#ifdef INVARIANTS
4091
0
          } else {
4092
0
            panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4093
0
#endif
4094
0
          }
4095
0
        }
4096
0
        if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4097
0
            (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4098
0
            TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4099
0
          asoc->trigger_reset = 1;
4100
0
        }
4101
0
        TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4102
0
        if (tp1->data) {
4103
          /* sa_ignore NO_NULL_CHK */
4104
0
          sctp_free_bufspace(stcb, asoc, tp1, 1);
4105
0
          sctp_m_freem(tp1->data);
4106
0
          tp1->data = NULL;
4107
0
        }
4108
0
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4109
0
          sctp_log_sack(asoc->last_acked_seq,
4110
0
                  cumack,
4111
0
                  tp1->rec.data.tsn,
4112
0
                  0,
4113
0
                  0,
4114
0
                  SCTP_LOG_FREE_SENT);
4115
0
        }
4116
0
        asoc->sent_queue_cnt--;
4117
0
        sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4118
0
      } else {
4119
0
        break;
4120
0
      }
4121
0
    }
4122
0
  }
4123
0
#if defined(__Userspace__)
4124
0
  if (stcb->sctp_ep->recv_callback) {
4125
0
    if (stcb->sctp_socket) {
4126
0
      uint32_t inqueue_bytes, sb_free_now;
4127
0
      struct sctp_inpcb *inp;
4128
4129
0
      inp = stcb->sctp_ep;
4130
0
      inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
4131
0
      sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
4132
4133
      /* check if the amount free in the send socket buffer crossed the threshold */
4134
0
      if (inp->send_callback &&
4135
0
          (((inp->send_sb_threshold > 0) &&
4136
0
            (sb_free_now >= inp->send_sb_threshold) &&
4137
0
            (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
4138
0
           (inp->send_sb_threshold == 0))) {
4139
0
        atomic_add_int(&stcb->asoc.refcnt, 1);
4140
0
        SCTP_TCB_UNLOCK(stcb);
4141
0
        inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info);
4142
0
        SCTP_TCB_LOCK(stcb);
4143
0
        atomic_subtract_int(&stcb->asoc.refcnt, 1);
4144
0
      }
4145
0
    }
4146
0
  } else if (stcb->sctp_socket) {
4147
#else
4148
  /* sa_ignore NO_NULL_CHK */
4149
  if (stcb->sctp_socket) {
4150
#endif
4151
#if defined(__APPLE__) && !defined(__Userspace__)
4152
    struct socket *so;
4153
4154
#endif
4155
0
    SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4156
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4157
      /* sa_ignore NO_NULL_CHK */
4158
0
      sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4159
0
    }
4160
#if defined(__APPLE__) && !defined(__Userspace__)
4161
    so = SCTP_INP_SO(stcb->sctp_ep);
4162
    atomic_add_int(&stcb->asoc.refcnt, 1);
4163
    SCTP_TCB_UNLOCK(stcb);
4164
    SCTP_SOCKET_LOCK(so, 1);
4165
    SCTP_TCB_LOCK(stcb);
4166
    atomic_subtract_int(&stcb->asoc.refcnt, 1);
4167
    if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4168
      /* assoc was freed while we were unlocked */
4169
      SCTP_SOCKET_UNLOCK(so, 1);
4170
      return;
4171
    }
4172
#endif
4173
0
    sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4174
#if defined(__APPLE__) && !defined(__Userspace__)
4175
    SCTP_SOCKET_UNLOCK(so, 1);
4176
#endif
4177
0
  } else {
4178
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4179
0
      sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4180
0
    }
4181
0
  }
4182
4183
  /* JRS - Use the congestion control given in the CC module */
4184
0
  if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4185
0
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4186
0
      if (net->net_ack2 > 0) {
4187
        /*
4188
         * Karn's rule applies to clearing error count, this
4189
         * is optional.
4190
         */
4191
0
        net->error_count = 0;
4192
0
        if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
4193
          /* addr came good */
4194
0
          net->dest_state |= SCTP_ADDR_REACHABLE;
4195
0
          sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4196
0
                          0, (void *)net, SCTP_SO_NOT_LOCKED);
4197
0
        }
4198
0
        if (net == stcb->asoc.primary_destination) {
4199
0
          if (stcb->asoc.alternate) {
4200
            /* release the alternate, primary is good */
4201
0
            sctp_free_remote_addr(stcb->asoc.alternate);
4202
0
            stcb->asoc.alternate = NULL;
4203
0
          }
4204
0
        }
4205
0
        if (net->dest_state & SCTP_ADDR_PF) {
4206
0
          net->dest_state &= ~SCTP_ADDR_PF;
4207
0
          sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4208
0
                          stcb->sctp_ep, stcb, net,
4209
0
                          SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4210
0
          sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4211
0
          asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4212
          /* Done with this net */
4213
0
          net->net_ack = 0;
4214
0
        }
4215
        /* restore any doubled timers */
4216
0
        net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4217
0
        if (net->RTO < stcb->asoc.minrto) {
4218
0
          net->RTO = stcb->asoc.minrto;
4219
0
        }
4220
0
        if (net->RTO > stcb->asoc.maxrto) {
4221
0
          net->RTO = stcb->asoc.maxrto;
4222
0
        }
4223
0
      }
4224
0
    }
4225
0
    asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4226
0
  }
4227
0
  asoc->last_acked_seq = cumack;
4228
4229
0
  if (TAILQ_EMPTY(&asoc->sent_queue)) {
4230
    /* nothing left in-flight */
4231
0
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4232
0
      net->flight_size = 0;
4233
0
      net->partial_bytes_acked = 0;
4234
0
    }
4235
0
    asoc->total_flight = 0;
4236
0
    asoc->total_flight_count = 0;
4237
0
  }
4238
4239
  /* RWND update */
4240
0
  asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4241
0
              (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4242
0
  if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4243
    /* SWS sender side engages */
4244
0
    asoc->peers_rwnd = 0;
4245
0
  }
4246
0
  if (asoc->peers_rwnd > old_rwnd) {
4247
0
    win_probe_recovery = 1;
4248
0
  }
4249
  /* Now assure a timer where data is queued at */
4250
0
again:
4251
0
  j = 0;
4252
0
  TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4253
0
    if (win_probe_recovery && (net->window_probe)) {
4254
0
      win_probe_recovered = 1;
4255
      /*
4256
       * Find first chunk that was used with window probe
4257
       * and clear the sent
4258
       */
4259
      /* sa_ignore FREED_MEMORY */
4260
0
      TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4261
0
        if (tp1->window_probe) {
4262
          /* move back to data send queue */
4263
0
          sctp_window_probe_recovery(stcb, asoc, tp1);
4264
0
          break;
4265
0
        }
4266
0
      }
4267
0
    }
4268
0
    if (net->flight_size) {
4269
0
      j++;
4270
0
      sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4271
0
      if (net->window_probe) {
4272
0
        net->window_probe = 0;
4273
0
      }
4274
0
    } else {
4275
0
      if (net->window_probe) {
4276
        /* In window probes we must assure a timer is still running there */
4277
0
        net->window_probe = 0;
4278
0
        if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4279
0
          sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4280
0
        }
4281
0
      } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4282
0
        sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4283
0
                        stcb, net,
4284
0
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4285
0
      }
4286
0
    }
4287
0
  }
4288
0
  if ((j == 0) &&
4289
0
      (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4290
0
      (asoc->sent_queue_retran_cnt == 0) &&
4291
0
      (win_probe_recovered == 0) &&
4292
0
      (done_once == 0)) {
4293
    /* huh, this should not happen unless all packets
4294
     * are PR-SCTP and marked to skip of course.
4295
     */
4296
0
    if (sctp_fs_audit(asoc)) {
4297
0
      TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4298
0
        net->flight_size = 0;
4299
0
      }
4300
0
      asoc->total_flight = 0;
4301
0
      asoc->total_flight_count = 0;
4302
0
      asoc->sent_queue_retran_cnt = 0;
4303
0
      TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4304
0
        if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4305
0
          sctp_flight_size_increase(tp1);
4306
0
          sctp_total_flight_increase(stcb, tp1);
4307
0
        } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4308
0
          sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4309
0
        }
4310
0
      }
4311
0
    }
4312
0
    done_once = 1;
4313
0
    goto again;
4314
0
  }
4315
  /**********************************/
4316
  /* Now what about shutdown issues */
4317
  /**********************************/
4318
0
  if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4319
    /* nothing left on sendqueue.. consider done */
4320
    /* clean up */
4321
0
    if ((asoc->stream_queue_cnt == 1) &&
4322
0
        ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4323
0
         (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4324
0
        ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
4325
0
      SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4326
0
    }
4327
0
    if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4328
0
         (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4329
0
        (asoc->stream_queue_cnt == 1) &&
4330
0
        (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4331
0
      struct mbuf *op_err;
4332
4333
0
      *abort_now = 1;
4334
      /* XXX */
4335
0
      op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4336
0
      stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
4337
0
      sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4338
0
      return;
4339
0
    }
4340
0
    if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4341
0
        (asoc->stream_queue_cnt == 0)) {
4342
0
      struct sctp_nets *netp;
4343
4344
0
      if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4345
0
          (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4346
0
        SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4347
0
      }
4348
0
      SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4349
0
      sctp_stop_timers_for_shutdown(stcb);
4350
0
      if (asoc->alternate) {
4351
0
        netp = asoc->alternate;
4352
0
      } else {
4353
0
        netp = asoc->primary_destination;
4354
0
      }
4355
0
      sctp_send_shutdown(stcb, netp);
4356
0
      sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4357
0
           stcb->sctp_ep, stcb, netp);
4358
0
      sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4359
0
           stcb->sctp_ep, stcb, NULL);
4360
0
    } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4361
0
         (asoc->stream_queue_cnt == 0)) {
4362
0
      struct sctp_nets *netp;
4363
4364
0
      SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4365
0
      SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4366
0
      sctp_stop_timers_for_shutdown(stcb);
4367
0
      if (asoc->alternate) {
4368
0
        netp = asoc->alternate;
4369
0
      } else {
4370
0
        netp = asoc->primary_destination;
4371
0
      }
4372
0
      sctp_send_shutdown_ack(stcb, netp);
4373
0
      sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4374
0
           stcb->sctp_ep, stcb, netp);
4375
0
    }
4376
0
  }
4377
  /*********************************************/
4378
  /* Here we perform PR-SCTP procedures        */
4379
  /* (section 4.2)                             */
4380
  /*********************************************/
4381
  /* C1. update advancedPeerAckPoint */
4382
0
  if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4383
0
    asoc->advanced_peer_ack_point = cumack;
4384
0
  }
4385
  /* PR-Sctp issues need to be addressed too */
4386
0
  if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4387
0
    struct sctp_tmit_chunk *lchk;
4388
0
    uint32_t old_adv_peer_ack_point;
4389
4390
0
    old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4391
0
    lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4392
    /* C3. See if we need to send a Fwd-TSN */
4393
0
    if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4394
      /*
4395
       * ISSUE with ECN, see FWD-TSN processing.
4396
       */
4397
0
      if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4398
0
        send_forward_tsn(stcb, asoc);
4399
0
      } else if (lchk) {
4400
        /* try to FR fwd-tsn's that get lost too */
4401
0
        if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4402
0
          send_forward_tsn(stcb, asoc);
4403
0
        }
4404
0
      }
4405
0
    }
4406
0
    for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4407
0
      if (lchk->whoTo != NULL) {
4408
0
        break;
4409
0
      }
4410
0
    }
4411
0
    if (lchk != NULL) {
4412
      /* Assure a timer is up */
4413
0
      sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4414
0
                       stcb->sctp_ep, stcb, lchk->whoTo);
4415
0
    }
4416
0
  }
4417
0
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4418
0
    sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4419
0
             rwnd,
4420
0
             stcb->asoc.peers_rwnd,
4421
0
             stcb->asoc.total_flight,
4422
0
             stcb->asoc.total_output_queue_size);
4423
0
  }
4424
0
}
4425
4426
void
4427
sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4428
                 struct sctp_tcb *stcb,
4429
                 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4430
                 int *abort_now, uint8_t flags,
4431
                 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4432
1.34k
{
4433
1.34k
  struct sctp_association *asoc;
4434
1.34k
  struct sctp_tmit_chunk *tp1, *tp2;
4435
1.34k
  uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4436
1.34k
  uint16_t wake_him = 0;
4437
1.34k
  uint32_t send_s = 0;
4438
1.34k
  long j;
4439
1.34k
  int accum_moved = 0;
4440
1.34k
  int will_exit_fast_recovery = 0;
4441
1.34k
  uint32_t a_rwnd, old_rwnd;
4442
1.34k
  int win_probe_recovery = 0;
4443
1.34k
  int win_probe_recovered = 0;
4444
1.34k
  struct sctp_nets *net = NULL;
4445
1.34k
  int done_once;
4446
1.34k
  int rto_ok = 1;
4447
1.34k
  uint8_t reneged_all = 0;
4448
1.34k
  uint8_t cmt_dac_flag;
4449
  /*
4450
   * we take any chance we can to service our queues since we cannot
4451
   * get awoken when the socket is read from :<
4452
   */
4453
  /*
4454
   * Now perform the actual SACK handling: 1) Verify that it is not an
4455
   * old sack, if so discard. 2) If there is nothing left in the send
4456
   * queue (cum-ack is equal to last acked) then you have a duplicate
4457
   * too, update any rwnd change and verify no timers are running.
4458
   * then return. 3) Process any new consecutive data i.e. cum-ack
4459
   * moved process these first and note that it moved. 4) Process any
4460
   * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4461
   * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4462
   * sync up flightsizes and things, stop all timers and also check
4463
   * for shutdown_pending state. If so then go ahead and send off the
4464
   * shutdown. If in shutdown recv, send off the shutdown-ack and
4465
   * start that timer, Ret. 9) Strike any non-acked things and do FR
4466
   * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4467
   * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4468
   * if in shutdown_recv state.
4469
   */
4470
1.34k
  SCTP_TCB_LOCK_ASSERT(stcb);
4471
  /* CMT DAC algo */
4472
1.34k
  this_sack_lowest_newack = 0;
4473
1.34k
  SCTP_STAT_INCR(sctps_slowpath_sack);
4474
1.34k
  last_tsn = cum_ack;
4475
1.34k
  cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4476
#ifdef SCTP_ASOCLOG_OF_TSNS
4477
  stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4478
  stcb->asoc.cumack_log_at++;
4479
  if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4480
    stcb->asoc.cumack_log_at = 0;
4481
  }
4482
#endif
4483
1.34k
  a_rwnd = rwnd;
4484
4485
1.34k
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4486
0
    sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4487
0
                   rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4488
0
  }
4489
4490
1.34k
  old_rwnd = stcb->asoc.peers_rwnd;
4491
1.34k
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4492
0
    sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4493
0
                   stcb->asoc.overall_error_count,
4494
0
                   0,
4495
0
                   SCTP_FROM_SCTP_INDATA,
4496
0
                   __LINE__);
4497
0
  }
4498
1.34k
  stcb->asoc.overall_error_count = 0;
4499
1.34k
  asoc = &stcb->asoc;
4500
1.34k
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4501
0
    sctp_log_sack(asoc->last_acked_seq,
4502
0
                  cum_ack,
4503
0
                  0,
4504
0
                  num_seg,
4505
0
                  num_dup,
4506
0
                  SCTP_LOG_NEW_SACK);
4507
0
  }
4508
1.34k
  if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4509
0
    uint16_t i;
4510
0
    uint32_t *dupdata, dblock;
4511
4512
0
    for (i = 0; i < num_dup; i++) {
4513
0
      dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4514
0
                                          sizeof(uint32_t), (uint8_t *)&dblock);
4515
0
      if (dupdata == NULL) {
4516
0
        break;
4517
0
      }
4518
0
      sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4519
0
    }
4520
0
  }
4521
  /* reality check */
4522
1.34k
  if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4523
246
    tp1 = TAILQ_LAST(&asoc->sent_queue,
4524
246
         sctpchunk_listhead);
4525
246
    send_s = tp1->rec.data.tsn + 1;
4526
1.10k
  } else {
4527
1.10k
    tp1 = NULL;
4528
1.10k
    send_s = asoc->sending_seq;
4529
1.10k
  }
4530
1.34k
  if (SCTP_TSN_GE(cum_ack, send_s)) {
4531
45
    struct mbuf *op_err;
4532
45
    char msg[SCTP_DIAG_INFO_LEN];
4533
4534
    /*
4535
     * no way, we have not even sent this TSN out yet.
4536
     * Peer is hopelessly messed up with us.
4537
     */
4538
45
    SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4539
45
          cum_ack, send_s);
4540
45
    if (tp1) {
4541
3
      SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4542
3
            tp1->rec.data.tsn, (void *)tp1);
4543
3
    }
4544
45
  hopeless_peer:
4545
45
    *abort_now = 1;
4546
    /* XXX */
4547
45
    SCTP_SNPRINTF(msg, sizeof(msg),
4548
45
                  "Cum ack %8.8x greater or equal than TSN %8.8x",
4549
45
                  cum_ack, send_s);
4550
45
    op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4551
45
    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29;
4552
45
    sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4553
45
    return;
4554
45
  }
4555
  /**********************/
4556
  /* 1) check the range */
4557
  /**********************/
4558
1.30k
  if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4559
    /* acking something behind */
4560
1.30k
    return;
4561
1.30k
  }
4562
4563
  /* update the Rwnd of the peer */
4564
0
  if (TAILQ_EMPTY(&asoc->sent_queue) &&
4565
0
      TAILQ_EMPTY(&asoc->send_queue) &&
4566
0
      (asoc->stream_queue_cnt == 0)) {
4567
    /* nothing left on send/sent and strmq */
4568
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4569
0
      sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4570
0
                        asoc->peers_rwnd, 0, 0, a_rwnd);
4571
0
    }
4572
0
    asoc->peers_rwnd = a_rwnd;
4573
0
    if (asoc->sent_queue_retran_cnt) {
4574
0
      asoc->sent_queue_retran_cnt = 0;
4575
0
    }
4576
0
    if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4577
      /* SWS sender side engages */
4578
0
      asoc->peers_rwnd = 0;
4579
0
    }
4580
    /* stop any timers */
4581
0
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4582
0
      sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4583
0
                      stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4584
0
      net->partial_bytes_acked = 0;
4585
0
      net->flight_size = 0;
4586
0
    }
4587
0
    asoc->total_flight = 0;
4588
0
    asoc->total_flight_count = 0;
4589
0
    return;
4590
0
  }
4591
  /*
4592
   * We init netAckSz and netAckSz2 to 0. These are used to track 2
4593
   * things. The total byte count acked is tracked in netAckSz AND
4594
   * netAck2 is used to track the total bytes acked that are un-
4595
   * ambiguous and were never retransmitted. We track these on a per
4596
   * destination address basis.
4597
   */
4598
0
  TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4599
0
    if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4600
      /* Drag along the window_tsn for cwr's */
4601
0
      net->cwr_window_tsn = cum_ack;
4602
0
    }
4603
0
    net->prev_cwnd = net->cwnd;
4604
0
    net->net_ack = 0;
4605
0
    net->net_ack2 = 0;
4606
4607
    /*
4608
     * CMT: Reset CUC and Fast recovery algo variables before
4609
     * SACK processing
4610
     */
4611
0
    net->new_pseudo_cumack = 0;
4612
0
    net->will_exit_fast_recovery = 0;
4613
0
    if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4614
0
      (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
4615
0
    }
4616
4617
    /*
4618
     * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4619
     * to be greater than the cumack. Also reset saw_newack to 0
4620
     * for all dests.
4621
     */
4622
0
    net->saw_newack = 0;
4623
0
    net->this_sack_highest_newack = last_tsn;
4624
0
  }
4625
  /* process the new consecutive TSN first */
4626
0
  TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4627
0
    if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4628
0
      if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4629
0
        accum_moved = 1;
4630
0
        if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4631
          /*
4632
           * If it is less than ACKED, it is
4633
           * now no-longer in flight. Higher
4634
           * values may occur during marking
4635
           */
4636
0
          if ((tp1->whoTo->dest_state &
4637
0
               SCTP_ADDR_UNCONFIRMED) &&
4638
0
              (tp1->snd_count < 2)) {
4639
            /*
4640
             * If there was no retran
4641
             * and the address is
4642
             * un-confirmed and we sent
4643
             * there and are now
4644
             * sacked.. its confirmed,
4645
             * mark it so.
4646
             */
4647
0
            tp1->whoTo->dest_state &=
4648
0
              ~SCTP_ADDR_UNCONFIRMED;
4649
0
          }
4650
0
          if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4651
0
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4652
0
              sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4653
0
                             tp1->whoTo->flight_size,
4654
0
                             tp1->book_size,
4655
0
                             (uint32_t)(uintptr_t)tp1->whoTo,
4656
0
                             tp1->rec.data.tsn);
4657
0
            }
4658
0
            sctp_flight_size_decrease(tp1);
4659
0
            sctp_total_flight_decrease(stcb, tp1);
4660
0
            if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4661
0
              (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
4662
0
                                 tp1);
4663
0
            }
4664
0
          }
4665
0
          tp1->whoTo->net_ack += tp1->send_size;
4666
4667
          /* CMT SFR and DAC algos */
4668
0
          this_sack_lowest_newack = tp1->rec.data.tsn;
4669
0
          tp1->whoTo->saw_newack = 1;
4670
4671
0
          if (tp1->snd_count < 2) {
4672
            /*
4673
             * True non-retransmitted
4674
             * chunk
4675
             */
4676
0
            tp1->whoTo->net_ack2 +=
4677
0
              tp1->send_size;
4678
4679
            /* update RTO too? */
4680
0
            if (tp1->do_rtt) {
4681
0
              if (rto_ok &&
4682
0
                  sctp_calculate_rto(stcb,
4683
0
                         &stcb->asoc,
4684
0
                         tp1->whoTo,
4685
0
                         &tp1->sent_rcv_time,
4686
0
                         SCTP_RTT_FROM_DATA)) {
4687
0
                rto_ok = 0;
4688
0
              }
4689
0
              if (tp1->whoTo->rto_needed == 0) {
4690
0
                tp1->whoTo->rto_needed = 1;
4691
0
              }
4692
0
              tp1->do_rtt = 0;
4693
0
            }
4694
0
          }
4695
          /*
4696
           * CMT: CUCv2 algorithm. From the
4697
           * cumack'd TSNs, for each TSN being
4698
           * acked for the first time, set the
4699
           * following variables for the
4700
           * corresp destination.
4701
           * new_pseudo_cumack will trigger a
4702
           * cwnd update.
4703
           * find_(rtx_)pseudo_cumack will
4704
           * trigger search for the next
4705
           * expected (rtx-)pseudo-cumack.
4706
           */
4707
0
          tp1->whoTo->new_pseudo_cumack = 1;
4708
0
          tp1->whoTo->find_pseudo_cumack = 1;
4709
0
          tp1->whoTo->find_rtx_pseudo_cumack = 1;
4710
0
          if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4711
0
            sctp_log_sack(asoc->last_acked_seq,
4712
0
                          cum_ack,
4713
0
                          tp1->rec.data.tsn,
4714
0
                          0,
4715
0
                          0,
4716
0
                          SCTP_LOG_TSN_ACKED);
4717
0
          }
4718
0
          if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4719
0
            sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4720
0
          }
4721
0
        }
4722
0
        if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4723
0
          sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4724
#ifdef SCTP_AUDITING_ENABLED
4725
          sctp_audit_log(0xB3,
4726
                         (asoc->sent_queue_retran_cnt & 0x000000ff));
4727
#endif
4728
0
        }
4729
0
        if (tp1->rec.data.chunk_was_revoked) {
4730
          /* deflate the cwnd */
4731
0
          tp1->whoTo->cwnd -= tp1->book_size;
4732
0
          tp1->rec.data.chunk_was_revoked = 0;
4733
0
        }
4734
0
        if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4735
0
          tp1->sent = SCTP_DATAGRAM_ACKED;
4736
0
        }
4737
0
      }
4738
0
    } else {
4739
0
      break;
4740
0
    }
4741
0
  }
4742
0
  biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4743
  /* always set this up to cum-ack */
4744
0
  asoc->this_sack_highest_gap = last_tsn;
4745
4746
0
  if ((num_seg > 0) || (num_nr_seg > 0)) {
4747
    /*
4748
     * thisSackHighestGap will increase while handling NEW
4749
     * segments this_sack_highest_newack will increase while
4750
     * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4751
     * used for CMT DAC algo. saw_newack will also change.
4752
     */
4753
0
    if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4754
0
      &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4755
0
      num_seg, num_nr_seg, &rto_ok)) {
4756
0
      wake_him++;
4757
0
    }
4758
    /*
4759
     * validate the biggest_tsn_acked in the gap acks if
4760
     * strict adherence is wanted.
4761
     */
4762
0
    if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4763
      /*
4764
       * peer is either confused or we are under
4765
       * attack. We must abort.
4766
       */
4767
0
      SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4768
0
            biggest_tsn_acked, send_s);
4769
0
      goto hopeless_peer;
4770
0
    }
4771
0
  }
4772
  /*******************************************/
4773
  /* cancel ALL T3-send timer if accum moved */
4774
  /*******************************************/
4775
0
  if (asoc->sctp_cmt_on_off > 0) {
4776
0
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4777
0
      if (net->new_pseudo_cumack)
4778
0
        sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4779
0
                        stcb, net,
4780
0
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4781
0
    }
4782
0
  } else {
4783
0
    if (accum_moved) {
4784
0
      TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4785
0
        sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4786
0
                        stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4787
0
      }
4788
0
    }
4789
0
  }
4790
  /********************************************/
4791
  /* drop the acked chunks from the sentqueue */
4792
  /********************************************/
4793
0
  asoc->last_acked_seq = cum_ack;
4794
4795
0
  TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4796
0
    if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4797
0
      break;
4798
0
    }
4799
0
    if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4800
0
      if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4801
0
        asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4802
0
#ifdef INVARIANTS
4803
0
      } else {
4804
0
        panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4805
0
#endif
4806
0
      }
4807
0
    }
4808
0
    if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4809
0
        (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4810
0
        TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4811
0
      asoc->trigger_reset = 1;
4812
0
    }
4813
0
    TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4814
0
    if (PR_SCTP_ENABLED(tp1->flags)) {
4815
0
      if (asoc->pr_sctp_cnt != 0)
4816
0
        asoc->pr_sctp_cnt--;
4817
0
    }
4818
0
    asoc->sent_queue_cnt--;
4819
0
    if (tp1->data) {
4820
      /* sa_ignore NO_NULL_CHK */
4821
0
      sctp_free_bufspace(stcb, asoc, tp1, 1);
4822
0
      sctp_m_freem(tp1->data);
4823
0
      tp1->data = NULL;
4824
0
      if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4825
0
        asoc->sent_queue_cnt_removeable--;
4826
0
      }
4827
0
    }
4828
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4829
0
      sctp_log_sack(asoc->last_acked_seq,
4830
0
                    cum_ack,
4831
0
                    tp1->rec.data.tsn,
4832
0
                    0,
4833
0
                    0,
4834
0
                    SCTP_LOG_FREE_SENT);
4835
0
    }
4836
0
    sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4837
0
    wake_him++;
4838
0
  }
4839
0
  if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4840
0
#ifdef INVARIANTS
4841
0
    panic("Warning flight size is positive and should be 0");
4842
#else
4843
    SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4844
                asoc->total_flight);
4845
#endif
4846
0
    asoc->total_flight = 0;
4847
0
  }
4848
4849
0
#if defined(__Userspace__)
4850
0
  if (stcb->sctp_ep->recv_callback) {
4851
0
    if (stcb->sctp_socket) {
4852
0
      uint32_t inqueue_bytes, sb_free_now;
4853
0
      struct sctp_inpcb *inp;
4854
4855
0
      inp = stcb->sctp_ep;
4856
0
      inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
4857
0
      sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
4858
4859
      /* check if the amount free in the send socket buffer crossed the threshold */
4860
0
      if (inp->send_callback &&
4861
0
         (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) ||
4862
0
          (inp->send_sb_threshold == 0))) {
4863
0
        atomic_add_int(&stcb->asoc.refcnt, 1);
4864
0
        SCTP_TCB_UNLOCK(stcb);
4865
0
        inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info);
4866
0
        SCTP_TCB_LOCK(stcb);
4867
0
        atomic_subtract_int(&stcb->asoc.refcnt, 1);
4868
0
      }
4869
0
    }
4870
0
  } else if ((wake_him) && (stcb->sctp_socket)) {
4871
#else
4872
  /* sa_ignore NO_NULL_CHK */
4873
  if ((wake_him) && (stcb->sctp_socket)) {
4874
#endif
4875
#if defined(__APPLE__) && !defined(__Userspace__)
4876
    struct socket *so;
4877
4878
#endif
4879
0
    SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4880
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4881
0
      sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4882
0
    }
4883
#if defined(__APPLE__) && !defined(__Userspace__)
4884
    so = SCTP_INP_SO(stcb->sctp_ep);
4885
    atomic_add_int(&stcb->asoc.refcnt, 1);
4886
    SCTP_TCB_UNLOCK(stcb);
4887
    SCTP_SOCKET_LOCK(so, 1);
4888
    SCTP_TCB_LOCK(stcb);
4889
    atomic_subtract_int(&stcb->asoc.refcnt, 1);
4890
    if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4891
      /* assoc was freed while we were unlocked */
4892
      SCTP_SOCKET_UNLOCK(so, 1);
4893
      return;
4894
    }
4895
#endif
4896
0
    sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4897
#if defined(__APPLE__) && !defined(__Userspace__)
4898
    SCTP_SOCKET_UNLOCK(so, 1);
4899
#endif
4900
0
  } else {
4901
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4902
0
      sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4903
0
    }
4904
0
  }
4905
4906
0
  if (asoc->fast_retran_loss_recovery && accum_moved) {
4907
0
    if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4908
      /* Setup so we will exit RFC2582 fast recovery */
4909
0
      will_exit_fast_recovery = 1;
4910
0
    }
4911
0
  }
4912
  /*
4913
   * Check for revoked fragments:
4914
   *
4915
   * if Previous sack - Had no frags then we can't have any revoked if
4916
   * Previous sack - Had frag's then - If we now have frags aka
4917
   * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4918
   * some of them. else - The peer revoked all ACKED fragments, since
4919
   * we had some before and now we have NONE.
4920
   */
4921
4922
0
  if (num_seg) {
4923
0
    sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4924
0
    asoc->saw_sack_with_frags = 1;
4925
0
  } else if (asoc->saw_sack_with_frags) {
4926
0
    int cnt_revoked = 0;
4927
4928
    /* Peer revoked all dg's marked or acked */
4929
0
    TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4930
0
      if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4931
0
        tp1->sent = SCTP_DATAGRAM_SENT;
4932
0
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4933
0
          sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4934
0
                         tp1->whoTo->flight_size,
4935
0
                         tp1->book_size,
4936
0
                         (uint32_t)(uintptr_t)tp1->whoTo,
4937
0
                         tp1->rec.data.tsn);
4938
0
        }
4939
0
        sctp_flight_size_increase(tp1);
4940
0
        sctp_total_flight_increase(stcb, tp1);
4941
0
        tp1->rec.data.chunk_was_revoked = 1;
4942
        /*
4943
         * To ensure that this increase in
4944
         * flightsize, which is artificial,
4945
         * does not throttle the sender, we
4946
         * also increase the cwnd
4947
         * artificially.
4948
         */
4949
0
        tp1->whoTo->cwnd += tp1->book_size;
4950
0
        cnt_revoked++;
4951
0
      }
4952
0
    }
4953
0
    if (cnt_revoked) {
4954
0
      reneged_all = 1;
4955
0
    }
4956
0
    asoc->saw_sack_with_frags = 0;
4957
0
  }
4958
0
  if (num_nr_seg > 0)
4959
0
    asoc->saw_sack_with_nr_frags = 1;
4960
0
  else
4961
0
    asoc->saw_sack_with_nr_frags = 0;
4962
4963
  /* JRS - Use the congestion control given in the CC module */
4964
0
  if (ecne_seen == 0) {
4965
0
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4966
0
      if (net->net_ack2 > 0) {
4967
        /*
4968
         * Karn's rule applies to clearing error count, this
4969
         * is optional.
4970
         */
4971
0
        net->error_count = 0;
4972
0
        if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
4973
          /* addr came good */
4974
0
          net->dest_state |= SCTP_ADDR_REACHABLE;
4975
0
          sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4976
0
                          0, (void *)net, SCTP_SO_NOT_LOCKED);
4977
0
        }
4978
4979
0
        if (net == stcb->asoc.primary_destination) {
4980
0
          if (stcb->asoc.alternate) {
4981
            /* release the alternate, primary is good */
4982
0
            sctp_free_remote_addr(stcb->asoc.alternate);
4983
0
            stcb->asoc.alternate = NULL;
4984
0
          }
4985
0
        }
4986
4987
0
        if (net->dest_state & SCTP_ADDR_PF) {
4988
0
          net->dest_state &= ~SCTP_ADDR_PF;
4989
0
          sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4990
0
                          stcb->sctp_ep, stcb, net,
4991
0
                          SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
4992
0
          sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4993
0
          asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4994
          /* Done with this net */
4995
0
          net->net_ack = 0;
4996
0
        }
4997
        /* restore any doubled timers */
4998
0
        net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4999
0
        if (net->RTO < stcb->asoc.minrto) {
5000
0
          net->RTO = stcb->asoc.minrto;
5001
0
        }
5002
0
        if (net->RTO > stcb->asoc.maxrto) {
5003
0
          net->RTO = stcb->asoc.maxrto;
5004
0
        }
5005
0
      }
5006
0
    }
5007
0
    asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5008
0
  }
5009
5010
0
  if (TAILQ_EMPTY(&asoc->sent_queue)) {
5011
    /* nothing left in-flight */
5012
0
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5013
      /* stop all timers */
5014
0
      sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5015
0
                      stcb, net,
5016
0
                      SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
5017
0
      net->flight_size = 0;
5018
0
      net->partial_bytes_acked = 0;
5019
0
    }
5020
0
    asoc->total_flight = 0;
5021
0
    asoc->total_flight_count = 0;
5022
0
  }
5023
5024
  /**********************************/
5025
  /* Now what about shutdown issues */
5026
  /**********************************/
5027
0
  if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5028
    /* nothing left on sendqueue.. consider done */
5029
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5030
0
      sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5031
0
                        asoc->peers_rwnd, 0, 0, a_rwnd);
5032
0
    }
5033
0
    asoc->peers_rwnd = a_rwnd;
5034
0
    if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5035
      /* SWS sender side engages */
5036
0
      asoc->peers_rwnd = 0;
5037
0
    }
5038
    /* clean up */
5039
0
    if ((asoc->stream_queue_cnt == 1) &&
5040
0
        ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5041
0
         (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5042
0
        ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
5043
0
      SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
5044
0
    }
5045
0
    if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5046
0
         (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5047
0
        (asoc->stream_queue_cnt == 1) &&
5048
0
        (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5049
0
      struct mbuf *op_err;
5050
5051
0
      *abort_now = 1;
5052
      /* XXX */
5053
0
      op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5054
0
      stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
5055
0
      sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
5056
0
      return;
5057
0
    }
5058
0
    if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5059
0
        (asoc->stream_queue_cnt == 0)) {
5060
0
      struct sctp_nets *netp;
5061
5062
0
      if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5063
0
          (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5064
0
        SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5065
0
      }
5066
0
      SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5067
0
      sctp_stop_timers_for_shutdown(stcb);
5068
0
      if (asoc->alternate) {
5069
0
        netp = asoc->alternate;
5070
0
      } else {
5071
0
        netp = asoc->primary_destination;
5072
0
      }
5073
0
      sctp_send_shutdown(stcb, netp);
5074
0
      sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5075
0
           stcb->sctp_ep, stcb, netp);
5076
0
      sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5077
0
           stcb->sctp_ep, stcb, NULL);
5078
0
      return;
5079
0
    } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5080
0
         (asoc->stream_queue_cnt == 0)) {
5081
0
      struct sctp_nets *netp;
5082
5083
0
      SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5084
0
      SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5085
0
      sctp_stop_timers_for_shutdown(stcb);
5086
0
      if (asoc->alternate) {
5087
0
        netp = asoc->alternate;
5088
0
      } else {
5089
0
        netp = asoc->primary_destination;
5090
0
      }
5091
0
      sctp_send_shutdown_ack(stcb, netp);
5092
0
      sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5093
0
                       stcb->sctp_ep, stcb, netp);
5094
0
      return;
5095
0
    }
5096
0
  }
5097
  /*
5098
   * Now here we are going to recycle net_ack for a different use...
5099
   * HEADS UP.
5100
   */
5101
0
  TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5102
0
    net->net_ack = 0;
5103
0
  }
5104
5105
  /*
5106
   * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5107
   * to be done. Setting this_sack_lowest_newack to the cum_ack will
5108
   * automatically ensure that.
5109
   */
5110
0
  if ((asoc->sctp_cmt_on_off > 0) &&
5111
0
      SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5112
0
      (cmt_dac_flag == 0)) {
5113
0
    this_sack_lowest_newack = cum_ack;
5114
0
  }
5115
0
  if ((num_seg > 0) || (num_nr_seg > 0)) {
5116
0
    sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5117
0
                               biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5118
0
  }
5119
  /* JRS - Use the congestion control given in the CC module */
5120
0
  asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5121
5122
  /* Now are we exiting loss recovery ? */
5123
0
  if (will_exit_fast_recovery) {
5124
    /* Ok, we must exit fast recovery */
5125
0
    asoc->fast_retran_loss_recovery = 0;
5126
0
  }
5127
0
  if ((asoc->sat_t3_loss_recovery) &&
5128
0
      SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5129
    /* end satellite t3 loss recovery */
5130
0
    asoc->sat_t3_loss_recovery = 0;
5131
0
  }
5132
  /*
5133
   * CMT Fast recovery
5134
   */
5135
0
  TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5136
0
    if (net->will_exit_fast_recovery) {
5137
      /* Ok, we must exit fast recovery */
5138
0
      net->fast_retran_loss_recovery = 0;
5139
0
    }
5140
0
  }
5141
5142
  /* Adjust and set the new rwnd value */
5143
0
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5144
0
    sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5145
0
                      asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5146
0
  }
5147
0
  asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5148
0
                                      (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5149
0
  if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5150
    /* SWS sender side engages */
5151
0
    asoc->peers_rwnd = 0;
5152
0
  }
5153
0
  if (asoc->peers_rwnd > old_rwnd) {
5154
0
    win_probe_recovery = 1;
5155
0
  }
5156
5157
  /*
5158
   * Now we must setup so we have a timer up for anyone with
5159
   * outstanding data.
5160
   */
5161
0
  done_once = 0;
5162
0
again:
5163
0
  j = 0;
5164
0
  TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5165
0
    if (win_probe_recovery && (net->window_probe)) {
5166
0
      win_probe_recovered = 1;
5167
      /*-
5168
       * Find first chunk that was used with
5169
       * window probe and clear the event. Put
5170
       * it back into the send queue as if has
5171
       * not been sent.
5172
       */
5173
0
      TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5174
0
        if (tp1->window_probe) {
5175
0
          sctp_window_probe_recovery(stcb, asoc, tp1);
5176
0
          break;
5177
0
        }
5178
0
      }
5179
0
    }
5180
0
    if (net->flight_size) {
5181
0
      j++;
5182
0
      if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5183
0
        sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5184
0
                         stcb->sctp_ep, stcb, net);
5185
0
      }
5186
0
      if (net->window_probe) {
5187
0
        net->window_probe = 0;
5188
0
      }
5189
0
    } else {
5190
0
      if (net->window_probe) {
5191
        /* In window probes we must assure a timer is still running there */
5192
0
        if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5193
0
          sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5194
0
                           stcb->sctp_ep, stcb, net);
5195
0
        }
5196
0
      } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5197
0
        sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5198
0
                        stcb, net,
5199
0
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_36);
5200
0
      }
5201
0
    }
5202
0
  }
5203
0
  if ((j == 0) &&
5204
0
      (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5205
0
      (asoc->sent_queue_retran_cnt == 0) &&
5206
0
      (win_probe_recovered == 0) &&
5207
0
      (done_once == 0)) {
5208
    /* huh, this should not happen unless all packets
5209
     * are PR-SCTP and marked to skip of course.
5210
     */
5211
0
    if (sctp_fs_audit(asoc)) {
5212
0
      TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5213
0
        net->flight_size = 0;
5214
0
      }
5215
0
      asoc->total_flight = 0;
5216
0
      asoc->total_flight_count = 0;
5217
0
      asoc->sent_queue_retran_cnt = 0;
5218
0
      TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5219
0
        if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5220
0
          sctp_flight_size_increase(tp1);
5221
0
          sctp_total_flight_increase(stcb, tp1);
5222
0
        } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5223
0
          sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5224
0
        }
5225
0
      }
5226
0
    }
5227
0
    done_once = 1;
5228
0
    goto again;
5229
0
  }
5230
  /*********************************************/
5231
  /* Here we perform PR-SCTP procedures        */
5232
  /* (section 4.2)                             */
5233
  /*********************************************/
5234
  /* C1. update advancedPeerAckPoint */
5235
0
  if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5236
0
    asoc->advanced_peer_ack_point = cum_ack;
5237
0
  }
5238
  /* C2. try to further move advancedPeerAckPoint ahead */
5239
0
  if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5240
0
    struct sctp_tmit_chunk *lchk;
5241
0
    uint32_t old_adv_peer_ack_point;
5242
5243
0
    old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5244
0
    lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5245
    /* C3. See if we need to send a Fwd-TSN */
5246
0
    if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5247
      /*
5248
       * ISSUE with ECN, see FWD-TSN processing.
5249
       */
5250
0
      if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5251
0
        sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5252
0
                       0xee, cum_ack, asoc->advanced_peer_ack_point,
5253
0
                       old_adv_peer_ack_point);
5254
0
      }
5255
0
      if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5256
0
        send_forward_tsn(stcb, asoc);
5257
0
      } else if (lchk) {
5258
        /* try to FR fwd-tsn's that get lost too */
5259
0
        if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5260
0
          send_forward_tsn(stcb, asoc);
5261
0
        }
5262
0
      }
5263
0
    }
5264
0
    for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5265
0
      if (lchk->whoTo != NULL) {
5266
0
        break;
5267
0
      }
5268
0
    }
5269
0
    if (lchk != NULL) {
5270
      /* Assure a timer is up */
5271
0
      sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5272
0
                       stcb->sctp_ep, stcb, lchk->whoTo);
5273
0
    }
5274
0
  }
5275
0
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5276
0
    sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5277
0
                   a_rwnd,
5278
0
                   stcb->asoc.peers_rwnd,
5279
0
                   stcb->asoc.total_flight,
5280
0
                   stcb->asoc.total_output_queue_size);
5281
0
  }
5282
0
}
5283
5284
void
5285
sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5286
760
{
5287
  /* Copy cum-ack */
5288
760
  uint32_t cum_ack, a_rwnd;
5289
5290
760
  cum_ack = ntohl(cp->cumulative_tsn_ack);
5291
  /* Arrange so a_rwnd does NOT change */
5292
760
  a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5293
5294
  /* Now call the express sack handling */
5295
760
  sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5296
760
}
5297
5298
static void
5299
sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5300
                               struct sctp_stream_in *strmin)
5301
6.06k
{
5302
6.06k
  struct sctp_queued_to_read *control, *ncontrol;
5303
6.06k
  struct sctp_association *asoc;
5304
6.06k
  uint32_t mid;
5305
6.06k
  int need_reasm_check = 0;
5306
5307
6.06k
  KASSERT(stcb != NULL, ("stcb == NULL"));
5308
6.06k
  SCTP_TCB_LOCK_ASSERT(stcb);
5309
6.06k
  SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep);
5310
5311
6.06k
  asoc = &stcb->asoc;
5312
6.06k
  mid = strmin->last_mid_delivered;
5313
  /*
5314
   * First deliver anything prior to and including the stream no that
5315
   * came in.
5316
   */
5317
6.06k
  TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5318
0
    if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5319
      /* this is deliverable now */
5320
0
      if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5321
0
        if (control->on_strm_q) {
5322
0
          if (control->on_strm_q == SCTP_ON_ORDERED) {
5323
0
            TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5324
0
          } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5325
0
            TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5326
0
#ifdef INVARIANTS
5327
0
          } else {
5328
0
            panic("strmin: %p ctl: %p unknown %d",
5329
0
                  strmin, control, control->on_strm_q);
5330
0
#endif
5331
0
          }
5332
0
          control->on_strm_q = 0;
5333
0
        }
5334
        /* subtract pending on streams */
5335
0
        if (asoc->size_on_all_streams >= control->length) {
5336
0
          asoc->size_on_all_streams -= control->length;
5337
0
        } else {
5338
0
#ifdef INVARIANTS
5339
0
          panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5340
#else
5341
          asoc->size_on_all_streams = 0;
5342
#endif
5343
0
        }
5344
0
        sctp_ucount_decr(asoc->cnt_on_all_streams);
5345
        /* deliver it to at least the delivery-q */
5346
0
        if (stcb->sctp_socket) {
5347
0
          sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5348
0
          sctp_add_to_readq(stcb->sctp_ep, stcb, control,
5349
0
                            &stcb->sctp_socket->so_rcv, 1,
5350
0
                            SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5351
0
        }
5352
0
      } else {
5353
        /* Its a fragmented message */
5354
0
        if (control->first_frag_seen) {
5355
          /* Make it so this is next to deliver, we restore later */
5356
0
          strmin->last_mid_delivered = control->mid - 1;
5357
0
          need_reasm_check = 1;
5358
0
          break;
5359
0
        }
5360
0
      }
5361
0
    } else {
5362
      /* no more delivery now. */
5363
0
      break;
5364
0
    }
5365
0
  }
5366
6.06k
  if (need_reasm_check) {
5367
0
    int ret;
5368
0
    ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5369
0
    if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5370
      /* Restore the next to deliver unless we are ahead */
5371
0
      strmin->last_mid_delivered = mid;
5372
0
    }
5373
0
    if (ret == 0) {
5374
      /* Left the front Partial one on */
5375
0
      return;
5376
0
    }
5377
0
    need_reasm_check = 0;
5378
0
  }
5379
  /*
5380
   * now we must deliver things in queue the normal way  if any are
5381
   * now ready.
5382
   */
5383
6.06k
  mid = strmin->last_mid_delivered + 1;
5384
6.06k
  TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5385
0
    if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5386
0
      if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5387
        /* this is deliverable now */
5388
0
        if (control->on_strm_q) {
5389
0
          if (control->on_strm_q == SCTP_ON_ORDERED) {
5390
0
            TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5391
0
          } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5392
0
            TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5393
0
#ifdef INVARIANTS
5394
0
          } else {
5395
0
            panic("strmin: %p ctl: %p unknown %d",
5396
0
                  strmin, control, control->on_strm_q);
5397
0
#endif
5398
0
          }
5399
0
          control->on_strm_q = 0;
5400
0
        }
5401
        /* subtract pending on streams */
5402
0
        if (asoc->size_on_all_streams >= control->length) {
5403
0
          asoc->size_on_all_streams -= control->length;
5404
0
        } else {
5405
0
#ifdef INVARIANTS
5406
0
          panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5407
#else
5408
          asoc->size_on_all_streams = 0;
5409
#endif
5410
0
        }
5411
0
        sctp_ucount_decr(asoc->cnt_on_all_streams);
5412
        /* deliver it to at least the delivery-q */
5413
0
        strmin->last_mid_delivered = control->mid;
5414
0
        if (stcb->sctp_socket) {
5415
0
          sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5416
0
          sctp_add_to_readq(stcb->sctp_ep, stcb, control,
5417
0
                            &stcb->sctp_socket->so_rcv, 1,
5418
0
                            SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5419
0
        }
5420
0
        mid = strmin->last_mid_delivered + 1;
5421
0
      } else {
5422
        /* Its a fragmented message */
5423
0
        if (control->first_frag_seen) {
5424
          /* Make it so this is next to deliver */
5425
0
          strmin->last_mid_delivered = control->mid - 1;
5426
0
          need_reasm_check = 1;
5427
0
          break;
5428
0
        }
5429
0
      }
5430
0
    } else {
5431
0
      break;
5432
0
    }
5433
0
  }
5434
6.06k
  if (need_reasm_check) {
5435
0
    (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5436
0
  }
5437
6.06k
}
5438
5439
static void
5440
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5441
                              struct sctp_association *asoc, struct sctp_stream_in *strm,
5442
                              struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
5443
0
{
5444
0
  struct sctp_tmit_chunk *chk, *nchk;
5445
5446
  /*
5447
   * For now large messages held on the stream reasm that are
5448
   * complete will be tossed too. We could in theory do more
5449
   * work to spin through and stop after dumping one msg aka
5450
   * seeing the start of a new msg at the head, and call the
5451
   * delivery function... to see if it can be delivered... But
5452
   * for now we just dump everything on the queue.
5453
   */
5454
5455
0
  KASSERT(stcb != NULL, ("stcb == NULL"));
5456
0
  SCTP_TCB_LOCK_ASSERT(stcb);
5457
0
  SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep);
5458
5459
0
  if (!asoc->idata_supported && !ordered &&
5460
0
      control->first_frag_seen &&
5461
0
      SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5462
0
    return;
5463
0
  }
5464
0
  TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5465
    /* Purge hanging chunks */
5466
0
    if (!asoc->idata_supported && !ordered) {
5467
0
      if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5468
0
        break;
5469
0
      }
5470
0
    }
5471
0
    TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5472
0
    if (asoc->size_on_reasm_queue >= chk->send_size) {
5473
0
      asoc->size_on_reasm_queue -= chk->send_size;
5474
0
    } else {
5475
0
#ifdef INVARIANTS
5476
0
      panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5477
#else
5478
      asoc->size_on_reasm_queue = 0;
5479
#endif
5480
0
    }
5481
0
    sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5482
0
    if (chk->data) {
5483
0
      sctp_m_freem(chk->data);
5484
0
      chk->data = NULL;
5485
0
    }
5486
0
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5487
0
  }
5488
0
  if (!TAILQ_EMPTY(&control->reasm)) {
5489
0
    KASSERT(!asoc->idata_supported,
5490
0
        ("Reassembly queue not empty for I-DATA"));
5491
0
    KASSERT(!ordered,
5492
0
        ("Reassembly queue not empty for ordered data"));
5493
0
    if (control->data) {
5494
0
      sctp_m_freem(control->data);
5495
0
      control->data = NULL;
5496
0
    }
5497
0
    control->fsn_included = 0xffffffff;
5498
0
    control->first_frag_seen = 0;
5499
0
    control->last_frag_seen = 0;
5500
0
    if (control->on_read_q) {
5501
      /*
5502
       * We have to purge it from there,
5503
       * hopefully this will work :-)
5504
       */
5505
0
      TAILQ_REMOVE(&stcb->sctp_ep->read_queue, control, next);
5506
0
      control->on_read_q = 0;
5507
0
    }
5508
0
    chk = TAILQ_FIRST(&control->reasm);
5509
0
    if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5510
0
      TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5511
0
      sctp_add_chk_to_control(control, strm, stcb, asoc,
5512
0
                              chk, SCTP_READ_LOCK_HELD);
5513
0
    }
5514
0
    sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5515
0
    return;
5516
0
  }
5517
0
  if (control->on_strm_q == SCTP_ON_ORDERED) {
5518
0
    TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5519
0
    if (asoc->size_on_all_streams >= control->length) {
5520
0
      asoc->size_on_all_streams -= control->length;
5521
0
    } else {
5522
0
#ifdef INVARIANTS
5523
0
      panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5524
#else
5525
      asoc->size_on_all_streams = 0;
5526
#endif
5527
0
    }
5528
0
    sctp_ucount_decr(asoc->cnt_on_all_streams);
5529
0
    control->on_strm_q = 0;
5530
0
  } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5531
0
    TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5532
0
    control->on_strm_q = 0;
5533
0
#ifdef INVARIANTS
5534
0
  } else if (control->on_strm_q) {
5535
0
    panic("strm: %p ctl: %p unknown %d",
5536
0
        strm, control, control->on_strm_q);
5537
0
#endif
5538
0
  }
5539
0
  control->on_strm_q = 0;
5540
0
  if (control->on_read_q == 0) {
5541
0
    sctp_free_remote_addr(control->whoFrom);
5542
0
    if (control->data) {
5543
0
      sctp_m_freem(control->data);
5544
0
      control->data = NULL;
5545
0
    }
5546
0
    sctp_free_a_readq(stcb, control);
5547
0
  }
5548
0
}
5549
5550
void
5551
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5552
                        struct sctp_forward_tsn_chunk *fwd,
5553
                        int *abort_flag, struct mbuf *m , int offset)
5554
3.80k
{
5555
  /* The pr-sctp fwd tsn */
5556
  /*
5557
   * here we will perform all the data receiver side steps for
5558
   * processing FwdTSN, as required in by pr-sctp draft:
5559
   *
5560
   * Assume we get FwdTSN(x):
5561
   *
5562
   * 1) update local cumTSN to x
5563
   * 2) try to further advance cumTSN to x + others we have
5564
   * 3) examine and update re-ordering queue on pr-in-streams
5565
   * 4) clean up re-assembly queue
5566
   * 5) Send a sack to report where we are.
5567
   */
5568
3.80k
  struct sctp_association *asoc;
5569
3.80k
  uint32_t new_cum_tsn, gap;
5570
3.80k
  unsigned int i, fwd_sz, m_size;
5571
3.80k
  struct sctp_stream_in *strm;
5572
3.80k
  struct sctp_queued_to_read *control, *ncontrol;
5573
5574
3.80k
  asoc = &stcb->asoc;
5575
3.80k
  if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5576
0
    SCTPDBG(SCTP_DEBUG_INDATA1,
5577
0
      "Bad size too small/big fwd-tsn\n");
5578
0
    return;
5579
0
  }
5580
3.80k
  m_size = (stcb->asoc.mapping_array_size << 3);
5581
  /*************************************************************/
5582
  /* 1. Here we update local cumTSN and shift the bitmap array */
5583
  /*************************************************************/
5584
3.80k
  new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5585
5586
3.80k
  if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5587
    /* Already got there ... */
5588
2.57k
    return;
5589
2.57k
  }
5590
  /*
5591
   * now we know the new TSN is more advanced, let's find the actual
5592
   * gap
5593
   */
5594
1.22k
  SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5595
1.22k
  asoc->cumulative_tsn = new_cum_tsn;
5596
1.22k
  if (gap >= m_size) {
5597
722
    if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5598
86
      struct mbuf *op_err;
5599
86
      char msg[SCTP_DIAG_INFO_LEN];
5600
5601
      /*
5602
       * out of range (of single byte chunks in the rwnd I
5603
       * give out). This must be an attacker.
5604
       */
5605
86
      *abort_flag = 1;
5606
86
      SCTP_SNPRINTF(msg, sizeof(msg),
5607
86
                    "New cum ack %8.8x too high, highest TSN %8.8x",
5608
86
                    new_cum_tsn, asoc->highest_tsn_inside_map);
5609
86
      op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5610
86
      stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37;
5611
86
      sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
5612
86
      return;
5613
86
    }
5614
636
    SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5615
5616
636
    memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5617
636
    asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5618
636
    asoc->highest_tsn_inside_map = new_cum_tsn;
5619
5620
636
    memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5621
636
    asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5622
5623
636
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5624
0
      sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5625
0
    }
5626
636
  } else {
5627
503
    SCTP_TCB_LOCK_ASSERT(stcb);
5628
12.3k
    for (i = 0; i <= gap; i++) {
5629
11.8k
      if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5630
11.8k
          !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5631
11.7k
        SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5632
11.7k
        if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5633
11.7k
          asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5634
11.7k
        }
5635
11.7k
      }
5636
11.8k
    }
5637
503
  }
5638
  /*************************************************************/
5639
  /* 2. Clear up re-assembly queue                             */
5640
  /*************************************************************/
5641
5642
  /* This is now done as part of clearing up the stream/seq */
5643
1.13k
  if (asoc->idata_supported == 0) {
5644
530
    uint16_t sid;
5645
5646
    /* Flush all the un-ordered data based on cum-tsn */
5647
530
    SCTP_INP_READ_LOCK(stcb->sctp_ep);
5648
106k
    for (sid = 0; sid < asoc->streamincnt; sid++) {
5649
105k
      strm = &asoc->strmin[sid];
5650
105k
      if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5651
0
        sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
5652
0
      }
5653
105k
    }
5654
530
    SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5655
530
  }
5656
  /*******************************************************/
5657
  /* 3. Update the PR-stream re-ordering queues and fix  */
5658
  /*    delivery issues as needed.                       */
5659
  /*******************************************************/
5660
1.13k
  fwd_sz -= sizeof(*fwd);
5661
1.13k
  if (m && fwd_sz) {
5662
    /* New method. */
5663
731
    unsigned int num_str;
5664
731
    uint32_t mid;
5665
731
    uint16_t sid;
5666
731
    uint16_t ordered, flags;
5667
731
    struct sctp_strseq *stseq, strseqbuf;
5668
731
    struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5669
731
    offset += sizeof(*fwd);
5670
5671
731
    SCTP_INP_READ_LOCK(stcb->sctp_ep);
5672
731
    if (asoc->idata_supported) {
5673
424
      num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5674
424
    } else {
5675
307
      num_str = fwd_sz / sizeof(struct sctp_strseq);
5676
307
    }
5677
6.79k
    for (i = 0; i < num_str; i++) {
5678
6.30k
      if (asoc->idata_supported) {
5679
1.93k
        stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5680
1.93k
                      sizeof(struct sctp_strseq_mid),
5681
1.93k
                      (uint8_t *)&strseqbuf_m);
5682
1.93k
        offset += sizeof(struct sctp_strseq_mid);
5683
1.93k
        if (stseq_m == NULL) {
5684
0
          break;
5685
0
        }
5686
1.93k
        sid = ntohs(stseq_m->sid);
5687
1.93k
        mid = ntohl(stseq_m->mid);
5688
1.93k
        flags = ntohs(stseq_m->flags);
5689
1.93k
        if (flags & PR_SCTP_UNORDERED_FLAG) {
5690
685
          ordered = 0;
5691
1.25k
        } else {
5692
1.25k
          ordered = 1;
5693
1.25k
        }
5694
4.36k
      } else {
5695
4.36k
        stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5696
4.36k
                      sizeof(struct sctp_strseq),
5697
4.36k
                      (uint8_t *)&strseqbuf);
5698
4.36k
        offset += sizeof(struct sctp_strseq);
5699
4.36k
        if (stseq == NULL) {
5700
0
          break;
5701
0
        }
5702
4.36k
        sid = ntohs(stseq->sid);
5703
4.36k
        mid = (uint32_t)ntohs(stseq->ssn);
5704
4.36k
        ordered = 1;
5705
4.36k
      }
5706
      /* Convert */
5707
5708
      /* now process */
5709
5710
      /*
5711
       * Ok we now look for the stream/seq on the read queue
5712
       * where its not all delivered. If we find it we transmute the
5713
       * read entry into a PDI_ABORTED.
5714
       */
5715
6.30k
      if (sid >= asoc->streamincnt) {
5716
        /* screwed up streams, stop!  */
5717
246
        break;
5718
246
      }
5719
6.06k
      if ((asoc->str_of_pdapi == sid) &&
5720
4.95k
          (asoc->ssn_of_pdapi == mid)) {
5721
        /* If this is the one we were partially delivering
5722
         * now then we no longer are. Note this will change
5723
         * with the reassembly re-write.
5724
         */
5725
1.81k
        asoc->fragmented_delivery_inprogress = 0;
5726
1.81k
      }
5727
6.06k
      strm = &asoc->strmin[sid];
5728
6.06k
      if (ordered) {
5729
5.41k
        TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, ncontrol) {
5730
0
          if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5731
0
            sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5732
0
          }
5733
0
        }
5734
5.41k
      } else {
5735
641
        if (asoc->idata_supported) {
5736
641
          TAILQ_FOREACH_SAFE(control, &strm->uno_inqueue, next_instrm, ncontrol) {
5737
0
            if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5738
0
              sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5739
0
            }
5740
0
          }
5741
641
        } else {
5742
0
          if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5743
0
            sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
5744
0
          }
5745
0
        }
5746
641
      }
5747
12.4M
      TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5748
12.4M
        if ((control->sinfo_stream == sid) &&
5749
11.4M
            (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5750
1.22k
          control->pdapi_aborted = 1;
5751
1.22k
          control->end_added = 1;
5752
1.22k
          if (control->on_strm_q == SCTP_ON_ORDERED) {
5753
0
            TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5754
0
            if (asoc->size_on_all_streams >= control->length) {
5755
0
              asoc->size_on_all_streams -= control->length;
5756
0
            } else {
5757
0
#ifdef INVARIANTS
5758
0
              panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5759
#else
5760
              asoc->size_on_all_streams = 0;
5761
#endif
5762
0
            }
5763
0
            sctp_ucount_decr(asoc->cnt_on_all_streams);
5764
1.22k
          } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5765
0
            TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5766
0
#ifdef INVARIANTS
5767
1.22k
          } else if (control->on_strm_q) {
5768
0
            panic("strm: %p ctl: %p unknown %d",
5769
0
                  strm, control, control->on_strm_q);
5770
0
#endif
5771
0
          }
5772
1.22k
          control->on_strm_q = 0;
5773
1.22k
          sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5774
1.22k
                          stcb,
5775
1.22k
                          SCTP_PARTIAL_DELIVERY_ABORTED,
5776
1.22k
                          (void *)control,
5777
1.22k
                          SCTP_SO_NOT_LOCKED);
5778
1.22k
          break;
5779
12.4M
        } else if ((control->sinfo_stream == sid) &&
5780
11.4M
             SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5781
          /* We are past our victim SSN */
5782
164
          break;
5783
164
        }
5784
12.4M
      }
5785
6.06k
      if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5786
        /* Update the sequence number */
5787
1.05k
        strm->last_mid_delivered = mid;
5788
1.05k
      }
5789
      /* now kick the stream the new way */
5790
      /*sa_ignore NO_NULL_CHK*/
5791
6.06k
      sctp_kick_prsctp_reorder_queue(stcb, strm);
5792
6.06k
    }
5793
731
    SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5794
731
  }
5795
  /*
5796
   * Now slide thing forward.
5797
   */
5798
1.13k
  sctp_slide_mapping_arrays(stcb);
5799
1.13k
}