Coverage Report

Created: 2025-07-18 06:54

/src/usrsctp/usrsctplib/netinet/sctp_timer.c
Line
Count
Source
1
/*-
2
 * SPDX-License-Identifier: BSD-3-Clause
3
 *
4
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7
 *
8
 * Redistribution and use in source and binary forms, with or without
9
 * modification, are permitted provided that the following conditions are met:
10
 *
11
 * a) Redistributions of source code must retain the above copyright notice,
12
 *    this list of conditions and the following disclaimer.
13
 *
14
 * b) Redistributions in binary form must reproduce the above copyright
15
 *    notice, this list of conditions and the following disclaimer in
16
 *    the documentation and/or other materials provided with the distribution.
17
 *
18
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19
 *    contributors may be used to endorse or promote products derived
20
 *    from this software without specific prior written permission.
21
 *
22
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32
 * THE POSSIBILITY OF SUCH DAMAGE.
33
 */
34
35
#define _IP_VHL
36
#include <netinet/sctp_os.h>
37
#include <netinet/sctp_pcb.h>
38
#ifdef INET6
39
#if defined(__FreeBSD__) && defined(__Userspace__)
40
#include <netinet6/sctp6_var.h>
41
#endif
42
#endif
43
#include <netinet/sctp_var.h>
44
#include <netinet/sctp_sysctl.h>
45
#include <netinet/sctp_timer.h>
46
#include <netinet/sctputil.h>
47
#include <netinet/sctp_output.h>
48
#include <netinet/sctp_header.h>
49
#include <netinet/sctp_indata.h>
50
#include <netinet/sctp_asconf.h>
51
#include <netinet/sctp_input.h>
52
#include <netinet/sctp.h>
53
#include <netinet/sctp_uio.h>
54
#if defined(INET) || defined(INET6)
55
#if !(defined(_WIN32) && defined(__Userspace__))
56
#include <netinet/udp.h>
57
#endif
58
#endif
59
60
void
61
sctp_audit_retranmission_queue(struct sctp_association *asoc)
62
0
{
63
0
  struct sctp_tmit_chunk *chk;
64
65
0
  SCTPDBG(SCTP_DEBUG_TIMER4, "Audit invoked on send queue cnt:%d onqueue:%d\n",
66
0
      asoc->sent_queue_retran_cnt,
67
0
      asoc->sent_queue_cnt);
68
0
  asoc->sent_queue_retran_cnt = 0;
69
0
  asoc->sent_queue_cnt = 0;
70
0
  TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
71
0
    if (chk->sent == SCTP_DATAGRAM_RESEND) {
72
0
      sctp_ucount_incr(asoc->sent_queue_retran_cnt);
73
0
    }
74
0
    asoc->sent_queue_cnt++;
75
0
  }
76
0
  TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
77
0
    if (chk->sent == SCTP_DATAGRAM_RESEND) {
78
0
      sctp_ucount_incr(asoc->sent_queue_retran_cnt);
79
0
    }
80
0
  }
81
0
  TAILQ_FOREACH(chk, &asoc->asconf_send_queue, sctp_next) {
82
0
    if (chk->sent == SCTP_DATAGRAM_RESEND) {
83
0
      sctp_ucount_incr(asoc->sent_queue_retran_cnt);
84
0
    }
85
0
  }
86
0
  SCTPDBG(SCTP_DEBUG_TIMER4, "Audit completes retran:%d onqueue:%d\n",
87
0
    asoc->sent_queue_retran_cnt,
88
0
    asoc->sent_queue_cnt);
89
0
}
90
91
static int
92
sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
93
    struct sctp_nets *net, uint16_t threshold)
94
0
{
95
0
  KASSERT(stcb != NULL, ("stcb is NULL"));
96
0
  SCTP_TCB_LOCK_ASSERT(stcb);
97
98
0
  if (net != NULL) {
99
0
    net->error_count++;
100
0
    SCTPDBG(SCTP_DEBUG_TIMER4, "Error count for %p now %d thresh:%d\n",
101
0
      (void *)net, net->error_count,
102
0
      net->failure_threshold);
103
0
    if (net->error_count > net->failure_threshold) {
104
      /* We had a threshold failure */
105
0
      if (net->dest_state & SCTP_ADDR_REACHABLE) {
106
0
        net->dest_state &= ~SCTP_ADDR_REACHABLE;
107
0
        net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
108
0
        net->dest_state &= ~SCTP_ADDR_PF;
109
0
        sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
110
0
            stcb, 0,
111
0
            (void *)net, SCTP_SO_NOT_LOCKED);
112
0
      }
113
0
    } else if ((net->pf_threshold < net->failure_threshold) &&
114
0
               (net->error_count > net->pf_threshold)) {
115
0
      if ((net->dest_state & SCTP_ADDR_PF) == 0) {
116
0
        net->dest_state |= SCTP_ADDR_PF;
117
0
        net->last_active = sctp_get_tick_count();
118
0
        sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
119
0
        sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
120
0
                        inp, stcb, net,
121
0
                        SCTP_FROM_SCTP_TIMER + SCTP_LOC_1);
122
0
        sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
123
0
      }
124
0
    }
125
0
    if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
126
0
      if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
127
0
        sctp_misc_ints(SCTP_THRESHOLD_INCR,
128
0
                       stcb->asoc.overall_error_count,
129
0
                       (stcb->asoc.overall_error_count+1),
130
0
                       SCTP_FROM_SCTP_TIMER,
131
0
                       __LINE__);
132
0
      }
133
0
      stcb->asoc.overall_error_count++;
134
0
    }
135
0
  } else {
136
0
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
137
0
      sctp_misc_ints(SCTP_THRESHOLD_INCR,
138
0
                     stcb->asoc.overall_error_count,
139
0
                     (stcb->asoc.overall_error_count+1),
140
0
                     SCTP_FROM_SCTP_TIMER,
141
0
                     __LINE__);
142
0
    }
143
0
    stcb->asoc.overall_error_count++;
144
0
  }
145
0
  SCTPDBG(SCTP_DEBUG_TIMER4, "Overall error count for %p now %d thresh:%u state:%x\n",
146
0
    (void *)&stcb->asoc, stcb->asoc.overall_error_count,
147
0
    (uint32_t)threshold,
148
0
    ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state));
149
  /*
150
   * We specifically do not do >= to give the assoc one more chance
151
   * before we fail it.
152
   */
153
0
  if (stcb->asoc.overall_error_count > threshold) {
154
    /* Abort notification sends a ULP notify */
155
0
    struct mbuf *op_err;
156
157
0
    op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
158
0
                                 "Association error counter exceeded");
159
0
    inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_2;
160
0
    sctp_abort_an_association(inp, stcb, op_err, true, SCTP_SO_NOT_LOCKED);
161
0
    return (1);
162
0
  }
163
0
  return (0);
164
0
}
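
A minimal standalone sketch of the per-destination decision made above, once the path's error counter has been incremented: crossing failure_threshold marks the path unreachable, while crossing pf_threshold first only moves it into the potentially-failed (PF) state. The helper and enum below are illustrative only and are not part of sctp_timer.c or of the coverage data.

#include <stdint.h>

enum path_state { PATH_ACTIVE, PATH_PF, PATH_UNREACHABLE };

static enum path_state
path_state_after_timeout(uint32_t error_count,
    uint16_t pf_threshold, uint16_t failure_threshold)
{
    /* Same ordering as sctp_threshold_management(): hard failure wins,
     * otherwise a PF transition only happens while pf_threshold is the
     * lower of the two limits. */
    if (error_count > failure_threshold)
        return (PATH_UNREACHABLE);
    if ((pf_threshold < failure_threshold) && (error_count > pf_threshold))
        return (PATH_PF);
    return (PATH_ACTIVE);
}

The association-level check at the end of the function uses a strict greater-than for the reason its comment gives: the association gets one more chance at exactly the configured limit.
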
165
166
/*
167
 * sctp_find_alternate_net() returns a non-NULL pointer as long as there
168
 * exist nets which are not being deleted.
169
 */
170
struct sctp_nets *
171
sctp_find_alternate_net(struct sctp_tcb *stcb,
172
    struct sctp_nets *net,
173
    int mode)
174
2.55k
{
175
  /* Find and return an alternate network if possible */
176
2.55k
  struct sctp_nets *alt, *mnet, *min_errors_net = NULL , *max_cwnd_net = NULL;
177
2.55k
  bool looped;
178
  /* JRS 5/14/07 - Initialize min_errors to an impossible value. */
179
2.55k
  int min_errors = -1;
180
2.55k
  uint32_t max_cwnd = 0;
181
182
2.55k
  if (stcb->asoc.numnets == 1) {
183
    /* No selection can be made. */
184
121
    return (TAILQ_FIRST(&stcb->asoc.nets));
185
121
  }
186
  /*
187
   * JRS 5/14/07 - If mode is set to 2, use the CMT PF find alternate net algorithm.
188
   * This algorithm chooses the active destination (not in PF state) with the largest
189
   * cwnd value. If all destinations are in PF state, unreachable, or unconfirmed, choose
190
   * the destination that is in PF state with the lowest error count. In case of a tie,
191
   * choose the destination that was most recently active.
192
   */
193
2.43k
  if (mode == 2) {
194
0
    TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
195
      /* JRS 5/14/07 - If the destination is unreachable or unconfirmed, skip it. */
196
0
      if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
197
0
          (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) {
198
0
        continue;
199
0
      }
200
      /*
201
       * JRS 5/14/07 -  If the destination is reachable but in PF state, compare
202
       *  the error count of the destination to the minimum error count seen thus far.
203
       *  Store the destination with the lower error count.  If the error counts are
204
       *  equal, store the destination that was most recently active.
205
       */
206
0
      if (mnet->dest_state & SCTP_ADDR_PF) {
207
        /*
208
         * JRS 5/14/07 - If the destination under consideration is the current
209
         *  destination, work as if the error count is one higher.  The
210
         *  actual error count will not be incremented until later in the
211
         *  t3 handler.
212
         */
213
0
        if (mnet == net) {
214
0
          if (min_errors == -1) {
215
0
            min_errors = mnet->error_count + 1;
216
0
            min_errors_net = mnet;
217
0
          } else if (mnet->error_count + 1 < min_errors) {
218
0
            min_errors = mnet->error_count + 1;
219
0
            min_errors_net = mnet;
220
0
          } else if (mnet->error_count + 1 == min_errors
221
0
                && mnet->last_active > min_errors_net->last_active) {
222
0
            min_errors_net = mnet;
223
0
            min_errors = mnet->error_count + 1;
224
0
          }
225
0
          continue;
226
0
        } else {
227
0
          if (min_errors == -1) {
228
0
            min_errors = mnet->error_count;
229
0
            min_errors_net = mnet;
230
0
          } else if (mnet->error_count < min_errors) {
231
0
            min_errors = mnet->error_count;
232
0
            min_errors_net = mnet;
233
0
          } else if (mnet->error_count == min_errors
234
0
                && mnet->last_active > min_errors_net->last_active) {
235
0
            min_errors_net = mnet;
236
0
            min_errors = mnet->error_count;
237
0
          }
238
0
          continue;
239
0
        }
240
0
      }
241
      /*
242
       * JRS 5/14/07 - If the destination is reachable and not in PF state, compare the
243
       *  cwnd of the destination to the highest cwnd seen thus far.  Store the
244
       *  destination with the higher cwnd value.  If the cwnd values are equal,
245
       *  randomly choose one of the two destinations.
246
       */
247
0
      if (max_cwnd < mnet->cwnd) {
248
0
        max_cwnd_net = mnet;
249
0
        max_cwnd = mnet->cwnd;
250
0
      } else if (max_cwnd == mnet->cwnd) {
251
0
        uint32_t rndval;
252
0
        uint8_t this_random;
253
254
0
        if (stcb->asoc.hb_random_idx > 3) {
255
0
          rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
256
0
          memcpy(stcb->asoc.hb_random_values, &rndval, sizeof(stcb->asoc.hb_random_values));
257
0
          this_random = stcb->asoc.hb_random_values[0];
258
0
          stcb->asoc.hb_random_idx++;
259
0
          stcb->asoc.hb_ect_randombit = 0;
260
0
        } else {
261
0
          this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
262
0
          stcb->asoc.hb_random_idx++;
263
0
          stcb->asoc.hb_ect_randombit = 0;
264
0
        }
265
0
        if (this_random % 2 == 1) {
266
0
          max_cwnd_net = mnet;
267
0
          max_cwnd = mnet->cwnd; /* Useless? */
268
0
        }
269
0
      }
270
0
    }
271
0
    if (max_cwnd_net == NULL) {
272
0
      if (min_errors_net == NULL) {
273
0
        return (net);
274
0
      }
275
0
      return (min_errors_net);
276
0
    } else {
277
0
      return (max_cwnd_net);
278
0
    }
279
0
  } /* JRS 5/14/07 - If mode is set to 1, use the CMT policy for choosing an alternate net. */
280
2.43k
  else if (mode == 1) {
281
0
    TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
282
0
      if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
283
0
          (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) {
284
        /*
285
         * will skip ones that are not-reachable or
286
         * unconfirmed
287
         */
288
0
        continue;
289
0
      }
290
0
      if (max_cwnd < mnet->cwnd) {
291
0
        max_cwnd_net = mnet;
292
0
        max_cwnd = mnet->cwnd;
293
0
      } else if (max_cwnd == mnet->cwnd) {
294
0
        uint32_t rndval;
295
0
        uint8_t this_random;
296
297
0
        if (stcb->asoc.hb_random_idx > 3) {
298
0
          rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
299
0
          memcpy(stcb->asoc.hb_random_values, &rndval,
300
0
              sizeof(stcb->asoc.hb_random_values));
301
0
          this_random = stcb->asoc.hb_random_values[0];
302
0
          stcb->asoc.hb_random_idx = 0;
303
0
          stcb->asoc.hb_ect_randombit = 0;
304
0
        } else {
305
0
          this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
306
0
          stcb->asoc.hb_random_idx++;
307
0
          stcb->asoc.hb_ect_randombit = 0;
308
0
        }
309
0
        if (this_random % 2) {
310
0
          max_cwnd_net = mnet;
311
0
          max_cwnd = mnet->cwnd;
312
0
        }
313
0
      }
314
0
    }
315
0
    if (max_cwnd_net) {
316
0
      return (max_cwnd_net);
317
0
    }
318
0
  }
319
  /* Look for an alternate net, which is active. */
320
2.43k
  if ((net != NULL) && ((net->dest_state & SCTP_ADDR_BEING_DELETED) == 0)) {
321
2.43k
    alt = TAILQ_NEXT(net, sctp_next);
322
2.43k
  } else {
323
0
    alt = TAILQ_FIRST(&stcb->asoc.nets);
324
0
  }
325
2.43k
  looped = false;
326
4.82k
  for (;;) {
327
4.82k
    if (alt == NULL) {
328
1.27k
      if (!looped) {
329
1.21k
        alt = TAILQ_FIRST(&stcb->asoc.nets);
330
1.21k
        looped = true;
331
1.21k
      }
332
      /* Definitely out of candidates. */
333
1.27k
      if (alt == NULL) {
334
67
        break;
335
67
      }
336
1.27k
    }
337
#if defined(__FreeBSD__) && !defined(__Userspace__)
338
    if (alt->ro.ro_nh == NULL) {
339
#else
340
4.75k
    if (alt->ro.ro_rt == NULL) {
341
0
#endif
342
0
      if (alt->ro._s_addr) {
343
0
        sctp_free_ifa(alt->ro._s_addr);
344
0
        alt->ro._s_addr = NULL;
345
0
      }
346
0
      alt->src_addr_selected = 0;
347
0
    }
348
4.75k
    if (((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) &&
349
#if defined(__FreeBSD__) && !defined(__Userspace__)
350
        (alt->ro.ro_nh != NULL) &&
351
#else
352
4.75k
        (alt->ro.ro_rt != NULL) &&
353
4.75k
#endif
354
4.75k
        ((alt->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) &&
355
4.75k
        (alt != net)) {
356
      /* Found an alternate net, which is reachable. */
357
2.37k
      break;
358
2.37k
    }
359
2.38k
    alt = TAILQ_NEXT(alt, sctp_next);
360
2.38k
  }
361
362
2.43k
  if (alt == NULL) {
363
    /*
364
     * In case no active alternate net has been found, look for
365
     * an alternate net, which is confirmed.
366
     */
367
67
    if ((net != NULL) && ((net->dest_state & SCTP_ADDR_BEING_DELETED) == 0)) {
368
67
      alt = TAILQ_NEXT(net, sctp_next);
369
67
    } else {
370
0
      alt = TAILQ_FIRST(&stcb->asoc.nets);
371
0
    }
372
67
    looped = false;
373
724
    for (;;) {
374
724
      if (alt == NULL) {
375
134
        if (!looped) {
376
67
          alt = TAILQ_FIRST(&stcb->asoc.nets);
377
67
          looped = true;
378
67
        }
379
        /* Definitely out of candidates. */
380
134
        if (alt == NULL) {
381
67
          break;
382
67
        }
383
134
      }
384
657
      if (((alt->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) &&
385
657
          (alt != net)) {
386
        /* Found an alternate net, which is confirmed. */
387
0
        break;
388
0
      }
389
657
      alt = TAILQ_NEXT(alt, sctp_next);
390
657
    }
391
67
  }
392
2.43k
  if (alt == NULL) {
393
    /*
394
     * In case no confirmed alternate net has been found, just
395
     * return net, if it is not being deleted. In the other case
396
     * just return the first net.
397
     */
398
67
    if ((net != NULL) && ((net->dest_state & SCTP_ADDR_BEING_DELETED) == 0)) {
399
67
      alt = net;
400
67
    }
401
67
    if (alt == NULL) {
402
0
      alt = TAILQ_FIRST(&stcb->asoc.nets);
403
0
    }
404
67
  }
405
2.43k
  return (alt);
406
2.43k
}
407
408
static void
409
sctp_backoff_on_timeout(struct sctp_tcb *stcb,
410
    struct sctp_nets *net,
411
    int win_probe,
412
    int num_marked, int num_abandoned)
413
0
{
414
0
  if (net->RTO == 0) {
415
0
    if (net->RTO_measured) {
416
0
      net->RTO = stcb->asoc.minrto;
417
0
    } else {
418
0
      net->RTO = stcb->asoc.initial_rto;
419
0
    }
420
0
  }
421
0
  net->RTO <<= 1;
422
0
  if (net->RTO > stcb->asoc.maxrto) {
423
0
    net->RTO = stcb->asoc.maxrto;
424
0
  }
425
0
  if ((win_probe == 0) && (num_marked || num_abandoned)) {
426
    /* We don't apply penalty to window probe scenarios */
427
    /* JRS - Use the congestion control given in the CC module */
428
0
    stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout(stcb, net);
429
0
  }
430
0
}
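
sctp_backoff_on_timeout() applies the usual exponential backoff: seed an unset RTO from minrto or initial_rto, double it, and clamp it to maxrto. A self-contained sketch of that arithmetic, with hypothetical parameter names standing in for the association fields:

#include <stdint.h>

static uint32_t
backed_off_rto(uint32_t rto, uint32_t seed_rto, uint32_t maxrto)
{
    /* seed_rto plays the role of minrto (RTT already measured) or
     * initial_rto (no measurement yet); maxrto is the ceiling. */
    if (rto == 0)
        rto = seed_rto;
    rto <<= 1;
    if (rto > maxrto)
        rto = maxrto;
    return (rto);
}
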
431
432
#ifndef INVARIANTS
433
static void
434
sctp_recover_sent_list(struct sctp_tcb *stcb)
435
{
436
  struct sctp_tmit_chunk *chk, *nchk;
437
  struct sctp_association *asoc;
438
439
  asoc = &stcb->asoc;
440
  TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
441
    if (SCTP_TSN_GE(asoc->last_acked_seq, chk->rec.data.tsn)) {
442
      SCTP_PRINTF("Found chk:%p tsn:%x <= last_acked_seq:%x\n",
443
                  (void *)chk, chk->rec.data.tsn, asoc->last_acked_seq);
444
      if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
445
        if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
446
          asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
447
        }
448
      }
449
      if ((asoc->strmout[chk->rec.data.sid].chunks_on_queues == 0) &&
450
          (asoc->strmout[chk->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
451
          TAILQ_EMPTY(&asoc->strmout[chk->rec.data.sid].outqueue)) {
452
        asoc->trigger_reset = 1;
453
      }
454
      TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
455
      if (PR_SCTP_ENABLED(chk->flags)) {
456
        if (asoc->pr_sctp_cnt != 0)
457
          asoc->pr_sctp_cnt--;
458
      }
459
      if (chk->data) {
460
        /*sa_ignore NO_NULL_CHK*/
461
        sctp_free_bufspace(stcb, asoc, chk, 1);
462
        sctp_m_freem(chk->data);
463
        chk->data = NULL;
464
        if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(chk->flags)) {
465
          asoc->sent_queue_cnt_removeable--;
466
        }
467
      }
468
      asoc->sent_queue_cnt--;
469
      sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
470
    }
471
  }
472
  SCTP_PRINTF("after recover order is as follows\n");
473
  TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
474
    SCTP_PRINTF("chk:%p TSN:%x\n", (void *)chk, chk->rec.data.tsn);
475
  }
476
}
477
#endif
478
479
static int
480
sctp_mark_all_for_resend(struct sctp_tcb *stcb,
481
    struct sctp_nets *net,
482
    struct sctp_nets *alt,
483
    int window_probe,
484
    int *num_marked,
485
    int *num_abandoned)
486
0
{
487
488
  /*
489
   * Mark all chunks (well not all) that were sent to *net for
490
   * retransmission. Move them to alt for their destination as well...
491
   * We only mark chunks that have been outstanding long enough to
492
   * have received feed-back.
493
   */
494
0
  struct sctp_tmit_chunk *chk, *nchk;
495
0
  struct sctp_nets *lnets;
496
0
  struct timeval now, min_wait, tv;
497
0
  int cur_rto;
498
0
  int cnt_abandoned;
499
0
  int audit_tf, num_mk, fir;
500
0
  unsigned int cnt_mk;
501
0
  uint32_t orig_flight, orig_tf;
502
0
  uint32_t tsnlast, tsnfirst;
503
#ifndef INVARIANTS
504
  int recovery_cnt = 0;
505
#endif
506
507
  /* none in flight now */
508
0
  audit_tf = 0;
509
0
  fir = 0;
510
  /*
511
   * figure out how long a data chunk must be pending before we can
512
   * mark it ..
513
   */
514
0
  (void)SCTP_GETTIME_TIMEVAL(&now);
515
  /* get cur rto in micro-seconds */
516
0
  cur_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
517
0
  cur_rto *= 1000;
518
0
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
519
0
    sctp_log_fr(cur_rto,
520
0
          stcb->asoc.peers_rwnd,
521
0
          window_probe,
522
0
          SCTP_FR_T3_MARK_TIME);
523
0
    sctp_log_fr(net->flight_size, 0, 0, SCTP_FR_CWND_REPORT);
524
0
    sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT);
525
0
  }
526
0
  tv.tv_sec = cur_rto / 1000000;
527
0
  tv.tv_usec = cur_rto % 1000000;
528
0
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
529
0
  timersub(&now, &tv, &min_wait);
530
#else
531
  min_wait = now;
532
  timevalsub(&min_wait, &tv);
533
#endif
534
0
  if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
535
    /*
536
     * if we hit here, we don't have enough seconds on the clock
537
     * to account for the RTO. We just let the lower seconds be
538
     * the bounds and don't worry about it. This may mean we
539
     * will mark a lot more than we should.
540
     */
541
0
    min_wait.tv_sec = min_wait.tv_usec = 0;
542
0
  }
543
0
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
544
0
    sctp_log_fr(cur_rto, (uint32_t)now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
545
0
    sctp_log_fr(0, (uint32_t)min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
546
0
  }
547
  /*
548
   * Our rwnd will be incorrect here since we are not adding back the
549
   * cnt * mbuf but we will fix that down below.
550
   */
551
0
  orig_flight = net->flight_size;
552
0
  orig_tf = stcb->asoc.total_flight;
553
554
0
  net->fast_retran_ip = 0;
555
  /* Now on to each chunk */
556
0
  cnt_abandoned = 0;
557
0
  num_mk = cnt_mk = 0;
558
0
  tsnfirst = tsnlast = 0;
559
#ifndef INVARIANTS
560
 start_again:
561
#endif
562
0
  TAILQ_FOREACH_SAFE(chk, &stcb->asoc.sent_queue, sctp_next, nchk) {
563
0
    if (SCTP_TSN_GE(stcb->asoc.last_acked_seq, chk->rec.data.tsn)) {
564
      /* Strange case our list got out of order? */
565
0
      SCTP_PRINTF("Our list is out of order? last_acked:%x chk:%x\n",
566
0
                  (unsigned int)stcb->asoc.last_acked_seq, (unsigned int)chk->rec.data.tsn);
567
0
#ifdef INVARIANTS
568
0
      panic("last acked >= chk on sent-Q");
569
#else
570
      recovery_cnt++;
571
      SCTP_PRINTF("Recover attempts a restart cnt:%d\n", recovery_cnt);
572
      sctp_recover_sent_list(stcb);
573
      if (recovery_cnt < 10) {
574
        goto start_again;
575
      } else {
576
        SCTP_PRINTF("Recovery fails %d times??\n", recovery_cnt);
577
      }
578
#endif
579
0
    }
580
0
    if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
581
      /*
582
       * found one to mark: If it is less than
583
       * DATAGRAM_ACKED it MUST not be a skipped or marked
584
       * TSN but instead one that is either already set
585
       * for retransmission OR one that needs
586
       * retransmission.
587
       */
588
589
      /* validate it's been outstanding long enough */
590
0
      if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
591
0
        sctp_log_fr(chk->rec.data.tsn,
592
0
              (uint32_t)chk->sent_rcv_time.tv_sec,
593
0
              chk->sent_rcv_time.tv_usec,
594
0
              SCTP_FR_T3_MARK_TIME);
595
0
      }
596
0
      if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) {
597
        /*
598
         * we have reached a chunk that was sent
599
         * some seconds past our min.. forget it we
600
         * will find no more to send.
601
         */
602
0
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
603
0
          sctp_log_fr(0,
604
0
                (uint32_t)chk->sent_rcv_time.tv_sec,
605
0
                chk->sent_rcv_time.tv_usec,
606
0
                SCTP_FR_T3_STOPPED);
607
0
        }
608
0
        continue;
609
0
      } else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) &&
610
0
           (window_probe == 0)) {
611
        /*
612
         * we must look at the micro seconds to
613
         * know.
614
         */
615
0
        if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
616
          /*
617
           * ok it was sent after our boundary
618
           * time.
619
           */
620
0
          continue;
621
0
        }
622
0
      }
623
0
      if (stcb->asoc.prsctp_supported && PR_SCTP_TTL_ENABLED(chk->flags)) {
624
        /* Is it expired? */
625
0
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
626
0
        if (timercmp(&now, &chk->rec.data.timetodrop, >)) {
627
#else
628
        if (timevalcmp(&now, &chk->rec.data.timetodrop, >)) {
629
#endif
630
          /* Yes so drop it */
631
0
          if (chk->data) {
632
0
            (void)sctp_release_pr_sctp_chunk(stcb,
633
0
                     chk,
634
0
                     1,
635
0
                     SCTP_SO_NOT_LOCKED);
636
0
            cnt_abandoned++;
637
0
          }
638
0
          continue;
639
0
        }
640
0
      }
641
0
      if (stcb->asoc.prsctp_supported && PR_SCTP_RTX_ENABLED(chk->flags)) {
642
        /* Has it been retransmitted tv_sec times? */
643
0
        if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) {
644
0
          if (chk->data) {
645
0
            (void)sctp_release_pr_sctp_chunk(stcb,
646
0
                     chk,
647
0
                     1,
648
0
                     SCTP_SO_NOT_LOCKED);
649
0
            cnt_abandoned++;
650
0
          }
651
0
          continue;
652
0
        }
653
0
      }
654
0
      if (chk->sent < SCTP_DATAGRAM_RESEND) {
655
0
        sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
656
0
        num_mk++;
657
0
        if (fir == 0) {
658
0
          fir = 1;
659
0
          tsnfirst = chk->rec.data.tsn;
660
0
        }
661
0
        tsnlast = chk->rec.data.tsn;
662
0
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
663
0
          sctp_log_fr(chk->rec.data.tsn, chk->snd_count,
664
0
                0, SCTP_FR_T3_MARKED);
665
0
        }
666
667
0
        if (chk->rec.data.chunk_was_revoked) {
668
          /* deflate the cwnd */
669
0
          chk->whoTo->cwnd -= chk->book_size;
670
0
          chk->rec.data.chunk_was_revoked = 0;
671
0
        }
672
0
        net->marked_retrans++;
673
0
        stcb->asoc.marked_retrans++;
674
0
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
675
0
          sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND_TO,
676
0
                   chk->whoTo->flight_size,
677
0
                   chk->book_size,
678
0
                   (uint32_t)(uintptr_t)chk->whoTo,
679
0
                   chk->rec.data.tsn);
680
0
        }
681
0
        sctp_flight_size_decrease(chk);
682
0
        sctp_total_flight_decrease(stcb, chk);
683
0
        stcb->asoc.peers_rwnd += chk->send_size;
684
0
        stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
685
0
      }
686
0
      chk->sent = SCTP_DATAGRAM_RESEND;
687
0
      chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
688
0
      SCTP_STAT_INCR(sctps_markedretrans);
689
690
      /* reset the TSN for striking and other FR stuff */
691
0
      chk->rec.data.doing_fast_retransmit = 0;
692
      /* Clear any time so NO RTT is being done */
693
694
0
      if (chk->do_rtt) {
695
0
        if (chk->whoTo->rto_needed == 0) {
696
0
          chk->whoTo->rto_needed = 1;
697
0
        }
698
0
      }
699
0
      chk->do_rtt = 0;
700
0
      if (alt != net) {
701
0
        sctp_free_remote_addr(chk->whoTo);
702
0
        chk->no_fr_allowed = 1;
703
0
        chk->whoTo = alt;
704
0
        atomic_add_int(&alt->ref_count, 1);
705
0
      } else {
706
0
        chk->no_fr_allowed = 0;
707
0
        if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
708
0
          chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
709
0
        } else {
710
0
          chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.tsn;
711
0
        }
712
0
      }
713
      /* CMT: Do not allow FRs on retransmitted TSNs.
714
       */
715
0
      if (stcb->asoc.sctp_cmt_on_off > 0) {
716
0
        chk->no_fr_allowed = 1;
717
0
      }
718
#ifdef THIS_SHOULD_NOT_BE_DONE
719
    } else if (chk->sent == SCTP_DATAGRAM_ACKED) {
720
      /* remember highest acked one */
721
      could_be_sent = chk;
722
#endif
723
0
    }
724
0
    if (chk->sent == SCTP_DATAGRAM_RESEND) {
725
0
      cnt_mk++;
726
0
    }
727
0
  }
728
0
  if ((orig_flight - net->flight_size) != (orig_tf - stcb->asoc.total_flight)) {
729
    /* we did not subtract the same things? */
730
0
    audit_tf = 1;
731
0
  }
732
733
0
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
734
0
    sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
735
0
  }
736
#ifdef SCTP_DEBUG
737
  if (num_mk) {
738
    SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n",
739
      tsnlast);
740
    SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%u\n",
741
      num_mk,
742
      stcb->asoc.peers_rwnd);
743
  }
744
#endif
745
0
  *num_marked = num_mk;
746
0
  *num_abandoned = cnt_abandoned;
747
  /* Now check for an ECN Echo that may be stranded, and
748
   * include the cnt_mk'd to have all resends in the
749
   * control queue.
750
   */
751
0
  TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
752
0
    if (chk->sent == SCTP_DATAGRAM_RESEND) {
753
0
      cnt_mk++;
754
0
    }
755
0
    if ((chk->whoTo == net) &&
756
0
        (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
757
0
      sctp_free_remote_addr(chk->whoTo);
758
0
      chk->whoTo = alt;
759
0
      if (chk->sent != SCTP_DATAGRAM_RESEND) {
760
0
        chk->sent = SCTP_DATAGRAM_RESEND;
761
0
        chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
762
0
        sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
763
0
        cnt_mk++;
764
0
      }
765
0
      atomic_add_int(&alt->ref_count, 1);
766
0
    }
767
0
  }
768
#ifdef THIS_SHOULD_NOT_BE_DONE
769
  if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) {
770
    /* fix it so we retransmit the highest acked anyway */
771
    sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
772
    cnt_mk++;
773
    could_be_sent->sent = SCTP_DATAGRAM_RESEND;
774
  }
775
#endif
776
0
  if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
777
0
#ifdef INVARIANTS
778
0
    SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d we marked:%d this time\n",
779
0
          cnt_mk, stcb->asoc.sent_queue_retran_cnt, num_mk);
780
0
#endif
781
0
#ifndef SCTP_AUDITING_ENABLED
782
0
    stcb->asoc.sent_queue_retran_cnt = cnt_mk;
783
0
#endif
784
0
  }
785
0
  if (audit_tf) {
786
0
    SCTPDBG(SCTP_DEBUG_TIMER4,
787
0
      "Audit total flight due to negative value net:%p\n",
788
0
      (void *)net);
789
0
    stcb->asoc.total_flight = 0;
790
0
    stcb->asoc.total_flight_count = 0;
791
    /* Clear all networks flight size */
792
0
    TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
793
0
      lnets->flight_size = 0;
794
0
      SCTPDBG(SCTP_DEBUG_TIMER4,
795
0
        "Net:%p c-f cwnd:%d ssthresh:%d\n",
796
0
        (void *)lnets, lnets->cwnd, lnets->ssthresh);
797
0
    }
798
0
    TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
799
0
      if (chk->sent < SCTP_DATAGRAM_RESEND) {
800
0
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
801
0
          sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
802
0
                   chk->whoTo->flight_size,
803
0
                   chk->book_size,
804
0
                   (uint32_t)(uintptr_t)chk->whoTo,
805
0
                   chk->rec.data.tsn);
806
0
        }
807
808
0
        sctp_flight_size_increase(chk);
809
0
        sctp_total_flight_increase(stcb, chk);
810
0
      }
811
0
    }
812
0
  }
813
  /* We return 1 if we only have a window probe outstanding */
814
0
  return (0);
815
0
}
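
Outside of window probes, the marking loop above only considers chunks that have been outstanding for at least one RTO: min_wait is the current time minus the RTO, and anything sent at or after that boundary is skipped. A small sketch of that eligibility test under the same timeval semantics (the helper name is illustrative, not part of the file):

#include <sys/time.h>

static int
old_enough_to_mark(struct timeval sent, struct timeval min_wait)
{
    /* Mark only if the chunk was sent strictly before min_wait:
     * compare seconds first, then microseconds on a tie, mirroring
     * the continue/skip branches in the loop above. */
    if (sent.tv_sec != min_wait.tv_sec)
        return (sent.tv_sec < min_wait.tv_sec);
    return (sent.tv_usec < min_wait.tv_usec);
}
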
816
817
int
818
sctp_t3rxt_timer(struct sctp_inpcb *inp,
819
    struct sctp_tcb *stcb,
820
    struct sctp_nets *net)
821
0
{
822
0
  struct sctp_nets *alt;
823
0
  int win_probe, num_mk, num_abandoned;
824
825
0
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
826
0
    sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
827
0
  }
828
0
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
829
0
    struct sctp_nets *lnet;
830
831
0
    TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
832
0
      if (net == lnet) {
833
0
        sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3);
834
0
      } else {
835
0
        sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3);
836
0
      }
837
0
    }
838
0
  }
839
  /* Find an alternate and mark those for retransmission */
840
0
  if ((stcb->asoc.peers_rwnd == 0) &&
841
0
      (stcb->asoc.total_flight < net->mtu)) {
842
0
    SCTP_STAT_INCR(sctps_timowindowprobe);
843
0
    win_probe = 1;
844
0
  } else {
845
0
    win_probe = 0;
846
0
  }
847
848
0
  if (win_probe == 0) {
849
    /* We don't do normal threshold management on window probes */
850
0
    if (sctp_threshold_management(inp, stcb, net,
851
0
        stcb->asoc.max_send_times)) {
852
      /* Association was destroyed */
853
0
      return (1);
854
0
    } else {
855
0
      if (net != stcb->asoc.primary_destination) {
856
        /* send an immediate HB if our RTO is stale */
857
0
        struct timeval now;
858
0
        uint32_t ms_goneby;
859
860
0
        (void)SCTP_GETTIME_TIMEVAL(&now);
861
0
        if (net->last_sent_time.tv_sec) {
862
0
          ms_goneby = (uint32_t)(now.tv_sec - net->last_sent_time.tv_sec) * 1000;
863
0
        } else {
864
0
          ms_goneby = 0;
865
0
        }
866
0
        if ((net->dest_state & SCTP_ADDR_PF) == 0) {
867
0
          if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
868
            /*
869
             * no recent feedback in an RTO or
870
             * more, request an RTT update
871
             */
872
0
            sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
873
0
          }
874
0
        }
875
0
      }
876
0
    }
877
0
  } else {
878
    /*
879
     * For a window probe we don't penalize the nets but only
880
     * the association. This may fail it if SACKs are not coming
881
     * back. If SACKs are coming with rwnd locked at 0, we will
882
     * continue to hold things waiting for rwnd to rise
883
     */
884
0
    if (sctp_threshold_management(inp, stcb, NULL,
885
0
        stcb->asoc.max_send_times)) {
886
      /* Association was destroyed */
887
0
      return (1);
888
0
    }
889
0
  }
890
0
  if (stcb->asoc.sctp_cmt_on_off > 0) {
891
0
    if (net->pf_threshold < net->failure_threshold) {
892
0
      alt = sctp_find_alternate_net(stcb, net, 2);
893
0
    } else {
894
            /*
895
       * CMT: Using RTX_SSTHRESH policy for CMT.
896
       * If CMT is being used, then pick dest with
897
       * largest ssthresh for any retransmission.
898
       */
899
0
      alt = sctp_find_alternate_net(stcb, net, 1);
900
      /*
901
       * CUCv2: If a different dest is picked for
902
       * the retransmission, then new
903
       * (rtx-)pseudo_cumack needs to be tracked
904
       * for orig dest. Let CUCv2 track new (rtx-)
905
       * pseudo-cumack always.
906
       */
907
0
      net->find_pseudo_cumack = 1;
908
0
      net->find_rtx_pseudo_cumack = 1;
909
0
    }
910
0
  } else {
911
0
    alt = sctp_find_alternate_net(stcb, net, 0);
912
0
  }
913
914
0
  num_mk = 0;
915
0
  num_abandoned = 0;
916
0
  (void)sctp_mark_all_for_resend(stcb, net, alt, win_probe,
917
0
                                 &num_mk, &num_abandoned);
918
  /* FR Loss recovery just ended with the T3. */
919
0
  stcb->asoc.fast_retran_loss_recovery = 0;
920
921
  /* CMT FR loss recovery ended with the T3 */
922
0
  net->fast_retran_loss_recovery = 0;
923
0
  if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
924
0
      (net->flight_size == 0)) {
925
0
    (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net);
926
0
  }
927
928
  /*
929
   * setup the sat loss recovery that prevents satellite cwnd advance.
930
   */
931
0
  stcb->asoc.sat_t3_loss_recovery = 1;
932
0
  stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;
933
934
  /* Backoff the timer and cwnd */
935
0
  sctp_backoff_on_timeout(stcb, net, win_probe, num_mk, num_abandoned);
936
0
  if (((net->dest_state & SCTP_ADDR_REACHABLE) == 0) ||
937
0
      (net->dest_state & SCTP_ADDR_PF)) {
938
    /* Move all pending over too */
939
0
    sctp_move_chunks_from_net(stcb, net);
940
941
    /* Get the address that failed, to
942
     * force a new src address selection and
943
     * a route allocation.
944
     */
945
0
    if (net->ro._s_addr != NULL) {
946
0
      sctp_free_ifa(net->ro._s_addr);
947
0
      net->ro._s_addr = NULL;
948
0
    }
949
0
    net->src_addr_selected = 0;
950
951
    /* Force a route allocation too */
952
#if defined(__FreeBSD__) && !defined(__Userspace__)
953
    RO_NHFREE(&net->ro);
954
#else
955
0
    if (net->ro.ro_rt != NULL) {
956
0
      RTFREE(net->ro.ro_rt);
957
0
      net->ro.ro_rt = NULL;
958
0
    }
959
0
#endif
960
961
    /* Was it our primary? */
962
0
    if ((stcb->asoc.primary_destination == net) && (alt != net)) {
963
      /*
964
       * Yes, note it as such and find an alternate. Note:
965
       * this means HB code must use this to resend the
966
       * primary if it goes active AND if someone does a
967
       * change-primary then this flag must be cleared
968
       * from any net structures.
969
       */
970
0
      if (stcb->asoc.alternate != NULL) {
971
0
        sctp_free_remote_addr(stcb->asoc.alternate);
972
0
      }
973
0
      stcb->asoc.alternate = alt;
974
0
      atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
975
0
    }
976
0
  }
977
  /*
978
   * Special case for cookie-echo'ed case, we don't do output but must
979
   * await the COOKIE-ACK before retransmission
980
   */
981
0
  if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) {
982
    /*
983
     * Here we just reset the timer and start again since we
984
     * have not established the asoc
985
     */
986
0
    sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
987
0
    return (0);
988
0
  }
989
0
  if (stcb->asoc.prsctp_supported) {
990
0
    struct sctp_tmit_chunk *lchk;
991
992
0
    lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
993
    /* C3. See if we need to send a Fwd-TSN */
994
0
    if (SCTP_TSN_GT(stcb->asoc.advanced_peer_ack_point, stcb->asoc.last_acked_seq)) {
995
0
      send_forward_tsn(stcb, &stcb->asoc);
996
0
      for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
997
0
        if (lchk->whoTo != NULL) {
998
0
          break;
999
0
        }
1000
0
      }
1001
0
      if (lchk != NULL) {
1002
        /* Assure a timer is up */
1003
0
        sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
1004
0
      }
1005
0
    }
1006
0
  }
1007
0
  if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
1008
0
    sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX);
1009
0
  }
1010
0
  return (0);
1011
0
}
1012
1013
int
1014
sctp_t1init_timer(struct sctp_inpcb *inp,
1015
    struct sctp_tcb *stcb,
1016
    struct sctp_nets *net)
1017
0
{
1018
  /* bump the thresholds */
1019
0
  if (stcb->asoc.delayed_connection) {
1020
    /*
1021
     * special hook for delayed connection. The library did NOT
1022
     * complete the rest of its sends.
1023
     */
1024
0
    stcb->asoc.delayed_connection = 0;
1025
0
    sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED);
1026
0
    return (0);
1027
0
  }
1028
0
  if (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT) {
1029
0
    return (0);
1030
0
  }
1031
0
  if (sctp_threshold_management(inp, stcb, net,
1032
0
      stcb->asoc.max_init_times)) {
1033
    /* Association was destroyed */
1034
0
    return (1);
1035
0
  }
1036
0
  stcb->asoc.dropped_special_cnt = 0;
1037
0
  sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0, 0);
1038
0
  if (stcb->asoc.initial_init_rto_max < net->RTO) {
1039
0
    net->RTO = stcb->asoc.initial_init_rto_max;
1040
0
  }
1041
0
  if (stcb->asoc.numnets > 1) {
1042
    /* If we have more than one addr use it */
1043
0
    struct sctp_nets *alt;
1044
1045
0
    alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0);
1046
0
    if (alt != stcb->asoc.primary_destination) {
1047
0
      sctp_move_chunks_from_net(stcb, stcb->asoc.primary_destination);
1048
0
      stcb->asoc.primary_destination = alt;
1049
0
    }
1050
0
  }
1051
  /* Send out a new init */
1052
0
  sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED);
1053
0
  return (0);
1054
0
}
1055
1056
/*
1057
 * For cookie and asconf we actually need to find and mark for resend, then
1058
 * increment the resend counter (after all the threshold management stuff of
1059
 * course).
1060
 */
1061
int
1062
sctp_cookie_timer(struct sctp_inpcb *inp,
1063
    struct sctp_tcb *stcb,
1064
    struct sctp_nets *net SCTP_UNUSED)
1065
0
{
1066
0
  struct sctp_nets *alt;
1067
0
  struct sctp_tmit_chunk *cookie;
1068
1069
  /* first before all else we must find the cookie */
1070
0
  TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
1071
0
    if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
1072
0
      break;
1073
0
    }
1074
0
  }
1075
0
  if (cookie == NULL) {
1076
0
    if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) {
1077
      /* FOOBAR! */
1078
0
      struct mbuf *op_err;
1079
1080
0
      op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1081
0
                                   "Cookie timer expired, but no cookie");
1082
0
      inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_3;
1083
0
      sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
1084
0
    } else {
1085
0
#ifdef INVARIANTS
1086
0
      panic("Cookie timer expires in wrong state?");
1087
#else
1088
      SCTP_PRINTF("Strange in state %d not cookie-echoed yet c-e timer expires?\n", SCTP_GET_STATE(stcb));
1089
      return (0);
1090
#endif
1091
0
    }
1092
0
    return (0);
1093
0
  }
1094
  /* Ok we found the cookie, threshold management next */
1095
0
  if (sctp_threshold_management(inp, stcb, cookie->whoTo,
1096
0
      stcb->asoc.max_init_times)) {
1097
    /* Assoc is over */
1098
0
    return (1);
1099
0
  }
1100
  /*
1101
   * Cleared threshold management, now let's back off the address
1102
   * and select an alternate
1103
   */
1104
0
  stcb->asoc.dropped_special_cnt = 0;
1105
0
  sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0, 0);
1106
0
  alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0);
1107
0
  if (alt != cookie->whoTo) {
1108
0
    sctp_free_remote_addr(cookie->whoTo);
1109
0
    cookie->whoTo = alt;
1110
0
    atomic_add_int(&alt->ref_count, 1);
1111
0
  }
1112
  /* Now mark the retran info */
1113
0
  if (cookie->sent != SCTP_DATAGRAM_RESEND) {
1114
0
    sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1115
0
  }
1116
0
  cookie->sent = SCTP_DATAGRAM_RESEND;
1117
0
  cookie->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1118
  /*
1119
   * Now call the output routine to kick out the cookie again, Note we
1120
   * don't mark any chunks for retran so that FR will need to kick in
1121
   * to move these (or a send timer).
1122
   */
1123
0
  return (0);
1124
0
}
1125
1126
int
1127
sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
1128
0
{
1129
0
  struct sctp_nets *alt, *net;
1130
0
  struct sctp_tmit_chunk *strrst = NULL, *chk = NULL;
1131
1132
0
  if (stcb->asoc.stream_reset_outstanding == 0) {
1133
0
    return (0);
1134
0
  }
1135
  /* find the existing STRRESET, we use the seq number we sent out on */
1136
0
  (void)sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst);
1137
0
  if (strrst == NULL) {
1138
0
    return (0);
1139
0
  }
1140
0
  net = strrst->whoTo;
1141
  /* do threshold management */
1142
0
  if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1143
    /* Assoc is over */
1144
0
    return (1);
1145
0
  }
1146
  /*
1147
   * Cleared threshold management, now let's back off the address
1148
   * and select an alternate
1149
   */
1150
0
  sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
1151
0
  alt = sctp_find_alternate_net(stcb, net, 0);
1152
0
  strrst->whoTo = alt;
1153
0
  atomic_add_int(&alt->ref_count, 1);
1154
1155
  /* See if an ECN Echo is also stranded */
1156
0
  TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
1157
0
    if ((chk->whoTo == net) &&
1158
0
        (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
1159
0
      sctp_free_remote_addr(chk->whoTo);
1160
0
      if (chk->sent != SCTP_DATAGRAM_RESEND) {
1161
0
        chk->sent = SCTP_DATAGRAM_RESEND;
1162
0
        chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1163
0
        sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1164
0
      }
1165
0
      chk->whoTo = alt;
1166
0
      atomic_add_int(&alt->ref_count, 1);
1167
0
    }
1168
0
  }
1169
0
  if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
1170
    /*
1171
     * If the address went un-reachable, we need to move to
1172
     * alternates for ALL chk's in queue
1173
     */
1174
0
    sctp_move_chunks_from_net(stcb, net);
1175
0
  }
1176
0
  sctp_free_remote_addr(net);
1177
1178
  /* mark the retran info */
1179
0
  if (strrst->sent != SCTP_DATAGRAM_RESEND)
1180
0
    sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1181
0
  strrst->sent = SCTP_DATAGRAM_RESEND;
1182
0
  strrst->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1183
1184
  /* restart the timer */
1185
0
  sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, alt);
1186
0
  return (0);
1187
0
}
1188
1189
int
1190
sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1191
      struct sctp_nets *net)
1192
0
{
1193
0
  struct sctp_nets *alt;
1194
0
  struct sctp_tmit_chunk *asconf, *chk;
1195
1196
  /* is this a first send, or a retransmission? */
1197
0
  if (TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) {
1198
    /* compose a new ASCONF chunk and send it */
1199
0
    sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED);
1200
0
  } else {
1201
    /*
1202
     * Retransmission of the existing ASCONF is needed
1203
     */
1204
1205
    /* find the existing ASCONF */
1206
0
    asconf = TAILQ_FIRST(&stcb->asoc.asconf_send_queue);
1207
0
    if (asconf == NULL) {
1208
0
      return (0);
1209
0
    }
1210
0
    net = asconf->whoTo;
1211
    /* do threshold management */
1212
0
    if (sctp_threshold_management(inp, stcb, net,
1213
0
        stcb->asoc.max_send_times)) {
1214
      /* Assoc is over */
1215
0
      return (1);
1216
0
    }
1217
0
    if (asconf->snd_count > stcb->asoc.max_send_times) {
1218
      /*
1219
       * Something is rotten: our peer is not responding to
1220
       * ASCONFs but apparently is to other chunks.  i.e. it
1221
       * is not properly handling the chunk type upper bits.
1222
       * Mark this peer as ASCONF incapable and cleanup.
1223
       */
1224
0
      SCTPDBG(SCTP_DEBUG_TIMER1, "asconf_timer: Peer has not responded to our repeated ASCONFs\n");
1225
0
      sctp_asconf_cleanup(stcb);
1226
0
      return (0);
1227
0
    }
1228
    /*
1229
     * cleared threshold management, so now backoff the net and
1230
     * select an alternate
1231
     */
1232
0
    sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
1233
0
    alt = sctp_find_alternate_net(stcb, net, 0);
1234
0
    if (asconf->whoTo != alt) {
1235
0
      asconf->whoTo = alt;
1236
0
      atomic_add_int(&alt->ref_count, 1);
1237
0
    }
1238
1239
    /* See if an ECN Echo is also stranded */
1240
0
    TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
1241
0
      if ((chk->whoTo == net) &&
1242
0
          (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
1243
0
        sctp_free_remote_addr(chk->whoTo);
1244
0
        chk->whoTo = alt;
1245
0
        if (chk->sent != SCTP_DATAGRAM_RESEND) {
1246
0
          chk->sent = SCTP_DATAGRAM_RESEND;
1247
0
          chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1248
0
          sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1249
0
        }
1250
0
        atomic_add_int(&alt->ref_count, 1);
1251
0
      }
1252
0
    }
1253
0
    TAILQ_FOREACH(chk, &stcb->asoc.asconf_send_queue, sctp_next) {
1254
0
      if (chk->whoTo != alt) {
1255
0
        sctp_free_remote_addr(chk->whoTo);
1256
0
        chk->whoTo = alt;
1257
0
        atomic_add_int(&alt->ref_count, 1);
1258
0
      }
1259
0
      if (asconf->sent != SCTP_DATAGRAM_RESEND && chk->sent != SCTP_DATAGRAM_UNSENT)
1260
0
        sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1261
0
      chk->sent = SCTP_DATAGRAM_RESEND;
1262
0
      chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1263
0
    }
1264
0
    if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
1265
      /*
1266
       * If the address went un-reachable, we need to move
1267
       * to the alternate for ALL chunks in queue
1268
       */
1269
0
      sctp_move_chunks_from_net(stcb, net);
1270
0
    }
1271
0
    sctp_free_remote_addr(net);
1272
1273
    /* mark the retran info */
1274
0
    if (asconf->sent != SCTP_DATAGRAM_RESEND)
1275
0
      sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1276
0
    asconf->sent = SCTP_DATAGRAM_RESEND;
1277
0
    asconf->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1278
1279
    /* send another ASCONF if any are queued and we can */
1280
0
    sctp_send_asconf(stcb, alt, SCTP_ADDR_NOT_LOCKED);
1281
0
  }
1282
0
  return (0);
1283
0
}
1284
1285
/* Mobility adaptation */
1286
void
1287
sctp_delete_prim_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
1288
0
{
1289
0
  if (stcb->asoc.deleted_primary == NULL) {
1290
0
    SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: deleted_primary is not stored...\n");
1291
0
    sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
1292
0
    return;
1293
0
  }
1294
0
  SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: finished to keep deleted primary ");
1295
0
  SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa);
1296
0
  sctp_free_remote_addr(stcb->asoc.deleted_primary);
1297
0
  stcb->asoc.deleted_primary = NULL;
1298
0
  sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
1299
0
  return;
1300
0
}
1301
1302
/*
1303
 * For the shutdown and shutdown-ack, we do not keep one around on the
1304
 * control queue. This means we must generate a new one and call the general
1305
 * chunk output routine, AFTER having done threshold management.
1306
 * It is assumed that net is non-NULL.
1307
 */
1308
int
1309
sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1310
    struct sctp_nets *net)
1311
0
{
1312
0
  struct sctp_nets *alt;
1313
1314
  /* first threshold management */
1315
0
  if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1316
    /* Assoc is over */
1317
0
    return (1);
1318
0
  }
1319
0
  sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
1320
  /* second select an alternative */
1321
0
  alt = sctp_find_alternate_net(stcb, net, 0);
1322
1323
  /* third generate a shutdown into the queue for our net */
1324
0
  sctp_send_shutdown(stcb, alt);
1325
1326
  /* fourth restart timer */
1327
0
  sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
1328
0
  return (0);
1329
0
}
1330
1331
int
1332
sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1333
    struct sctp_nets *net)
1334
0
{
1335
0
  struct sctp_nets *alt;
1336
1337
  /* first threshold management */
1338
0
  if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1339
    /* Assoc is over */
1340
0
    return (1);
1341
0
  }
1342
0
  sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
1343
  /* second select an alternative */
1344
0
  alt = sctp_find_alternate_net(stcb, net, 0);
1345
1346
  /* third generate a shutdown-ack into the queue for our net */
1347
0
  sctp_send_shutdown_ack(stcb, alt);
1348
1349
  /* fourth restart timer */
1350
0
  sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
1351
0
  return (0);
1352
0
}
1353
1354
static void
1355
sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
1356
0
{
1357
0
  struct sctp_stream_queue_pending *sp;
1358
0
  unsigned int i, chks_in_queue = 0;
1359
0
  int being_filled = 0;
1360
1361
0
  KASSERT(inp != NULL, ("inp is NULL"));
1362
0
  KASSERT(stcb != NULL, ("stcb is NULL"));
1363
0
  SCTP_TCB_LOCK_ASSERT(stcb);
1364
0
  KASSERT(TAILQ_EMPTY(&stcb->asoc.send_queue), ("send_queue not empty"));
1365
0
  KASSERT(TAILQ_EMPTY(&stcb->asoc.sent_queue), ("sent_queue not empty"));
1366
1367
0
  if (stcb->asoc.sent_queue_retran_cnt) {
1368
0
    SCTP_PRINTF("Hmm, sent_queue_retran_cnt is non-zero %d\n",
1369
0
          stcb->asoc.sent_queue_retran_cnt);
1370
0
    stcb->asoc.sent_queue_retran_cnt = 0;
1371
0
  }
1372
0
  if (stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, &stcb->asoc)) {
1373
    /* No stream scheduler information, initialize scheduler */
1374
0
    stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc);
1375
0
    if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, &stcb->asoc)) {
1376
      /* yep, we lost a stream or two */
1377
0
      SCTP_PRINTF("Found additional streams NOT managed by scheduler, corrected\n");
1378
0
    } else {
1379
      /* no streams lost */
1380
0
      stcb->asoc.total_output_queue_size = 0;
1381
0
    }
1382
0
  }
1383
  /* Check to see if some data queued, if so report it */
1384
0
  for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
1385
0
    if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
1386
0
      TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
1387
0
        if (sp->msg_is_complete)
1388
0
          being_filled++;
1389
0
        chks_in_queue++;
1390
0
      }
1391
0
    }
1392
0
  }
1393
0
  if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
1394
0
    SCTP_PRINTF("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
1395
0
          stcb->asoc.stream_queue_cnt, chks_in_queue);
1396
0
  }
1397
0
  if (chks_in_queue) {
1398
    /* call the output queue function */
1399
0
    sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1400
0
    if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
1401
0
        (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
1402
      /*
1403
       * Probably should go in and make it go back through
1404
       * and add fragments allowed
1405
       */
1406
0
      if (being_filled == 0) {
1407
0
        SCTP_PRINTF("Still nothing moved %d chunks are stuck\n",
1408
0
              chks_in_queue);
1409
0
      }
1410
0
    }
1411
0
  } else {
1412
0
    SCTP_PRINTF("Found no chunks on any queue tot:%lu\n",
1413
0
          (u_long)stcb->asoc.total_output_queue_size);
1414
0
    stcb->asoc.total_output_queue_size = 0;
1415
0
  }
1416
0
}
1417
1418
int
1419
sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1420
    struct sctp_nets *net)
1421
0
{
1422
0
  bool net_was_pf;
1423
1424
0
  net_was_pf = (net->dest_state & SCTP_ADDR_PF) != 0;
1425
0
  if (net->hb_responded == 0) {
1426
0
    if (net->ro._s_addr != NULL) {
1427
      /* Invalidate the src address if we did not get
1428
       * a response last time.
1429
       */
1430
0
      sctp_free_ifa(net->ro._s_addr);
1431
0
      net->ro._s_addr = NULL;
1432
0
      net->src_addr_selected = 0;
1433
0
    }
1434
0
    sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
1435
0
    if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1436
      /* Assoc is over */
1437
0
      return (1);
1438
0
    }
1439
0
  }
1440
  /* Zero PBA, if it needs it */
1441
0
  if (net->partial_bytes_acked > 0) {
1442
0
    net->partial_bytes_acked = 0;
1443
0
  }
1444
0
  if ((stcb->asoc.total_output_queue_size > 0) &&
1445
0
      (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
1446
0
      (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
1447
0
    sctp_audit_stream_queues_for_size(inp, stcb);
1448
0
  }
1449
0
  if ((((net->dest_state & SCTP_ADDR_NOHB) == 0) ||
1450
0
       (net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
1451
0
      (net_was_pf || ((net->dest_state & SCTP_ADDR_PF) == 0))) {
1452
    /* When moving to PF during threshold management, a HB has been
1453
       queued in that routine. */
1454
0
    uint32_t ms_gone_by;
1455
1456
0
    if ((net->last_sent_time.tv_sec > 0) ||
1457
0
        (net->last_sent_time.tv_usec > 0)) {
1458
#if defined(__FreeBSD__) && !defined(__Userspace__)
1459
      struct timeval diff;
1460
1461
      SCTP_GETTIME_TIMEVAL(&diff);
1462
      timevalsub(&diff, &net->last_sent_time);
1463
#else
1464
0
      struct timeval diff, now;
1465
1466
0
      SCTP_GETTIME_TIMEVAL(&now);
1467
0
      timersub(&now, &net->last_sent_time, &diff);
1468
0
#endif
1469
0
      ms_gone_by = (uint32_t)(diff.tv_sec * 1000) +
1470
0
                   (uint32_t)(diff.tv_usec / 1000);
1471
0
    } else {
1472
0
      ms_gone_by = 0xffffffff;
1473
0
    }
1474
0
    if ((ms_gone_by >= net->heart_beat_delay) ||
1475
0
        (net->dest_state & SCTP_ADDR_UNCONFIRMED) ||
1476
0
        (net->dest_state & SCTP_ADDR_PF)) {
1477
0
      sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
1478
0
    }
1479
0
  }
1480
0
  return (0);
1481
0
}
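
The heartbeat decision above turns on how many milliseconds have elapsed since the last send on the path. A self-contained sketch of that elapsed-time computation (same arithmetic as the diff handling in the listing; the helper name is hypothetical):

#include <stdint.h>
#include <sys/time.h>

static uint32_t
elapsed_ms(struct timeval now, struct timeval last)
{
    struct timeval diff;

    /* Whole seconds contribute 1000 ms each; microseconds are
     * truncated down to milliseconds, as in the listing. */
    timersub(&now, &last, &diff);
    return ((uint32_t)(diff.tv_sec * 1000) + (uint32_t)(diff.tv_usec / 1000));
}
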
1482
1483
void
1484
sctp_pathmtu_timer(struct sctp_inpcb *inp,
1485
    struct sctp_tcb *stcb,
1486
    struct sctp_nets *net)
1487
0
{
1488
0
  uint32_t next_mtu, mtu;
1489
1490
0
  next_mtu = sctp_get_next_mtu(net->mtu);
1491
1492
0
  if ((next_mtu > net->mtu) && (net->port == 0)) {
1493
0
    if ((net->src_addr_selected == 0) ||
1494
0
        (net->ro._s_addr == NULL) ||
1495
0
        (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
1496
0
      if ((net->ro._s_addr != NULL) && (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
1497
0
        sctp_free_ifa(net->ro._s_addr);
1498
0
        net->ro._s_addr = NULL;
1499
0
        net->src_addr_selected = 0;
1500
0
      } else  if (net->ro._s_addr == NULL) {
1501
#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
1502
        if (net->ro._l_addr.sa.sa_family == AF_INET6) {
1503
          struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
1504
          /* KAME hack: embed scopeid */
1505
#if defined(__APPLE__) && !defined(__Userspace__)
1506
#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
1507
          (void)in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL);
1508
#else
1509
          (void)in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL);
1510
#endif
1511
#elif defined(SCTP_KAME)
1512
          (void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
1513
#else
1514
          (void)in6_embedscope(&sin6->sin6_addr, sin6);
1515
#endif
1516
        }
1517
#endif
1518
1519
0
        net->ro._s_addr = sctp_source_address_selection(inp,
1520
0
                    stcb,
1521
0
                    (sctp_route_t *)&net->ro,
1522
0
                    net, 0, stcb->asoc.vrf_id);
1523
#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
1524
        if (net->ro._l_addr.sa.sa_family == AF_INET6) {
1525
          struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
1526
#ifdef SCTP_KAME
1527
          (void)sa6_recoverscope(sin6);
1528
#else
1529
          (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
1530
#endif  /* SCTP_KAME */
1531
        }
1532
#endif  /* INET6 */
1533
0
      }
1534
0
      if (net->ro._s_addr)
1535
0
        net->src_addr_selected = 1;
1536
0
    }
1537
0
    if (net->ro._s_addr) {
1538
#if defined(__FreeBSD__) && !defined(__Userspace__)
1539
      mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._s_addr.sa, net->ro.ro_nh);
1540
#else
1541
0
      mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._s_addr.sa, net->ro.ro_rt);
1542
0
#endif
1543
0
#if defined(INET) || defined(INET6)
1544
0
      if (net->port) {
1545
0
        mtu -= sizeof(struct udphdr);
1546
0
      }
1547
0
#endif
1548
0
      if (mtu > next_mtu) {
1549
0
        net->mtu = next_mtu;
1550
0
      } else {
1551
0
        net->mtu = mtu;
1552
0
      }
1553
0
    }
1554
0
  }
1555
  /* restart the timer */
1556
0
  sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
1557
0
}
1558
1559
void
1560
sctp_autoclose_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
1561
0
{
1562
0
  struct timeval tn, *tim_touse;
1563
0
  struct sctp_association *asoc;
1564
0
  uint32_t ticks_gone_by;
1565
1566
0
  (void)SCTP_GETTIME_TIMEVAL(&tn);
1567
0
  if (stcb->asoc.sctp_autoclose_ticks > 0 &&
1568
0
      sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1569
    /* Auto close is on */
1570
0
    asoc = &stcb->asoc;
1571
    /* pick the time to use */
1572
0
    if (asoc->time_last_rcvd.tv_sec >
1573
0
        asoc->time_last_sent.tv_sec) {
1574
0
      tim_touse = &asoc->time_last_rcvd;
1575
0
    } else {
1576
0
      tim_touse = &asoc->time_last_sent;
1577
0
    }
1578
    /* Now has long enough transpired to autoclose? */
1579
0
    ticks_gone_by = sctp_secs_to_ticks((uint32_t)(tn.tv_sec - tim_touse->tv_sec));
1580
0
    if (ticks_gone_by >= asoc->sctp_autoclose_ticks) {
1581
      /*
1582
       * autoclose time has hit, call the output routine,
1583
       * which should do nothing just to be SURE we don't
1584
       * have hanging data. We can then safely check the
1585
       * queues and know that we are clear to send
1586
       * shutdown
1587
       */
1588
0
      sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1589
      /* Are we clean? */
1590
0
      if (TAILQ_EMPTY(&asoc->send_queue) &&
1591
0
          TAILQ_EMPTY(&asoc->sent_queue)) {
1592
        /*
1593
         * there is nothing queued to send, so I'm
1594
         * done...
1595
         */
1596
0
        if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) {
1597
          /* only send SHUTDOWN 1st time thru */
1598
0
          struct sctp_nets *net;
1599
1600
0
          if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
1601
0
              (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
1602
0
            SCTP_STAT_DECR_GAUGE32(sctps_currestab);
1603
0
          }
1604
0
          SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
1605
0
          sctp_stop_timers_for_shutdown(stcb);
1606
0
          if (stcb->asoc.alternate) {
1607
0
            net = stcb->asoc.alternate;
1608
0
          } else {
1609
0
            net = stcb->asoc.primary_destination;
1610
0
          }
1611
0
          sctp_send_shutdown(stcb, net);
1612
0
          sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1613
0
               stcb->sctp_ep, stcb, net);
1614
0
          sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1615
0
               stcb->sctp_ep, stcb, NULL);
1616
0
        }
1617
0
      }
1618
0
    } else {
1619
      /*
1620
       * No auto close at this time, reset t-o to check
1621
       * later
1622
       */
1623
0
      uint32_t tmp;
1624
1625
      /* fool the timer startup to use the time left */
1626
0
      tmp = asoc->sctp_autoclose_ticks;
1627
0
      asoc->sctp_autoclose_ticks -= ticks_gone_by;
1628
0
      sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
1629
      /* restore the real tick value */
1630
0
      asoc->sctp_autoclose_ticks = tmp;
1631
0
    }
1632
0
  }
1633
0
}
1634
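
The autoclose branch near the end of the listing restarts the timer with only the ticks that remain, then restores the configured value so later comparisons still use the full limit. A minimal sketch of that remaining-time computation, with hypothetical names:

#include <stdint.h>

static uint32_t
autoclose_ticks_remaining(uint32_t configured_ticks, uint32_t ticks_gone_by)
{
    /* The code above subtracts the elapsed ticks just long enough to
     * start the timer, then puts the configured value back; this is
     * the value the timer is effectively started with. */
    return ((ticks_gone_by < configured_ticks) ?
        (configured_ticks - ticks_gone_by) : 0);
}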