Coverage Report

Created: 2024-07-27 06:19

/src/usrsctp/usrsctplib/user_mbuf.c
Line | Count | Source
1
/*-
2
 * Copyright (c) 1982, 1986, 1988, 1993
3
 *      The Regents of the University of California.
4
 * All rights reserved.
5
 *
6
 * Redistribution and use in source and binary forms, with or without
7
 * modification, are permitted provided that the following conditions
8
 * are met:
9
 * 1. Redistributions of source code must retain the above copyright
10
 *    notice, this list of conditions and the following disclaimer.
11
 * 2. Redistributions in binary form must reproduce the above copyright
12
 *    notice, this list of conditions and the following disclaimer in the
13
 *    documentation and/or other materials provided with the distribution.
14
 * 3. Neither the name of the University nor the names of its contributors
15
 *    may be used to endorse or promote products derived from this software
16
 *    without specific prior written permission.
17
 *
18
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28
 * SUCH DAMAGE.
29
 *
30
 */
31
32
/*
33
 *  __Userspace__ version of /usr/src/sys/kern/kern_mbuf.c
34
 *  We are initializing two zones for Mbufs and Clusters.
35
 *
36
 */
37
38
#include <stdio.h>
39
#include <string.h>
40
/* #include <sys/param.h> This defines MSIZE 256 */
41
#if !defined(SCTP_SIMPLE_ALLOCATOR)
42
#include "umem.h"
43
#endif
44
#include "user_mbuf.h"
45
#include "user_environment.h"
46
#include "user_atomic.h"
47
#include "netinet/sctp_pcb.h"
48
49
#define KIPC_MAX_LINKHDR        4       /* int: max length of link header (see sys/sysctl.h) */
50
#define KIPC_MAX_PROTOHDR       5       /* int: max length of network header (see sys/sysctl.h) */
51
int max_linkhdr = KIPC_MAX_LINKHDR;
52
int max_protohdr = KIPC_MAX_PROTOHDR; /* Size of largest protocol layer header. */
53
54
/*
55
 * Zones from which we allocate.
56
 */
57
sctp_zone_t zone_mbuf;
58
sctp_zone_t zone_clust;
59
sctp_zone_t zone_ext_refcnt;
60
61
/* __Userspace__ clust_mb_args will be passed as callback data to mb_ctor_clust
62
 * and mb_dtor_clust.
63
 * Note: I had to use struct clust_args as an encapsulation for an mbuf pointer.
64
 * struct mbuf * clust_mb_args; does not work.
65
 */
66
struct clust_args clust_mb_args;
67
68
69
/* __Userspace__
70
 * Local prototypes.
71
 */
72
static int  mb_ctor_mbuf(void *, void *, int);
73
static int  mb_ctor_clust(void *, void *, int);
74
static void mb_dtor_mbuf(void *, void *);
75
static void mb_dtor_clust(void *, void *);
76
77
78
/***************** Functions taken from user_mbuf.h *************/
79
80
static int mbuf_constructor_dup(struct mbuf *m, int pkthdr, short type)
81
21.9M
{
82
21.9M
  int flags = pkthdr;
83
84
21.9M
  m->m_next = NULL;
85
21.9M
  m->m_nextpkt = NULL;
86
21.9M
  m->m_len = 0;
87
21.9M
  m->m_flags = flags;
88
21.9M
  m->m_type = type;
89
21.9M
  if (flags & M_PKTHDR) {
90
10.2M
    m->m_data = m->m_pktdat;
91
10.2M
    m->m_pkthdr.rcvif = NULL;
92
10.2M
    m->m_pkthdr.len = 0;
93
10.2M
    m->m_pkthdr.header = NULL;
94
10.2M
    m->m_pkthdr.csum_flags = 0;
95
10.2M
    m->m_pkthdr.csum_data = 0;
96
10.2M
    m->m_pkthdr.tso_segsz = 0;
97
10.2M
    m->m_pkthdr.ether_vtag = 0;
98
10.2M
    SLIST_INIT(&m->m_pkthdr.tags);
99
10.2M
  } else
100
11.7M
    m->m_data = m->m_dat;
101
102
21.9M
  return (0);
103
21.9M
}
104
105
/* __Userspace__ */
106
struct mbuf *
107
m_get(int how, short type)
108
11.7M
{
109
11.7M
  struct mbuf *mret;
110
11.7M
#if defined(SCTP_SIMPLE_ALLOCATOR)
111
11.7M
  struct mb_args mbuf_mb_args;
112
113
  /* The following initialization is not yet enclosed within
114
   * #if USING_MBUF_CONSTRUCTOR - #endif, until I have thoroughly tested
115
   * mb_dtor_mbuf. See comment there
116
   */
117
11.7M
  mbuf_mb_args.flags = 0;
118
11.7M
  mbuf_mb_args.type = type;
119
11.7M
#endif
120
  /* Mbuf master zone, zone_mbuf, has already been
121
   * created in mbuf_initialize() */
122
11.7M
  mret = SCTP_ZONE_GET(zone_mbuf, struct mbuf);
123
11.7M
#if defined(SCTP_SIMPLE_ALLOCATOR)
124
11.7M
  mb_ctor_mbuf(mret, &mbuf_mb_args, 0);
125
11.7M
#endif
126
  /*mret =  ((struct mbuf *)umem_cache_alloc(zone_mbuf, UMEM_DEFAULT));*/
127
128
  /* There are cases when an object is available in the current CPU's
129
   * loaded magazine; in those cases the object's constructor is not applied.
130
   * If that is the case, we duplicate the constructor initialization here,
131
   * so that the mbuf is properly constructed before returning it.
132
   */
133
11.7M
  if (mret) {
134
#if USING_MBUF_CONSTRUCTOR
135
    if (! (mret->m_type == type) ) {
136
      mbuf_constructor_dup(mret, 0, type);
137
    }
138
#else
139
11.7M
    mbuf_constructor_dup(mret, 0, type);
140
11.7M
#endif
141
142
11.7M
  }
143
11.7M
  return mret;
144
11.7M
}
145
146
147
/* __Userspace__ */
148
struct mbuf *
149
m_gethdr(int how, short type)
150
10.2M
{
151
10.2M
  struct mbuf *mret;
152
10.2M
#if defined(SCTP_SIMPLE_ALLOCATOR)
153
10.2M
  struct mb_args mbuf_mb_args;
154
155
  /* The following initialization is not yet enclosed within
156
   * #if USING_MBUF_CONSTRUCTOR - #endif, until I have thoroughly tested
157
   * mb_dtor_mbuf. See comment there
158
   */
159
10.2M
  mbuf_mb_args.flags = M_PKTHDR;
160
10.2M
  mbuf_mb_args.type = type;
161
10.2M
#endif
162
10.2M
  mret = SCTP_ZONE_GET(zone_mbuf, struct mbuf);
163
10.2M
#if defined(SCTP_SIMPLE_ALLOCATOR)
164
10.2M
  mb_ctor_mbuf(mret, &mbuf_mb_args, 0);
165
10.2M
#endif
166
  /*mret = ((struct mbuf *)umem_cache_alloc(zone_mbuf, UMEM_DEFAULT));*/
167
  /* There are cases when an object is available in the current CPU's
168
   * loaded magazine; in those cases the object's constructor is not applied.
169
   * If that is the case, we duplicate the constructor initialization here,
170
   * so that the mbuf is properly constructed before returning it.
171
   */
172
10.2M
  if (mret) {
173
#if USING_MBUF_CONSTRUCTOR
174
    if (! ((mret->m_flags & M_PKTHDR) && (mret->m_type == type)) ) {
175
      mbuf_constructor_dup(mret, M_PKTHDR, type);
176
    }
177
#else
178
10.2M
    mbuf_constructor_dup(mret, M_PKTHDR, type);
179
10.2M
#endif
180
10.2M
  }
181
10.2M
  return mret;
182
10.2M
}
183
184
/* __Userspace__ */
185
struct mbuf *
186
m_free(struct mbuf *m)
187
21.9M
{
188
189
21.9M
  struct mbuf *n = m->m_next;
190
191
21.9M
  if (m->m_flags & M_EXT)
192
678k
    mb_free_ext(m);
193
21.2M
  else if ((m->m_flags & M_NOFREE) == 0) {
194
21.2M
#if defined(SCTP_SIMPLE_ALLOCATOR)
195
21.2M
    mb_dtor_mbuf(m, NULL);
196
21.2M
#endif
197
21.2M
    SCTP_ZONE_FREE(zone_mbuf, m);
198
21.2M
  }
199
    /*umem_cache_free(zone_mbuf, m);*/
200
21.9M
  return (n);
201
21.9M
}
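/* Editor's addition, a minimal usage sketch (not from the original file):
 * the basic allocate/use/free cycle for the getters above. m_free() releases
 * one mbuf and returns its successor, which is what the loop in m_freem()
 * relies on. Kept under #if 0 so the listing's behavior is unchanged.
 */
#if 0
static void
mbuf_lifecycle_sketch(void)
{
  struct mbuf *m;

  m = m_gethdr(M_NOWAIT, MT_DATA);      /* first mbuf carries M_PKTHDR */
  if (m == NULL)
    return;
  m->m_next = m_get(M_NOWAIT, MT_DATA); /* plain mbuf appended */
  /* m_free(m) would release only the head and return m->m_next;
   * m_freem() walks and releases the whole chain. */
  m_freem(m);
}
#endif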
202
203
204
static void
205
clust_constructor_dup(caddr_t m_clust, struct mbuf* m)
206
126k
{
207
126k
  u_int *refcnt;
208
126k
  int type, size;
209
210
126k
  if (m == NULL) {
211
0
    return;
212
0
  }
213
  /* Assigning cluster of MCLBYTES. TODO: Add jumbo frame functionality */
214
126k
  type = EXT_CLUSTER;
215
126k
  size = MCLBYTES;
216
217
126k
  refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
218
  /*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
219
#if !defined(SCTP_SIMPLE_ALLOCATOR)
220
  if (refcnt == NULL) {
221
    umem_reap();
222
    refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
223
    /*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
224
  }
225
#endif
226
126k
  *refcnt = 1;
227
126k
  m->m_ext.ext_buf = (caddr_t)m_clust;
228
126k
  m->m_data = m->m_ext.ext_buf;
229
126k
  m->m_flags |= M_EXT;
230
126k
  m->m_ext.ext_free = NULL;
231
126k
  m->m_ext.ext_args = NULL;
232
126k
  m->m_ext.ext_size = size;
233
126k
  m->m_ext.ext_type = type;
234
126k
  m->m_ext.ref_cnt = refcnt;
235
126k
  return;
236
126k
}
237
238
239
/* __Userspace__ */
240
void
241
m_clget(struct mbuf *m, int how)
242
126k
{
243
126k
  caddr_t mclust_ret;
244
126k
#if defined(SCTP_SIMPLE_ALLOCATOR)
245
126k
  struct clust_args clust_mb_args_l;
246
126k
#endif
247
126k
  if (m->m_flags & M_EXT) {
248
0
    SCTPDBG(SCTP_DEBUG_USR, "%s: %p mbuf already has cluster\n", __func__, (void *)m);
249
0
  }
250
126k
  m->m_ext.ext_buf = (char *)NULL;
251
126k
#if defined(SCTP_SIMPLE_ALLOCATOR)
252
126k
  clust_mb_args_l.parent_mbuf = m;
253
126k
#endif
254
126k
  mclust_ret = SCTP_ZONE_GET(zone_clust, char);
255
126k
#if defined(SCTP_SIMPLE_ALLOCATOR)
256
126k
  mb_ctor_clust(mclust_ret, &clust_mb_args_l, 0);
257
126k
#endif
258
  /*mclust_ret = umem_cache_alloc(zone_clust, UMEM_DEFAULT);*/
259
  /*
260
   On a cluster allocation failure, call umem_reap() and retry.
261
   */
262
263
126k
  if (mclust_ret == NULL) {
264
#if !defined(SCTP_SIMPLE_ALLOCATOR)
265
  /*  mclust_ret = SCTP_ZONE_GET(zone_clust, char);
266
    mb_ctor_clust(mclust_ret, &clust_mb_args, 0);
267
#else*/
268
    umem_reap();
269
    mclust_ret = SCTP_ZONE_GET(zone_clust, char);
270
#endif
271
    /*mclust_ret = umem_cache_alloc(zone_clust, UMEM_DEFAULT);*/
272
    if (mclust_ret == NULL) {
273
0
    SCTPDBG(SCTP_DEBUG_USR, "Memory allocation failure in %s\n", __func__);
274
    }
275
0
  }
276
277
#if USING_MBUF_CONSTRUCTOR
278
  if (m->m_ext.ext_buf == NULL) {
279
    clust_constructor_dup(mclust_ret, m);
280
  }
281
#else
282
126k
  clust_constructor_dup(mclust_ret, m);
283
126k
#endif
284
126k
}
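/* Editor's addition, a usage sketch (not from the original file): attaching
 * an MCLBYTES cluster to a fresh mbuf. m_clget() reports failure only through
 * the mbuf's ext fields, so the caller checks that a buffer really arrived.
 */
#if 0
static struct mbuf *
cluster_sketch(void)
{
  struct mbuf *m;

  m = m_get(M_NOWAIT, MT_DATA);
  if (m == NULL)
    return (NULL);
  m_clget(m, M_NOWAIT);
  if ((m->m_flags & M_EXT) == 0 || m->m_ext.ext_buf == NULL) {
    m_free(m);                      /* no cluster was attached */
    return (NULL);
  }
  /* m->m_data now points into the MCLBYTES external buffer */
  return (m);
}
#endif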
285
286
struct mbuf *
287
m_getm2(struct mbuf *m, int len, int how, short type, int flags, int allonebuf)
288
11.2M
{
289
11.2M
  struct mbuf *mb, *nm = NULL, *mtail = NULL;
290
11.2M
  int size, mbuf_threshold, space_needed = len;
291
292
11.2M
  KASSERT(len >= 0, ("%s: len is < 0", __func__));
293
294
  /* Validate flags. */
295
11.2M
  flags &= (M_PKTHDR | M_EOR);
296
297
  /* Packet header mbuf must be first in chain. */
298
11.2M
  if ((flags & M_PKTHDR) && m != NULL) {
299
0
    flags &= ~M_PKTHDR;
300
0
  }
301
302
11.2M
  if (allonebuf == 0)
303
10.0M
    mbuf_threshold = SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count);
304
1.18M
  else
305
1.18M
    mbuf_threshold = 1;
306
307
  /* Loop and append maximum sized mbufs to the chain tail. */
308
22.5M
  while (len > 0) {
309
11.3M
    if ((!allonebuf && len >= MCLBYTES) || (len > (int)(((mbuf_threshold - 1) * MLEN) + MHLEN))) {
310
126k
      mb = m_gethdr(how, type);
311
126k
      MCLGET(mb, how);
312
126k
      size = MCLBYTES;
313
      /* SCTP_BUF_LEN(mb) = MCLBYTES; */
314
11.1M
    } else if (flags & M_PKTHDR) {
315
10.0M
      mb = m_gethdr(how, type);
316
10.0M
      if (len < MHLEN) {
317
10.0M
        size = len;
318
10.0M
      } else {
319
32.4k
        size = MHLEN;
320
32.4k
      }
321
10.0M
    } else {
322
1.11M
      mb = m_get(how, type);
323
1.11M
      if (len < MLEN) {
324
1.09M
        size = len;
325
1.09M
      } else {
326
23.8k
        size = MLEN;
327
23.8k
      }
328
1.11M
    }
329
330
    /* Fail the whole operation if one mbuf can't be allocated. */
331
11.3M
    if (mb == NULL) {
332
0
      if (nm != NULL)
333
0
        m_freem(nm);
334
0
      return (NULL);
335
0
    }
336
337
11.3M
    if (allonebuf != 0 && size < space_needed) {
338
0
      m_freem(mb);
339
0
      return (NULL);
340
0
    }
341
342
    /* Bookkeeping. */
343
11.3M
    len -= size;
344
11.3M
    if (mtail != NULL)
345
109k
      mtail->m_next = mb;
346
11.2M
    else
347
11.2M
      nm = mb;
348
11.3M
    mtail = mb;
349
11.3M
    flags &= ~M_PKTHDR;     /* Only valid on the first mbuf. */
350
11.3M
  }
351
11.2M
  if (flags & M_EOR) {
352
0
    mtail->m_flags |= M_EOR;  /* Only valid on the last mbuf. */
353
0
  }
354
355
  /* If mbuf was supplied, append new chain to the end of it. */
356
11.2M
  if (m != NULL) {
357
0
    for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next);
358
0
    mtail->m_next = nm;
359
0
    mtail->m_flags &= ~M_EOR;
360
11.2M
  } else {
361
11.2M
    m = nm;
362
11.2M
  }
363
364
11.2M
  return (m);
365
11.2M
}
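/* Editor's addition, a usage sketch (not from the original file): requesting
 * a fresh chain that can hold len bytes. With allonebuf != 0 the data must
 * fit in a single mbuf or cluster and the call fails instead of chaining;
 * with 0 it appends maximum-sized mbufs as the loop above shows.
 */
#if 0
static struct mbuf *
getm2_sketch(int len)
{
  struct mbuf *chain;

  chain = m_getm2(NULL, len, M_NOWAIT, MT_DATA, M_PKTHDR, 0 /* allonebuf */);
  if (chain == NULL)
    return (NULL);  /* the whole allocation failed atomically */
  /* each mbuf's m_len is still 0; the caller fills the data areas */
  return (chain);
}
#endif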
366
367
/*
368
 * Copy the contents of uio into a properly sized mbuf chain.
369
 */
370
struct mbuf *
371
m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
372
25.6k
{
373
25.6k
  struct mbuf *m, *mb;
374
25.6k
  int error, length;
375
25.6k
  ssize_t total;
376
25.6k
  int progress = 0;
377
378
  /*
379
   * len can be zero or an arbitrary large value bound by
380
   * the total data supplied by the uio.
381
   */
382
25.6k
  if (len > 0)
383
25.6k
    total = min(uio->uio_resid, len);
384
0
  else
385
0
    total = uio->uio_resid;
386
  /*
387
   * The smallest unit returned by m_getm2() is a single mbuf
388
   * with pkthdr.  We can't align past it.
389
   */
390
25.6k
  if (align >= MHLEN)
391
0
    return (NULL);
392
  /*
393
   * Give us the full allocation or nothing.
394
   * If len is zero return the smallest empty mbuf.
395
   */
396
25.6k
  m = m_getm2(NULL, (int)max(total + align, 1), how, MT_DATA, flags, 0);
397
25.6k
  if (m == NULL)
398
0
    return (NULL);
399
25.6k
  m->m_data += align;
400
401
  /* Fill all mbufs with uio data and update header information. */
402
90.1k
  for (mb = m; mb != NULL; mb = mb->m_next) {
403
64.5k
    length = (int)min(M_TRAILINGSPACE(mb), total - progress);
404
64.5k
    error = uiomove(mtod(mb, void *), length, uio);
405
64.5k
    if (error) {
406
0
      m_freem(m);
407
0
      return (NULL);
408
0
    }
409
410
64.5k
    mb->m_len = length;
411
64.5k
    progress += length;
412
64.5k
    if (flags & M_PKTHDR)
413
0
      m->m_pkthdr.len += length;
414
64.5k
  }
415
25.6k
  KASSERT(progress == total, ("%s: progress != total", __func__));
416
417
25.6k
  return (m);
418
25.6k
}
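/* Editor's addition, a usage sketch (not from the original file): wrapping a
 * flat buffer in a struct uio for m_uiotombuf(). The uio field names follow
 * the userspace uio shim that uiomove() consumes here; treat them, and the
 * UIO_SYSSPACE/UIO_WRITE values, as assumptions rather than verified API.
 */
#if 0
static struct mbuf *
uiotombuf_sketch(char *buf, int len)
{
  struct iovec iov;
  struct uio uio;

  iov.iov_base = buf;
  iov.iov_len = (size_t)len;
  uio.uio_iov = &iov;
  uio.uio_iovcnt = 1;
  uio.uio_offset = 0;
  uio.uio_resid = len;
  uio.uio_segflg = UIO_SYSSPACE;  /* assumed: in-process buffer */
  uio.uio_rw = UIO_WRITE;         /* data flows out of the uio */
  return (m_uiotombuf(&uio, M_NOWAIT, len, 0 /* align */, 0 /* flags */));
}
#endif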
419
420
u_int
421
m_length(struct mbuf *m0, struct mbuf **last)
422
0
{
423
0
  struct mbuf *m;
424
0
  u_int len;
425
426
0
  len = 0;
427
0
  for (m = m0; m != NULL; m = m->m_next) {
428
0
    len += m->m_len;
429
0
    if (m->m_next == NULL)
430
0
      break;
431
0
  }
432
0
  if (last != NULL)
433
0
    *last = m;
434
0
  return (len);
435
0
}
436
437
struct mbuf *
438
m_last(struct mbuf *m)
439
25.6k
{
440
64.5k
  while (m->m_next) {
441
38.8k
    m = m->m_next;
442
38.8k
  }
443
25.6k
  return (m);
444
25.6k
}
445
446
/*
447
 * Unlink a tag from the list of tags associated with an mbuf.
448
 */
449
static __inline void
450
m_tag_unlink(struct mbuf *m, struct m_tag *t)
451
0
{
452
453
0
  SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
454
0
}
455
456
/*
457
 * Reclaim resources associated with a tag.
458
 */
459
static __inline void
460
m_tag_free(struct m_tag *t)
461
0
{
462
463
0
  (*t->m_tag_free)(t);
464
0
}
465
466
/*
467
 * Set up the contents of a tag.  Note that this does not fill in the free
468
 * method; the caller is expected to do that.
469
 *
470
 * XXX probably should be called m_tag_init, but that was already taken.
471
 */
472
static __inline void
473
m_tag_setup(struct m_tag *t, uint32_t cookie, int type, int len)
474
0
{
475
476
0
  t->m_tag_id = type;
477
0
  t->m_tag_len = len;
478
0
  t->m_tag_cookie = cookie;
479
0
}
480
481
/************ End functions from user_mbuf.h  ******************/
482
483
484
485
/************ End functions to substitute umem_cache_alloc and umem_cache_free **************/
486
487
void
488
mbuf_initialize(void *dummy)
489
3
{
490
491
  /*
492
   * __Userspace__ Configure UMA zones for Mbufs and Clusters.
493
   * (TODO: m_getcl() - using packet secondary zone).
494
   * There is no provision for trash_init and trash_fini in umem.
495
   *
496
   */
497
 /* zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0,
498
        mb_ctor_mbuf, mb_dtor_mbuf, NULL,
499
        &mbuf_mb_args,
500
        NULL, 0);
501
  zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0, NULL, NULL, NULL, NULL, NULL, 0);*/
502
3
#if defined(SCTP_SIMPLE_ALLOCATOR)
503
3
  SCTP_ZONE_INIT(zone_mbuf, MBUF_MEM_NAME, MSIZE, 0);
504
#else
505
  zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0,
506
                                mb_ctor_mbuf, mb_dtor_mbuf, NULL,
507
                                NULL,
508
                                NULL, 0);
509
#endif
510
  /*zone_ext_refcnt = umem_cache_create(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0,
511
        NULL, NULL, NULL,
512
        NULL,
513
        NULL, 0);*/
514
3
  SCTP_ZONE_INIT(zone_ext_refcnt, MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0);
515
516
  /*zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0,
517
         mb_ctor_clust, mb_dtor_clust, NULL,
518
         &clust_mb_args,
519
         NULL, 0);
520
  zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0, NULL, NULL, NULL, NULL, NULL,0);*/
521
3
#if defined(SCTP_SIMPLE_ALLOCATOR)
522
3
  SCTP_ZONE_INIT(zone_clust, MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0);
523
#else
524
  zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0,
525
                   mb_ctor_clust, mb_dtor_clust, NULL,
526
                   &clust_mb_args,
527
                   NULL, 0);
528
#endif
529
530
  /* uma_prealloc() goes here... */
531
532
  /* __Userspace__ Add umem_reap here for low memory situation?
533
   *
534
   */
535
536
3
}
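/* Editor's addition (not from the original file): the zones above must exist
 * before the first m_get()/m_clget(), so stack start-up is expected to call
 * mbuf_initialize() exactly once; the dummy argument is unused.
 */
#if 0
static void
init_sketch(void)
{
  mbuf_initialize(NULL);  /* creates zone_mbuf, zone_clust, zone_ext_refcnt */
}
#endif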
537
538
539
540
/*
541
 * __Userspace__
542
 *
543
 * Constructor for Mbuf master zone. We have a different constructor
544
 * for allocating the cluster.
545
 *
546
 * The 'arg' pointer points to a mb_args structure which
547
 * contains call-specific information required to support the
548
 * mbuf allocation API.  See user_mbuf.h.
549
 *
550
 * The flgs parameter below can be UMEM_DEFAULT or UMEM_NOFAIL depending on what
551
 * was passed when umem_cache_alloc was called.
552
 * TODO: Use UMEM_NOFAIL in umem_cache_alloc and also define a failure handler
553
 * and call umem_nofail_callback(my_failure_handler) in the stack initialization routines
554
 * The advantage of using UMEM_NOFAIL is that we don't have to check if umem_cache_alloc
555
 * was successful or not. The failure handler would take care of it, if we use the UMEM_NOFAIL
556
 * flag.
557
 *
558
 * NOTE Ref: http://docs.sun.com/app/docs/doc/819-2243/6n4i099p2?l=en&a=view&q=umem_zalloc)
559
 * The umem_nofail_callback() function sets the **process-wide** UMEM_NOFAIL callback.
560
 * It also mentions that umem_nofail_callback is Evolving.
561
 *
562
 */
563
static int
564
mb_ctor_mbuf(void *mem, void *arg, int flgs)
565
21.9M
{
566
#if USING_MBUF_CONSTRUCTOR
567
  struct mbuf *m;
568
  struct mb_args *args;
569
570
  int flags;
571
  short type;
572
573
  m = (struct mbuf *)mem;
574
  args = (struct mb_args *)arg;
575
  flags = args->flags;
576
  type = args->type;
577
578
  m->m_next = NULL;
579
  m->m_nextpkt = NULL;
580
  m->m_len = 0;
581
  m->m_flags = flags;
582
  m->m_type = type;
583
  if (flags & M_PKTHDR) {
584
    m->m_data = m->m_pktdat;
585
    m->m_pkthdr.rcvif = NULL;
586
    m->m_pkthdr.len = 0;
587
    m->m_pkthdr.header = NULL;
588
    m->m_pkthdr.csum_flags = 0;
589
    m->m_pkthdr.csum_data = 0;
590
    m->m_pkthdr.tso_segsz = 0;
591
    m->m_pkthdr.ether_vtag = 0;
592
    SLIST_INIT(&m->m_pkthdr.tags);
593
  } else
594
    m->m_data = m->m_dat;
595
#endif
596
21.9M
  return (0);
597
21.9M
}
598
599
600
/*
601
 * __Userspace__
602
 * The Mbuf master zone destructor.
603
 * This would be called in response to umem_cache_destroy
604
 * TODO: Recheck if this is what we want to do in this destructor.
605
 * (Note: the number of times mb_dtor_mbuf is called is equal to the
606
 * number of individual mbufs allocated from zone_mbuf.)
607
 */
608
static void
609
mb_dtor_mbuf(void *mem, void *arg)
610
21.9M
{
611
21.9M
  struct mbuf *m;
612
613
21.9M
  m = (struct mbuf *)mem;
614
21.9M
  if ((m->m_flags & M_PKTHDR) != 0) {
615
10.2M
    m_tag_delete_chain(m, NULL);
616
10.2M
  }
617
21.9M
}
618
619
620
/* __Userspace__
621
 * The Cluster zone constructor.
622
 *
623
 * Here the 'arg' pointer points to the Mbuf which we
624
 * are configuring cluster storage for.  If 'arg' is
625
 * empty we allocate just the cluster without attaching
626
 * it to an mbuf.  See mbuf.h.
627
 */
628
static int
629
mb_ctor_clust(void *mem, void *arg, int flgs)
630
126k
{
631
632
#if USING_MBUF_CONSTRUCTOR
633
  struct mbuf *m;
634
  struct clust_args * cla;
635
  u_int *refcnt;
636
  int type, size;
637
  sctp_zone_t zone;
638
639
  /* Assigning cluster of MCLBYTES. TODO: Add jumbo frame functionality */
640
  type = EXT_CLUSTER;
641
  zone = zone_clust;
642
  size = MCLBYTES;
643
644
  cla = (struct clust_args *)arg;
645
  m = cla->parent_mbuf;
646
647
  refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
648
  /*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
649
  *refcnt = 1;
650
651
  if (m != NULL) {
652
    m->m_ext.ext_buf = (caddr_t)mem;
653
    m->m_data = m->m_ext.ext_buf;
654
    m->m_flags |= M_EXT;
655
    m->m_ext.ext_free = NULL;
656
    m->m_ext.ext_args = NULL;
657
    m->m_ext.ext_size = size;
658
    m->m_ext.ext_type = type;
659
    m->m_ext.ref_cnt = refcnt;
660
  }
661
#endif
662
126k
  return (0);
663
126k
}
664
665
/* __Userspace__ */
666
static void
667
mb_dtor_clust(void *mem, void *arg)
668
126k
{
669
670
  /* mem is of type caddr_t.  In sys/types.h we have typedef char * caddr_t;  */
671
  /* mb_dtor_clust is called at time of umem_cache_destroy() (the number of times
672
   * mb_dtor_clust is called is equal to the number of individual mbufs allocated
673
   * from zone_clust. Similarly for mb_dtor_mbuf).
674
   * At this point the following:
675
   *  struct mbuf *m;
676
   *   m = (struct mbuf *)arg;
677
   *  assert (*(m->m_ext.ref_cnt) == 0); is not meaningful since  m->m_ext.ref_cnt = NULL;
678
   *  has been done in mb_free_ext().
679
   */
680
681
126k
}
682
683
684
685
686
/* Unlink and free a packet tag. */
687
void
688
m_tag_delete(struct mbuf *m, struct m_tag *t)
689
0
{
690
0
  KASSERT(m && t, ("m_tag_delete: null argument, m %p t %p", (void *)m, (void *)t));
691
0
  m_tag_unlink(m, t);
692
0
  m_tag_free(t);
693
0
}
694
695
696
/* Unlink and free a packet tag chain, starting from given tag. */
697
void
698
m_tag_delete_chain(struct mbuf *m, struct m_tag *t)
699
10.2M
{
700
701
10.2M
  struct m_tag *p, *q;
702
703
10.2M
  KASSERT(m, ("m_tag_delete_chain: null mbuf"));
704
10.2M
  if (t != NULL)
705
0
    p = t;
706
10.2M
  else
707
10.2M
    p = SLIST_FIRST(&m->m_pkthdr.tags);
708
10.2M
  if (p == NULL)
709
10.2M
    return;
710
0
  while ((q = SLIST_NEXT(p, m_tag_link)) != NULL)
711
0
    m_tag_delete(m, q);
712
0
  m_tag_delete(m, p);
713
0
}
714
715
#if 0
716
static void
717
sctp_print_mbuf_chain(struct mbuf *m)
718
{
719
  SCTP_DEBUG_USR(SCTP_DEBUG_USR, "Printing mbuf chain %p.\n", (void *)m);
720
  for(; m; m=m->m_next) {
721
    SCTP_DEBUG_USR(SCTP_DEBUG_USR, "%p: m_len = %ld, m_type = %x, m_next = %p.\n", (void *)m, m->m_len, m->m_type, (void *)m->m_next);
722
    if (m->m_flags & M_EXT)
723
      SCTP_DEBUG_USR(SCTP_DEBUG_USR, "%p: extend_size = %d, extend_buffer = %p, ref_cnt = %d.\n", (void *)m, m->m_ext.ext_size, (void *)m->m_ext.ext_buf, *(m->m_ext.ref_cnt));
724
  }
725
}
726
#endif
727
728
/*
729
 * Free an entire chain of mbufs and associated external buffers, if
730
 * applicable.
731
 */
732
void
733
m_freem(struct mbuf *mb)
734
10.3M
{
735
12.5M
  while (mb != NULL)
736
2.18M
    mb = m_free(mb);
737
10.3M
}
738
739
/*
740
 * __Userspace__
741
 * clean mbufs with M_EXT storage attached to them
742
 * if the reference count hits 1.
743
 */
744
void
745
mb_free_ext(struct mbuf *m)
746
678k
{
747
748
678k
  int skipmbuf;
749
750
678k
  KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
751
678k
  KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
752
753
  /*
754
   * check if the header is embedded in the cluster
755
   */
756
678k
  skipmbuf = (m->m_flags & M_NOFREE);
757
758
  /* Free the externally attached storage if this
759
   * mbuf is the only reference to it.
760
   *__Userspace__ TODO: jumbo frames
761
   *
762
  */
763
  /* NOTE: We previously had the code that SCTP_DECREMENT_AND_CHECK_REFCOUNT
764
           reduces to, but the IPHONE malloc commit changed
765
           this to compare to 0 instead of 1 (see next line).  Why?
766
           ... this caused a huge memory leak on Linux.
767
  */
768
#ifdef IPHONE
769
  if (atomic_fetchadd_int(m->m_ext.ref_cnt, -1) == 0)
770
#else
771
678k
  if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(m->m_ext.ref_cnt))
772
126k
#endif
773
126k
  {
774
126k
    if (m->m_ext.ext_type == EXT_CLUSTER){
775
126k
#if defined(SCTP_SIMPLE_ALLOCATOR)
776
126k
      mb_dtor_clust(m->m_ext.ext_buf, &clust_mb_args);
777
126k
#endif
778
126k
      SCTP_ZONE_FREE(zone_clust, m->m_ext.ext_buf);
779
126k
      SCTP_ZONE_FREE(zone_ext_refcnt, (u_int*)m->m_ext.ref_cnt);
780
126k
      m->m_ext.ref_cnt = NULL;
781
126k
    }
782
126k
  }
783
784
678k
  if (skipmbuf)
785
0
    return;
786
787
788
  /* __Userspace__ Also freeing the storage for ref_cnt
789
   * Free this mbuf back to the mbuf zone with all m_ext
790
   * information purged.
791
   */
792
678k
  m->m_ext.ext_buf = NULL;
793
678k
  m->m_ext.ext_free = NULL;
794
678k
  m->m_ext.ext_args = NULL;
795
678k
  m->m_ext.ref_cnt = NULL;
796
678k
  m->m_ext.ext_size = 0;
797
678k
  m->m_ext.ext_type = 0;
798
678k
  m->m_flags &= ~M_EXT;
799
678k
#if defined(SCTP_SIMPLE_ALLOCATOR)
800
678k
  mb_dtor_mbuf(m, NULL);
801
678k
#endif
802
678k
  SCTP_ZONE_FREE(zone_mbuf, m);
803
804
  /*umem_cache_free(zone_mbuf, m);*/
805
678k
}
806
807
/*
808
 * "Move" mbuf pkthdr from "from" to "to".
809
 * "from" must have M_PKTHDR set, and "to" must be empty.
810
 */
811
void
812
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
813
9.65M
{
814
815
9.65M
  to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
816
9.65M
  if ((to->m_flags & M_EXT) == 0)
817
9.65M
    to->m_data = to->m_pktdat;
818
9.65M
  to->m_pkthdr = from->m_pkthdr;   /* especially tags */
819
9.65M
  SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
820
9.65M
  from->m_flags &= ~M_PKTHDR;
821
9.65M
}
822
823
824
/*
825
 * Rearrange an mbuf chain so that len bytes are contiguous
826
 * and in the data area of an mbuf (so that mtod and dtom
827
 * will work for a structure of size len).  Returns the resulting
828
 * mbuf chain on success, frees it and returns null on failure.
829
 * If there is room, it will add up to max_protohdr-len extra bytes to the
830
 * contiguous region in an attempt to avoid being called next time.
831
 */
832
struct mbuf *
833
m_pullup(struct mbuf *n, int len)
834
9.65M
{
835
9.65M
  struct mbuf *m;
836
9.65M
  int count;
837
9.65M
  int space;
838
839
  /*
840
   * If first mbuf has no cluster, and has room for len bytes
841
   * without shifting current data, pullup into it,
842
   * otherwise allocate a new mbuf to prepend to the chain.
843
   */
844
9.65M
  if ((n->m_flags & M_EXT) == 0 &&
845
9.65M
      n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
846
0
    if (n->m_len >= len)
847
0
      return (n);
848
0
    m = n;
849
0
    n = n->m_next;
850
0
    len -= m->m_len;
851
9.65M
  } else {
852
9.65M
    if (len > MHLEN)
853
0
      goto bad;
854
9.65M
    MGET(m, M_NOWAIT, n->m_type);
855
9.65M
    if (m == NULL)
856
0
      goto bad;
857
9.65M
    m->m_len = 0;
858
9.65M
    if (n->m_flags & M_PKTHDR)
859
9.65M
      M_MOVE_PKTHDR(m, n);
860
9.65M
  }
861
9.65M
  space = (int)(&m->m_dat[MLEN] - (m->m_data + m->m_len));
862
9.65M
  do {
863
9.65M
    count = min(min(max(len, max_protohdr), space), n->m_len);
864
9.65M
    memcpy(mtod(m, caddr_t) + m->m_len,mtod(n, caddr_t), (u_int)count);
865
9.65M
    len -= count;
866
9.65M
    m->m_len += count;
867
9.65M
    n->m_len -= count;
868
9.65M
    space -= count;
869
9.65M
    if (n->m_len)
870
0
      n->m_data += count;
871
9.65M
    else
872
9.65M
      n = m_free(n);
873
9.65M
  } while (len > 0 && n);
874
9.65M
  if (len > 0) {
875
9.65M
    (void) m_free(m);
876
9.65M
    goto bad;
877
9.65M
  }
878
0
  m->m_next = n;
879
0
  return (m);
880
9.65M
bad:
881
9.65M
  m_freem(n);
882
9.65M
  return (NULL);
883
9.65M
}
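/* Editor's addition, a usage sketch (not from the original file): the classic
 * m_pullup() pattern - make a header contiguous before casting the data
 * pointer. On failure the chain has already been freed, so the caller must
 * not touch it again. The header struct here is a hypothetical example.
 */
#if 0
struct example_hdr {        /* hypothetical wire header */
  uint16_t src_port;
  uint16_t dst_port;
};

static struct example_hdr *
pullup_sketch(struct mbuf **mp)
{
  if ((*mp)->m_len < (int)sizeof(struct example_hdr)) {
    *mp = m_pullup(*mp, sizeof(struct example_hdr));
    if (*mp == NULL)
      return (NULL);    /* chain was freed by m_pullup() */
  }
  return (mtod(*mp, struct example_hdr *));
}
#endif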
884
885
886
static struct mbuf *
887
m_dup1(struct mbuf *m, int off, int len, int wait)
888
0
{
889
0
  struct mbuf *n = NULL;
890
0
  int copyhdr;
891
892
0
  if (len > MCLBYTES)
893
0
    return NULL;
894
0
  if (off == 0 && (m->m_flags & M_PKTHDR) != 0)
895
0
    copyhdr = 1;
896
0
  else
897
0
    copyhdr = 0;
898
0
  if (len >= MINCLSIZE) {
899
0
    if (copyhdr == 1)
900
0
      n = m_gethdr(wait, m->m_type); /* allocate first; n was still NULL when m_clget() was called here */
901
0
    else
902
0
      n = m_get(wait, m->m_type);
903
0
    if (n != NULL)
      m_clget(n, wait); /* the pkthdr is copied below via m_dup_pkthdr() */
904
0
  } else {
905
0
    if (copyhdr == 1)
906
0
      n = m_gethdr(wait, m->m_type);
907
0
    else
908
0
      n = m_get(wait, m->m_type);
909
0
  }
910
0
  if (!n)
911
0
    return NULL; /* ENOBUFS */
912
913
0
  if (copyhdr && !m_dup_pkthdr(n, m, wait)) {
914
0
    m_free(n);
915
0
    return NULL;
916
0
  }
917
0
  m_copydata(m, off, len, mtod(n, caddr_t));
918
0
  n->m_len = len;
919
0
  return n;
920
0
}
921
922
923
/* Taken from sys/kern/uipc_mbuf2.c */
924
struct mbuf *
925
m_pulldown(struct mbuf *m, int off, int len, int *offp)
926
0
{
927
0
  struct mbuf *n, *o;
928
0
  int hlen, tlen, olen;
929
0
  int writable;
930
931
  /* check invalid arguments. */
932
0
  KASSERT(m, ("m == NULL in m_pulldown()"));
933
0
  if (len > MCLBYTES) {
934
0
    m_freem(m);
935
0
    return NULL;    /* impossible */
936
0
  }
937
938
#ifdef PULLDOWN_DEBUG
939
  {
940
    struct mbuf *t;
941
    SCTP_DEBUG_USR(SCTP_DEBUG_USR, "before:");
942
    for (t = m; t; t = t->m_next)
943
      SCTP_DEBUG_USR(SCTP_DEBUG_USR, " %d", t->m_len);
944
    SCTP_DEBUG_USR(SCTP_DEBUG_USR, "\n");
945
  }
946
#endif
947
0
  n = m;
948
0
  while (n != NULL && off > 0) {
949
0
    if (n->m_len > off)
950
0
      break;
951
0
    off -= n->m_len;
952
0
    n = n->m_next;
953
0
  }
954
  /* be sure to point non-empty mbuf */
955
0
  while (n != NULL && n->m_len == 0)
956
0
    n = n->m_next;
957
0
  if (!n) {
958
0
    m_freem(m);
959
0
    return NULL;    /* mbuf chain too short */
960
0
  }
961
962
0
  writable = 0;
963
0
  if ((n->m_flags & M_EXT) == 0 ||
964
0
      (n->m_ext.ext_type == EXT_CLUSTER && M_WRITABLE(n)))
965
0
    writable = 1;
966
967
  /*
968
   * the target data is on <n, off>.
969
   * if we got enough data on the mbuf "n", we're done.
970
   */
971
0
  if ((off == 0 || offp) && len <= n->m_len - off && writable)
972
0
    goto ok;
973
974
  /*
975
   * when len <= n->m_len - off and off != 0, it is a special case.
976
   * len bytes from <n, off> sits in single mbuf, but the caller does
977
   * not like the starting position (off).
978
   * chop the current mbuf into two pieces, set off to 0.
979
   */
980
0
  if (len <= n->m_len - off) {
981
0
    o = m_dup1(n, off, n->m_len - off, M_NOWAIT);
982
0
    if (o == NULL) {
983
0
      m_freem(m);
984
0
    return NULL;    /* ENOBUFS */
985
0
    }
986
0
    n->m_len = off;
987
0
    o->m_next = n->m_next;
988
0
    n->m_next = o;
989
0
    n = n->m_next;
990
0
    off = 0;
991
0
    goto ok;
992
0
  }
993
  /*
994
   * we need to take hlen from <n, off> and tlen from <n->m_next, 0>,
995
   * and construct contiguous mbuf with m_len == len.
996
   * note that hlen + tlen == len, and tlen > 0.
997
   */
998
0
  hlen = n->m_len - off;
999
0
  tlen = len - hlen;
1000
1001
  /*
1002
   * ensure that we have enough trailing data on mbuf chain.
1003
   * if not, we can do nothing about the chain.
1004
   */
1005
0
  olen = 0;
1006
0
  for (o = n->m_next; o != NULL; o = o->m_next)
1007
0
    olen += o->m_len;
1008
0
  if (hlen + olen < len) {
1009
0
    m_freem(m);
1010
0
    return NULL;    /* mbuf chain too short */
1011
0
  }
1012
1013
  /*
1014
   * easy cases first.
1015
   * we need to use m_copydata() to get data from <n->m_next, 0>.
1016
   */
1017
0
  if ((off == 0 || offp) && (M_TRAILINGSPACE(n) >= tlen) && writable) {
1018
0
    m_copydata(n->m_next, 0, tlen, mtod(n, caddr_t) + n->m_len);
1019
0
    n->m_len += tlen;
1020
0
    m_adj(n->m_next, tlen);
1021
0
    goto ok;
1022
0
  }
1023
1024
0
  if ((off == 0 || offp) && (M_LEADINGSPACE(n->m_next) >= hlen) && writable) {
1025
0
    n->m_next->m_data -= hlen;
1026
0
    n->m_next->m_len += hlen;
1027
0
    memcpy( mtod(n->m_next, caddr_t), mtod(n, caddr_t) + off,hlen);
1028
0
    n->m_len -= hlen;
1029
0
    n = n->m_next;
1030
0
    off = 0;
1031
0
    goto ok;
1032
0
  }
1033
1034
  /*
1035
   * now we need to do it the hard way.  don't m_copy as there's no room
1036
   * on either end.
1037
   */
1038
0
  if (len > MLEN) {
1039
0
    o = m_get(M_NOWAIT, m->m_type); /* o was passed to m_clget() uninitialized */
1040
    if (o != NULL)
      m_clget(o, M_NOWAIT); /* upstream uses o = m_getcl(M_NOWAIT, m->m_type, 0); */
1041
0
  } else
1042
0
    o = m_get(M_NOWAIT, m->m_type);
1043
0
  if (!o) {
1044
0
    m_freem(m);
1045
0
    return NULL;    /* ENOBUFS */
1046
0
  }
1047
  /* get hlen from <n, off> into <o, 0> */
1048
0
  o->m_len = hlen;
1049
0
  memcpy(mtod(o, caddr_t), mtod(n, caddr_t) + off, hlen);
1050
0
  n->m_len -= hlen;
1051
  /* get tlen from <n->m_next, 0> into <o, hlen> */
1052
0
  m_copydata(n->m_next, 0, tlen, mtod(o, caddr_t) + o->m_len);
1053
0
  o->m_len += tlen;
1054
0
  m_adj(n->m_next, tlen);
1055
0
  o->m_next = n->m_next;
1056
0
  n->m_next = o;
1057
0
  n = o;
1058
0
  off = 0;
1059
0
ok:
1060
#ifdef PULLDOWN_DEBUG
1061
  {
1062
    struct mbuf *t;
1063
    SCTP_DEBUG_USR(SCTP_DEBUG_USR, "after:");
1064
    for (t = m; t; t = t->m_next)
1065
      SCTP_DEBUG_USR(SCTP_DEBUG_USR, "%c%d", t == n ? '*' : ' ', t->m_len);
1066
    SCTP_DEBUG_USR(SCTP_DEBUG_USR, " (off=%d)\n", off);
1067
  }
1068
#endif
1069
0
  if (offp)
1070
0
    *offp = off;
1071
0
  return n;
1072
0
}
1073
1074
/*
1075
 * Attach the cluster from *m to *n, set up m_ext in *n
1076
 * and bump the refcount of the cluster.
1077
 */
1078
static void
1079
mb_dupcl(struct mbuf *n, struct mbuf *m)
1080
552k
{
1081
552k
  KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
1082
552k
  KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
1083
552k
  KASSERT((n->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
1084
1085
552k
  if (*(m->m_ext.ref_cnt) == 1)
1086
10.3k
    *(m->m_ext.ref_cnt) += 1;
1087
541k
  else
1088
541k
    atomic_add_int(m->m_ext.ref_cnt, 1);
1089
552k
  n->m_ext.ext_buf = m->m_ext.ext_buf;
1090
552k
  n->m_ext.ext_free = m->m_ext.ext_free;
1091
552k
  n->m_ext.ext_args = m->m_ext.ext_args;
1092
552k
  n->m_ext.ext_size = m->m_ext.ext_size;
1093
552k
  n->m_ext.ref_cnt = m->m_ext.ref_cnt;
1094
552k
  n->m_ext.ext_type = m->m_ext.ext_type;
1095
552k
  n->m_flags |= M_EXT;
1096
552k
}
1097
1098
1099
/*
1100
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
1101
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
1102
 * The wait parameter is a choice of M_TRYWAIT/M_NOWAIT from the caller.
1103
 * Note that the copy is read-only, because clusters are not copied,
1104
 * only their reference counts are incremented.
1105
 */
1106
1107
struct mbuf *
1108
m_copym(struct mbuf *m, int off0, int len, int wait)
1109
439k
{
1110
439k
  struct mbuf *n, **np;
1111
439k
  int off = off0;
1112
439k
  struct mbuf *top;
1113
439k
  int copyhdr = 0;
1114
1115
439k
  KASSERT(off >= 0, ("m_copym, negative off %d", off));
1116
439k
  KASSERT(len >= 0, ("m_copym, negative len %d", len));
1117
439k
  KASSERT(m != NULL, ("m_copym, m is NULL"));
1118
1119
#if !defined(INVARIANTS)
1120
  if (m == NULL) {
1121
    return (NULL);
1122
  }
1123
#endif
1124
439k
  if (off == 0 && m->m_flags & M_PKTHDR)
1125
5.05k
    copyhdr = 1;
1126
3.95M
  while (off > 0) {
1127
3.94M
    KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
1128
3.94M
    if (off < m->m_len)
1129
427k
      break;
1130
3.51M
    off -= m->m_len;
1131
3.51M
    m = m->m_next;
1132
3.51M
  }
1133
439k
  np = &top;
1134
439k
  top = 0;
1135
1.23M
  while (len > 0) {
1136
803k
    if (m == NULL) {
1137
7.07k
      KASSERT(len == M_COPYALL, ("m_copym, length > size of mbuf chain"));
1138
7.07k
      break;
1139
7.07k
    }
1140
796k
    if (copyhdr)
1141
5.05k
      MGETHDR(n, wait, m->m_type);
1142
791k
    else
1143
791k
      MGET(n, wait, m->m_type);
1144
796k
    *np = n;
1145
796k
    if (n == NULL)
1146
0
      goto nospace;
1147
796k
    if (copyhdr) {
1148
5.05k
      if (!m_dup_pkthdr(n, m, wait))
1149
0
        goto nospace;
1150
5.05k
      if (len == M_COPYALL)
1151
708
        n->m_pkthdr.len -= off0;
1152
4.34k
      else
1153
4.34k
        n->m_pkthdr.len = len;
1154
5.05k
      copyhdr = 0;
1155
5.05k
    }
1156
796k
    n->m_len = min(len, m->m_len - off);
1157
796k
    if (m->m_flags & M_EXT) {
1158
549k
      n->m_data = m->m_data + off;
1159
549k
      mb_dupcl(n, m);
1160
549k
    } else
1161
246k
      memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, (u_int)n->m_len);
1162
796k
    if (len != M_COPYALL)
1163
456k
      len -= n->m_len;
1164
796k
    off = 0;
1165
796k
    m = m->m_next;
1166
796k
    np = &n->m_next;
1167
796k
  }
1168
1169
439k
  return (top);
1170
0
nospace:
1171
0
  m_freem(top);
1172
0
  return (NULL);
1173
439k
}
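/* Editor's addition, a usage sketch (not from the original file): taking a
 * read-only copy of a whole chain. Cluster-backed data is shared through
 * mb_dupcl() - only ref_cnt is bumped - so neither side should write through
 * the data without checking M_WRITABLE() first.
 */
#if 0
static struct mbuf *
copym_sketch(struct mbuf *m)
{
  struct mbuf *copy;

  copy = m_copym(m, 0, M_COPYALL, M_NOWAIT);
  if (copy == NULL)
    return (NULL);
  /* both chains now reference the same clusters; m_freem() on each side
   * just drops one cluster reference apiece */
  return (copy);
}
#endif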
1174
1175
1176
int
1177
m_tag_copy_chain(struct mbuf *to, struct mbuf *from, int how)
1178
5.05k
{
1179
5.05k
  struct m_tag *p, *t, *tprev = NULL;
1180
1181
5.05k
  KASSERT(to && from, ("m_tag_copy_chain: null argument, to %p from %p", (void *)to, (void *)from));
1182
5.05k
  m_tag_delete_chain(to, NULL);
1183
5.05k
  SLIST_FOREACH(p, &from->m_pkthdr.tags, m_tag_link) {
1184
0
    t = m_tag_copy(p, how);
1185
0
    if (t == NULL) {
1186
0
      m_tag_delete_chain(to, NULL);
1187
0
      return 0;
1188
0
    }
1189
0
    if (tprev == NULL)
1190
0
      SLIST_INSERT_HEAD(&to->m_pkthdr.tags, t, m_tag_link);
1191
0
    else
1192
0
      SLIST_INSERT_AFTER(tprev, t, m_tag_link);
1193
0
    tprev = t;
1194
0
  }
1195
5.05k
  return 1;
1196
5.05k
}
1197
1198
/*
1199
 * Duplicate "from"'s mbuf pkthdr in "to".
1200
 * "from" must have M_PKTHDR set, and "to" must be empty.
1201
 * In particular, this does a deep copy of the packet tags.
1202
 */
1203
int
1204
m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
1205
5.05k
{
1206
1207
5.05k
  KASSERT(to, ("m_dup_pkthdr: to is NULL"));
1208
5.05k
  KASSERT(from, ("m_dup_pkthdr: from is NULL"));
1209
5.05k
  to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
1210
5.05k
  if ((to->m_flags & M_EXT) == 0)
1211
5.05k
    to->m_data = to->m_pktdat;
1212
5.05k
  to->m_pkthdr = from->m_pkthdr;
1213
5.05k
  SLIST_INIT(&to->m_pkthdr.tags);
1214
5.05k
  return (m_tag_copy_chain(to, from, MBTOM(how)));
1215
5.05k
}
1216
1217
/* Copy a single tag. */
1218
struct m_tag *
1219
m_tag_copy(struct m_tag *t, int how)
1220
0
{
1221
0
  struct m_tag *p;
1222
1223
0
  KASSERT(t, ("m_tag_copy: null tag"));
1224
0
  p = m_tag_alloc(t->m_tag_cookie, t->m_tag_id, t->m_tag_len, how);
1225
0
  if (p == NULL)
1226
0
    return (NULL);
1227
0
  memcpy(p + 1, t + 1, t->m_tag_len); /* Copy the data */
1228
0
  return p;
1229
0
}
1230
1231
/* Get a packet tag structure along with specified data following. */
1232
struct m_tag *
1233
m_tag_alloc(uint32_t cookie, int type, int len, int wait)
1234
0
{
1235
0
  struct m_tag *t;
1236
1237
0
  if (len < 0)
1238
0
    return NULL;
1239
0
  t = malloc(len + sizeof(struct m_tag));
1240
0
  if (t == NULL)
1241
0
    return NULL;
1242
0
  m_tag_setup(t, cookie, type, len);
1243
0
  t->m_tag_free = m_tag_free_default;
1244
0
  return t;
1245
0
}
1246
1247
/* Free a packet tag. */
1248
void
1249
m_tag_free_default(struct m_tag *t)
1250
0
{
1251
0
  free(t);
1252
0
}
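/* Editor's addition, a usage sketch (not from the original file): allocating
 * a tag, linking it onto a packet-header mbuf and tearing the chain down.
 * The cookie/type/length values are arbitrary placeholders.
 */
#if 0
static int
tag_sketch(struct mbuf *m)    /* m must have M_PKTHDR set */
{
  struct m_tag *t;

  t = m_tag_alloc(0x12345678 /* cookie */, 1 /* type */, 4 /* len */, M_NOWAIT);
  if (t == NULL)
    return (0);
  SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link);
  /* ... later, drop every tag hanging off the header ... */
  m_tag_delete_chain(m, NULL);
  return (1);
}
#endif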
1253
1254
/*
1255
 * Copy data from a buffer back into the indicated mbuf chain,
1256
 * starting "off" bytes from the beginning, extending the mbuf
1257
 * chain if necessary.
1258
 */
1259
void
1260
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
1261
9.99M
{
1262
9.99M
  int mlen;
1263
9.99M
  struct mbuf *m = m0, *n;
1264
9.99M
  int totlen = 0;
1265
1266
9.99M
  if (m0 == NULL)
1267
0
    return;
1268
9.99M
  while (off > (mlen = m->m_len)) {
1269
39
    off -= mlen;
1270
39
    totlen += mlen;
1271
39
    if (m->m_next == NULL) {
1272
0
      n = m_get(M_NOWAIT, m->m_type);
1273
0
      if (n == NULL)
1274
0
        goto out;
1275
0
      memset(mtod(n, caddr_t), 0, MLEN);
1276
0
      n->m_len = min(MLEN, len + off);
1277
0
      m->m_next = n;
1278
0
    }
1279
39
    m = m->m_next;
1280
39
  }
1281
10.0M
  while (len > 0) {
1282
10.0M
    mlen = min (m->m_len - off, len);
1283
10.0M
    memcpy(off + mtod(m, caddr_t), cp, (u_int)mlen);
1284
10.0M
    cp += mlen;
1285
10.0M
    len -= mlen;
1286
10.0M
    mlen += off;
1287
10.0M
    off = 0;
1288
10.0M
    totlen += mlen;
1289
10.0M
    if (len == 0)
1290
9.99M
      break;
1291
70.4k
    if (m->m_next == NULL) {
1292
0
      n = m_get(M_NOWAIT, m->m_type);
1293
0
      if (n == NULL)
1294
0
        break;
1295
0
      n->m_len = min(MLEN, len);
1296
0
      m->m_next = n;
1297
0
    }
1298
70.4k
    m = m->m_next;
1299
70.4k
  }
1300
9.99M
out:  if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1301
0
    m->m_pkthdr.len = totlen;
1302
9.99M
}
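/* Editor's addition, a usage sketch (not from the original file): overwriting
 * a few bytes at a byte offset in an existing chain. m_copyback() grows the
 * chain with zeroed mbufs if the offset runs past the end, but it cannot
 * report allocation failure, so it is safest where the chain already covers
 * the range.
 */
#if 0
static void
copyback_sketch(struct mbuf *m)
{
  uint32_t value = 0;

  m_copyback(m, 8, sizeof(value), (caddr_t)&value);  /* bytes 8..11 */
}
#endif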
1303
1304
/*
1305
 * Apply function f to the data in an mbuf chain starting "off" bytes from
1306
 * the beginning, continuing for "len" bytes.
1307
 */
1308
int
1309
m_apply(struct mbuf *m, int off, int len,
1310
        int (*f)(void *, void *, u_int), void *arg)
1311
0
{
1312
0
  u_int count;
1313
0
  int rval;
1314
1315
0
  KASSERT(off >= 0, ("m_apply, negative off %d", off));
1316
0
  KASSERT(len >= 0, ("m_apply, negative len %d", len));
1317
0
  while (off > 0) {
1318
0
    KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1319
0
    if (off < m->m_len)
1320
0
      break;
1321
0
    off -= m->m_len;
1322
0
    m = m->m_next;
1323
0
  }
1324
0
  while (len > 0) {
1325
0
    KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1326
0
    count = min(m->m_len - off, len);
1327
0
    rval = (*f)(arg, mtod(m, caddr_t) + off, count);
1328
0
    if (rval)
1329
0
      return (rval);
1330
0
    len -= count;
1331
0
    off = 0;
1332
0
    m = m->m_next;
1333
0
  }
1334
0
  return (0);
1335
0
}
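/* Editor's addition, a usage sketch (not from the original file): m_apply()
 * visits the data in place one contiguous region at a time, avoiding the
 * copy m_copydata() would make. The callback just sums bytes; a real caller
 * would compute a checksum or digest.
 */
#if 0
static int
sum_cb(void *arg, void *data, u_int len)
{
  uint32_t *sum = (uint32_t *)arg;
  unsigned char *p = (unsigned char *)data;
  u_int i;

  for (i = 0; i < len; i++)
    *sum += p[i];
  return (0);           /* non-zero would abort the walk */
}

static uint32_t
apply_sketch(struct mbuf *m, int len)
{
  uint32_t sum = 0;

  (void)m_apply(m, 0, len, sum_cb, &sum);
  return (sum);
}
#endif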
1336
1337
/*
1338
 * Lesser-used path for M_PREPEND:
1339
 * allocate new mbuf to prepend to chain,
1340
 * copy junk along.
1341
 */
1342
struct mbuf *
1343
m_prepend(struct mbuf *m, int len, int how)
1344
197k
{
1345
197k
  struct mbuf *mn;
1346
1347
197k
  if (m->m_flags & M_PKTHDR)
1348
0
    MGETHDR(mn, how, m->m_type);
1349
197k
  else
1350
197k
    MGET(mn, how, m->m_type);
1351
197k
  if (mn == NULL) {
1352
0
    m_freem(m);
1353
0
    return (NULL);
1354
0
  }
1355
197k
  if (m->m_flags & M_PKTHDR)
1356
0
    M_MOVE_PKTHDR(mn, m);
1357
197k
  mn->m_next = m;
1358
197k
  m = mn;
1359
197k
  if (m->m_flags & M_PKTHDR) {
1360
0
    if (len < MHLEN)
1361
0
      MH_ALIGN(m, len);
1362
197k
  } else {
1363
197k
    if (len < MLEN)
1364
197k
      M_ALIGN(m, len);
1365
197k
  }
1366
197k
  m->m_len = len;
1367
197k
  return (m);
1368
197k
}
1369
1370
/*
1371
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
1372
 * continuing for "len" bytes, into the indicated buffer.
1373
 */
1374
void
1375
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
1376
238k
{
1377
238k
  u_int count;
1378
1379
238k
  KASSERT(off >= 0, ("m_copydata, negative off %d", off));
1380
238k
  KASSERT(len >= 0, ("m_copydata, negative len %d", len));
1381
239k
  while (off > 0) {
1382
1.40k
    KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
1383
1.40k
    if (off < m->m_len)
1384
440
      break;
1385
969
    off -= m->m_len;
1386
969
    m = m->m_next;
1387
969
  }
1388
1.56M
  while (len > 0) {
1389
1.32M
    KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
1390
1.32M
    count = min(m->m_len - off, len);
1391
1.32M
    memcpy(cp, mtod(m, caddr_t) + off, count);
1392
1.32M
    len -= count;
1393
1.32M
    cp += count;
1394
1.32M
    off = 0;
1395
1.32M
    m = m->m_next;
1396
1.32M
  }
1397
238k
}
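/* Editor's addition, a usage sketch (not from the original file): flattening
 * the first len bytes of a chain into a caller-supplied buffer, guarding the
 * length with m_length() since the KASSERTs compile away in normal builds.
 */
#if 0
static int
copydata_sketch(struct mbuf *m, char *buf, int len)
{
  if ((int)m_length(m, NULL) < len)
    return (-1);                    /* chain too short */
  m_copydata(m, 0, len, (caddr_t)buf);
  return (0);
}
#endif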
1398
1399
1400
/*
1401
 * Concatenate mbuf chain n to m.
1402
 * Both chains must be of the same type (e.g. MT_DATA).
1403
 * Any m_pkthdr is not updated.
1404
 */
1405
void
1406
m_cat(struct mbuf *m, struct mbuf *n)
1407
0
{
1408
0
  while (m->m_next)
1409
0
    m = m->m_next;
1410
0
  while (n) {
1411
0
    if (m->m_flags & M_EXT ||
1412
0
        m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
1413
      /* just join the two chains */
1414
0
      m->m_next = n;
1415
0
      return;
1416
0
    }
1417
    /* splat the data from one into the other */
1418
0
    memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t), (u_int)n->m_len);
1419
0
    m->m_len += n->m_len;
1420
0
    n = m_free(n);
1421
0
  }
1422
0
}
1423
1424
1425
void
1426
m_adj(struct mbuf *mp, int req_len)
1427
5.78k
{
1428
5.78k
  int len = req_len;
1429
5.78k
  struct mbuf *m;
1430
5.78k
  int count;
1431
1432
5.78k
  if ((m = mp) == NULL)
1433
0
    return;
1434
5.78k
  if (len >= 0) {
1435
    /*
1436
     * Trim from head.
1437
     */
1438
12.3k
    while (m != NULL && len > 0) {
1439
7.28k
      if (m->m_len <= len) {
1440
2.23k
        len -= m->m_len;
1441
2.23k
        m->m_len = 0;
1442
2.23k
        m = m->m_next;
1443
5.04k
      } else {
1444
5.04k
        m->m_len -= len;
1445
5.04k
        m->m_data += len;
1446
5.04k
        len = 0;
1447
5.04k
      }
1448
7.28k
    }
1449
5.07k
    m = mp;
1450
5.07k
    if (mp->m_flags & M_PKTHDR)
1451
4.45k
      m->m_pkthdr.len -= (req_len - len);
1452
5.07k
  } else {
1453
    /*
1454
     * Trim from tail.  Scan the mbuf chain,
1455
     * calculating its length and finding the last mbuf.
1456
     * If the adjustment only affects this mbuf, then just
1457
     * adjust and return.  Otherwise, rescan and truncate
1458
     * after the remaining size.
1459
     */
1460
709
    len = -len;
1461
709
    count = 0;
1462
1.48k
    for (;;) {
1463
1.48k
      count += m->m_len;
1464
1.48k
      if (m->m_next == (struct mbuf *)0)
1465
709
        break;
1466
775
      m = m->m_next;
1467
775
    }
1468
709
    if (m->m_len >= len) {
1469
709
      m->m_len -= len;
1470
709
      if (mp->m_flags & M_PKTHDR)
1471
87
        mp->m_pkthdr.len -= len;
1472
709
      return;
1473
709
    }
1474
0
    count -= len;
1475
0
    if (count < 0)
1476
0
      count = 0;
1477
    /*
1478
     * Correct length for chain is "count".
1479
     * Find the mbuf with last data, adjust its length,
1480
     * and toss data from remaining mbufs on chain.
1481
     */
1482
0
    m = mp;
1483
0
    if (m->m_flags & M_PKTHDR)
1484
0
      m->m_pkthdr.len = count;
1485
0
    for (; m; m = m->m_next) {
1486
0
      if (m->m_len >= count) {
1487
0
        m->m_len = count;
1488
0
        if (m->m_next != NULL) {
1489
0
          m_freem(m->m_next);
1490
0
          m->m_next = NULL;
1491
0
        }
1492
0
        break;
1493
0
      }
1494
0
      count -= m->m_len;
1495
0
    }
1496
0
  }
1497
5.78k
}
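/* Editor's addition, a usage sketch (not from the original file): positive
 * lengths trim from the head, negative from the tail, and m_pkthdr.len is
 * kept in sync when M_PKTHDR is set. The 12/4 byte sizes are arbitrary.
 */
#if 0
static void
adj_sketch(struct mbuf *m)
{
  m_adj(m, 12);   /* drop a 12-byte header from the front */
  m_adj(m, -4);   /* drop a 4-byte trailer from the back */
}
#endif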
1498
1499
1500
/* m_split is used within sctp_handle_cookie_echo. */
1501
1502
/*
1503
 * Partition an mbuf chain in two pieces, returning the tail --
1504
 * all but the first len0 bytes.  In case of failure, it returns NULL and
1505
 * attempts to restore the chain to its original state.
1506
 *
1507
 * Note that the resulting mbufs might be read-only, because the new
1508
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
1509
 * the "breaking point" happens to lie within a cluster mbuf. Use the
1510
 * M_WRITABLE() macro to check for this case.
1511
 */
1512
struct mbuf *
1513
m_split(struct mbuf *m0, int len0, int wait)
1514
13.0k
{
1515
13.0k
  struct mbuf *m, *n;
1516
13.0k
  u_int len = len0, remain;
1517
1518
  /* MBUF_CHECKSLEEP(wait); */
1519
15.7k
  for (m = m0; m && (int)len > m->m_len; m = m->m_next)
1520
2.67k
    len -= m->m_len;
1521
13.0k
  if (m == NULL)
1522
0
    return (NULL);
1523
13.0k
  remain = m->m_len - len;
1524
13.0k
  if (m0->m_flags & M_PKTHDR) {
1525
13.0k
    MGETHDR(n, wait, m0->m_type);
1526
13.0k
    if (n == NULL)
1527
0
      return (NULL);
1528
13.0k
    n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1529
13.0k
    n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1530
13.0k
    m0->m_pkthdr.len = len0;
1531
13.0k
    if (m->m_flags & M_EXT)
1532
2.35k
      goto extpacket;
1533
10.6k
    if (remain > MHLEN) {
1534
      /* m can't be the lead packet */
1535
33
      MH_ALIGN(n, 0);
1536
33
      n->m_next = m_split(m, len, wait);
1537
33
      if (n->m_next == NULL) {
1538
0
        (void) m_free(n);
1539
0
        return (NULL);
1540
33
      } else {
1541
33
        n->m_len = 0;
1542
33
        return (n);
1543
33
      }
1544
33
    } else
1545
10.6k
      MH_ALIGN(n, remain);
1546
10.6k
  } else if (remain == 0) {
1547
0
    n = m->m_next;
1548
0
    m->m_next = NULL;
1549
0
    return (n);
1550
33
  } else {
1551
33
    MGET(n, wait, m->m_type);
1552
33
    if (n == NULL)
1553
0
      return (NULL);
1554
33
    M_ALIGN(n, remain);
1555
33
  }
1556
13.0k
extpacket:
1557
13.0k
  if (m->m_flags & M_EXT) {
1558
2.35k
    n->m_data = m->m_data + len;
1559
2.35k
    mb_dupcl(n, m);
1560
10.6k
  } else {
1561
10.6k
    memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + len, remain);
1562
10.6k
  }
1563
13.0k
  n->m_len = remain;
1564
13.0k
  m->m_len = len;
1565
13.0k
  n->m_next = m->m_next;
1566
13.0k
  m->m_next = NULL;
1567
13.0k
  return (n);
1568
13.0k
}
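/* Editor's addition, a usage sketch (not from the original file): splitting a
 * chain after len0 bytes, as sctp_handle_cookie_echo() does. The tail may
 * share clusters with the head, so both sides are read-only unless
 * M_WRITABLE() says otherwise.
 */
#if 0
static struct mbuf *
split_sketch(struct mbuf *m, int len0)
{
  struct mbuf *tail;

  tail = m_split(m, len0, M_NOWAIT);
  if (tail == NULL)
    return (NULL);  /* m is restored as far as possible on failure */
  /* m now ends after len0 bytes; tail holds the remainder */
  return (tail);
}
#endif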
1569
1570
1571
1572
1573
int
1574
0
pack_send_buffer(caddr_t buffer, struct mbuf *mb)
{
1575
1576
0
  int count_to_copy;
1577
0
  int total_count_copied = 0;
1578
0
  int offset = 0;
1579
1580
0
  do {
1581
0
    count_to_copy = mb->m_len;
1582
0
    memcpy(buffer+offset, mtod(mb, caddr_t), count_to_copy);
1583
0
    offset += count_to_copy;
1584
0
    total_count_copied += count_to_copy;
1585
0
    mb = mb->m_next;
1586
0
  } while (mb);
1587
1588
0
  return (total_count_copied);
1589
0
}