/src/usrsctp/usrsctplib/user_mbuf.c
Line | Count | Source |
1 | | /*- |
2 | | * Copyright (c) 1982, 1986, 1988, 1993 |
3 | | * The Regents of the University of California. |
4 | | * All rights reserved. |
5 | | * |
6 | | * Redistribution and use in source and binary forms, with or without |
7 | | * modification, are permitted provided that the following conditions |
8 | | * are met: |
9 | | * 1. Redistributions of source code must retain the above copyright |
10 | | * notice, this list of conditions and the following disclaimer. |
11 | | * 2. Redistributions in binary form must reproduce the above copyright |
12 | | * notice, this list of conditions and the following disclaimer in the |
13 | | * documentation and/or other materials provided with the distribution. |
14 | | * 3. Neither the name of the University nor the names of its contributors |
15 | | * may be used to endorse or promote products derived from this software |
16 | | * without specific prior written permission. |
17 | | * |
18 | | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
19 | | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
20 | | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
21 | | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
22 | | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
23 | | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
24 | | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
25 | | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
26 | | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
27 | | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
28 | | * SUCH DAMAGE. |
29 | | * |
30 | | */ |
31 | | |
32 | | /* |
33 | | * __Userspace__ version of /usr/src/sys/kern/kern_mbuf.c |
34 | | * We are initializing two zones for Mbufs and Clusters. |
35 | | * |
36 | | */ |
37 | | |
38 | | #if defined(_WIN32) && defined(__MINGW32__) |
39 | | #include <minmax.h> |
40 | | #endif |
41 | | |
42 | | #include <stdio.h> |
43 | | #include <string.h> |
44 | | /* #include <sys/param.h> This defines MSIZE 256 */ |
45 | | #if !defined(SCTP_SIMPLE_ALLOCATOR) |
46 | | #include "umem.h" |
47 | | #endif |
48 | | #include "user_mbuf.h" |
49 | | #include "user_environment.h" |
50 | | #include "user_atomic.h" |
51 | | #include "netinet/sctp_pcb.h" |
52 | | |
53 | | #define KIPC_MAX_LINKHDR 4 /* int: max length of link header (see sys/sysctl.h) */ |
54 | | #define KIPC_MAX_PROTOHDR 5 /* int: max length of network header (see sys/sysctl.h) */ |
55 | | int max_linkhdr = KIPC_MAX_LINKHDR; |
56 | | int max_protohdr = KIPC_MAX_PROTOHDR; /* Size of largest protocol layer header. */ |
57 | | |
58 | | /* |
59 | | * Zones from which we allocate. |
60 | | */ |
61 | | sctp_zone_t zone_mbuf; |
62 | | sctp_zone_t zone_clust; |
63 | | sctp_zone_t zone_ext_refcnt; |
64 | | |
65 | | /* __Userspace__ clust_mb_args is passed as callback data to mb_ctor_clust |
66 | | * and mb_dtor_clust. |
67 | | * Note: struct clust_args is used to encapsulate the mbuf pointer, since |
68 | | * passing a bare struct mbuf * as the callback data does not work. |
69 | | */ |
70 | | struct clust_args clust_mb_args; |
71 | | |
72 | | |
73 | | /* __Userspace__ |
74 | | * Local prototypes. |
75 | | */ |
76 | | static int mb_ctor_mbuf(void *, void *, int); |
77 | | static int mb_ctor_clust(void *, void *, int); |
78 | | static void mb_dtor_mbuf(void *, void *); |
79 | | static void mb_dtor_clust(void *, void *); |
80 | | |
81 | | |
82 | | /***************** Functions taken from user_mbuf.h *************/ |
83 | | |
84 | | static int mbuf_constructor_dup(struct mbuf *m, int pkthdr, short type) |
85 | 1.40M | { |
86 | 1.40M | int flags = pkthdr; |
87 | | |
88 | 1.40M | m->m_next = NULL; |
89 | 1.40M | m->m_nextpkt = NULL; |
90 | 1.40M | m->m_len = 0; |
91 | 1.40M | m->m_flags = flags; |
92 | 1.40M | m->m_type = type; |
93 | 1.40M | if (flags & M_PKTHDR) { |
94 | 164k | m->m_data = m->m_pktdat; |
95 | 164k | m->m_pkthdr.rcvif = NULL; |
96 | 164k | m->m_pkthdr.len = 0; |
97 | 164k | m->m_pkthdr.header = NULL; |
98 | 164k | m->m_pkthdr.csum_flags = 0; |
99 | 164k | m->m_pkthdr.csum_data = 0; |
100 | 164k | m->m_pkthdr.tso_segsz = 0; |
101 | 164k | m->m_pkthdr.ether_vtag = 0; |
102 | 164k | SLIST_INIT(&m->m_pkthdr.tags); |
103 | 164k | } else |
104 | 1.23M | m->m_data = m->m_dat; |
105 | | |
106 | 1.40M | return (0); |
107 | 1.40M | } |
108 | | |
109 | | /* __Userspace__ */ |
110 | | struct mbuf * |
111 | | m_get(int how, short type) |
112 | 1.23M | { |
113 | 1.23M | struct mbuf *mret; |
114 | 1.23M | #if defined(SCTP_SIMPLE_ALLOCATOR) |
115 | 1.23M | struct mb_args mbuf_mb_args; |
116 | | |
117 | | /* The following initialization is not yet enclosed within |
118 | | * #if USING_MBUF_CONSTRUCTOR - #endif, until mb_dtor_mbuf has been |
119 | | * thoroughly tested. See the comment there. |
120 | | */ |
121 | 1.23M | mbuf_mb_args.flags = 0; |
122 | 1.23M | mbuf_mb_args.type = type; |
123 | 1.23M | #endif |
124 | | /* Mbuf master zone, zone_mbuf, has already been |
125 | | * created in mbuf_initialize() */ |
126 | 1.23M | mret = SCTP_ZONE_GET(zone_mbuf, struct mbuf); |
127 | 1.23M | #if defined(SCTP_SIMPLE_ALLOCATOR) |
128 | 1.23M | mb_ctor_mbuf(mret, &mbuf_mb_args, 0); |
129 | 1.23M | #endif |
130 | | /*mret = ((struct mbuf *)umem_cache_alloc(zone_mbuf, UMEM_DEFAULT));*/ |
131 | | |
132 | | /* There are cases when an object is available in the current CPU's |
133 | | * loaded magazine, and in those cases the object's constructor is not applied. |
134 | | * If that is the case, we duplicate the constructor initialization here, |
135 | | * so that the mbuf is properly constructed before it is returned. |
136 | | */ |
137 | 1.23M | if (mret) { |
138 | | #if USING_MBUF_CONSTRUCTOR |
139 | | if (! (mret->m_type == type) ) { |
140 | | mbuf_constructor_dup(mret, 0, type); |
141 | | } |
142 | | #else |
143 | 1.23M | mbuf_constructor_dup(mret, 0, type); |
144 | 1.23M | #endif |
145 | | |
146 | 1.23M | } |
147 | 1.23M | return mret; |
148 | 1.23M | } |
149 | | |
150 | | |
151 | | /* __Userspace__ */ |
152 | | struct mbuf * |
153 | | m_gethdr(int how, short type) |
154 | 164k | { |
155 | 164k | struct mbuf *mret; |
156 | 164k | #if defined(SCTP_SIMPLE_ALLOCATOR) |
157 | 164k | struct mb_args mbuf_mb_args; |
158 | | |
159 | | /* The following initialization is not yet enclosed within |
160 | | * #if USING_MBUF_CONSTRUCTOR - #endif, until mb_dtor_mbuf has been |
161 | | * thoroughly tested. See the comment there. |
162 | | */ |
163 | 164k | mbuf_mb_args.flags = M_PKTHDR; |
164 | 164k | mbuf_mb_args.type = type; |
165 | 164k | #endif |
166 | 164k | mret = SCTP_ZONE_GET(zone_mbuf, struct mbuf); |
167 | 164k | #if defined(SCTP_SIMPLE_ALLOCATOR) |
168 | 164k | mb_ctor_mbuf(mret, &mbuf_mb_args, 0); |
169 | 164k | #endif |
170 | | /*mret = ((struct mbuf *)umem_cache_alloc(zone_mbuf, UMEM_DEFAULT));*/ |
171 | | /* There are cases when an object is available in the current CPU's |
172 | | * loaded magazine, and in those cases the object's constructor is not applied. |
173 | | * If that is the case, we duplicate the constructor initialization here, |
174 | | * so that the mbuf is properly constructed before it is returned. |
175 | | */ |
176 | 164k | if (mret) { |
177 | | #if USING_MBUF_CONSTRUCTOR |
178 | | if (! ((mret->m_flags & M_PKTHDR) && (mret->m_type == type)) ) { |
179 | | mbuf_constructor_dup(mret, M_PKTHDR, type); |
180 | | } |
181 | | #else |
182 | 164k | mbuf_constructor_dup(mret, M_PKTHDR, type); |
183 | 164k | #endif |
184 | 164k | } |
185 | 164k | return mret; |
186 | 164k | } |
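A minimal usage sketch for the two allocators above (not part of the instrumented source; assumes mbuf_initialize() has already run, as it has in the runs counted here). Note that the how argument (M_NOWAIT/M_WAITOK) is accepted for API compatibility but is not read by this userspace implementation:

    struct mbuf *m, *mh;

    m = m_get(M_NOWAIT, MT_DATA);     /* plain mbuf: data area is m->m_dat */
    mh = m_gethdr(M_NOWAIT, MT_DATA); /* packet-header mbuf: M_PKTHDR set, tag list initialized */
    if (m != NULL)
        m_free(m);
    if (mh != NULL)
        m_free(mh);                   /* m_free also releases the pkthdr tag chain */

Both functions can return NULL, so callers must check the result.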
187 | | |
188 | | /* __Userspace__ */ |
189 | | struct mbuf * |
190 | | m_free(struct mbuf *m) |
191 | 1.40M | { |
192 | | |
193 | 1.40M | struct mbuf *n = m->m_next; |
194 | | |
195 | 1.40M | if (m->m_flags & M_EXT) |
196 | 252k | mb_free_ext(m); |
197 | 1.14M | else if ((m->m_flags & M_NOFREE) == 0) { |
198 | 1.14M | #if defined(SCTP_SIMPLE_ALLOCATOR) |
199 | 1.14M | mb_dtor_mbuf(m, NULL); |
200 | 1.14M | #endif |
201 | 1.14M | SCTP_ZONE_FREE(zone_mbuf, m); |
202 | 1.14M | } |
203 | | /*umem_cache_free(zone_mbuf, m);*/ |
204 | 1.40M | return (n); |
205 | 1.40M | } |
206 | | |
207 | | |
208 | | static void |
209 | | clust_constructor_dup(caddr_t m_clust, struct mbuf* m) |
210 | 86.2k | { |
211 | 86.2k | u_int *refcnt; |
212 | 86.2k | int type, size; |
213 | | |
214 | 86.2k | if (m == NULL) { |
215 | 0 | return; |
216 | 0 | } |
217 | | /* Assigning cluster of MCLBYTES. TODO: Add jumbo frame functionality */ |
218 | 86.2k | type = EXT_CLUSTER; |
219 | 86.2k | size = MCLBYTES; |
220 | | |
221 | 86.2k | refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int); |
222 | | /*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/ |
223 | | #if !defined(SCTP_SIMPLE_ALLOCATOR) |
224 | | if (refcnt == NULL) { |
225 | | umem_reap(); |
226 | | refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int); |
227 | | /*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/ |
228 | | } |
229 | | #endif |
230 | 86.2k | *refcnt = 1; |
231 | 86.2k | m->m_ext.ext_buf = (caddr_t)m_clust; |
232 | 86.2k | m->m_data = m->m_ext.ext_buf; |
233 | 86.2k | m->m_flags |= M_EXT; |
234 | 86.2k | m->m_ext.ext_free = NULL; |
235 | 86.2k | m->m_ext.ext_args = NULL; |
236 | 86.2k | m->m_ext.ext_size = size; |
237 | 86.2k | m->m_ext.ext_type = type; |
238 | 86.2k | m->m_ext.ref_cnt = refcnt; |
239 | 86.2k | return; |
240 | 86.2k | } |
241 | | |
242 | | |
243 | | /* __Userspace__ */ |
244 | | void |
245 | | m_clget(struct mbuf *m, int how) |
246 | 86.2k | { |
247 | 86.2k | caddr_t mclust_ret; |
248 | 86.2k | #if defined(SCTP_SIMPLE_ALLOCATOR) |
249 | 86.2k | struct clust_args clust_mb_args_l; |
250 | 86.2k | #endif |
251 | 86.2k | if (m->m_flags & M_EXT) { |
252 | 0 | SCTPDBG(SCTP_DEBUG_USR, "%s: %p mbuf already has cluster\n", __func__, (void *)m); |
253 | 0 | } |
254 | 86.2k | m->m_ext.ext_buf = (char *)NULL; |
255 | 86.2k | #if defined(SCTP_SIMPLE_ALLOCATOR) |
256 | 86.2k | clust_mb_args_l.parent_mbuf = m; |
257 | 86.2k | #endif |
258 | 86.2k | mclust_ret = SCTP_ZONE_GET(zone_clust, char); |
259 | 86.2k | #if defined(SCTP_SIMPLE_ALLOCATOR) |
260 | 86.2k | mb_ctor_clust(mclust_ret, &clust_mb_args_l, 0); |
261 | 86.2k | #endif |
262 | | /*mclust_ret = umem_cache_alloc(zone_clust, UMEM_DEFAULT);*/ |
263 | | /* |
264 | | On a cluster allocation failure, call umem_reap() and retry. |
265 | | */ |
266 | | |
267 | 86.2k | if (mclust_ret == NULL) { |
268 | | #if !defined(SCTP_SIMPLE_ALLOCATOR) |
269 | | /* mclust_ret = SCTP_ZONE_GET(zone_clust, char); |
270 | | mb_ctor_clust(mclust_ret, &clust_mb_args, 0); |
271 | | #else*/ |
272 | | umem_reap(); |
273 | | mclust_ret = SCTP_ZONE_GET(zone_clust, char); |
274 | | #endif |
275 | | /*mclust_ret = umem_cache_alloc(zone_clust, UMEM_DEFAULT);*/ |
276 | | /* if (NULL == mclust_ret) { */ |
277 | 0 | SCTPDBG(SCTP_DEBUG_USR, "Memory allocation failure in %s\n", __func__); |
278 | | /* } */ |
279 | 0 | } |
280 | | |
281 | | #if USING_MBUF_CONSTRUCTOR |
282 | | if ((m->m_ext.ext_buf == NULL)) { |
283 | | clust_constructor_dup(mclust_ret, m); |
284 | | } |
285 | | #else |
286 | 86.2k | clust_constructor_dup(mclust_ret, m); |
287 | 86.2k | #endif |
288 | 86.2k | } |
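A short sketch of attaching a cluster (illustrative, not from the instrumented build). m_clget() itself does not report failure, so callers check M_EXT afterwards:

    struct mbuf *m;

    m = m_gethdr(M_NOWAIT, MT_DATA);
    if (m != NULL) {
        m_clget(m, M_NOWAIT);             /* try to attach an MCLBYTES cluster */
        if ((m->m_flags & M_EXT) == 0) {  /* attach failed */
            m_free(m);
            m = NULL;
        } else {
            memset(mtod(m, caddr_t), 0, MCLBYTES); /* m_data now points into the cluster */
        }
    }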
289 | | |
290 | | struct mbuf * |
291 | | m_getm2(struct mbuf *m, int len, int how, short type, int flags, int allonebuf) |
292 | 1.01M | { |
293 | 1.01M | struct mbuf *mb, *nm = NULL, *mtail = NULL; |
294 | 1.01M | int size, mbuf_threshold, space_needed = len; |
295 | | |
296 | 1.01M | KASSERT(len >= 0, ("%s: len is < 0", __func__)); |
297 | | |
298 | | /* Validate flags. */ |
299 | 1.01M | flags &= (M_PKTHDR | M_EOR); |
300 | | |
301 | | /* Packet header mbuf must be first in chain. */ |
302 | 1.01M | if ((flags & M_PKTHDR) && m != NULL) { |
303 | 0 | flags &= ~M_PKTHDR; |
304 | 0 | } |
305 | | |
306 | 1.01M | if (allonebuf == 0) |
307 | 29.4k | mbuf_threshold = SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count); |
308 | 982k | else |
309 | 982k | mbuf_threshold = 1; |
310 | | |
311 | | /* Loop and append maximum sized mbufs to the chain tail. */ |
312 | 2.05M | while (len > 0) { |
313 | 1.03M | if ((!allonebuf && len >= MCLBYTES) || (len > (int)(((mbuf_threshold - 1) * MLEN) + MHLEN))) { |
314 | 86.2k | mb = m_gethdr(how, type); |
315 | 86.2k | MCLGET(mb, how); |
316 | 86.2k | size = MCLBYTES; |
317 | | /* SCTP_BUF_LEN(mb) = MCLBYTES; */ |
318 | 953k | } else if (flags & M_PKTHDR) { |
319 | 78.6k | mb = m_gethdr(how, type); |
320 | 78.6k | if (len < MHLEN) { |
321 | 67.5k | size = len; |
322 | 67.5k | } else { |
323 | 11.0k | size = MHLEN; |
324 | 11.0k | } |
325 | 874k | } else { |
326 | 874k | mb = m_get(how, type); |
327 | 874k | if (len < MLEN) { |
328 | 865k | size = len; |
329 | 865k | } else { |
330 | 9.67k | size = MLEN; |
331 | 9.67k | } |
332 | 874k | } |
333 | | |
334 | | /* Fail the whole operation if one mbuf can't be allocated. */ |
335 | 1.03M | if (mb == NULL) { |
336 | 0 | if (nm != NULL) |
337 | 0 | m_freem(nm); |
338 | 0 | return (NULL); |
339 | 0 | } |
340 | | |
341 | 1.03M | if (allonebuf != 0 && size < space_needed) { |
342 | 0 | m_freem(mb); |
343 | 0 | return (NULL); |
344 | 0 | } |
345 | | |
346 | | /* Book keeping. */ |
347 | 1.03M | len -= size; |
348 | 1.03M | if (mtail != NULL) |
349 | 28.0k | mtail->m_next = mb; |
350 | 1.01M | else |
351 | 1.01M | nm = mb; |
352 | 1.03M | mtail = mb; |
353 | 1.03M | flags &= ~M_PKTHDR; /* Only valid on the first mbuf. */ |
354 | 1.03M | } |
355 | 1.01M | if (flags & M_EOR) { |
356 | 0 | mtail->m_flags |= M_EOR; /* Only valid on the last mbuf. */ |
357 | 0 | } |
358 | | |
359 | | /* If mbuf was supplied, append new chain to the end of it. */ |
360 | 1.01M | if (m != NULL) { |
361 | 0 | for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next); |
362 | 0 | mtail->m_next = nm; |
363 | 0 | mtail->m_flags &= ~M_EOR; |
364 | 1.01M | } else { |
365 | 1.01M | m = nm; |
366 | 1.01M | } |
367 | | |
368 | 1.01M | return (m); |
369 | 1.01M | } |
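A usage sketch with a hypothetical size: request a chain for 4000 bytes, allowing multiple mbufs. With allonebuf != 0 the same call would demand a single buffer and return NULL if 4000 bytes do not fit in one:

    struct mbuf *chain;

    chain = m_getm2(NULL, 4000, M_NOWAIT, MT_DATA, M_PKTHDR, 0);
    if (chain != NULL) {
        /* the first mbuf carries M_PKTHDR; large requests were satisfied with clusters */
        m_freem(chain);
    }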
370 | | |
371 | | /* |
372 | | * Copy the contents of uio into a properly sized mbuf chain. |
373 | | */ |
374 | | struct mbuf * |
375 | | m_uiotombuf(struct uio *uio, int how, int len, int align, int flags) |
376 | 5.34k | { |
377 | 5.34k | struct mbuf *m, *mb; |
378 | 5.34k | int error, length; |
379 | 5.34k | ssize_t total; |
380 | 5.34k | int progress = 0; |
381 | | |
382 | | /* |
383 | | * len can be zero or an arbitrarily large value bounded by |
384 | | * the total data supplied by the uio. |
385 | | */ |
386 | 5.34k | if (len > 0) |
387 | 5.34k | total = min(uio->uio_resid, len); |
388 | 0 | else |
389 | 0 | total = uio->uio_resid; |
390 | | /* |
391 | | * The smallest unit returned by m_getm2() is a single mbuf |
392 | | * with pkthdr. We can't align past it. |
393 | | */ |
394 | 5.34k | if (align >= MHLEN) |
395 | 0 | return (NULL); |
396 | | /* |
397 | | * Give us the full allocation or nothing. |
398 | | * If len is zero return the smallest empty mbuf. |
399 | | */ |
400 | 5.34k | m = m_getm2(NULL, (int)max(total + align, 1), how, MT_DATA, flags, 0); |
401 | 5.34k | if (m == NULL) |
402 | 0 | return (NULL); |
403 | 5.34k | m->m_data += align; |
404 | | |
405 | | /* Fill all mbufs with uio data and update header information. */ |
406 | 10.6k | for (mb = m; mb != NULL; mb = mb->m_next) { |
407 | 5.34k | length = (int)min(M_TRAILINGSPACE(mb), total - progress); |
408 | 5.34k | error = uiomove(mtod(mb, void *), length, uio); |
409 | 5.34k | if (error) { |
410 | 0 | m_freem(m); |
411 | 0 | return (NULL); |
412 | 0 | } |
413 | | |
414 | 5.34k | mb->m_len = length; |
415 | 5.34k | progress += length; |
416 | 5.34k | if (flags & M_PKTHDR) |
417 | 0 | m->m_pkthdr.len += length; |
418 | 5.34k | } |
419 | 5.34k | KASSERT(progress == total, ("%s: progress != total", __func__)); |
420 | | |
421 | 5.34k | return (m); |
422 | 5.34k | } |
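A sketch of filling a chain from a single iovec. The struct uio field names below follow the userspace uio shim (user_uio.h) and should be treated as an assumption, not a verified API:

    struct iovec iov;
    struct uio uio;
    char payload[512];
    struct mbuf *m;

    iov.iov_base = payload;
    iov.iov_len = sizeof(payload);
    uio.uio_iov = &iov;
    uio.uio_iovcnt = 1;
    uio.uio_offset = 0;
    uio.uio_resid = sizeof(payload);
    uio.uio_segflg = UIO_USERSPACE;
    uio.uio_rw = UIO_WRITE;          /* data flows from the iovec into the mbufs */
    m = m_uiotombuf(&uio, M_NOWAIT, sizeof(payload), 0, 0);
    if (m != NULL)
        m_freem(m);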
423 | | |
424 | | u_int |
425 | | m_length(struct mbuf *m0, struct mbuf **last) |
426 | 0 | { |
427 | 0 | struct mbuf *m; |
428 | 0 | u_int len; |
429 | |
430 | 0 | len = 0; |
431 | 0 | for (m = m0; m != NULL; m = m->m_next) { |
432 | 0 | len += m->m_len; |
433 | 0 | if (m->m_next == NULL) |
434 | 0 | break; |
435 | 0 | } |
436 | 0 | if (last != NULL) |
437 | 0 | *last = m; |
438 | 0 | return (len); |
439 | 0 | } |
440 | | |
441 | | struct mbuf * |
442 | | m_last(struct mbuf *m) |
443 | 5.34k | { |
444 | 5.34k | while (m->m_next) { |
445 | 0 | m = m->m_next; |
446 | 0 | } |
447 | 5.34k | return (m); |
448 | 5.34k | } |
449 | | |
450 | | /* |
451 | | * Unlink a tag from the list of tags associated with an mbuf. |
452 | | */ |
453 | | static __inline void |
454 | | m_tag_unlink(struct mbuf *m, struct m_tag *t) |
455 | 0 | { |
456 | |
457 | 0 | SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link); |
458 | 0 | } |
459 | | |
460 | | /* |
461 | | * Reclaim resources associated with a tag. |
462 | | */ |
463 | | static __inline void |
464 | | m_tag_free(struct m_tag *t) |
465 | 0 | { |
466 | |
467 | 0 | (*t->m_tag_free)(t); |
468 | 0 | } |
469 | | |
470 | | /* |
471 | | * Set up the contents of a tag. Note that this does not fill in the free |
472 | | * method; the caller is expected to do that. |
473 | | * |
474 | | * XXX probably should be called m_tag_init, but that was already taken. |
475 | | */ |
476 | | static __inline void |
477 | | m_tag_setup(struct m_tag *t, uint32_t cookie, int type, int len) |
478 | 0 | { |
479 | |
480 | 0 | t->m_tag_id = type; |
481 | 0 | t->m_tag_len = len; |
482 | 0 | t->m_tag_cookie = cookie; |
483 | 0 | } |
484 | | |
485 | | /************ End functions from user_mbuf.h ******************/ |
486 | | |
487 | | |
488 | | |
489 | | /************ End functions to substitute umem_cache_alloc and umem_cache_free **************/ |
490 | | |
491 | | void |
492 | | mbuf_initialize(void *dummy) |
493 | 1 | { |
494 | | |
495 | | /* |
496 | | * __Userspace__Configure UMA zones for Mbufs and Clusters. |
497 | | * (TODO: m_getcl() - using packet secondary zone). |
498 | | * There is no provision for trash_init and trash_fini in umem. |
499 | | * |
500 | | */ |
501 | | /* zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0, |
502 | | mb_ctor_mbuf, mb_dtor_mbuf, NULL, |
503 | | &mbuf_mb_args, |
504 | | NULL, 0); |
505 | | zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0, NULL, NULL, NULL, NULL, NULL, 0);*/ |
506 | 1 | #if defined(SCTP_SIMPLE_ALLOCATOR) |
507 | 1 | SCTP_ZONE_INIT(zone_mbuf, MBUF_MEM_NAME, MSIZE, 0); |
508 | | #else |
509 | | zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0, |
510 | | mb_ctor_mbuf, mb_dtor_mbuf, NULL, |
511 | | NULL, |
512 | | NULL, 0); |
513 | | #endif |
514 | | /*zone_ext_refcnt = umem_cache_create(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0, |
515 | | NULL, NULL, NULL, |
516 | | NULL, |
517 | | NULL, 0);*/ |
518 | 1 | SCTP_ZONE_INIT(zone_ext_refcnt, MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0); |
519 | | |
520 | | /*zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0, |
521 | | mb_ctor_clust, mb_dtor_clust, NULL, |
522 | | &clust_mb_args, |
523 | | NULL, 0); |
524 | | zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0, NULL, NULL, NULL, NULL, NULL,0);*/ |
525 | 1 | #if defined(SCTP_SIMPLE_ALLOCATOR) |
526 | 1 | SCTP_ZONE_INIT(zone_clust, MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0); |
527 | | #else |
528 | | zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0, |
529 | | mb_ctor_clust, mb_dtor_clust, NULL, |
530 | | &clust_mb_args, |
531 | | NULL, 0); |
532 | | #endif |
533 | | |
534 | | /* uma_prealloc() goes here... */ |
535 | | |
536 | | /* __Userspace__ Add umem_reap here for low memory situation? |
537 | | * |
538 | | */ |
539 | | |
540 | 1 | } |
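mbuf_initialize() runs exactly once (the count of 1 above), during the stack's global initialization and before any m_get()/m_clget() call. A minimal sketch:

    mbuf_initialize(NULL);    /* sets up zone_mbuf, zone_clust and zone_ext_refcnt; the argument is unused */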
541 | | |
542 | | |
543 | | |
544 | | /* |
545 | | * __Userspace__ |
546 | | * |
547 | | * Constructor for Mbuf master zone. We have a different constructor |
548 | | * for allocating the cluster. |
549 | | * |
550 | | * The 'arg' pointer points to a mb_args structure which |
551 | | * contains call-specific information required to support the |
552 | | * mbuf allocation API. See user_mbuf.h. |
553 | | * |
554 | | * The flgs parameter below can be UMEM_DEFAULT or UMEM_NOFAIL depending on what |
555 | | * was passed when umem_cache_alloc was called. |
556 | | * TODO: Use UMEM_NOFAIL in umem_cache_alloc and also define a failure handler |
557 | | * and call umem_nofail_callback(my_failure_handler) in the stack initialization routines |
558 | | * The advantage of using UMEM_NOFAIL is that we don't have to check if umem_cache_alloc |
559 | | * was successful or not. The failure handler would take care of it, if we use the UMEM_NOFAIL |
560 | | * flag. |
561 | | * |
562 | | * NOTE Ref: http://docs.sun.com/app/docs/doc/819-2243/6n4i099p2?l=en&a=view&q=umem_zalloc) |
563 | | * The umem_nofail_callback() function sets the **process-wide** UMEM_NOFAIL callback. |
564 | | * It also mentions that umem_nofail_callback is Evolving. |
565 | | * |
566 | | */ |
567 | | static int |
568 | | mb_ctor_mbuf(void *mem, void *arg, int flgs) |
569 | 1.40M | { |
570 | | #if USING_MBUF_CONSTRUCTOR |
571 | | struct mbuf *m; |
572 | | struct mb_args *args; |
573 | | |
574 | | int flags; |
575 | | short type; |
576 | | |
577 | | m = (struct mbuf *)mem; |
578 | | args = (struct mb_args *)arg; |
579 | | flags = args->flags; |
580 | | type = args->type; |
581 | | |
582 | | m->m_next = NULL; |
583 | | m->m_nextpkt = NULL; |
584 | | m->m_len = 0; |
585 | | m->m_flags = flags; |
586 | | m->m_type = type; |
587 | | if (flags & M_PKTHDR) { |
588 | | m->m_data = m->m_pktdat; |
589 | | m->m_pkthdr.rcvif = NULL; |
590 | | m->m_pkthdr.len = 0; |
591 | | m->m_pkthdr.header = NULL; |
592 | | m->m_pkthdr.csum_flags = 0; |
593 | | m->m_pkthdr.csum_data = 0; |
594 | | m->m_pkthdr.tso_segsz = 0; |
595 | | m->m_pkthdr.ether_vtag = 0; |
596 | | SLIST_INIT(&m->m_pkthdr.tags); |
597 | | } else |
598 | | m->m_data = m->m_dat; |
599 | | #endif |
600 | 1.40M | return (0); |
601 | 1.40M | } |
602 | | |
603 | | |
604 | | /* |
605 | | * __Userspace__ |
606 | | * The Mbuf master zone destructor. |
607 | | * This would be called in response to umem_cache_destroy |
608 | | * TODO: Recheck if this is what we want to do in this destructor. |
609 | | * (Note: the number of times mb_dtor_mbuf is called is equal to the |
610 | | * number of individual mbufs allocated from zone_mbuf.) |
611 | | */ |
612 | | static void |
613 | | mb_dtor_mbuf(void *mem, void *arg) |
614 | 1.40M | { |
615 | 1.40M | struct mbuf *m; |
616 | | |
617 | 1.40M | m = (struct mbuf *)mem; |
618 | 1.40M | if ((m->m_flags & M_PKTHDR) != 0) { |
619 | 164k | m_tag_delete_chain(m, NULL); |
620 | 164k | } |
621 | 1.40M | } |
622 | | |
623 | | |
624 | | /* __Userspace__ |
625 | | * The Cluster zone constructor. |
626 | | * |
627 | | * Here the 'arg' pointer points to the Mbuf which we |
628 | | * are configuring cluster storage for. If the mbuf |
629 | | * pointer in 'arg' is NULL we allocate just the cluster |
630 | | * without attaching it to an mbuf. See mbuf.h. |
631 | | */ |
632 | | static int |
633 | | mb_ctor_clust(void *mem, void *arg, int flgs) |
634 | 86.2k | { |
635 | | |
636 | | #if USING_MBUF_CONSTRUCTOR |
637 | | struct mbuf *m; |
638 | | struct clust_args * cla; |
639 | | u_int *refcnt; |
640 | | int type, size; |
641 | | sctp_zone_t zone; |
642 | | |
643 | | /* Assigning cluster of MCLBYTES. TODO: Add jumbo frame functionality */ |
644 | | type = EXT_CLUSTER; |
645 | | zone = zone_clust; |
646 | | size = MCLBYTES; |
647 | | |
648 | | cla = (struct clust_args *)arg; |
649 | | m = cla->parent_mbuf; |
650 | | |
651 | | refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int); |
652 | | /*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/ |
653 | | *refcnt = 1; |
654 | | |
655 | | if (m != NULL) { |
656 | | m->m_ext.ext_buf = (caddr_t)mem; |
657 | | m->m_data = m->m_ext.ext_buf; |
658 | | m->m_flags |= M_EXT; |
659 | | m->m_ext.ext_free = NULL; |
660 | | m->m_ext.ext_args = NULL; |
661 | | m->m_ext.ext_size = size; |
662 | | m->m_ext.ext_type = type; |
663 | | m->m_ext.ref_cnt = refcnt; |
664 | | } |
665 | | #endif |
666 | 86.2k | return (0); |
667 | 86.2k | } |
668 | | |
669 | | /* __Userspace__ */ |
670 | | static void |
671 | | mb_dtor_clust(void *mem, void *arg) |
672 | 86.0k | { |
673 | | |
674 | | /* mem is of type caddr_t. In sys/types.h we have typedef char * caddr_t; */ |
675 | | /* mb_dtor_clust is called at time of umem_cache_destroy() (the number of times |
676 | | * mb_dtor_clust is called is equal to the number of individual mbufs allocated |
677 | | * from zone_clust. Similarly for mb_dtor_mbuf). |
678 | | * At this point the following: |
679 | | * struct mbuf *m; |
680 | | * m = (struct mbuf *)arg; |
681 | | * assert (*(m->m_ext.ref_cnt) == 0); is not meaningful since m->m_ext.ref_cnt = NULL; |
682 | | * has been done in mb_free_ext(). |
683 | | */ |
684 | | |
685 | 86.0k | } |
686 | | |
687 | | |
688 | | |
689 | | |
690 | | /* Unlink and free a packet tag. */ |
691 | | void |
692 | | m_tag_delete(struct mbuf *m, struct m_tag *t) |
693 | 0 | { |
694 | 0 | KASSERT(m && t, ("m_tag_delete: null argument, m %p t %p", (void *)m, (void *)t)); |
695 | 0 | m_tag_unlink(m, t); |
696 | 0 | m_tag_free(t); |
697 | 0 | } |
698 | | |
699 | | |
700 | | /* Unlink and free a packet tag chain, starting from given tag. */ |
701 | | void |
702 | | m_tag_delete_chain(struct mbuf *m, struct m_tag *t) |
703 | 164k | { |
704 | | |
705 | 164k | struct m_tag *p, *q; |
706 | | |
707 | 164k | KASSERT(m, ("m_tag_delete_chain: null mbuf")); |
708 | 164k | if (t != NULL) |
709 | 0 | p = t; |
710 | 164k | else |
711 | 164k | p = SLIST_FIRST(&m->m_pkthdr.tags); |
712 | 164k | if (p == NULL) |
713 | 164k | return; |
714 | 0 | while ((q = SLIST_NEXT(p, m_tag_link)) != NULL) |
715 | 0 | m_tag_delete(m, q); |
716 | 0 | m_tag_delete(m, p); |
717 | 0 | } |
718 | | |
719 | | #if 0 |
720 | | static void |
721 | | sctp_print_mbuf_chain(struct mbuf *m) |
722 | | { |
723 | | SCTPDBG(SCTP_DEBUG_USR, "Printing mbuf chain %p.\n", (void *)m); |
724 | | for (; m; m = m->m_next) { |
725 | | SCTPDBG(SCTP_DEBUG_USR, "%p: m_len = %d, m_type = %x, m_next = %p.\n", (void *)m, m->m_len, m->m_type, (void *)m->m_next); |
726 | | if (m->m_flags & M_EXT) |
727 | | SCTPDBG(SCTP_DEBUG_USR, "%p: extend_size = %u, extend_buffer = %p, ref_cnt = %u.\n", (void *)m, m->m_ext.ext_size, (void *)m->m_ext.ext_buf, *(m->m_ext.ref_cnt)); |
728 | | } |
729 | | } |
730 | | #endif |
731 | | |
732 | | /* |
733 | | * Free an entire chain of mbufs and associated external buffers, if |
734 | | * applicable. |
735 | | */ |
736 | | void |
737 | | m_freem(struct mbuf *mb) |
738 | 295k | { |
739 | 1.10M | while (mb != NULL) |
740 | 812k | mb = m_free(mb); |
741 | 295k | } |
742 | | |
743 | | /* |
744 | | * __Userspace__ |
745 | | * clean mbufs with M_EXT storage attached to them |
746 | | * if the reference count hits 1. |
747 | | */ |
748 | | void |
749 | | mb_free_ext(struct mbuf *m) |
750 | 252k | { |
751 | | |
752 | 252k | int skipmbuf; |
753 | | |
754 | 252k | KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__)); |
755 | 252k | KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__)); |
756 | | |
757 | | /* |
758 | | * check if the header is embedded in the cluster |
759 | | */ |
760 | 252k | skipmbuf = (m->m_flags & M_NOFREE); |
761 | | |
762 | | /* Free the external attached storage if this |
763 | | * mbuf is the only reference to it. |
764 | | *__Userspace__ TODO: jumbo frames |
765 | | * |
766 | | */ |
767 | | /* NOTE: We used to have here the same code that |
768 | | SCTP_DECREMENT_AND_CHECK_REFCOUNT reduces to, but the IPHONE malloc |
769 | | commit changed this to compare against 0 instead of 1 (see next line). |
770 | | Why? ... this caused a huge memory leak on Linux. |
771 | | */ |
772 | | #ifdef IPHONE |
773 | | if (atomic_fetchadd_int(m->m_ext.ref_cnt, -1) == 0) |
774 | | #else |
775 | 252k | if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(m->m_ext.ref_cnt)) |
776 | 86.0k | #endif |
777 | 86.0k | { |
778 | 86.0k | if (m->m_ext.ext_type == EXT_CLUSTER){ |
779 | 86.0k | #if defined(SCTP_SIMPLE_ALLOCATOR) |
780 | 86.0k | mb_dtor_clust(m->m_ext.ext_buf, &clust_mb_args); |
781 | 86.0k | #endif |
782 | 86.0k | SCTP_ZONE_FREE(zone_clust, m->m_ext.ext_buf); |
783 | 86.0k | SCTP_ZONE_FREE(zone_ext_refcnt, (u_int*)m->m_ext.ref_cnt); |
784 | 86.0k | m->m_ext.ref_cnt = NULL; |
785 | 86.0k | } |
786 | 86.0k | } |
787 | | |
788 | 252k | if (skipmbuf) |
789 | 0 | return; |
790 | | |
791 | | |
792 | | /* __Userspace__ Also freeing the storage for ref_cnt |
793 | | * Free this mbuf back to the mbuf zone with all m_ext |
794 | | * information purged. |
795 | | */ |
796 | 252k | m->m_ext.ext_buf = NULL; |
797 | 252k | m->m_ext.ext_free = NULL; |
798 | 252k | m->m_ext.ext_args = NULL; |
799 | 252k | m->m_ext.ref_cnt = NULL; |
800 | 252k | m->m_ext.ext_size = 0; |
801 | 252k | m->m_ext.ext_type = 0; |
802 | 252k | m->m_flags &= ~M_EXT; |
803 | 252k | #if defined(SCTP_SIMPLE_ALLOCATOR) |
804 | 252k | mb_dtor_mbuf(m, NULL); |
805 | 252k | #endif |
806 | 252k | SCTP_ZONE_FREE(zone_mbuf, m); |
807 | | |
808 | | /*umem_cache_free(zone_mbuf, m);*/ |
809 | 252k | } |
810 | | |
811 | | /* |
812 | | * "Move" mbuf pkthdr from "from" to "to". |
813 | | * "from" must have M_PKTHDR set, and "to" must be empty. |
814 | | */ |
815 | | void |
816 | | m_move_pkthdr(struct mbuf *to, struct mbuf *from) |
817 | 0 | { |
818 | |
819 | 0 | to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT); |
820 | 0 | if ((to->m_flags & M_EXT) == 0) |
821 | 0 | to->m_data = to->m_pktdat; |
822 | 0 | to->m_pkthdr = from->m_pkthdr; /* especially tags */ |
823 | 0 | SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */ |
824 | 0 | from->m_flags &= ~M_PKTHDR; |
825 | 0 | } |
826 | | |
827 | | |
828 | | /* |
829 | | * Rearrange an mbuf chain so that len bytes are contiguous |
830 | | * and in the data area of an mbuf (so that mtod and dtom |
831 | | * will work for a structure of size len). Returns the resulting |
832 | | * mbuf chain on success, frees it and returns null on failure. |
833 | | * If there is room, it will add up to max_protohdr-len extra bytes to the |
834 | | * contiguous region in an attempt to avoid being called next time. |
835 | | */ |
836 | | struct mbuf * |
837 | | m_pullup(struct mbuf *n, int len) |
838 | 0 | { |
839 | 0 | struct mbuf *m; |
840 | 0 | int count; |
841 | 0 | int space; |
842 | | |
843 | | /* |
844 | | * If first mbuf has no cluster, and has room for len bytes |
845 | | * without shifting current data, pullup into it, |
846 | | * otherwise allocate a new mbuf to prepend to the chain. |
847 | | */ |
848 | 0 | if ((n->m_flags & M_EXT) == 0 && |
849 | 0 | n->m_data + len < &n->m_dat[MLEN] && n->m_next) { |
850 | 0 | if (n->m_len >= len) |
851 | 0 | return (n); |
852 | 0 | m = n; |
853 | 0 | n = n->m_next; |
854 | 0 | len -= m->m_len; |
855 | 0 | } else { |
856 | 0 | if (len > MHLEN) |
857 | 0 | goto bad; |
858 | 0 | MGET(m, M_NOWAIT, n->m_type); |
859 | 0 | if (m == NULL) |
860 | 0 | goto bad; |
861 | 0 | m->m_len = 0; |
862 | 0 | if (n->m_flags & M_PKTHDR) |
863 | 0 | M_MOVE_PKTHDR(m, n); |
864 | 0 | } |
865 | 0 | space = (int)(&m->m_dat[MLEN] - (m->m_data + m->m_len)); |
866 | 0 | do { |
867 | 0 | count = min(min(max(len, max_protohdr), space), n->m_len); |
868 | 0 | memcpy(mtod(m, caddr_t) + m->m_len,mtod(n, caddr_t), (u_int)count); |
869 | 0 | len -= count; |
870 | 0 | m->m_len += count; |
871 | 0 | n->m_len -= count; |
872 | 0 | space -= count; |
873 | 0 | if (n->m_len) |
874 | 0 | n->m_data += count; |
875 | 0 | else |
876 | 0 | n = m_free(n); |
877 | 0 | } while (len > 0 && n); |
878 | 0 | if (len > 0) { |
879 | 0 | (void) m_free(m); |
880 | 0 | goto bad; |
881 | 0 | } |
882 | 0 | m->m_next = n; |
883 | 0 | return (m); |
884 | 0 | bad: |
885 | 0 | m_freem(n); |
886 | 0 | return (NULL); |
887 | 0 | } |
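A sketch of the usual pattern (the 12-byte length is illustrative, e.g. an SCTP common header). On failure the chain has already been freed, so the old pointer must not be reused:

    m = m_pullup(m, 12);
    if (m == NULL)
        return;                       /* chain too short or allocation failed */
    /* the first 12 bytes are now contiguous at mtod(m, caddr_t) */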
888 | | |
889 | | |
890 | | static struct mbuf * |
891 | | m_dup1(struct mbuf *m, int off, int len, int wait) |
892 | 0 | { |
893 | 0 | struct mbuf *n = NULL; |
894 | 0 | int copyhdr; |
895 | |
896 | 0 | if (len > MCLBYTES) |
897 | 0 | return NULL; |
898 | 0 | if (off == 0 && (m->m_flags & M_PKTHDR) != 0) |
899 | 0 | copyhdr = 1; |
900 | 0 | else |
901 | 0 | copyhdr = 0; |
902 | 0 | if (copyhdr == 1) |
903 | 0 | n = m_gethdr(wait, m->m_type); |
904 | 0 | else |
905 | 0 | n = m_get(wait, m->m_type); |
906 | 0 | if (n != NULL && len >= MINCLSIZE) { |
907 | | /* the old code called m_clget(n, wait) while n was still NULL */ |
908 | 0 | m_clget(n, wait); |
909 | 0 | if ((n->m_flags & M_EXT) == 0) { |
910 | 0 | m_free(n); |
911 | 0 | return NULL; /* ENOBUFS */ |
912 | 0 | } |
913 | 0 | } |
914 | 0 | if (!n) |
915 | 0 | return NULL; /* ENOBUFS */ |
916 | | |
917 | 0 | if (copyhdr && !m_dup_pkthdr(n, m, wait)) { |
918 | 0 | m_free(n); |
919 | 0 | return NULL; |
920 | 0 | } |
921 | 0 | m_copydata(m, off, len, mtod(n, caddr_t)); |
922 | 0 | n->m_len = len; |
923 | 0 | return n; |
924 | 0 | } |
925 | | |
926 | | |
927 | | /* Taken from sys/kern/uipc_mbuf2.c */ |
928 | | struct mbuf * |
929 | | m_pulldown(struct mbuf *m, int off, int len, int *offp) |
930 | 0 | { |
931 | 0 | struct mbuf *n, *o; |
932 | 0 | int hlen, tlen, olen; |
933 | 0 | int writable; |
934 | | |
935 | | /* check invalid arguments. */ |
936 | 0 | KASSERT(m, ("m == NULL in m_pulldown()")); |
937 | 0 | if (len > MCLBYTES) { |
938 | 0 | m_freem(m); |
939 | 0 | return NULL; /* impossible */ |
940 | 0 | } |
941 | | |
942 | | #ifdef PULLDOWN_DEBUG |
943 | | { |
944 | | struct mbuf *t; |
945 | | SCTPDBG(SCTP_DEBUG_USR, "before:"); |
946 | | for (t = m; t; t = t->m_next) |
947 | | SCTPDBG(SCTP_DEBUG_USR, " %d", t->m_len); |
948 | | SCTPDBG(SCTP_DEBUG_USR, "\n"); |
949 | | } |
950 | | #endif |
951 | 0 | n = m; |
952 | 0 | while (n != NULL && off > 0) { |
953 | 0 | if (n->m_len > off) |
954 | 0 | break; |
955 | 0 | off -= n->m_len; |
956 | 0 | n = n->m_next; |
957 | 0 | } |
958 | | /* be sure to point non-empty mbuf */ |
959 | 0 | while (n != NULL && n->m_len == 0) |
960 | 0 | n = n->m_next; |
961 | 0 | if (!n) { |
962 | 0 | m_freem(m); |
963 | 0 | return NULL; /* mbuf chain too short */ |
964 | 0 | } |
965 | | |
966 | 0 | writable = 0; |
967 | 0 | if ((n->m_flags & M_EXT) == 0 || |
968 | 0 | (n->m_ext.ext_type == EXT_CLUSTER && M_WRITABLE(n))) |
969 | 0 | writable = 1; |
970 | | |
971 | | /* |
972 | | * the target data is on <n, off>. |
973 | | * if we got enough data on the mbuf "n", we're done. |
974 | | */ |
975 | 0 | if ((off == 0 || offp) && len <= n->m_len - off && writable) |
976 | 0 | goto ok; |
977 | | |
978 | | /* |
979 | | * when len <= n->m_len - off and off != 0, it is a special case. |
980 | | * len bytes from <n, off> sits in single mbuf, but the caller does |
981 | | * not like the starting position (off). |
982 | | * chop the current mbuf into two pieces, set off to 0. |
983 | | */ |
984 | 0 | if (len <= n->m_len - off) { |
985 | 0 | o = m_dup1(n, off, n->m_len - off, M_NOWAIT); |
986 | 0 | if (o == NULL) { |
987 | 0 | m_freem(m); |
988 | 0 | return NULL; /* ENOBUFS */ |
989 | 0 | } |
990 | 0 | n->m_len = off; |
991 | 0 | o->m_next = n->m_next; |
992 | 0 | n->m_next = o; |
993 | 0 | n = n->m_next; |
994 | 0 | off = 0; |
995 | 0 | goto ok; |
996 | 0 | } |
997 | | /* |
998 | | * we need to take hlen from <n, off> and tlen from <n->m_next, 0>, |
999 | | * and construct contiguous mbuf with m_len == len. |
1000 | | * note that hlen + tlen == len, and tlen > 0. |
1001 | | */ |
1002 | 0 | hlen = n->m_len - off; |
1003 | 0 | tlen = len - hlen; |
1004 | | |
1005 | | /* |
1006 | | * ensure that we have enough trailing data on mbuf chain. |
1007 | | * if not, we can do nothing about the chain. |
1008 | | */ |
1009 | 0 | olen = 0; |
1010 | 0 | for (o = n->m_next; o != NULL; o = o->m_next) |
1011 | 0 | olen += o->m_len; |
1012 | 0 | if (hlen + olen < len) { |
1013 | 0 | m_freem(m); |
1014 | 0 | return NULL; /* mbuf chain too short */ |
1015 | 0 | } |
1016 | | |
1017 | | /* |
1018 | | * easy cases first. |
1019 | | * we need to use m_copydata() to get data from <n->m_next, 0>. |
1020 | | */ |
1021 | 0 | if ((off == 0 || offp) && (M_TRAILINGSPACE(n) >= tlen) && writable) { |
1022 | 0 | m_copydata(n->m_next, 0, tlen, mtod(n, caddr_t) + n->m_len); |
1023 | 0 | n->m_len += tlen; |
1024 | 0 | m_adj(n->m_next, tlen); |
1025 | 0 | goto ok; |
1026 | 0 | } |
1027 | | |
1028 | 0 | if ((off == 0 || offp) && (M_LEADINGSPACE(n->m_next) >= hlen) && writable) { |
1029 | 0 | n->m_next->m_data -= hlen; |
1030 | 0 | n->m_next->m_len += hlen; |
1031 | 0 | memcpy( mtod(n->m_next, caddr_t), mtod(n, caddr_t) + off,hlen); |
1032 | 0 | n->m_len -= hlen; |
1033 | 0 | n = n->m_next; |
1034 | 0 | off = 0; |
1035 | 0 | goto ok; |
1036 | 0 | } |
1037 | | |
1038 | | /* |
1039 | | * now we need to do it the hard way. don't m_copy as there's no room |
1040 | | * on either end. |
1041 | | */ |
1042 | 0 | o = m_get(M_NOWAIT, m->m_type); |
1043 | | /* the old code called m_clget(o, M_NOWAIT) while o was still uninitialized; |
1044 | | * allocate the mbuf first, then attach a cluster when len exceeds MLEN */ |
1045 | 0 | if (o != NULL && len > MLEN) |
1046 | 0 | m_clget(o, M_NOWAIT); |
1047 | 0 | if (!o) { |
1048 | 0 | m_freem(m); |
1049 | 0 | return NULL; /* ENOBUFS */ |
1050 | 0 | } |
1051 | | /* get hlen from <n, off> into <o, 0> */ |
1052 | 0 | o->m_len = hlen; |
1053 | 0 | memcpy(mtod(o, caddr_t), mtod(n, caddr_t) + off, hlen); |
1054 | 0 | n->m_len -= hlen; |
1055 | | /* get tlen from <n->m_next, 0> into <o, hlen> */ |
1056 | 0 | m_copydata(n->m_next, 0, tlen, mtod(o, caddr_t) + o->m_len); |
1057 | 0 | o->m_len += tlen; |
1058 | 0 | m_adj(n->m_next, tlen); |
1059 | 0 | o->m_next = n->m_next; |
1060 | 0 | n->m_next = o; |
1061 | 0 | n = o; |
1062 | 0 | off = 0; |
1063 | 0 | ok: |
1064 | | #ifdef PULLDOWN_DEBUG |
1065 | | { |
1066 | | struct mbuf *t; |
1067 | | SCTPDBG(SCTP_DEBUG_USR, "after:"); |
1068 | | for (t = m; t; t = t->m_next) |
1069 | | SCTPDBG(SCTP_DEBUG_USR, "%c%d", t == n ? '*' : ' ', t->m_len); |
1070 | | SCTPDBG(SCTP_DEBUG_USR, " (off=%d)\n", off); |
1071 | | } |
1072 | | #endif |
1073 | 0 | if (offp) |
1074 | 0 | *offp = off; |
1075 | 0 | return n; |
1076 | 0 | } |
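A sketch with illustrative offsets. Unlike m_pullup(), the contiguous region need not start at offset 0 of the returned mbuf, which is why the starting offset is reported back through *offp:

    int off;
    struct mbuf *n;

    n = m_pulldown(m, 20, 8, &off);   /* want 8 contiguous bytes at chain offset 20 */
    if (n == NULL)
        return;                       /* chain too short or ENOBUFS; m was freed */
    /* the bytes live at mtod(n, caddr_t) + off */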
1077 | | |
1078 | | /* |
1079 | | * Attach the cluster from *m to *n, set up m_ext in *n |
1080 | | * and bump the refcount of the cluster. |
1081 | | */ |
1082 | | static void |
1083 | | mb_dupcl(struct mbuf *n, struct mbuf *m) |
1084 | 166k | { |
1085 | 166k | KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__)); |
1086 | 166k | KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__)); |
1087 | 166k | KASSERT((n->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__)); |
1088 | | |
1089 | 166k | if (*(m->m_ext.ref_cnt) == 1) |
1090 | 3.31k | *(m->m_ext.ref_cnt) += 1; |
1091 | 163k | else |
1092 | 163k | atomic_add_int(m->m_ext.ref_cnt, 1); |
1093 | 166k | n->m_ext.ext_buf = m->m_ext.ext_buf; |
1094 | 166k | n->m_ext.ext_free = m->m_ext.ext_free; |
1095 | 166k | n->m_ext.ext_args = m->m_ext.ext_args; |
1096 | 166k | n->m_ext.ext_size = m->m_ext.ext_size; |
1097 | 166k | n->m_ext.ref_cnt = m->m_ext.ref_cnt; |
1098 | 166k | n->m_ext.ext_type = m->m_ext.ext_type; |
1099 | 166k | n->m_flags |= M_EXT; |
1100 | 166k | } |
1101 | | |
1102 | | |
1103 | | /* |
1104 | | * Make a copy of an mbuf chain starting "off0" bytes from the beginning, |
1105 | | * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf. |
1106 | | * The wait parameter is a choice of M_TRYWAIT/M_NOWAIT from caller. |
1107 | | * Note that the copy is read-only, because clusters are not copied, |
1108 | | * only their reference counts are incremented. |
1109 | | */ |
1110 | | |
1111 | | struct mbuf * |
1112 | | m_copym(struct mbuf *m, int off0, int len, int wait) |
1113 | 190k | { |
1114 | 190k | struct mbuf *n, **np; |
1115 | 190k | int off = off0; |
1116 | 190k | struct mbuf *top; |
1117 | 190k | int copyhdr = 0; |
1118 | | |
1119 | 190k | KASSERT(off >= 0, ("m_copym, negative off %d", off)); |
1120 | 190k | KASSERT(len >= 0, ("m_copym, negative len %d", len)); |
1121 | 190k | KASSERT(m != NULL, ("m_copym, m is NULL")); |
1122 | | |
1123 | | #if !defined(INVARIANTS) |
1124 | | if (m == NULL) { |
1125 | | return (NULL); |
1126 | | } |
1127 | | #endif |
1128 | 190k | if (off == 0 && m->m_flags & M_PKTHDR) |
1129 | 0 | copyhdr = 1; |
1130 | 1.43M | while (off > 0) { |
1131 | 1.43M | KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain")); |
1132 | 1.43M | if (off < m->m_len) |
1133 | 188k | break; |
1134 | 1.24M | off -= m->m_len; |
1135 | 1.24M | m = m->m_next; |
1136 | 1.24M | } |
1137 | 190k | np = &top; |
1138 | 190k | top = 0; |
1139 | 414k | while (len > 0) { |
1140 | 224k | if (m == NULL) { |
1141 | 722 | KASSERT(len == M_COPYALL, ("m_copym, length > size of mbuf chain")); |
1142 | 722 | break; |
1143 | 722 | } |
1144 | 223k | if (copyhdr) |
1145 | 0 | MGETHDR(n, wait, m->m_type); |
1146 | 223k | else |
1147 | 223k | MGET(n, wait, m->m_type); |
1148 | 223k | *np = n; |
1149 | 223k | if (n == NULL) |
1150 | 0 | goto nospace; |
1151 | 223k | if (copyhdr) { |
1152 | 0 | if (!m_dup_pkthdr(n, m, wait)) |
1153 | 0 | goto nospace; |
1154 | 0 | if (len == M_COPYALL) |
1155 | 0 | n->m_pkthdr.len -= off0; |
1156 | 0 | else |
1157 | 0 | n->m_pkthdr.len = len; |
1158 | 0 | copyhdr = 0; |
1159 | 0 | } |
1160 | 223k | n->m_len = min(len, m->m_len - off); |
1161 | 223k | if (m->m_flags & M_EXT) { |
1162 | 166k | n->m_data = m->m_data + off; |
1163 | 166k | mb_dupcl(n, m); |
1164 | 166k | } else |
1165 | 57.3k | memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, (u_int)n->m_len); |
1166 | 223k | if (len != M_COPYALL) |
1167 | 206k | len -= n->m_len; |
1168 | 223k | off = 0; |
1169 | 223k | m = m->m_next; |
1170 | 223k | np = &n->m_next; |
1171 | 223k | } |
1172 | | |
1173 | 190k | return (top); |
1174 | 0 | nospace: |
1175 | 0 | m_freem(top); |
1176 | 0 | return (NULL); |
1177 | 190k | } |
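A sketch of the read-only copy semantics: cluster-backed data is shared via mb_dupcl() rather than copied, so the copy must not be written through without checking writability first:

    struct mbuf *copy;

    copy = m_copym(m, 0, M_COPYALL, M_NOWAIT);
    if (copy != NULL) {
        /* clusters are shared; ref_cnt was bumped instead of copying the data,
         * so check M_WRITABLE() before modifying either chain */
        m_freem(copy);                /* drops the extra cluster references */
    }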
1178 | | |
1179 | | |
1180 | | int |
1181 | | m_tag_copy_chain(struct mbuf *to, struct mbuf *from, int how) |
1182 | 0 | { |
1183 | 0 | struct m_tag *p, *t, *tprev = NULL; |
1184 | |
1185 | 0 | KASSERT(to && from, ("m_tag_copy_chain: null argument, to %p from %p", (void *)to, (void *)from)); |
1186 | 0 | m_tag_delete_chain(to, NULL); |
1187 | 0 | SLIST_FOREACH(p, &from->m_pkthdr.tags, m_tag_link) { |
1188 | 0 | t = m_tag_copy(p, how); |
1189 | 0 | if (t == NULL) { |
1190 | 0 | m_tag_delete_chain(to, NULL); |
1191 | 0 | return 0; |
1192 | 0 | } |
1193 | 0 | if (tprev == NULL) |
1194 | 0 | SLIST_INSERT_HEAD(&to->m_pkthdr.tags, t, m_tag_link); |
1195 | 0 | else |
1196 | 0 | SLIST_INSERT_AFTER(tprev, t, m_tag_link); |
1197 | 0 | tprev = t; |
1198 | 0 | } |
1199 | 0 | return 1; |
1200 | 0 | } |
1201 | | |
1202 | | /* |
1203 | | * Duplicate "from"'s mbuf pkthdr in "to". |
1204 | | * "from" must have M_PKTHDR set, and "to" must be empty. |
1205 | | * In particular, this does a deep copy of the packet tags. |
1206 | | */ |
1207 | | int |
1208 | | m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how) |
1209 | 0 | { |
1210 | |
1211 | 0 | KASSERT(to, ("m_dup_pkthdr: to is NULL")); |
1212 | 0 | KASSERT(from, ("m_dup_pkthdr: from is NULL")); |
1213 | 0 | to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT); |
1214 | 0 | if ((to->m_flags & M_EXT) == 0) |
1215 | 0 | to->m_data = to->m_pktdat; |
1216 | 0 | to->m_pkthdr = from->m_pkthdr; |
1217 | 0 | SLIST_INIT(&to->m_pkthdr.tags); |
1218 | 0 | return (m_tag_copy_chain(to, from, MBTOM(how))); |
1219 | 0 | } |
1220 | | |
1221 | | /* Copy a single tag. */ |
1222 | | struct m_tag * |
1223 | | m_tag_copy(struct m_tag *t, int how) |
1224 | 0 | { |
1225 | 0 | struct m_tag *p; |
1226 | |
1227 | 0 | KASSERT(t, ("m_tag_copy: null tag")); |
1228 | 0 | p = m_tag_alloc(t->m_tag_cookie, t->m_tag_id, t->m_tag_len, how); |
1229 | 0 | if (p == NULL) |
1230 | 0 | return (NULL); |
1231 | 0 | memcpy(p + 1, t + 1, t->m_tag_len); /* Copy the data */ |
1232 | 0 | return p; |
1233 | 0 | } |
1234 | | |
1235 | | /* Get a packet tag structure along with specified data following. */ |
1236 | | struct m_tag * |
1237 | | m_tag_alloc(uint32_t cookie, int type, int len, int wait) |
1238 | 0 | { |
1239 | 0 | struct m_tag *t; |
1240 | |
1241 | 0 | if (len < 0) |
1242 | 0 | return NULL; |
1243 | 0 | t = malloc(len + sizeof(struct m_tag)); |
1244 | 0 | if (t == NULL) |
1245 | 0 | return NULL; |
1246 | 0 | m_tag_setup(t, cookie, type, len); |
1247 | 0 | t->m_tag_free = m_tag_free_default; |
1248 | 0 | return t; |
1249 | 0 | } |
1250 | | |
1251 | | /* Free a packet tag. */ |
1252 | | void |
1253 | | m_tag_free_default(struct m_tag *t) |
1254 | 0 | { |
1255 | 0 | free(t); |
1256 | 0 | } |
1257 | | |
1258 | | /* |
1259 | | * Copy data from a buffer back into the indicated mbuf chain, |
1260 | | * starting "off" bytes from the beginning, extending the mbuf |
1261 | | * chain if necessary. |
1262 | | */ |
1263 | | void |
1264 | | m_copyback(struct mbuf *m0, int off, int len, caddr_t cp) |
1265 | 24.0k | { |
1266 | 24.0k | int mlen; |
1267 | 24.0k | struct mbuf *m = m0, *n; |
1268 | 24.0k | int totlen = 0; |
1269 | | |
1270 | 24.0k | if (m0 == NULL) |
1271 | 0 | return; |
1272 | 24.0k | while (off > (mlen = m->m_len)) { |
1273 | 0 | off -= mlen; |
1274 | 0 | totlen += mlen; |
1275 | 0 | if (m->m_next == NULL) { |
1276 | 0 | n = m_get(M_NOWAIT, m->m_type); |
1277 | 0 | if (n == NULL) |
1278 | 0 | goto out; |
1279 | 0 | memset(mtod(n, caddr_t), 0, MLEN); |
1280 | 0 | n->m_len = min(MLEN, len + off); |
1281 | 0 | m->m_next = n; |
1282 | 0 | } |
1283 | 0 | m = m->m_next; |
1284 | 0 | } |
1285 | 52.0k | while (len > 0) { |
1286 | 52.0k | mlen = min (m->m_len - off, len); |
1287 | 52.0k | memcpy(off + mtod(m, caddr_t), cp, (u_int)mlen); |
1288 | 52.0k | cp += mlen; |
1289 | 52.0k | len -= mlen; |
1290 | 52.0k | mlen += off; |
1291 | 52.0k | off = 0; |
1292 | 52.0k | totlen += mlen; |
1293 | 52.0k | if (len == 0) |
1294 | 24.0k | break; |
1295 | 28.0k | if (m->m_next == NULL) { |
1296 | 0 | n = m_get(M_NOWAIT, m->m_type); |
1297 | 0 | if (n == NULL) |
1298 | 0 | break; |
1299 | 0 | n->m_len = min(MLEN, len); |
1300 | 0 | m->m_next = n; |
1301 | 0 | } |
1302 | 28.0k | m = m->m_next; |
1303 | 28.0k | } |
1304 | 24.0k | out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen)) |
1305 | 0 | m->m_pkthdr.len = totlen; |
1306 | 24.0k | } |
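A sketch with illustrative offset and value: overwrite four bytes at offset 8, extending the chain with fresh mbufs if it is too short:

    uint32_t val = 0;

    m_copyback(m, 8, (int)sizeof(val), (caddr_t)&val);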
1307 | | |
1308 | | /* |
1309 | | * Apply function f to the data in an mbuf chain starting "off" bytes from |
1310 | | * the beginning, continuing for "len" bytes. |
1311 | | */ |
1312 | | int |
1313 | | m_apply(struct mbuf *m, int off, int len, |
1314 | | int (*f)(void *, void *, u_int), void *arg) |
1315 | 0 | { |
1316 | 0 | u_int count; |
1317 | 0 | int rval; |
1318 | |
1319 | 0 | KASSERT(off >= 0, ("m_apply, negative off %d", off)); |
1320 | 0 | KASSERT(len >= 0, ("m_apply, negative len %d", len)); |
1321 | 0 | while (off > 0) { |
1322 | 0 | KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain")); |
1323 | 0 | if (off < m->m_len) |
1324 | 0 | break; |
1325 | 0 | off -= m->m_len; |
1326 | 0 | m = m->m_next; |
1327 | 0 | } |
1328 | 0 | while (len > 0) { |
1329 | 0 | KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain")); |
1330 | 0 | count = min(m->m_len - off, len); |
1331 | 0 | rval = (*f)(arg, mtod(m, caddr_t) + off, count); |
1332 | 0 | if (rval) |
1333 | 0 | return (rval); |
1334 | 0 | len -= count; |
1335 | 0 | off = 0; |
1336 | 0 | m = m->m_next; |
1337 | 0 | } |
1338 | 0 | return (0); |
1339 | 0 | } |
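A sketch of a callback that folds over the chain without linearizing it (the byte sum here is illustrative, not the SCTP CRC32c):

    static int
    sum_bytes(void *arg, void *data, u_int len)
    {
        u_int i;
        u_int *acc = (u_int *)arg;
        unsigned char *p = (unsigned char *)data;

        for (i = 0; i < len; i++)
            *acc += p[i];
        return (0);                   /* a non-zero return would abort the walk */
    }

    u_int acc = 0;
    (void)m_apply(m, 0, (int)m_length(m, NULL), sum_bytes, &acc);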
1340 | | |
1341 | | /* |
1342 | | * Lesser-used path for M_PREPEND: |
1343 | | * allocate new mbuf to prepend to chain, |
1344 | | * copy junk along. |
1345 | | */ |
1346 | | struct mbuf * |
1347 | | m_prepend(struct mbuf *m, int len, int how) |
1348 | 137k | { |
1349 | 137k | struct mbuf *mn; |
1350 | | |
1351 | 137k | if (m->m_flags & M_PKTHDR) |
1352 | 0 | MGETHDR(mn, how, m->m_type); |
1353 | 137k | else |
1354 | 137k | MGET(mn, how, m->m_type); |
1355 | 137k | if (mn == NULL) { |
1356 | 0 | m_freem(m); |
1357 | 0 | return (NULL); |
1358 | 0 | } |
1359 | 137k | if (m->m_flags & M_PKTHDR) |
1360 | 0 | M_MOVE_PKTHDR(mn, m); |
1361 | 137k | mn->m_next = m; |
1362 | 137k | m = mn; |
1363 | 137k | if (m->m_flags & M_PKTHDR) { |
1364 | 0 | if (len < MHLEN) |
1365 | 0 | MH_ALIGN(m, len); |
1366 | 137k | } else { |
1367 | 137k | if (len < MLEN) |
1368 | 137k | M_ALIGN(m, len); |
1369 | 137k | } |
1370 | 137k | m->m_len = len; |
1371 | 137k | return (m); |
1372 | 137k | } |
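A sketch with an illustrative length: prepend 4 bytes of header space to a chain. On failure the original chain has already been freed:

    m = m_prepend(m, 4, M_NOWAIT);
    if (m == NULL)
        return;                       /* chain already freed */
    /* the new first mbuf has m_len == 4, aligned so the data abuts the tail */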
1373 | | |
1374 | | /* |
1375 | | * Copy data from an mbuf chain starting "off" bytes from the beginning, |
1376 | | * continuing for "len" bytes, into the indicated buffer. |
1377 | | */ |
1378 | | void |
1379 | | m_copydata(const struct mbuf *m, int off, int len, caddr_t cp) |
1380 | 193k | { |
1381 | 193k | u_int count; |
1382 | | |
1383 | 193k | KASSERT(off >= 0, ("m_copydata, negative off %d", off)); |
1384 | 193k | KASSERT(len >= 0, ("m_copydata, negative len %d", len)); |
1385 | 193k | while (off > 0) { |
1386 | 1.05k | KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain")); |
1387 | 1.05k | if (off < m->m_len) |
1388 | 462 | break; |
1389 | 588 | off -= m->m_len; |
1390 | 588 | m = m->m_next; |
1391 | 588 | } |
1392 | 749k | while (len > 0) { |
1393 | 556k | KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain")); |
1394 | 556k | count = min(m->m_len - off, len); |
1395 | 556k | memcpy(cp, mtod(m, caddr_t) + off, count); |
1396 | 556k | len -= count; |
1397 | 556k | cp += count; |
1398 | 556k | off = 0; |
1399 | 556k | m = m->m_next; |
1400 | 556k | } |
1401 | 193k | } |
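A sketch of linearizing part of a chain into a flat buffer. The KASSERTs fire if the chain holds fewer than off + len bytes, so callers validate lengths first, e.g. against m_length():

    char buf[64];

    if (m_length(m, NULL) >= sizeof(buf))
        m_copydata(m, 0, (int)sizeof(buf), buf);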
1402 | | |
1403 | | |
1404 | | /* |
1405 | | * Concatenate mbuf chain n to m. |
1406 | | * Both chains must be of the same type (e.g. MT_DATA). |
1407 | | * Any m_pkthdr is not updated. |
1408 | | */ |
1409 | | void |
1410 | | m_cat(struct mbuf *m, struct mbuf *n) |
1411 | 0 | { |
1412 | 0 | while (m->m_next) |
1413 | 0 | m = m->m_next; |
1414 | 0 | while (n) { |
1415 | 0 | if (m->m_flags & M_EXT || |
1416 | 0 | m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) { |
1417 | | /* just join the two chains */ |
1418 | 0 | m->m_next = n; |
1419 | 0 | return; |
1420 | 0 | } |
1421 | | /* splat the data from one into the other */ |
1422 | 0 | memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t), (u_int)n->m_len); |
1423 | 0 | m->m_len += n->m_len; |
1424 | 0 | n = m_free(n); |
1425 | 0 | } |
1426 | 0 | } |
1427 | | |
1428 | | |
1429 | | void |
1430 | | m_adj(struct mbuf *mp, int req_len) |
1431 | 1.16k | { |
1432 | 1.16k | int len = req_len; |
1433 | 1.16k | struct mbuf *m; |
1434 | 1.16k | int count; |
1435 | | |
1436 | 1.16k | if ((m = mp) == NULL) |
1437 | 0 | return; |
1438 | 1.16k | if (len >= 0) { |
1439 | | /* |
1440 | | * Trim from head. |
1441 | | */ |
1442 | 1.89k | while (m != NULL && len > 0) { |
1443 | 1.30k | if (m->m_len <= len) { |
1444 | 746 | len -= m->m_len; |
1445 | 746 | m->m_len = 0; |
1446 | 746 | m = m->m_next; |
1447 | 746 | } else { |
1448 | 563 | m->m_len -= len; |
1449 | 563 | m->m_data += len; |
1450 | 563 | len = 0; |
1451 | 563 | } |
1452 | 1.30k | } |
1453 | 589 | m = mp; |
1454 | 589 | if (mp->m_flags & M_PKTHDR) |
1455 | 93 | m->m_pkthdr.len -= (req_len - len); |
1456 | 589 | } else { |
1457 | | /* |
1458 | | * Trim from tail. Scan the mbuf chain, |
1459 | | * calculating its length and finding the last mbuf. |
1460 | | * If the adjustment only affects this mbuf, then just |
1461 | | * adjust and return. Otherwise, rescan and truncate |
1462 | | * after the remaining size. |
1463 | | */ |
1464 | 574 | len = -len; |
1465 | 574 | count = 0; |
1466 | 1.23k | for (;;) { |
1467 | 1.23k | count += m->m_len; |
1468 | 1.23k | if (m->m_next == (struct mbuf *)0) |
1469 | 574 | break; |
1470 | 660 | m = m->m_next; |
1471 | 660 | } |
1472 | 574 | if (m->m_len >= len) { |
1473 | 574 | m->m_len -= len; |
1474 | 574 | if (mp->m_flags & M_PKTHDR) |
1475 | 87 | mp->m_pkthdr.len -= len; |
1476 | 574 | return; |
1477 | 574 | } |
1478 | 0 | count -= len; |
1479 | 0 | if (count < 0) |
1480 | 0 | count = 0; |
1481 | | /* |
1482 | | * Correct length for chain is "count". |
1483 | | * Find the mbuf with last data, adjust its length, |
1484 | | * and toss data from remaining mbufs on chain. |
1485 | | */ |
1486 | 0 | m = mp; |
1487 | 0 | if (m->m_flags & M_PKTHDR) |
1488 | 0 | m->m_pkthdr.len = count; |
1489 | 0 | for (; m; m = m->m_next) { |
1490 | 0 | if (m->m_len >= count) { |
1491 | 0 | m->m_len = count; |
1492 | 0 | if (m->m_next != NULL) { |
1493 | 0 | m_freem(m->m_next); |
1494 | 0 | m->m_next = NULL; |
1495 | 0 | } |
1496 | 0 | break; |
1497 | 0 | } |
1498 | 0 | count -= m->m_len; |
1499 | 0 | } |
1500 | 0 | } |
1501 | 1.16k | } |
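A trimming sketch (lengths illustrative): a positive request trims from the head, a negative one from the tail, and m_pkthdr.len is kept in sync when the first mbuf carries M_PKTHDR:

    m_adj(m, 4);     /* strip a 4-byte prefix */
    m_adj(m, -4);    /* strip a 4-byte trailer */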
1502 | | |
1503 | | |
1504 | | /* m_split is used within sctp_handle_cookie_echo. */ |
1505 | | |
1506 | | /* |
1507 | | * Partition an mbuf chain in two pieces, returning the tail -- |
1508 | | * all but the first len0 bytes. In case of failure, it returns NULL and |
1509 | | * attempts to restore the chain to its original state. |
1510 | | * |
1511 | | * Note that the resulting mbufs might be read-only, because the new |
1512 | | * mbuf can end up sharing an mbuf cluster with the original mbuf if |
1513 | | * the "breaking point" happens to lie within a cluster mbuf. Use the |
1514 | | * M_WRITABLE() macro to check for this case. |
1515 | | */ |
1516 | | struct mbuf * |
1517 | | m_split(struct mbuf *m0, int len0, int wait) |
1518 | 0 | { |
1519 | 0 | struct mbuf *m, *n; |
1520 | 0 | u_int len = len0, remain; |
1521 | | |
1522 | | /* MBUF_CHECKSLEEP(wait); */ |
1523 | 0 | for (m = m0; m && (int)len > m->m_len; m = m->m_next) |
1524 | 0 | len -= m->m_len; |
1525 | 0 | if (m == NULL) |
1526 | 0 | return (NULL); |
1527 | 0 | remain = m->m_len - len; |
1528 | 0 | if (m0->m_flags & M_PKTHDR) { |
1529 | 0 | MGETHDR(n, wait, m0->m_type); |
1530 | 0 | if (n == NULL) |
1531 | 0 | return (NULL); |
1532 | 0 | n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif; |
1533 | 0 | n->m_pkthdr.len = m0->m_pkthdr.len - len0; |
1534 | 0 | m0->m_pkthdr.len = len0; |
1535 | 0 | if (m->m_flags & M_EXT) |
1536 | 0 | goto extpacket; |
1537 | 0 | if (remain > MHLEN) { |
1538 | | /* m can't be the lead packet */ |
1539 | 0 | MH_ALIGN(n, 0); |
1540 | 0 | n->m_next = m_split(m, len, wait); |
1541 | 0 | if (n->m_next == NULL) { |
1542 | 0 | (void) m_free(n); |
1543 | 0 | return (NULL); |
1544 | 0 | } else { |
1545 | 0 | n->m_len = 0; |
1546 | 0 | return (n); |
1547 | 0 | } |
1548 | 0 | } else |
1549 | 0 | MH_ALIGN(n, remain); |
1550 | 0 | } else if (remain == 0) { |
1551 | 0 | n = m->m_next; |
1552 | 0 | m->m_next = NULL; |
1553 | 0 | return (n); |
1554 | 0 | } else { |
1555 | 0 | MGET(n, wait, m->m_type); |
1556 | 0 | if (n == NULL) |
1557 | 0 | return (NULL); |
1558 | 0 | M_ALIGN(n, remain); |
1559 | 0 | } |
1560 | 0 | extpacket: |
1561 | 0 | if (m->m_flags & M_EXT) { |
1562 | 0 | n->m_data = m->m_data + len; |
1563 | 0 | mb_dupcl(n, m); |
1564 | 0 | } else { |
1565 | 0 | memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + len, remain); |
1566 | 0 | } |
1567 | 0 | n->m_len = remain; |
1568 | 0 | m->m_len = len; |
1569 | 0 | n->m_next = m->m_next; |
1570 | 0 | m->m_next = NULL; |
1571 | 0 | return (n); |
1572 | 0 | } |
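A sketch with an illustrative split point: keep the first 100 bytes in m and receive the remainder. The two chains may share a cluster afterwards, so M_WRITABLE() should be checked before modifying either:

    struct mbuf *tail;

    tail = m_split(m, 100, M_NOWAIT);
    if (tail != NULL)
        m_freem(tail);               /* on NULL, the chain is left restored where possible */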
1573 | | |
1574 | | |
1575 | | |
1576 | | |
1577 | | int |
1578 | 0 | pack_send_buffer(caddr_t buffer, struct mbuf *mb) { |
1579 | |
1580 | 0 | int count_to_copy; |
1581 | 0 | int total_count_copied = 0; |
1582 | 0 | int offset = 0; |
1583 | |
1584 | 0 | do { |
1585 | 0 | count_to_copy = mb->m_len; |
1586 | 0 | memcpy(buffer+offset, mtod(mb, caddr_t), count_to_copy); |
1587 | 0 | offset += count_to_copy; |
1588 | 0 | total_count_copied += count_to_copy; |
1589 | 0 | mb = mb->m_next; |
1590 | 0 | } while(mb); |
1591 | |
1592 | 0 | return (total_count_copied); |
1593 | 0 | } |
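pack_send_buffer() performs no bounds checking, so the destination must be sized from the chain first; a sketch:

    u_int len;
    caddr_t buf;

    len = m_length(mb, NULL);
    buf = malloc(len);
    if (buf != NULL) {
        (void)pack_send_buffer(buf, mb);
        free(buf);
    }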