/src/SockFuzzer/third_party/xnu/bsd/net/bpf.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2000-2020 Apple Inc. All rights reserved. |
3 | | * |
4 | | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | | * |
6 | | * This file contains Original Code and/or Modifications of Original Code |
7 | | * as defined in and that are subject to the Apple Public Source License |
8 | | * Version 2.0 (the 'License'). You may not use this file except in |
9 | | * compliance with the License. The rights granted to you under the License |
10 | | * may not be used to create, or enable the creation or redistribution of, |
11 | | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | | * circumvent, violate, or enable the circumvention or violation of, any |
13 | | * terms of an Apple operating system software license agreement. |
14 | | * |
15 | | * Please obtain a copy of the License at |
16 | | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | | * |
18 | | * The Original Code and all software distributed under the License are |
19 | | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | | * Please see the License for the specific language governing rights and |
24 | | * limitations under the License. |
25 | | * |
26 | | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | | */ |
28 | | /* |
29 | | * Copyright (c) 1990, 1991, 1993 |
30 | | * The Regents of the University of California. All rights reserved. |
31 | | * |
32 | | * This code is derived from the Stanford/CMU enet packet filter, |
33 | | * (net/enet.c) distributed as part of 4.3BSD, and code contributed |
34 | | * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence |
35 | | * Berkeley Laboratory. |
36 | | * |
37 | | * Redistribution and use in source and binary forms, with or without |
38 | | * modification, are permitted provided that the following conditions |
39 | | * are met: |
40 | | * 1. Redistributions of source code must retain the above copyright |
41 | | * notice, this list of conditions and the following disclaimer. |
42 | | * 2. Redistributions in binary form must reproduce the above copyright |
43 | | * notice, this list of conditions and the following disclaimer in the |
44 | | * documentation and/or other materials provided with the distribution. |
45 | | * 3. All advertising materials mentioning features or use of this software |
46 | | * must display the following acknowledgement: |
47 | | * This product includes software developed by the University of |
48 | | * California, Berkeley and its contributors. |
49 | | * 4. Neither the name of the University nor the names of its contributors |
50 | | * may be used to endorse or promote products derived from this software |
51 | | * without specific prior written permission. |
52 | | * |
53 | | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
54 | | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
55 | | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
56 | | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
57 | | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
58 | | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
59 | | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
60 | | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
61 | | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
62 | | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
63 | | * SUCH DAMAGE. |
64 | | * |
65 | | * @(#)bpf.c 8.2 (Berkeley) 3/28/94 |
66 | | * |
67 | | * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.5 2001/01/05 04:49:09 jdp Exp $ |
68 | | */ |
69 | | /* |
70 | | * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce |
71 | | * support for mandatory and extensible security protections. This notice |
72 | | * is included in support of clause 2.2 (b) of the Apple Public License, |
73 | | * Version 2.0. |
74 | | */ |
75 | | |
76 | | #include "bpf.h" |
77 | | |
78 | | #ifndef __GNUC__ |
79 | | #define inline |
80 | | #else |
81 | | #define inline __inline |
82 | | #endif |
83 | | |
84 | | #include <sys/param.h> |
85 | | #include <sys/systm.h> |
86 | | #include <sys/conf.h> |
87 | | #include <sys/malloc.h> |
88 | | #include <sys/mbuf.h> |
89 | | #include <sys/time.h> |
90 | | #include <sys/proc.h> |
91 | | #include <sys/signalvar.h> |
92 | | #include <sys/filio.h> |
93 | | #include <sys/sockio.h> |
94 | | #include <sys/ttycom.h> |
95 | | #include <sys/filedesc.h> |
96 | | #include <sys/uio_internal.h> |
97 | | #include <sys/file_internal.h> |
98 | | #include <sys/event.h> |
99 | | |
100 | | #include <sys/poll.h> |
101 | | |
102 | | #include <sys/socket.h> |
103 | | #include <sys/socketvar.h> |
104 | | #include <sys/vnode.h> |
105 | | |
106 | | #include <net/if.h> |
107 | | #include <net/bpf.h> |
108 | | #include <net/bpfdesc.h> |
109 | | |
110 | | #include <netinet/in.h> |
111 | | #include <netinet/ip.h> |
112 | | #include <netinet/ip6.h> |
113 | | #include <netinet/in_pcb.h> |
114 | | #include <netinet/in_var.h> |
115 | | #include <netinet/ip_var.h> |
116 | | #include <netinet/tcp.h> |
117 | | #include <netinet/tcp_var.h> |
118 | | #include <netinet/udp.h> |
119 | | #include <netinet/udp_var.h> |
120 | | #include <netinet/if_ether.h> |
121 | | #include <netinet/isakmp.h> |
122 | | #include <netinet6/esp.h> |
123 | | #include <sys/kernel.h> |
124 | | #include <sys/sysctl.h> |
125 | | #include <net/firewire.h> |
126 | | |
127 | | #include <miscfs/devfs/devfs.h> |
128 | | #include <net/dlil.h> |
129 | | #include <net/pktap.h> |
130 | | |
131 | | #include <kern/locks.h> |
132 | | #include <kern/thread_call.h> |
133 | | #include <libkern/section_keywords.h> |
134 | | |
135 | | #include <os/log.h> |
136 | | |
137 | | extern int tvtohz(struct timeval *); |
138 | | |
139 | | #define BPF_BUFSIZE 4096 |
140 | 0 | #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio) |
141 | | |
142 | 0 | #define PRINET 26 /* interruptible */ |
143 | | |
144 | 0 | #define ISAKMP_HDR_SIZE (sizeof(struct isakmp) + sizeof(struct isakmp_gen)) |
145 | 0 | #define ESP_HDR_SIZE sizeof(struct newesp) |
146 | | |
147 | | typedef void (*pktcopyfunc_t)(const void *, void *, size_t); |
148 | | |
149 | | /* |
150 | | * The default read buffer size is patchable. |
151 | | */ |
152 | | static unsigned int bpf_bufsize = BPF_BUFSIZE; |
153 | | SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW | CTLFLAG_LOCKED, |
154 | | &bpf_bufsize, 0, ""); |
155 | | |
156 | | static int sysctl_bpf_maxbufsize SYSCTL_HANDLER_ARGS; |
157 | | extern const int copysize_limit_panic; |
158 | 0 | #define BPF_MAXSIZE_CAP (copysize_limit_panic >> 1) |
159 | | __private_extern__ unsigned int bpf_maxbufsize = BPF_MAXBUFSIZE; |
160 | | SYSCTL_PROC(_debug, OID_AUTO, bpf_maxbufsize, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, |
161 | | &bpf_maxbufsize, 0, |
162 | | sysctl_bpf_maxbufsize, "I", "Default BPF max buffer size"); |
163 | | |
164 | | static unsigned int bpf_maxdevices = 256; |
165 | | SYSCTL_UINT(_debug, OID_AUTO, bpf_maxdevices, CTLFLAG_RW | CTLFLAG_LOCKED, |
166 | | &bpf_maxdevices, 0, ""); |
167 | | /* |
168 | | * bpf_wantpktap controls the default visibility of DLT_PKTAP. |
169 | | * On OS X it is off by default, so a process needs to use the ioctl BPF_WANT_PKTAP |
170 | | * explicitly to be able to use DLT_PKTAP. |
171 | | */ |
172 | | #if !XNU_TARGET_OS_OSX |
173 | | static unsigned int bpf_wantpktap = 1; |
174 | | #else /* XNU_TARGET_OS_OSX */ |
175 | | static unsigned int bpf_wantpktap = 0; |
176 | | #endif /* XNU_TARGET_OS_OSX */ |
177 | | SYSCTL_UINT(_debug, OID_AUTO, bpf_wantpktap, CTLFLAG_RW | CTLFLAG_LOCKED, |
178 | | &bpf_wantpktap, 0, ""); |
179 | | |
180 | | static int bpf_debug = 0; |
181 | | SYSCTL_INT(_debug, OID_AUTO, bpf_debug, CTLFLAG_RW | CTLFLAG_LOCKED, |
182 | | &bpf_debug, 0, ""); |
183 | | |
184 | | /* |
185 | | * bpf_iflist is the list of interfaces; each corresponds to an ifnet |
186 | | * bpf_dtab holds pointers to the descriptors, indexed by minor device # |
187 | | */ |
188 | | static struct bpf_if *bpf_iflist; |
189 | | #ifdef __APPLE__ |
190 | | /* |
191 | | * BSD now stores the bpf_d in the dev_t which is a struct |
192 | | * on their system. Our dev_t is an int, so we still store |
193 | | * the bpf_d in a separate table indexed by minor device #. |
194 | | * |
195 | | * The value stored in bpf_dtab[n] represents one of three states: |
196 | | * NULL: device not opened |
197 | | * BPF_DEV_RESERVED: device opening or closing |
198 | | * other: device <n> opened with pointer to storage |
199 | | */ |
200 | 0 | #define BPF_DEV_RESERVED ((struct bpf_d *)(uintptr_t)1) |
201 | | static struct bpf_d **bpf_dtab = NULL; |
202 | | static unsigned int bpf_dtab_size = 0; |
203 | | static unsigned int nbpfilter = 0; |
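The three states map onto lookups like the following minimal sketch (a hypothetical helper, not part of this file, shown only to illustrate the convention; bpfopen()/bpfclose() below perform these checks inline):

    /* Hypothetical helper illustrating the bpf_dtab[] states above. */
    static struct bpf_d *
    bpf_dtab_lookup(unsigned int minor_num)
    {
            struct bpf_d *d = bpf_dtab[minor_num];

            if (d == NULL || d == BPF_DEV_RESERVED) {
                    /* not opened, or opening/closing on another thread */
                    return NULL;
            }
            return d;       /* opened: d points to the descriptor */
    }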
204 | | |
205 | | decl_lck_mtx_data(static, bpf_mlock_data); |
206 | | static lck_mtx_t *bpf_mlock = &bpf_mlock_data; |
207 | | static lck_grp_t *bpf_mlock_grp; |
208 | | static lck_grp_attr_t *bpf_mlock_grp_attr; |
209 | | static lck_attr_t *bpf_mlock_attr; |
210 | | |
211 | | #endif /* __APPLE__ */ |
212 | | |
213 | | static int bpf_allocbufs(struct bpf_d *); |
214 | | static errno_t bpf_attachd(struct bpf_d *d, struct bpf_if *bp); |
215 | | static int bpf_detachd(struct bpf_d *d, int); |
216 | | static void bpf_freed(struct bpf_d *); |
217 | | static int bpf_movein(struct uio *, int, |
218 | | struct mbuf **, struct sockaddr *, int *); |
219 | | static int bpf_setif(struct bpf_d *, ifnet_t ifp, bool, bool); |
220 | | static void bpf_timed_out(void *, void *); |
221 | | static void bpf_wakeup(struct bpf_d *); |
222 | | static u_int get_pkt_trunc_len(u_char *, u_int); |
223 | | static void catchpacket(struct bpf_d *, struct bpf_packet *, u_int, int); |
224 | | static void reset_d(struct bpf_d *); |
225 | | static int bpf_setf(struct bpf_d *, u_int, user_addr_t, u_long); |
226 | | static int bpf_getdltlist(struct bpf_d *, caddr_t, struct proc *); |
227 | | static int bpf_setdlt(struct bpf_d *, u_int); |
228 | | static int bpf_set_traffic_class(struct bpf_d *, int); |
229 | | static void bpf_set_packet_service_class(struct mbuf *, int); |
230 | | |
231 | | static void bpf_acquire_d(struct bpf_d *); |
232 | | static void bpf_release_d(struct bpf_d *); |
233 | | |
234 | | static int bpf_devsw_installed; |
235 | | |
236 | | void bpf_init(void *unused); |
237 | | static int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m); |
238 | | |
239 | | /* |
240 | | * Darwin differs from BSD here: the following are static |
241 | | * on BSD and not static on Darwin. |
242 | | */ |
243 | | d_open_t bpfopen; |
244 | | d_close_t bpfclose; |
245 | | d_read_t bpfread; |
246 | | d_write_t bpfwrite; |
247 | | ioctl_fcn_t bpfioctl; |
248 | | select_fcn_t bpfselect; |
249 | | |
250 | | /* Darwin's cdevsw struct differs slightly from BSD's */ |
251 | 0 | #define CDEV_MAJOR 23 |
252 | | static const struct cdevsw bpf_cdevsw = { |
253 | | .d_open = bpfopen, |
254 | | .d_close = bpfclose, |
255 | | .d_read = bpfread, |
256 | | .d_write = bpfwrite, |
257 | | .d_ioctl = bpfioctl, |
258 | | .d_stop = eno_stop, |
259 | | .d_reset = eno_reset, |
260 | | .d_ttys = NULL, |
261 | | .d_select = bpfselect, |
262 | | .d_mmap = eno_mmap, |
263 | | .d_strategy = eno_strat, |
264 | | .d_reserved_1 = eno_getc, |
265 | | .d_reserved_2 = eno_putc, |
266 | | .d_type = 0 |
267 | | }; |
268 | | |
269 | 0 | #define SOCKADDR_HDR_LEN offsetof(struct sockaddr, sa_data) |
270 | | |
271 | | static int |
272 | | bpf_movein(struct uio *uio, int linktype, struct mbuf **mp, |
273 | | struct sockaddr *sockp, int *datlen) |
274 | 0 | { |
275 | 0 | struct mbuf *m; |
276 | 0 | int error; |
277 | 0 | int len; |
278 | 0 | uint8_t sa_family; |
279 | 0 | int hlen; |
280 | |
281 | 0 | switch (linktype) { |
282 | | #if SLIP |
283 | | case DLT_SLIP: |
284 | | sa_family = AF_INET; |
285 | | hlen = 0; |
286 | | break; |
287 | | #endif /* SLIP */ |
288 | | |
289 | 0 | case DLT_EN10MB: |
290 | 0 | sa_family = AF_UNSPEC; |
291 | | /* XXX Would MAXLINKHDR be better? */ |
292 | 0 | hlen = sizeof(struct ether_header); |
293 | 0 | break; |
294 | | |
295 | | #if FDDI |
296 | | case DLT_FDDI: |
297 | | #if defined(__FreeBSD__) || defined(__bsdi__) |
298 | | sa_family = AF_IMPLINK; |
299 | | hlen = 0; |
300 | | #else |
301 | | sa_family = AF_UNSPEC; |
302 | | /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */ |
303 | | hlen = 24; |
304 | | #endif |
305 | | break; |
306 | | #endif /* FDDI */ |
307 | | |
308 | 0 | case DLT_RAW: |
309 | 0 | case DLT_NULL: |
310 | 0 | sa_family = AF_UNSPEC; |
311 | 0 | hlen = 0; |
312 | 0 | break; |
313 | | |
314 | | #ifdef __FreeBSD__ |
315 | | case DLT_ATM_RFC1483: |
316 | | /* |
317 | | * en atm driver requires 4-byte atm pseudo header. |
318 | | * though it isn't standard, vpi:vci needs to be |
319 | | * specified anyway. |
320 | | */ |
321 | | sa_family = AF_UNSPEC; |
322 | | hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */ |
323 | | break; |
324 | | #endif |
325 | | |
326 | 0 | case DLT_PPP: |
327 | 0 | sa_family = AF_UNSPEC; |
328 | 0 | hlen = 4; /* This should match PPP_HDRLEN */ |
329 | 0 | break; |
330 | | |
331 | 0 | case DLT_APPLE_IP_OVER_IEEE1394: |
332 | 0 | sa_family = AF_UNSPEC; |
333 | 0 | hlen = sizeof(struct firewire_header); |
334 | 0 | break; |
335 | | |
336 | 0 | case DLT_IEEE802_11: /* IEEE 802.11 wireless */ |
337 | 0 | sa_family = AF_IEEE80211; |
338 | 0 | hlen = 0; |
339 | 0 | break; |
340 | | |
341 | 0 | case DLT_IEEE802_11_RADIO: |
342 | 0 | sa_family = AF_IEEE80211; |
343 | 0 | hlen = 0; |
344 | 0 | break; |
345 | | |
346 | 0 | default: |
347 | 0 | return EIO; |
348 | 0 | } |
349 | | |
350 | | // LP64todo - fix this! |
351 | 0 | len = uio_resid(uio); |
352 | 0 | *datlen = len - hlen; |
353 | 0 | if ((unsigned)len > MCLBYTES) { |
354 | 0 | return EIO; |
355 | 0 | } |
356 | | |
357 | 0 | if (sockp) { |
358 | | /* |
359 | | * Build a sockaddr based on the data link layer type. |
360 | | * We do this at this level because the ethernet header |
361 | | * is copied directly into the data field of the sockaddr. |
362 | | * In the case of SLIP, there is no header and the packet |
363 | | * is forwarded as is. |
364 | | * Also, we are careful to leave room at the front of the mbuf |
365 | | * for the link level header. |
366 | | */ |
367 | 0 | if ((hlen + SOCKADDR_HDR_LEN) > sockp->sa_len) { |
368 | 0 | return EIO; |
369 | 0 | } |
370 | 0 | sockp->sa_family = sa_family; |
371 | 0 | } else { |
372 | | /* |
373 | | * We're directly sending the packet data supplied by |
374 | | * the user; we don't need to make room for the link |
375 | | * header, and don't need the header length value any |
376 | | * more, so set it to 0. |
377 | | */ |
378 | 0 | hlen = 0; |
379 | 0 | } |
380 | | |
381 | 0 | MGETHDR(m, M_WAIT, MT_DATA); |
382 | 0 | if (m == 0) { |
383 | 0 | return ENOBUFS; |
384 | 0 | } |
385 | 0 | if ((unsigned)len > MHLEN) { |
386 | 0 | MCLGET(m, M_WAIT); |
387 | 0 | if ((m->m_flags & M_EXT) == 0) { |
388 | 0 | error = ENOBUFS; |
389 | 0 | goto bad; |
390 | 0 | } |
391 | 0 | } |
392 | 0 | m->m_pkthdr.len = m->m_len = len; |
393 | 0 | m->m_pkthdr.rcvif = NULL; |
394 | 0 | *mp = m; |
395 | | |
396 | | /* |
397 | | * Make room for link header. |
398 | | */ |
399 | 0 | if (hlen != 0) { |
400 | 0 | m->m_pkthdr.len -= hlen; |
401 | 0 | m->m_len -= hlen; |
402 | 0 | m->m_data += hlen; /* XXX */ |
403 | 0 | error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio); |
404 | 0 | if (error) { |
405 | 0 | goto bad; |
406 | 0 | } |
407 | 0 | } |
408 | 0 | error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio); |
409 | 0 | if (error) { |
410 | 0 | goto bad; |
411 | 0 | } |
412 | | |
413 | | /* Check for multicast destination */ |
414 | 0 | switch (linktype) { |
415 | 0 | case DLT_EN10MB: { |
416 | 0 | struct ether_header *eh; |
417 | |
418 | 0 | eh = mtod(m, struct ether_header *); |
419 | 0 | if (ETHER_IS_MULTICAST(eh->ether_dhost)) { |
420 | 0 | if (_ether_cmp(etherbroadcastaddr, |
421 | 0 | eh->ether_dhost) == 0) { |
422 | 0 | m->m_flags |= M_BCAST; |
423 | 0 | } else { |
424 | 0 | m->m_flags |= M_MCAST; |
425 | 0 | } |
426 | 0 | } |
427 | 0 | break; |
428 | 0 | } |
429 | 0 | } |
430 | | |
431 | 0 | return 0; |
432 | 0 | bad: |
433 | 0 | m_freem(m); |
434 | 0 | return error; |
435 | 0 | } |
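For DLT_EN10MB without BIOCSHDRCMPLT, userland still writes the full frame: bpf_movein() peels the first hlen bytes into sockp->sa_data and leaves the payload in the mbuf. A hedged userspace sketch of composing such a write (fd is assumed to be an open /dev/bpfN attached to an Ethernet interface; error handling trimmed):

    #include <net/ethernet.h>
    #include <string.h>
    #include <unistd.h>

    /* Compose header + payload in one buffer; one write() per frame. */
    static ssize_t
    send_frame(int fd, const struct ether_header *eh,
        const void *payload, size_t paylen)
    {
            char buf[ETHER_MAX_LEN];

            if (sizeof(*eh) + paylen > sizeof(buf)) {
                    return -1;
            }
            memcpy(buf, eh, sizeof(*eh));
            memcpy(buf + sizeof(*eh), payload, paylen);
            return write(fd, buf, sizeof(*eh) + paylen);
    }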
436 | | |
437 | | #ifdef __APPLE__ |
438 | | |
439 | | /* |
440 | | * The dynamic addition of a new device node must block all processes that |
441 | | * are opening the last device so that no process will get an unexpected |
442 | | * ENOENT |
443 | | */ |
444 | | static void |
445 | | bpf_make_dev_t(int maj) |
446 | 0 | { |
447 | 0 | static int bpf_growing = 0; |
448 | 0 | unsigned int cur_size = nbpfilter, i; |
449 | |
450 | 0 | if (nbpfilter >= bpf_maxdevices) { |
451 | 0 | return; |
452 | 0 | } |
453 | | |
454 | 0 | while (bpf_growing) { |
455 | | /* Wait until new device has been created */ |
456 | 0 | (void) tsleep((caddr_t)&bpf_growing, PZERO, "bpf_growing", 0); |
457 | 0 | } |
458 | 0 | if (nbpfilter > cur_size) { |
459 | | /* other thread grew it already */ |
460 | 0 | return; |
461 | 0 | } |
462 | 0 | bpf_growing = 1; |
463 | | |
464 | | /* need to grow bpf_dtab first */ |
465 | 0 | if (nbpfilter == bpf_dtab_size) { |
466 | 0 | int new_dtab_size; |
467 | 0 | struct bpf_d **new_dtab = NULL; |
468 | 0 | struct bpf_d **old_dtab = NULL; |
469 | |
470 | 0 | new_dtab_size = bpf_dtab_size + NBPFILTER; |
471 | 0 | new_dtab = (struct bpf_d **)_MALLOC( |
472 | 0 | sizeof(struct bpf_d *) * new_dtab_size, M_DEVBUF, M_WAIT); |
473 | 0 | if (new_dtab == 0) { |
474 | 0 | printf("bpf_make_dev_t: malloc bpf_dtab failed\n"); |
475 | 0 | goto done; |
476 | 0 | } |
477 | 0 | if (bpf_dtab) { |
478 | 0 | bcopy(bpf_dtab, new_dtab, |
479 | 0 | sizeof(struct bpf_d *) * bpf_dtab_size); |
480 | 0 | } |
481 | 0 | bzero(new_dtab + bpf_dtab_size, |
482 | 0 | sizeof(struct bpf_d *) * NBPFILTER); |
483 | 0 | old_dtab = bpf_dtab; |
484 | 0 | bpf_dtab = new_dtab; |
485 | 0 | bpf_dtab_size = new_dtab_size; |
486 | 0 | if (old_dtab != NULL) { |
487 | 0 | _FREE(old_dtab, M_DEVBUF); |
488 | 0 | } |
489 | 0 | } |
490 | 0 | i = nbpfilter++; |
491 | 0 | (void) devfs_make_node(makedev(maj, i), |
492 | 0 | DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600, |
493 | 0 | "bpf%d", i); |
494 | 0 | done: |
495 | 0 | bpf_growing = 0; |
496 | 0 | wakeup((caddr_t)&bpf_growing); |
497 | 0 | } |
498 | | |
499 | | #endif |
500 | | |
501 | | /* |
502 | | * Attach file to the bpf interface, i.e. make d listen on bp. |
503 | | */ |
504 | | static errno_t |
505 | | bpf_attachd(struct bpf_d *d, struct bpf_if *bp) |
506 | 0 | { |
507 | 0 | int first = bp->bif_dlist == NULL; |
508 | 0 | int error = 0; |
509 | | |
510 | | /* |
511 | | * Point d at bp, and add d to the interface's list of listeners. |
512 | | * Finally, point the driver's bpf cookie at the interface so |
513 | | * it will divert packets to bpf. |
514 | | */ |
515 | 0 | d->bd_bif = bp; |
516 | 0 | d->bd_next = bp->bif_dlist; |
517 | 0 | bp->bif_dlist = d; |
518 | | |
519 | | /* |
520 | | * Take a reference on the device even if an error is returned |
521 | | * because we keep the device in the interface's list of listeners |
522 | | */ |
523 | 0 | bpf_acquire_d(d); |
524 | |
525 | 0 | if (first) { |
526 | | /* Find the default bpf entry for this ifp */ |
527 | 0 | if (bp->bif_ifp->if_bpf == NULL) { |
528 | 0 | struct bpf_if *tmp, *primary = NULL; |
529 | |
530 | 0 | for (tmp = bpf_iflist; tmp; tmp = tmp->bif_next) { |
531 | 0 | if (tmp->bif_ifp == bp->bif_ifp) { |
532 | 0 | primary = tmp; |
533 | 0 | break; |
534 | 0 | } |
535 | 0 | } |
536 | 0 | bp->bif_ifp->if_bpf = primary; |
537 | 0 | } |
538 | | /* Only call dlil_set_bpf_tap for primary dlt */ |
539 | 0 | if (bp->bif_ifp->if_bpf == bp) { |
540 | 0 | dlil_set_bpf_tap(bp->bif_ifp, BPF_TAP_INPUT_OUTPUT, |
541 | 0 | bpf_tap_callback); |
542 | 0 | } |
543 | |
544 | 0 | if (bp->bif_tap != NULL) { |
545 | 0 | error = bp->bif_tap(bp->bif_ifp, bp->bif_dlt, |
546 | 0 | BPF_TAP_INPUT_OUTPUT); |
547 | 0 | } |
548 | 0 | } |
549 | | |
550 | | /* |
551 | | * Reset the detach flags in case we previously detached an interface |
552 | | */ |
553 | 0 | d->bd_flags &= ~(BPF_DETACHING | BPF_DETACHED); |
554 | |
555 | 0 | if (bp->bif_dlt == DLT_PKTAP) { |
556 | 0 | d->bd_flags |= BPF_FINALIZE_PKTAP; |
557 | 0 | } else { |
558 | 0 | d->bd_flags &= ~BPF_FINALIZE_PKTAP; |
559 | 0 | } |
560 | 0 | return error; |
561 | 0 | } |
562 | | |
563 | | /* |
564 | | * Detach a file from its interface. |
565 | | * |
566 | | * Return 1 if it was closed by some thread, 0 otherwise |
567 | | */ |
568 | | static int |
569 | | bpf_detachd(struct bpf_d *d, int closing) |
570 | 0 | { |
571 | 0 | struct bpf_d **p; |
572 | 0 | struct bpf_if *bp; |
573 | 0 | struct ifnet *ifp; |
574 | |
575 | 0 | int bpf_closed = d->bd_flags & BPF_CLOSING; |
576 | | /* |
577 | | * Some other thread already detached |
578 | | */ |
579 | 0 | if ((d->bd_flags & (BPF_DETACHED | BPF_DETACHING)) != 0) { |
580 | 0 | goto done; |
581 | 0 | } |
582 | | /* |
583 | | * This thread is doing the detach |
584 | | */ |
585 | 0 | d->bd_flags |= BPF_DETACHING; |
586 | |
587 | 0 | ifp = d->bd_bif->bif_ifp; |
588 | 0 | bp = d->bd_bif; |
589 | |
590 | 0 | if (bpf_debug != 0) { |
591 | 0 | printf("%s: %llx %s%s\n", |
592 | 0 | __func__, (uint64_t)VM_KERNEL_ADDRPERM(d), |
593 | 0 | if_name(ifp), closing ? " closing" : ""); |
594 | 0 | } |
595 | | |
596 | | /* Remove d from the interface's descriptor list. */ |
597 | 0 | p = &bp->bif_dlist; |
598 | 0 | while (*p != d) { |
599 | 0 | p = &(*p)->bd_next; |
600 | 0 | if (*p == 0) { |
601 | 0 | panic("bpf_detachd: descriptor not in list"); |
602 | 0 | } |
603 | 0 | } |
604 | 0 | *p = (*p)->bd_next; |
605 | 0 | if (bp->bif_dlist == 0) { |
606 | | /* |
607 | | * Let the driver know that there are no more listeners. |
608 | | */ |
609 | | /* Only call dlil_set_bpf_tap for primary dlt */ |
610 | 0 | if (bp->bif_ifp->if_bpf == bp) { |
611 | 0 | dlil_set_bpf_tap(ifp, BPF_TAP_DISABLE, NULL); |
612 | 0 | } |
613 | 0 | if (bp->bif_tap) { |
614 | 0 | bp->bif_tap(ifp, bp->bif_dlt, BPF_TAP_DISABLE); |
615 | 0 | } |
616 | |
617 | 0 | for (bp = bpf_iflist; bp; bp = bp->bif_next) { |
618 | 0 | if (bp->bif_ifp == ifp && bp->bif_dlist != 0) { |
619 | 0 | break; |
620 | 0 | } |
621 | 0 | } |
622 | 0 | if (bp == NULL) { |
623 | 0 | ifp->if_bpf = NULL; |
624 | 0 | } |
625 | 0 | } |
626 | 0 | d->bd_bif = NULL; |
627 | | /* |
628 | | * Check if this descriptor had requested promiscuous mode. |
629 | | * If so, turn it off. |
630 | | */ |
631 | 0 | if (d->bd_promisc) { |
632 | 0 | d->bd_promisc = 0; |
633 | 0 | lck_mtx_unlock(bpf_mlock); |
634 | 0 | if (ifnet_set_promiscuous(ifp, 0)) { |
635 | | /* |
636 | | * Something is really wrong if we were able to put |
637 | | * the driver into promiscuous mode, but can't |
638 | | * take it out. |
639 | | * Most likely the network interface is gone. |
640 | | */ |
641 | 0 | printf("%s: ifnet_set_promiscuous failed\n", __func__); |
642 | 0 | } |
643 | 0 | lck_mtx_lock(bpf_mlock); |
644 | 0 | } |
645 | | |
646 | | /* |
647 | | * Wake up other threads that are waiting for this thread to finish |
648 | | * detaching |
649 | | */ |
650 | 0 | d->bd_flags &= ~BPF_DETACHING; |
651 | 0 | d->bd_flags |= BPF_DETACHED; |
652 | | |
653 | | /* Refresh the local variable as d could have been modified */ |
654 | 0 | bpf_closed = d->bd_flags & BPF_CLOSING; |
655 | | /* |
656 | | * Note that we've kept the reference because we may have dropped |
657 | | * the lock when turning off promiscuous mode |
658 | | */ |
659 | 0 | bpf_release_d(d); |
660 | |
661 | 0 | done: |
662 | | /* |
663 | | * When closing, make sure no other thread refers to the bpf_d |
664 | | */ |
665 | 0 | if (bpf_debug != 0) { |
666 | 0 | printf("%s: %llx done\n", |
667 | 0 | __func__, (uint64_t)VM_KERNEL_ADDRPERM(d)); |
668 | 0 | } |
669 | | /* |
670 | | * Let the caller know the bpf_d is closed |
671 | | */ |
672 | 0 | if (bpf_closed) { |
673 | 0 | return 1; |
674 | 0 | } else { |
675 | 0 | return 0; |
676 | 0 | } |
677 | 0 | } |
678 | | |
679 | | /* |
680 | | * Start asynchronous timer, if necessary. |
681 | | * Must be called with bpf_mlock held. |
682 | | */ |
683 | | static void |
684 | | bpf_start_timer(struct bpf_d *d) |
685 | 0 | { |
686 | 0 | uint64_t deadline; |
687 | 0 | struct timeval tv; |
688 | |
689 | 0 | if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) { |
690 | 0 | tv.tv_sec = d->bd_rtout / hz; |
691 | 0 | tv.tv_usec = (d->bd_rtout % hz) * tick; |
692 | |
693 | 0 | clock_interval_to_deadline( |
694 | 0 | (uint64_t)tv.tv_sec * USEC_PER_SEC + tv.tv_usec, |
695 | 0 | NSEC_PER_USEC, &deadline); |
696 | | /* |
697 | | * The state is BPF_IDLE, so the timer hasn't |
698 | | * been started yet, and hasn't gone off yet; |
699 | | * there is no thread call scheduled, so this |
700 | | * won't change the schedule. |
701 | | * |
702 | | * XXX - what if, by the time it gets entered, |
703 | | * the deadline has already passed? |
704 | | */ |
705 | 0 | thread_call_enter_delayed(d->bd_thread_call, deadline); |
706 | 0 | d->bd_state = BPF_WAITING; |
707 | 0 | } |
708 | 0 | } |
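For concreteness, a worked example of the conversion above, assuming hz == 100 so that tick == 10000 microseconds (both are configuration-dependent, so treat the numbers as illustrative):

    /* bd_rtout = 250 ticks, e.g. from a 2.5 s BIOCSRTIMEOUT: */
    tv.tv_sec  = 250 / 100;             /* = 2      */
    tv.tv_usec = (250 % 100) * 10000;   /* = 500000 */
    /* interval = 2 * USEC_PER_SEC + 500000 = 2500000 us,
     * turned into an absolute deadline 2.5 s from now. */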
709 | | |
710 | | /* |
711 | | * Cancel asynchronous timer. |
712 | | * Must be called with bpf_mlock held. |
713 | | */ |
714 | | static boolean_t |
715 | | bpf_stop_timer(struct bpf_d *d) |
716 | 0 | { |
717 | | /* |
718 | | * If the timer has already gone off, this does nothing. |
719 | | * Our caller is expected to set d->bd_state to BPF_IDLE, |
720 | | * with the bpf_mlock, after we are called. bpf_timed_out() |
721 | | * also grabs bpf_mlock, so, if the timer has gone off and |
722 | | * bpf_timed_out() hasn't finished, it's waiting for the |
723 | | * lock; when this thread releases the lock, it will |
724 | | * find the state is BPF_IDLE, and just release the |
725 | | * lock and return. |
726 | | */ |
727 | 0 | return thread_call_cancel(d->bd_thread_call); |
728 | 0 | } |
729 | | |
730 | | void |
731 | | bpf_acquire_d(struct bpf_d *d) |
732 | 0 | { |
733 | 0 | void *lr_saved = __builtin_return_address(0); |
734 | |
735 | 0 | LCK_MTX_ASSERT(bpf_mlock, LCK_MTX_ASSERT_OWNED); |
736 | |
737 | 0 | d->bd_refcnt += 1; |
738 | |
739 | 0 | d->bd_ref_lr[d->bd_next_ref_lr] = lr_saved; |
740 | 0 | d->bd_next_ref_lr = (d->bd_next_ref_lr + 1) % BPF_REF_HIST; |
741 | 0 | } |
742 | | |
743 | | void |
744 | | bpf_release_d(struct bpf_d *d) |
745 | 0 | { |
746 | 0 | void *lr_saved = __builtin_return_address(0); |
747 | |
748 | 0 | LCK_MTX_ASSERT(bpf_mlock, LCK_MTX_ASSERT_OWNED); |
749 | |
750 | 0 | if (d->bd_refcnt <= 0) { |
751 | 0 | panic("%s: %p refcnt <= 0", __func__, d); |
752 | 0 | } |
753 | | |
754 | 0 | d->bd_refcnt -= 1; |
755 | |
756 | 0 | d->bd_unref_lr[d->bd_next_unref_lr] = lr_saved; |
757 | 0 | d->bd_next_unref_lr = (d->bd_next_unref_lr + 1) % BPF_REF_HIST; |
758 | |
759 | 0 | if (d->bd_refcnt == 0) { |
760 | | /* Assert the device is detached */ |
761 | 0 | if ((d->bd_flags & BPF_DETACHED) == 0) { |
762 | 0 | panic("%s: %p BPF_DETACHED not set", __func__, d); |
763 | 0 | } |
764 | | |
765 | 0 | _FREE(d, M_DEVBUF); |
766 | 0 | } |
767 | 0 | } |
768 | | |
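bd_ref_lr/bd_next_ref_lr and bd_unref_lr/bd_next_unref_lr form small ring buffers of caller return addresses for diagnosing refcount bugs. A hedged sketch of how such a ring would be dumped (hypothetical debug helper; BPF_REF_HIST comes from bpfdesc.h):

    /* Hypothetical: print the recorded acquire sites, oldest first.
     * bd_next_ref_lr is the next slot to be overwritten. */
    static void
    bpf_dump_ref_history(const struct bpf_d *d)
    {
            for (int i = 0; i < BPF_REF_HIST; i++) {
                    int slot = (d->bd_next_ref_lr + i) % BPF_REF_HIST;

                    printf("ref[%d] = %p\n", i, d->bd_ref_lr[slot]);
            }
    }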
769 | | /* |
770 | | * Open ethernet device. Returns ENXIO for illegal minor device number, |
771 | | * EBUSY if file is open by another process. |
772 | | */ |
773 | | /* ARGSUSED */ |
774 | | int |
775 | | bpfopen(dev_t dev, int flags, __unused int fmt, |
776 | | struct proc *p) |
777 | 0 | { |
778 | 0 | struct bpf_d *d; |
779 | |
780 | 0 | lck_mtx_lock(bpf_mlock); |
781 | 0 | if ((unsigned int) minor(dev) >= nbpfilter) { |
782 | 0 | lck_mtx_unlock(bpf_mlock); |
783 | 0 | return ENXIO; |
784 | 0 | } |
785 | | /* |
786 | | * New device nodes are created on demand when opening the last one. |
787 | | * The programming model is for processes to loop on the minor starting |
788 | | * at 0 as long as EBUSY is returned. The loop stops when either the |
789 | | * open succeeds or an error other than EBUSY is returned. That means |
790 | | * that bpf_make_dev_t() must block all processes that are opening the |
791 | | * last node. If not all processes are blocked, they could unexpectedly |
792 | | * get ENOENT and abort their opening loop. |
793 | | */ |
794 | 0 | if ((unsigned int) minor(dev) == (nbpfilter - 1)) { |
795 | 0 | bpf_make_dev_t(major(dev)); |
796 | 0 | } |
797 | | |
798 | | /* |
799 | | * Each minor can be opened by only one process. If the requested |
800 | | * minor is in use, return EBUSY. |
801 | | * |
802 | | * Important: bpfopen() and bpfclose() have to check and set the status |
803 | | * of a device in the same locking context, otherwise the device may be |
804 | | * leaked because the vnode use count will be unexpectedly greater than 1 |
805 | | * when close() is called. |
806 | | */ |
807 | 0 | if (bpf_dtab[minor(dev)] == NULL) { |
808 | | /* Reserve while opening */ |
809 | 0 | bpf_dtab[minor(dev)] = BPF_DEV_RESERVED; |
810 | 0 | } else { |
811 | 0 | lck_mtx_unlock(bpf_mlock); |
812 | 0 | return EBUSY; |
813 | 0 | } |
814 | 0 | d = (struct bpf_d *)_MALLOC(sizeof(struct bpf_d), M_DEVBUF, |
815 | 0 | M_WAIT | M_ZERO); |
816 | 0 | if (d == NULL) { |
817 | | /* this really is a catastrophic failure */ |
818 | 0 | printf("bpfopen: malloc bpf_d failed\n"); |
819 | 0 | bpf_dtab[minor(dev)] = NULL; |
820 | 0 | lck_mtx_unlock(bpf_mlock); |
821 | 0 | return ENOMEM; |
822 | 0 | } |
823 | | |
824 | | /* Mark "in use" and do most initialization. */ |
825 | 0 | bpf_acquire_d(d); |
826 | 0 | d->bd_bufsize = bpf_bufsize; |
827 | 0 | d->bd_sig = SIGIO; |
828 | 0 | d->bd_seesent = 1; |
829 | 0 | d->bd_oflags = flags; |
830 | 0 | d->bd_state = BPF_IDLE; |
831 | 0 | d->bd_traffic_class = SO_TC_BE; |
832 | 0 | d->bd_flags |= BPF_DETACHED; |
833 | 0 | if (bpf_wantpktap) { |
834 | 0 | d->bd_flags |= BPF_WANT_PKTAP; |
835 | 0 | } else { |
836 | 0 | d->bd_flags &= ~BPF_WANT_PKTAP; |
837 | 0 | } |
838 | 0 | d->bd_thread_call = thread_call_allocate(bpf_timed_out, d); |
839 | 0 | if (d->bd_thread_call == NULL) { |
840 | 0 | printf("bpfopen: malloc thread call failed\n"); |
841 | 0 | bpf_dtab[minor(dev)] = NULL; |
842 | 0 | bpf_release_d(d); |
843 | 0 | lck_mtx_unlock(bpf_mlock); |
844 | |
845 | 0 | return ENOMEM; |
846 | 0 | } |
847 | 0 | d->bd_opened_by = p; |
848 | 0 | uuid_generate(d->bd_uuid); |
849 | |
850 | 0 | bpf_dtab[minor(dev)] = d; /* Mark opened */ |
851 | 0 | lck_mtx_unlock(bpf_mlock); |
852 | |
853 | 0 | return 0; |
854 | 0 | } |
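A userspace sketch of the EBUSY loop described in the comments above (this mirrors what libpcap does; the bound of 256 matches the default bpf_maxdevices but is an assumption here):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>

    /* Scan /dev/bpf0, /dev/bpf1, ... until an open succeeds or a
     * non-EBUSY error (e.g. ENOENT past the last node) ends the loop. */
    static int
    open_bpf_device(void)
    {
            char path[32];

            for (int i = 0; i < 256; i++) {
                    snprintf(path, sizeof(path), "/dev/bpf%d", i);
                    int fd = open(path, O_RDWR);
                    if (fd >= 0 || errno != EBUSY) {
                            return fd;
                    }
            }
            errno = EBUSY;
            return -1;
    }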
855 | | |
856 | | /* |
857 | | * Close the descriptor by detaching it from its interface, |
858 | | * deallocating its buffers, and marking it free. |
859 | | */ |
860 | | /* ARGSUSED */ |
861 | | int |
862 | | bpfclose(dev_t dev, __unused int flags, __unused int fmt, |
863 | | __unused struct proc *p) |
864 | 0 | { |
865 | 0 | struct bpf_d *d; |
866 | | |
867 | | /* Take BPF lock to ensure no other thread is using the device */ |
868 | 0 | lck_mtx_lock(bpf_mlock); |
869 | |
870 | 0 | d = bpf_dtab[minor(dev)]; |
871 | 0 | if (d == NULL || d == BPF_DEV_RESERVED) { |
872 | 0 | lck_mtx_unlock(bpf_mlock); |
873 | 0 | return ENXIO; |
874 | 0 | } |
875 | | |
876 | | /* |
877 | | * Other threads may call bpf_detachd() if we drop the bpf_mlock |
878 | | */ |
879 | 0 | d->bd_flags |= BPF_CLOSING; |
880 | |
881 | 0 | if (bpf_debug != 0) { |
882 | 0 | printf("%s: %llx\n", |
883 | 0 | __func__, (uint64_t)VM_KERNEL_ADDRPERM(d)); |
884 | 0 | } |
885 | |
886 | 0 | bpf_dtab[minor(dev)] = BPF_DEV_RESERVED; /* Reserve while closing */ |
887 | | |
888 | | /* |
889 | | * Deal with any in-progress timeouts. |
890 | | */ |
891 | 0 | switch (d->bd_state) { |
892 | 0 | case BPF_IDLE: |
893 | | /* |
894 | | * Not waiting for a timeout, and no timeout happened. |
895 | | */ |
896 | 0 | break; |
897 | | |
898 | 0 | case BPF_WAITING: |
899 | | /* |
900 | | * Waiting for a timeout. |
901 | | * Cancel any timer that has yet to go off, |
902 | | * and mark the state as "closing". |
903 | | * Then drop the lock to allow any timers that |
904 | | * *have* gone off to run to completion, and wait |
905 | | * for them to finish. |
906 | | */ |
907 | 0 | if (!bpf_stop_timer(d)) { |
908 | | /* |
909 | | * There was no pending call, so the call must |
910 | | * have been in progress. Wait for the call to |
911 | | * complete; we have to drop the lock while |
912 | | * waiting to let the in-progress call complete. |
913 | | */ |
914 | 0 | d->bd_state = BPF_DRAINING; |
915 | 0 | while (d->bd_state == BPF_DRAINING) { |
916 | 0 | msleep((caddr_t)d, bpf_mlock, PRINET, |
917 | 0 | "bpfdraining", NULL); |
918 | 0 | } |
919 | 0 | } |
920 | 0 | d->bd_state = BPF_IDLE; |
921 | 0 | break; |
922 | | |
923 | 0 | case BPF_TIMED_OUT: |
924 | | /* |
925 | | * Timer went off, and the timeout routine finished. |
926 | | */ |
927 | 0 | d->bd_state = BPF_IDLE; |
928 | 0 | break; |
929 | | |
930 | 0 | case BPF_DRAINING: |
931 | | /* |
932 | | * Another thread is blocked on a close waiting for |
933 | | * a timeout to finish. |
934 | | * This "shouldn't happen", as the first thread to enter |
935 | | * bpfclose() will set bpf_dtab[minor(dev)] to 1, and |
936 | | * all subsequent threads should see that and fail with |
937 | | * ENXIO. |
938 | | */ |
939 | 0 | panic("Two threads blocked in a BPF close"); |
940 | 0 | break; |
941 | 0 | } |
942 | | |
943 | 0 | if (d->bd_bif) { |
944 | 0 | bpf_detachd(d, 1); |
945 | 0 | } |
946 | 0 | selthreadclear(&d->bd_sel); |
947 | 0 | thread_call_free(d->bd_thread_call); |
948 | |
949 | 0 | while (d->bd_hbuf_read != 0) { |
950 | 0 | msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL); |
951 | 0 | } |
952 | |
953 | 0 | bpf_freed(d); |
954 | | |
955 | | /* Mark free in same context as bpfopen comes to check */ |
956 | 0 | bpf_dtab[minor(dev)] = NULL; /* Mark closed */ |
957 | |
958 | 0 | bpf_release_d(d); |
959 | |
960 | 0 | lck_mtx_unlock(bpf_mlock); |
961 | |
962 | 0 | return 0; |
963 | 0 | } |
964 | | |
965 | 0 | #define BPF_SLEEP bpf_sleep |
966 | | |
967 | | static int |
968 | | bpf_sleep(struct bpf_d *d, int pri, const char *wmesg, int timo) |
969 | 0 | { |
970 | 0 | u_int64_t abstime = 0; |
971 | |
972 | 0 | if (timo != 0) { |
973 | 0 | clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime); |
974 | 0 | } |
975 | |
976 | 0 | return msleep1((caddr_t)d, bpf_mlock, pri, wmesg, abstime); |
977 | 0 | } |
978 | | |
979 | | static void |
980 | | bpf_finalize_pktap(struct bpf_hdr *hp, struct pktap_header *pktaphdr) |
981 | 0 | { |
982 | 0 | if (pktaphdr->pth_flags & PTH_FLAG_V2_HDR) { |
983 | 0 | struct pktap_v2_hdr *pktap_v2_hdr; |
984 | |
985 | 0 | pktap_v2_hdr = (struct pktap_v2_hdr *)pktaphdr; |
986 | |
987 | 0 | if (pktap_v2_hdr->pth_flags & PTH_FLAG_DELAY_PKTAP) { |
988 | 0 | pktap_v2_finalize_proc_info(pktap_v2_hdr); |
989 | 0 | } |
990 | 0 | } else { |
991 | 0 | if (pktaphdr->pth_flags & PTH_FLAG_DELAY_PKTAP) { |
992 | 0 | pktap_finalize_proc_info(pktaphdr); |
993 | 0 | } |
994 | |
995 | 0 | if (pktaphdr->pth_flags & PTH_FLAG_TSTAMP) { |
996 | 0 | hp->bh_tstamp.tv_sec = pktaphdr->pth_tstamp.tv_sec; |
997 | 0 | hp->bh_tstamp.tv_usec = pktaphdr->pth_tstamp.tv_usec; |
998 | 0 | } |
999 | 0 | } |
1000 | 0 | } |
1001 | | |
1002 | | /* |
1003 | | * Rotate the packet buffers in descriptor d. Move the store buffer |
1004 | | * into the hold slot, and the free buffer into the store slot. |
1005 | | * Zero the length of the new store buffer. |
1006 | | */ |
1007 | | #define ROTATE_BUFFERS(d) \ |
1008 | 0 | if (d->bd_hbuf_read != 0) \ |
1009 | 0 | panic("rotating bpf buffers during read"); \ |
1010 | 0 | (d)->bd_hbuf = (d)->bd_sbuf; \ |
1011 | 0 | (d)->bd_hlen = (d)->bd_slen; \ |
1012 | 0 | (d)->bd_hcnt = (d)->bd_scnt; \ |
1013 | 0 | (d)->bd_sbuf = (d)->bd_fbuf; \ |
1014 | 0 | (d)->bd_slen = 0; \ |
1015 | 0 | (d)->bd_scnt = 0; \ |
1016 | 0 | (d)->bd_fbuf = NULL; |
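A descriptor owns exactly two capture buffers (set up in bpf_allocbufs()), so the macro is really swapping roles: the store slot stays populated and the second buffer sits on either the hold side or the free side. A sketch of that invariant as an assertion (reconstructed from the rotation and read paths, so treat it as descriptive):

    /* Sketch: once buffers are allocated, the two buffers always
     * fill these roles between operations. */
    static void
    bpf_assert_buffer_invariant(const struct bpf_d *d)
    {
            ASSERT(d->bd_sbuf != NULL);
            ASSERT((d->bd_hbuf != NULL) ^ (d->bd_fbuf != NULL));
    }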
1017 | | /* |
1018 | | * bpfread - read next chunk of packets from buffers |
1019 | | */ |
1020 | | int |
1021 | | bpfread(dev_t dev, struct uio *uio, int ioflag) |
1022 | 0 | { |
1023 | 0 | struct bpf_d *d; |
1024 | 0 | caddr_t hbuf; |
1025 | 0 | int timed_out, hbuf_len; |
1026 | 0 | int error; |
1027 | 0 | int flags; |
1028 | |
1029 | 0 | lck_mtx_lock(bpf_mlock); |
1030 | |
1031 | 0 | d = bpf_dtab[minor(dev)]; |
1032 | 0 | if (d == NULL || d == BPF_DEV_RESERVED || |
1033 | 0 | (d->bd_flags & BPF_CLOSING) != 0) { |
1034 | 0 | lck_mtx_unlock(bpf_mlock); |
1035 | 0 | return ENXIO; |
1036 | 0 | } |
1037 | | |
1038 | 0 | bpf_acquire_d(d); |
1039 | | |
1040 | | /* |
1041 | | * Restrict application to use a buffer the same size as |
1042 | | * the kernel buffers. |
1043 | | */ |
1044 | 0 | if (uio_resid(uio) != d->bd_bufsize) { |
1045 | 0 | bpf_release_d(d); |
1046 | 0 | lck_mtx_unlock(bpf_mlock); |
1047 | 0 | return EINVAL; |
1048 | 0 | } |
1049 | | |
1050 | 0 | if (d->bd_state == BPF_WAITING) { |
1051 | 0 | bpf_stop_timer(d); |
1052 | 0 | } |
1053 | |
1054 | 0 | timed_out = (d->bd_state == BPF_TIMED_OUT); |
1055 | 0 | d->bd_state = BPF_IDLE; |
1056 | |
1057 | 0 | while (d->bd_hbuf_read != 0) { |
1058 | 0 | msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL); |
1059 | 0 | } |
1060 | |
1061 | 0 | if ((d->bd_flags & BPF_CLOSING) != 0) { |
1062 | 0 | bpf_release_d(d); |
1063 | 0 | lck_mtx_unlock(bpf_mlock); |
1064 | 0 | return ENXIO; |
1065 | 0 | } |
1066 | | /* |
1067 | | * If the hold buffer is empty, then do a timed sleep, which |
1068 | | * ends when the timeout expires or when enough packets |
1069 | | * have arrived to fill the store buffer. |
1070 | | */ |
1071 | 0 | while (d->bd_hbuf == 0) { |
1072 | 0 | if ((d->bd_immediate || timed_out || (ioflag & IO_NDELAY)) && |
1073 | 0 | d->bd_slen != 0) { |
1074 | | /* |
1075 | | * We're in immediate mode, or are reading |
1076 | | * in non-blocking mode, or a timer was |
1077 | | * started before the read (e.g., by select() |
1078 | | * or poll()) and has expired and a packet(s) |
1079 | | * either arrived since the previous |
1080 | | * read or arrived while we were asleep. |
1081 | | * Rotate the buffers and return what's here. |
1082 | | */ |
1083 | 0 | ROTATE_BUFFERS(d); |
1084 | 0 | break; |
1085 | 0 | } |
1086 | | |
1087 | | /* |
1088 | | * No data is available, check to see if the bpf device |
1089 | | * is still pointed at a real interface. If not, return |
1090 | | * ENXIO so that the userland process knows to rebind |
1091 | | * it before using it again. |
1092 | | */ |
1093 | 0 | if (d->bd_bif == NULL) { |
1094 | 0 | bpf_release_d(d); |
1095 | 0 | lck_mtx_unlock(bpf_mlock); |
1096 | 0 | return ENXIO; |
1097 | 0 | } |
1098 | 0 | if (ioflag & IO_NDELAY) { |
1099 | 0 | bpf_release_d(d); |
1100 | 0 | lck_mtx_unlock(bpf_mlock); |
1101 | 0 | return EWOULDBLOCK; |
1102 | 0 | } |
1103 | 0 | error = BPF_SLEEP(d, PRINET | PCATCH, "bpf", d->bd_rtout); |
1104 | | /* |
1105 | | * Make sure device is still opened |
1106 | | */ |
1107 | 0 | if ((d->bd_flags & BPF_CLOSING) != 0) { |
1108 | 0 | bpf_release_d(d); |
1109 | 0 | lck_mtx_unlock(bpf_mlock); |
1110 | 0 | return ENXIO; |
1111 | 0 | } |
1112 | | |
1113 | 0 | while (d->bd_hbuf_read != 0) { |
1114 | 0 | msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", |
1115 | 0 | NULL); |
1116 | 0 | } |
1117 | |
1118 | 0 | if ((d->bd_flags & BPF_CLOSING) != 0) { |
1119 | 0 | bpf_release_d(d); |
1120 | 0 | lck_mtx_unlock(bpf_mlock); |
1121 | 0 | return ENXIO; |
1122 | 0 | } |
1123 | | |
1124 | 0 | if (error == EINTR || error == ERESTART) { |
1125 | 0 | if (d->bd_hbuf != NULL) { |
1126 | | /* |
1127 | | * Because we msleep, the hold buffer might |
1128 | | * be filled when we wake up. Avoid rotating |
1129 | | * in this case. |
1130 | | */ |
1131 | 0 | break; |
1132 | 0 | } |
1133 | 0 | if (d->bd_slen != 0) { |
1134 | | /* |
1135 | | * Sometimes we may be interrupted often and |
1136 | | * the sleep above will not time out. |
1137 | | * Regardless, we should rotate the buffers |
1138 | | * if there's any new data pending and |
1139 | | * return it. |
1140 | | */ |
1141 | 0 | ROTATE_BUFFERS(d); |
1142 | 0 | break; |
1143 | 0 | } |
1144 | 0 | bpf_release_d(d); |
1145 | 0 | lck_mtx_unlock(bpf_mlock); |
1146 | 0 | if (error == ERESTART) { |
1147 | 0 | printf("%s: %llx ERESTART to EINTR\n", |
1148 | 0 | __func__, (uint64_t)VM_KERNEL_ADDRPERM(d)); |
1149 | 0 | error = EINTR; |
1150 | 0 | } |
1151 | 0 | return error; |
1152 | 0 | } |
1153 | 0 | if (error == EWOULDBLOCK) { |
1154 | | /* |
1155 | | * On a timeout, return what's in the buffer, |
1156 | | * which may be nothing. If there is something |
1157 | | * in the store buffer, we can rotate the buffers. |
1158 | | */ |
1159 | 0 | if (d->bd_hbuf) { |
1160 | | /* |
1161 | | * We filled up the buffer in between |
1162 | | * getting the timeout and arriving |
1163 | | * here, so we don't need to rotate. |
1164 | | */ |
1165 | 0 | break; |
1166 | 0 | } |
1167 | | |
1168 | 0 | if (d->bd_slen == 0) { |
1169 | 0 | bpf_release_d(d); |
1170 | 0 | lck_mtx_unlock(bpf_mlock); |
1171 | 0 | return 0; |
1172 | 0 | } |
1173 | 0 | ROTATE_BUFFERS(d); |
1174 | 0 | break; |
1175 | 0 | } |
1176 | 0 | } |
1177 | | /* |
1178 | | * At this point, we know we have something in the hold slot. |
1179 | | */ |
1180 | | |
1181 | | /* |
1182 | | * Mark the hold buffer as being read, so we do not |
1183 | | * rotate the buffers until the hold buffer |
1184 | | * read is complete. This also avoids issues resulting |
1185 | | * from page faults during disk sleep (<rdar://problem/13436396>). |
1186 | | */ |
1187 | 0 | d->bd_hbuf_read = 1; |
1188 | 0 | hbuf = d->bd_hbuf; |
1189 | 0 | hbuf_len = d->bd_hlen; |
1190 | 0 | flags = d->bd_flags; |
1191 | 0 | lck_mtx_unlock(bpf_mlock); |
1192 | |
1193 | 0 | #ifdef __APPLE__ |
1194 | | /* |
1195 | | * Before we move data to userland, we fill out the extended |
1196 | | * header fields. |
1197 | | */ |
1198 | 0 | if (flags & BPF_EXTENDED_HDR) { |
1199 | 0 | char *p; |
1200 | |
1201 | 0 | p = hbuf; |
1202 | 0 | while (p < hbuf + hbuf_len) { |
1203 | 0 | struct bpf_hdr_ext *ehp; |
1204 | 0 | uint32_t flowid; |
1205 | 0 | struct so_procinfo soprocinfo; |
1206 | 0 | int found = 0; |
1207 | |
1208 | 0 | ehp = (struct bpf_hdr_ext *)(void *)p; |
1209 | 0 | if ((flowid = ehp->bh_flowid) != 0) { |
1210 | 0 | if (ehp->bh_proto == IPPROTO_TCP) { |
1211 | 0 | found = inp_findinpcb_procinfo(&tcbinfo, |
1212 | 0 | flowid, &soprocinfo); |
1213 | 0 | } else if (ehp->bh_proto == IPPROTO_UDP) { |
1214 | 0 | found = inp_findinpcb_procinfo(&udbinfo, |
1215 | 0 | flowid, &soprocinfo); |
1216 | 0 | } |
1217 | 0 | if (found == 1) { |
1218 | 0 | ehp->bh_pid = soprocinfo.spi_pid; |
1219 | 0 | strlcpy(&ehp->bh_comm[0], &soprocinfo.spi_proc_name[0], sizeof(ehp->bh_comm)); |
1220 | 0 | } |
1221 | 0 | ehp->bh_flowid = 0; |
1222 | 0 | } |
1223 | |
1224 | 0 | if (flags & BPF_FINALIZE_PKTAP) { |
1225 | 0 | struct pktap_header *pktaphdr; |
1226 | |
1227 | 0 | pktaphdr = (struct pktap_header *)(void *) |
1228 | 0 | (p + BPF_WORDALIGN(ehp->bh_hdrlen)); |
1229 | |
1230 | 0 | bpf_finalize_pktap((struct bpf_hdr *) ehp, |
1231 | 0 | pktaphdr); |
1232 | 0 | } |
1233 | 0 | p += BPF_WORDALIGN(ehp->bh_hdrlen + ehp->bh_caplen); |
1234 | 0 | } |
1235 | 0 | } else if (flags & BPF_FINALIZE_PKTAP) { |
1236 | 0 | char *p; |
1237 | |
1238 | 0 | p = hbuf; |
1239 | 0 | while (p < hbuf + hbuf_len) { |
1240 | 0 | struct bpf_hdr *hp; |
1241 | 0 | struct pktap_header *pktaphdr; |
1242 | |
1243 | 0 | hp = (struct bpf_hdr *)(void *)p; |
1244 | 0 | pktaphdr = (struct pktap_header *)(void *) |
1245 | 0 | (p + BPF_WORDALIGN(hp->bh_hdrlen)); |
1246 | |
1247 | 0 | bpf_finalize_pktap(hp, pktaphdr); |
1248 | |
1249 | 0 | p += BPF_WORDALIGN(hp->bh_hdrlen + hp->bh_caplen); |
1250 | 0 | } |
1251 | 0 | } |
1252 | 0 | #endif |
1253 | | |
1254 | | /* |
1255 | | * Move data from hold buffer into user space. |
1256 | | * We know the entire buffer is transferred since |
1257 | | * we checked above that the read buffer is bpf_bufsize bytes. |
1258 | | */ |
1259 | 0 | error = UIOMOVE(hbuf, hbuf_len, UIO_READ, uio); |
1260 | |
1261 | 0 | lck_mtx_lock(bpf_mlock); |
1262 | | /* |
1263 | | * Make sure device is still opened |
1264 | | */ |
1265 | 0 | if ((d->bd_flags & BPF_CLOSING) != 0) { |
1266 | 0 | bpf_release_d(d); |
1267 | 0 | lck_mtx_unlock(bpf_mlock); |
1268 | 0 | return ENXIO; |
1269 | 0 | } |
1270 | | |
1271 | 0 | d->bd_hbuf_read = 0; |
1272 | 0 | d->bd_fbuf = d->bd_hbuf; |
1273 | 0 | d->bd_hbuf = NULL; |
1274 | 0 | d->bd_hlen = 0; |
1275 | 0 | d->bd_hcnt = 0; |
1276 | 0 | wakeup((caddr_t)d); |
1277 | |
1278 | 0 | bpf_release_d(d); |
1279 | 0 | lck_mtx_unlock(bpf_mlock); |
1280 | 0 | return error; |
1281 | 0 | } |
1282 | | |
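On the other side of this read, userland walks the returned buffer with the same BPF_WORDALIGN stride the kernel loops above use. A hedged sketch (assumes buf/n came from a successful read() of exactly the BIOCGBLEN size):

    #include <sys/types.h>
    #include <net/bpf.h>

    /* Invoke cb once per captured packet in the read buffer. */
    static void
    walk_packets(char *buf, ssize_t n,
        void (*cb)(const u_char *pkt, u_int caplen))
    {
            char *p = buf;

            while (p < buf + n) {
                    struct bpf_hdr *hp = (struct bpf_hdr *)(void *)p;

                    cb((const u_char *)p + hp->bh_hdrlen, hp->bh_caplen);
                    p += BPF_WORDALIGN(hp->bh_hdrlen + hp->bh_caplen);
            }
    }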
1283 | | /* |
1284 | | * If there are processes sleeping on this descriptor, wake them up. |
1285 | | */ |
1286 | | static void |
1287 | | bpf_wakeup(struct bpf_d *d) |
1288 | 0 | { |
1289 | 0 | if (d->bd_state == BPF_WAITING) { |
1290 | 0 | bpf_stop_timer(d); |
1291 | 0 | d->bd_state = BPF_IDLE; |
1292 | 0 | } |
1293 | 0 | wakeup((caddr_t)d); |
1294 | 0 | if (d->bd_async && d->bd_sig && d->bd_sigio) { |
1295 | 0 | pgsigio(d->bd_sigio, d->bd_sig); |
1296 | 0 | } |
1297 | |
1298 | 0 | selwakeup(&d->bd_sel); |
1299 | 0 | if ((d->bd_flags & BPF_KNOTE)) { |
1300 | 0 | KNOTE(&d->bd_sel.si_note, 1); |
1301 | 0 | } |
1302 | 0 | } |
1303 | | |
1304 | | static void |
1305 | | bpf_timed_out(void *arg, __unused void *dummy) |
1306 | 0 | { |
1307 | 0 | struct bpf_d *d = (struct bpf_d *)arg; |
1308 | |
1309 | 0 | lck_mtx_lock(bpf_mlock); |
1310 | 0 | if (d->bd_state == BPF_WAITING) { |
1311 | | /* |
1312 | | * There's a select or kqueue waiting for this; if there's |
1313 | | * now stuff to read, wake it up. |
1314 | | */ |
1315 | 0 | d->bd_state = BPF_TIMED_OUT; |
1316 | 0 | if (d->bd_slen != 0) { |
1317 | 0 | bpf_wakeup(d); |
1318 | 0 | } |
1319 | 0 | } else if (d->bd_state == BPF_DRAINING) { |
1320 | | /* |
1321 | | * A close is waiting for this to finish. |
1322 | | * Mark it as finished, and wake the close up. |
1323 | | */ |
1324 | 0 | d->bd_state = BPF_IDLE; |
1325 | 0 | bpf_wakeup(d); |
1326 | 0 | } |
1327 | 0 | lck_mtx_unlock(bpf_mlock); |
1328 | 0 | } |
1329 | | |
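A compact sketch of the bd_state transitions driven by bpf_start_timer(), bpf_stop_timer(), bpf_timed_out(), and bpfclose() (reconstructed from the code above, so descriptive rather than normative):

    BPF_IDLE      --bpf_start_timer()--------------> BPF_WAITING
    BPF_WAITING   --timer fires (bpf_timed_out())--> BPF_TIMED_OUT
    BPF_WAITING   --read/ioctl/close cancel timer--> BPF_IDLE
    BPF_WAITING   --close, callout already running-> BPF_DRAINING
    BPF_DRAINING  --bpf_timed_out() completes------> BPF_IDLE (wakes the closer)
    BPF_TIMED_OUT --next read/ioctl/close----------> BPF_IDLE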
1330 | | /* keep in sync with bpf_movein above: */ |
1331 | | #define MAX_DATALINK_HDR_LEN (sizeof(struct firewire_header)) |
1332 | | |
1333 | | int |
1334 | | bpfwrite(dev_t dev, struct uio *uio, __unused int ioflag) |
1335 | 0 | { |
1336 | 0 | struct bpf_d *d; |
1337 | 0 | struct ifnet *ifp; |
1338 | 0 | struct mbuf *m = NULL; |
1339 | 0 | int error; |
1340 | 0 | char dst_buf[SOCKADDR_HDR_LEN + MAX_DATALINK_HDR_LEN]; |
1341 | 0 | int datlen = 0; |
1342 | 0 | int bif_dlt; |
1343 | 0 | int bd_hdrcmplt; |
1344 | |
1345 | 0 | lck_mtx_lock(bpf_mlock); |
1346 | |
1347 | 0 | d = bpf_dtab[minor(dev)]; |
1348 | 0 | if (d == NULL || d == BPF_DEV_RESERVED || |
1349 | 0 | (d->bd_flags & BPF_CLOSING) != 0) { |
1350 | 0 | lck_mtx_unlock(bpf_mlock); |
1351 | 0 | return ENXIO; |
1352 | 0 | } |
1353 | | |
1354 | 0 | bpf_acquire_d(d); |
1355 | |
1356 | 0 | if (d->bd_bif == 0) { |
1357 | 0 | bpf_release_d(d); |
1358 | 0 | lck_mtx_unlock(bpf_mlock); |
1359 | 0 | return ENXIO; |
1360 | 0 | } |
1361 | | |
1362 | 0 | ifp = d->bd_bif->bif_ifp; |
1363 | |
1364 | 0 | if ((ifp->if_flags & IFF_UP) == 0) { |
1365 | 0 | bpf_release_d(d); |
1366 | 0 | lck_mtx_unlock(bpf_mlock); |
1367 | 0 | return ENETDOWN; |
1368 | 0 | } |
1369 | 0 | if (uio_resid(uio) == 0) { |
1370 | 0 | bpf_release_d(d); |
1371 | 0 | lck_mtx_unlock(bpf_mlock); |
1372 | 0 | return 0; |
1373 | 0 | } |
1374 | 0 | ((struct sockaddr *)dst_buf)->sa_len = sizeof(dst_buf); |
1375 | | |
1376 | | /* |
1377 | | * fix for PR-6849527 |
1378 | | * getting variables onto stack before dropping lock for bpf_movein() |
1379 | | */ |
1380 | 0 | bif_dlt = (int)d->bd_bif->bif_dlt; |
1381 | 0 | bd_hdrcmplt = d->bd_hdrcmplt; |
1382 | | |
1383 | | /* bpf_movein allocates mbufs; drop the lock */ |
1384 | 0 | lck_mtx_unlock(bpf_mlock); |
1385 | |
1386 | 0 | error = bpf_movein(uio, bif_dlt, &m, |
1387 | 0 | bd_hdrcmplt ? NULL : (struct sockaddr *)dst_buf, |
1388 | 0 | &datlen); |
1389 | | |
1390 | | /* take the lock again */ |
1391 | 0 | lck_mtx_lock(bpf_mlock); |
1392 | 0 | if (error) { |
1393 | 0 | bpf_release_d(d); |
1394 | 0 | lck_mtx_unlock(bpf_mlock); |
1395 | 0 | return error; |
1396 | 0 | } |
1397 | | |
1398 | | /* verify the device is still open */ |
1399 | 0 | if ((d->bd_flags & BPF_CLOSING) != 0) { |
1400 | 0 | bpf_release_d(d); |
1401 | 0 | lck_mtx_unlock(bpf_mlock); |
1402 | 0 | m_freem(m); |
1403 | 0 | return ENXIO; |
1404 | 0 | } |
1405 | | |
1406 | 0 | if (d->bd_bif == NULL) { |
1407 | 0 | bpf_release_d(d); |
1408 | 0 | lck_mtx_unlock(bpf_mlock); |
1409 | 0 | m_free(m); |
1410 | 0 | return ENXIO; |
1411 | 0 | } |
1412 | | |
1413 | 0 | if ((unsigned)datlen > ifp->if_mtu) { |
1414 | 0 | bpf_release_d(d); |
1415 | 0 | lck_mtx_unlock(bpf_mlock); |
1416 | 0 | m_freem(m); |
1417 | 0 | return EMSGSIZE; |
1418 | 0 | } |
1419 | | |
1420 | 0 | bpf_set_packet_service_class(m, d->bd_traffic_class); |
1421 | |
1422 | 0 | lck_mtx_unlock(bpf_mlock); |
1423 | | |
1424 | | /* |
1425 | | * The driver frees the mbuf. |
1426 | | */ |
1427 | 0 | if (d->bd_hdrcmplt) { |
1428 | 0 | if (d->bd_bif->bif_send) { |
1429 | 0 | error = d->bd_bif->bif_send(ifp, d->bd_bif->bif_dlt, m); |
1430 | 0 | } else { |
1431 | 0 | error = dlil_output(ifp, 0, m, NULL, NULL, 1, NULL); |
1432 | 0 | } |
1433 | 0 | } else { |
1434 | 0 | error = dlil_output(ifp, PF_INET, m, NULL, |
1435 | 0 | (struct sockaddr *)dst_buf, 0, NULL); |
1436 | 0 | } |
1437 | |
1438 | 0 | lck_mtx_lock(bpf_mlock); |
1439 | 0 | bpf_release_d(d); |
1440 | 0 | lck_mtx_unlock(bpf_mlock); |
1441 | |
1442 | 0 | return error; |
1443 | 0 | } |
1444 | | |
1445 | | /* |
1446 | | * Reset a descriptor by flushing its packet buffer and clearing the |
1447 | | * receive and drop counts. |
1448 | | */ |
1449 | | static void |
1450 | | reset_d(struct bpf_d *d) |
1451 | 0 | { |
1452 | 0 | if (d->bd_hbuf_read != 0) { |
1453 | 0 | panic("resetting buffers during read"); |
1454 | 0 | } |
1455 | | |
1456 | 0 | if (d->bd_hbuf) { |
1457 | | /* Free the hold buffer. */ |
1458 | 0 | d->bd_fbuf = d->bd_hbuf; |
1459 | 0 | d->bd_hbuf = NULL; |
1460 | 0 | } |
1461 | 0 | d->bd_slen = 0; |
1462 | 0 | d->bd_hlen = 0; |
1463 | 0 | d->bd_scnt = 0; |
1464 | 0 | d->bd_hcnt = 0; |
1465 | 0 | d->bd_rcount = 0; |
1466 | 0 | d->bd_dcount = 0; |
1467 | 0 | } |
1468 | | |
1469 | | static struct bpf_d * |
1470 | | bpf_get_device_from_uuid(uuid_t uuid) |
1471 | 0 | { |
1472 | 0 | unsigned int i; |
1473 | |
1474 | 0 | for (i = 0; i < nbpfilter; i++) { |
1475 | 0 | struct bpf_d *d = bpf_dtab[i]; |
1476 | |
1477 | 0 | if (d == NULL || d == BPF_DEV_RESERVED || |
1478 | 0 | (d->bd_flags & BPF_CLOSING) != 0) { |
1479 | 0 | continue; |
1480 | 0 | } |
1481 | 0 | if (uuid_compare(uuid, d->bd_uuid) == 0) { |
1482 | 0 | return d; |
1483 | 0 | } |
1484 | 0 | } |
1485 | | |
1486 | 0 | return NULL; |
1487 | 0 | } |
1488 | | |
1489 | | /* |
1490 | | * The BIOCSETUP command "atomically" attaches to the interface and |
1491 | | * copies the buffer from another bpf device. This minimizes the risk |
1492 | | * of missing packets because this is done while holding |
1493 | | * the BPF global lock. |
1494 | | */ |
1495 | | static int |
1496 | | bpf_setup(struct bpf_d *d_to, uuid_t uuid_from, ifnet_t ifp) |
1497 | 0 | { |
1498 | 0 | struct bpf_d *d_from; |
1499 | 0 | int error = 0; |
1500 | |
1501 | 0 | LCK_MTX_ASSERT(bpf_mlock, LCK_MTX_ASSERT_OWNED); |
1502 | | |
1503 | | /* |
1504 | | * Sanity checks |
1505 | | */ |
1506 | 0 | d_from = bpf_get_device_from_uuid(uuid_from); |
1507 | 0 | if (d_from == NULL) { |
1508 | 0 | error = ENOENT; |
1509 | 0 | os_log_info(OS_LOG_DEFAULT, |
1510 | 0 | "%s: uuids not found error %d", |
1511 | 0 | __func__, error); |
1512 | 0 | return error; |
1513 | 0 | } |
1514 | 0 | if (d_from->bd_opened_by != d_to->bd_opened_by) { |
1515 | 0 | error = EACCES; |
1516 | 0 | os_log_info(OS_LOG_DEFAULT, |
1517 | 0 | "%s: processes not matching error %d", |
1518 | 0 | __func__, error); |
1519 | 0 | return error; |
1520 | 0 | } |
1521 | | |
1522 | | /* |
1523 | | * Prevent any read while copying |
1524 | | */ |
1525 | 0 | while (d_to->bd_hbuf_read != 0) { |
1526 | 0 | msleep((caddr_t)d_to, bpf_mlock, PRINET, __func__, NULL); |
1527 | 0 | } |
1528 | 0 | d_to->bd_hbuf_read = 1; |
1529 | |
1530 | 0 | while (d_from->bd_hbuf_read != 0) { |
1531 | 0 | msleep((caddr_t)d_from, bpf_mlock, PRINET, __func__, NULL); |
1532 | 0 | } |
1533 | 0 | d_from->bd_hbuf_read = 1; |
1534 | | |
1535 | | /* |
1536 | | * Verify the devices have not been closed |
1537 | | */ |
1538 | 0 | if (d_to->bd_flags & BPF_CLOSING) { |
1539 | 0 | error = ENXIO; |
1540 | 0 | os_log_info(OS_LOG_DEFAULT, |
1541 | 0 | "%s: d_to is closing error %d", |
1542 | 0 | __func__, error); |
1543 | 0 | goto done; |
1544 | 0 | } |
1545 | 0 | if (d_from->bd_flags & BPF_CLOSING) { |
1546 | 0 | error = ENXIO; |
1547 | 0 | os_log_info(OS_LOG_DEFAULT, |
1548 | 0 | "%s: d_from is closing error %d", |
1549 | 0 | __func__, error); |
1550 | 0 | goto done; |
1551 | 0 | } |
1552 | | |
1553 | | /* |
1554 | | * For now require the same buffer size |
1555 | | */ |
1556 | 0 | if (d_from->bd_bufsize != d_to->bd_bufsize) { |
1557 | 0 | error = EINVAL; |
1558 | 0 | os_log_info(OS_LOG_DEFAULT, |
1559 | 0 | "%s: bufsizes not matching error %d", |
1560 | 0 | __func__, error); |
1561 | 0 | goto done; |
1562 | 0 | } |
1563 | | |
1564 | | /* |
1565 | | * Attach to the interface |
1566 | | */ |
1567 | 0 | error = bpf_setif(d_to, ifp, false, true); |
1568 | 0 | if (error != 0) { |
1569 | 0 | os_log_info(OS_LOG_DEFAULT, |
1570 | 0 | "%s: bpf_setif() failed error %d", |
1571 | 0 | __func__, error); |
1572 | 0 | goto done; |
1573 | 0 | } |
1574 | | |
1575 | | /* |
1576 | | * Make sure the buffers are setup as expected by bpf_setif() |
1577 | | */ |
1578 | 0 | ASSERT(d_to->bd_hbuf == NULL); |
1579 | 0 | ASSERT(d_to->bd_sbuf != NULL); |
1580 | 0 | ASSERT(d_to->bd_fbuf != NULL); |
1581 | | |
1582 | | /* |
1583 | | * Copy the buffers and update the pointers and counts |
1584 | | */ |
1585 | 0 | memcpy(d_to->bd_sbuf, d_from->bd_sbuf, d_from->bd_slen); |
1586 | 0 | d_to->bd_slen = d_from->bd_slen; |
1587 | 0 | d_to->bd_scnt = d_from->bd_scnt; |
1588 | |
1589 | 0 | if (d_from->bd_hbuf != NULL) { |
1590 | 0 | d_to->bd_hbuf = d_to->bd_fbuf; |
1591 | 0 | d_to->bd_fbuf = NULL; |
1592 | 0 | memcpy(d_to->bd_hbuf, d_from->bd_hbuf, d_from->bd_hlen); |
1593 | 0 | } |
1594 | 0 | d_to->bd_hlen = d_from->bd_hlen; |
1595 | 0 | d_to->bd_hcnt = d_from->bd_hcnt; |
1596 | |
1597 | 0 | if (bpf_debug > 0) { |
1598 | 0 | os_log_info(OS_LOG_DEFAULT, |
1599 | 0 | "%s: done slen %u scnt %u hlen %u hcnt %u", |
1600 | 0 | __func__, d_to->bd_slen, d_to->bd_scnt, |
1601 | 0 | d_to->bd_hlen, d_to->bd_hcnt); |
1602 | 0 | } |
1603 | 0 | done: |
1604 | 0 | d_from->bd_hbuf_read = 0; |
1605 | 0 | wakeup((caddr_t)d_from); |
1606 | |
|
1607 | 0 | d_to->bd_hbuf_read = 0; |
1608 | 0 | wakeup((caddr_t)d_to); |
1609 | |
|
1610 | 0 | return error; |
1611 | 0 | } |
1612 | | |
1613 | | /* |
1614 | | * FIONREAD Check for read packet available. |
1615 | | * SIOCGIFADDR Get interface address - convenient hook to driver. |
1616 | | * BIOCGBLEN Get buffer len [for read()]. |
1617 | | * BIOCSETF Set ethernet read filter. |
1618 | | * BIOCFLUSH Flush read packet buffer. |
1619 | | * BIOCPROMISC Put interface into promiscuous mode. |
1620 | | * BIOCGDLT Get link layer type. |
1621 | | * BIOCGETIF Get interface name. |
1622 | | * BIOCSETIF Set interface. |
1623 | | * BIOCSRTIMEOUT Set read timeout. |
1624 | | * BIOCGRTIMEOUT Get read timeout. |
1625 | | * BIOCGSTATS Get packet stats. |
1626 | | * BIOCIMMEDIATE Set immediate mode. |
1627 | | * BIOCVERSION Get filter language version. |
1628 | | * BIOCGHDRCMPLT Get "header already complete" flag |
1629 | | * BIOCSHDRCMPLT Set "header already complete" flag |
1630 | | * BIOCGSEESENT Get "see packets sent" flag |
1631 | | * BIOCSSEESENT Set "see packets sent" flag |
1632 | | * BIOCSETTC Set traffic class. |
1633 | | * BIOCGETTC Get traffic class. |
1634 | | * BIOCSEXTHDR Set "extended header" flag |
1635 | | * BIOCSHEADDROP Drop head of the buffer if user is not reading |
1636 | | * BIOCGHEADDROP Get "head-drop" flag |
1637 | | */ |
1638 | | /* ARGSUSED */ |
1639 | | int |
1640 | | bpfioctl(dev_t dev, u_long cmd, caddr_t addr, __unused int flags, |
1641 | | struct proc *p) |
1642 | 0 | { |
1643 | 0 | struct bpf_d *d; |
1644 | 0 | int error = 0; |
1645 | 0 | u_int int_arg; |
1646 | 0 | struct ifreq ifr; |
1647 | |
1648 | 0 | lck_mtx_lock(bpf_mlock); |
1649 | |
1650 | 0 | d = bpf_dtab[minor(dev)]; |
1651 | 0 | if (d == NULL || d == BPF_DEV_RESERVED || |
1652 | 0 | (d->bd_flags & BPF_CLOSING) != 0) { |
1653 | 0 | lck_mtx_unlock(bpf_mlock); |
1654 | 0 | return ENXIO; |
1655 | 0 | } |
1656 | | |
1657 | 0 | bpf_acquire_d(d); |
1658 | |
1659 | 0 | if (d->bd_state == BPF_WAITING) { |
1660 | 0 | bpf_stop_timer(d); |
1661 | 0 | } |
1662 | 0 | d->bd_state = BPF_IDLE; |
1663 | |
1664 | 0 | switch (cmd) { |
1665 | 0 | default: |
1666 | 0 | error = EINVAL; |
1667 | 0 | break; |
1668 | | |
1669 | | /* |
1670 | | * Check for read packet available. |
1671 | | */ |
1672 | 0 | case FIONREAD: /* int */ |
1673 | 0 | { |
1674 | 0 | int n; |
1675 | |
1676 | 0 | n = d->bd_slen; |
1677 | 0 | if (d->bd_hbuf && d->bd_hbuf_read == 0) { |
1678 | 0 | n += d->bd_hlen; |
1679 | 0 | } |
1680 | |
1681 | 0 | bcopy(&n, addr, sizeof(n)); |
1682 | 0 | break; |
1683 | 0 | } |
1684 | | |
1685 | 0 | case SIOCGIFADDR: /* struct ifreq */ |
1686 | 0 | { |
1687 | 0 | struct ifnet *ifp; |
1688 | |
1689 | 0 | if (d->bd_bif == 0) { |
1690 | 0 | error = EINVAL; |
1691 | 0 | } else { |
1692 | 0 | ifp = d->bd_bif->bif_ifp; |
1693 | 0 | error = ifnet_ioctl(ifp, 0, cmd, addr); |
1694 | 0 | } |
1695 | 0 | break; |
1696 | 0 | } |
1697 | | |
1698 | | /* |
1699 | | * Get buffer len [for read()]. |
1700 | | */ |
1701 | 0 | case BIOCGBLEN: /* u_int */ |
1702 | 0 | bcopy(&d->bd_bufsize, addr, sizeof(u_int)); |
1703 | 0 | break; |
1704 | | |
1705 | | /* |
1706 | | * Set buffer length. |
1707 | | */ |
1708 | 0 | case BIOCSBLEN: { /* u_int */ |
1709 | 0 | u_int size; |
1710 | 0 | unsigned int maxbufsize = bpf_maxbufsize; |
1711 | | |
1712 | | /* |
1713 | | * Allow a larger buffer in head-drop mode, on the
1714 | | * assumption that the reading process may be low priority
1715 | | * but is interested in the most recent traffic
1716 | | */ |
1717 | 0 | if (d->bd_headdrop != 0) { |
1718 | 0 | maxbufsize = 2 * bpf_maxbufsize; |
1719 | 0 | } |
1720 | |
1721 | 0 | if (d->bd_bif != 0 || (d->bd_flags & BPF_DETACHING)) { |
1722 | | /* |
1723 | | * Interface already attached, unable to change buffers |
1724 | | */ |
1725 | 0 | error = EINVAL; |
1726 | 0 | break; |
1727 | 0 | } |
1728 | 0 | bcopy(addr, &size, sizeof(size)); |
1729 | |
1730 | 0 | if (size > maxbufsize) { |
1731 | 0 | d->bd_bufsize = maxbufsize; |
1732 | |
1733 | 0 | os_log_info(OS_LOG_DEFAULT, |
1734 | 0 | "%s bufsize capped to %u from %u", |
1735 | 0 | __func__, d->bd_bufsize, size); |
1736 | 0 | } else if (size < BPF_MINBUFSIZE) { |
1737 | 0 | d->bd_bufsize = BPF_MINBUFSIZE; |
1738 | |
1739 | 0 | os_log_info(OS_LOG_DEFAULT, |
1740 | 0 | "%s bufsize bumped to %u from %u", |
1741 | 0 | __func__, d->bd_bufsize, size); |
1742 | 0 | } else { |
1743 | 0 | d->bd_bufsize = size; |
1744 | 0 | } |
1745 | | |
1746 | | /* It's a read/write ioctl */ |
1747 | 0 | bcopy(&d->bd_bufsize, addr, sizeof(u_int)); |
1748 | 0 | break; |
1749 | 0 | } |
1750 | | /* |
1751 | | * Set link layer read filter. |
1752 | | */ |
1753 | 0 | case BIOCSETF32: |
1754 | 0 | case BIOCSETFNR32: { /* struct bpf_program32 */ |
1755 | 0 | struct bpf_program32 prg32; |
1756 | |
1757 | 0 | bcopy(addr, &prg32, sizeof(prg32)); |
1758 | 0 | error = bpf_setf(d, prg32.bf_len, |
1759 | 0 | CAST_USER_ADDR_T(prg32.bf_insns), cmd); |
1760 | 0 | break; |
1761 | 0 | } |
1762 | | |
1763 | 0 | case BIOCSETF64: |
1764 | 0 | case BIOCSETFNR64: { /* struct bpf_program64 */ |
1765 | 0 | struct bpf_program64 prg64; |
1766 | |
1767 | 0 | bcopy(addr, &prg64, sizeof(prg64)); |
1768 | 0 | error = bpf_setf(d, prg64.bf_len, prg64.bf_insns, cmd); |
1769 | 0 | break; |
1770 | 0 | } |
1771 | | |
1772 | | /* |
1773 | | * Flush read packet buffer. |
1774 | | */ |
1775 | 0 | case BIOCFLUSH: |
1776 | 0 | while (d->bd_hbuf_read != 0) { |
1777 | 0 | msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", |
1778 | 0 | NULL); |
1779 | 0 | } |
1780 | 0 | if ((d->bd_flags & BPF_CLOSING) != 0) { |
1781 | 0 | error = ENXIO; |
1782 | 0 | break; |
1783 | 0 | } |
1784 | 0 | reset_d(d); |
1785 | 0 | break; |
1786 | | |
1787 | | /* |
1788 | | * Put interface into promiscuous mode. |
1789 | | */ |
1790 | 0 | case BIOCPROMISC: |
1791 | 0 | if (d->bd_bif == 0) { |
1792 | | /* |
1793 | | * No interface attached yet. |
1794 | | */ |
1795 | 0 | error = EINVAL; |
1796 | 0 | break; |
1797 | 0 | } |
1798 | 0 | if (d->bd_promisc == 0) { |
1799 | 0 | lck_mtx_unlock(bpf_mlock); |
1800 | 0 | error = ifnet_set_promiscuous(d->bd_bif->bif_ifp, 1); |
1801 | 0 | lck_mtx_lock(bpf_mlock); |
1802 | 0 | if (error == 0) { |
1803 | 0 | d->bd_promisc = 1; |
1804 | 0 | } |
1805 | 0 | } |
1806 | 0 | break; |
1807 | | |
1808 | | /* |
1809 | | * Get device parameters. |
1810 | | */ |
1811 | 0 | case BIOCGDLT: /* u_int */ |
1812 | 0 | if (d->bd_bif == 0) { |
1813 | 0 | error = EINVAL; |
1814 | 0 | } else { |
1815 | 0 | bcopy(&d->bd_bif->bif_dlt, addr, sizeof(u_int)); |
1816 | 0 | } |
1817 | 0 | break; |
1818 | | |
1819 | | /* |
1820 | | * Get a list of supported data link types. |
1821 | | */ |
1822 | 0 | case BIOCGDLTLIST: /* struct bpf_dltlist */ |
1823 | 0 | if (d->bd_bif == NULL) { |
1824 | 0 | error = EINVAL; |
1825 | 0 | } else { |
1826 | 0 | error = bpf_getdltlist(d, addr, p); |
1827 | 0 | } |
1828 | 0 | break; |
1829 | | |
1830 | | /* |
1831 | | * Set data link type. |
1832 | | */ |
1833 | 0 | case BIOCSDLT: /* u_int */ |
1834 | 0 | if (d->bd_bif == NULL) { |
1835 | 0 | error = EINVAL; |
1836 | 0 | } else { |
1837 | 0 | u_int dlt; |
1838 | |
1839 | 0 | bcopy(addr, &dlt, sizeof(dlt)); |
1840 | |
1841 | 0 | if (dlt == DLT_PKTAP && |
1842 | 0 | !(d->bd_flags & BPF_WANT_PKTAP)) { |
1843 | 0 | dlt = DLT_RAW; |
1844 | 0 | } |
1845 | 0 | error = bpf_setdlt(d, dlt); |
1846 | 0 | } |
1847 | 0 | break; |
1848 | | |
1849 | | /* |
1850 | | * Get interface name. |
1851 | | */ |
1852 | 0 | case BIOCGETIF: /* struct ifreq */ |
1853 | 0 | if (d->bd_bif == 0) { |
1854 | 0 | error = EINVAL; |
1855 | 0 | } else { |
1856 | 0 | struct ifnet *const ifp = d->bd_bif->bif_ifp; |
1857 | |
1858 | 0 | snprintf(((struct ifreq *)(void *)addr)->ifr_name, |
1859 | 0 | sizeof(ifr.ifr_name), "%s", if_name(ifp)); |
1860 | 0 | } |
1861 | 0 | break; |
1862 | | |
1863 | | /* |
1864 | | * Set interface. |
1865 | | */ |
1866 | 0 | case BIOCSETIF: { /* struct ifreq */ |
1867 | 0 | ifnet_t ifp; |
1868 | |
1869 | 0 | bcopy(addr, &ifr, sizeof(ifr)); |
1870 | 0 | ifr.ifr_name[IFNAMSIZ - 1] = '\0'; |
1871 | 0 | ifp = ifunit(ifr.ifr_name); |
1872 | 0 | if (ifp == NULL) { |
1873 | 0 | error = ENXIO; |
1874 | 0 | } else { |
1875 | 0 | error = bpf_setif(d, ifp, true, false); |
1876 | 0 | } |
1877 | 0 | break; |
1878 | 0 | } |
1879 | | |
1880 | | /* |
1881 | | * Set read timeout. |
1882 | | */ |
1883 | 0 | case BIOCSRTIMEOUT32: { /* struct user32_timeval */ |
1884 | 0 | struct user32_timeval _tv; |
1885 | 0 | struct timeval tv; |
1886 | |
1887 | 0 | bcopy(addr, &_tv, sizeof(_tv)); |
1888 | 0 | tv.tv_sec = _tv.tv_sec; |
1889 | 0 | tv.tv_usec = _tv.tv_usec; |
1890 | | |
1891 | | /* |
1892 | | * Subtract 1 tick from tvtohz() since this isn't |
1893 | | * a one-shot timer. |
1894 | | */ |
1895 | 0 | if ((error = itimerfix(&tv)) == 0) { |
1896 | 0 | d->bd_rtout = tvtohz(&tv) - 1; |
1897 | 0 | } |
1898 | 0 | break; |
1899 | 0 | } |
1900 | | |
1901 | 0 | case BIOCSRTIMEOUT64: { /* struct user64_timeval */ |
1902 | 0 | struct user64_timeval _tv; |
1903 | 0 | struct timeval tv; |
1904 | |
1905 | 0 | bcopy(addr, &_tv, sizeof(_tv)); |
1906 | 0 | tv.tv_sec = _tv.tv_sec; |
1907 | 0 | tv.tv_usec = _tv.tv_usec; |
1908 | | |
1909 | | /* |
1910 | | * Subtract 1 tick from tvtohz() since this isn't |
1911 | | * a one-shot timer. |
1912 | | */ |
1913 | 0 | if ((error = itimerfix(&tv)) == 0) { |
1914 | 0 | d->bd_rtout = tvtohz(&tv) - 1; |
1915 | 0 | } |
1916 | 0 | break; |
1917 | 0 | } |
1918 | | |
1919 | | /* |
1920 | | * Get read timeout. |
1921 | | */ |
1922 | 0 | case BIOCGRTIMEOUT32: { /* struct user32_timeval */ |
1923 | 0 | struct user32_timeval tv; |
1924 | |
1925 | 0 | bzero(&tv, sizeof(tv)); |
1926 | 0 | tv.tv_sec = d->bd_rtout / hz; |
1927 | 0 | tv.tv_usec = (d->bd_rtout % hz) * tick; |
1928 | 0 | bcopy(&tv, addr, sizeof(tv)); |
1929 | 0 | break; |
1930 | 0 | } |
1931 | | |
1932 | 0 | case BIOCGRTIMEOUT64: { /* struct user64_timeval */ |
1933 | 0 | struct user64_timeval tv; |
1934 | |
1935 | 0 | bzero(&tv, sizeof(tv)); |
1936 | 0 | tv.tv_sec = d->bd_rtout / hz; |
1937 | 0 | tv.tv_usec = (d->bd_rtout % hz) * tick; |
1938 | 0 | bcopy(&tv, addr, sizeof(tv)); |
1939 | 0 | break; |
1940 | 0 | } |
1941 | | |
1942 | | /* |
1943 | | * Get packet stats. |
1944 | | */ |
1945 | 0 | case BIOCGSTATS: { /* struct bpf_stat */ |
1946 | 0 | struct bpf_stat bs; |
1947 | |
1948 | 0 | bzero(&bs, sizeof(bs)); |
1949 | 0 | bs.bs_recv = d->bd_rcount; |
1950 | 0 | bs.bs_drop = d->bd_dcount; |
1951 | 0 | bcopy(&bs, addr, sizeof(bs)); |
1952 | 0 | break; |
1953 | 0 | } |
1954 | | |
1955 | | /* |
1956 | | * Set immediate mode. |
1957 | | */ |
1958 | 0 | case BIOCIMMEDIATE: /* u_int */ |
1959 | 0 | d->bd_immediate = *(u_int *)(void *)addr; |
1960 | 0 | break; |
1961 | | |
1962 | 0 | case BIOCVERSION: { /* struct bpf_version */ |
1963 | 0 | struct bpf_version bv; |
1964 | |
1965 | 0 | bzero(&bv, sizeof(bv)); |
1966 | 0 | bv.bv_major = BPF_MAJOR_VERSION; |
1967 | 0 | bv.bv_minor = BPF_MINOR_VERSION; |
1968 | 0 | bcopy(&bv, addr, sizeof(bv)); |
1969 | 0 | break; |
1970 | 0 | } |
1971 | | |
1972 | | /* |
1973 | | * Get "header already complete" flag |
1974 | | */ |
1975 | 0 | case BIOCGHDRCMPLT: /* u_int */ |
1976 | 0 | bcopy(&d->bd_hdrcmplt, addr, sizeof(u_int)); |
1977 | 0 | break; |
1978 | | |
1979 | | /* |
1980 | | * Set "header already complete" flag |
1981 | | */ |
1982 | 0 | case BIOCSHDRCMPLT: /* u_int */ |
1983 | 0 | bcopy(addr, &int_arg, sizeof(int_arg)); |
1984 | 0 | d->bd_hdrcmplt = int_arg ? 1 : 0; |
1985 | 0 | break; |
1986 | | |
1987 | | /* |
1988 | | * Get "see sent packets" flag |
1989 | | */ |
1990 | 0 | case BIOCGSEESENT: /* u_int */ |
1991 | 0 | bcopy(&d->bd_seesent, addr, sizeof(u_int)); |
1992 | 0 | break; |
1993 | | |
1994 | | /* |
1995 | | * Set "see sent packets" flag |
1996 | | */ |
1997 | 0 | case BIOCSSEESENT: /* u_int */ |
1998 | 0 | bcopy(addr, &d->bd_seesent, sizeof(u_int)); |
1999 | 0 | break; |
2000 | | |
2001 | | /* |
2002 | | * Set traffic service class |
2003 | | */ |
2004 | 0 | case BIOCSETTC: { /* int */ |
2005 | 0 | int tc; |
2006 | |
2007 | 0 | bcopy(addr, &tc, sizeof(int)); |
2008 | 0 | error = bpf_set_traffic_class(d, tc); |
2009 | 0 | break; |
2010 | 0 | } |
2011 | | |
2012 | | /* |
2013 | | * Get traffic service class |
2014 | | */ |
2015 | 0 | case BIOCGETTC: /* int */ |
2016 | 0 | bcopy(&d->bd_traffic_class, addr, sizeof(int)); |
2017 | 0 | break; |
2018 | | |
2019 | 0 | case FIONBIO: /* Non-blocking I/O; int */ |
2020 | 0 | break; |
2021 | | |
2022 | 0 | case FIOASYNC: /* Send signal on receive packets; int */ |
2023 | 0 | bcopy(addr, &d->bd_async, sizeof(int)); |
2024 | 0 | break; |
2025 | | #ifndef __APPLE__ |
2026 | | case FIOSETOWN: |
2027 | | error = fsetown(*(int *)addr, &d->bd_sigio); |
2028 | | break; |
2029 | | |
2030 | | case FIOGETOWN: |
2031 | | *(int *)addr = fgetown(d->bd_sigio); |
2032 | | break; |
2033 | | |
2034 | | /* This is deprecated, FIOSETOWN should be used instead. */ |
2035 | | case TIOCSPGRP: |
2036 | | error = fsetown(-(*(int *)addr), &d->bd_sigio); |
2037 | | break; |
2038 | | |
2039 | | /* This is deprecated, FIOGETOWN should be used instead. */ |
2040 | | case TIOCGPGRP: |
2041 | | *(int *)addr = -fgetown(d->bd_sigio); |
2042 | | break; |
2043 | | #endif |
2044 | 0 | case BIOCSRSIG: { /* Set receive signal; u_int */ |
2045 | 0 | u_int sig; |
2046 | |
2047 | 0 | bcopy(addr, &sig, sizeof(u_int)); |
2048 | |
2049 | 0 | if (sig >= NSIG) { |
2050 | 0 | error = EINVAL; |
2051 | 0 | } else { |
2052 | 0 | d->bd_sig = sig; |
2053 | 0 | } |
2054 | 0 | break; |
2055 | 0 | } |
2056 | 0 | case BIOCGRSIG: /* u_int */ |
2057 | 0 | bcopy(&d->bd_sig, addr, sizeof(u_int)); |
2058 | 0 | break; |
2059 | 0 | #ifdef __APPLE__ |
2060 | 0 | case BIOCSEXTHDR: /* u_int */ |
2061 | 0 | bcopy(addr, &int_arg, sizeof(int_arg)); |
2062 | 0 | if (int_arg) { |
2063 | 0 | d->bd_flags |= BPF_EXTENDED_HDR; |
2064 | 0 | } else { |
2065 | 0 | d->bd_flags &= ~BPF_EXTENDED_HDR; |
2066 | 0 | } |
2067 | 0 | break; |
2068 | | |
2069 | 0 | case BIOCGIFATTACHCOUNT: { /* struct ifreq */ |
2070 | 0 | ifnet_t ifp; |
2071 | 0 | struct bpf_if *bp; |
2072 | |
2073 | 0 | bcopy(addr, &ifr, sizeof(ifr)); |
2074 | 0 | ifr.ifr_name[IFNAMSIZ - 1] = '\0'; |
2075 | 0 | ifp = ifunit(ifr.ifr_name); |
2076 | 0 | if (ifp == NULL) { |
2077 | 0 | error = ENXIO; |
2078 | 0 | break; |
2079 | 0 | } |
2080 | 0 | ifr.ifr_intval = 0; |
2081 | 0 | for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) { |
2082 | 0 | struct bpf_d *bpf_d; |
2083 | |
2084 | 0 | if (bp->bif_ifp == NULL || bp->bif_ifp != ifp) { |
2085 | 0 | continue; |
2086 | 0 | } |
2087 | 0 | for (bpf_d = bp->bif_dlist; bpf_d; |
2088 | 0 | bpf_d = bpf_d->bd_next) { |
2089 | 0 | ifr.ifr_intval += 1; |
2090 | 0 | } |
2091 | 0 | } |
2092 | 0 | bcopy(&ifr, addr, sizeof(ifr)); |
2093 | 0 | break; |
2094 | 0 | } |
2095 | 0 | case BIOCGWANTPKTAP: /* u_int */ |
2096 | 0 | int_arg = d->bd_flags & BPF_WANT_PKTAP ? 1 : 0; |
2097 | 0 | bcopy(&int_arg, addr, sizeof(int_arg)); |
2098 | 0 | break; |
2099 | | |
2100 | 0 | case BIOCSWANTPKTAP: /* u_int */ |
2101 | 0 | bcopy(addr, &int_arg, sizeof(int_arg)); |
2102 | 0 | if (int_arg) { |
2103 | 0 | d->bd_flags |= BPF_WANT_PKTAP; |
2104 | 0 | } else { |
2105 | 0 | d->bd_flags &= ~BPF_WANT_PKTAP; |
2106 | 0 | } |
2107 | 0 | break; |
2108 | 0 | #endif |
2109 | | |
2110 | 0 | case BIOCSHEADDROP: |
2111 | 0 | bcopy(addr, &int_arg, sizeof(int_arg)); |
2112 | 0 | d->bd_headdrop = int_arg ? 1 : 0; |
2113 | 0 | break; |
2114 | | |
2115 | 0 | case BIOCGHEADDROP: |
2116 | 0 | bcopy(&d->bd_headdrop, addr, sizeof(int)); |
2117 | 0 | break; |
2118 | | |
2119 | 0 | case BIOCSTRUNCATE: |
2120 | 0 | bcopy(addr, &int_arg, sizeof(int_arg)); |
2121 | 0 | if (int_arg) { |
2122 | 0 | d->bd_flags |= BPF_TRUNCATE; |
2123 | 0 | } else { |
2124 | 0 | d->bd_flags &= ~BPF_TRUNCATE; |
2125 | 0 | } |
2126 | 0 | break; |
2127 | | |
2128 | 0 | case BIOCGETUUID: |
2129 | 0 | bcopy(&d->bd_uuid, addr, sizeof(uuid_t)); |
2130 | 0 | break; |
2131 | | |
2132 | 0 | case BIOCSETUP: { |
2133 | 0 | struct bpf_setup_args bsa; |
2134 | 0 | ifnet_t ifp; |
2135 | |
2136 | 0 | bcopy(addr, &bsa, sizeof(struct bpf_setup_args)); |
2137 | 0 | bsa.bsa_ifname[IFNAMSIZ - 1] = 0; |
2138 | 0 | ifp = ifunit(bsa.bsa_ifname); |
2139 | 0 | if (ifp == NULL) { |
2140 | 0 | error = ENXIO; |
2141 | 0 | os_log_info(OS_LOG_DEFAULT, |
2142 | 0 | "%s: ifnet not found for %s error %d", |
2143 | 0 | __func__, bsa.bsa_ifname, error); |
2144 | 0 | break; |
2145 | 0 | } |
2146 | | |
2147 | 0 | error = bpf_setup(d, bsa.bsa_uuid, ifp); |
2148 | 0 | break; |
2149 | 0 | } |
2150 | 0 | case BIOCSPKTHDRV2: |
2151 | 0 | bcopy(addr, &int_arg, sizeof(int_arg)); |
2152 | 0 | if (int_arg != 0) { |
2153 | 0 | d->bd_flags |= BPF_PKTHDRV2; |
2154 | 0 | } else { |
2155 | 0 | d->bd_flags &= ~BPF_PKTHDRV2; |
2156 | 0 | } |
2157 | 0 | break; |
2158 | | |
2159 | 0 | case BIOCGPKTHDRV2: |
2160 | 0 | int_arg = d->bd_flags & BPF_PKTHDRV2 ? 1 : 0; |
2161 | 0 | bcopy(&int_arg, addr, sizeof(int)); |
2162 | 0 | break; |
2163 | 0 | } |
2164 | | |
2165 | 0 | bpf_release_d(d); |
2166 | 0 | lck_mtx_unlock(bpf_mlock); |
2167 | |
2168 | 0 | return error; |
2169 | 0 | } |
2170 | | |
2171 | | /* |
2172 | | * Set d's packet filter program to fp. If this file already has a filter, |
2173 | | * free it and replace it. Returns EINVAL for bogus requests. |
2174 | | */ |
2175 | | static int |
2176 | | bpf_setf(struct bpf_d *d, u_int bf_len, user_addr_t bf_insns, |
2177 | | u_long cmd) |
2178 | 0 | { |
2179 | 0 | struct bpf_insn *fcode, *old; |
2180 | 0 | u_int flen, size; |
2181 | |
2182 | 0 | while (d->bd_hbuf_read != 0) { |
2183 | 0 | msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL); |
2184 | 0 | } |
2185 | |
2186 | 0 | if ((d->bd_flags & BPF_CLOSING) != 0) { |
2187 | 0 | return ENXIO; |
2188 | 0 | } |
2189 | | |
2190 | 0 | old = d->bd_filter; |
2191 | 0 | if (bf_insns == USER_ADDR_NULL) { |
2192 | 0 | if (bf_len != 0) { |
2193 | 0 | return EINVAL; |
2194 | 0 | } |
2195 | 0 | d->bd_filter = NULL; |
2196 | 0 | reset_d(d); |
2197 | 0 | if (old != 0) { |
2198 | 0 | FREE(old, M_DEVBUF); |
2199 | 0 | } |
2200 | 0 | return 0; |
2201 | 0 | } |
2202 | 0 | flen = bf_len; |
2203 | 0 | if (flen > BPF_MAXINSNS) { |
2204 | 0 | return EINVAL; |
2205 | 0 | } |
2206 | | |
2207 | 0 | size = flen * sizeof(struct bpf_insn); |
2208 | 0 | fcode = (struct bpf_insn *) _MALLOC(size, M_DEVBUF, M_WAIT); |
2209 | 0 | #ifdef __APPLE__ |
2210 | 0 | if (fcode == NULL) { |
2211 | 0 | return ENOBUFS; |
2212 | 0 | } |
2213 | 0 | #endif |
2214 | 0 | if (copyin(bf_insns, (caddr_t)fcode, size) == 0 && |
2215 | 0 | bpf_validate(fcode, (int)flen)) { |
2216 | 0 | d->bd_filter = fcode; |
2217 | |
2218 | 0 | if (cmd == BIOCSETF32 || cmd == BIOCSETF64) { |
2219 | 0 | reset_d(d); |
2220 | 0 | } |
2221 | |
2222 | 0 | if (old != 0) { |
2223 | 0 | FREE(old, M_DEVBUF); |
2224 | 0 | } |
2225 | |
2226 | 0 | return 0; |
2227 | 0 | } |
2228 | 0 | FREE(fcode, M_DEVBUF); |
2229 | 0 | return EINVAL; |
2230 | 0 | } |
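A hedged user-space counterpart: bpf_setf() is reached through BIOCSETF (or BIOCSETFNR, which skips the reset_d() flush). The sketch below installs a classic filter that accepts only IPv4 over Ethernet; fd is assumed to be an already-attached bpf descriptor.

    #include <sys/ioctl.h>
    #include <net/bpf.h>

    static int
    set_ipv4_filter(int fd)
    {
            static struct bpf_insn insns[] = {
                    BPF_STMT(BPF_LD + BPF_H + BPF_ABS, 12),            /* ethertype */
                    BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 0x0800, 0, 1), /* IPv4? */
                    BPF_STMT(BPF_RET + BPF_K, (u_int)-1),              /* accept all */
                    BPF_STMT(BPF_RET + BPF_K, 0),                      /* reject */
            };
            struct bpf_program prog = {
                    .bf_len = sizeof(insns) / sizeof(insns[0]),
                    .bf_insns = insns,
            };
            /* BIOCSETF also flushes the buffers (reset_d); BIOCSETFNR does not */
            return ioctl(fd, BIOCSETF, &prog);
    }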
2231 | | |
2232 | | /* |
2233 | | * Detach a file from its current interface (if attached at all) and attach |
2234 | | * to the interface indicated by theywant.
2235 | | * Return an errno or 0. |
2236 | | */ |
2237 | | static int |
2238 | | bpf_setif(struct bpf_d *d, ifnet_t theywant, bool do_reset, bool has_hbuf_read) |
2239 | 0 | { |
2240 | 0 | struct bpf_if *bp; |
2241 | 0 | int error; |
2242 | |
2243 | 0 | while (d->bd_hbuf_read != 0 && !has_hbuf_read) { |
2244 | 0 | msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL); |
2245 | 0 | } |
2246 | |
2247 | 0 | if ((d->bd_flags & BPF_CLOSING) != 0) { |
2248 | 0 | return ENXIO; |
2249 | 0 | } |
2250 | | |
2251 | | /* |
2252 | | * Look through attached interfaces for the named one. |
2253 | | */ |
2254 | 0 | for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) { |
2255 | 0 | struct ifnet *ifp = bp->bif_ifp; |
2256 | |
2257 | 0 | if (ifp == 0 || ifp != theywant) { |
2258 | 0 | continue; |
2259 | 0 | } |
2260 | | /* |
2261 | | * Do not use DLT_PKTAP, unless requested explicitly |
2262 | | */ |
2263 | 0 | if (bp->bif_dlt == DLT_PKTAP && !(d->bd_flags & BPF_WANT_PKTAP)) { |
2264 | 0 | continue; |
2265 | 0 | } |
2266 | | /* |
2267 | | * Skip the coprocessor interface |
2268 | | */ |
2269 | 0 | if (!intcoproc_unrestricted && IFNET_IS_INTCOPROC(ifp)) { |
2270 | 0 | continue; |
2271 | 0 | } |
2272 | | /* |
2273 | | * We found the requested interface. |
2274 | | * Allocate the packet buffers. |
2275 | | */ |
2276 | 0 | error = bpf_allocbufs(d); |
2277 | 0 | if (error != 0) { |
2278 | 0 | return error; |
2279 | 0 | } |
2280 | | /* |
2281 | | * Detach if attached to something else. |
2282 | | */ |
2283 | 0 | if (bp != d->bd_bif) { |
2284 | 0 | if (d->bd_bif != NULL) { |
2285 | 0 | if (bpf_detachd(d, 0) != 0) { |
2286 | 0 | return ENXIO; |
2287 | 0 | } |
2288 | 0 | } |
2289 | 0 | if (bpf_attachd(d, bp) != 0) { |
2290 | 0 | return ENXIO; |
2291 | 0 | } |
2292 | 0 | } |
2293 | 0 | if (do_reset) { |
2294 | 0 | reset_d(d); |
2295 | 0 | } |
2296 | 0 | return 0; |
2297 | 0 | } |
2298 | | /* Not found. */ |
2299 | 0 | return ENXIO; |
2300 | 0 | } |
2301 | | |
2302 | | /* |
2303 | | * Get a list of the available data link types of the interface.
2304 | | */ |
2305 | | static int |
2306 | | bpf_getdltlist(struct bpf_d *d, caddr_t addr, struct proc *p) |
2307 | 0 | { |
2308 | 0 | u_int n; |
2309 | 0 | int error; |
2310 | 0 | struct ifnet *ifp; |
2311 | 0 | struct bpf_if *bp; |
2312 | 0 | user_addr_t dlist; |
2313 | 0 | struct bpf_dltlist bfl; |
2314 | |
2315 | 0 | bcopy(addr, &bfl, sizeof(bfl)); |
2316 | 0 | if (proc_is64bit(p)) { |
2317 | 0 | dlist = (user_addr_t)bfl.bfl_u.bflu_pad; |
2318 | 0 | } else { |
2319 | 0 | dlist = CAST_USER_ADDR_T(bfl.bfl_u.bflu_list); |
2320 | 0 | } |
2321 | |
2322 | 0 | ifp = d->bd_bif->bif_ifp; |
2323 | 0 | n = 0; |
2324 | 0 | error = 0; |
2325 | |
2326 | 0 | for (bp = bpf_iflist; bp; bp = bp->bif_next) { |
2327 | 0 | if (bp->bif_ifp != ifp) { |
2328 | 0 | continue; |
2329 | 0 | } |
2330 | | /* |
2331 | | * Do not use DLT_PKTAP, unless requested explicitly |
2332 | | */ |
2333 | 0 | if (bp->bif_dlt == DLT_PKTAP && !(d->bd_flags & BPF_WANT_PKTAP)) { |
2334 | 0 | continue; |
2335 | 0 | } |
2336 | 0 | if (dlist != USER_ADDR_NULL) { |
2337 | 0 | if (n >= bfl.bfl_len) { |
2338 | 0 | return ENOMEM; |
2339 | 0 | } |
2340 | 0 | error = copyout(&bp->bif_dlt, dlist, |
2341 | 0 | sizeof(bp->bif_dlt)); |
2342 | 0 | if (error != 0) { |
2343 | 0 | break; |
2344 | 0 | } |
2345 | 0 | dlist += sizeof(bp->bif_dlt); |
2346 | 0 | } |
2347 | 0 | n++; |
2348 | 0 | } |
2349 | 0 | bfl.bfl_len = n; |
2350 | 0 | bcopy(&bfl, addr, sizeof(bfl)); |
2351 | |
2352 | 0 | return error; |
2353 | 0 | } |
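Because a NULL list pointer makes the loop above only count matching DLTs, user space normally calls BIOCGDLTLIST twice, as in this sketch (the bfl_list name assumes the user-visible alias for bfl_u.bflu_list):

    #include <sys/ioctl.h>
    #include <net/bpf.h>
    #include <stdlib.h>
    #include <string.h>

    static int
    list_dlts(int fd, struct bpf_dltlist *bfl)
    {
            memset(bfl, 0, sizeof(*bfl));
            if (ioctl(fd, BIOCGDLTLIST, bfl) < 0) { /* NULL list: count only */
                    return -1;
            }
            bfl->bfl_list = calloc(bfl->bfl_len, sizeof(u_int));
            if (bfl->bfl_list == NULL) {
                    return -1;
            }
            return ioctl(fd, BIOCGDLTLIST, bfl);    /* fill bfl_len entries */
    }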
2354 | | |
2355 | | /* |
2356 | | * Set the data link type of a BPF instance. |
2357 | | */ |
2358 | | static int |
2359 | | bpf_setdlt(struct bpf_d *d, uint32_t dlt) |
2360 | 0 | { |
2361 | 0 | int error, opromisc; |
2362 | 0 | struct ifnet *ifp; |
2363 | 0 | struct bpf_if *bp; |
2364 | |
2365 | 0 | if (d->bd_bif->bif_dlt == dlt) { |
2366 | 0 | return 0; |
2367 | 0 | } |
2368 | | |
2369 | 0 | while (d->bd_hbuf_read != 0) { |
2370 | 0 | msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL); |
2371 | 0 | } |
2372 | |
2373 | 0 | if ((d->bd_flags & BPF_CLOSING) != 0) { |
2374 | 0 | return ENXIO; |
2375 | 0 | } |
2376 | | |
2377 | 0 | ifp = d->bd_bif->bif_ifp; |
2378 | 0 | for (bp = bpf_iflist; bp; bp = bp->bif_next) { |
2379 | 0 | if (bp->bif_ifp == ifp && bp->bif_dlt == dlt) { |
2380 | | /* |
2381 | | * Do not use DLT_PKTAP, unless requested explicitly |
2382 | | */ |
2383 | 0 | if (bp->bif_dlt == DLT_PKTAP && |
2384 | 0 | !(d->bd_flags & BPF_WANT_PKTAP)) { |
2385 | 0 | continue; |
2386 | 0 | } |
2387 | 0 | break; |
2388 | 0 | } |
2389 | 0 | } |
2390 | 0 | if (bp != NULL) { |
2391 | 0 | opromisc = d->bd_promisc; |
2392 | 0 | if (bpf_detachd(d, 0) != 0) { |
2393 | 0 | return ENXIO; |
2394 | 0 | } |
2395 | 0 | error = bpf_attachd(d, bp); |
2396 | 0 | if (error) { |
2397 | 0 | printf("bpf_setdlt: bpf_attachd %s%d failed (%d)\n", |
2398 | 0 | ifnet_name(bp->bif_ifp), ifnet_unit(bp->bif_ifp), |
2399 | 0 | error); |
2400 | 0 | return error; |
2401 | 0 | } |
2402 | 0 | reset_d(d); |
2403 | 0 | if (opromisc) { |
2404 | 0 | lck_mtx_unlock(bpf_mlock); |
2405 | 0 | error = ifnet_set_promiscuous(bp->bif_ifp, 1); |
2406 | 0 | lck_mtx_lock(bpf_mlock); |
2407 | 0 | if (error) { |
2408 | 0 | printf("%s: ifpromisc %s%d failed (%d)\n", |
2409 | 0 | __func__, ifnet_name(bp->bif_ifp), |
2410 | 0 | ifnet_unit(bp->bif_ifp), error); |
2411 | 0 | } else { |
2412 | 0 | d->bd_promisc = 1; |
2413 | 0 | } |
2414 | 0 | } |
2415 | 0 | } |
2416 | 0 | return bp == NULL ? EINVAL : 0; |
2417 | 0 | } |
2418 | | |
2419 | | static int |
2420 | | bpf_set_traffic_class(struct bpf_d *d, int tc) |
2421 | 0 | { |
2422 | 0 | int error = 0; |
2423 | |
2424 | 0 | if (!SO_VALID_TC(tc)) { |
2425 | 0 | error = EINVAL; |
2426 | 0 | } else { |
2427 | 0 | d->bd_traffic_class = tc; |
2428 | 0 | } |
2429 | |
2430 | 0 | return error; |
2431 | 0 | } |
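A sketch of the matching user-space call; SO_TC_BK is one of the Apple-specific SO_TC_* constants that SO_VALID_TC() accepts, and fd is assumed to be a bpf descriptor:

    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/bpf.h>

    static int
    set_background_tc(int fd)
    {
            int tc = SO_TC_BK;      /* background service class */
            return ioctl(fd, BIOCSETTC, &tc);
    }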
2432 | | |
2433 | | static void |
2434 | | bpf_set_packet_service_class(struct mbuf *m, int tc) |
2435 | 0 | { |
2436 | 0 | if (!(m->m_flags & M_PKTHDR)) { |
2437 | 0 | return; |
2438 | 0 | } |
2439 | | |
2440 | 0 | VERIFY(SO_VALID_TC(tc)); |
2441 | 0 | (void) m_set_service_class(m, so_tc2msc(tc)); |
2442 | 0 | } |
2443 | | |
2444 | | /* |
2445 | | * Support for select() |
2446 | | * |
2447 | | * Return true iff the specific operation will not block indefinitely. |
2448 | | * Otherwise, return false but make a note that a selwakeup() must be done. |
2449 | | */ |
2450 | | int |
2451 | | bpfselect(dev_t dev, int which, void * wql, struct proc *p) |
2452 | 0 | { |
2453 | 0 | struct bpf_d *d; |
2454 | 0 | int ret = 0; |
2455 | |
2456 | 0 | lck_mtx_lock(bpf_mlock); |
2457 | |
2458 | 0 | d = bpf_dtab[minor(dev)]; |
2459 | 0 | if (d == NULL || d == BPF_DEV_RESERVED || |
2460 | 0 | (d->bd_flags & BPF_CLOSING) != 0) { |
2461 | 0 | lck_mtx_unlock(bpf_mlock); |
2462 | 0 | return ENXIO; |
2463 | 0 | } |
2464 | | |
2465 | 0 | bpf_acquire_d(d); |
2466 | |
2467 | 0 | if (d->bd_bif == NULL) { |
2468 | 0 | bpf_release_d(d); |
2469 | 0 | lck_mtx_unlock(bpf_mlock); |
2470 | 0 | return ENXIO; |
2471 | 0 | } |
2472 | | |
2473 | 0 | while (d->bd_hbuf_read != 0) { |
2474 | 0 | msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL); |
2475 | 0 | } |
2476 | |
2477 | 0 | if ((d->bd_flags & BPF_CLOSING) != 0) { |
2478 | 0 | bpf_release_d(d); |
2479 | 0 | lck_mtx_unlock(bpf_mlock); |
2480 | 0 | return ENXIO; |
2481 | 0 | } |
2482 | | |
2483 | 0 | switch (which) { |
2484 | 0 | case FREAD: |
2485 | 0 | if (d->bd_hlen != 0 || |
2486 | 0 | ((d->bd_immediate || |
2487 | 0 | d->bd_state == BPF_TIMED_OUT) && d->bd_slen != 0)) { |
2488 | 0 | ret = 1; /* read has data to return */ |
2489 | 0 | } else { |
2490 | | /* |
2491 | | * Read has no data to return. |
2492 | | * Make the select wait, and start a timer if |
2493 | | * necessary. |
2494 | | */ |
2495 | 0 | selrecord(p, &d->bd_sel, wql); |
2496 | 0 | bpf_start_timer(d); |
2497 | 0 | } |
2498 | 0 | break; |
2499 | | |
2500 | 0 | case FWRITE: |
2501 | | /* can't determine whether a write would block */ |
2502 | 0 | ret = 1; |
2503 | 0 | break; |
2504 | 0 | } |
2505 | | |
2506 | 0 | bpf_release_d(d); |
2507 | 0 | lck_mtx_unlock(bpf_mlock); |
2508 | |
2509 | 0 | return ret; |
2510 | 0 | } |
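A hedged user-space sketch of driving this path: select() returns once bpfselect() reports readable data, or once the timer armed by bpf_start_timer() moves the descriptor to BPF_TIMED_OUT.

    #include <sys/select.h>
    #include <sys/ioctl.h>
    #include <net/bpf.h>

    static int
    wait_readable(int fd)
    {
            struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
            (void) ioctl(fd, BIOCSRTIMEOUT, &tv);   /* bound reads to ~1s */

            fd_set rfds;
            FD_ZERO(&rfds);
            FD_SET(fd, &rfds);
            /* > 0 when a subsequent read() will not block indefinitely */
            return select(fd + 1, &rfds, NULL, NULL, NULL);
    }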
2511 | | |
2512 | | /* |
2513 | | * Support for kevent() system call. Register EVFILT_READ filters and |
2514 | | * reject all others. |
2515 | | */ |
2516 | | int bpfkqfilter(dev_t dev, struct knote *kn); |
2517 | | static void filt_bpfdetach(struct knote *); |
2518 | | static int filt_bpfread(struct knote *, long); |
2519 | | static int filt_bpftouch(struct knote *kn, struct kevent_qos_s *kev); |
2520 | | static int filt_bpfprocess(struct knote *kn, struct kevent_qos_s *kev); |
2521 | | |
2522 | | SECURITY_READ_ONLY_EARLY(struct filterops) bpfread_filtops = { |
2523 | | .f_isfd = 1, |
2524 | | .f_detach = filt_bpfdetach, |
2525 | | .f_event = filt_bpfread, |
2526 | | .f_touch = filt_bpftouch, |
2527 | | .f_process = filt_bpfprocess, |
2528 | | }; |
2529 | | |
2530 | | static int |
2531 | | filt_bpfread_common(struct knote *kn, struct kevent_qos_s *kev, struct bpf_d *d) |
2532 | 0 | { |
2533 | 0 | int ready = 0; |
2534 | 0 | int64_t data = 0; |
2535 | |
2536 | 0 | if (d->bd_immediate) { |
2537 | | /* |
2538 | | * If there's data in the hold buffer, it's the |
2539 | | * amount of data a read will return. |
2540 | | * |
2541 | | * If there's no data in the hold buffer, but |
2542 | | * there's data in the store buffer, a read will |
2543 | | * immediately rotate the store buffer to the |
2544 | | * hold buffer, the amount of data in the store |
2545 | | * buffer is the amount of data a read will |
2546 | | * return. |
2547 | | * |
2548 | | * If there's no data in either buffer, we're not |
2549 | | * ready to read. |
2550 | | */ |
2551 | 0 | data = (d->bd_hlen == 0 || d->bd_hbuf_read != 0 ? |
2552 | 0 | d->bd_slen : d->bd_hlen); |
2553 | 0 | int64_t lowwat = knote_low_watermark(kn); |
2554 | 0 | if (lowwat > d->bd_bufsize) { |
2555 | 0 | lowwat = d->bd_bufsize; |
2556 | 0 | } |
2557 | 0 | ready = (data >= lowwat); |
2558 | 0 | } else { |
2559 | | /* |
2560 | | * If there's data in the hold buffer, it's the |
2561 | | * amount of data a read will return. |
2562 | | * |
2563 | | * If there's no data in the hold buffer, but |
2564 | | * there's data in the store buffer, if the |
2565 | | * timer has expired a read will immediately |
2566 | | * rotate the store buffer to the hold buffer, |
2567 | | * so the amount of data in the store buffer is |
2568 | | * the amount of data a read will return. |
2569 | | * |
2570 | | * If there's no data in either buffer, or there's |
2571 | | * no data in the hold buffer and the timer hasn't |
2572 | | * expired, we're not ready to read. |
2573 | | */ |
2574 | 0 | data = ((d->bd_hlen == 0 || d->bd_hbuf_read != 0) && |
2575 | 0 | d->bd_state == BPF_TIMED_OUT ? d->bd_slen : d->bd_hlen); |
2576 | 0 | ready = (data > 0); |
2577 | 0 | } |
2578 | 0 | if (!ready) { |
2579 | 0 | bpf_start_timer(d); |
2580 | 0 | } else if (kev) { |
2581 | 0 | knote_fill_kevent(kn, kev, data); |
2582 | 0 | } |
2583 | |
2584 | 0 | return ready; |
2585 | 0 | } |
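From user space, the low watermark consulted through knote_low_watermark() above is supplied with NOTE_LOWAT, as in this sketch (the 4096-byte threshold is an arbitrary example and gets clamped to the buffer size):

    #include <sys/event.h>

    static int
    register_bpf_kevent(int kq, int bpf_fd)
    {
            struct kevent kev;
            EV_SET(&kev, bpf_fd, EVFILT_READ, EV_ADD, NOTE_LOWAT,
                4096 /* bytes buffered before the knote fires */, NULL);
            return kevent(kq, &kev, 1, NULL, 0, NULL);
    }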
2586 | | |
2587 | | int |
2588 | | bpfkqfilter(dev_t dev, struct knote *kn) |
2589 | 0 | { |
2590 | 0 | struct bpf_d *d; |
2591 | 0 | int res; |
2592 | | |
2593 | | /* |
2594 | | * Is this device a bpf? |
2595 | | */ |
2596 | 0 | if (major(dev) != CDEV_MAJOR || kn->kn_filter != EVFILT_READ) { |
2597 | 0 | knote_set_error(kn, EINVAL); |
2598 | 0 | return 0; |
2599 | 0 | } |
2600 | | |
2601 | 0 | lck_mtx_lock(bpf_mlock); |
2602 | |
2603 | 0 | d = bpf_dtab[minor(dev)]; |
2604 | |
2605 | 0 | if (d == NULL || d == BPF_DEV_RESERVED || |
2606 | 0 | (d->bd_flags & BPF_CLOSING) != 0 || |
2607 | 0 | d->bd_bif == NULL) { |
2608 | 0 | lck_mtx_unlock(bpf_mlock); |
2609 | 0 | knote_set_error(kn, ENXIO); |
2610 | 0 | return 0; |
2611 | 0 | } |
2612 | | |
2613 | 0 | kn->kn_hook = d; |
2614 | 0 | kn->kn_filtid = EVFILTID_BPFREAD; |
2615 | 0 | KNOTE_ATTACH(&d->bd_sel.si_note, kn); |
2616 | 0 | d->bd_flags |= BPF_KNOTE; |
2617 | | |
2618 | | /* capture the current state */ |
2619 | 0 | res = filt_bpfread_common(kn, NULL, d); |
2620 | |
2621 | 0 | lck_mtx_unlock(bpf_mlock); |
2622 | |
2623 | 0 | return res; |
2624 | 0 | } |
2625 | | |
2626 | | static void |
2627 | | filt_bpfdetach(struct knote *kn) |
2628 | 0 | { |
2629 | 0 | struct bpf_d *d = (struct bpf_d *)kn->kn_hook; |
2630 | |
2631 | 0 | lck_mtx_lock(bpf_mlock); |
2632 | 0 | if (d->bd_flags & BPF_KNOTE) { |
2633 | 0 | KNOTE_DETACH(&d->bd_sel.si_note, kn); |
2634 | 0 | d->bd_flags &= ~BPF_KNOTE; |
2635 | 0 | } |
2636 | 0 | lck_mtx_unlock(bpf_mlock); |
2637 | 0 | } |
2638 | | |
2639 | | static int |
2640 | | filt_bpfread(struct knote *kn, long hint) |
2641 | 0 | { |
2642 | 0 | #pragma unused(hint) |
2643 | 0 | struct bpf_d *d = (struct bpf_d *)kn->kn_hook; |
2644 | |
2645 | 0 | return filt_bpfread_common(kn, NULL, d); |
2646 | 0 | } |
2647 | | |
2648 | | static int |
2649 | | filt_bpftouch(struct knote *kn, struct kevent_qos_s *kev) |
2650 | 0 | { |
2651 | 0 | struct bpf_d *d = (struct bpf_d *)kn->kn_hook; |
2652 | 0 | int res; |
2653 | |
2654 | 0 | lck_mtx_lock(bpf_mlock); |
2655 | | |
2656 | | /* save off the lowat threshold and flag */ |
2657 | 0 | kn->kn_sdata = kev->data; |
2658 | 0 | kn->kn_sfflags = kev->fflags; |
2659 | | |
2660 | | /* output data will be re-generated here */ |
2661 | 0 | res = filt_bpfread_common(kn, NULL, d); |
2662 | |
2663 | 0 | lck_mtx_unlock(bpf_mlock); |
2664 | |
|
2665 | 0 | return res; |
2666 | 0 | } |
2667 | | |
2668 | | static int |
2669 | | filt_bpfprocess(struct knote *kn, struct kevent_qos_s *kev) |
2670 | 0 | { |
2671 | 0 | struct bpf_d *d = (struct bpf_d *)kn->kn_hook; |
2672 | 0 | int res; |
2673 | |
2674 | 0 | lck_mtx_lock(bpf_mlock); |
2675 | 0 | res = filt_bpfread_common(kn, kev, d); |
2676 | 0 | lck_mtx_unlock(bpf_mlock); |
2677 | |
2678 | 0 | return res; |
2679 | 0 | } |
2680 | | |
2681 | | /* |
2682 | | * Copy data from an mbuf chain into a buffer. This code is derived |
2683 | | * from m_copydata in kern/uipc_mbuf.c. |
2684 | | */ |
2685 | | static void |
2686 | | bpf_mcopy(struct mbuf * m, void *dst_arg, size_t len) |
2687 | 0 | { |
2688 | 0 | u_int count; |
2689 | 0 | u_char *dst; |
2690 | |
2691 | 0 | dst = dst_arg; |
2692 | 0 | while (len > 0) { |
2693 | 0 | if (m == 0) { |
2694 | 0 | panic("bpf_mcopy"); |
2695 | 0 | } |
2696 | 0 | count = min(m->m_len, len); |
2697 | 0 | bcopy(mbuf_data(m), dst, count); |
2698 | 0 | m = m->m_next; |
2699 | 0 | dst += count; |
2700 | 0 | len -= count; |
2701 | 0 | } |
2702 | 0 | } |
2703 | | |
2704 | | static inline void |
2705 | | bpf_tap_imp( |
2706 | | ifnet_t ifp, |
2707 | | u_int32_t dlt, |
2708 | | struct bpf_packet *bpf_pkt, |
2709 | | int outbound) |
2710 | 0 | { |
2711 | 0 | struct bpf_d *d; |
2712 | 0 | u_int slen; |
2713 | 0 | struct bpf_if *bp; |
2714 | | |
2715 | | /* |
2716 | | * It's possible that we get here after the bpf descriptor has been |
2717 | | * detached from the interface; in such a case we simply return. |
2718 | | * Lock ordering is important since we can be called asynchronously |
2719 | | * (from IOKit) to process an inbound packet; when that happens |
2720 | | * we would have been holding its "gateLock" and will be acquiring |
2721 | | * "bpf_mlock" upon entering this routine. Due to that, we release |
2722 | | * "bpf_mlock" prior to calling ifnet_set_promiscuous (which will |
2723 | | * acquire "gateLock" in the IOKit), in order to avoid a deadlock |
2724 | | * when a ifnet_set_promiscuous request simultaneously collides with |
2725 | | * an inbound packet being passed into the tap callback. |
2726 | | */ |
2727 | 0 | lck_mtx_lock(bpf_mlock); |
2728 | 0 | if (ifp->if_bpf == NULL) { |
2729 | 0 | lck_mtx_unlock(bpf_mlock); |
2730 | 0 | return; |
2731 | 0 | } |
2732 | 0 | for (bp = ifp->if_bpf; bp != NULL; bp = bp->bif_next) { |
2733 | 0 | if (bp->bif_ifp != ifp) { |
2734 | | /* wrong interface */ |
2735 | 0 | bp = NULL; |
2736 | 0 | break; |
2737 | 0 | } |
2738 | 0 | if (dlt == 0 || bp->bif_dlt == dlt) { |
2739 | | /* tapping default DLT or DLT matches */ |
2740 | 0 | break; |
2741 | 0 | } |
2742 | 0 | } |
2743 | 0 | if (bp == NULL) { |
2744 | 0 | goto done; |
2745 | 0 | } |
2746 | 0 | for (d = bp->bif_dlist; d; d = d->bd_next) { |
2747 | 0 | struct bpf_packet *bpf_pkt_saved = bpf_pkt; |
2748 | 0 | struct bpf_packet bpf_pkt_tmp; |
2749 | 0 | struct pktap_header_buffer bpfp_header_tmp; |
2750 | |
2751 | 0 | if (outbound && !d->bd_seesent) { |
2752 | 0 | continue; |
2753 | 0 | } |
2754 | | |
2755 | 0 | ++d->bd_rcount; |
2756 | 0 | slen = bpf_filter(d->bd_filter, (u_char *)bpf_pkt, |
2757 | 0 | bpf_pkt->bpfp_total_length, 0); |
2758 | 0 | if (bp->bif_ifp->if_type == IFT_PKTAP && |
2759 | 0 | bp->bif_dlt == DLT_PKTAP) { |
2760 | | /* |
2761 | | * Need to copy the bpf_pkt because the conversion |
2762 | | * to v2 pktap header modifies the content of the |
2763 | | * bpfp_header |
2764 | | */ |
2765 | 0 | if ((d->bd_flags & BPF_PKTHDRV2) && |
2766 | 0 | bpf_pkt->bpfp_header_length <= sizeof(bpfp_header_tmp)) { |
2767 | 0 | bpf_pkt_tmp = *bpf_pkt; |
2768 | |
2769 | 0 | bpf_pkt = &bpf_pkt_tmp; |
2770 | |
2771 | 0 | memcpy(&bpfp_header_tmp, bpf_pkt->bpfp_header, |
2772 | 0 | bpf_pkt->bpfp_header_length); |
2773 | |
2774 | 0 | bpf_pkt->bpfp_header = &bpfp_header_tmp; |
2775 | |
2776 | 0 | convert_to_pktap_header_to_v2(bpf_pkt, |
2777 | 0 | !!(d->bd_flags & BPF_TRUNCATE)); |
2778 | 0 | } |
2779 | |
2780 | 0 | if (d->bd_flags & BPF_TRUNCATE) { |
2781 | 0 | slen = min(slen, |
2782 | 0 | get_pkt_trunc_len((u_char *)bpf_pkt, |
2783 | 0 | bpf_pkt->bpfp_total_length)); |
2784 | 0 | } |
2785 | 0 | } |
2786 | 0 | if (slen != 0) { |
2787 | 0 | catchpacket(d, bpf_pkt, slen, outbound); |
2788 | 0 | } |
2789 | 0 | bpf_pkt = bpf_pkt_saved; |
2790 | 0 | } |
2791 | |
2792 | 0 | done: |
2793 | 0 | lck_mtx_unlock(bpf_mlock); |
2794 | 0 | } |
2795 | | |
2796 | | static inline void |
2797 | | bpf_tap_mbuf( |
2798 | | ifnet_t ifp, |
2799 | | u_int32_t dlt, |
2800 | | mbuf_t m, |
2801 | | void* hdr, |
2802 | | size_t hlen, |
2803 | | int outbound) |
2804 | 0 | { |
2805 | 0 | struct bpf_packet bpf_pkt; |
2806 | 0 | struct mbuf *m0; |
2807 | |
2808 | 0 | if (ifp->if_bpf == NULL) { |
2809 | | /* quickly check without taking lock */ |
2810 | 0 | return; |
2811 | 0 | } |
2812 | 0 | bpf_pkt.bpfp_type = BPF_PACKET_TYPE_MBUF; |
2813 | 0 | bpf_pkt.bpfp_mbuf = m; |
2814 | 0 | bpf_pkt.bpfp_total_length = 0; |
2815 | 0 | for (m0 = m; m0 != NULL; m0 = m0->m_next) { |
2816 | 0 | bpf_pkt.bpfp_total_length += m0->m_len; |
2817 | 0 | } |
2818 | 0 | bpf_pkt.bpfp_header = hdr; |
2819 | 0 | if (hdr != NULL) { |
2820 | 0 | bpf_pkt.bpfp_total_length += hlen; |
2821 | 0 | bpf_pkt.bpfp_header_length = hlen; |
2822 | 0 | } else { |
2823 | 0 | bpf_pkt.bpfp_header_length = 0; |
2824 | 0 | } |
2825 | 0 | bpf_tap_imp(ifp, dlt, &bpf_pkt, outbound); |
2826 | 0 | } |
2827 | | |
2828 | | void |
2829 | | bpf_tap_out( |
2830 | | ifnet_t ifp, |
2831 | | u_int32_t dlt, |
2832 | | mbuf_t m, |
2833 | | void* hdr, |
2834 | | size_t hlen) |
2835 | 0 | { |
2836 | 0 | bpf_tap_mbuf(ifp, dlt, m, hdr, hlen, 1); |
2837 | 0 | } |
2838 | | |
2839 | | void |
2840 | | bpf_tap_in( |
2841 | | ifnet_t ifp, |
2842 | | u_int32_t dlt, |
2843 | | mbuf_t m, |
2844 | | void* hdr, |
2845 | | size_t hlen) |
2846 | 0 | { |
2847 | 0 | bpf_tap_mbuf(ifp, dlt, m, hdr, hlen, 0); |
2848 | 0 | } |
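Driver-side usage is symmetric; a hypothetical Ethernet driver's input path might look like this sketch (the function name is invented for illustration):

    /*
     * Hypothetical driver input path handing a received mbuf chain to
     * BPF before injecting it into the stack. No separate header is
     * passed because the Ethernet header is already in the mbuf.
     */
    static void
    hypothetical_driver_input(ifnet_t ifp, mbuf_t m)
    {
            bpf_tap_in(ifp, DLT_EN10MB, m, NULL, 0);
            /* ... then pass the packet up, e.g. via ifnet_input() */
    }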
2849 | | |
2850 | | /* Callback registered with Ethernet driver. */ |
2851 | | static int |
2852 | | bpf_tap_callback(struct ifnet *ifp, struct mbuf *m) |
2853 | 0 | { |
2854 | 0 | bpf_tap_mbuf(ifp, 0, m, NULL, 0, mbuf_pkthdr_rcvif(m) == NULL); |
2855 | |
2856 | 0 | return 0; |
2857 | 0 | } |
2858 | | |
2859 | | |
2860 | | static errno_t |
2861 | | bpf_copydata(struct bpf_packet *pkt, size_t off, size_t len, void* out_data) |
2862 | 0 | { |
2863 | 0 | errno_t err = 0; |
2864 | 0 | if (pkt->bpfp_type == BPF_PACKET_TYPE_MBUF) { |
2865 | 0 | err = mbuf_copydata(pkt->bpfp_mbuf, off, len, out_data); |
2866 | 0 | } else { |
2867 | 0 | err = EINVAL; |
2868 | 0 | } |
2869 | |
2870 | 0 | return err; |
2871 | 0 | } |
2872 | | |
2873 | | static void |
2874 | | copy_bpf_packet(struct bpf_packet * pkt, void * dst, size_t len) |
2875 | 0 | { |
2876 | | /* copy the optional header */ |
2877 | 0 | if (pkt->bpfp_header_length != 0) { |
2878 | 0 | size_t count = min(len, pkt->bpfp_header_length); |
2879 | 0 | bcopy(pkt->bpfp_header, dst, count); |
2880 | 0 | len -= count; |
2881 | 0 | dst += count; |
2882 | 0 | } |
2883 | 0 | if (len == 0) { |
2884 | | /* nothing past the header */ |
2885 | 0 | return; |
2886 | 0 | } |
2887 | | /* copy the packet */ |
2888 | 0 | switch (pkt->bpfp_type) { |
2889 | 0 | case BPF_PACKET_TYPE_MBUF: |
2890 | 0 | bpf_mcopy(pkt->bpfp_mbuf, dst, len); |
2891 | 0 | break; |
2892 | 0 | default: |
2893 | 0 | break; |
2894 | 0 | } |
2895 | 0 | } |
2896 | | |
2897 | | static uint16_t |
2898 | | get_esp_trunc_len(__unused struct bpf_packet *pkt, __unused uint16_t off, |
2899 | | const uint16_t remaining_caplen) |
2900 | 0 | { |
2901 | | /* |
2902 | | * For some reason tcpdump expects to have one byte beyond the ESP header |
2903 | | */ |
2904 | 0 | uint16_t trunc_len = ESP_HDR_SIZE + 1; |
2905 | |
2906 | 0 | if (trunc_len > remaining_caplen) { |
2907 | 0 | return remaining_caplen; |
2908 | 0 | } |
2909 | | |
2910 | 0 | return trunc_len; |
2911 | 0 | } |
2912 | | |
2913 | | static uint16_t |
2914 | | get_isakmp_trunc_len(__unused struct bpf_packet *pkt, __unused uint16_t off, |
2915 | | const uint16_t remaining_caplen) |
2916 | 0 | { |
2917 | | /* |
2918 | | * Include the payload generic header |
2919 | | */ |
2920 | 0 | uint16_t trunc_len = ISAKMP_HDR_SIZE; |
2921 | |
2922 | 0 | if (trunc_len > remaining_caplen) { |
2923 | 0 | return remaining_caplen; |
2924 | 0 | } |
2925 | | |
2926 | 0 | return trunc_len; |
2927 | 0 | } |
2928 | | |
2929 | | static uint16_t |
2930 | | get_isakmp_natt_trunc_len(struct bpf_packet *pkt, uint16_t off, |
2931 | | const uint16_t remaining_caplen) |
2932 | 0 | { |
2933 | 0 | int err = 0; |
2934 | 0 | uint16_t trunc_len = 0; |
2935 | 0 | char payload[remaining_caplen]; |
2936 | |
2937 | 0 | err = bpf_copydata(pkt, off, remaining_caplen, payload); |
2938 | 0 | if (err != 0) { |
2939 | 0 | return remaining_caplen; |
2940 | 0 | } |
2941 | | /* |
2942 | | * There are three cases:
2943 | | * - IKE: the payload starts with a 4-byte header set to zero before the ISAKMP header
2944 | | * - keep alive: 1 byte payload |
2945 | | * - otherwise it's ESP |
2946 | | */ |
2947 | 0 | if (remaining_caplen >= 4 && |
2948 | 0 | payload[0] == 0 && payload[1] == 0 && |
2949 | 0 | payload[2] == 0 && payload[3] == 0) { |
2950 | 0 | trunc_len = 4 + get_isakmp_trunc_len(pkt, off + 4, remaining_caplen - 4); |
2951 | 0 | } else if (remaining_caplen == 1) { |
2952 | 0 | trunc_len = 1; |
2953 | 0 | } else { |
2954 | 0 | trunc_len = get_esp_trunc_len(pkt, off, remaining_caplen); |
2955 | 0 | } |
2956 | |
2957 | 0 | if (trunc_len > remaining_caplen) { |
2958 | 0 | return remaining_caplen; |
2959 | 0 | } |
2960 | | |
2961 | 0 | return trunc_len; |
2962 | 0 | } |
2963 | | |
2964 | | static uint16_t |
2965 | | get_udp_trunc_len(struct bpf_packet *pkt, uint16_t off, const uint16_t remaining_caplen) |
2966 | 0 | { |
2967 | 0 | int err = 0; |
2968 | 0 | uint16_t trunc_len = sizeof(struct udphdr); /* By default no UDP payload */ |
2969 | |
2970 | 0 | if (trunc_len >= remaining_caplen) { |
2971 | 0 | return remaining_caplen; |
2972 | 0 | } |
2973 | | |
2974 | 0 | struct udphdr udphdr; |
2975 | 0 | err = bpf_copydata(pkt, off, sizeof(struct udphdr), &udphdr); |
2976 | 0 | if (err != 0) { |
2977 | 0 | return remaining_caplen; |
2978 | 0 | } |
2979 | | |
2980 | 0 | u_short sport, dport; |
2981 | |
2982 | 0 | sport = EXTRACT_SHORT(&udphdr.uh_sport); |
2983 | 0 | dport = EXTRACT_SHORT(&udphdr.uh_dport); |
2984 | |
2985 | 0 | if (dport == PORT_DNS || sport == PORT_DNS) { |
2986 | | /* |
2987 | | * Full UDP payload for DNS |
2988 | | */ |
2989 | 0 | trunc_len = remaining_caplen; |
2990 | 0 | } else if ((sport == PORT_BOOTPS && dport == PORT_BOOTPC) || |
2991 | 0 | (sport == PORT_BOOTPC && dport == PORT_BOOTPS)) { |
2992 | | /* |
2993 | | * Full UDP payload for BOOTP and DHCP |
2994 | | */ |
2995 | 0 | trunc_len = remaining_caplen; |
2996 | 0 | } else if (dport == PORT_ISAKMP && sport == PORT_ISAKMP) { |
2997 | | /* |
2998 | | * Return the ISAKMP header |
2999 | | */ |
3000 | 0 | trunc_len += get_isakmp_trunc_len(pkt, off + sizeof(struct udphdr), |
3001 | 0 | remaining_caplen - sizeof(struct udphdr)); |
3002 | 0 | } else if (dport == PORT_ISAKMP_NATT && sport == PORT_ISAKMP_NATT) { |
3003 | 0 | trunc_len += get_isakmp_natt_trunc_len(pkt, off + sizeof(struct udphdr), |
3004 | 0 | remaining_caplen - sizeof(struct udphdr)); |
3005 | 0 | } |
3006 | 0 | if (trunc_len >= remaining_caplen) { |
3007 | 0 | return remaining_caplen; |
3008 | 0 | } |
3009 | | |
3010 | 0 | return trunc_len; |
3011 | 0 | } |
3012 | | |
3013 | | static uint16_t |
3014 | | get_tcp_trunc_len(struct bpf_packet *pkt, uint16_t off, const uint16_t remaining_caplen) |
3015 | 0 | { |
3016 | 0 | int err = 0; |
3017 | 0 | uint16_t trunc_len = sizeof(struct tcphdr); /* By default no TCP payload */ |
3018 | 0 | if (trunc_len >= remaining_caplen) { |
3019 | 0 | return remaining_caplen; |
3020 | 0 | } |
3021 | | |
3022 | 0 | struct tcphdr tcphdr; |
3023 | 0 | err = bpf_copydata(pkt, off, sizeof(struct tcphdr), &tcphdr); |
3024 | 0 | if (err != 0) { |
3025 | 0 | return remaining_caplen; |
3026 | 0 | } |
3027 | | |
3028 | 0 | u_short sport, dport; |
3029 | 0 | sport = EXTRACT_SHORT(&tcphdr.th_sport); |
3030 | 0 | dport = EXTRACT_SHORT(&tcphdr.th_dport); |
3031 | |
3032 | 0 | if (dport == PORT_DNS || sport == PORT_DNS) { |
3033 | | /* |
3034 | | * Full TCP payload for DNS |
3035 | | */ |
3036 | 0 | trunc_len = remaining_caplen; |
3037 | 0 | } else { |
3038 | 0 | trunc_len = tcphdr.th_off << 2; |
3039 | 0 | } |
3040 | 0 | if (trunc_len >= remaining_caplen) { |
3041 | 0 | return remaining_caplen; |
3042 | 0 | } |
3043 | | |
3044 | 0 | return trunc_len; |
3045 | 0 | } |
3046 | | |
3047 | | static uint16_t |
3048 | | get_proto_trunc_len(uint8_t proto, struct bpf_packet *pkt, uint16_t off, const uint16_t remaining_caplen) |
3049 | 0 | { |
3050 | 0 | uint16_t trunc_len; |
3051 | |
3052 | 0 | switch (proto) { |
3053 | 0 | case IPPROTO_ICMP: { |
3054 | | /* |
3055 | | * Full ICMP payload
3056 | | */ |
3057 | 0 | trunc_len = remaining_caplen; |
3058 | 0 | break; |
3059 | 0 | } |
3060 | 0 | case IPPROTO_ICMPV6: { |
3061 | | /* |
3062 | | * Full ICMPV6 payload
3063 | | */ |
3064 | 0 | trunc_len = remaining_caplen; |
3065 | 0 | break; |
3066 | 0 | } |
3067 | 0 | case IPPROTO_IGMP: { |
3068 | | /* |
3069 | | * Full IGMP payload |
3070 | | */ |
3071 | 0 | trunc_len = remaining_caplen; |
3072 | 0 | break; |
3073 | 0 | } |
3074 | 0 | case IPPROTO_UDP: { |
3075 | 0 | trunc_len = get_udp_trunc_len(pkt, off, remaining_caplen); |
3076 | 0 | break; |
3077 | 0 | } |
3078 | 0 | case IPPROTO_TCP: { |
3079 | 0 | trunc_len = get_tcp_trunc_len(pkt, off, remaining_caplen); |
3080 | 0 | break; |
3081 | 0 | } |
3082 | 0 | case IPPROTO_ESP: { |
3083 | 0 | trunc_len = get_esp_trunc_len(pkt, off, remaining_caplen); |
3084 | 0 | break; |
3085 | 0 | } |
3086 | 0 | default: { |
3087 | | /* |
3088 | | * By default we only include the IP header |
3089 | | */ |
3090 | 0 | trunc_len = 0; |
3091 | 0 | break; |
3092 | 0 | } |
3093 | 0 | } |
3094 | 0 | if (trunc_len >= remaining_caplen) { |
3095 | 0 | return remaining_caplen; |
3096 | 0 | } |
3097 | | |
3098 | 0 | return trunc_len; |
3099 | 0 | } |
3100 | | |
3101 | | static uint16_t |
3102 | | get_ip_trunc_len(struct bpf_packet *pkt, uint16_t off, const uint16_t remaining_caplen) |
3103 | 0 | { |
3104 | 0 | int err = 0; |
3105 | 0 | uint16_t iplen = sizeof(struct ip); |
3106 | 0 | if (iplen >= remaining_caplen) { |
3107 | 0 | return remaining_caplen; |
3108 | 0 | } |
3109 | | |
3110 | 0 | struct ip iphdr; |
3111 | 0 | err = bpf_copydata(pkt, off, sizeof(struct ip), &iphdr); |
3112 | 0 | if (err != 0) { |
3113 | 0 | return remaining_caplen; |
3114 | 0 | } |
3115 | | |
3116 | 0 | uint8_t proto = 0; |
3117 | |
3118 | 0 | iplen = iphdr.ip_hl << 2; |
3119 | 0 | if (iplen >= remaining_caplen) { |
3120 | 0 | return remaining_caplen; |
3121 | 0 | } |
3122 | | |
3123 | 0 | proto = iphdr.ip_p; |
3124 | 0 | iplen += get_proto_trunc_len(proto, pkt, off + iplen, remaining_caplen - iplen); |
3125 | |
3126 | 0 | if (iplen >= remaining_caplen) { |
3127 | 0 | return remaining_caplen; |
3128 | 0 | } |
3129 | | |
3130 | 0 | return iplen; |
3131 | 0 | } |
3132 | | |
3133 | | static uint16_t |
3134 | | get_ip6_trunc_len(struct bpf_packet *pkt, uint16_t off, const uint16_t remaining_caplen) |
3135 | 0 | { |
3136 | 0 | int err = 0; |
3137 | 0 | uint16_t iplen = sizeof(struct ip6_hdr); |
3138 | 0 | if (iplen >= remaining_caplen) { |
3139 | 0 | return remaining_caplen; |
3140 | 0 | } |
3141 | | |
3142 | 0 | struct ip6_hdr ip6hdr; |
3143 | 0 | err = bpf_copydata(pkt, off, sizeof(struct ip6_hdr), &ip6hdr); |
3144 | 0 | if (err != 0) { |
3145 | 0 | return remaining_caplen; |
3146 | 0 | } |
3147 | | |
3148 | 0 | uint8_t proto = 0; |
3149 | | |
3150 | | /* |
3151 | | * TBD: process the extension headers |
3152 | | */ |
3153 | 0 | proto = ip6hdr.ip6_nxt; |
3154 | 0 | iplen += get_proto_trunc_len(proto, pkt, off + iplen, remaining_caplen - iplen); |
3155 | |
3156 | 0 | if (iplen >= remaining_caplen) { |
3157 | 0 | return remaining_caplen; |
3158 | 0 | } |
3159 | | |
3160 | 0 | return iplen; |
3161 | 0 | } |
3162 | | |
3163 | | static uint16_t |
3164 | | get_ether_trunc_len(struct bpf_packet *pkt, int off, const uint16_t remaining_caplen) |
3165 | 0 | { |
3166 | 0 | int err = 0; |
3167 | 0 | uint16_t ethlen = sizeof(struct ether_header); |
3168 | 0 | if (ethlen >= remaining_caplen) { |
3169 | 0 | return remaining_caplen; |
3170 | 0 | } |
3171 | | |
3172 | 0 | struct ether_header eh; |
3173 | 0 | u_short type; |
3174 | 0 | err = bpf_copydata(pkt, off, sizeof(struct ether_header), &eh); |
3175 | 0 | if (err != 0) { |
3176 | 0 | return remaining_caplen; |
3177 | 0 | } |
3178 | | |
3179 | 0 | type = EXTRACT_SHORT(&eh.ether_type); |
3180 | | /* Include full ARP */ |
3181 | 0 | if (type == ETHERTYPE_ARP) { |
3182 | 0 | ethlen = remaining_caplen; |
3183 | 0 | } else if (type != ETHERTYPE_IP && type != ETHERTYPE_IPV6) { |
3184 | 0 | ethlen = min(BPF_MIN_PKT_SIZE, remaining_caplen); |
3185 | 0 | } else { |
3186 | 0 | if (type == ETHERTYPE_IP) { |
3187 | 0 | ethlen += get_ip_trunc_len(pkt, sizeof(struct ether_header), |
3188 | 0 | remaining_caplen); |
3189 | 0 | } else if (type == ETHERTYPE_IPV6) { |
3190 | 0 | ethlen += get_ip6_trunc_len(pkt, sizeof(struct ether_header), |
3191 | 0 | remaining_caplen); |
3192 | 0 | } |
3193 | 0 | } |
3194 | 0 | return ethlen; |
3195 | 0 | } |
3196 | | |
3197 | | static uint32_t |
3198 | | get_pkt_trunc_len(u_char *p, u_int len) |
3199 | 0 | { |
3200 | 0 | struct bpf_packet *pkt = (struct bpf_packet *)(void *) p; |
3201 | 0 | struct pktap_header *pktap = (struct pktap_header *) (pkt->bpfp_header); |
3202 | 0 | uint32_t out_pkt_len = 0, tlen = 0; |
3203 | | /* |
3204 | | * pktap->pth_frame_pre_length is L2 header length and accounts |
3205 | | * for both pre and pre_adjust. |
3206 | | * pktap->pth_length is sizeof(pktap_header) (excl the pre/pre_adjust) |
3207 | | * pkt->bpfp_header_length is (pktap->pth_length + pre_adjust) |
3208 | | * pre is the offset to the L3 header after the bpfp_header, or length |
3209 | | * of L2 header after bpfp_header, if present. |
3210 | | */ |
3211 | 0 | int32_t pre = pktap->pth_frame_pre_length - |
3212 | 0 | (pkt->bpfp_header_length - pktap->pth_length); |
3213 | | |
3214 | | /* Length of the input packet starting from L3 header */ |
3215 | 0 | uint32_t in_pkt_len = len - pkt->bpfp_header_length - pre; |
3216 | 0 | if (pktap->pth_protocol_family == AF_INET || |
3217 | 0 | pktap->pth_protocol_family == AF_INET6) { |
3218 | | /* Contains L2 header */ |
3219 | 0 | if (pre > 0) { |
3220 | 0 | if (pre < (int32_t)sizeof(struct ether_header)) { |
3221 | 0 | goto too_short; |
3222 | 0 | } |
3223 | | |
3224 | 0 | out_pkt_len = get_ether_trunc_len(pkt, 0, in_pkt_len); |
3225 | 0 | } else if (pre == 0) { |
3226 | 0 | if (pktap->pth_protocol_family == AF_INET) { |
3227 | 0 | out_pkt_len = get_ip_trunc_len(pkt, pre, in_pkt_len); |
3228 | 0 | } else if (pktap->pth_protocol_family == AF_INET6) { |
3229 | 0 | out_pkt_len = get_ip6_trunc_len(pkt, pre, in_pkt_len); |
3230 | 0 | } |
3231 | 0 | } else { |
3232 | | /* Ideally pre should be >= 0. This is an exception */ |
3233 | 0 | out_pkt_len = min(BPF_MIN_PKT_SIZE, in_pkt_len); |
3234 | 0 | } |
3235 | 0 | } else { |
3236 | 0 | if (pktap->pth_iftype == IFT_ETHER) { |
3237 | 0 | if (in_pkt_len < sizeof(struct ether_header)) { |
3238 | 0 | goto too_short; |
3239 | 0 | } |
3240 | | /* At most include the Ethernet header and 16 bytes */ |
3241 | 0 | out_pkt_len = MIN(sizeof(struct ether_header) + 16, |
3242 | 0 | in_pkt_len); |
3243 | 0 | } else { |
3244 | | /* |
3245 | | * For unknown protocols include at most 16 bytes |
3246 | | */ |
3247 | 0 | out_pkt_len = MIN(16, in_pkt_len); |
3248 | 0 | } |
3249 | 0 | } |
3250 | 0 | done: |
3251 | 0 | tlen = pkt->bpfp_header_length + out_pkt_len + pre; |
3252 | 0 | return tlen; |
3253 | 0 | too_short: |
3254 | 0 | out_pkt_len = in_pkt_len; |
3255 | 0 | goto done; |
3256 | 0 | } |
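These helpers compose one layer at a time (Ethernet, then IP/IPv6, then transport), keeping full payloads only for control-plane traffic such as DNS, BOOTP/DHCP, ISAKMP and ICMP. Truncation is driven by the BPF_TRUNCATE flag toggled through the BIOCSTRUNCATE ioctl handled earlier; a user-space sketch, assuming the Apple-private ioctl definition is visible to the build:

    #include <sys/ioctl.h>
    #include <net/bpf.h>

    static int
    enable_truncation(int fd)
    {
            u_int on = 1;   /* sets BPF_TRUNCATE: header-only capture */
            return ioctl(fd, BIOCSTRUNCATE, &on);
    }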
3257 | | |
3258 | | /* |
3259 | | * Move the packet data from interface memory (pkt) into the |
3260 | | * store buffer. Wakes up pending reads when the buffers are rotated,
3261 | | * or when immediate mode is set or the read timeout has expired.
3262 | | */
3263 | | static void |
3264 | | catchpacket(struct bpf_d *d, struct bpf_packet * pkt, |
3265 | | u_int snaplen, int outbound) |
3266 | 0 | { |
3267 | 0 | struct bpf_hdr *hp; |
3268 | 0 | struct bpf_hdr_ext *ehp; |
3269 | 0 | int totlen, curlen; |
3270 | 0 | int hdrlen, caplen; |
3271 | 0 | int do_wakeup = 0; |
3272 | 0 | u_char *payload; |
3273 | 0 | struct timeval tv; |
3274 | |
|
3275 | 0 | hdrlen = (d->bd_flags & BPF_EXTENDED_HDR) ? d->bd_bif->bif_exthdrlen : |
3276 | 0 | d->bd_bif->bif_hdrlen; |
3277 | | /* |
3278 | | * Figure out how many bytes to move. If the packet is |
3279 | | * greater or equal to the snapshot length, transfer that |
3280 | | * much. Otherwise, transfer the whole packet (unless |
3281 | | * we hit the buffer size limit). |
3282 | | */ |
3283 | 0 | totlen = hdrlen + min(snaplen, pkt->bpfp_total_length); |
3284 | 0 | if (totlen > d->bd_bufsize) { |
3285 | 0 | totlen = d->bd_bufsize; |
3286 | 0 | } |
3287 | |
|
3288 | 0 | if (hdrlen > totlen) { |
3289 | 0 | return; |
3290 | 0 | } |
3291 | | |
3292 | | /* |
3293 | | * Round up the end of the previous packet to the next longword. |
3294 | | */ |
3295 | 0 | curlen = BPF_WORDALIGN(d->bd_slen); |
3296 | 0 | if (curlen + totlen > d->bd_bufsize) { |
3297 | | /* |
3298 | | * This packet will overflow the storage buffer. |
3299 | | * Rotate the buffers if we can, then wakeup any |
3300 | | * pending reads. |
3301 | | * |
3302 | | * We cannot rotate buffers if a read is in progress |
3303 | | * so drop the packet |
3304 | | */ |
3305 | 0 | if (d->bd_hbuf_read != 0) { |
3306 | 0 | ++d->bd_dcount; |
3307 | 0 | return; |
3308 | 0 | } |
3309 | | |
3310 | 0 | if (d->bd_fbuf == NULL) { |
3311 | 0 | if (d->bd_headdrop == 0) { |
3312 | | /* |
3313 | | * We haven't completed the previous read yet, |
3314 | | * so drop the packet. |
3315 | | */ |
3316 | 0 | ++d->bd_dcount; |
3317 | 0 | return; |
3318 | 0 | } |
3319 | | /* |
3320 | | * Drop the hold buffer as it contains older packets |
3321 | | */ |
3322 | 0 | d->bd_dcount += d->bd_hcnt; |
3323 | 0 | d->bd_fbuf = d->bd_hbuf; |
3324 | 0 | ROTATE_BUFFERS(d); |
3325 | 0 | } else { |
3326 | 0 | ROTATE_BUFFERS(d); |
3327 | 0 | } |
3328 | 0 | do_wakeup = 1; |
3329 | 0 | curlen = 0; |
3330 | 0 | } else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) { |
3331 | | /* |
3332 | | * Immediate mode is set, or the read timeout has |
3333 | | * already expired during a select call. A packet |
3334 | | * arrived, so the reader should be woken up. |
3335 | | */ |
3336 | 0 | do_wakeup = 1; |
3337 | 0 | } |
3338 | | |
3339 | | /* |
3340 | | * Append the bpf header. |
3341 | | */ |
3342 | 0 | microtime(&tv); |
3343 | 0 | if (d->bd_flags & BPF_EXTENDED_HDR) { |
3344 | 0 | struct mbuf *m; |
3345 | |
3346 | 0 | m = (pkt->bpfp_type == BPF_PACKET_TYPE_MBUF) |
3347 | 0 | ? pkt->bpfp_mbuf : NULL; |
3348 | 0 | ehp = (struct bpf_hdr_ext *)(void *)(d->bd_sbuf + curlen); |
3349 | 0 | memset(ehp, 0, sizeof(*ehp)); |
3350 | 0 | ehp->bh_tstamp.tv_sec = tv.tv_sec; |
3351 | 0 | ehp->bh_tstamp.tv_usec = tv.tv_usec; |
3352 | |
3353 | 0 | ehp->bh_datalen = pkt->bpfp_total_length; |
3354 | 0 | ehp->bh_hdrlen = hdrlen; |
3355 | 0 | caplen = ehp->bh_caplen = totlen - hdrlen; |
3356 | 0 | if (m == NULL) { |
3357 | 0 | if (outbound) { |
3358 | 0 | ehp->bh_flags |= BPF_HDR_EXT_FLAGS_DIR_OUT; |
3359 | 0 | } else { |
3360 | 0 | ehp->bh_flags |= BPF_HDR_EXT_FLAGS_DIR_IN; |
3361 | 0 | } |
3362 | 0 | } else if (outbound) { |
3363 | 0 | ehp->bh_flags |= BPF_HDR_EXT_FLAGS_DIR_OUT; |
3364 | | |
3365 | | /* only do lookups on non-raw INPCB */ |
3366 | 0 | if ((m->m_pkthdr.pkt_flags & (PKTF_FLOW_ID | |
3367 | 0 | PKTF_FLOW_LOCALSRC | PKTF_FLOW_RAWSOCK)) == |
3368 | 0 | (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC) && |
3369 | 0 | m->m_pkthdr.pkt_flowsrc == FLOWSRC_INPCB) { |
3370 | 0 | ehp->bh_flowid = m->m_pkthdr.pkt_flowid; |
3371 | 0 | ehp->bh_proto = m->m_pkthdr.pkt_proto; |
3372 | 0 | } |
3373 | 0 | ehp->bh_svc = so_svc2tc(m->m_pkthdr.pkt_svc); |
3374 | 0 | if (m->m_pkthdr.pkt_flags & PKTF_TCP_REXMT) { |
3375 | 0 | ehp->bh_pktflags |= BPF_PKTFLAGS_TCP_REXMT; |
3376 | 0 | } |
3377 | 0 | if (m->m_pkthdr.pkt_flags & PKTF_START_SEQ) { |
3378 | 0 | ehp->bh_pktflags |= BPF_PKTFLAGS_START_SEQ; |
3379 | 0 | } |
3380 | 0 | if (m->m_pkthdr.pkt_flags & PKTF_LAST_PKT) { |
3381 | 0 | ehp->bh_pktflags |= BPF_PKTFLAGS_LAST_PKT; |
3382 | 0 | } |
3383 | 0 | if (m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA) { |
3384 | 0 | ehp->bh_unsent_bytes = |
3385 | 0 | m->m_pkthdr.bufstatus_if; |
3386 | 0 | ehp->bh_unsent_snd = |
3387 | 0 | m->m_pkthdr.bufstatus_sndbuf; |
3388 | 0 | } |
3389 | 0 | } else { |
3390 | 0 | ehp->bh_flags |= BPF_HDR_EXT_FLAGS_DIR_IN; |
3391 | 0 | } |
3392 | 0 | payload = (u_char *)ehp + hdrlen; |
3393 | 0 | } else { |
3394 | 0 | hp = (struct bpf_hdr *)(void *)(d->bd_sbuf + curlen); |
3395 | 0 | hp->bh_tstamp.tv_sec = tv.tv_sec; |
3396 | 0 | hp->bh_tstamp.tv_usec = tv.tv_usec; |
3397 | 0 | hp->bh_datalen = pkt->bpfp_total_length; |
3398 | 0 | hp->bh_hdrlen = hdrlen; |
3399 | 0 | caplen = hp->bh_caplen = totlen - hdrlen; |
3400 | 0 | payload = (u_char *)hp + hdrlen; |
3401 | 0 | } |
3402 | | /* |
3403 | | * Copy the packet data into the store buffer and update its length. |
3404 | | */ |
3405 | 0 | copy_bpf_packet(pkt, payload, caplen); |
3406 | 0 | d->bd_slen = curlen + totlen; |
3407 | 0 | d->bd_scnt += 1; |
3408 | |
3409 | 0 | if (do_wakeup) { |
3410 | 0 | bpf_wakeup(d); |
3411 | 0 | } |
3412 | 0 | } |
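/*
 * Usage sketch (editor's addition): the headers appended above are what
 * a userland reader walks after read(2) returns a hold buffer. Records
 * are a struct bpf_hdr followed by bh_caplen captured bytes, padded so
 * the next header is word aligned; handle_packet() is a hypothetical
 * consumer, everything else is the standard <net/bpf.h> API.
 */
#include <sys/types.h>
#include <net/bpf.h>

extern void handle_packet(const u_char *pkt, u_int caplen);	/* hypothetical */

static void
walk_records(const u_char *buf, ssize_t nread)
{
	const u_char *p = buf;

	while (p < buf + nread) {
		const struct bpf_hdr *hp =
		    (const struct bpf_hdr *)(const void *)p;

		/* captured data starts bh_hdrlen bytes past the header */
		handle_packet(p + hp->bh_hdrlen, hp->bh_caplen);

		/* step to the next word-aligned record */
		p += BPF_WORDALIGN(hp->bh_hdrlen + hp->bh_caplen);
	}
}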
3413 | | |
3414 | | /* |
3415 | | * Allocate the packet buffers for a descriptor and reset its counts. |
3416 | | */ |
3417 | | static int |
3418 | | bpf_allocbufs(struct bpf_d *d) |
3419 | 0 | { |
3420 | 0 | if (d->bd_sbuf != NULL) { |
3421 | 0 | FREE(d->bd_sbuf, M_DEVBUF); |
3422 | 0 | d->bd_sbuf = NULL; |
3423 | 0 | } |
3424 | 0 | if (d->bd_hbuf != NULL) { |
3425 | 0 | FREE(d->bd_hbuf, M_DEVBUF); |
3426 | 0 | d->bd_hbuf = NULL; |
3427 | 0 | } |
3428 | 0 | if (d->bd_fbuf != NULL) { |
3429 | 0 | FREE(d->bd_fbuf, M_DEVBUF); |
3430 | 0 | d->bd_fbuf = NULL; |
3431 | 0 | } |
3432 | |
3433 | 0 | d->bd_fbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT); |
3434 | 0 | if (d->bd_fbuf == NULL) { |
3435 | 0 | return ENOBUFS; |
3436 | 0 | } |
3437 | | |
3438 | 0 | d->bd_sbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT); |
3439 | 0 | if (d->bd_sbuf == NULL) { |
3440 | 0 | FREE(d->bd_fbuf, M_DEVBUF); |
3441 | 0 | d->bd_fbuf = NULL; |
3442 | 0 | return ENOBUFS; |
3443 | 0 | } |
3444 | 0 | d->bd_slen = 0; |
3445 | 0 | d->bd_hlen = 0; |
3446 | 0 | d->bd_scnt = 0; |
3447 | 0 | d->bd_hcnt = 0; |
3448 | 0 | return 0; |
3449 | 0 | } |
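/*
 * Usage sketch (editor's addition): bd_bufsize is already fixed by the
 * time bpf_allocbufs() runs, so a reader that wants larger store/hold
 * buffers must issue BIOCSBLEN before binding the device to an
 * interface with BIOCSETIF. The kernel may clamp the request, so the
 * canonical pattern reads the granted size back with BIOCGBLEN.
 */
#include <sys/ioctl.h>
#include <net/bpf.h>
#include <err.h>

static u_int
set_bpf_buffer_size(int fd, u_int want)		/* fd: an open /dev/bpfN */
{
	u_int blen = want;

	if (ioctl(fd, BIOCSBLEN, &blen) == -1)	/* request; kernel may clamp */
		err(1, "BIOCSBLEN");
	if (ioctl(fd, BIOCGBLEN, &blen) == -1)	/* read back granted size */
		err(1, "BIOCGBLEN");
	return blen;
}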
3450 | | |
3451 | | /* |
3452 | | * Free buffers currently in use by a descriptor. |
3453 | | * Called on close. |
3454 | | */ |
3455 | | static void |
3456 | | bpf_freed(struct bpf_d *d) |
3457 | 0 | { |
3458 | | /* |
3459 | | * We don't need to lock out interrupts since this descriptor has |
3460 | | * been detached from its interface and it hasn't yet been marked |
3461 | | * free. |
3462 | | */ |
3463 | 0 | if (d->bd_hbuf_read != 0) { |
3464 | 0 | panic("bpf buffer freed during read"); |
3465 | 0 | } |
3466 | | |
3467 | 0 | if (d->bd_sbuf != 0) { |
3468 | 0 | FREE(d->bd_sbuf, M_DEVBUF); |
3469 | 0 | if (d->bd_hbuf != 0) { |
3470 | 0 | FREE(d->bd_hbuf, M_DEVBUF); |
3471 | 0 | } |
3472 | 0 | if (d->bd_fbuf != 0) { |
3473 | 0 | FREE(d->bd_fbuf, M_DEVBUF); |
3474 | 0 | } |
3475 | 0 | } |
3476 | 0 | if (d->bd_filter) { |
3477 | 0 | FREE(d->bd_filter, M_DEVBUF); |
3478 | 0 | } |
3479 | 0 | } |
3480 | | |
3481 | | /* |
3482 | | * Attach an interface to bpf. ifp is the interface to attach; dlt |
3483 | | * is the link layer type; hdrlen is the fixed |
3484 | | * size of the link header (variable length headers not yet supported). |
3485 | | */ |
3486 | | void |
3487 | | bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen) |
3488 | 2 | { |
3489 | 2 | bpf_attach(ifp, dlt, hdrlen, NULL, NULL); |
3490 | 2 | } |
3491 | | |
3492 | | errno_t |
3493 | | bpf_attach( |
3494 | | ifnet_t ifp, |
3495 | | u_int32_t dlt, |
3496 | | u_int32_t hdrlen, |
3497 | | bpf_send_func send, |
3498 | | bpf_tap_func tap) |
3499 | 2 | { |
3500 | 2 | struct bpf_if *bp; |
3501 | 2 | struct bpf_if *bp_new; |
3502 | 2 | struct bpf_if *bp_before_first = NULL; |
3503 | 2 | struct bpf_if *bp_first = NULL; |
3504 | 2 | struct bpf_if *bp_last = NULL; |
3505 | 2 | boolean_t found; |
3506 | | |
3507 | 2 | bp_new = (struct bpf_if *) _MALLOC(sizeof(*bp_new), M_DEVBUF, |
3508 | 2 | M_WAIT | M_ZERO); |
3509 | 2 | if (bp_new == 0) { |
3510 | 0 | panic("bpfattach"); |
3511 | 0 | } |
3512 | | |
3513 | 2 | lck_mtx_lock(bpf_mlock); |
3514 | | |
3515 | | /* |
3516 | | * Check if this interface/dlt is already attached. Remember the |
3517 | | * first and last attachment for this interface, as well as the |
3518 | | * element before the first attachment. |
3519 | | */ |
3520 | 2 | found = FALSE; |
3521 | 3 | for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { |
3522 | 1 | if (bp->bif_ifp != ifp) { |
3523 | 1 | if (bp_first != NULL) { |
3524 | | /* no more elements for this interface */ |
3525 | 0 | break; |
3526 | 0 | } |
3527 | 1 | bp_before_first = bp; |
3528 | 1 | } else { |
3529 | 0 | if (bp->bif_dlt == dlt) { |
3530 | 0 | found = TRUE; |
3531 | 0 | break; |
3532 | 0 | } |
3533 | 0 | if (bp_first == NULL) { |
3534 | 0 | bp_first = bp; |
3535 | 0 | } |
3536 | 0 | bp_last = bp; |
3537 | 0 | } |
3538 | 1 | } |
3539 | 2 | if (found) { |
3540 | 0 | lck_mtx_unlock(bpf_mlock); |
3541 | 0 | printf("bpfattach - %s with dlt %d is already attached\n", |
3542 | 0 | if_name(ifp), dlt); |
3543 | 0 | FREE(bp_new, M_DEVBUF); |
3544 | 0 | return EEXIST; |
3545 | 0 | } |
3546 | | |
3547 | 2 | bp_new->bif_ifp = ifp; |
3548 | 2 | bp_new->bif_dlt = dlt; |
3549 | 2 | bp_new->bif_send = send; |
3550 | 2 | bp_new->bif_tap = tap; |
3551 | | |
3552 | 2 | if (bp_first == NULL) { |
3553 | | /* No other entries for this ifp */ |
3554 | 2 | bp_new->bif_next = bpf_iflist; |
3555 | 2 | bpf_iflist = bp_new; |
3556 | 2 | } else { |
3557 | 0 | if (ifnet_type(ifp) == IFT_ETHER && dlt == DLT_EN10MB) { |
3558 | | /* Make this the first entry for this interface */ |
3559 | 0 | if (bp_before_first != NULL) { |
3560 | | /* point the previous to us */ |
3561 | 0 | bp_before_first->bif_next = bp_new; |
3562 | 0 | } else { |
3563 | | /* we're the new head */ |
3564 | 0 | bpf_iflist = bp_new; |
3565 | 0 | } |
3566 | 0 | bp_new->bif_next = bp_first; |
3567 | 0 | } else { |
3568 | | /* Add this after the last entry for this interface */ |
3569 | 0 | bp_new->bif_next = bp_last->bif_next; |
3570 | 0 | bp_last->bif_next = bp_new; |
3571 | 0 | } |
3572 | 0 | } |
3573 | | |
3574 | | /* |
3575 | | * Compute the length of the bpf header. This is not necessarily |
3576 | | * equal to SIZEOF_BPF_HDR because we want to insert spacing such |
3577 | | * that the network layer header begins on a longword boundary (for |
3578 | | * performance reasons and to alleviate alignment restrictions). |
3579 | | */ |
3580 | 2 | bp_new->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; |
3581 | 2 | bp_new->bif_exthdrlen = BPF_WORDALIGN(hdrlen + |
3582 | 2 | sizeof(struct bpf_hdr_ext)) - hdrlen; |
3583 | | |
3584 | | /* Take a reference on the interface */ |
3585 | 2 | ifnet_reference(ifp); |
3586 | | |
3587 | 2 | lck_mtx_unlock(bpf_mlock); |
3588 | | |
3589 | | #ifndef __APPLE__ |
3590 | | if (bootverbose) { |
3591 | | printf("bpf: %s attached\n", if_name(ifp)); |
3592 | | } |
3593 | | #endif |
3594 | | |
3595 | 2 | return 0; |
3596 | 2 | } |
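/*
 * Worked example and usage sketch (editor's addition). For Ethernet,
 * hdrlen is the 14-byte link header; assuming SIZEOF_BPF_HDR is 18,
 * bif_hdrlen = BPF_WORDALIGN(14 + 18) - 14 = 32 - 14 = 18. A record's
 * bpf header then occupies 18 bytes and the 14-byte link header
 * follows, so the network-layer header lands at offset 32, a longword
 * boundary. A driver-side call might look like the hypothetical helper
 * below; my_send/my_tap are placeholder callbacks and may be NULL for
 * plain bpfattach()-style behavior.
 */
static void
my_driver_attach_bpf(ifnet_t ifp, bpf_send_func my_send, bpf_tap_func my_tap)
{
	errno_t error;

	error = bpf_attach(ifp, DLT_EN10MB, 14 /* Ethernet header */,
	    my_send, my_tap);
	if (error != 0) {
		printf("%s: bpf_attach failed: %d\n", if_name(ifp), error);
	}
}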
3597 | | |
3598 | | /* |
3599 | | * Detach bpf from an interface. This involves detaching each descriptor |
3600 | | * associated with the interface, and leaving bd_bif NULL. Notify each |
3601 | | * descriptor as it's detached so that any sleepers wake up and get |
3602 | | * ENXIO. |
3603 | | */ |
3604 | | void |
3605 | | bpfdetach(struct ifnet *ifp) |
3606 | 0 | { |
3607 | 0 | struct bpf_if *bp, *bp_prev, *bp_next; |
3608 | 0 | struct bpf_d *d; |
3609 | |
3610 | 0 | if (bpf_debug != 0) { |
3611 | 0 | printf("%s: %s\n", __func__, if_name(ifp)); |
3612 | 0 | } |
3613 | |
3614 | 0 | lck_mtx_lock(bpf_mlock); |
3615 | | |
3616 | | /* |
3617 | | * Detach and free the devices attached to this interface, |
3618 | | * keeping the lock held throughout to maintain |
3619 | | * the integrity of the interface list. |
3620 | | */ |
3621 | 0 | bp_prev = NULL; |
3622 | 0 | for (bp = bpf_iflist; bp != NULL; bp = bp_next) { |
3623 | 0 | bp_next = bp->bif_next; |
3624 | |
3625 | 0 | if (ifp != bp->bif_ifp) { |
3626 | 0 | bp_prev = bp; |
3627 | 0 | continue; |
3628 | 0 | } |
3629 | | /* Unlink from the interface list */ |
3630 | 0 | if (bp_prev) { |
3631 | 0 | bp_prev->bif_next = bp->bif_next; |
3632 | 0 | } else { |
3633 | 0 | bpf_iflist = bp->bif_next; |
3634 | 0 | } |
3635 | | |
3636 | | /* Detach the devices attached to the interface */ |
3637 | 0 | while ((d = bp->bif_dlist) != NULL) { |
3638 | | /* |
3639 | | * Take an extra reference to prevent the device |
3640 | | * from being freed when bpf_detachd() releases |
3641 | | * the reference for the interface list |
3642 | | */ |
3643 | 0 | bpf_acquire_d(d); |
3644 | 0 | bpf_detachd(d, 0); |
3645 | 0 | bpf_wakeup(d); |
3646 | 0 | bpf_release_d(d); |
3647 | 0 | } |
3648 | 0 | ifnet_release(ifp); |
3649 | 0 | } |
3650 | |
3651 | 0 | lck_mtx_unlock(bpf_mlock); |
3652 | 0 | } |
3653 | | |
3654 | | void |
3655 | | bpf_init(__unused void *unused) |
3656 | 0 | { |
3657 | 0 | #ifdef __APPLE__ |
3658 | 0 | int i; |
3659 | 0 | int maj; |
3660 | |
3661 | 0 | if (bpf_devsw_installed == 0) { |
3662 | 0 | bpf_devsw_installed = 1; |
3663 | 0 | bpf_mlock_grp_attr = lck_grp_attr_alloc_init(); |
3664 | 0 | bpf_mlock_grp = lck_grp_alloc_init("bpf", bpf_mlock_grp_attr); |
3665 | 0 | bpf_mlock_attr = lck_attr_alloc_init(); |
3666 | 0 | lck_mtx_init(bpf_mlock, bpf_mlock_grp, bpf_mlock_attr); |
3667 | 0 | maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw); |
3668 | 0 | if (maj == -1) { |
3669 | 0 | if (bpf_mlock_attr) { |
3670 | 0 | lck_attr_free(bpf_mlock_attr); |
3671 | 0 | } |
3672 | 0 | if (bpf_mlock_grp) { |
3673 | 0 | lck_grp_free(bpf_mlock_grp); |
3674 | 0 | } |
3675 | 0 | if (bpf_mlock_grp_attr) { |
3676 | 0 | lck_grp_attr_free(bpf_mlock_grp_attr); |
3677 | 0 | } |
3678 | |
3679 | 0 | bpf_mlock = NULL; |
3680 | 0 | bpf_mlock_attr = NULL; |
3681 | 0 | bpf_mlock_grp = NULL; |
3682 | 0 | bpf_mlock_grp_attr = NULL; |
3683 | 0 | bpf_devsw_installed = 0; |
3684 | 0 | printf("bpf_init: failed to allocate a major number\n"); |
3685 | 0 | return; |
3686 | 0 | } |
3687 | | |
3688 | 0 | for (i = 0; i < NBPFILTER; i++) { |
3689 | 0 | bpf_make_dev_t(maj); |
3690 | 0 | } |
3691 | 0 | } |
3692 | | #else |
3693 | | cdevsw_add(&bpf_cdevsw); |
3694 | | #endif |
3695 | 0 | } |
3696 | | |
3697 | | #ifndef __APPLE__ |
3698 | | SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, bpf_drvinit, NULL); |
3699 | | #endif |
3700 | | |
3701 | | static int |
3702 | | sysctl_bpf_maxbufsize SYSCTL_HANDLER_ARGS |
3703 | 0 | { |
3704 | 0 | #pragma unused(arg1, arg2) |
3705 | 0 | int i, err; |
3706 | |
3707 | 0 | i = bpf_maxbufsize; |
3708 | |
3709 | 0 | err = sysctl_handle_int(oidp, &i, 0, req); |
3710 | 0 | if (err != 0 || req->newptr == USER_ADDR_NULL) { |
3711 | 0 | return err; |
3712 | 0 | } |
3713 | | |
3714 | 0 | if (i < 0 || i > BPF_MAXSIZE_CAP) { |
3715 | 0 | i = BPF_MAXSIZE_CAP; |
3716 | 0 | } |
3717 | |
3718 | 0 | bpf_maxbufsize = i; |
3719 | 0 | return err; |
3720 | 0 | } |
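/*
 * Usage sketch (editor's addition): this handler backs a sysctl that
 * can be exercised from userland with sysctlbyname(3). The OID name
 * "debug.bpf_maxbufsize" is an assumption here, since the SYSCTL
 * declaration sits outside this excerpt.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int cur = 0;
	size_t len = sizeof(cur);

	if (sysctlbyname("debug.bpf_maxbufsize", &cur, &len, NULL, 0) == 0) {
		printf("bpf_maxbufsize = %d\n", cur);
	}
	return 0;
}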