Coverage Report

Created: 2024-10-03 06:24

/src/SockFuzzer/third_party/xnu/bsd/netinet/igmp.c
Line
Count
Source (jump to first uncovered line)
1
/*
2
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3
 *
4
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5
 *
6
 * This file contains Original Code and/or Modifications of Original Code
7
 * as defined in and that are subject to the Apple Public Source License
8
 * Version 2.0 (the 'License'). You may not use this file except in
9
 * compliance with the License. The rights granted to you under the License
10
 * may not be used to create, or enable the creation or redistribution of,
11
 * unlawful or unlicensed copies of an Apple operating system, or to
12
 * circumvent, violate, or enable the circumvention or violation of, any
13
 * terms of an Apple operating system software license agreement.
14
 *
15
 * Please obtain a copy of the License at
16
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17
 *
18
 * The Original Code and all software distributed under the License are
19
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23
 * Please see the License for the specific language governing rights and
24
 * limitations under the License.
25
 *
26
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27
 */
28
/*-
29
 * Copyright (c) 2007-2009 Bruce Simpson.
30
 * Copyright (c) 1988 Stephen Deering.
31
 * Copyright (c) 1992, 1993
32
 *  The Regents of the University of California.  All rights reserved.
33
 *
34
 * This code is derived from software contributed to Berkeley by
35
 * Stephen Deering of Stanford University.
36
 *
37
 * Redistribution and use in source and binary forms, with or without
38
 * modification, are permitted provided that the following conditions
39
 * are met:
40
 * 1. Redistributions of source code must retain the above copyright
41
 *    notice, this list of conditions and the following disclaimer.
42
 * 2. Redistributions in binary form must reproduce the above copyright
43
 *    notice, this list of conditions and the following disclaimer in the
44
 *    documentation and/or other materials provided with the distribution.
45
 * 3. All advertising materials mentioning features or use of this software
46
 *    must display the following acknowledgement:
47
 *  This product includes software developed by the University of
48
 *  California, Berkeley and its contributors.
49
 * 4. Neither the name of the University nor the names of its contributors
50
 *    may be used to endorse or promote products derived from this software
51
 *    without specific prior written permission.
52
 *
53
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63
 * SUCH DAMAGE.
64
 *
65
 *  @(#)igmp.c  8.1 (Berkeley) 7/19/93
66
 */
67
/*
68
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
69
 * support for mandatory and extensible security protections.  This notice
70
 * is included in support of clause 2.2 (b) of the Apple Public License,
71
 * Version 2.0.
72
 */
73
74
/*
75
 * Internet Group Management Protocol (IGMP) routines.
76
 * [RFC1112, RFC2236, RFC3376]
77
 *
78
 * Written by Steve Deering, Stanford, May 1988.
79
 * Modified by Rosen Sharma, Stanford, Aug 1994.
80
 * Modified by Bill Fenner, Xerox PARC, Feb 1995.
81
 * Modified to fully comply to IGMPv2 by Bill Fenner, Oct 1995.
82
 * Significantly rewritten for IGMPv3, VIMAGE, and SMP by Bruce Simpson.
83
 *
84
 * MULTICAST Revision: 3.5.1.4
85
 */
86
87
#include <sys/cdefs.h>
88
89
#include <sys/param.h>
90
#include <sys/systm.h>
91
#include <sys/malloc.h>
92
#include <sys/mbuf.h>
93
#include <sys/socket.h>
94
#include <sys/protosw.h>
95
#include <sys/kernel.h>
96
#include <sys/sysctl.h>
97
#include <sys/mcache.h>
98
99
#include <libkern/libkern.h>
100
#include <kern/zalloc.h>
101
102
#include <net/if.h>
103
#include <net/route.h>
104
105
#include <netinet/in.h>
106
#include <netinet/in_var.h>
107
#include <netinet/in_systm.h>
108
#include <netinet/ip.h>
109
#include <netinet/ip_var.h>
110
#include <netinet/igmp.h>
111
#include <netinet/igmp_var.h>
112
#include <netinet/kpi_ipfilter_var.h>
113
114
SLIST_HEAD(igmp_inm_relhead, in_multi);
115
116
static void     igi_initvar(struct igmp_ifinfo *, struct ifnet *, int);
117
static struct igmp_ifinfo *igi_alloc(zalloc_flags_t);
118
static void     igi_free(struct igmp_ifinfo *);
119
static void     igi_delete(const struct ifnet *, struct igmp_inm_relhead *);
120
static void     igmp_dispatch_queue(struct igmp_ifinfo *, struct ifqueue *,
121
    int, const int);
122
static void     igmp_final_leave(struct in_multi *, struct igmp_ifinfo *,
123
    struct igmp_tparams *);
124
static int      igmp_handle_state_change(struct in_multi *,
125
    struct igmp_ifinfo *, struct igmp_tparams *);
126
static int      igmp_initial_join(struct in_multi *, struct igmp_ifinfo *,
127
    struct igmp_tparams *);
128
static int      igmp_input_v1_query(struct ifnet *, const struct ip *,
129
    const struct igmp *);
130
static int      igmp_input_v2_query(struct ifnet *, const struct ip *,
131
    const struct igmp *);
132
static int      igmp_input_v3_query(struct ifnet *, const struct ip *,
133
    /*const*/ struct igmpv3 *);
134
static int      igmp_input_v3_group_query(struct in_multi *,
135
    int, /*const*/ struct igmpv3 *);
136
static int      igmp_input_v1_report(struct ifnet *, struct mbuf *,
137
    /*const*/ struct ip *, /*const*/ struct igmp *);
138
static int      igmp_input_v2_report(struct ifnet *, struct mbuf *,
139
    /*const*/ struct ip *, /*const*/ struct igmp *);
140
static void     igmp_sendpkt(struct mbuf *);
141
static __inline__ int   igmp_isgroupreported(const struct in_addr);
142
static struct mbuf *igmp_ra_alloc(void);
143
#ifdef IGMP_DEBUG
144
static const char *igmp_rec_type_to_str(const int);
145
#endif
146
static uint32_t igmp_set_version(struct igmp_ifinfo *, const int);
147
static void     igmp_flush_relq(struct igmp_ifinfo *,
148
    struct igmp_inm_relhead *);
149
static int      igmp_v1v2_queue_report(struct in_multi *, const int);
150
static void     igmp_v1v2_process_group_timer(struct in_multi *, const int);
151
static void     igmp_v1v2_process_querier_timers(struct igmp_ifinfo *);
152
static uint32_t igmp_v2_update_group(struct in_multi *, const int);
153
static void     igmp_v3_cancel_link_timers(struct igmp_ifinfo *);
154
static uint32_t igmp_v3_dispatch_general_query(struct igmp_ifinfo *);
155
static struct mbuf *
156
igmp_v3_encap_report(struct ifnet *, struct mbuf *);
157
static int      igmp_v3_enqueue_group_record(struct ifqueue *,
158
    struct in_multi *, const int, const int, const int);
159
static int      igmp_v3_enqueue_filter_change(struct ifqueue *,
160
    struct in_multi *);
161
static void     igmp_v3_process_group_timers(struct igmp_ifinfo *,
162
    struct ifqueue *, struct ifqueue *, struct in_multi *,
163
    const unsigned int);
164
static int      igmp_v3_merge_state_changes(struct in_multi *,
165
    struct ifqueue *);
166
static void     igmp_v3_suppress_group_record(struct in_multi *);
167
static int      sysctl_igmp_ifinfo SYSCTL_HANDLER_ARGS;
168
static int      sysctl_igmp_gsr SYSCTL_HANDLER_ARGS;
169
static int      sysctl_igmp_default_version SYSCTL_HANDLER_ARGS;
170
171
static int igmp_timeout_run;            /* IGMP timer is scheduled to run */
172
void igmp_timeout(void *);
173
void igmp_sched_timeout(void);
174
175
static struct mbuf *m_raopt;            /* Router Alert option */
176
177
static int querier_present_timers_running;      /* IGMPv1/v2 older version
178
                                                 * querier present */
179
static int interface_timers_running;            /* IGMPv3 general
180
                                                 * query response */
181
static int state_change_timers_running;         /* IGMPv3 state-change
182
                                                 * retransmit */
183
static int current_state_timers_running;        /* IGMPv1/v2 host
184
                                                 * report; IGMPv3 g/sg
185
                                                 * query response */
186
187
/*
188
 * Subsystem lock macros.
189
 */
190
#define IGMP_LOCK()                     \
191
409k
  lck_mtx_lock(&igmp_mtx)
192
#define IGMP_LOCK_ASSERT_HELD()         \
193
408k
  LCK_MTX_ASSERT(&igmp_mtx, LCK_MTX_ASSERT_OWNED)
194
#define IGMP_LOCK_ASSERT_NOTHELD()      \
195
6.85k
  LCK_MTX_ASSERT(&igmp_mtx, LCK_MTX_ASSERT_NOTOWNED)
196
#define IGMP_UNLOCK()                   \
197
409k
  lck_mtx_unlock(&igmp_mtx)
198
199
static LIST_HEAD(, igmp_ifinfo) igi_head;
200
static struct igmpstat_v3 igmpstat_v3 = {
201
  .igps_version = IGPS_VERSION_3,
202
  .igps_len = sizeof(struct igmpstat_v3),
203
};
204
static struct igmpstat igmpstat; /* old IGMPv2 stats structure */
205
static struct timeval igmp_gsrdelay = {.tv_sec = 10, .tv_usec = 0};
206
207
static int igmp_recvifkludge = 1;
208
static int igmp_sendra = 1;
209
static int igmp_sendlocal = 1;
210
static int igmp_v1enable = 1;
211
static int igmp_v2enable = 1;
212
static int igmp_legacysupp = 0;
213
static int igmp_default_version = IGMP_VERSION_3;
214
215
SYSCTL_STRUCT(_net_inet_igmp, IGMPCTL_STATS, stats, CTLFLAG_RD | CTLFLAG_LOCKED,
216
    &igmpstat, igmpstat, "");
217
SYSCTL_STRUCT(_net_inet_igmp, OID_AUTO, v3stats,
218
    CTLFLAG_RD | CTLFLAG_LOCKED, &igmpstat_v3, igmpstat_v3, "");
219
SYSCTL_INT(_net_inet_igmp, OID_AUTO, recvifkludge, CTLFLAG_RW | CTLFLAG_LOCKED,
220
    &igmp_recvifkludge, 0,
221
    "Rewrite IGMPv1/v2 reports from 0.0.0.0 to contain subnet address");
222
SYSCTL_INT(_net_inet_igmp, OID_AUTO, sendra, CTLFLAG_RW | CTLFLAG_LOCKED,
223
    &igmp_sendra, 0,
224
    "Send IP Router Alert option in IGMPv2/v3 messages");
225
SYSCTL_INT(_net_inet_igmp, OID_AUTO, sendlocal, CTLFLAG_RW | CTLFLAG_LOCKED,
226
    &igmp_sendlocal, 0,
227
    "Send IGMP membership reports for 224.0.0.0/24 groups");
228
SYSCTL_INT(_net_inet_igmp, OID_AUTO, v1enable, CTLFLAG_RW | CTLFLAG_LOCKED,
229
    &igmp_v1enable, 0,
230
    "Enable backwards compatibility with IGMPv1");
231
SYSCTL_INT(_net_inet_igmp, OID_AUTO, v2enable, CTLFLAG_RW | CTLFLAG_LOCKED,
232
    &igmp_v2enable, 0,
233
    "Enable backwards compatibility with IGMPv2");
234
SYSCTL_INT(_net_inet_igmp, OID_AUTO, legacysupp, CTLFLAG_RW | CTLFLAG_LOCKED,
235
    &igmp_legacysupp, 0,
236
    "Allow v1/v2 reports to suppress v3 group responses");
237
SYSCTL_PROC(_net_inet_igmp, OID_AUTO, default_version,
238
    CTLTYPE_INT | CTLFLAG_RW,
239
    &igmp_default_version, 0, sysctl_igmp_default_version, "I",
240
    "Default version of IGMP to run on each interface");
241
SYSCTL_PROC(_net_inet_igmp, OID_AUTO, gsrdelay,
242
    CTLTYPE_INT | CTLFLAG_RW,
243
    &igmp_gsrdelay.tv_sec, 0, sysctl_igmp_gsr, "I",
244
    "Rate limit for IGMPv3 Group-and-Source queries in seconds");
245
#ifdef IGMP_DEBUG
246
int igmp_debug = 0;
247
SYSCTL_INT(_net_inet_igmp, OID_AUTO,
248
    debug, CTLFLAG_RW | CTLFLAG_LOCKED, &igmp_debug, 0, "");
249
#endif
250
251
SYSCTL_NODE(_net_inet_igmp, OID_AUTO, ifinfo, CTLFLAG_RD | CTLFLAG_LOCKED,
252
    sysctl_igmp_ifinfo, "Per-interface IGMPv3 state");
253
254
/* Lock group and attribute for igmp_mtx */
255
static lck_attr_t       *igmp_mtx_attr;
256
static lck_grp_t        *igmp_mtx_grp;
257
static lck_grp_attr_t   *igmp_mtx_grp_attr;
258
259
/*
260
 * Locking and reference counting:
261
 *
262
 * igmp_mtx mainly protects igi_head.  In cases where both igmp_mtx and
263
 * in_multihead_lock must be held, the former must be acquired first in order
264
 * to maintain lock ordering.  It is not a requirement that igmp_mtx be
265
 * acquired first before in_multihead_lock, but in case both must be acquired
266
 * in succession, the correct lock ordering must be followed.
267
 *
268
 * Instead of walking the if_multiaddrs list at the interface and returning
269
 * the ifma_protospec value of a matching entry, we search the global list
270
 * of in_multi records and find it that way; this is done with in_multihead
271
 * lock held.  Doing so avoids the race condition issues that many other BSDs
272
 * suffer from (therefore in our implementation, ifma_protospec will never be
273
 * NULL for as long as the in_multi is valid.)
274
 *
275
 * The above creates a requirement for the in_multi to stay in in_multihead
276
 * list even after the final IGMP leave (in IGMPv3 mode) until no longer needs
277
 * be retransmitted (this is not required for IGMPv1/v2.)  In order to handle
278
 * this, the request and reference counts of the in_multi are bumped up when
279
 * the state changes to IGMP_LEAVING_MEMBER, and later dropped in the timeout
280
 * handler.  Each in_multi holds a reference to the underlying igmp_ifinfo.
281
 *
282
 * Thus, the permitted lock oder is:
283
 *
284
 *  igmp_mtx, in_multihead_lock, inm_lock, igi_lock
285
 *
286
 * Any may be taken independently, but if any are held at the same time,
287
 * the above lock order must be followed.
288
 */
289
static decl_lck_mtx_data(, igmp_mtx);
290
static int igmp_timers_are_running;
291
292
0
#define IGMP_ADD_DETACHED_INM(_head, _inm) {                            \
293
0
  SLIST_INSERT_HEAD(_head, _inm, inm_dtle);                       \
294
0
}
295
296
408k
#define IGMP_REMOVE_DETACHED_INM(_head) {                               \
297
408k
  struct in_multi *_inm, *_inm_tmp;                               \
298
408k
  SLIST_FOREACH_SAFE(_inm, _head, inm_dtle, _inm_tmp) {           \
299
0
          SLIST_REMOVE(_head, _inm, in_multi, inm_dtle);          \
300
0
          INM_REMREF(_inm);                                       \
301
0
  }                                                               \
302
408k
  VERIFY(SLIST_EMPTY(_head));                                     \
303
408k
}
304
305
static ZONE_DECLARE(igi_zone, "igmp_ifinfo",
306
    sizeof(struct igmp_ifinfo), ZC_ZFREE_CLEARMEM);
307
308
/* Store IGMPv3 record count in the module private scratch space */
309
0
#define vt_nrecs        pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val16[0]
310
311
static __inline void
312
igmp_save_context(struct mbuf *m, struct ifnet *ifp)
313
0
{
314
0
  m->m_pkthdr.rcvif = ifp;
315
0
}
316
317
static __inline void
318
igmp_scrub_context(struct mbuf *m)
319
0
{
320
0
  m->m_pkthdr.rcvif = NULL;
321
0
}
322
323
#ifdef IGMP_DEBUG
324
static __inline const char *
325
inet_ntop_haddr(in_addr_t haddr, char *buf, socklen_t size)
326
0
{
327
0
  struct in_addr ia;
328
329
0
  ia.s_addr = htonl(haddr);
330
0
  return inet_ntop(AF_INET, &ia, buf, size);
331
0
}
332
#endif
333
334
/*
335
 * Restore context from a queued IGMP output chain.
336
 * Return saved ifp.
337
 */
338
static __inline struct ifnet *
339
igmp_restore_context(struct mbuf *m)
340
0
{
341
0
  return m->m_pkthdr.rcvif;
342
0
}
343
344
/*
345
 * Retrieve or set default IGMP version.
346
 */
347
static int
348
sysctl_igmp_default_version SYSCTL_HANDLER_ARGS
349
0
{
350
0
#pragma unused(oidp, arg2)
351
0
  int      error;
352
0
  int      new;
353
354
0
  IGMP_LOCK();
355
356
0
  error = SYSCTL_OUT(req, arg1, sizeof(int));
357
0
  if (error || !req->newptr) {
358
0
    goto out_locked;
359
0
  }
360
361
0
  new = igmp_default_version;
362
363
0
  error = SYSCTL_IN(req, &new, sizeof(int));
364
0
  if (error) {
365
0
    goto out_locked;
366
0
  }
367
368
0
  if (new < IGMP_VERSION_1 || new > IGMP_VERSION_3) {
369
0
    error = EINVAL;
370
0
    goto out_locked;
371
0
  }
372
373
0
  IGMP_PRINTF(("%s: change igmp_default_version from %d to %d\n",
374
0
      __func__, igmp_default_version, new));
375
376
0
  igmp_default_version = new;
377
378
0
out_locked:
379
0
  IGMP_UNLOCK();
380
0
  return error;
381
0
}
382
383
/*
384
 * Retrieve or set threshold between group-source queries in seconds.
385
 *
386
 */
387
static int
388
sysctl_igmp_gsr SYSCTL_HANDLER_ARGS
389
0
{
390
0
#pragma unused(arg1, arg2)
391
0
  int error;
392
0
  int i;
393
394
0
  IGMP_LOCK();
395
396
0
  i = (int)igmp_gsrdelay.tv_sec;
397
398
0
  error = sysctl_handle_int(oidp, &i, 0, req);
399
0
  if (error || !req->newptr) {
400
0
    goto out_locked;
401
0
  }
402
403
0
  if (i < -1 || i >= 60) {
404
0
    error = EINVAL;
405
0
    goto out_locked;
406
0
  }
407
408
0
  igmp_gsrdelay.tv_sec = i;
409
410
0
out_locked:
411
0
  IGMP_UNLOCK();
412
0
  return error;
413
0
}
414
415
/*
416
 * Expose struct igmp_ifinfo to userland, keyed by ifindex.
417
 * For use by ifmcstat(8).
418
 *
419
 */
420
static int
421
sysctl_igmp_ifinfo SYSCTL_HANDLER_ARGS
422
0
{
423
0
#pragma unused(oidp)
424
0
  int                     *name;
425
0
  int                      error;
426
0
  u_int                    namelen;
427
0
  struct ifnet            *ifp;
428
0
  struct igmp_ifinfo      *igi;
429
0
  struct igmp_ifinfo_u    igi_u;
430
431
0
  name = (int *)arg1;
432
0
  namelen = arg2;
433
434
0
  if (req->newptr != USER_ADDR_NULL) {
435
0
    return EPERM;
436
0
  }
437
438
0
  if (namelen != 1) {
439
0
    return EINVAL;
440
0
  }
441
442
0
  IGMP_LOCK();
443
444
0
  if (name[0] <= 0 || name[0] > (u_int)if_index) {
445
0
    error = ENOENT;
446
0
    goto out_locked;
447
0
  }
448
449
0
  error = ENOENT;
450
451
0
  ifnet_head_lock_shared();
452
0
  ifp = ifindex2ifnet[name[0]];
453
0
  ifnet_head_done();
454
0
  if (ifp == NULL) {
455
0
    goto out_locked;
456
0
  }
457
458
0
  bzero(&igi_u, sizeof(igi_u));
459
460
0
  LIST_FOREACH(igi, &igi_head, igi_link) {
461
0
    IGI_LOCK(igi);
462
0
    if (ifp != igi->igi_ifp) {
463
0
      IGI_UNLOCK(igi);
464
0
      continue;
465
0
    }
466
0
    igi_u.igi_ifindex = igi->igi_ifp->if_index;
467
0
    igi_u.igi_version = igi->igi_version;
468
0
    igi_u.igi_v1_timer = igi->igi_v1_timer;
469
0
    igi_u.igi_v2_timer = igi->igi_v2_timer;
470
0
    igi_u.igi_v3_timer = igi->igi_v3_timer;
471
0
    igi_u.igi_flags = igi->igi_flags;
472
0
    igi_u.igi_rv = igi->igi_rv;
473
0
    igi_u.igi_qi = igi->igi_qi;
474
0
    igi_u.igi_qri = igi->igi_qri;
475
0
    igi_u.igi_uri = igi->igi_uri;
476
0
    IGI_UNLOCK(igi);
477
478
0
    error = SYSCTL_OUT(req, &igi_u, sizeof(igi_u));
479
0
    break;
480
0
  }
481
482
0
out_locked:
483
0
  IGMP_UNLOCK();
484
0
  return error;
485
0
}
486
487
/*
488
 * Dispatch an entire queue of pending packet chains
489
 *
490
 * Must not be called with inm_lock held.
491
 */
492
static void
493
igmp_dispatch_queue(struct igmp_ifinfo *igi, struct ifqueue *ifq, int limit,
494
    const int loop)
495
0
{
496
0
  struct mbuf *m;
497
0
  struct ip *ip;
498
499
0
  if (igi != NULL) {
500
0
    IGI_LOCK_ASSERT_HELD(igi);
501
0
  }
502
503
0
  for (;;) {
504
0
    IF_DEQUEUE(ifq, m);
505
0
    if (m == NULL) {
506
0
      break;
507
0
    }
508
0
    IGMP_PRINTF(("%s: dispatch 0x%llx from 0x%llx\n", __func__,
509
0
        (uint64_t)VM_KERNEL_ADDRPERM(ifq),
510
0
        (uint64_t)VM_KERNEL_ADDRPERM(m)));
511
0
    ip = mtod(m, struct ip *);
512
0
    if (loop) {
513
0
      m->m_flags |= M_IGMP_LOOP;
514
0
    }
515
0
    if (igi != NULL) {
516
0
      IGI_UNLOCK(igi);
517
0
    }
518
0
    igmp_sendpkt(m);
519
0
    if (igi != NULL) {
520
0
      IGI_LOCK(igi);
521
0
    }
522
0
    if (--limit == 0) {
523
0
      break;
524
0
    }
525
0
  }
526
527
0
  if (igi != NULL) {
528
0
    IGI_LOCK_ASSERT_HELD(igi);
529
0
  }
530
0
}
531
532
/*
533
 * Filter outgoing IGMP report state by group.
534
 *
535
 * Reports are ALWAYS suppressed for ALL-HOSTS (224.0.0.1).
536
 * If the net.inet.igmp.sendlocal sysctl is 0, then IGMP reports are
537
 * disabled for all groups in the 224.0.0.0/24 link-local scope. However,
538
 * this may break certain IGMP snooping switches which rely on the old
539
 * report behaviour.
540
 *
541
 * Return zero if the given group is one for which IGMP reports
542
 * should be suppressed, or non-zero if reports should be issued.
543
 */
544
545
static __inline__
546
int
547
igmp_isgroupreported(const struct in_addr addr)
548
0
{
549
0
  if (in_allhosts(addr) ||
550
0
      ((!igmp_sendlocal && IN_LOCAL_GROUP(ntohl(addr.s_addr))))) {
551
0
    return 0;
552
0
  }
553
554
0
  return 1;
555
0
}
556
557
/*
558
 * Construct a Router Alert option to use in outgoing packets.
559
 */
560
static struct mbuf *
561
igmp_ra_alloc(void)
562
1
{
563
1
  struct mbuf     *m;
564
1
  struct ipoption *p;
565
566
1
  MGET(m, M_WAITOK, MT_DATA);
567
1
  p = mtod(m, struct ipoption *);
568
1
  p->ipopt_dst.s_addr = INADDR_ANY;
569
1
  p->ipopt_list[0] = (char)IPOPT_RA;      /* Router Alert Option */
570
1
  p->ipopt_list[1] = 0x04;        /* 4 bytes long */
571
1
  p->ipopt_list[2] = IPOPT_EOL;   /* End of IP option list */
572
1
  p->ipopt_list[3] = 0x00;        /* pad byte */
573
1
  m->m_len = sizeof(p->ipopt_dst) + p->ipopt_list[1];
574
575
1
  return m;
576
1
}
577
578
/*
579
 * Attach IGMP when PF_INET is attached to an interface.
580
 */
581
struct igmp_ifinfo *
582
igmp_domifattach(struct ifnet *ifp, zalloc_flags_t how)
583
2
{
584
2
  struct igmp_ifinfo *igi;
585
586
2
  IGMP_PRINTF(("%s: called for ifp 0x%llx(%s)\n",
587
2
      __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), ifp->if_name));
588
589
2
  igi = igi_alloc(how);
590
2
  if (igi == NULL) {
591
0
    return NULL;
592
0
  }
593
594
2
  IGMP_LOCK();
595
596
2
  IGI_LOCK(igi);
597
2
  igi_initvar(igi, ifp, 0);
598
2
  igi->igi_debug |= IFD_ATTACHED;
599
2
  IGI_ADDREF_LOCKED(igi); /* hold a reference for igi_head */
600
2
  IGI_ADDREF_LOCKED(igi); /* hold a reference for caller */
601
2
  IGI_UNLOCK(igi);
602
2
  ifnet_lock_shared(ifp);
603
2
  igmp_initsilent(ifp, igi);
604
2
  ifnet_lock_done(ifp);
605
606
2
  LIST_INSERT_HEAD(&igi_head, igi, igi_link);
607
608
2
  IGMP_UNLOCK();
609
610
2
  IGMP_PRINTF(("%s: allocate igmp_ifinfo for ifp 0x%llx(%s)\n", __func__,
611
2
      (uint64_t)VM_KERNEL_ADDRPERM(ifp), ifp->if_name));
612
613
2
  return igi;
614
2
}
615
616
/*
617
 * Attach IGMP when PF_INET is reattached to an interface.  Caller is
618
 * expected to have an outstanding reference to the igi.
619
 */
620
void
621
igmp_domifreattach(struct igmp_ifinfo *igi)
622
0
{
623
0
  struct ifnet *ifp;
624
625
0
  IGMP_LOCK();
626
627
0
  IGI_LOCK(igi);
628
0
  VERIFY(!(igi->igi_debug & IFD_ATTACHED));
629
0
  ifp = igi->igi_ifp;
630
0
  VERIFY(ifp != NULL);
631
0
  igi_initvar(igi, ifp, 1);
632
0
  igi->igi_debug |= IFD_ATTACHED;
633
0
  IGI_ADDREF_LOCKED(igi); /* hold a reference for igi_head */
634
0
  IGI_UNLOCK(igi);
635
0
  ifnet_lock_shared(ifp);
636
0
  igmp_initsilent(ifp, igi);
637
0
  ifnet_lock_done(ifp);
638
639
0
  LIST_INSERT_HEAD(&igi_head, igi, igi_link);
640
641
0
  IGMP_UNLOCK();
642
643
0
  IGMP_PRINTF(("%s: reattached igmp_ifinfo for ifp 0x%llx(%s)\n",
644
0
      __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), ifp->if_name));
645
0
}
646
647
/*
648
 * Hook for domifdetach.
649
 */
650
void
651
igmp_domifdetach(struct ifnet *ifp)
652
0
{
653
0
  SLIST_HEAD(, in_multi) inm_dthead;
654
655
0
  SLIST_INIT(&inm_dthead);
656
657
0
  IGMP_PRINTF(("%s: called for ifp 0x%llx(%s%d)\n", __func__,
658
0
      (uint64_t)VM_KERNEL_ADDRPERM(ifp), ifp->if_name, ifp->if_unit));
659
660
0
  IGMP_LOCK();
661
0
  igi_delete(ifp, (struct igmp_inm_relhead *)&inm_dthead);
662
0
  IGMP_UNLOCK();
663
664
  /* Now that we're dropped all locks, release detached records */
665
0
  IGMP_REMOVE_DETACHED_INM(&inm_dthead);
666
0
}
667
668
/*
669
 * Called at interface detach time.  Note that we only flush all deferred
670
 * responses and record releases; all remaining inm records and their source
671
 * entries related to this interface are left intact, in order to handle
672
 * the reattach case.
673
 */
674
static void
675
igi_delete(const struct ifnet *ifp, struct igmp_inm_relhead *inm_dthead)
676
0
{
677
0
  struct igmp_ifinfo *igi, *tigi;
678
679
0
  IGMP_LOCK_ASSERT_HELD();
680
681
0
  LIST_FOREACH_SAFE(igi, &igi_head, igi_link, tigi) {
682
0
    IGI_LOCK(igi);
683
0
    if (igi->igi_ifp == ifp) {
684
      /*
685
       * Free deferred General Query responses.
686
       */
687
0
      IF_DRAIN(&igi->igi_gq);
688
0
      IF_DRAIN(&igi->igi_v2q);
689
0
      igmp_flush_relq(igi, inm_dthead);
690
0
      VERIFY(SLIST_EMPTY(&igi->igi_relinmhead));
691
0
      igi->igi_debug &= ~IFD_ATTACHED;
692
0
      IGI_UNLOCK(igi);
693
694
0
      LIST_REMOVE(igi, igi_link);
695
0
      IGI_REMREF(igi); /* release igi_head reference */
696
0
      return;
697
0
    }
698
0
    IGI_UNLOCK(igi);
699
0
  }
700
0
  panic("%s: igmp_ifinfo not found for ifp %p(%s)\n", __func__,
701
0
      ifp, ifp->if_xname);
702
0
}
703
704
__private_extern__ void
705
igmp_initsilent(struct ifnet *ifp, struct igmp_ifinfo *igi)
706
2
{
707
2
  ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_OWNED);
708
709
2
  IGI_LOCK_ASSERT_NOTHELD(igi);
710
2
  IGI_LOCK(igi);
711
2
  if (!(ifp->if_flags & IFF_MULTICAST)) {
712
1
    igi->igi_flags |= IGIF_SILENT;
713
1
  } else {
714
1
    igi->igi_flags &= ~IGIF_SILENT;
715
1
  }
716
2
  IGI_UNLOCK(igi);
717
2
}
718
719
static void
720
igi_initvar(struct igmp_ifinfo *igi, struct ifnet *ifp, int reattach)
721
2
{
722
2
  IGI_LOCK_ASSERT_HELD(igi);
723
724
2
  igi->igi_ifp = ifp;
725
2
  igi->igi_version = igmp_default_version;
726
2
  igi->igi_flags = 0;
727
2
  igi->igi_rv = IGMP_RV_INIT;
728
2
  igi->igi_qi = IGMP_QI_INIT;
729
2
  igi->igi_qri = IGMP_QRI_INIT;
730
2
  igi->igi_uri = IGMP_URI_INIT;
731
732
2
  if (!reattach) {
733
2
    SLIST_INIT(&igi->igi_relinmhead);
734
2
  }
735
736
  /*
737
   * Responses to general queries are subject to bounds.
738
   */
739
2
  igi->igi_gq.ifq_maxlen =  IGMP_MAX_RESPONSE_PACKETS;
740
2
  igi->igi_v2q.ifq_maxlen = IGMP_MAX_RESPONSE_PACKETS;
741
2
}
742
743
static struct igmp_ifinfo *
744
igi_alloc(zalloc_flags_t how)
745
2
{
746
2
  struct igmp_ifinfo *igi = zalloc_flags(igi_zone, how | Z_ZERO);
747
2
  if (igi != NULL) {
748
2
    lck_mtx_init(&igi->igi_lock, igmp_mtx_grp, igmp_mtx_attr);
749
2
    igi->igi_debug |= IFD_ALLOC;
750
2
  }
751
2
  return igi;
752
2
}
753
754
static void
755
igi_free(struct igmp_ifinfo *igi)
756
0
{
757
0
  IGI_LOCK(igi);
758
0
  if (igi->igi_debug & IFD_ATTACHED) {
759
0
    panic("%s: attached igi=%p is being freed", __func__, igi);
760
    /* NOTREACHED */
761
0
  } else if (igi->igi_ifp != NULL) {
762
0
    panic("%s: ifp not NULL for igi=%p", __func__, igi);
763
    /* NOTREACHED */
764
0
  } else if (!(igi->igi_debug & IFD_ALLOC)) {
765
0
    panic("%s: igi %p cannot be freed", __func__, igi);
766
    /* NOTREACHED */
767
0
  } else if (igi->igi_refcnt != 0) {
768
0
    panic("%s: non-zero refcnt igi=%p", __func__, igi);
769
    /* NOTREACHED */
770
0
  }
771
0
  igi->igi_debug &= ~IFD_ALLOC;
772
0
  IGI_UNLOCK(igi);
773
774
0
  lck_mtx_destroy(&igi->igi_lock, igmp_mtx_grp);
775
0
  zfree(igi_zone, igi);
776
0
}
777
778
void
779
igi_addref(struct igmp_ifinfo *igi, int locked)
780
5
{
781
5
  if (!locked) {
782
1
    IGI_LOCK_SPIN(igi);
783
4
  } else {
784
4
    IGI_LOCK_ASSERT_HELD(igi);
785
4
  }
786
787
5
  if (++igi->igi_refcnt == 0) {
788
0
    panic("%s: igi=%p wraparound refcnt", __func__, igi);
789
    /* NOTREACHED */
790
0
  }
791
5
  if (!locked) {
792
1
    IGI_UNLOCK(igi);
793
1
  }
794
5
}
795
796
void
797
igi_remref(struct igmp_ifinfo *igi)
798
0
{
799
0
  SLIST_HEAD(, in_multi) inm_dthead;
800
0
  struct ifnet *ifp;
801
802
0
  IGI_LOCK_SPIN(igi);
803
804
0
  if (igi->igi_refcnt == 0) {
805
0
    panic("%s: igi=%p negative refcnt", __func__, igi);
806
    /* NOTREACHED */
807
0
  }
808
809
0
  --igi->igi_refcnt;
810
0
  if (igi->igi_refcnt > 0) {
811
0
    IGI_UNLOCK(igi);
812
0
    return;
813
0
  }
814
815
0
  ifp = igi->igi_ifp;
816
0
  igi->igi_ifp = NULL;
817
0
  IF_DRAIN(&igi->igi_gq);
818
0
  IF_DRAIN(&igi->igi_v2q);
819
0
  SLIST_INIT(&inm_dthead);
820
0
  igmp_flush_relq(igi, (struct igmp_inm_relhead *)&inm_dthead);
821
0
  VERIFY(SLIST_EMPTY(&igi->igi_relinmhead));
822
0
  IGI_UNLOCK(igi);
823
824
  /* Now that we're dropped all locks, release detached records */
825
0
  IGMP_REMOVE_DETACHED_INM(&inm_dthead);
826
827
0
  IGMP_PRINTF(("%s: freeing igmp_ifinfo for ifp 0x%llx(%s)\n",
828
0
      __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
829
830
0
  igi_free(igi);
831
0
}
832
833
/*
834
 * Process a received IGMPv1 query.
835
 * Return non-zero if the message should be dropped.
836
 */
837
static int
838
igmp_input_v1_query(struct ifnet *ifp, const struct ip *ip,
839
    const struct igmp *igmp)
840
212
{
841
212
  struct igmp_ifinfo      *igi;
842
212
  struct in_multi         *inm;
843
212
  struct in_multistep     step;
844
212
  struct igmp_tparams     itp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 };
845
846
212
  IGMP_LOCK_ASSERT_NOTHELD();
847
848
  /*
849
   * IGMPv1 Host Membership Queries SHOULD always be addressed to
850
   * 224.0.0.1. They are always treated as General Queries.
851
   * igmp_group is always ignored. Do not drop it as a userland
852
   * daemon may wish to see it.
853
   */
854
212
  if (!in_allhosts(ip->ip_dst) || !in_nullhost(igmp->igmp_group)) {
855
112
    IGMPSTAT_INC(igps_rcv_badqueries);
856
112
    OIGMPSTAT_INC(igps_rcv_badqueries);
857
112
    goto done;
858
112
  }
859
100
  IGMPSTAT_INC(igps_rcv_gen_queries);
860
861
100
  igi = IGMP_IFINFO(ifp);
862
100
  VERIFY(igi != NULL);
863
864
100
  IGI_LOCK(igi);
865
100
  if (igi->igi_flags & IGIF_LOOPBACK) {
866
0
    IGMP_PRINTF(("%s: ignore v1 query on IGIF_LOOPBACK "
867
0
        "ifp 0x%llx(%s)\n", __func__,
868
0
        (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
869
0
    IGI_UNLOCK(igi);
870
0
    goto done;
871
0
  }
872
  /*
873
   * Switch to IGMPv1 host compatibility mode.
874
   */
875
100
  itp.qpt = igmp_set_version(igi, IGMP_VERSION_1);
876
100
  IGI_UNLOCK(igi);
877
878
100
  IGMP_PRINTF(("%s: process v1 query on ifp 0x%llx(%s)\n", __func__,
879
100
      (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
880
881
  /*
882
   * Start the timers in all of our group records
883
   * for the interface on which the query arrived,
884
   * except those which are already running.
885
   */
886
100
  in_multihead_lock_shared();
887
100
  IN_FIRST_MULTI(step, inm);
888
200
  while (inm != NULL) {
889
100
    INM_LOCK(inm);
890
100
    if (inm->inm_ifp != ifp || inm->inm_timer != 0) {
891
0
      goto next;
892
0
    }
893
894
100
    switch (inm->inm_state) {
895
0
    case IGMP_NOT_MEMBER:
896
100
    case IGMP_SILENT_MEMBER:
897
100
      break;
898
0
    case IGMP_G_QUERY_PENDING_MEMBER:
899
0
    case IGMP_SG_QUERY_PENDING_MEMBER:
900
0
    case IGMP_REPORTING_MEMBER:
901
0
    case IGMP_IDLE_MEMBER:
902
0
    case IGMP_LAZY_MEMBER:
903
0
    case IGMP_SLEEPING_MEMBER:
904
0
    case IGMP_AWAKENING_MEMBER:
905
0
      inm->inm_state = IGMP_REPORTING_MEMBER;
906
0
      inm->inm_timer = IGMP_RANDOM_DELAY(IGMP_V1V2_MAX_RI);
907
0
      itp.cst = 1;
908
0
      break;
909
0
    case IGMP_LEAVING_MEMBER:
910
0
      break;
911
100
    }
912
100
next:
913
100
    INM_UNLOCK(inm);
914
100
    IN_NEXT_MULTI(step, inm);
915
100
  }
916
100
  in_multihead_lock_done();
917
212
done:
918
212
  igmp_set_timeout(&itp);
919
920
212
  return 0;
921
100
}
922
923
/*
924
 * Process a received IGMPv2 general or group-specific query.
925
 */
926
static int
927
igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip,
928
    const struct igmp *igmp)
929
1.42k
{
930
1.42k
  struct igmp_ifinfo      *igi;
931
1.42k
  struct in_multi         *inm;
932
1.42k
  int                      is_general_query;
933
1.42k
  uint16_t                 timer;
934
1.42k
  struct igmp_tparams      itp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 };
935
936
1.42k
  IGMP_LOCK_ASSERT_NOTHELD();
937
938
1.42k
  is_general_query = 0;
939
940
  /*
941
   * Validate address fields upfront.
942
   */
943
1.42k
  if (in_nullhost(igmp->igmp_group)) {
944
    /*
945
     * IGMPv2 General Query.
946
     * If this was not sent to the all-hosts group, ignore it.
947
     */
948
60
    if (!in_allhosts(ip->ip_dst)) {
949
13
      goto done;
950
13
    }
951
47
    IGMPSTAT_INC(igps_rcv_gen_queries);
952
47
    is_general_query = 1;
953
1.36k
  } else {
954
    /* IGMPv2 Group-Specific Query. */
955
1.36k
    IGMPSTAT_INC(igps_rcv_group_queries);
956
1.36k
  }
957
958
1.40k
  igi = IGMP_IFINFO(ifp);
959
1.40k
  VERIFY(igi != NULL);
960
961
1.40k
  IGI_LOCK(igi);
962
1.40k
  if (igi->igi_flags & IGIF_LOOPBACK) {
963
0
    IGMP_PRINTF(("%s: ignore v2 query on IGIF_LOOPBACK "
964
0
        "ifp 0x%llx(%s)\n", __func__,
965
0
        (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
966
0
    IGI_UNLOCK(igi);
967
0
    goto done;
968
0
  }
969
  /*
970
   * Ignore v2 query if in v1 Compatibility Mode.
971
   */
972
1.40k
  if (igi->igi_version == IGMP_VERSION_1) {
973
1.38k
    IGI_UNLOCK(igi);
974
1.38k
    goto done;
975
1.38k
  }
976
21
  itp.qpt = igmp_set_version(igi, IGMP_VERSION_2);
977
21
  IGI_UNLOCK(igi);
978
979
21
  timer = igmp->igmp_code / IGMP_TIMER_SCALE;
980
21
  if (timer == 0) {
981
3
    timer = 1;
982
3
  }
983
984
21
  if (is_general_query) {
985
1
    struct in_multistep step;
986
987
1
    IGMP_PRINTF(("%s: process v2 general query on ifp 0x%llx(%s)\n",
988
1
        __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
989
    /*
990
     * For each reporting group joined on this
991
     * interface, kick the report timer.
992
     */
993
1
    in_multihead_lock_shared();
994
1
    IN_FIRST_MULTI(step, inm);
995
2
    while (inm != NULL) {
996
1
      INM_LOCK(inm);
997
1
      if (inm->inm_ifp == ifp) {
998
1
        itp.cst += igmp_v2_update_group(inm, timer);
999
1
      }
1000
1
      INM_UNLOCK(inm);
1001
1
      IN_NEXT_MULTI(step, inm);
1002
1
    }
1003
1
    in_multihead_lock_done();
1004
20
  } else {
1005
    /*
1006
     * Group-specific IGMPv2 query, we need only
1007
     * look up the single group to process it.
1008
     */
1009
20
    in_multihead_lock_shared();
1010
20
    IN_LOOKUP_MULTI(&igmp->igmp_group, ifp, inm);
1011
20
    in_multihead_lock_done();
1012
20
    if (inm != NULL) {
1013
0
      INM_LOCK(inm);
1014
0
      IGMP_INET_PRINTF(igmp->igmp_group,
1015
0
          ("process v2 query %s on ifp 0x%llx(%s)\n",
1016
0
          _igmp_inet_buf,
1017
0
          (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1018
0
      itp.cst = igmp_v2_update_group(inm, timer);
1019
0
      INM_UNLOCK(inm);
1020
0
      INM_REMREF(inm); /* from IN_LOOKUP_MULTI */
1021
0
    }
1022
20
  }
1023
1.42k
done:
1024
1.42k
  igmp_set_timeout(&itp);
1025
1026
1.42k
  return 0;
1027
21
}
1028
1029
/*
1030
 * Update the report timer on a group in response to an IGMPv2 query.
1031
 *
1032
 * If we are becoming the reporting member for this group, start the timer.
1033
 * If we already are the reporting member for this group, and timer is
1034
 * below the threshold, reset it.
1035
 *
1036
 * We may be updating the group for the first time since we switched
1037
 * to IGMPv3. If we are, then we must clear any recorded source lists,
1038
 * and transition to REPORTING state; the group timer is overloaded
1039
 * for group and group-source query responses.
1040
 *
1041
 * Unlike IGMPv3, the delay per group should be jittered
1042
 * to avoid bursts of IGMPv2 reports.
1043
 */
1044
static uint32_t
1045
igmp_v2_update_group(struct in_multi *inm, const int timer)
1046
1
{
1047
1
  IGMP_INET_PRINTF(inm->inm_addr, ("%s: %s/%s timer=%d\n",
1048
1
      __func__, _igmp_inet_buf, if_name(inm->inm_ifp),
1049
1
      timer));
1050
1051
1
  INM_LOCK_ASSERT_HELD(inm);
1052
1053
1
  switch (inm->inm_state) {
1054
0
  case IGMP_NOT_MEMBER:
1055
1
  case IGMP_SILENT_MEMBER:
1056
1
    break;
1057
0
  case IGMP_REPORTING_MEMBER:
1058
0
    if (inm->inm_timer != 0 &&
1059
0
        inm->inm_timer <= timer) {
1060
0
      IGMP_PRINTF(("%s: REPORTING and timer running, "
1061
0
          "skipping.\n", __func__));
1062
0
      break;
1063
0
    }
1064
0
    OS_FALLTHROUGH;
1065
0
  case IGMP_SG_QUERY_PENDING_MEMBER:
1066
0
  case IGMP_G_QUERY_PENDING_MEMBER:
1067
0
  case IGMP_IDLE_MEMBER:
1068
0
  case IGMP_LAZY_MEMBER:
1069
0
  case IGMP_AWAKENING_MEMBER:
1070
0
    IGMP_PRINTF(("%s: ->REPORTING\n", __func__));
1071
0
    inm->inm_state = IGMP_REPORTING_MEMBER;
1072
0
    inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1073
0
    break;
1074
0
  case IGMP_SLEEPING_MEMBER:
1075
0
    IGMP_PRINTF(("%s: ->AWAKENING\n", __func__));
1076
0
    inm->inm_state = IGMP_AWAKENING_MEMBER;
1077
0
    break;
1078
0
  case IGMP_LEAVING_MEMBER:
1079
0
    break;
1080
1
  }
1081
1082
1
  return inm->inm_timer;
1083
1
}
1084
1085
/*
1086
 * Process a received IGMPv3 general, group-specific or
1087
 * group-and-source-specific query.
1088
 * Assumes m has already been pulled up to the full IGMP message length.
1089
 * Return 0 if successful, otherwise an appropriate error code is returned.
1090
 */
1091
static int
1092
igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip,
1093
    /*const*/ struct igmpv3 *igmpv3)
1094
432
{
1095
432
  struct igmp_ifinfo      *igi;
1096
432
  struct in_multi         *inm;
1097
432
  int                      is_general_query;
1098
432
  uint32_t                 maxresp, nsrc, qqi;
1099
432
  uint32_t                 timer;
1100
432
  uint8_t                  qrv;
1101
432
  struct igmp_tparams      itp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 };
1102
1103
432
  IGMP_LOCK_ASSERT_NOTHELD();
1104
1105
432
  is_general_query = 0;
1106
1107
432
  IGMP_PRINTF(("%s: process v3 query on ifp 0x%llx(%s)\n", __func__,
1108
432
      (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1109
1110
432
  maxresp = igmpv3->igmp_code;    /* in 1/10ths of a second */
1111
432
  if (maxresp >= 128) {
1112
162
    maxresp = IGMP_MANT(igmpv3->igmp_code) <<
1113
162
        (IGMP_EXP(igmpv3->igmp_code) + 3);
1114
162
  }
1115
1116
  /*
1117
   * Robustness must never be less than 2 for on-wire IGMPv3.
1118
   * FUTURE: Check if ifp has IGIF_LOOPBACK set, as we will make
1119
   * an exception for interfaces whose IGMPv3 state changes
1120
   * are redirected to loopback (e.g. MANET).
1121
   */
1122
432
  qrv = IGMP_QRV(igmpv3->igmp_misc);
1123
432
  if (qrv < 2) {
1124
318
    IGMP_PRINTF(("%s: clamping qrv %d to %d\n", __func__,
1125
318
        qrv, IGMP_RV_INIT));
1126
318
    qrv = IGMP_RV_INIT;
1127
318
  }
1128
1129
432
  qqi = igmpv3->igmp_qqi;
1130
432
  if (qqi >= 128) {
1131
39
    qqi = IGMP_MANT(igmpv3->igmp_qqi) <<
1132
39
        (IGMP_EXP(igmpv3->igmp_qqi) + 3);
1133
39
  }
1134
1135
432
  timer = maxresp / IGMP_TIMER_SCALE;
1136
432
  if (timer == 0) {
1137
45
    timer = 1;
1138
45
  }
1139
1140
432
  nsrc = ntohs(igmpv3->igmp_numsrc);
1141
1142
  /*
1143
   * Validate address fields and versions upfront before
1144
   * accepting v3 query.
1145
   */
1146
432
  if (in_nullhost(igmpv3->igmp_group)) {
1147
    /*
1148
     * IGMPv3 General Query.
1149
     *
1150
     * General Queries SHOULD be directed to 224.0.0.1.
1151
     * A general query with a source list has undefined
1152
     * behaviour; discard it.
1153
     */
1154
59
    IGMPSTAT_INC(igps_rcv_gen_queries);
1155
59
    if (!in_allhosts(ip->ip_dst) || nsrc > 0) {
1156
33
      IGMPSTAT_INC(igps_rcv_badqueries);
1157
33
      OIGMPSTAT_INC(igps_rcv_badqueries);
1158
33
      goto done;
1159
33
    }
1160
26
    is_general_query = 1;
1161
373
  } else {
1162
    /* Group or group-source specific query. */
1163
373
    if (nsrc == 0) {
1164
69
      IGMPSTAT_INC(igps_rcv_group_queries);
1165
304
    } else {
1166
304
      IGMPSTAT_INC(igps_rcv_gsr_queries);
1167
304
    }
1168
373
  }
1169
1170
399
  igi = IGMP_IFINFO(ifp);
1171
399
  VERIFY(igi != NULL);
1172
1173
399
  IGI_LOCK(igi);
1174
399
  if (igi->igi_flags & IGIF_LOOPBACK) {
1175
0
    IGMP_PRINTF(("%s: ignore v3 query on IGIF_LOOPBACK "
1176
0
        "ifp 0x%llx(%s)\n", __func__,
1177
0
        (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1178
0
    IGI_UNLOCK(igi);
1179
0
    goto done;
1180
0
  }
1181
1182
  /*
1183
   * Discard the v3 query if we're in Compatibility Mode.
1184
   * The RFC is not obviously worded that hosts need to stay in
1185
   * compatibility mode until the Old Version Querier Present
1186
   * timer expires.
1187
   */
1188
399
  if (igi->igi_version != IGMP_VERSION_3) {
1189
399
    IGMP_PRINTF(("%s: ignore v3 query in v%d mode on "
1190
399
        "ifp 0x%llx(%s)\n", __func__, igi->igi_version,
1191
399
        (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1192
399
    IGI_UNLOCK(igi);
1193
399
    goto done;
1194
399
  }
1195
1196
0
  itp.qpt = igmp_set_version(igi, IGMP_VERSION_3);
1197
0
  igi->igi_rv = qrv;
1198
0
  igi->igi_qi = qqi;
1199
0
  igi->igi_qri = MAX(timer, IGMP_QRI_MIN);
1200
1201
0
  IGMP_PRINTF(("%s: qrv %d qi %d qri %d\n", __func__, igi->igi_rv,
1202
0
      igi->igi_qi, igi->igi_qri));
1203
1204
0
  if (is_general_query) {
1205
    /*
1206
     * Schedule a current-state report on this ifp for
1207
     * all groups, possibly containing source lists.
1208
     * If there is a pending General Query response
1209
     * scheduled earlier than the selected delay, do
1210
     * not schedule any other reports.
1211
     * Otherwise, reset the interface timer.
1212
     */
1213
0
    IGMP_PRINTF(("%s: process v3 general query on ifp 0x%llx(%s)\n",
1214
0
        __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1215
0
    if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer) {
1216
0
      itp.it = igi->igi_v3_timer = IGMP_RANDOM_DELAY(timer);
1217
0
    }
1218
0
    IGI_UNLOCK(igi);
1219
0
  } else {
1220
0
    IGI_UNLOCK(igi);
1221
    /*
1222
     * Group-source-specific queries are throttled on
1223
     * a per-group basis to defeat denial-of-service attempts.
1224
     * Queries for groups we are not a member of on this
1225
     * link are simply ignored.
1226
     */
1227
0
    in_multihead_lock_shared();
1228
0
    IN_LOOKUP_MULTI(&igmpv3->igmp_group, ifp, inm);
1229
0
    in_multihead_lock_done();
1230
0
    if (inm == NULL) {
1231
0
      goto done;
1232
0
    }
1233
1234
0
    INM_LOCK(inm);
1235
0
    if (nsrc > 0) {
1236
0
      if (!ratecheck(&inm->inm_lastgsrtv,
1237
0
          &igmp_gsrdelay)) {
1238
0
        IGMP_PRINTF(("%s: GS query throttled.\n",
1239
0
            __func__));
1240
0
        IGMPSTAT_INC(igps_drop_gsr_queries);
1241
0
        INM_UNLOCK(inm);
1242
0
        INM_REMREF(inm); /* from IN_LOOKUP_MULTI */
1243
0
        goto done;
1244
0
      }
1245
0
    }
1246
0
    IGMP_INET_PRINTF(igmpv3->igmp_group,
1247
0
        ("process v3 %s query on ifp 0x%llx(%s)\n", _igmp_inet_buf,
1248
0
        (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1249
    /*
1250
     * If there is a pending General Query response
1251
     * scheduled sooner than the selected delay, no
1252
     * further report need be scheduled.
1253
     * Otherwise, prepare to respond to the
1254
     * group-specific or group-and-source query.
1255
     */
1256
0
    IGI_LOCK(igi);
1257
0
    itp.it = igi->igi_v3_timer;
1258
0
    IGI_UNLOCK(igi);
1259
0
    if (itp.it == 0 || itp.it >= timer) {
1260
0
      (void) igmp_input_v3_group_query(inm, timer, igmpv3);
1261
0
      itp.cst = inm->inm_timer;
1262
0
    }
1263
0
    INM_UNLOCK(inm);
1264
0
    INM_REMREF(inm); /* from IN_LOOKUP_MULTI */
1265
0
  }
1266
432
done:
1267
432
  if (itp.it > 0) {
1268
0
    IGMP_PRINTF(("%s: v3 general query response scheduled in "
1269
0
        "T+%d seconds on ifp 0x%llx(%s)\n", __func__, itp.it,
1270
0
        (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1271
0
  }
1272
432
  igmp_set_timeout(&itp);
1273
1274
432
  return 0;
1275
0
}
1276
1277
/*
1278
 * Process a recieved IGMPv3 group-specific or group-and-source-specific
1279
 * query.
1280
 * Return <0 if any error occured. Currently this is ignored.
1281
 */
1282
static int
1283
igmp_input_v3_group_query(struct in_multi *inm,
1284
    int timer, /*const*/ struct igmpv3 *igmpv3)
1285
0
{
1286
0
  int                      retval;
1287
0
  uint16_t                 nsrc;
1288
1289
0
  INM_LOCK_ASSERT_HELD(inm);
1290
1291
0
  retval = 0;
1292
1293
0
  switch (inm->inm_state) {
1294
0
  case IGMP_NOT_MEMBER:
1295
0
  case IGMP_SILENT_MEMBER:
1296
0
  case IGMP_SLEEPING_MEMBER:
1297
0
  case IGMP_LAZY_MEMBER:
1298
0
  case IGMP_AWAKENING_MEMBER:
1299
0
  case IGMP_IDLE_MEMBER:
1300
0
  case IGMP_LEAVING_MEMBER:
1301
0
    return retval;
1302
0
  case IGMP_REPORTING_MEMBER:
1303
0
  case IGMP_G_QUERY_PENDING_MEMBER:
1304
0
  case IGMP_SG_QUERY_PENDING_MEMBER:
1305
0
    break;
1306
0
  }
1307
1308
0
  nsrc = ntohs(igmpv3->igmp_numsrc);
1309
1310
  /*
1311
   * Deal with group-specific queries upfront.
1312
   * If any group query is already pending, purge any recorded
1313
   * source-list state if it exists, and schedule a query response
1314
   * for this group-specific query.
1315
   */
1316
0
  if (nsrc == 0) {
1317
0
    if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
1318
0
        inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) {
1319
0
      inm_clear_recorded(inm);
1320
0
      timer = min(inm->inm_timer, timer);
1321
0
    }
1322
0
    inm->inm_state = IGMP_G_QUERY_PENDING_MEMBER;
1323
0
    inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1324
0
    return retval;
1325
0
  }
1326
1327
  /*
1328
   * Deal with the case where a group-and-source-specific query has
1329
   * been received but a group-specific query is already pending.
1330
   */
1331
0
  if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER) {
1332
0
    timer = min(inm->inm_timer, timer);
1333
0
    inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1334
0
    return retval;
1335
0
  }
1336
1337
  /*
1338
   * Finally, deal with the case where a group-and-source-specific
1339
   * query has been received, where a response to a previous g-s-r
1340
   * query exists, or none exists.
1341
   * In this case, we need to parse the source-list which the Querier
1342
   * has provided us with and check if we have any source list filter
1343
   * entries at T1 for these sources. If we do not, there is no need
1344
   * schedule a report and the query may be dropped.
1345
   * If we do, we must record them and schedule a current-state
1346
   * report for those sources.
1347
   * FIXME: Handling source lists larger than 1 mbuf requires that
1348
   * we pass the mbuf chain pointer down to this function, and use
1349
   * m_getptr() to walk the chain.
1350
   */
1351
0
  if (inm->inm_nsrc > 0) {
1352
0
    const struct in_addr    *ap;
1353
0
    int                      i, nrecorded;
1354
1355
0
    ap = (const struct in_addr *)(igmpv3 + 1);
1356
0
    nrecorded = 0;
1357
0
    for (i = 0; i < nsrc; i++, ap++) {
1358
0
      retval = inm_record_source(inm, ap->s_addr);
1359
0
      if (retval < 0) {
1360
0
        break;
1361
0
      }
1362
0
      nrecorded += retval;
1363
0
    }
1364
0
    if (nrecorded > 0) {
1365
0
      IGMP_PRINTF(("%s: schedule response to SG query\n",
1366
0
          __func__));
1367
0
      inm->inm_state = IGMP_SG_QUERY_PENDING_MEMBER;
1368
0
      inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1369
0
    }
1370
0
  }
1371
1372
0
  return retval;
1373
0
}
1374
1375
/*
1376
 * Process a received IGMPv1 host membership report.
1377
 *
1378
 * NOTE: 0.0.0.0 workaround breaks const correctness.
1379
 */
1380
static int
1381
igmp_input_v1_report(struct ifnet *ifp, struct mbuf *m, /*const*/ struct ip *ip,
1382
    /*const*/ struct igmp *igmp)
1383
145
{
1384
145
  struct in_ifaddr *ia;
1385
145
  struct in_multi *inm;
1386
1387
145
  IGMPSTAT_INC(igps_rcv_reports);
1388
145
  OIGMPSTAT_INC(igps_rcv_reports);
1389
1390
145
  if ((ifp->if_flags & IFF_LOOPBACK) ||
1391
145
      (m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
1392
145
    return 0;
1393
145
  }
1394
1395
0
  if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr) ||
1396
0
      !in_hosteq(igmp->igmp_group, ip->ip_dst))) {
1397
0
    IGMPSTAT_INC(igps_rcv_badreports);
1398
0
    OIGMPSTAT_INC(igps_rcv_badreports);
1399
0
    return EINVAL;
1400
0
  }
1401
1402
  /*
1403
   * RFC 3376, Section 4.2.13, 9.2, 9.3:
1404
   * Booting clients may use the source address 0.0.0.0. Some
1405
   * IGMP daemons may not know how to use IP_RECVIF to determine
1406
   * the interface upon which this message was received.
1407
   * Replace 0.0.0.0 with the subnet address if told to do so.
1408
   */
1409
0
  if (igmp_recvifkludge && in_nullhost(ip->ip_src)) {
1410
0
    IFP_TO_IA(ifp, ia);
1411
0
    if (ia != NULL) {
1412
0
      IFA_LOCK(&ia->ia_ifa);
1413
0
      ip->ip_src.s_addr = htonl(ia->ia_subnet);
1414
0
      IFA_UNLOCK(&ia->ia_ifa);
1415
0
      IFA_REMREF(&ia->ia_ifa);
1416
0
    }
1417
0
  }
1418
1419
0
  IGMP_INET_PRINTF(igmp->igmp_group,
1420
0
      ("process v1 report %s on ifp 0x%llx(%s)\n", _igmp_inet_buf,
1421
0
      (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1422
1423
  /*
1424
   * IGMPv1 report suppression.
1425
   * If we are a member of this group, and our membership should be
1426
   * reported, stop our group timer and transition to the 'lazy' state.
1427
   */
1428
0
  in_multihead_lock_shared();
1429
0
  IN_LOOKUP_MULTI(&igmp->igmp_group, ifp, inm);
1430
0
  in_multihead_lock_done();
1431
0
  if (inm != NULL) {
1432
0
    struct igmp_ifinfo *igi;
1433
1434
0
    INM_LOCK(inm);
1435
1436
0
    igi = inm->inm_igi;
1437
0
    VERIFY(igi != NULL);
1438
1439
0
    IGMPSTAT_INC(igps_rcv_ourreports);
1440
0
    OIGMPSTAT_INC(igps_rcv_ourreports);
1441
1442
    /*
1443
     * If we are in IGMPv3 host mode, do not allow the
1444
     * other host's IGMPv1 report to suppress our reports
1445
     * unless explicitly configured to do so.
1446
     */
1447
0
    IGI_LOCK(igi);
1448
0
    if (igi->igi_version == IGMP_VERSION_3) {
1449
0
      if (igmp_legacysupp) {
1450
0
        igmp_v3_suppress_group_record(inm);
1451
0
      }
1452
0
      IGI_UNLOCK(igi);
1453
0
      INM_UNLOCK(inm);
1454
0
      INM_REMREF(inm); /* from IN_LOOKUP_MULTI */
1455
0
      return 0;
1456
0
    }
1457
1458
0
    INM_LOCK_ASSERT_HELD(inm);
1459
0
    inm->inm_timer = 0;
1460
1461
0
    switch (inm->inm_state) {
1462
0
    case IGMP_NOT_MEMBER:
1463
0
    case IGMP_SILENT_MEMBER:
1464
0
      break;
1465
0
    case IGMP_IDLE_MEMBER:
1466
0
    case IGMP_LAZY_MEMBER:
1467
0
    case IGMP_AWAKENING_MEMBER:
1468
0
      IGMP_INET_PRINTF(igmp->igmp_group,
1469
0
          ("report suppressed for %s on ifp 0x%llx(%s)\n",
1470
0
          _igmp_inet_buf,
1471
0
          (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1472
0
      OS_FALLTHROUGH;
1473
0
    case IGMP_SLEEPING_MEMBER:
1474
0
      inm->inm_state = IGMP_SLEEPING_MEMBER;
1475
0
      break;
1476
0
    case IGMP_REPORTING_MEMBER:
1477
0
      IGMP_INET_PRINTF(igmp->igmp_group,
1478
0
          ("report suppressed for %s on ifp 0x%llx(%s)\n",
1479
0
          _igmp_inet_buf,
1480
0
          (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1481
0
      if (igi->igi_version == IGMP_VERSION_1) {
1482
0
        inm->inm_state = IGMP_LAZY_MEMBER;
1483
0
      } else if (igi->igi_version == IGMP_VERSION_2) {
1484
0
        inm->inm_state = IGMP_SLEEPING_MEMBER;
1485
0
      }
1486
0
      break;
1487
0
    case IGMP_G_QUERY_PENDING_MEMBER:
1488
0
    case IGMP_SG_QUERY_PENDING_MEMBER:
1489
0
    case IGMP_LEAVING_MEMBER:
1490
0
      break;
1491
0
    }
1492
0
    IGI_UNLOCK(igi);
1493
0
    INM_UNLOCK(inm);
1494
0
    INM_REMREF(inm); /* from IN_LOOKUP_MULTI */
1495
0
  }
1496
1497
0
  return 0;
1498
0
}
1499
1500
/*
1501
 * Process a received IGMPv2 host membership report.
1502
 *
1503
 * NOTE: 0.0.0.0 workaround breaks const correctness.
1504
 */
1505
static int
1506
igmp_input_v2_report(struct ifnet *ifp, struct mbuf *m, /*const*/ struct ip *ip,
1507
    /*const*/ struct igmp *igmp)
1508
384
{
1509
384
  struct in_ifaddr *ia;
1510
384
  struct in_multi *inm;
1511
1512
  /*
1513
   * Make sure we don't hear our own membership report.  Fast
1514
   * leave requires knowing that we are the only member of a
1515
   * group.
1516
   */
1517
384
  IFP_TO_IA(ifp, ia);
1518
384
  if (ia != NULL) {
1519
384
    IFA_LOCK(&ia->ia_ifa);
1520
384
    if (in_hosteq(ip->ip_src, IA_SIN(ia)->sin_addr)) {
1521
54
      IFA_UNLOCK(&ia->ia_ifa);
1522
54
      IFA_REMREF(&ia->ia_ifa);
1523
54
      return 0;
1524
54
    }
1525
330
    IFA_UNLOCK(&ia->ia_ifa);
1526
330
  }
1527
1528
330
  IGMPSTAT_INC(igps_rcv_reports);
1529
330
  OIGMPSTAT_INC(igps_rcv_reports);
1530
1531
330
  if ((ifp->if_flags & IFF_LOOPBACK) ||
1532
330
      (m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
1533
330
    if (ia != NULL) {
1534
330
      IFA_REMREF(&ia->ia_ifa);
1535
330
    }
1536
330
    return 0;
1537
330
  }
1538
1539
0
  if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
1540
0
      !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
1541
0
    if (ia != NULL) {
1542
0
      IFA_REMREF(&ia->ia_ifa);
1543
0
    }
1544
0
    IGMPSTAT_INC(igps_rcv_badreports);
1545
0
    OIGMPSTAT_INC(igps_rcv_badreports);
1546
0
    return EINVAL;
1547
0
  }
1548
1549
  /*
1550
   * RFC 3376, Section 4.2.13, 9.2, 9.3:
1551
   * Booting clients may use the source address 0.0.0.0. Some
1552
   * IGMP daemons may not know how to use IP_RECVIF to determine
1553
   * the interface upon which this message was received.
1554
   * Replace 0.0.0.0 with the subnet address if told to do so.
1555
   */
1556
0
  if (igmp_recvifkludge && in_nullhost(ip->ip_src)) {
1557
0
    if (ia != NULL) {
1558
0
      IFA_LOCK(&ia->ia_ifa);
1559
0
      ip->ip_src.s_addr = htonl(ia->ia_subnet);
1560
0
      IFA_UNLOCK(&ia->ia_ifa);
1561
0
    }
1562
0
  }
1563
0
  if (ia != NULL) {
1564
0
    IFA_REMREF(&ia->ia_ifa);
1565
0
  }
1566
1567
0
  IGMP_INET_PRINTF(igmp->igmp_group,
1568
0
      ("process v2 report %s on ifp 0x%llx(%s)\n", _igmp_inet_buf,
1569
0
      (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
1570
1571
  /*
1572
   * IGMPv2 report suppression.
1573
   * If we are a member of this group, and our membership should be
1574
   * reported, and our group timer is pending or about to be reset,
1575
   * stop our group timer by transitioning to the 'lazy' state.
1576
   */
1577
0
  in_multihead_lock_shared();
1578
0
  IN_LOOKUP_MULTI(&igmp->igmp_group, ifp, inm);
1579
0
  in_multihead_lock_done();
1580
0
  if (inm != NULL) {
1581
0
    struct igmp_ifinfo *igi;
1582
1583
0
    INM_LOCK(inm);
1584
0
    igi = inm->inm_igi;
1585
0
    VERIFY(igi != NULL);
1586
1587
0
    IGMPSTAT_INC(igps_rcv_ourreports);
1588
0
    OIGMPSTAT_INC(igps_rcv_ourreports);
1589
1590
    /*
1591
     * If we are in IGMPv3 host mode, do not allow the
1592
     * other host's IGMPv1 report to suppress our reports
1593
     * unless explicitly configured to do so.
1594
     */
1595
0
    IGI_LOCK(igi);
1596
0
    if (igi->igi_version == IGMP_VERSION_3) {
1597
0
      if (igmp_legacysupp) {
1598
0
        igmp_v3_suppress_group_record(inm);
1599
0
      }
1600
0
      IGI_UNLOCK(igi);
1601
0
      INM_UNLOCK(inm);
1602
0
      INM_REMREF(inm);
1603
0
      return 0;
1604
0
    }
1605
1606
0
    inm->inm_timer = 0;
1607
1608
0
    switch (inm->inm_state) {
1609
0
    case IGMP_NOT_MEMBER:
1610
0
    case IGMP_SILENT_MEMBER:
1611
0
    case IGMP_SLEEPING_MEMBER:
1612
0
      break;
1613
0
    case IGMP_REPORTING_MEMBER:
1614
0
    case IGMP_IDLE_MEMBER:
1615
0
    case IGMP_AWAKENING_MEMBER:
1616
0
      IGMP_INET_PRINTF(igmp->igmp_group,
1617
0
          ("report suppressed for %s on ifp 0x%llx(%s)\n",
1618
0
          _igmp_inet_buf, (uint64_t)VM_KERNEL_ADDRPERM(ifp),
1619
0
          if_name(ifp)));
1620
0
      OS_FALLTHROUGH;
1621
0
    case IGMP_LAZY_MEMBER:
1622
0
      inm->inm_state = IGMP_LAZY_MEMBER;
1623
0
      break;
1624
0
    case IGMP_G_QUERY_PENDING_MEMBER:
1625
0
    case IGMP_SG_QUERY_PENDING_MEMBER:
1626
0
    case IGMP_LEAVING_MEMBER:
1627
0
      break;
1628
0
    }
1629
0
    IGI_UNLOCK(igi);
1630
0
    INM_UNLOCK(inm);
1631
0
    INM_REMREF(inm);
1632
0
  }
1633
1634
0
  return 0;
1635
0
}
1636
1637
void
1638
igmp_input(struct mbuf *m, int off)
1639
3.53k
{
1640
3.53k
  int iphlen;
1641
3.53k
  struct ifnet *ifp;
1642
3.53k
  struct igmp *igmp;
1643
3.53k
  struct ip *ip;
1644
3.53k
  int igmplen;
1645
3.53k
  int minlen;
1646
3.53k
  int queryver;
1647
1648
3.53k
  IGMP_PRINTF(("%s: called w/mbuf (0x%llx,%d)\n", __func__,
1649
3.53k
      (uint64_t)VM_KERNEL_ADDRPERM(m), off));
1650
1651
3.53k
  ifp = m->m_pkthdr.rcvif;
1652
1653
3.53k
  IGMPSTAT_INC(igps_rcv_total);
1654
3.53k
  OIGMPSTAT_INC(igps_rcv_total);
1655
1656
  /* Expect 32-bit aligned data pointer on strict-align platforms */
1657
3.53k
  MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
1658
1659
3.53k
  ip = mtod(m, struct ip *);
1660
3.53k
  iphlen = off;
1661
1662
  /* By now, ip_len no longer contains the length of IP header */
1663
3.53k
  igmplen = ip->ip_len;
1664
1665
  /*
1666
   * Validate lengths.
1667
   */
1668
3.53k
  if (igmplen < IGMP_MINLEN) {
1669
543
    IGMPSTAT_INC(igps_rcv_tooshort);
1670
543
    OIGMPSTAT_INC(igps_rcv_tooshort);
1671
543
    m_freem(m);
1672
543
    return;
1673
543
  }
1674
1675
  /*
1676
   * Always pullup to the minimum size for v1/v2 or v3
1677
   * to amortize calls to m_pulldown().
1678
   */
1679
2.99k
  if (igmplen >= IGMP_V3_QUERY_MINLEN) {
1680
668
    minlen = IGMP_V3_QUERY_MINLEN;
1681
2.32k
  } else {
1682
2.32k
    minlen = IGMP_MINLEN;
1683
2.32k
  }
1684
1685
  /* A bit more expensive than M_STRUCT_GET, but ensures alignment */
1686
2.99k
  M_STRUCT_GET0(igmp, struct igmp *, m, off, minlen);
1687
2.99k
  if (igmp == NULL) {
1688
0
    IGMPSTAT_INC(igps_rcv_tooshort);
1689
0
    OIGMPSTAT_INC(igps_rcv_tooshort);
1690
0
    return;
1691
0
  }
1692
  /* N.B.: we assume the packet was correctly aligned in ip_input. */
1693
1694
  /*
1695
   * Validate checksum.
1696
   */
1697
2.99k
  m->m_data += iphlen;
1698
2.99k
  m->m_len -= iphlen;
1699
2.99k
  if (in_cksum(m, igmplen)) {
1700
0
    IGMPSTAT_INC(igps_rcv_badsum);
1701
0
    OIGMPSTAT_INC(igps_rcv_badsum);
1702
0
    m_freem(m);
1703
0
    return;
1704
0
  }
1705
2.99k
  m->m_data -= iphlen;
1706
2.99k
  m->m_len += iphlen;
1707
1708
  /*
1709
   * IGMP control traffic is link-scope, and must have a TTL of 1.
1710
   * DVMRP traffic (e.g. mrinfo, mtrace) is an exception;
1711
   * probe packets may come from beyond the LAN.
1712
   */
1713
2.99k
  if (igmp->igmp_type != IGMP_DVMRP && ip->ip_ttl != 1) {
1714
169
    IGMPSTAT_INC(igps_rcv_badttl);
1715
169
    m_freem(m);
1716
169
    return;
1717
169
  }
1718
1719
2.82k
  switch (igmp->igmp_type) {
1720
2.16k
  case IGMP_HOST_MEMBERSHIP_QUERY:
1721
2.16k
    if (igmplen == IGMP_MINLEN) {
1722
1.63k
      if (igmp->igmp_code == 0) {
1723
212
        queryver = IGMP_VERSION_1;
1724
1.42k
      } else {
1725
1.42k
        queryver = IGMP_VERSION_2;
1726
1.42k
      }
1727
1.63k
    } else if (igmplen >= IGMP_V3_QUERY_MINLEN) {
1728
516
      queryver = IGMP_VERSION_3;
1729
516
    } else {
1730
19
      IGMPSTAT_INC(igps_rcv_tooshort);
1731
19
      OIGMPSTAT_INC(igps_rcv_tooshort);
1732
19
      m_freem(m);
1733
19
      return;
1734
19
    }
1735
1736
2.14k
    OIGMPSTAT_INC(igps_rcv_queries);
1737
1738
2.14k
    switch (queryver) {
1739
212
    case IGMP_VERSION_1:
1740
212
      IGMPSTAT_INC(igps_rcv_v1v2_queries);
1741
212
      if (!igmp_v1enable) {
1742
0
        break;
1743
0
      }
1744
212
      if (igmp_input_v1_query(ifp, ip, igmp) != 0) {
1745
0
        m_freem(m);
1746
0
        return;
1747
0
      }
1748
212
      break;
1749
1750
1.42k
    case IGMP_VERSION_2:
1751
1.42k
      IGMPSTAT_INC(igps_rcv_v1v2_queries);
1752
1.42k
      if (!igmp_v2enable) {
1753
0
        break;
1754
0
      }
1755
1.42k
      if (igmp_input_v2_query(ifp, ip, igmp) != 0) {
1756
0
        m_freem(m);
1757
0
        return;
1758
0
      }
1759
1.42k
      break;
1760
1761
1.42k
    case IGMP_VERSION_3: {
1762
516
      struct igmpv3 *igmpv3;
1763
516
      uint16_t igmpv3len;
1764
516
      uint16_t srclen;
1765
516
      int nsrc;
1766
1767
516
      IGMPSTAT_INC(igps_rcv_v3_queries);
1768
516
      igmpv3 = (struct igmpv3 *)igmp;
1769
      /*
1770
       * Validate length based on source count.
1771
       */
1772
516
      nsrc = ntohs(igmpv3->igmp_numsrc);
1773
      /*
1774
       * The max vaue of nsrc is limited by the
1775
       * MTU of the network on which the datagram
1776
       * is received
1777
       */
1778
516
      if (nsrc < 0 || nsrc > IGMP_V3_QUERY_MAX_SRCS) {
1779
24
        IGMPSTAT_INC(igps_rcv_tooshort);
1780
24
        OIGMPSTAT_INC(igps_rcv_tooshort);
1781
24
        m_freem(m);
1782
24
        return;
1783
24
      }
1784
492
      srclen = sizeof(struct in_addr) * (uint16_t)nsrc;
1785
492
      if (igmplen < (IGMP_V3_QUERY_MINLEN + srclen)) {
1786
60
        IGMPSTAT_INC(igps_rcv_tooshort);
1787
60
        OIGMPSTAT_INC(igps_rcv_tooshort);
1788
60
        m_freem(m);
1789
60
        return;
1790
60
      }
1791
432
      igmpv3len = IGMP_V3_QUERY_MINLEN + srclen;
1792
      /*
1793
       * A bit more expensive than M_STRUCT_GET,
1794
       * but ensures alignment.
1795
       */
1796
432
      M_STRUCT_GET0(igmpv3, struct igmpv3 *, m,
1797
432
          off, igmpv3len);
1798
432
      if (igmpv3 == NULL) {
1799
0
        IGMPSTAT_INC(igps_rcv_tooshort);
1800
0
        OIGMPSTAT_INC(igps_rcv_tooshort);
1801
0
        return;
1802
0
      }
1803
      /*
1804
       * N.B.: we assume the packet was correctly
1805
       * aligned in ip_input.
1806
       */
1807
432
      if (igmp_input_v3_query(ifp, ip, igmpv3) != 0) {
1808
0
        m_freem(m);
1809
0
        return;
1810
0
      }
1811
432
    }
1812
432
    break;
1813
2.14k
    }
1814
2.06k
    break;
1815
1816
2.06k
  case IGMP_v1_HOST_MEMBERSHIP_REPORT:
1817
145
    if (!igmp_v1enable) {
1818
0
      break;
1819
0
    }
1820
145
    if (igmp_input_v1_report(ifp, m, ip, igmp) != 0) {
1821
0
      m_freem(m);
1822
0
      return;
1823
0
    }
1824
145
    break;
1825
1826
384
  case IGMP_v2_HOST_MEMBERSHIP_REPORT:
1827
384
    if (!igmp_v2enable) {
1828
0
      break;
1829
0
    }
1830
384
    if (!ip_checkrouteralert(m)) {
1831
373
      IGMPSTAT_INC(igps_rcv_nora);
1832
373
    }
1833
384
    if (igmp_input_v2_report(ifp, m, ip, igmp) != 0) {
1834
0
      m_freem(m);
1835
0
      return;
1836
0
    }
1837
384
    break;
1838
1839
384
  case IGMP_v3_HOST_MEMBERSHIP_REPORT:
1840
    /*
1841
     * Hosts do not need to process IGMPv3 membership reports,
1842
     * as report suppression is no longer required.
1843
     */
1844
52
    if (!ip_checkrouteralert(m)) {
1845
32
      IGMPSTAT_INC(igps_rcv_nora);
1846
32
    }
1847
52
    break;
1848
1849
73
  default:
1850
73
    break;
1851
2.82k
  }
1852
1853
2.71k
  IGMP_LOCK_ASSERT_NOTHELD();
1854
  /*
1855
   * Pass all valid IGMP packets up to any process(es) listening on a
1856
   * raw IGMP socket.
1857
   */
1858
2.71k
  rip_input(m, off);
1859
2.71k
}
1860
1861
/*
1862
 * Schedule IGMP timer based on various parameters; caller must ensure that
1863
 * lock ordering is maintained as this routine acquires IGMP global lock.
1864
 */
1865
void
1866
igmp_set_timeout(struct igmp_tparams *itp)
1867
2.06k
{
1868
2.06k
  IGMP_LOCK_ASSERT_NOTHELD();
1869
2.06k
  VERIFY(itp != NULL);
1870
1871
2.06k
  if (itp->qpt != 0 || itp->it != 0 || itp->cst != 0 || itp->sct != 0) {
1872
121
    IGMP_LOCK();
1873
121
    if (itp->qpt != 0) {
1874
121
      querier_present_timers_running = 1;
1875
121
    }
1876
121
    if (itp->it != 0) {
1877
0
      interface_timers_running = 1;
1878
0
    }
1879
121
    if (itp->cst != 0) {
1880
0
      current_state_timers_running = 1;
1881
0
    }
1882
121
    if (itp->sct != 0) {
1883
0
      state_change_timers_running = 1;
1884
0
    }
1885
    // igmp_sched_timeout();
1886
121
    IGMP_UNLOCK();
1887
121
  }
1888
2.06k
}
1889
1890
/*
1891
 * IGMP timer handler (per 1 second).
1892
 */
1893
void
1894
igmp_timeout(void *arg)
1895
408k
{
1896
408k
#pragma unused(arg)
1897
408k
  struct ifqueue           scq;   /* State-change packets */
1898
408k
  struct ifqueue           qrq;   /* Query response packets */
1899
408k
  struct ifnet            *ifp;
1900
408k
  struct igmp_ifinfo      *igi;
1901
408k
  struct in_multi         *inm;
1902
408k
  unsigned int             loop = 0, uri_sec = 0;
1903
408k
  SLIST_HEAD(, in_multi)  inm_dthead;
1904
1905
408k
  SLIST_INIT(&inm_dthead);
1906
1907
  /*
1908
   * Update coarse-grained networking timestamp (in sec.); the idea
1909
   * is to piggy-back on the timeout callout to update the counter
1910
   * returnable via net_uptime().
1911
   */
1912
408k
  net_update_uptime();
1913
1914
408k
  IGMP_LOCK();
1915
1916
408k
  IGMP_PRINTF(("%s: qpt %d, it %d, cst %d, sct %d\n", __func__,
1917
408k
      querier_present_timers_running, interface_timers_running,
1918
408k
      current_state_timers_running, state_change_timers_running));
1919
1920
  /*
1921
   * IGMPv1/v2 querier present timer processing.
1922
   */
1923
408k
  if (querier_present_timers_running) {
1924
5.00k
    querier_present_timers_running = 0;
1925
10.0k
    LIST_FOREACH(igi, &igi_head, igi_link) {
1926
10.0k
      IGI_LOCK(igi);
1927
10.0k
      igmp_v1v2_process_querier_timers(igi);
1928
10.0k
      if (igi->igi_v1_timer > 0 || igi->igi_v2_timer > 0) {
1929
4.98k
        querier_present_timers_running = 1;
1930
4.98k
      }
1931
10.0k
      IGI_UNLOCK(igi);
1932
10.0k
    }
1933
5.00k
  }
1934
1935
  /*
1936
   * IGMPv3 General Query response timer processing.
1937
   */
1938
408k
  if (interface_timers_running) {
1939
0
    IGMP_PRINTF(("%s: interface timers running\n", __func__));
1940
0
    interface_timers_running = 0;
1941
0
    LIST_FOREACH(igi, &igi_head, igi_link) {
1942
0
      IGI_LOCK(igi);
1943
0
      if (igi->igi_version != IGMP_VERSION_3) {
1944
0
        IGI_UNLOCK(igi);
1945
0
        continue;
1946
0
      }
1947
0
      if (igi->igi_v3_timer == 0) {
1948
        /* Do nothing. */
1949
0
      } else if (--igi->igi_v3_timer == 0) {
1950
0
        if (igmp_v3_dispatch_general_query(igi) > 0) {
1951
0
          interface_timers_running = 1;
1952
0
        }
1953
0
      } else {
1954
0
        interface_timers_running = 1;
1955
0
      }
1956
0
      IGI_UNLOCK(igi);
1957
0
    }
1958
0
  }
1959
1960
408k
  if (!current_state_timers_running &&
1961
408k
      !state_change_timers_running) {
1962
408k
    goto out_locked;
1963
408k
  }
1964
1965
0
  current_state_timers_running = 0;
1966
0
  state_change_timers_running = 0;
1967
1968
0
  memset(&qrq, 0, sizeof(struct ifqueue));
1969
0
  qrq.ifq_maxlen = IGMP_MAX_G_GS_PACKETS;
1970
1971
0
  memset(&scq, 0, sizeof(struct ifqueue));
1972
0
  scq.ifq_maxlen =  IGMP_MAX_STATE_CHANGE_PACKETS;
1973
1974
0
  IGMP_PRINTF(("%s: state change timers running\n", __func__));
1975
1976
  /*
1977
   * IGMPv1/v2/v3 host report and state-change timer processing.
1978
   * Note: Processing a v3 group timer may remove a node.
1979
   */
1980
0
  LIST_FOREACH(igi, &igi_head, igi_link) {
1981
0
    struct in_multistep step;
1982
1983
0
    IGI_LOCK(igi);
1984
0
    ifp = igi->igi_ifp;
1985
0
    loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
1986
0
    uri_sec = IGMP_RANDOM_DELAY(igi->igi_uri);
1987
0
    IGI_UNLOCK(igi);
1988
1989
0
    in_multihead_lock_shared();
1990
0
    IN_FIRST_MULTI(step, inm);
1991
0
    while (inm != NULL) {
1992
0
      INM_LOCK(inm);
1993
0
      if (inm->inm_ifp != ifp) {
1994
0
        goto next;
1995
0
      }
1996
1997
0
      IGI_LOCK(igi);
1998
0
      switch (igi->igi_version) {
1999
0
      case IGMP_VERSION_1:
2000
0
      case IGMP_VERSION_2:
2001
0
        igmp_v1v2_process_group_timer(inm,
2002
0
            igi->igi_version);
2003
0
        break;
2004
0
      case IGMP_VERSION_3:
2005
0
        igmp_v3_process_group_timers(igi, &qrq,
2006
0
            &scq, inm, uri_sec);
2007
0
        break;
2008
0
      }
2009
0
      IGI_UNLOCK(igi);
2010
0
next:
2011
0
      INM_UNLOCK(inm);
2012
0
      IN_NEXT_MULTI(step, inm);
2013
0
    }
2014
0
    in_multihead_lock_done();
2015
2016
0
    IGI_LOCK(igi);
2017
0
    if (igi->igi_version == IGMP_VERSION_1 ||
2018
0
        igi->igi_version == IGMP_VERSION_2) {
2019
0
      igmp_dispatch_queue(igi, &igi->igi_v2q, 0, loop);
2020
0
    } else if (igi->igi_version == IGMP_VERSION_3) {
2021
0
      IGI_UNLOCK(igi);
2022
0
      igmp_dispatch_queue(NULL, &qrq, 0, loop);
2023
0
      igmp_dispatch_queue(NULL, &scq, 0, loop);
2024
0
      VERIFY(qrq.ifq_len == 0);
2025
0
      VERIFY(scq.ifq_len == 0);
2026
0
      IGI_LOCK(igi);
2027
0
    }
2028
    /*
2029
     * In case there are still any pending membership reports
2030
     * which didn't get drained at version change time.
2031
     */
2032
0
    IF_DRAIN(&igi->igi_v2q);
2033
    /*
2034
     * Release all deferred inm records, and drain any locally
2035
     * enqueued packets; do it even if the current IGMP version
2036
     * for the link is no longer IGMPv3, in order to handle the
2037
     * version change case.
2038
     */
2039
0
    igmp_flush_relq(igi, (struct igmp_inm_relhead *)&inm_dthead);
2040
0
    VERIFY(SLIST_EMPTY(&igi->igi_relinmhead));
2041
0
    IGI_UNLOCK(igi);
2042
2043
0
    IF_DRAIN(&qrq);
2044
0
    IF_DRAIN(&scq);
2045
0
  }
2046
2047
408k
out_locked:
2048
  /* re-arm the timer if there's work to do */
2049
408k
  igmp_timeout_run = 0;
2050
408k
  igmp_sched_timeout();
2051
408k
  IGMP_UNLOCK();
2052
2053
  /* Now that we're dropped all locks, release detached records */
2054
408k
  IGMP_REMOVE_DETACHED_INM(&inm_dthead);
2055
408k
}
2056
2057
void
2058
igmp_sched_timeout(void)
2059
408k
{
2060
408k
  IGMP_LOCK_ASSERT_HELD();
2061
2062
408k
  if (!igmp_timeout_run &&
2063
408k
      (querier_present_timers_running || current_state_timers_running ||
2064
408k
      interface_timers_running || state_change_timers_running)) {
2065
4.98k
    igmp_timeout_run = 1;
2066
    // timeout(igmp_timeout, NULL, hz);
2067
4.98k
  }
2068
408k
}
2069
2070
/*
2071
 * Free the in_multi reference(s) for this IGMP lifecycle.
2072
 *
2073
 * Caller must be holding igi_lock.
2074
 */
2075
static void
2076
igmp_flush_relq(struct igmp_ifinfo *igi, struct igmp_inm_relhead *inm_dthead)
2077
0
{
2078
0
  struct in_multi *inm;
2079
2080
0
again:
2081
0
  IGI_LOCK_ASSERT_HELD(igi);
2082
0
  inm = SLIST_FIRST(&igi->igi_relinmhead);
2083
0
  if (inm != NULL) {
2084
0
    int lastref;
2085
2086
0
    SLIST_REMOVE_HEAD(&igi->igi_relinmhead, inm_nrele);
2087
0
    IGI_UNLOCK(igi);
2088
2089
0
    in_multihead_lock_exclusive();
2090
0
    INM_LOCK(inm);
2091
0
    VERIFY(inm->inm_nrelecnt != 0);
2092
0
    inm->inm_nrelecnt--;
2093
0
    lastref = in_multi_detach(inm);
2094
0
    VERIFY(!lastref || (!(inm->inm_debug & IFD_ATTACHED) &&
2095
0
        inm->inm_reqcnt == 0));
2096
0
    INM_UNLOCK(inm);
2097
0
    in_multihead_lock_done();
2098
    /* from igi_relinmhead */
2099
0
    INM_REMREF(inm);
2100
    /* from in_multihead list */
2101
0
    if (lastref) {
2102
      /*
2103
       * Defer releasing our final reference, as we
2104
       * are holding the IGMP lock at this point, and
2105
       * we could end up with locking issues later on
2106
       * (while issuing SIOCDELMULTI) when this is the
2107
       * final reference count.  Let the caller do it
2108
       * when it is safe.
2109
       */
2110
0
      IGMP_ADD_DETACHED_INM(inm_dthead, inm);
2111
0
    }
2112
0
    IGI_LOCK(igi);
2113
0
    goto again;
2114
0
  }
2115
0
}
2116
2117
/*
2118
 * Update host report group timer for IGMPv1/v2.
2119
 * Will update the global pending timer flags.
2120
 */
2121
static void
2122
igmp_v1v2_process_group_timer(struct in_multi *inm, const int igmp_version)
2123
0
{
2124
0
  int report_timer_expired;
2125
2126
0
  IGMP_LOCK_ASSERT_HELD();
2127
0
  INM_LOCK_ASSERT_HELD(inm);
2128
0
  IGI_LOCK_ASSERT_HELD(inm->inm_igi);
2129
2130
0
  if (inm->inm_timer == 0) {
2131
0
    report_timer_expired = 0;
2132
0
  } else if (--inm->inm_timer == 0) {
2133
0
    report_timer_expired = 1;
2134
0
  } else {
2135
0
    current_state_timers_running = 1;
2136
    /* caller will schedule timer */
2137
0
    return;
2138
0
  }
2139
2140
0
  switch (inm->inm_state) {
2141
0
  case IGMP_NOT_MEMBER:
2142
0
  case IGMP_SILENT_MEMBER:
2143
0
  case IGMP_IDLE_MEMBER:
2144
0
  case IGMP_LAZY_MEMBER:
2145
0
  case IGMP_SLEEPING_MEMBER:
2146
0
  case IGMP_AWAKENING_MEMBER:
2147
0
    break;
2148
0
  case IGMP_REPORTING_MEMBER:
2149
0
    if (report_timer_expired) {
2150
0
      inm->inm_state = IGMP_IDLE_MEMBER;
2151
0
      (void) igmp_v1v2_queue_report(inm,
2152
0
          (igmp_version == IGMP_VERSION_2) ?
2153
0
          IGMP_v2_HOST_MEMBERSHIP_REPORT :
2154
0
          IGMP_v1_HOST_MEMBERSHIP_REPORT);
2155
0
      INM_LOCK_ASSERT_HELD(inm);
2156
0
      IGI_LOCK_ASSERT_HELD(inm->inm_igi);
2157
0
    }
2158
0
    break;
2159
0
  case IGMP_G_QUERY_PENDING_MEMBER:
2160
0
  case IGMP_SG_QUERY_PENDING_MEMBER:
2161
0
  case IGMP_LEAVING_MEMBER:
2162
0
    break;
2163
0
  }
2164
0
}
2165
2166
/*
2167
 * Update a group's timers for IGMPv3.
2168
 * Will update the global pending timer flags.
2169
 * Note: Unlocked read from igi.
2170
 */
2171
static void
2172
igmp_v3_process_group_timers(struct igmp_ifinfo *igi,
2173
    struct ifqueue *qrq, struct ifqueue *scq,
2174
    struct in_multi *inm, const unsigned int uri_sec)
2175
0
{
2176
0
  int query_response_timer_expired;
2177
0
  int state_change_retransmit_timer_expired;
2178
2179
0
  IGMP_LOCK_ASSERT_HELD();
2180
0
  INM_LOCK_ASSERT_HELD(inm);
2181
0
  IGI_LOCK_ASSERT_HELD(igi);
2182
0
  VERIFY(igi == inm->inm_igi);
2183
2184
0
  query_response_timer_expired = 0;
2185
0
  state_change_retransmit_timer_expired = 0;
2186
2187
  /*
2188
   * During a transition from v1/v2 compatibility mode back to v3,
2189
   * a group record in REPORTING state may still have its group
2190
   * timer active. This is a no-op in this function; it is easier
2191
   * to deal with it here than to complicate the timeout path.
2192
   */
2193
0
  if (inm->inm_timer == 0) {
2194
0
    query_response_timer_expired = 0;
2195
0
  } else if (--inm->inm_timer == 0) {
2196
0
    query_response_timer_expired = 1;
2197
0
  } else {
2198
0
    current_state_timers_running = 1;
2199
    /* caller will schedule timer */
2200
0
  }
2201
2202
0
  if (inm->inm_sctimer == 0) {
2203
0
    state_change_retransmit_timer_expired = 0;
2204
0
  } else if (--inm->inm_sctimer == 0) {
2205
0
    state_change_retransmit_timer_expired = 1;
2206
0
  } else {
2207
0
    state_change_timers_running = 1;
2208
    /* caller will schedule timer */
2209
0
  }
2210
2211
  /* We are in timer callback, so be quick about it. */
2212
0
  if (!state_change_retransmit_timer_expired &&
2213
0
      !query_response_timer_expired) {
2214
0
    return;
2215
0
  }
2216
2217
0
  switch (inm->inm_state) {
2218
0
  case IGMP_NOT_MEMBER:
2219
0
  case IGMP_SILENT_MEMBER:
2220
0
  case IGMP_SLEEPING_MEMBER:
2221
0
  case IGMP_LAZY_MEMBER:
2222
0
  case IGMP_AWAKENING_MEMBER:
2223
0
  case IGMP_IDLE_MEMBER:
2224
0
    break;
2225
0
  case IGMP_G_QUERY_PENDING_MEMBER:
2226
0
  case IGMP_SG_QUERY_PENDING_MEMBER:
2227
    /*
2228
     * Respond to a previously pending Group-Specific
2229
     * or Group-and-Source-Specific query by enqueueing
2230
     * the appropriate Current-State report for
2231
     * immediate transmission.
2232
     */
2233
0
    if (query_response_timer_expired) {
2234
0
      int retval;
2235
2236
0
      retval = igmp_v3_enqueue_group_record(qrq, inm, 0, 1,
2237
0
          (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER));
2238
0
      IGMP_PRINTF(("%s: enqueue record = %d\n",
2239
0
          __func__, retval));
2240
0
      inm->inm_state = IGMP_REPORTING_MEMBER;
2241
      /* XXX Clear recorded sources for next time. */
2242
0
      inm_clear_recorded(inm);
2243
0
    }
2244
0
    OS_FALLTHROUGH;
2245
0
  case IGMP_REPORTING_MEMBER:
2246
0
  case IGMP_LEAVING_MEMBER:
2247
0
    if (state_change_retransmit_timer_expired) {
2248
      /*
2249
       * State-change retransmission timer fired.
2250
       * If there are any further pending retransmissions,
2251
       * set the global pending state-change flag, and
2252
       * reset the timer.
2253
       */
2254
0
      if (--inm->inm_scrv > 0) {
2255
0
        inm->inm_sctimer = (uint16_t)uri_sec;
2256
0
        state_change_timers_running = 1;
2257
        /* caller will schedule timer */
2258
0
      }
2259
      /*
2260
       * Retransmit the previously computed state-change
2261
       * report. If there are no further pending
2262
       * retransmissions, the mbuf queue will be consumed.
2263
       * Update T0 state to T1 as we have now sent
2264
       * a state-change.
2265
       */
2266
0
      (void) igmp_v3_merge_state_changes(inm, scq);
2267
2268
0
      inm_commit(inm);
2269
0
      IGMP_INET_PRINTF(inm->inm_addr,
2270
0
          ("%s: T1 -> T0 for %s/%s\n", __func__,
2271
0
          _igmp_inet_buf, if_name(inm->inm_ifp)));
2272
2273
      /*
2274
       * If we are leaving the group for good, make sure
2275
       * we release IGMP's reference to it.
2276
       * This release must be deferred using a SLIST,
2277
       * as we are called from a loop which traverses
2278
       * the in_multihead list.
2279
       */
2280
0
      if (inm->inm_state == IGMP_LEAVING_MEMBER &&
2281
0
          inm->inm_scrv == 0) {
2282
0
        inm->inm_state = IGMP_NOT_MEMBER;
2283
        /*
2284
         * A reference has already been held in
2285
         * igmp_final_leave() for this inm, so
2286
         * no need to hold another one.  We also
2287
         * bumped up its request count then, so
2288
         * that it stays in in_multihead.  Both
2289
         * of them will be released when it is
2290
         * dequeued later on.
2291
         */
2292
0
        VERIFY(inm->inm_nrelecnt != 0);
2293
0
        SLIST_INSERT_HEAD(&igi->igi_relinmhead,
2294
0
            inm, inm_nrele);
2295
0
      }
2296
0
    }
2297
0
    break;
2298
0
  }
2299
0
}
2300
2301
/*
2302
 * Suppress a group's pending response to a group or source/group query.
2303
 *
2304
 * Do NOT suppress state changes. This leads to IGMPv3 inconsistency.
2305
 * Do NOT update ST1/ST0 as this operation merely suppresses
2306
 * the currently pending group record.
2307
 * Do NOT suppress the response to a general query. It is possible but
2308
 * it would require adding another state or flag.
2309
 */
2310
static void
2311
igmp_v3_suppress_group_record(struct in_multi *inm)
2312
0
{
2313
0
  INM_LOCK_ASSERT_HELD(inm);
2314
0
  IGI_LOCK_ASSERT_HELD(inm->inm_igi);
2315
2316
0
  VERIFY(inm->inm_igi->igi_version == IGMP_VERSION_3);
2317
2318
0
  if (inm->inm_state != IGMP_G_QUERY_PENDING_MEMBER ||
2319
0
      inm->inm_state != IGMP_SG_QUERY_PENDING_MEMBER) {
2320
0
    return;
2321
0
  }
2322
2323
0
  if (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) {
2324
0
    inm_clear_recorded(inm);
2325
0
  }
2326
2327
0
  inm->inm_timer = 0;
2328
0
  inm->inm_state = IGMP_REPORTING_MEMBER;
2329
0
}
2330
2331
/*
2332
 * Switch to a different IGMP version on the given interface,
2333
 * as per Section 7.2.1.
2334
 */
2335
static uint32_t
2336
igmp_set_version(struct igmp_ifinfo *igi, const int igmp_version)
2337
121
{
2338
121
  int old_version_timer;
2339
2340
121
  IGI_LOCK_ASSERT_HELD(igi);
2341
2342
121
  IGMP_PRINTF(("%s: switching to v%d on ifp 0x%llx(%s)\n", __func__,
2343
121
      igmp_version, (uint64_t)VM_KERNEL_ADDRPERM(igi->igi_ifp),
2344
121
      if_name(igi->igi_ifp)));
2345
2346
121
  if (igmp_version == IGMP_VERSION_1 || igmp_version == IGMP_VERSION_2) {
2347
    /*
2348
     * Compute the "Older Version Querier Present" timer as per
2349
     * Section 8.12, in seconds.
2350
     */
2351
121
    old_version_timer = igi->igi_rv * igi->igi_qi + igi->igi_qri;
2352
2353
121
    if (igmp_version == IGMP_VERSION_1) {
2354
100
      igi->igi_v1_timer = old_version_timer;
2355
100
      igi->igi_v2_timer = 0;
2356
100
    } else if (igmp_version == IGMP_VERSION_2) {
2357
21
      igi->igi_v1_timer = 0;
2358
21
      igi->igi_v2_timer = old_version_timer;
2359
21
    }
2360
121
  }
2361
2362
121
  if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
2363
21
    if (igi->igi_version != IGMP_VERSION_2) {
2364
1
      igi->igi_version = IGMP_VERSION_2;
2365
1
      igmp_v3_cancel_link_timers(igi);
2366
1
    }
2367
100
  } else if (igi->igi_v1_timer > 0) {
2368
100
    if (igi->igi_version != IGMP_VERSION_1) {
2369
1
      igi->igi_version = IGMP_VERSION_1;
2370
1
      igmp_v3_cancel_link_timers(igi);
2371
1
    }
2372
100
  }
2373
2374
121
  IGI_LOCK_ASSERT_HELD(igi);
2375
2376
121
  return MAX(igi->igi_v1_timer, igi->igi_v2_timer);
2377
121
}
2378
2379
/*
2380
 * Cancel pending IGMPv3 timers for the given link and all groups
2381
 * joined on it; state-change, general-query, and group-query timers.
2382
 *
2383
 * Only ever called on a transition from v3 to Compatibility mode. Kill
2384
 * the timers stone dead (this may be expensive for large N groups), they
2385
 * will be restarted if Compatibility Mode deems that they must be due to
2386
 * query processing.
2387
 */
2388
static void
2389
igmp_v3_cancel_link_timers(struct igmp_ifinfo *igi)
2390
2
{
2391
2
  struct ifnet            *ifp;
2392
2
  struct in_multi         *inm;
2393
2
  struct in_multistep     step;
2394
2395
2
  IGI_LOCK_ASSERT_HELD(igi);
2396
2397
2
  IGMP_PRINTF(("%s: cancel v3 timers on ifp 0x%llx(%s)\n", __func__,
2398
2
      (uint64_t)VM_KERNEL_ADDRPERM(igi->igi_ifp), if_name(igi->igi_ifp)));
2399
2400
  /*
2401
   * Stop the v3 General Query Response on this link stone dead.
2402
   * If timer is woken up due to interface_timers_running,
2403
   * the flag will be cleared if there are no pending link timers.
2404
   */
2405
2
  igi->igi_v3_timer = 0;
2406
2407
  /*
2408
   * Now clear the current-state and state-change report timers
2409
   * for all memberships scoped to this link.
2410
   */
2411
2
  ifp = igi->igi_ifp;
2412
2
  IGI_UNLOCK(igi);
2413
2414
2
  in_multihead_lock_shared();
2415
2
  IN_FIRST_MULTI(step, inm);
2416
4
  while (inm != NULL) {
2417
2
    INM_LOCK(inm);
2418
2
    if (inm->inm_ifp != ifp) {
2419
0
      goto next;
2420
0
    }
2421
2422
2
    switch (inm->inm_state) {
2423
0
    case IGMP_NOT_MEMBER:
2424
2
    case IGMP_SILENT_MEMBER:
2425
2
    case IGMP_IDLE_MEMBER:
2426
2
    case IGMP_LAZY_MEMBER:
2427
2
    case IGMP_SLEEPING_MEMBER:
2428
2
    case IGMP_AWAKENING_MEMBER:
2429
      /*
2430
       * These states are either not relevant in v3 mode,
2431
       * or are unreported. Do nothing.
2432
       */
2433
2
      break;
2434
0
    case IGMP_LEAVING_MEMBER:
2435
      /*
2436
       * If we are leaving the group and switching to
2437
       * compatibility mode, we need to release the final
2438
       * reference held for issuing the INCLUDE {}, and
2439
       * transition to REPORTING to ensure the host leave
2440
       * message is sent upstream to the old querier --
2441
       * transition to NOT would lose the leave and race.
2442
       * During igmp_final_leave(), we bumped up both the
2443
       * request and reference counts.  Since we cannot
2444
       * call in_multi_detach() here, defer this task to
2445
       * the timer routine.
2446
       */
2447
0
      VERIFY(inm->inm_nrelecnt != 0);
2448
0
      IGI_LOCK(igi);
2449
0
      SLIST_INSERT_HEAD(&igi->igi_relinmhead, inm, inm_nrele);
2450
0
      IGI_UNLOCK(igi);
2451
0
      OS_FALLTHROUGH;
2452
0
    case IGMP_G_QUERY_PENDING_MEMBER:
2453
0
    case IGMP_SG_QUERY_PENDING_MEMBER:
2454
0
      inm_clear_recorded(inm);
2455
0
      OS_FALLTHROUGH;
2456
0
    case IGMP_REPORTING_MEMBER:
2457
0
      inm->inm_state = IGMP_REPORTING_MEMBER;
2458
0
      break;
2459
2
    }
2460
    /*
2461
     * Always clear state-change and group report timers.
2462
     * Free any pending IGMPv3 state-change records.
2463
     */
2464
2
    inm->inm_sctimer = 0;
2465
2
    inm->inm_timer = 0;
2466
2
    IF_DRAIN(&inm->inm_scq);
2467
2
next:
2468
2
    INM_UNLOCK(inm);
2469
2
    IN_NEXT_MULTI(step, inm);
2470
2
  }
2471
2
  in_multihead_lock_done();
2472
2473
2
  IGI_LOCK(igi);
2474
2
}
2475
2476
/*
2477
 * Update the Older Version Querier Present timers for a link.
2478
 * See Section 7.2.1 of RFC 3376.
2479
 */
2480
static void
2481
igmp_v1v2_process_querier_timers(struct igmp_ifinfo *igi)
2482
10.0k
{
2483
10.0k
  IGI_LOCK_ASSERT_HELD(igi);
2484
2485
10.0k
  if (igi->igi_v1_timer == 0 && igi->igi_v2_timer == 0) {
2486
    /*
2487
     * IGMPv1 and IGMPv2 Querier Present timers expired.
2488
     *
2489
     * Revert to IGMPv3.
2490
     */
2491
5.00k
    if (igi->igi_version != IGMP_VERSION_3) {
2492
0
      IGMP_PRINTF(("%s: transition from v%d -> v%d "
2493
0
          "on 0x%llx(%s)\n", __func__,
2494
0
          igi->igi_version, IGMP_VERSION_3,
2495
0
          (uint64_t)VM_KERNEL_ADDRPERM(igi->igi_ifp),
2496
0
          if_name(igi->igi_ifp)));
2497
0
      igi->igi_version = IGMP_VERSION_3;
2498
0
      IF_DRAIN(&igi->igi_v2q);
2499
0
    }
2500
5.00k
  } else if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
2501
    /*
2502
     * IGMPv1 Querier Present timer expired,
2503
     * IGMPv2 Querier Present timer running.
2504
     * If IGMPv2 was disabled since last timeout,
2505
     * revert to IGMPv3.
2506
     * If IGMPv2 is enabled, revert to IGMPv2.
2507
     */
2508
697
    if (!igmp_v2enable) {
2509
0
      IGMP_PRINTF(("%s: transition from v%d -> v%d "
2510
0
          "on 0x%llx(%s%d)\n", __func__,
2511
0
          igi->igi_version, IGMP_VERSION_3,
2512
0
          (uint64_t)VM_KERNEL_ADDRPERM(igi->igi_ifp),
2513
0
          igi->igi_ifp->if_name, igi->igi_ifp->if_unit));
2514
0
      igi->igi_v2_timer = 0;
2515
0
      igi->igi_version = IGMP_VERSION_3;
2516
0
      IF_DRAIN(&igi->igi_v2q);
2517
697
    } else {
2518
697
      --igi->igi_v2_timer;
2519
697
      if (igi->igi_version != IGMP_VERSION_2) {
2520
0
        IGMP_PRINTF(("%s: transition from v%d -> v%d "
2521
0
            "on 0x%llx(%s)\n", __func__,
2522
0
            igi->igi_version, IGMP_VERSION_2,
2523
0
            (uint64_t)VM_KERNEL_ADDRPERM(igi->igi_ifp),
2524
0
            if_name(igi->igi_ifp)));
2525
0
        igi->igi_version = IGMP_VERSION_2;
2526
0
        IF_DRAIN(&igi->igi_gq);
2527
0
        igmp_v3_cancel_link_timers(igi);
2528
0
      }
2529
697
    }
2530
4.30k
  } else if (igi->igi_v1_timer > 0) {
2531
    /*
2532
     * IGMPv1 Querier Present timer running.
2533
     * Stop IGMPv2 timer if running.
2534
     *
2535
     * If IGMPv1 was disabled since last timeout,
2536
     * revert to IGMPv3.
2537
     * If IGMPv1 is enabled, reset IGMPv2 timer if running.
2538
     */
2539
4.30k
    if (!igmp_v1enable) {
2540
0
      IGMP_PRINTF(("%s: transition from v%d -> v%d "
2541
0
          "on 0x%llx(%s%d)\n", __func__,
2542
0
          igi->igi_version, IGMP_VERSION_3,
2543
0
          (uint64_t)VM_KERNEL_ADDRPERM(igi->igi_ifp),
2544
0
          igi->igi_ifp->if_name, igi->igi_ifp->if_unit));
2545
0
      igi->igi_v1_timer = 0;
2546
0
      igi->igi_version = IGMP_VERSION_3;
2547
0
      IF_DRAIN(&igi->igi_v2q);
2548
4.30k
    } else {
2549
4.30k
      --igi->igi_v1_timer;
2550
4.30k
    }
2551
4.30k
    if (igi->igi_v2_timer > 0) {
2552
0
      IGMP_PRINTF(("%s: cancel v2 timer on 0x%llx(%s%d)\n",
2553
0
          __func__,
2554
0
          (uint64_t)VM_KERNEL_ADDRPERM(igi->igi_ifp),
2555
0
          igi->igi_ifp->if_name, igi->igi_ifp->if_unit));
2556
0
      igi->igi_v2_timer = 0;
2557
0
    }
2558
4.30k
  }
2559
10.0k
}
2560
2561
/*
2562
 * Dispatch an IGMPv1/v2 host report or leave message.
2563
 * These are always small enough to fit inside a single mbuf.
2564
 */
2565
static int
2566
igmp_v1v2_queue_report(struct in_multi *inm, const int type)
2567
0
{
2568
0
  struct ifnet            *ifp;
2569
0
  struct igmp             *igmp;
2570
0
  struct ip               *ip;
2571
0
  struct mbuf             *m;
2572
0
  int                     error = 0;
2573
2574
0
  INM_LOCK_ASSERT_HELD(inm);
2575
0
  IGI_LOCK_ASSERT_HELD(inm->inm_igi);
2576
2577
0
  ifp = inm->inm_ifp;
2578
2579
0
  MGETHDR(m, M_DONTWAIT, MT_DATA);
2580
0
  if (m == NULL) {
2581
0
    return ENOMEM;
2582
0
  }
2583
0
  MH_ALIGN(m, sizeof(struct ip) + sizeof(struct igmp));
2584
2585
0
  m->m_pkthdr.len = sizeof(struct ip) + sizeof(struct igmp);
2586
2587
0
  m->m_data += sizeof(struct ip);
2588
0
  m->m_len = sizeof(struct igmp);
2589
2590
0
  igmp = mtod(m, struct igmp *);
2591
0
  igmp->igmp_type = (u_char)type;
2592
0
  igmp->igmp_code = 0;
2593
0
  igmp->igmp_group = inm->inm_addr;
2594
0
  igmp->igmp_cksum = 0;
2595
0
  igmp->igmp_cksum = in_cksum(m, sizeof(struct igmp));
2596
2597
0
  m->m_data -= sizeof(struct ip);
2598
0
  m->m_len += sizeof(struct ip);
2599
2600
0
  ip = mtod(m, struct ip *);
2601
0
  ip->ip_tos = 0;
2602
0
  ip->ip_len = sizeof(struct ip) + sizeof(struct igmp);
2603
0
  ip->ip_off = 0;
2604
0
  ip->ip_p = IPPROTO_IGMP;
2605
0
  ip->ip_src.s_addr = INADDR_ANY;
2606
2607
0
  if (type == IGMP_HOST_LEAVE_MESSAGE) {
2608
0
    ip->ip_dst.s_addr = htonl(INADDR_ALLRTRS_GROUP);
2609
0
  } else {
2610
0
    ip->ip_dst = inm->inm_addr;
2611
0
  }
2612
2613
0
  igmp_save_context(m, ifp);
2614
2615
0
  m->m_flags |= M_IGMPV2;
2616
0
  if (inm->inm_igi->igi_flags & IGIF_LOOPBACK) {
2617
0
    m->m_flags |= M_IGMP_LOOP;
2618
0
  }
2619
2620
  /*
2621
   * Due to the fact that at this point we are possibly holding
2622
   * in_multihead_lock in shared or exclusive mode, we can't call
2623
   * igmp_sendpkt() here since that will eventually call ip_output(),
2624
   * which will try to lock in_multihead_lock and cause a deadlock.
2625
   * Instead we defer the work to the igmp_timeout() thread, thus
2626
   * avoiding unlocking in_multihead_lock here.
2627
   */
2628
0
  if (IF_QFULL(&inm->inm_igi->igi_v2q)) {
2629
0
    IGMP_PRINTF(("%s: v1/v2 outbound queue full\n", __func__));
2630
0
    error = ENOMEM;
2631
0
    m_freem(m);
2632
0
  } else {
2633
0
    IF_ENQUEUE(&inm->inm_igi->igi_v2q, m);
2634
0
    VERIFY(error == 0);
2635
0
  }
2636
0
  return error;
2637
0
}
2638
2639
/*
2640
 * Process a state change from the upper layer for the given IPv4 group.
2641
 *
2642
 * Each socket holds a reference on the in_multi in its own ip_moptions.
2643
 * The socket layer will have made the necessary updates to the group
2644
 * state, it is now up to IGMP to issue a state change report if there
2645
 * has been any change between T0 (when the last state-change was issued)
2646
 * and T1 (now).
2647
 *
2648
 * We use the IGMPv3 state machine at group level. The IGMP module
2649
 * however makes the decision as to which IGMP protocol version to speak.
2650
 * A state change *from* INCLUDE {} always means an initial join.
2651
 * A state change *to* INCLUDE {} always means a final leave.
2652
 *
2653
 * FUTURE: If IGIF_V3LITE is enabled for this interface, then we can
2654
 * save ourselves a bunch of work; any exclusive mode groups need not
2655
 * compute source filter lists.
2656
 */
2657
int
2658
igmp_change_state(struct in_multi *inm, struct igmp_tparams *itp)
2659
1
{
2660
1
  struct igmp_ifinfo *igi;
2661
1
  struct ifnet *ifp;
2662
1
  int error = 0;
2663
2664
1
  VERIFY(itp != NULL);
2665
0
  bzero(itp, sizeof(*itp));
2666
2667
1
  INM_LOCK_ASSERT_HELD(inm);
2668
1
  VERIFY(inm->inm_igi != NULL);
2669
1
  IGI_LOCK_ASSERT_NOTHELD(inm->inm_igi);
2670
2671
  /*
2672
   * Try to detect if the upper layer just asked us to change state
2673
   * for an interface which has now gone away.
2674
   */
2675
1
  VERIFY(inm->inm_ifma != NULL);
2676
0
  ifp = inm->inm_ifma->ifma_ifp;
2677
  /*
2678
   * Sanity check that netinet's notion of ifp is the same as net's.
2679
   */
2680
1
  VERIFY(inm->inm_ifp == ifp);
2681
2682
1
  igi = IGMP_IFINFO(ifp);
2683
1
  VERIFY(igi != NULL);
2684
2685
  /*
2686
   * If we detect a state transition to or from MCAST_UNDEFINED
2687
   * for this group, then we are starting or finishing an IGMP
2688
   * life cycle for this group.
2689
   */
2690
1
  if (inm->inm_st[1].iss_fmode != inm->inm_st[0].iss_fmode) {
2691
1
    IGMP_PRINTF(("%s: inm transition %d -> %d\n", __func__,
2692
1
        inm->inm_st[0].iss_fmode, inm->inm_st[1].iss_fmode));
2693
1
    if (inm->inm_st[0].iss_fmode == MCAST_UNDEFINED) {
2694
1
      IGMP_PRINTF(("%s: initial join\n", __func__));
2695
1
      error = igmp_initial_join(inm, igi, itp);
2696
1
      goto out;
2697
1
    } else if (inm->inm_st[1].iss_fmode == MCAST_UNDEFINED) {
2698
0
      IGMP_PRINTF(("%s: final leave\n", __func__));
2699
0
      igmp_final_leave(inm, igi, itp);
2700
0
      goto out;
2701
0
    }
2702
1
  } else {
2703
0
    IGMP_PRINTF(("%s: filter set change\n", __func__));
2704
0
  }
2705
2706
0
  error = igmp_handle_state_change(inm, igi, itp);
2707
1
out:
2708
1
  return error;
2709
0
}
2710
2711
/*
2712
 * Perform the initial join for an IGMP group.
2713
 *
2714
 * When joining a group:
2715
 *  If the group should have its IGMP traffic suppressed, do nothing.
2716
 *  IGMPv1 starts sending IGMPv1 host membership reports.
2717
 *  IGMPv2 starts sending IGMPv2 host membership reports.
2718
 *  IGMPv3 will schedule an IGMPv3 state-change report containing the
2719
 *  initial state of the membership.
2720
 */
2721
static int
2722
igmp_initial_join(struct in_multi *inm, struct igmp_ifinfo *igi,
2723
    struct igmp_tparams *itp)
2724
1
{
2725
1
  struct ifnet            *ifp;
2726
1
  struct ifqueue          *ifq;
2727
1
  int                      error, retval, syncstates;
2728
2729
1
  INM_LOCK_ASSERT_HELD(inm);
2730
1
  IGI_LOCK_ASSERT_NOTHELD(igi);
2731
1
  VERIFY(itp != NULL);
2732
2733
1
  IGMP_INET_PRINTF(inm->inm_addr,
2734
1
      ("%s: initial join %s on ifp 0x%llx(%s)\n", __func__,
2735
1
      _igmp_inet_buf, (uint64_t)VM_KERNEL_ADDRPERM(inm->inm_ifp),
2736
1
      if_name(inm->inm_ifp)));
2737
2738
1
  error = 0;
2739
1
  syncstates = 1;
2740
2741
1
  ifp = inm->inm_ifp;
2742
2743
1
  IGI_LOCK(igi);
2744
1
  VERIFY(igi->igi_ifp == ifp);
2745
2746
  /*
2747
   * Groups joined on loopback or marked as 'not reported',
2748
   * e.g. 224.0.0.1, enter the IGMP_SILENT_MEMBER state and
2749
   * are never reported in any IGMP protocol exchanges.
2750
   * All other groups enter the appropriate IGMP state machine
2751
   * for the version in use on this link.
2752
   * A link marked as IGIF_SILENT causes IGMP to be completely
2753
   * disabled for the link.
2754
   */
2755
1
  if ((ifp->if_flags & IFF_LOOPBACK) ||
2756
1
      (igi->igi_flags & IGIF_SILENT) ||
2757
1
      !igmp_isgroupreported(inm->inm_addr)) {
2758
1
    IGMP_PRINTF(("%s: not kicking state machine for silent group\n",
2759
1
        __func__));
2760
1
    inm->inm_state = IGMP_SILENT_MEMBER;
2761
1
    inm->inm_timer = 0;
2762
1
  } else {
2763
    /*
2764
     * Deal with overlapping in_multi lifecycle.
2765
     * If this group was LEAVING, then make sure
2766
     * we drop the reference we picked up to keep the
2767
     * group around for the final INCLUDE {} enqueue.
2768
     * Since we cannot call in_multi_detach() here,
2769
     * defer this task to the timer routine.
2770
     */
2771
0
    if (igi->igi_version == IGMP_VERSION_3 &&
2772
0
        inm->inm_state == IGMP_LEAVING_MEMBER) {
2773
0
      VERIFY(inm->inm_nrelecnt != 0);
2774
0
      SLIST_INSERT_HEAD(&igi->igi_relinmhead, inm, inm_nrele);
2775
0
    }
2776
2777
0
    inm->inm_state = IGMP_REPORTING_MEMBER;
2778
2779
0
    switch (igi->igi_version) {
2780
0
    case IGMP_VERSION_1:
2781
0
    case IGMP_VERSION_2:
2782
0
      inm->inm_state = IGMP_IDLE_MEMBER;
2783
0
      error = igmp_v1v2_queue_report(inm,
2784
0
          (igi->igi_version == IGMP_VERSION_2) ?
2785
0
          IGMP_v2_HOST_MEMBERSHIP_REPORT :
2786
0
          IGMP_v1_HOST_MEMBERSHIP_REPORT);
2787
2788
0
      INM_LOCK_ASSERT_HELD(inm);
2789
0
      IGI_LOCK_ASSERT_HELD(igi);
2790
2791
0
      if (error == 0) {
2792
0
        inm->inm_timer =
2793
0
            IGMP_RANDOM_DELAY(IGMP_V1V2_MAX_RI);
2794
0
        itp->cst = 1;
2795
0
      }
2796
0
      break;
2797
2798
0
    case IGMP_VERSION_3:
2799
      /*
2800
       * Defer update of T0 to T1, until the first copy
2801
       * of the state change has been transmitted.
2802
       */
2803
0
      syncstates = 0;
2804
2805
      /*
2806
       * Immediately enqueue a State-Change Report for
2807
       * this interface, freeing any previous reports.
2808
       * Don't kick the timers if there is nothing to do,
2809
       * or if an error occurred.
2810
       */
2811
0
      ifq = &inm->inm_scq;
2812
0
      IF_DRAIN(ifq);
2813
0
      retval = igmp_v3_enqueue_group_record(ifq, inm, 1,
2814
0
          0, 0);
2815
0
      itp->cst = (ifq->ifq_len > 0);
2816
0
      IGMP_PRINTF(("%s: enqueue record = %d\n",
2817
0
          __func__, retval));
2818
0
      if (retval <= 0) {
2819
0
        error = retval * -1;
2820
0
        break;
2821
0
      }
2822
2823
      /*
2824
       * Schedule transmission of pending state-change
2825
       * report up to RV times for this link. The timer
2826
       * will fire at the next igmp_timeout (1 second),
2827
       * giving us an opportunity to merge the reports.
2828
       */
2829
0
      if (igi->igi_flags & IGIF_LOOPBACK) {
2830
0
        inm->inm_scrv = 1;
2831
0
      } else {
2832
0
        VERIFY(igi->igi_rv > 1);
2833
0
        inm->inm_scrv = (uint16_t)igi->igi_rv;
2834
0
      }
2835
0
      inm->inm_sctimer = 1;
2836
0
      itp->sct = 1;
2837
2838
0
      error = 0;
2839
0
      break;
2840
0
    }
2841
0
  }
2842
1
  IGI_UNLOCK(igi);
2843
2844
  /*
2845
   * Only update the T0 state if state change is atomic,
2846
   * i.e. we don't need to wait for a timer to fire before we
2847
   * can consider the state change to have been communicated.
2848
   */
2849
1
  if (syncstates) {
2850
1
    inm_commit(inm);
2851
1
    IGMP_INET_PRINTF(inm->inm_addr,
2852
1
        ("%s: T1 -> T0 for %s/%s\n", __func__,
2853
1
        _igmp_inet_buf, if_name(inm->inm_ifp)));
2854
1
  }
2855
2856
1
  return error;
2857
1
}
2858
2859
/*
2860
 * Issue an intermediate state change during the IGMP life-cycle.
2861
 */
2862
static int
2863
igmp_handle_state_change(struct in_multi *inm, struct igmp_ifinfo *igi,
2864
    struct igmp_tparams *itp)
2865
0
{
2866
0
  struct ifnet            *ifp;
2867
0
  int                      retval = 0;
2868
2869
0
  INM_LOCK_ASSERT_HELD(inm);
2870
0
  IGI_LOCK_ASSERT_NOTHELD(igi);
2871
0
  VERIFY(itp != NULL);
2872
2873
0
  IGMP_INET_PRINTF(inm->inm_addr,
2874
0
      ("%s: state change for %s on ifp 0x%llx(%s)\n", __func__,
2875
0
      _igmp_inet_buf, (uint64_t)VM_KERNEL_ADDRPERM(inm->inm_ifp),
2876
0
      if_name(inm->inm_ifp)));
2877
2878
0
  ifp = inm->inm_ifp;
2879
2880
0
  IGI_LOCK(igi);
2881
0
  VERIFY(igi->igi_ifp == ifp);
2882
2883
0
  if ((ifp->if_flags & IFF_LOOPBACK) ||
2884
0
      (igi->igi_flags & IGIF_SILENT) ||
2885
0
      !igmp_isgroupreported(inm->inm_addr) ||
2886
0
      (igi->igi_version != IGMP_VERSION_3)) {
2887
0
    IGI_UNLOCK(igi);
2888
0
    if (!igmp_isgroupreported(inm->inm_addr)) {
2889
0
      IGMP_PRINTF(("%s: not kicking state "
2890
0
          "machine for silent group\n", __func__));
2891
0
    }
2892
0
    IGMP_PRINTF(("%s: nothing to do\n", __func__));
2893
0
    inm_commit(inm);
2894
0
    IGMP_INET_PRINTF(inm->inm_addr,
2895
0
        ("%s: T1 -> T0 for %s/%s\n", __func__,
2896
0
        _igmp_inet_buf, inm->inm_ifp->if_name));
2897
0
    goto done;
2898
0
  }
2899
2900
0
  IF_DRAIN(&inm->inm_scq);
2901
2902
0
  retval = igmp_v3_enqueue_group_record(&inm->inm_scq, inm, 1, 0, 0);
2903
0
  itp->cst = (inm->inm_scq.ifq_len > 0);
2904
0
  IGMP_PRINTF(("%s: enqueue record = %d\n", __func__, retval));
2905
0
  if (retval <= 0) {
2906
0
    IGI_UNLOCK(igi);
2907
0
    retval *= -1;
2908
0
    goto done;
2909
0
  }
2910
  /*
2911
   * If record(s) were enqueued, start the state-change
2912
   * report timer for this group.
2913
   */
2914
0
  inm->inm_scrv = ((igi->igi_flags & IGIF_LOOPBACK) ? 1 : (uint16_t)igi->igi_rv);
2915
0
  inm->inm_sctimer = 1;
2916
0
  itp->sct = 1;
2917
0
  IGI_UNLOCK(igi);
2918
0
done:
2919
0
  return retval;
2920
0
}
2921
2922
/*
2923
 * Perform the final leave for an IGMP group.
2924
 *
2925
 * When leaving a group:
2926
 *  IGMPv1 does nothing.
2927
 *  IGMPv2 sends a host leave message, if and only if we are the reporter.
2928
 *  IGMPv3 enqueues a state-change report containing a transition
2929
 *  to INCLUDE {} for immediate transmission.
2930
 */
2931
static void
2932
igmp_final_leave(struct in_multi *inm, struct igmp_ifinfo *igi,
2933
    struct igmp_tparams *itp)
2934
0
{
2935
0
  int syncstates = 1;
2936
2937
0
  INM_LOCK_ASSERT_HELD(inm);
2938
0
  IGI_LOCK_ASSERT_NOTHELD(igi);
2939
0
  VERIFY(itp != NULL);
2940
2941
0
  IGMP_INET_PRINTF(inm->inm_addr,
2942
0
      ("%s: final leave %s on ifp 0x%llx(%s)\n", __func__,
2943
0
      _igmp_inet_buf, (uint64_t)VM_KERNEL_ADDRPERM(inm->inm_ifp),
2944
0
      if_name(inm->inm_ifp)));
2945
2946
0
  switch (inm->inm_state) {
2947
0
  case IGMP_NOT_MEMBER:
2948
0
  case IGMP_SILENT_MEMBER:
2949
0
  case IGMP_LEAVING_MEMBER:
2950
    /* Already leaving or left; do nothing. */
2951
0
    IGMP_PRINTF(("%s: not kicking state machine for silent group\n",
2952
0
        __func__));
2953
0
    break;
2954
0
  case IGMP_REPORTING_MEMBER:
2955
0
  case IGMP_IDLE_MEMBER:
2956
0
  case IGMP_G_QUERY_PENDING_MEMBER:
2957
0
  case IGMP_SG_QUERY_PENDING_MEMBER:
2958
0
    IGI_LOCK(igi);
2959
0
    if (igi->igi_version == IGMP_VERSION_2) {
2960
0
      if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
2961
0
          inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) {
2962
0
        panic("%s: IGMPv3 state reached, not IGMPv3 "
2963
0
            "mode\n", __func__);
2964
        /* NOTREACHED */
2965
0
      }
2966
      /* scheduler timer if enqueue is successful */
2967
0
      itp->cst = (igmp_v1v2_queue_report(inm,
2968
0
          IGMP_HOST_LEAVE_MESSAGE) == 0);
2969
2970
0
      INM_LOCK_ASSERT_HELD(inm);
2971
0
      IGI_LOCK_ASSERT_HELD(igi);
2972
2973
0
      inm->inm_state = IGMP_NOT_MEMBER;
2974
0
    } else if (igi->igi_version == IGMP_VERSION_3) {
2975
      /*
2976
       * Stop group timer and all pending reports.
2977
       * Immediately enqueue a state-change report
2978
       * TO_IN {} to be sent on the next timeout,
2979
       * giving us an opportunity to merge reports.
2980
       */
2981
0
      IF_DRAIN(&inm->inm_scq);
2982
0
      inm->inm_timer = 0;
2983
0
      if (igi->igi_flags & IGIF_LOOPBACK) {
2984
0
        inm->inm_scrv = 1;
2985
0
      } else {
2986
0
        inm->inm_scrv = (uint16_t)igi->igi_rv;
2987
0
      }
2988
0
      IGMP_INET_PRINTF(inm->inm_addr,
2989
0
          ("%s: Leaving %s/%s with %d "
2990
0
          "pending retransmissions.\n", __func__,
2991
0
          _igmp_inet_buf, if_name(inm->inm_ifp),
2992
0
          inm->inm_scrv));
2993
0
      if (inm->inm_scrv == 0) {
2994
0
        inm->inm_state = IGMP_NOT_MEMBER;
2995
0
        inm->inm_sctimer = 0;
2996
0
      } else {
2997
0
        int retval;
2998
        /*
2999
         * Stick around in the in_multihead list;
3000
         * the final detach will be issued by
3001
         * igmp_v3_process_group_timers() when
3002
         * the retransmit timer expires.
3003
         */
3004
0
        INM_ADDREF_LOCKED(inm);
3005
0
        VERIFY(inm->inm_debug & IFD_ATTACHED);
3006
0
        inm->inm_reqcnt++;
3007
0
        VERIFY(inm->inm_reqcnt >= 1);
3008
0
        inm->inm_nrelecnt++;
3009
0
        VERIFY(inm->inm_nrelecnt != 0);
3010
3011
0
        retval = igmp_v3_enqueue_group_record(
3012
0
          &inm->inm_scq, inm, 1, 0, 0);
3013
0
        itp->cst = (inm->inm_scq.ifq_len > 0);
3014
0
        KASSERT(retval != 0,
3015
0
            ("%s: enqueue record = %d\n", __func__,
3016
0
            retval));
3017
3018
0
        inm->inm_state = IGMP_LEAVING_MEMBER;
3019
0
        inm->inm_sctimer = 1;
3020
0
        itp->sct = 1;
3021
0
        syncstates = 0;
3022
0
      }
3023
0
    }
3024
0
    IGI_UNLOCK(igi);
3025
0
    break;
3026
0
  case IGMP_LAZY_MEMBER:
3027
0
  case IGMP_SLEEPING_MEMBER:
3028
0
  case IGMP_AWAKENING_MEMBER:
3029
    /* Our reports are suppressed; do nothing. */
3030
0
    break;
3031
0
  }
3032
3033
0
  if (syncstates) {
3034
0
    inm_commit(inm);
3035
0
    IGMP_INET_PRINTF(inm->inm_addr,
3036
0
        ("%s: T1 -> T0 for %s/%s\n", __func__,
3037
0
        _igmp_inet_buf, if_name(inm->inm_ifp)));
3038
0
    inm->inm_st[1].iss_fmode = MCAST_UNDEFINED;
3039
0
    IGMP_INET_PRINTF(inm->inm_addr,
3040
0
        ("%s: T1 now MCAST_UNDEFINED for %s/%s\n",
3041
0
        __func__, _igmp_inet_buf, if_name(inm->inm_ifp)));
3042
0
  }
3043
0
}
3044
3045
/*
3046
 * Enqueue an IGMPv3 group record to the given output queue.
3047
 *
3048
 * XXX This function could do with having the allocation code
3049
 * split out, and the multiple-tree-walks coalesced into a single
3050
 * routine as has been done in igmp_v3_enqueue_filter_change().
3051
 *
3052
 * If is_state_change is zero, a current-state record is appended.
3053
 * If is_state_change is non-zero, a state-change report is appended.
3054
 *
3055
 * If is_group_query is non-zero, an mbuf packet chain is allocated.
3056
 * If is_group_query is zero, and if there is a packet with free space
3057
 * at the tail of the queue, it will be appended to providing there
3058
 * is enough free space.
3059
 * Otherwise a new mbuf packet chain is allocated.
3060
 *
3061
 * If is_source_query is non-zero, each source is checked to see if
3062
 * it was recorded for a Group-Source query, and will be omitted if
3063
 * it is not both in-mode and recorded.
3064
 *
3065
 * The function will attempt to allocate leading space in the packet
3066
 * for the IP/IGMP header to be prepended without fragmenting the chain.
3067
 *
3068
 * If successful the size of all data appended to the queue is returned,
3069
 * otherwise an error code less than zero is returned, or zero if
3070
 * no record(s) were appended.
3071
 */
3072
static int
3073
igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm,
3074
    const int is_state_change, const int is_group_query,
3075
    const int is_source_query)
3076
0
{
3077
0
  struct igmp_grouprec     ig;
3078
0
  struct igmp_grouprec    *pig;
3079
0
  struct ifnet            *ifp;
3080
0
  struct ip_msource       *ims, *nims;
3081
0
  struct mbuf             *m0, *m, *md;
3082
0
  int                      error, is_filter_list_change;
3083
0
  int                      minrec0len, m0srcs, nbytes, off;
3084
0
  uint16_t                 msrcs;
3085
0
  int                      record_has_sources;
3086
0
  int                      now;
3087
0
  int                      type;
3088
0
  in_addr_t                naddr;
3089
0
  uint16_t                 mode;
3090
0
  u_int16_t                ig_numsrc;
3091
3092
0
  INM_LOCK_ASSERT_HELD(inm);
3093
0
  IGI_LOCK_ASSERT_HELD(inm->inm_igi);
3094
3095
0
  error = 0;
3096
0
  ifp = inm->inm_ifp;
3097
0
  is_filter_list_change = 0;
3098
0
  m = NULL;
3099
0
  m0 = NULL;
3100
0
  m0srcs = 0;
3101
0
  msrcs = 0;
3102
0
  nbytes = 0;
3103
0
  nims = NULL;
3104
0
  record_has_sources = 1;
3105
0
  pig = NULL;
3106
0
  type = IGMP_DO_NOTHING;
3107
0
  mode = inm->inm_st[1].iss_fmode;
3108
3109
  /*
3110
   * If we did not transition out of ASM mode during t0->t1,
3111
   * and there are no source nodes to process, we can skip
3112
   * the generation of source records.
3113
   */
3114
0
  if (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0 &&
3115
0
      inm->inm_nsrc == 0) {
3116
0
    record_has_sources = 0;
3117
0
  }
3118
3119
0
  if (is_state_change) {
3120
    /*
3121
     * Queue a state change record.
3122
     * If the mode did not change, and there are non-ASM
3123
     * listeners or source filters present,
3124
     * we potentially need to issue two records for the group.
3125
     * If we are transitioning to MCAST_UNDEFINED, we need
3126
     * not send any sources.
3127
     * If there are ASM listeners, and there was no filter
3128
     * mode transition of any kind, do nothing.
3129
     */
3130
0
    if (mode != inm->inm_st[0].iss_fmode) {
3131
0
      if (mode == MCAST_EXCLUDE) {
3132
0
        IGMP_PRINTF(("%s: change to EXCLUDE\n",
3133
0
            __func__));
3134
0
        type = IGMP_CHANGE_TO_EXCLUDE_MODE;
3135
0
      } else {
3136
0
        IGMP_PRINTF(("%s: change to INCLUDE\n",
3137
0
            __func__));
3138
0
        type = IGMP_CHANGE_TO_INCLUDE_MODE;
3139
0
        if (mode == MCAST_UNDEFINED) {
3140
0
          record_has_sources = 0;
3141
0
        }
3142
0
      }
3143
0
    } else {
3144
0
      if (record_has_sources) {
3145
0
        is_filter_list_change = 1;
3146
0
      } else {
3147
0
        type = IGMP_DO_NOTHING;
3148
0
      }
3149
0
    }
3150
0
  } else {
3151
    /*
3152
     * Queue a current state record.
3153
     */
3154
0
    if (mode == MCAST_EXCLUDE) {
3155
0
      type = IGMP_MODE_IS_EXCLUDE;
3156
0
    } else if (mode == MCAST_INCLUDE) {
3157
0
      type = IGMP_MODE_IS_INCLUDE;
3158
0
      VERIFY(inm->inm_st[1].iss_asm == 0);
3159
0
    }
3160
0
  }
3161
3162
  /*
3163
   * Generate the filter list changes using a separate function.
3164
   */
3165
0
  if (is_filter_list_change) {
3166
0
    return igmp_v3_enqueue_filter_change(ifq, inm);
3167
0
  }
3168
3169
0
  if (type == IGMP_DO_NOTHING) {
3170
0
    IGMP_INET_PRINTF(inm->inm_addr,
3171
0
        ("%s: nothing to do for %s/%s\n",
3172
0
        __func__, _igmp_inet_buf,
3173
0
        if_name(inm->inm_ifp)));
3174
0
    return 0;
3175
0
  }
3176
3177
  /*
3178
   * If any sources are present, we must be able to fit at least
3179
   * one in the trailing space of the tail packet's mbuf,
3180
   * ideally more.
3181
   */
3182
0
  minrec0len = sizeof(struct igmp_grouprec);
3183
0
  if (record_has_sources) {
3184
0
    minrec0len += sizeof(in_addr_t);
3185
0
  }
3186
3187
0
  IGMP_INET_PRINTF(inm->inm_addr,
3188
0
      ("%s: queueing %s for %s/%s\n", __func__,
3189
0
      igmp_rec_type_to_str(type), _igmp_inet_buf,
3190
0
      if_name(inm->inm_ifp)));
3191
3192
  /*
3193
   * Check if we have a packet in the tail of the queue for this
3194
   * group into which the first group record for this group will fit.
3195
   * Otherwise allocate a new packet.
3196
   * Always allocate leading space for IP+RA_OPT+IGMP+REPORT.
3197
   * Note: Group records for G/GSR query responses MUST be sent
3198
   * in their own packet.
3199
   */
3200
0
  m0 = ifq->ifq_tail;
3201
0
  if (!is_group_query &&
3202
0
      m0 != NULL &&
3203
0
      (m0->m_pkthdr.vt_nrecs + 1 <= IGMP_V3_REPORT_MAXRECS) &&
3204
0
      (m0->m_pkthdr.len + minrec0len) <
3205
0
      (ifp->if_mtu - IGMP_LEADINGSPACE)) {
3206
0
    m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
3207
0
        sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
3208
0
    m = m0;
3209
0
    IGMP_PRINTF(("%s: use existing packet\n", __func__));
3210
0
  } else {
3211
0
    if (IF_QFULL(ifq)) {
3212
0
      IGMP_PRINTF(("%s: outbound queue full\n", __func__));
3213
0
      return -ENOMEM;
3214
0
    }
3215
0
    m = NULL;
3216
0
    m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
3217
0
        sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
3218
0
    if (!is_state_change && !is_group_query) {
3219
0
      m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3220
0
      if (m) {
3221
0
        m->m_data += IGMP_LEADINGSPACE;
3222
0
      }
3223
0
    }
3224
0
    if (m == NULL) {
3225
0
      m = m_gethdr(M_DONTWAIT, MT_DATA);
3226
0
      if (m) {
3227
0
        MH_ALIGN(m, IGMP_LEADINGSPACE);
3228
0
      }
3229
0
    }
3230
0
    if (m == NULL) {
3231
0
      return -ENOMEM;
3232
0
    }
3233
3234
0
    igmp_save_context(m, ifp);
3235
3236
0
    IGMP_PRINTF(("%s: allocated first packet\n", __func__));
3237
0
  }
3238
3239
  /*
3240
   * Append group record.
3241
   * If we have sources, we don't know how many yet.
3242
   */
3243
0
  ig.ig_type = (u_char)type;
3244
0
  ig.ig_datalen = 0;
3245
0
  ig.ig_numsrc = 0;
3246
0
  ig.ig_group = inm->inm_addr;
3247
0
  if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
3248
0
    if (m != m0) {
3249
0
      m_freem(m);
3250
0
    }
3251
0
    IGMP_PRINTF(("%s: m_append() failed.\n", __func__));
3252
0
    return -ENOMEM;
3253
0
  }
3254
0
  nbytes += sizeof(struct igmp_grouprec);
3255
3256
  /*
3257
   * Append as many sources as will fit in the first packet.
3258
   * If we are appending to a new packet, the chain allocation
3259
   * may potentially use clusters; use m_getptr() in this case.
3260
   * If we are appending to an existing packet, we need to obtain
3261
   * a pointer to the group record after m_append(), in case a new
3262
   * mbuf was allocated.
3263
   * Only append sources which are in-mode at t1. If we are
3264
   * transitioning to MCAST_UNDEFINED state on the group, do not
3265
   * include source entries.
3266
   * Only report recorded sources in our filter set when responding
3267
   * to a group-source query.
3268
   */
3269
0
  if (record_has_sources) {
3270
0
    if (m == m0) {
3271
0
      md = m_last(m);
3272
0
      pig = (struct igmp_grouprec *)(void *)
3273
0
          (mtod(md, uint8_t *) + md->m_len - nbytes);
3274
0
    } else {
3275
0
      md = m_getptr(m, 0, &off);
3276
0
      pig = (struct igmp_grouprec *)(void *)
3277
0
          (mtod(md, uint8_t *) + off);
3278
0
    }
3279
0
    msrcs = 0;
3280
0
    RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, nims) {
3281
0
#ifdef IGMP_DEBUG
3282
0
      char buf[MAX_IPv4_STR_LEN];
3283
3284
0
      inet_ntop_haddr(ims->ims_haddr, buf, sizeof(buf));
3285
0
      IGMP_PRINTF(("%s: visit node %s\n", __func__, buf));
3286
0
#endif
3287
0
      now = ims_get_mode(inm, ims, 1);
3288
0
      IGMP_PRINTF(("%s: node is %d\n", __func__, now));
3289
0
      if ((now != mode) ||
3290
0
          (now == mode && mode == MCAST_UNDEFINED)) {
3291
0
        IGMP_PRINTF(("%s: skip node\n", __func__));
3292
0
        continue;
3293
0
      }
3294
0
      if (is_source_query && ims->ims_stp == 0) {
3295
0
        IGMP_PRINTF(("%s: skip unrecorded node\n",
3296
0
            __func__));
3297
0
        continue;
3298
0
      }
3299
0
      IGMP_PRINTF(("%s: append node\n", __func__));
3300
0
      naddr = htonl(ims->ims_haddr);
3301
0
      if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
3302
0
        if (m != m0) {
3303
0
          m_freem(m);
3304
0
        }
3305
0
        IGMP_PRINTF(("%s: m_append() failed.\n",
3306
0
            __func__));
3307
0
        return -ENOMEM;
3308
0
      }
3309
0
      nbytes += sizeof(in_addr_t);
3310
0
      ++msrcs;
3311
0
      if (msrcs == m0srcs) {
3312
0
        break;
3313
0
      }
3314
0
    }
3315
0
    IGMP_PRINTF(("%s: msrcs is %d this packet\n", __func__,
3316
0
        msrcs));
3317
0
    ig_numsrc = htons(msrcs);
3318
0
    bcopy(&ig_numsrc, &pig->ig_numsrc, sizeof(ig_numsrc));
3319
0
    nbytes += (msrcs * sizeof(in_addr_t));
3320
0
  }
3321
3322
0
  if (is_source_query && msrcs == 0) {
3323
0
    IGMP_PRINTF(("%s: no recorded sources to report\n", __func__));
3324
0
    if (m != m0) {
3325
0
      m_freem(m);
3326
0
    }
3327
0
    return 0;
3328
0
  }
3329
3330
  /*
3331
   * We are good to go with first packet.
3332
   */
3333
0
  if (m != m0) {
3334
0
    IGMP_PRINTF(("%s: enqueueing first packet\n", __func__));
3335
0
    m->m_pkthdr.vt_nrecs = 1;
3336
0
    IF_ENQUEUE(ifq, m);
3337
0
  } else {
3338
0
    m->m_pkthdr.vt_nrecs++;
3339
0
  }
3340
  /*
3341
   * No further work needed if no source list in packet(s).
3342
   */
3343
0
  if (!record_has_sources) {
3344
0
    return nbytes;
3345
0
  }
3346
3347
  /*
3348
   * Whilst sources remain to be announced, we need to allocate
3349
   * a new packet and fill out as many sources as will fit.
3350
   * Always try for a cluster first.
3351
   */
3352
0
  while (nims != NULL) {
3353
0
    if (IF_QFULL(ifq)) {
3354
0
      IGMP_PRINTF(("%s: outbound queue full\n", __func__));
3355
0
      return -ENOMEM;
3356
0
    }
3357
0
    m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3358
0
    if (m) {
3359
0
      m->m_data += IGMP_LEADINGSPACE;
3360
0
    }
3361
0
    if (m == NULL) {
3362
0
      m = m_gethdr(M_DONTWAIT, MT_DATA);
3363
0
      if (m) {
3364
0
        MH_ALIGN(m, IGMP_LEADINGSPACE);
3365
0
      }
3366
0
    }
3367
0
    if (m == NULL) {
3368
0
      return -ENOMEM;
3369
0
    }
3370
0
    igmp_save_context(m, ifp);
3371
0
    md = m_getptr(m, 0, &off);
3372
0
    pig = (struct igmp_grouprec *)(void *)
3373
0
        (mtod(md, uint8_t *) + off);
3374
0
    IGMP_PRINTF(("%s: allocated next packet\n", __func__));
3375
3376
0
    if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
3377
0
      if (m != m0) {
3378
0
        m_freem(m);
3379
0
      }
3380
0
      IGMP_PRINTF(("%s: m_append() failed.\n", __func__));
3381
0
      return -ENOMEM;
3382
0
    }
3383
0
    m->m_pkthdr.vt_nrecs = 1;
3384
0
    nbytes += sizeof(struct igmp_grouprec);
3385
3386
0
    m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
3387
0
        sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
3388
3389
0
    msrcs = 0;
3390
0
    RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
3391
0
#ifdef IGMP_DEBUG
3392
0
      char buf[MAX_IPv4_STR_LEN];
3393
3394
0
      inet_ntop_haddr(ims->ims_haddr, buf, sizeof(buf));
3395
0
      IGMP_PRINTF(("%s: visit node %s\n", __func__, buf));
3396
0
#endif
3397
0
      now = ims_get_mode(inm, ims, 1);
3398
0
      if ((now != mode) ||
3399
0
          (now == mode && mode == MCAST_UNDEFINED)) {
3400
0
        IGMP_PRINTF(("%s: skip node\n", __func__));
3401
0
        continue;
3402
0
      }
3403
0
      if (is_source_query && ims->ims_stp == 0) {
3404
0
        IGMP_PRINTF(("%s: skip unrecorded node\n",
3405
0
            __func__));
3406
0
        continue;
3407
0
      }
3408
0
      IGMP_PRINTF(("%s: append node\n", __func__));
3409
0
      naddr = htonl(ims->ims_haddr);
3410
0
      if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
3411
0
        if (m != m0) {
3412
0
          m_freem(m);
3413
0
        }
3414
0
        IGMP_PRINTF(("%s: m_append() failed.\n",
3415
0
            __func__));
3416
0
        return -ENOMEM;
3417
0
      }
3418
0
      ++msrcs;
3419
0
      if (msrcs == m0srcs) {
3420
0
        break;
3421
0
      }
3422
0
    }
3423
0
    ig_numsrc = htons(msrcs);
3424
0
    bcopy(&ig_numsrc, &pig->ig_numsrc, sizeof(ig_numsrc));
3425
0
    nbytes += (msrcs * sizeof(in_addr_t));
3426
3427
0
    IGMP_PRINTF(("%s: enqueueing next packet\n", __func__));
3428
0
    IF_ENQUEUE(ifq, m);
3429
0
  }
3430
3431
0
  return nbytes;
3432
0
}
3433
3434
/*
3435
 * Type used to mark record pass completion.
3436
 * We exploit the fact we can cast to this easily from the
3437
 * current filter modes on each ip_msource node.
3438
 */
3439
typedef enum {
3440
  REC_NONE = 0x00,        /* MCAST_UNDEFINED */
3441
  REC_ALLOW = 0x01,       /* MCAST_INCLUDE */
3442
  REC_BLOCK = 0x02,       /* MCAST_EXCLUDE */
3443
  REC_FULL = REC_ALLOW | REC_BLOCK
3444
} rectype_t;
3445
3446
/*
3447
 * Enqueue an IGMPv3 filter list change to the given output queue.
3448
 *
3449
 * Source list filter state is held in an RB-tree. When the filter list
3450
 * for a group is changed without changing its mode, we need to compute
3451
 * the deltas between T0 and T1 for each source in the filter set,
3452
 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
3453
 *
3454
 * As we may potentially queue two record types, and the entire R-B tree
3455
 * needs to be walked at once, we break this out into its own function
3456
 * so we can generate a tightly packed queue of packets.
3457
 *
3458
 * XXX This could be written to only use one tree walk, although that makes
3459
 * serializing into the mbuf chains a bit harder. For now we do two walks
3460
 * which makes things easier on us, and it may or may not be harder on
3461
 * the L2 cache.
3462
 *
3463
 * If successful the size of all data appended to the queue is returned,
3464
 * otherwise an error code less than zero is returned, or zero if
3465
 * no record(s) were appended.
3466
 */
3467
static int
3468
igmp_v3_enqueue_filter_change(struct ifqueue *ifq, struct in_multi *inm)
3469
0
{
3470
0
  static const int MINRECLEN =
3471
0
      sizeof(struct igmp_grouprec) + sizeof(in_addr_t);
3472
0
  struct ifnet            *ifp;
3473
0
  struct igmp_grouprec     ig;
3474
0
  struct igmp_grouprec    *pig;
3475
0
  struct ip_msource       *ims, *nims;
3476
0
  struct mbuf             *m, *m0, *md;
3477
0
  in_addr_t                naddr;
3478
0
  int                      m0srcs, nbytes, npbytes, off, schanged;
3479
0
  uint16_t                 rsrcs;
3480
0
  int                      nallow, nblock;
3481
0
  uint16_t                 mode;
3482
0
  uint8_t                  now, then;
3483
0
  rectype_t                crt, drt, nrt;
3484
0
  u_int16_t                ig_numsrc;
3485
3486
0
  INM_LOCK_ASSERT_HELD(inm);
3487
3488
0
  if (inm->inm_nsrc == 0 ||
3489
0
      (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0)) {
3490
0
    return 0;
3491
0
  }
3492
3493
0
  ifp = inm->inm_ifp;                     /* interface */
3494
0
  mode = inm->inm_st[1].iss_fmode;        /* filter mode at t1 */
3495
0
  crt = REC_NONE; /* current group record type */
3496
0
  drt = REC_NONE; /* mask of completed group record types */
3497
0
  nrt = REC_NONE; /* record type for current node */
3498
0
  m0srcs = 0;     /* # source which will fit in current mbuf chain */
3499
0
  nbytes = 0;     /* # of bytes appended to group's state-change queue */
3500
0
  npbytes = 0;    /* # of bytes appended this packet */
3501
0
  rsrcs = 0;      /* # sources encoded in current record */
3502
0
  schanged = 0;   /* # nodes encoded in overall filter change */
3503
0
  nallow = 0;     /* # of source entries in ALLOW_NEW */
3504
0
  nblock = 0;     /* # of source entries in BLOCK_OLD */
3505
0
  nims = NULL;    /* next tree node pointer */
3506
3507
  /*
3508
   * For each possible filter record mode.
3509
   * The first kind of source we encounter tells us which
3510
   * is the first kind of record we start appending.
3511
   * If a node transitioned to UNDEFINED at t1, its mode is treated
3512
   * as the inverse of the group's filter mode.
3513
   */
3514
0
  while (drt != REC_FULL) {
3515
0
    do {
3516
0
      m0 = ifq->ifq_tail;
3517
0
      if (m0 != NULL &&
3518
0
          (m0->m_pkthdr.vt_nrecs + 1 <=
3519
0
          IGMP_V3_REPORT_MAXRECS) &&
3520
0
          (m0->m_pkthdr.len + MINRECLEN) <
3521
0
          (ifp->if_mtu - IGMP_LEADINGSPACE)) {
3522
0
        m = m0;
3523
0
        m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
3524
0
            sizeof(struct igmp_grouprec)) /
3525
0
            sizeof(in_addr_t);
3526
0
        IGMP_PRINTF(("%s: use previous packet\n",
3527
0
            __func__));
3528
0
      } else {
3529
0
        m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3530
0
        if (m) {
3531
0
          m->m_data += IGMP_LEADINGSPACE;
3532
0
        }
3533
0
        if (m == NULL) {
3534
0
          m = m_gethdr(M_DONTWAIT, MT_DATA);
3535
0
          if (m) {
3536
0
            MH_ALIGN(m, IGMP_LEADINGSPACE);
3537
0
          }
3538
0
        }
3539
0
        if (m == NULL) {
3540
0
          IGMP_PRINTF(("%s: m_get*() failed\n",
3541
0
              __func__));
3542
0
          return -ENOMEM;
3543
0
        }
3544
0
        m->m_pkthdr.vt_nrecs = 0;
3545
0
        igmp_save_context(m, ifp);
3546
0
        m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
3547
0
            sizeof(struct igmp_grouprec)) /
3548
0
            sizeof(in_addr_t);
3549
0
        npbytes = 0;
3550
0
        IGMP_PRINTF(("%s: allocated new packet\n",
3551
0
            __func__));
3552
0
      }
3553
      /*
3554
       * Append the IGMP group record header to the
3555
       * current packet's data area.
3556
       * Recalculate pointer to free space for next
3557
       * group record, in case m_append() allocated
3558
       * a new mbuf or cluster.
3559
       */
3560
0
      memset(&ig, 0, sizeof(ig));
3561
0
      ig.ig_group = inm->inm_addr;
3562
0
      if (!m_append(m, sizeof(ig), (void *)&ig)) {
3563
0
        if (m != m0) {
3564
0
          m_freem(m);
3565
0
        }
3566
0
        IGMP_PRINTF(("%s: m_append() failed\n",
3567
0
            __func__));
3568
0
        return -ENOMEM;
3569
0
      }
3570
0
      npbytes += sizeof(struct igmp_grouprec);
3571
0
      if (m != m0) {
3572
        /* new packet; offset in c hain */
3573
0
        md = m_getptr(m, npbytes -
3574
0
            sizeof(struct igmp_grouprec), &off);
3575
0
        pig = (struct igmp_grouprec *)(void *)(mtod(md,
3576
0
            uint8_t *) + off);
3577
0
      } else {
3578
        /* current packet; offset from last append */
3579
0
        md = m_last(m);
3580
0
        pig = (struct igmp_grouprec *)(void *)(mtod(md,
3581
0
            uint8_t *) + md->m_len -
3582
0
            sizeof(struct igmp_grouprec));
3583
0
      }
3584
      /*
3585
       * Begin walking the tree for this record type
3586
       * pass, or continue from where we left off
3587
       * previously if we had to allocate a new packet.
3588
       * Only report deltas in-mode at t1.
3589
       * We need not report included sources as allowed
3590
       * if we are in inclusive mode on the group,
3591
       * however the converse is not true.
3592
       */
3593
0
      rsrcs = 0;
3594
0
      if (nims == NULL) {
3595
0
        nims = RB_MIN(ip_msource_tree, &inm->inm_srcs);
3596
0
      }
3597
0
      RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
3598
0
#ifdef IGMP_DEBUG
3599
0
        char buf[MAX_IPv4_STR_LEN];
3600
3601
0
        inet_ntop_haddr(ims->ims_haddr, buf, sizeof(buf));
3602
0
        IGMP_PRINTF(("%s: visit node %s\n", __func__, buf));
3603
0
#endif
3604
0
        now = ims_get_mode(inm, ims, 1);
3605
0
        then = ims_get_mode(inm, ims, 0);
3606
0
        IGMP_PRINTF(("%s: mode: t0 %d, t1 %d\n",
3607
0
            __func__, then, now));
3608
0
        if (now == then) {
3609
0
          IGMP_PRINTF(("%s: skip unchanged\n",
3610
0
              __func__));
3611
0
          continue;
3612
0
        }
3613
0
        if (mode == MCAST_EXCLUDE &&
3614
0
            now == MCAST_INCLUDE) {
3615
0
          IGMP_PRINTF(("%s: skip IN src on EX "
3616
0
              "group\n", __func__));
3617
0
          continue;
3618
0
        }
3619
0
        nrt = (rectype_t)now;
3620
0
        if (nrt == REC_NONE) {
3621
0
          nrt = (rectype_t)(~mode & REC_FULL);
3622
0
        }
3623
0
        if (schanged++ == 0) {
3624
0
          crt = nrt;
3625
0
        } else if (crt != nrt) {
3626
0
          continue;
3627
0
        }
3628
0
        naddr = htonl(ims->ims_haddr);
3629
0
        if (!m_append(m, sizeof(in_addr_t),
3630
0
            (void *)&naddr)) {
3631
0
          if (m != m0) {
3632
0
            m_freem(m);
3633
0
          }
3634
0
          IGMP_PRINTF(("%s: m_append() failed\n",
3635
0
              __func__));
3636
0
          return -ENOMEM;
3637
0
        }
3638
0
        nallow += !!(crt == REC_ALLOW);
3639
0
        nblock += !!(crt == REC_BLOCK);
3640
0
        if (++rsrcs == m0srcs) {
3641
0
          break;
3642
0
        }
3643
0
      }
3644
      /*
3645
       * If we did not append any tree nodes on this
3646
       * pass, back out of allocations.
3647
       */
3648
0
      if (rsrcs == 0) {
3649
0
        npbytes -= sizeof(struct igmp_grouprec);
3650
0
        if (m != m0) {
3651
0
          IGMP_PRINTF(("%s: m_free(m)\n",
3652
0
              __func__));
3653
0
          m_freem(m);
3654
0
        } else {
3655
0
          IGMP_PRINTF(("%s: m_adj(m, -ig)\n",
3656
0
              __func__));
3657
0
          m_adj(m, -((int)sizeof(
3658
0
                struct igmp_grouprec)));
3659
0
        }
3660
0
        continue;
3661
0
      }
3662
0
      npbytes += (rsrcs * sizeof(in_addr_t));
3663
0
      if (crt == REC_ALLOW) {
3664
0
        pig->ig_type = IGMP_ALLOW_NEW_SOURCES;
3665
0
      } else if (crt == REC_BLOCK) {
3666
0
        pig->ig_type = IGMP_BLOCK_OLD_SOURCES;
3667
0
      }
3668
0
      ig_numsrc = htons(rsrcs);
3669
0
      bcopy(&ig_numsrc, &pig->ig_numsrc, sizeof(ig_numsrc));
3670
      /*
3671
       * Count the new group record, and enqueue this
3672
       * packet if it wasn't already queued.
3673
       */
3674
0
      m->m_pkthdr.vt_nrecs++;
3675
0
      if (m != m0) {
3676
0
        IF_ENQUEUE(ifq, m);
3677
0
      }
3678
0
      nbytes += npbytes;
3679
0
    } while (nims != NULL);
3680
0
    drt |= crt;
3681
0
    crt = (~crt & REC_FULL);
3682
0
  }
3683
3684
0
  IGMP_PRINTF(("%s: queued %d ALLOW_NEW, %d BLOCK_OLD\n", __func__,
3685
0
      nallow, nblock));
3686
3687
0
  return nbytes;
3688
0
}
3689
3690
static int
3691
igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq)
3692
0
{
3693
0
  struct ifqueue  *gq;
3694
0
  struct mbuf     *m;             /* pending state-change */
3695
0
  struct mbuf     *m0;            /* copy of pending state-change */
3696
0
  struct mbuf     *mt;            /* last state-change in packet */
3697
0
  struct mbuf     *n;
3698
0
  int              docopy, domerge;
3699
0
  u_int            recslen;
3700
3701
0
  INM_LOCK_ASSERT_HELD(inm);
3702
3703
0
  docopy = 0;
3704
0
  domerge = 0;
3705
0
  recslen = 0;
3706
3707
  /*
3708
   * If there are further pending retransmissions, make a writable
3709
   * copy of each queued state-change message before merging.
3710
   */
3711
0
  if (inm->inm_scrv > 0) {
3712
0
    docopy = 1;
3713
0
  }
3714
3715
0
  gq = &inm->inm_scq;
3716
0
#ifdef IGMP_DEBUG
3717
0
  if (gq->ifq_head == NULL) {
3718
0
    IGMP_PRINTF(("%s: WARNING: queue for inm 0x%llx is empty\n",
3719
0
        __func__, (uint64_t)VM_KERNEL_ADDRPERM(inm)));
3720
0
  }
3721
0
#endif
3722
3723
  /*
3724
   * Use IF_REMQUEUE() instead of IF_DEQUEUE() below, since the
3725
   * packet might not always be at the head of the ifqueue.
3726
   */
3727
0
  m = gq->ifq_head;
3728
0
  while (m != NULL) {
3729
    /*
3730
     * Only merge the report into the current packet if
3731
     * there is sufficient space to do so; an IGMPv3 report
3732
     * packet may only contain 65,535 group records.
3733
     * Always use a simple mbuf chain concatentation to do this,
3734
     * as large state changes for single groups may have
3735
     * allocated clusters.
3736
     */
3737
0
    domerge = 0;
3738
0
    mt = ifscq->ifq_tail;
3739
0
    if (mt != NULL) {
3740
0
      recslen = m_length(m);
3741
3742
0
      if ((mt->m_pkthdr.vt_nrecs +
3743
0
          m->m_pkthdr.vt_nrecs <=
3744
0
          IGMP_V3_REPORT_MAXRECS) &&
3745
0
          (mt->m_pkthdr.len + recslen <=
3746
0
          (inm->inm_ifp->if_mtu - IGMP_LEADINGSPACE))) {
3747
0
        domerge = 1;
3748
0
      }
3749
0
    }
3750
3751
0
    if (!domerge && IF_QFULL(gq)) {
3752
0
      IGMP_PRINTF(("%s: outbound queue full, skipping whole "
3753
0
          "packet 0x%llx\n", __func__,
3754
0
          (uint64_t)VM_KERNEL_ADDRPERM(m)));
3755
0
      n = m->m_nextpkt;
3756
0
      if (!docopy) {
3757
0
        IF_REMQUEUE(gq, m);
3758
0
        m_freem(m);
3759
0
      }
3760
0
      m = n;
3761
0
      continue;
3762
0
    }
3763
3764
0
    if (!docopy) {
3765
0
      IGMP_PRINTF(("%s: dequeueing 0x%llx\n", __func__,
3766
0
          (uint64_t)VM_KERNEL_ADDRPERM(m)));
3767
0
      n = m->m_nextpkt;
3768
0
      IF_REMQUEUE(gq, m);
3769
0
      m0 = m;
3770
0
      m = n;
3771
0
    } else {
3772
0
      IGMP_PRINTF(("%s: copying 0x%llx\n", __func__,
3773
0
          (uint64_t)VM_KERNEL_ADDRPERM(m)));
3774
0
      m0 = m_dup(m, M_NOWAIT);
3775
0
      if (m0 == NULL) {
3776
0
        return ENOMEM;
3777
0
      }
3778
0
      m0->m_nextpkt = NULL;
3779
0
      m = m->m_nextpkt;
3780
0
    }
3781
3782
0
    if (!domerge) {
3783
0
      IGMP_PRINTF(("%s: queueing 0x%llx to ifscq 0x%llx)\n",
3784
0
          __func__, (uint64_t)VM_KERNEL_ADDRPERM(m0),
3785
0
          (uint64_t)VM_KERNEL_ADDRPERM(ifscq)));
3786
0
      IF_ENQUEUE(ifscq, m0);
3787
0
    } else {
3788
0
      struct mbuf *mtl;       /* last mbuf of packet mt */
3789
3790
0
      IGMP_PRINTF(("%s: merging 0x%llx with ifscq tail "
3791
0
          "0x%llx)\n", __func__,
3792
0
          (uint64_t)VM_KERNEL_ADDRPERM(m0),
3793
0
          (uint64_t)VM_KERNEL_ADDRPERM(mt)));
3794
3795
0
      mtl = m_last(mt);
3796
0
      m0->m_flags &= ~M_PKTHDR;
3797
0
      mt->m_pkthdr.len += recslen;
3798
0
      mt->m_pkthdr.vt_nrecs +=
3799
0
          m0->m_pkthdr.vt_nrecs;
3800
3801
0
      mtl->m_next = m0;
3802
0
    }
3803
0
  }
3804
3805
0
  return 0;
3806
0
}
3807
3808
/*
3809
 * Respond to a pending IGMPv3 General Query.
3810
 */
3811
static uint32_t
3812
igmp_v3_dispatch_general_query(struct igmp_ifinfo *igi)
3813
0
{
3814
0
  struct ifnet            *ifp;
3815
0
  struct in_multi         *inm;
3816
0
  struct in_multistep     step;
3817
0
  int                      retval, loop;
3818
3819
0
  IGI_LOCK_ASSERT_HELD(igi);
3820
3821
0
  VERIFY(igi->igi_version == IGMP_VERSION_3);
3822
3823
0
  ifp = igi->igi_ifp;
3824
0
  IGI_UNLOCK(igi);
3825
3826
0
  in_multihead_lock_shared();
3827
0
  IN_FIRST_MULTI(step, inm);
3828
0
  while (inm != NULL) {
3829
0
    INM_LOCK(inm);
3830
0
    if (inm->inm_ifp != ifp) {
3831
0
      goto next;
3832
0
    }
3833
3834
0
    switch (inm->inm_state) {
3835
0
    case IGMP_NOT_MEMBER:
3836
0
    case IGMP_SILENT_MEMBER:
3837
0
      break;
3838
0
    case IGMP_REPORTING_MEMBER:
3839
0
    case IGMP_IDLE_MEMBER:
3840
0
    case IGMP_LAZY_MEMBER:
3841
0
    case IGMP_SLEEPING_MEMBER:
3842
0
    case IGMP_AWAKENING_MEMBER:
3843
0
      inm->inm_state = IGMP_REPORTING_MEMBER;
3844
0
      IGI_LOCK(igi);
3845
0
      retval = igmp_v3_enqueue_group_record(&igi->igi_gq,
3846
0
          inm, 0, 0, 0);
3847
0
      IGI_UNLOCK(igi);
3848
0
      IGMP_PRINTF(("%s: enqueue record = %d\n",
3849
0
          __func__, retval));
3850
0
      break;
3851
0
    case IGMP_G_QUERY_PENDING_MEMBER:
3852
0
    case IGMP_SG_QUERY_PENDING_MEMBER:
3853
0
    case IGMP_LEAVING_MEMBER:
3854
0
      break;
3855
0
    }
3856
0
next:
3857
0
    INM_UNLOCK(inm);
3858
0
    IN_NEXT_MULTI(step, inm);
3859
0
  }
3860
0
  in_multihead_lock_done();
3861
3862
0
  IGI_LOCK(igi);
3863
0
  loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
3864
0
  igmp_dispatch_queue(igi, &igi->igi_gq, IGMP_MAX_RESPONSE_BURST,
3865
0
      loop);
3866
0
  IGI_LOCK_ASSERT_HELD(igi);
3867
  /*
3868
   * Slew transmission of bursts over 1 second intervals.
3869
   */
3870
0
  if (igi->igi_gq.ifq_head != NULL) {
3871
0
    igi->igi_v3_timer = 1 + IGMP_RANDOM_DELAY(
3872
0
      IGMP_RESPONSE_BURST_INTERVAL);
3873
0
  }
3874
3875
0
  return igi->igi_v3_timer;
3876
0
}
3877
3878
/*
3879
 * Transmit the next pending IGMP message in the output queue.
3880
 *
3881
 * Must not be called with inm_lock or igi_lock held.
3882
 */
3883
static void
3884
igmp_sendpkt(struct mbuf *m)
3885
0
{
3886
0
  struct ip_moptions      *imo;
3887
0
  struct mbuf             *ipopts, *m0;
3888
0
  int                     error;
3889
0
  struct route            ro;
3890
0
  struct ifnet            *ifp;
3891
3892
0
  IGMP_PRINTF(("%s: transmit 0x%llx\n", __func__,
3893
0
      (uint64_t)VM_KERNEL_ADDRPERM(m)));
3894
3895
0
  ifp = igmp_restore_context(m);
3896
  /*
3897
   * Check if the ifnet is still attached.
3898
   */
3899
0
  if (ifp == NULL || !ifnet_is_attached(ifp, 0)) {
3900
0
    IGMP_PRINTF(("%s: dropped 0x%llx as ifp went away.\n",
3901
0
        __func__, (uint64_t)VM_KERNEL_ADDRPERM(m)));
3902
0
    m_freem(m);
3903
0
    OSAddAtomic(1, &ipstat.ips_noroute);
3904
0
    return;
3905
0
  }
3906
3907
0
  ipopts = igmp_sendra ? m_raopt : NULL;
3908
3909
0
  imo = ip_allocmoptions(Z_WAITOK);
3910
0
  if (imo == NULL) {
3911
0
    m_freem(m);
3912
0
    return;
3913
0
  }
3914
3915
0
  imo->imo_multicast_ttl  = 1;
3916
0
  imo->imo_multicast_vif  = -1;
3917
0
  imo->imo_multicast_loop = 0;
3918
3919
  /*
3920
   * If the user requested that IGMP traffic be explicitly
3921
   * redirected to the loopback interface (e.g. they are running a
3922
   * MANET interface and the routing protocol needs to see the
3923
   * updates), handle this now.
3924
   */
3925
0
  if (m->m_flags & M_IGMP_LOOP) {
3926
0
    imo->imo_multicast_ifp = lo_ifp;
3927
0
  } else {
3928
0
    imo->imo_multicast_ifp = ifp;
3929
0
  }
3930
3931
0
  if (m->m_flags & M_IGMPV2) {
3932
0
    m0 = m;
3933
0
  } else {
3934
0
    m0 = igmp_v3_encap_report(ifp, m);
3935
0
    if (m0 == NULL) {
3936
      /*
3937
       * If igmp_v3_encap_report() failed, then M_PREPEND()
3938
       * already freed the original mbuf chain.
3939
       * This means that we don't have to m_freem(m) here.
3940
       */
3941
0
      IGMP_PRINTF(("%s: dropped 0x%llx\n", __func__,
3942
0
          (uint64_t)VM_KERNEL_ADDRPERM(m)));
3943
0
      IMO_REMREF(imo);
3944
0
      atomic_add_32(&ipstat.ips_odropped, 1);
3945
0
      return;
3946
0
    }
3947
0
  }
3948
3949
0
  igmp_scrub_context(m0);
3950
0
  m->m_flags &= ~(M_PROTOFLAGS | M_IGMP_LOOP);
3951
0
  m0->m_pkthdr.rcvif = lo_ifp;
3952
3953
0
  if (ifp->if_eflags & IFEF_TXSTART) {
3954
    /*
3955
     * Use control service class if the interface supports
3956
     * transmit-start model.
3957
     */
3958
0
    (void) m_set_service_class(m0, MBUF_SC_CTL);
3959
0
  }
3960
0
  bzero(&ro, sizeof(ro));
3961
0
  error = ip_output(m0, ipopts, &ro, 0, imo, NULL);
3962
0
  ROUTE_RELEASE(&ro);
3963
3964
0
  IMO_REMREF(imo);
3965
3966
0
  if (error) {
3967
0
    IGMP_PRINTF(("%s: ip_output(0x%llx) = %d\n", __func__,
3968
0
        (uint64_t)VM_KERNEL_ADDRPERM(m0), error));
3969
0
    return;
3970
0
  }
3971
3972
0
  IGMPSTAT_INC(igps_snd_reports);
3973
0
  OIGMPSTAT_INC(igps_snd_reports);
3974
0
}
3975
/*
3976
 * Encapsulate an IGMPv3 report.
3977
 *
3978
 * The internal mbuf flag M_IGMPV3_HDR is used to indicate that the mbuf
3979
 * chain has already had its IP/IGMPv3 header prepended. In this case
3980
 * the function will not attempt to prepend; the lengths and checksums
3981
 * will however be re-computed.
3982
 *
3983
 * Returns a pointer to the new mbuf chain head, or NULL if the
3984
 * allocation failed.
3985
 */
3986
static struct mbuf *
3987
igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m)
3988
0
{
3989
0
  struct igmp_report      *igmp;
3990
0
  struct ip               *ip;
3991
0
  unsigned int             hdrlen, igmpreclen;
3992
3993
0
  VERIFY((m->m_flags & M_PKTHDR));
3994
3995
0
  igmpreclen = m_length(m);
3996
0
  hdrlen = sizeof(struct ip) + sizeof(struct igmp_report);
3997
3998
0
  if (m->m_flags & M_IGMPV3_HDR) {
3999
0
    igmpreclen -= hdrlen;
4000
0
  } else {
4001
0
    M_PREPEND(m, hdrlen, M_DONTWAIT, 1);
4002
0
    if (m == NULL) {
4003
0
      return NULL;
4004
0
    }
4005
0
    m->m_flags |= M_IGMPV3_HDR;
4006
0
  }
4007
0
  if (hdrlen + igmpreclen > USHRT_MAX) {
4008
0
    IGMP_PRINTF(("%s: invalid length %d\n", __func__, hdrlen + igmpreclen));
4009
0
    m_freem(m);
4010
0
    return NULL;
4011
0
  }
4012
4013
4014
0
  IGMP_PRINTF(("%s: igmpreclen is %d\n", __func__, igmpreclen));
4015
4016
0
  m->m_data += sizeof(struct ip);
4017
0
  m->m_len -= sizeof(struct ip);
4018
4019
0
  igmp = mtod(m, struct igmp_report *);
4020
0
  igmp->ir_type = IGMP_v3_HOST_MEMBERSHIP_REPORT;
4021
0
  igmp->ir_rsv1 = 0;
4022
0
  igmp->ir_rsv2 = 0;
4023
0
  igmp->ir_numgrps = htons(m->m_pkthdr.vt_nrecs);
4024
0
  igmp->ir_cksum = 0;
4025
0
  igmp->ir_cksum = in_cksum(m, sizeof(struct igmp_report) + igmpreclen);
4026
0
  m->m_pkthdr.vt_nrecs = 0;
4027
4028
0
  m->m_data -= sizeof(struct ip);
4029
0
  m->m_len += sizeof(struct ip);
4030
4031
0
  ip = mtod(m, struct ip *);
4032
0
  ip->ip_tos = IPTOS_PREC_INTERNETCONTROL;
4033
0
  ip->ip_len = (u_short)(hdrlen + igmpreclen);
4034
0
  ip->ip_off = IP_DF;
4035
0
  ip->ip_p = IPPROTO_IGMP;
4036
0
  ip->ip_sum = 0;
4037
4038
0
  ip->ip_src.s_addr = INADDR_ANY;
4039
4040
0
  if (m->m_flags & M_IGMP_LOOP) {
4041
0
    struct in_ifaddr *ia;
4042
4043
0
    IFP_TO_IA(ifp, ia);
4044
0
    if (ia != NULL) {
4045
0
      IFA_LOCK(&ia->ia_ifa);
4046
0
      ip->ip_src = ia->ia_addr.sin_addr;
4047
0
      IFA_UNLOCK(&ia->ia_ifa);
4048
0
      IFA_REMREF(&ia->ia_ifa);
4049
0
    }
4050
0
  }
4051
4052
0
  ip->ip_dst.s_addr = htonl(INADDR_ALLRPTS_GROUP);
4053
4054
0
  return m;
4055
0
}
4056
4057
#ifdef IGMP_DEBUG
4058
static const char *
4059
igmp_rec_type_to_str(const int type)
4060
0
{
4061
0
  switch (type) {
4062
0
  case IGMP_CHANGE_TO_EXCLUDE_MODE:
4063
0
    return "TO_EX";
4064
0
  case IGMP_CHANGE_TO_INCLUDE_MODE:
4065
0
    return "TO_IN";
4066
0
  case IGMP_MODE_IS_EXCLUDE:
4067
0
    return "MODE_EX";
4068
0
  case IGMP_MODE_IS_INCLUDE:
4069
0
    return "MODE_IN";
4070
0
  case IGMP_ALLOW_NEW_SOURCES:
4071
0
    return "ALLOW_NEW";
4072
0
  case IGMP_BLOCK_OLD_SOURCES:
4073
0
    return "BLOCK_OLD";
4074
0
  default:
4075
0
    break;
4076
0
  }
4077
0
  return "unknown";
4078
0
}
4079
#endif
4080
4081
void
4082
igmp_init(struct protosw *pp, struct domain *dp)
4083
1
{
4084
1
#pragma unused(dp)
4085
1
  static int igmp_initialized = 0;
4086
4087
1
  VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);
4088
4089
1
  if (igmp_initialized) {
4090
0
    return;
4091
0
  }
4092
1
  igmp_initialized = 1;
4093
4094
1
  IGMP_PRINTF(("%s: initializing\n", __func__));
4095
4096
1
  igmp_timers_are_running = 0;
4097
4098
  /* Setup lock group and attribute for igmp_mtx */
4099
1
  igmp_mtx_grp_attr = lck_grp_attr_alloc_init();
4100
1
  igmp_mtx_grp = lck_grp_alloc_init("igmp_mtx", igmp_mtx_grp_attr);
4101
1
  igmp_mtx_attr = lck_attr_alloc_init();
4102
1
  lck_mtx_init(&igmp_mtx, igmp_mtx_grp, igmp_mtx_attr);
4103
4104
1
  LIST_INIT(&igi_head);
4105
1
  m_raopt = igmp_ra_alloc();
4106
1
}