Coverage Report

Created: 2025-11-11 06:17

/src/frr/pimd/pim6_mld.h
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PIMv6 MLD querier
 * Copyright (C) 2021-2022  David Lamparter for NetDEF, Inc.
 */

#ifndef PIM6_MLD_H
#define PIM6_MLD_H

#include "typesafe.h"
#include "pim_addr.h"

struct event;
struct pim_instance;
struct gm_packet_sg;
struct gm_if;
struct channel_oil;

#define MLD_DEFAULT_VERSION 2

/* see comment below on subs_negative/subs_positive */
enum gm_sub_sense {
	/* negative/pruning: S,G in EXCLUDE */
	GM_SUB_NEG = 0,
	/* positive/joining: *,G in EXCLUDE and S,G in INCLUDE */
	GM_SUB_POS = 1,
};

enum gm_sg_state {
	GM_SG_NOINFO = 0,
	GM_SG_JOIN,
	GM_SG_JOIN_EXPIRING,
	/* remaining 3 only valid for S,G when *,G in EXCLUDE */
	GM_SG_PRUNE,
	GM_SG_NOPRUNE,
	GM_SG_NOPRUNE_EXPIRING,
};

/* If the timer gm_t_sg_expire is started without a leave message having been
 * received, sg->state should be moved to the corresponding expiring state.
 * When the timer fires, the state is not expected to still be a plain join
 * state.  If a join is received while the timer is running, the state moves
 * back to JOIN (or NOPRUNE) and the timer is switched off.  Hence the state
 * transition below.
 */
#define GM_UPDATE_SG_STATE(sg)                                                 \
	do {                                                                   \
		if (sg->state == GM_SG_JOIN)                                   \
			sg->state = GM_SG_JOIN_EXPIRING;                       \
		else if (sg->state == GM_SG_NOPRUNE)                           \
			sg->state = GM_SG_NOPRUNE_EXPIRING;                    \
	} while (0)

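For illustration, a minimal sketch of the intended call pattern, assuming a hypothetical gm_sg_timer_start() helper; only GM_UPDATE_SG_STATE() and the struct gm_sg fields declared below are from this header:

/* Sketch, not part of this header: arm the (S,G) expiry timer when no leave
 * has been received, shifting JOIN -> JOIN_EXPIRING and NOPRUNE ->
 * NOPRUNE_EXPIRING first so the expiry handler never sees a plain join state.
 */
static void gm_sg_timer_start(struct gm_sg *sg, unsigned int timeout_ms)
{
	GM_UPDATE_SG_STATE(sg);
	/* actual event-loop scheduling of sg->t_sg_expire elided */
	(void)timeout_ms;
}
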
static inline bool gm_sg_state_want_join(enum gm_sg_state state)
{
	return state != GM_SG_NOINFO && state != GM_SG_PRUNE;
}
Unexecuted instantiation: pim_cmd_common.c:gm_sg_state_want_join
Unexecuted instantiation: pim_iface.c:gm_sg_state_want_join
Unexecuted instantiation: pim_nb_config.c:gm_sg_state_want_join
Unexecuted instantiation: pim_vty.c:gm_sg_state_want_join

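A sketch of the decision this predicate feeds, assuming a hypothetical gm_sg_apply() that syncs the result into the TIB join flag (struct gm_sg and its tib_joined field are declared just below):

/* Sketch, hypothetical helper: propagate a state change into the TIB. */
static void gm_sg_apply(struct gm_sg *sg)
{
	bool want_join = gm_sg_state_want_join(sg->state);

	if (want_join != sg->tib_joined) {
		sg->tib_joined = want_join;
		/* actual TIB / channel_oil join-prune update elided */
	}
}
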
/* MLD (S,G) state (on an interface)
 *
 * group is always != ::, src is :: for (*,G) joins.  sort order in RB tree is
 * such that sources for a particular group can be iterated by starting at the
 * group.  For INCLUDE, no (*,G) entry exists, only (S,G).
 */

PREDECL_RBTREE_UNIQ(gm_packet_sg_subs);
PREDECL_RBTREE_UNIQ(gm_sgs);
struct gm_sg {
	pim_sgaddr sgaddr;
	struct gm_if *iface;
	struct gm_sgs_item itm;

	enum gm_sg_state state;
	struct channel_oil *oil;
	bool tib_joined;

	struct timeval created;

	/* if a group- or group-and-source specific query is running
	 * (implies we haven't received any report yet, since it's cancelled
	 * by that)
	 */
	struct event *t_sg_expire;

	/* last-member-left triggered queries (group/group-source specific)
	 *
	 * this timer will be running even if we aren't the elected querier,
	 * in case the election result changes midway through.
	 */
	struct event *t_sg_query;

	/* we must keep sending (QRV) queries even if we get a positive
	 * response, to make sure other routers are updated.  query_sbit
	 * will be set in that case, since other routers need the *response*,
	 * not the *query*
	 */
	uint8_t n_query;
	bool query_sbit;

	/* subs_positive tracks gm_packet_sg resulting in a JOIN, i.e. for
	 * (*,G) it has *EXCLUDE* items, for (S,G) it has *INCLUDE* items.
	 *
	 * subs_negative is always empty for (*,G) and tracks EXCLUDE items
	 * for (S,G).  This means that an (S,G) entry is active as a PRUNE if
	 *   len(src->subs_negative) == len(grp->subs_positive)
	 *   && len(src->subs_positive) == 0
	 * (i.e. all receivers for the group opted to exclude this S,G and
	 * no one did an SSM join for the S,G)
	 */
	union {
		struct {
			struct gm_packet_sg_subs_head subs_negative[1];
			struct gm_packet_sg_subs_head subs_positive[1];
		};
		struct gm_packet_sg_subs_head subs[2];
	};

	/* If the elected querier is not ourselves, queries and reports might
	 * get reordered in rare circumstances, i.e. the report could arrive
	 * just a microsecond before the query kicks off the timer.  This can
	 * then result in us thinking there are no more receivers, since no
	 * report might be received during the query period.
	 *
	 * To avoid this, keep track of the most recent report for this (S,G)
	 * so we can do a quick check to add just a little bit of slack.
	 *
	 * EXCLUDE S,Gs are never in most_recent.
	 */
	struct gm_packet_sg *most_recent;
};
int gm_sg_cmp(const struct gm_sg *a, const struct gm_sg *b);
DECLARE_RBTREE_UNIQ(gm_sgs, struct gm_sg, itm, gm_sg_cmp);
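Written out as code, the PRUNE condition from the comment above looks like this; gm_sg_is_active_prune() is a hypothetical name, and the _count() calls assume the usual typesafe.h container API generated alongside DECLARE_RBTREE_UNIQ:

/* Sketch: (S,G) acts as a PRUNE iff every subscriber of the group excludes
 * this source and none of them SSM-joined it.
 */
static bool gm_sg_is_active_prune(const struct gm_sg *src_sg,
				  const struct gm_sg *grp_sg)
{
	return gm_packet_sg_subs_count(src_sg->subs_negative) ==
		       gm_packet_sg_subs_count(grp_sg->subs_positive) &&
	       gm_packet_sg_subs_count(src_sg->subs_positive) == 0;
}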

/* host tracking entry.  addr will be one of:
 *
 * ::           - used by hosts during address acquisition
 * ::1          - may show up on some OS for joins by the router itself
 * link-local   - regular operation by MLDv2 hosts
 * ffff:..:ffff - MLDv1 entry (cannot be tracked due to report suppression)
 *
 * global scope IPv6 addresses can never show up here
 */
PREDECL_HASH(gm_subscribers);
PREDECL_DLIST(gm_packets);
struct gm_subscriber {
	pim_addr addr;
	struct gm_subscribers_item itm;

	struct gm_if *iface;
	size_t refcount;

	struct gm_packets_head packets[1];

	struct timeval created;
};

/*
 * MLD join state is kept batched by packet.  Since the timers for all items
 * in a packet are the same, this reduces the number of timers we're keeping
 * track of.  It also eases tracking for EXCLUDE state groups, because the
 * excluded sources are in the same packet.  (MLD does not support splitting
 * that if it exceeds the MTU; it's always a full replace for exclude.)
 *
 * Since packets may be partially superseded by newer packets, the "active"
 * field is used to track this.
 */

/* gm_packet_sg is allocated as part of gm_packet_state, note the items[0]
 * array at the end of that.  gm_packet_sg is NEVER directly allocated with
 * XMALLOC/XFREE.
 */
struct gm_packet_sg {
	/* non-NULL as long as this gm_packet_sg is the most recent entry
	 * for (subscriber,S,G).  Cleared to NULL when a newer packet by the
	 * subscriber replaces this item.
	 *
	 * (Old items are kept around so we don't need to realloc/resize
	 * gm_packet_state, which would mess up a whole lot of pointers)
	 */
	struct gm_sg *sg;

	/* gm_sg -> (subscriber, gm_packet_sg)
	 * only on RB-tree while sg != NULL, i.e. not superseded by newer.
	 */
	struct gm_packet_sg_subs_item subs_itm;

	bool is_src : 1; /* := (src != ::) */
	bool is_excl : 1;

	/* for getting back to struct gm_packet_state, cf.
	 * gm_packet_sg2state() below
	 */
	uint16_t offset;

	/* if this is a group entry in EXCLUDE state, n_exclude counts how
	 * many sources are on the exclude list here.  They follow immediately
	 * after.
	 */
	uint16_t n_exclude;
};

#define gm_packet_sg2state(sg)                                                 \
	container_of(sg, struct gm_packet_state, items[sg->offset])

PREDECL_DLIST(gm_packet_expires);
struct gm_packet_state {
	struct gm_if *iface;
	struct gm_subscriber *subscriber;
	struct gm_packets_item pkt_itm;

	struct timeval received;
	struct gm_packet_expires_item exp_itm;

	/* n_active starts equal to n_sg;  whenever active is set to false on
	 * an item it is decremented.  When n_active == 0, the packet can be
	 * freed.
	 */
	uint16_t n_sg, n_active;
	struct gm_packet_sg items[0];
};

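To make the batching concrete, a sketch that walks one packet's flexible items[] array; gm_packet_foreach_active() is a hypothetical name, and the skip assumes the excluded sources occupy the n_exclude item slots directly after their group entry, as the comments above describe:

#include <assert.h>

/* Sketch: visit each (S,G) record batched in one report packet. */
static void gm_packet_foreach_active(struct gm_packet_state *pkt)
{
	for (uint16_t i = 0; i < pkt->n_sg; i++) {
		struct gm_packet_sg *item = &pkt->items[i];

		/* offset makes the item -> packet mapping reversible */
		assert(gm_packet_sg2state(item) == pkt);

		if (item->sg) {
			/* still the most recent entry for (subscriber,S,G);
			 * process it here
			 */
		}
		if (item->is_excl)
			i += item->n_exclude; /* excluded srcs follow inline */
	}
}
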
/* general queries are rather different from group/S,G specific queries;  it's
 * not particularly efficient or useful to try to shoehorn them into the S,G
 * timers.  Instead, we keep a history of recent queries and their implied
 * expiries.
 */
struct gm_general_pending {
	struct timeval query, expiry;
};

/* similarly, group queries also age out S,G entries for the group, but in
 * this case we only keep one query for each group
 *
 * why is this not in the *,G gm_sg?  There may not be one (for INCLUDE mode
 * groups, or groups we don't know about.)  Also, malicious clients could spam
 * random group-specific queries to trigger resource exhaustion, so it makes
 * sense to limit these.
 */
PREDECL_RBTREE_UNIQ(gm_grp_pends);
struct gm_grp_pending {
	struct gm_grp_pends_item itm;
	struct gm_if *iface;
	pim_addr grp;

	struct timeval query;
	struct event *t_expire;
};

/* guaranteed MTU for IPv6 is 1280 bytes.  IPv6 header is 40 bytes, MLDv2
 * query header is 24 bytes, RA option is 8 bytes - leaves 1208 bytes for the
 * source list, which is 151 IPv6 addresses.  But we may have some more IPv6
 * extension headers (e.g. IPsec AH), so just cap to 128
 */
#define MLD_V2Q_MTU_MAX_SOURCES 128

/* group-and-source-specific queries are bundled together: if some host joins
 * multiple sources, it's likely to drop them all at the same time.
 *
 * Unlike gm_grp_pending, this is only used for aggregation since the S,G
 * state is kept directly in the gm_sg structure.
 */
PREDECL_HASH(gm_gsq_pends);
struct gm_gsq_pending {
	struct gm_gsq_pends_item itm;

	struct gm_if *iface;
	struct event *t_send;

	pim_addr grp;
	bool s_bit;

	size_t n_src;
	pim_addr srcs[MLD_V2Q_MTU_MAX_SOURCES];
};
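A sketch of the aggregation this structure exists for; gm_gsq_add_source() is a hypothetical name, and the cap simply reuses MLD_V2Q_MTU_MAX_SOURCES so a single query stays within the guaranteed MTU budget:

/* Sketch: append one source to a pending group-and-source query. */
static bool gm_gsq_add_source(struct gm_gsq_pending *pend, pim_addr src)
{
	if (pend->n_src >= MLD_V2Q_MTU_MAX_SOURCES)
		return false; /* source budget for one query exhausted */

	pend->srcs[pend->n_src++] = src;
	return true;
}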


/* The size of this history is limited by QRV, i.e. there can't be more than
 * 8 items here.
 */
#define GM_MAX_PENDING 8

enum gm_version {
	GM_NONE,
	GM_MLDV1,
	GM_MLDV2,
};

struct gm_if_stats {
	uint64_t rx_drop_csum;
	uint64_t rx_drop_srcaddr;
	uint64_t rx_drop_dstaddr;
	uint64_t rx_drop_ra;
	uint64_t rx_drop_malformed;
	uint64_t rx_trunc_report;

	/* since the types are different, this is rx_old_*, not rx_*_old */
	uint64_t rx_old_report;
	uint64_t rx_old_leave;
	uint64_t rx_new_report;

	uint64_t rx_query_new_general;
	uint64_t rx_query_new_group;
	uint64_t rx_query_new_groupsrc;
	uint64_t rx_query_new_sbit;
	uint64_t rx_query_old_general;
	uint64_t rx_query_old_group;

	uint64_t tx_query_new_general;
	uint64_t tx_query_new_group;
	uint64_t tx_query_new_groupsrc;
	uint64_t tx_query_old_general;
	uint64_t tx_query_old_group;

	uint64_t tx_query_fail;
};

struct gm_if {
	struct interface *ifp;
	struct pim_instance *pim;
	struct event *t_query, *t_other_querier, *t_expire;

	bool stopping;

	uint8_t n_startup;

	uint8_t cur_qrv;
	unsigned int cur_query_intv;	  /* ms */
	unsigned int cur_query_intv_trig; /* ms */
	unsigned int cur_max_resp;	  /* ms */
	enum gm_version cur_version;
	int cur_lmqc; /* last member query count in ds */

	/* this value (positive, default 10ms) defines our "timing tolerance":
	 * - added to deadlines for expiring joins
	 * - used to look backwards in time for queries, in case a report was
	 *   reordered before the query
	 */
	struct timeval cfg_timing_fuzz;

	/* items in pending[] are sorted by expiry, pending[0] is earliest
	 * (see the sketch after this struct)
	 */
	struct gm_general_pending pending[GM_MAX_PENDING];
	uint8_t n_pending;
	struct gm_grp_pends_head grp_pends[1];
	struct gm_gsq_pends_head gsq_pends[1];

	pim_addr querier;
	pim_addr cur_ll_lowest;

	struct gm_sgs_head sgs[1];
	struct gm_subscribers_head subscribers[1];
	struct gm_packet_expires_head expires[1];

	struct timeval started;
	struct gm_if_stats stats;
};
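As referenced in the struct above, a sketch of maintaining the general-query history; gm_pending_general_add() is a hypothetical name, and it assumes a newly received query always implies the latest expiry, so appending keeps pending[] sorted with pending[0] earliest:

#include <string.h>

/* Sketch: record a general query and its implied expiry, capped at
 * GM_MAX_PENDING entries (QRV cannot exceed 8).
 */
static void gm_pending_general_add(struct gm_if *gm_ifp,
				   const struct timeval *query,
				   const struct timeval *expiry)
{
	if (gm_ifp->n_pending == GM_MAX_PENDING) {
		/* full: drop pending[0], the earliest-expiring entry */
		memmove(&gm_ifp->pending[0], &gm_ifp->pending[1],
			(GM_MAX_PENDING - 1) * sizeof(gm_ifp->pending[0]));
		gm_ifp->n_pending--;
	}

	gm_ifp->pending[gm_ifp->n_pending].query = *query;
	gm_ifp->pending[gm_ifp->n_pending].expiry = *expiry;
	gm_ifp->n_pending++;
}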

#if PIM_IPV == 6
extern void gm_ifp_update(struct interface *ifp);
extern void gm_ifp_teardown(struct interface *ifp);
extern void gm_group_delete(struct gm_if *gm_ifp);
#else
static inline void gm_ifp_update(struct interface *ifp)
{
}
Unexecuted instantiation: pim_cmd_common.c:gm_ifp_update
Executed instantiation: pim_iface.c:gm_ifp_update (count: 1)
Unexecuted instantiation: pim_nb_config.c:gm_ifp_update
Unexecuted instantiation: pim_vty.c:gm_ifp_update

static inline void gm_ifp_teardown(struct interface *ifp)
{
}
Unexecuted instantiation: pim_cmd_common.c:gm_ifp_teardown
Unexecuted instantiation: pim_iface.c:gm_ifp_teardown
Unexecuted instantiation: pim_nb_config.c:gm_ifp_teardown
Unexecuted instantiation: pim_vty.c:gm_ifp_teardown
#endif

extern void gm_cli_init(void);
bool in6_multicast_nofwd(const pim_addr *addr);

#endif /* PIM6_MLD_H */