Line | Count | Source |
1 | | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | | /* |
3 | | * PIM for Quagga |
4 | | * Copyright (C) 2015 Cumulus Networks, Inc. |
5 | | * Donald Sharp |
6 | | */ |
7 | | #include <zebra.h> |
8 | | |
9 | | #include "lib/json.h" |
10 | | #include "log.h" |
11 | | #include "network.h" |
12 | | #include "if.h" |
13 | | #include "linklist.h" |
14 | | #include "prefix.h" |
15 | | #include "memory.h" |
16 | | #include "vty.h" |
17 | | #include "vrf.h" |
18 | | #include "plist.h" |
19 | | #include "nexthop.h" |
20 | | #include "table.h" |
21 | | #include "lib_errors.h" |
22 | | |
23 | | #include "pimd.h" |
24 | | #include "pim_instance.h" |
25 | | #include "pim_vty.h" |
26 | | #include "pim_str.h" |
27 | | #include "pim_iface.h" |
28 | | #include "pim_rp.h" |
29 | | #include "pim_rpf.h" |
30 | | #include "pim_sock.h" |
31 | | #include "pim_memory.h" |
32 | | #include "pim_neighbor.h" |
33 | | #include "pim_msdp.h" |
34 | | #include "pim_nht.h" |
35 | | #include "pim_mroute.h" |
36 | | #include "pim_oil.h" |
37 | | #include "pim_zebra.h" |
38 | | #include "pim_bsm.h" |
39 | | #include "pim_util.h" |
40 | | #include "pim_ssm.h" |
41 | | #include "termtable.h" |
42 | | |
43 | | /* Clean up the per-node data of pim->rpf_hash */ |
44 | | void pim_rp_list_hash_clean(void *data) |
45 | 0 | { |
46 | 0 | struct pim_nexthop_cache *pnc = (struct pim_nexthop_cache *)data; |
47 | |
48 | 0 | list_delete(&pnc->rp_list); |
49 | |
50 | 0 | hash_clean_and_free(&pnc->upstream_hash, NULL); |
51 | 0 | if (pnc->nexthop) |
52 | 0 | nexthops_free(pnc->nexthop); |
53 | |
54 | 0 | XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc); |
55 | 0 | } |
56 | | |
57 | | static void pim_rp_info_free(struct rp_info *rp_info) |
58 | 0 | { |
59 | 0 | XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist); |
60 | |
61 | 0 | XFREE(MTYPE_PIM_RP, rp_info); |
62 | 0 | } |
63 | | |
64 | | int pim_rp_list_cmp(void *v1, void *v2) |
65 | 589k | { |
66 | 589k | struct rp_info *rp1 = (struct rp_info *)v1; |
67 | 589k | struct rp_info *rp2 = (struct rp_info *)v2; |
68 | 589k | int ret; |
69 | | |
70 | | /* |
71 | | * Sort by RP IP address |
72 | | */ |
73 | 589k | ret = pim_addr_cmp(rp1->rp.rpf_addr, rp2->rp.rpf_addr); |
74 | 589k | if (ret) |
75 | 551k | return ret; |
76 | | |
77 | | /* |
78 | | * Sort by group IP address |
79 | | */ |
80 | 37.9k | ret = prefix_cmp(&rp1->group, &rp2->group); |
81 | 37.9k | if (ret) |
82 | 37.9k | return ret; |
83 | | |
84 | 0 | return 0; |
85 | 37.9k | } |
86 | | |
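
pim_rp_list_cmp() keeps pim->rp_list ordered by two keys: the RP address first, and the group prefix only as a tie-breaker when two entries share the same RP. A minimal standalone sketch of the same two-key ordering, using plain uint32_t values instead of FRR's pim_addr/prefix types (struct toy_rp, toy_rp_cmp and the sample addresses are illustrative, not FRR API):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct rp_info: an RP address plus a group range. */
struct toy_rp {
	uint32_t rp_addr;    /* RP address, host byte order */
	uint32_t group_addr; /* group prefix, host byte order */
	uint8_t group_len;   /* group prefix length */
};

/* Two-key comparison mirroring pim_rp_list_cmp(): the group prefix is only
 * consulted when the RP addresses are equal. */
static int toy_rp_cmp(const void *v1, const void *v2)
{
	const struct toy_rp *a = v1, *b = v2;

	if (a->rp_addr != b->rp_addr)
		return a->rp_addr < b->rp_addr ? -1 : 1;
	if (a->group_addr != b->group_addr)
		return a->group_addr < b->group_addr ? -1 : 1;
	if (a->group_len != b->group_len)
		return a->group_len < b->group_len ? -1 : 1;
	return 0;
}

int main(void)
{
	struct toy_rp rps[] = {
		{ 0xc0000202, 0xe1000000, 8 }, /* 192.0.2.2, 225.0.0.0/8 */
		{ 0xc0000201, 0xe2000000, 8 }, /* 192.0.2.1, 226.0.0.0/8 */
		{ 0xc0000201, 0xe1000000, 8 }, /* 192.0.2.1, 225.0.0.0/8 */
	};

	qsort(rps, 3, sizeof(rps[0]), toy_rp_cmp);
	for (int i = 0; i < 3; i++)
		printf("rp 0x%08x group 0x%08x/%d\n", (unsigned)rps[i].rp_addr,
		       (unsigned)rps[i].group_addr, rps[i].group_len);
	return 0;
}

Entries for the same RP address end up adjacent and ordered by group, which is the ordering listnode_add_sort() maintains when it inserts into pim->rp_list.
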
87 | | void pim_rp_init(struct pim_instance *pim) |
88 | 1 | { |
89 | 1 | struct rp_info *rp_info; |
90 | 1 | struct route_node *rn; |
91 | | |
92 | 1 | pim->rp_list = list_new(); |
93 | 1 | pim->rp_list->del = (void (*)(void *))pim_rp_info_free; |
94 | 1 | pim->rp_list->cmp = pim_rp_list_cmp; |
95 | | |
96 | 1 | pim->rp_table = route_table_init(); |
97 | | |
98 | 1 | rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info)); |
99 | | |
100 | 1 | if (!pim_get_all_mcast_group(&rp_info->group)) { |
101 | 0 | flog_err(EC_LIB_DEVELOPMENT, |
102 | 0 | "Unable to convert all-multicast prefix"); |
103 | 0 | list_delete(&pim->rp_list); |
104 | 0 | route_table_finish(pim->rp_table); |
105 | 0 | XFREE(MTYPE_PIM_RP, rp_info); |
106 | 0 | return; |
107 | 0 | } |
108 | 1 | rp_info->rp.rpf_addr = PIMADDR_ANY; |
109 | | |
110 | 1 | listnode_add(pim->rp_list, rp_info); |
111 | | |
112 | 1 | rn = route_node_get(pim->rp_table, &rp_info->group); |
113 | 1 | rn->info = rp_info; |
114 | 1 | if (PIM_DEBUG_PIM_TRACE) |
115 | 0 | zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn, |
116 | 1 | rp_info, &rp_info->group, |
117 | 1 | route_node_get_lock_count(rn)); |
118 | 1 | } |
119 | | |
120 | | void pim_rp_free(struct pim_instance *pim) |
121 | 0 | { |
122 | 0 | if (pim->rp_table) |
123 | 0 | route_table_finish(pim->rp_table); |
124 | 0 | pim->rp_table = NULL; |
125 | |
126 | 0 | if (pim->rp_list) |
127 | 0 | list_delete(&pim->rp_list); |
128 | 0 | } |
129 | | |
130 | | /* |
131 | | * Given an RP's prefix-list, return the RP's rp_info for that prefix-list |
132 | | */ |
133 | | static struct rp_info *pim_rp_find_prefix_list(struct pim_instance *pim, |
134 | | pim_addr rp, const char *plist) |
135 | 0 | { |
136 | 0 | struct listnode *node; |
137 | 0 | struct rp_info *rp_info; |
138 | |
139 | 0 | for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) { |
140 | 0 | if ((!pim_addr_cmp(rp, rp_info->rp.rpf_addr)) && |
141 | 0 | rp_info->plist && strcmp(rp_info->plist, plist) == 0) { |
142 | 0 | return rp_info; |
143 | 0 | } |
144 | 0 | } |
145 | | |
146 | 0 | return NULL; |
147 | 0 | } |
148 | | |
149 | | /* |
150 | | * Return true if plist is used by any rp_info |
151 | | */ |
152 | | static int pim_rp_prefix_list_used(struct pim_instance *pim, const char *plist) |
153 | 0 | { |
154 | 0 | struct listnode *node; |
155 | 0 | struct rp_info *rp_info; |
156 | |
157 | 0 | for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) { |
158 | 0 | if (rp_info->plist && strcmp(rp_info->plist, plist) == 0) { |
159 | 0 | return 1; |
160 | 0 | } |
161 | 0 | } |
162 | | |
163 | 0 | return 0; |
164 | 0 | } |
165 | | |
166 | | /* |
167 | | * Given an RP's address, return the RP's rp_info that is an exact match for |
168 | | * 'group' |
169 | | */ |
170 | | static struct rp_info *pim_rp_find_exact(struct pim_instance *pim, pim_addr rp, |
171 | | const struct prefix *group) |
172 | 28.0k | { |
173 | 28.0k | struct listnode *node; |
174 | 28.0k | struct rp_info *rp_info; |
175 | | |
176 | 3.12M | for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) { |
177 | 3.12M | if ((!pim_addr_cmp(rp, rp_info->rp.rpf_addr)) && |
178 | 69.8k | prefix_same(&rp_info->group, group)) |
179 | 14.1k | return rp_info; |
180 | 3.12M | } |
181 | | |
182 | 13.8k | return NULL; |
183 | 28.0k | } |
184 | | |
185 | | /* |
186 | | * XXX: long-term issue: we don't actually have a good "ip address-list" |
187 | | * implementation. ("access-list XYZ" is the closest but honestly it's |
188 | | * kinda garbage.) |
189 | | * |
190 | | * So it's using a prefix-list to match an address here, which causes very |
191 | | * unexpected results for the user since prefix-lists by default only match |
192 | | * when the prefix length is an exact match too. i.e. you'd have to add the |
193 | | * "le 32" and do "ip prefix-list foo permit 10.0.0.0/24 le 32" |
194 | | * |
195 | | * To avoid this pitfall, this code uses "address_mode = true" for the prefix |
196 | | * list match (this is the only user for that.) |
197 | | * |
198 | | * In the long run, we need to add a "ip address-list", but that's a wholly |
199 | | * separate bag of worms, and existing configs using ip prefix-list would |
200 | | * drop into the UX pitfall. |
201 | | */ |
202 | | |
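
The pitfall described above comes down to a one-line difference in the match predicate. A self-contained sketch, with a toy entry type standing in for FRR's prefix-list internals (match_exact() and match_address() are inventions for this illustration): the default prefix-list semantics also compare the prefix length, so a host-length group address never matches a shorter entry unless "le 32" is configured, while the address-mode match requested by this code only checks containment.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative prefix-list entry: network/length, no le/ge modifiers. */
struct toy_plist_entry {
	uint32_t network;  /* host byte order */
	uint8_t prefixlen;
};

static uint32_t mask_of(uint8_t len)
{
	return len == 0 ? 0 : 0xffffffffu << (32 - len);
}

/* Default prefix-list semantics: both the bits and the length must match. */
static bool match_exact(const struct toy_plist_entry *e, uint32_t addr,
			uint8_t addrlen)
{
	return addrlen == e->prefixlen &&
	       (addr & mask_of(e->prefixlen)) == e->network;
}

/* "Address mode": treat the query as a host address and only check
 * containment, which is the behaviour pim_rp_find_match_group() asks
 * prefix_list_apply_ext() for. */
static bool match_address(const struct toy_plist_entry *e, uint32_t addr)
{
	return (addr & mask_of(e->prefixlen)) == e->network;
}

int main(void)
{
	struct toy_plist_entry e = { 0xe1010100, 24 }; /* 225.1.1.0/24 */
	uint32_t group = 0xe1010105;                   /* 225.1.1.5 */

	printf("exact-length match: %d\n", match_exact(&e, group, 32)); /* 0 */
	printf("address-mode match: %d\n", match_address(&e, group));   /* 1 */
	return 0;
}
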
203 | | #include "lib/plist_int.h" |
204 | | |
205 | | /* |
206 | | * Given a group, return the rp_info for that group |
207 | | */ |
208 | | struct rp_info *pim_rp_find_match_group(struct pim_instance *pim, |
209 | | const struct prefix *group) |
210 | 23.5M | { |
211 | 23.5M | struct listnode *node; |
212 | 23.5M | struct rp_info *best = NULL; |
213 | 23.5M | struct rp_info *rp_info; |
214 | 23.5M | struct prefix_list *plist; |
215 | 23.5M | const struct prefix *bp; |
216 | 23.5M | const struct prefix_list_entry *entry; |
217 | 23.5M | struct route_node *rn; |
218 | | |
219 | 23.5M | bp = NULL; |
220 | 4.60G | for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) { |
221 | 4.60G | if (rp_info->plist) { |
222 | 0 | plist = prefix_list_lookup(PIM_AFI, rp_info->plist); |
223 | |
224 | 0 | if (prefix_list_apply_ext(plist, &entry, group, true) |
225 | 0 | == PREFIX_DENY || !entry) |
226 | 0 | continue; |
227 | | |
228 | 0 | if (!best) { |
229 | 0 | best = rp_info; |
230 | 0 | bp = &entry->prefix; |
231 | 0 | continue; |
232 | 0 | } |
233 | | |
234 | 0 | if (bp && bp->prefixlen < entry->prefix.prefixlen) { |
235 | 0 | best = rp_info; |
236 | 0 | bp = &entry->prefix; |
237 | 0 | } |
238 | 0 | } |
239 | 4.60G | } |
240 | | |
241 | 23.5M | rn = route_node_match(pim->rp_table, group); |
242 | 23.5M | if (!rn) { |
243 | 12.6M | flog_err( |
244 | 12.6M | EC_LIB_DEVELOPMENT, |
245 | 12.6M | "%s: BUG We should have found default group information", |
246 | 12.6M | __func__); |
247 | 12.6M | return best; |
248 | 12.6M | } |
249 | | |
250 | 10.9M | rp_info = rn->info; |
251 | 10.9M | if (PIM_DEBUG_PIM_TRACE) { |
252 | 0 | if (best) |
253 | 0 | zlog_debug( |
254 | 0 | "Lookedup(%pFX): prefix_list match %s, rn %p found: %pFX", |
255 | 0 | group, best->plist, rn, &rp_info->group); |
256 | 0 | else |
257 | 0 | zlog_debug("Lookedup(%pFX): rn %p found:%pFX", group, |
258 | 0 | rn, &rp_info->group); |
259 | 0 | } |
260 | | |
261 | 10.9M | route_unlock_node(rn); |
262 | | |
263 | | /* |
264 | | * RPs with prefix lists have their group set to 224.0.0.0/4, which
265 | | * matches anything. So if the rp_info found by the table lookup is
266 | | * prefix-list based, then 'best' is the answer (even
267 | | * if it is NULL).
268 | | */ |
269 | 10.9M | if (!rp_info || (rp_info && rp_info->plist)) |
270 | 0 | return best; |
271 | | |
272 | | /* |
273 | | * So we have a non-plist rp_info found in the lookup and no plists
274 | | * at all to be chosen; return it!
275 | | */ |
276 | 10.9M | if (!best) |
277 | 10.9M | return rp_info; |
278 | | |
279 | | /* |
280 | | * If we have both a matching non-prefix-list entry and a matching
281 | | * prefix-list entry, return the rp_info with the longest prefix
282 | | * match. If they are equal, use the prefix-list (but let's hope
283 | | * the operator doesn't do this).
284 | | */ |
285 | 0 | if (rp_info->group.prefixlen > bp->prefixlen) |
286 | 0 | best = rp_info; |
287 | |
288 | 0 | return best; |
289 | 10.9M | } |
290 | | |
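
The tail of pim_rp_find_match_group() resolves two independent candidates: the longest matching prefix-list entry ('best') and the longest-prefix match from pim->rp_table ('rp_info'). A small sketch of that tie-break using prefix lengths only (pick_rp_source() and its arguments are illustrative, with -1 standing for "no match"): the route-table entry wins only when its prefix is strictly longer, and on a tie the prefix-list entry is preferred, as the comment above notes.

#include <stdio.h>

/* Illustrative resolution of the two candidates: 'table_len' is the prefix
 * length of the route-table longest-prefix match, 'plist_len' the length of
 * the best matching prefix-list entry; -1 means "no match". */
static const char *pick_rp_source(int table_len, int plist_len)
{
	if (plist_len < 0)
		return "route-table rp_info"; /* only the table matched */
	if (table_len < 0)
		return "prefix-list rp_info"; /* only a prefix-list matched */
	/* Both matched: longest prefix wins; the prefix-list wins ties. */
	return table_len > plist_len ? "route-table rp_info"
				     : "prefix-list rp_info";
}

int main(void)
{
	printf("%s\n", pick_rp_source(24, -1)); /* route-table rp_info */
	printf("%s\n", pick_rp_source(4, 24));  /* prefix-list rp_info */
	printf("%s\n", pick_rp_source(28, 24)); /* route-table rp_info */
	return 0;
}
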
291 | | /* |
292 | | * When the user makes "ip pim rp" configuration changes or if they change the |
293 | | * prefix-list(s) used by these statements we must tickle the upstream state |
294 | | * for each group to make them re-lookup who their RP should be. |
295 | | * |
296 | | * This is a placeholder function for now. |
297 | | */ |
298 | | void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim) |
299 | 25.7k | { |
300 | 25.7k | pim_msdp_i_am_rp_changed(pim); |
301 | 25.7k | pim_upstream_reeval_use_rpt(pim); |
302 | 25.7k | } |
303 | | |
304 | | void pim_rp_prefix_list_update(struct pim_instance *pim, |
305 | | struct prefix_list *plist) |
306 | 0 | { |
307 | 0 | struct listnode *node; |
308 | 0 | struct rp_info *rp_info; |
309 | 0 | int refresh_needed = 0; |
310 | |
311 | 0 | for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) { |
312 | 0 | if (rp_info->plist |
313 | 0 | && strcmp(rp_info->plist, prefix_list_name(plist)) == 0) { |
314 | 0 | refresh_needed = 1; |
315 | 0 | break; |
316 | 0 | } |
317 | 0 | } |
318 | |
319 | 0 | if (refresh_needed) |
320 | 0 | pim_rp_refresh_group_to_rp_mapping(pim); |
321 | 0 | } |
322 | | |
323 | | static int pim_rp_check_interface_addrs(struct rp_info *rp_info, |
324 | | struct pim_interface *pim_ifp) |
325 | 34.2k | { |
326 | 34.2k | struct listnode *node; |
327 | 34.2k | struct pim_secondary_addr *sec_addr; |
328 | 34.2k | pim_addr sec_paddr; |
329 | | |
330 | 34.2k | if (!pim_addr_cmp(pim_ifp->primary_address, rp_info->rp.rpf_addr)) |
331 | 1.16k | return 1; |
332 | | |
333 | 33.0k | if (!pim_ifp->sec_addr_list) { |
334 | 0 | return 0; |
335 | 0 | } |
336 | | |
337 | 33.0k | for (ALL_LIST_ELEMENTS_RO(pim_ifp->sec_addr_list, node, sec_addr)) { |
338 | 0 | sec_paddr = pim_addr_from_prefix(&sec_addr->addr); |
339 | | /* If an RP address is one of our own, it is enough to say
340 | | * "I am RP"; the prefix length should not matter here */
341 | 0 | if (!pim_addr_cmp(sec_paddr, rp_info->rp.rpf_addr)) |
342 | 0 | return 1; |
343 | 0 | } |
344 | | |
345 | 33.0k | return 0; |
346 | 33.0k | } |
347 | | |
348 | | static void pim_rp_check_interfaces(struct pim_instance *pim, |
349 | | struct rp_info *rp_info) |
350 | 17.1k | { |
351 | 17.1k | struct interface *ifp; |
352 | | |
353 | 17.1k | rp_info->i_am_rp = 0; |
354 | 34.2k | FOR_ALL_INTERFACES (pim->vrf, ifp) { |
355 | 34.2k | struct pim_interface *pim_ifp = ifp->info; |
356 | | |
357 | 34.2k | if (!pim_ifp) |
358 | 0 | continue; |
359 | | |
360 | 34.2k | if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) { |
361 | 1.16k | rp_info->i_am_rp = 1; |
362 | 1.16k | } |
363 | 34.2k | } |
364 | 17.1k | } |
365 | | |
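
pim_rp_check_interface_addrs() declares this router the RP when the RP address equals an interface's primary address or any of its secondary addresses, and pim_rp_check_interfaces() simply repeats that test over every PIM interface in the VRF. A minimal sketch of the same two loops with plain uint32_t addresses (struct toy_iface and the helper names are illustrative, not FRR API):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative interface: one primary address plus a small secondary list. */
struct toy_iface {
	uint32_t primary;
	const uint32_t *secondary;
	size_t n_secondary;
};

/* Mirrors pim_rp_check_interface_addrs(): the interface "owns" the RP
 * address if it equals the primary or any secondary address. */
static bool iface_owns_addr(const struct toy_iface *ifp, uint32_t rp_addr)
{
	if (ifp->primary == rp_addr)
		return true;
	for (size_t i = 0; i < ifp->n_secondary; i++)
		if (ifp->secondary[i] == rp_addr)
			return true;
	return false;
}

/* Mirrors pim_rp_check_interfaces(): scan all interfaces and latch i_am_rp. */
static bool toy_i_am_rp(const struct toy_iface *ifaces, size_t n,
			uint32_t rp_addr)
{
	for (size_t i = 0; i < n; i++)
		if (iface_owns_addr(&ifaces[i], rp_addr))
			return true;
	return false;
}

int main(void)
{
	const uint32_t lo_secondary[] = { 0x0a000001 };  /* 10.0.0.1 */
	struct toy_iface ifaces[] = {
		{ 0xc0000201, NULL, 0 },         /* 192.0.2.1, no secondaries */
		{ 0x7f000001, lo_secondary, 1 }, /* loopback with a secondary */
	};

	printf("%d\n", toy_i_am_rp(ifaces, 2, 0x0a000001)); /* 1: secondary match */
	printf("%d\n", toy_i_am_rp(ifaces, 2, 0x0a000002)); /* 0: no match */
	return 0;
}
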
366 | | void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up) |
367 | 183k | { |
368 | 183k | struct pim_rpf old_rpf; |
369 | 183k | enum pim_rpf_result rpf_result; |
370 | 183k | pim_addr old_upstream_addr; |
371 | 183k | pim_addr new_upstream_addr; |
372 | | |
373 | 183k | old_upstream_addr = up->upstream_addr; |
374 | 183k | pim_rp_set_upstream_addr(pim, &new_upstream_addr, up->sg.src, |
375 | 183k | up->sg.grp); |
376 | | |
377 | 183k | if (PIM_DEBUG_PIM_TRACE) |
378 | 0 | zlog_debug("%s: pim upstream update for old upstream %pPA", |
379 | 183k | __func__, &old_upstream_addr); |
380 | | |
381 | 183k | if (!pim_addr_cmp(old_upstream_addr, new_upstream_addr)) |
382 | 133k | return; |
383 | | |
384 | | /* Consider the case where a PIM upstream gets a better RP as a
385 | | * result of a new RP configuration with a more precise group range.
386 | | * This upstream has to be added to the upstream hash of the new RP's
387 | | * NHT (pnc) and removed from the old RP's NHT upstream hash.
388 | | */ |
389 | 49.7k | if (!pim_addr_is_any(old_upstream_addr)) { |
390 | | /* Deregister addr with Zebra NHT */ |
391 | 24.8k | if (PIM_DEBUG_PIM_TRACE) |
392 | 0 | zlog_debug( |
393 | 24.8k | "%s: Deregister upstream %s addr %pPA with Zebra NHT", |
394 | 24.8k | __func__, up->sg_str, &old_upstream_addr); |
395 | 24.8k | pim_delete_tracked_nexthop(pim, old_upstream_addr, up, NULL); |
396 | 24.8k | } |
397 | | |
398 | | /* Update the upstream address */ |
399 | 49.7k | up->upstream_addr = new_upstream_addr; |
400 | | |
401 | 49.7k | old_rpf.source_nexthop.interface = up->rpf.source_nexthop.interface; |
402 | | |
403 | 49.7k | rpf_result = pim_rpf_update(pim, up, &old_rpf, __func__); |
404 | 49.7k | if (rpf_result == PIM_RPF_FAILURE) |
405 | 40.4k | pim_mroute_del(up->channel_oil, __func__); |
406 | | |
407 | | /* update kernel multicast forwarding cache (MFC) */ |
408 | 49.7k | if (up->rpf.source_nexthop.interface && up->channel_oil) |
409 | 0 | pim_upstream_mroute_iif_update(up->channel_oil, __func__); |
410 | | |
411 | 49.7k | if (rpf_result == PIM_RPF_CHANGED || |
412 | 49.7k | (rpf_result == PIM_RPF_FAILURE && |
413 | 40.4k | old_rpf.source_nexthop.interface)) |
414 | 0 | pim_zebra_upstream_rpf_changed(pim, up, &old_rpf); |
415 | | |
416 | 49.7k | } |
417 | | |
418 | | int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group, |
419 | | const char *plist, enum rp_source rp_src_flag) |
420 | 25.7k | { |
421 | 25.7k | int result = 0; |
422 | 25.7k | struct rp_info *rp_info; |
423 | 25.7k | struct rp_info *rp_all; |
424 | 25.7k | struct prefix group_all; |
425 | 25.7k | struct listnode *node, *nnode; |
426 | 25.7k | struct rp_info *tmp_rp_info; |
427 | 25.7k | char buffer[BUFSIZ]; |
428 | 25.7k | pim_addr nht_p; |
429 | 25.7k | struct route_node *rn = NULL; |
430 | 25.7k | struct pim_upstream *up; |
431 | 25.7k | bool upstream_updated = false; |
432 | | |
433 | 25.7k | if (pim_addr_is_any(rp_addr)) |
434 | 2.91k | return PIM_RP_BAD_ADDRESS; |
435 | | |
436 | 22.8k | rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info)); |
437 | | |
438 | 22.8k | rp_info->rp.rpf_addr = rp_addr; |
439 | 22.8k | prefix_copy(&rp_info->group, &group); |
440 | 22.8k | rp_info->rp_src = rp_src_flag; |
441 | | |
442 | 22.8k | if (plist) { |
443 | | /* |
444 | | * Return if the prefix-list is already configured for this RP |
445 | | */ |
446 | 0 | if (pim_rp_find_prefix_list(pim, rp_addr, plist)) { |
447 | 0 | XFREE(MTYPE_PIM_RP, rp_info); |
448 | 0 | return PIM_SUCCESS; |
449 | 0 | } |
450 | | |
451 | | /* |
452 | | * Barf if the prefix-list is already configured for an RP |
453 | | */ |
454 | 0 | if (pim_rp_prefix_list_used(pim, plist)) { |
455 | 0 | XFREE(MTYPE_PIM_RP, rp_info); |
456 | 0 | return PIM_RP_PFXLIST_IN_USE; |
457 | 0 | } |
458 | | |
459 | | /* |
460 | | * Free any existing rp_info entries for this RP |
461 | | */ |
462 | 0 | for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode, |
463 | 0 | tmp_rp_info)) { |
464 | 0 | if (!pim_addr_cmp(rp_info->rp.rpf_addr, |
465 | 0 | tmp_rp_info->rp.rpf_addr)) { |
466 | 0 | if (tmp_rp_info->plist) |
467 | 0 | pim_rp_del_config(pim, rp_addr, NULL, |
468 | 0 | tmp_rp_info->plist); |
469 | 0 | else |
470 | 0 | pim_rp_del_config( |
471 | 0 | pim, rp_addr, |
472 | 0 | prefix2str(&tmp_rp_info->group, |
473 | 0 | buffer, BUFSIZ), |
474 | 0 | NULL); |
475 | 0 | } |
476 | 0 | } |
477 | |
478 | 0 | rp_info->plist = XSTRDUP(MTYPE_PIM_FILTER_NAME, plist); |
479 | 22.8k | } else { |
480 | | |
481 | 22.8k | if (!pim_get_all_mcast_group(&group_all)) { |
482 | 0 | XFREE(MTYPE_PIM_RP, rp_info); |
483 | 0 | return PIM_GROUP_BAD_ADDRESS; |
484 | 0 | } |
485 | 22.8k | rp_all = pim_rp_find_match_group(pim, &group_all); |
486 | | |
487 | | /* |
488 | | * Barf if group is a non-multicast subnet |
489 | | */ |
490 | 22.8k | if (!prefix_match(&rp_all->group, &rp_info->group)) { |
491 | 5.73k | XFREE(MTYPE_PIM_RP, rp_info); |
492 | 5.73k | return PIM_GROUP_BAD_ADDRESS; |
493 | 5.73k | } |
494 | | |
495 | | /* |
496 | | * Remove any prefix-list rp_info entries for this RP |
497 | | */ |
498 | 17.1k | for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode, |
499 | 3.30M | tmp_rp_info)) { |
500 | 3.30M | if (tmp_rp_info->plist && |
501 | 0 | (!pim_addr_cmp(rp_info->rp.rpf_addr, |
502 | 0 | tmp_rp_info->rp.rpf_addr))) { |
503 | 0 | pim_rp_del_config(pim, rp_addr, NULL, |
504 | 0 | tmp_rp_info->plist); |
505 | 0 | } |
506 | 3.30M | } |
507 | | |
508 | | /* |
509 | | * Take over the 224.0.0.0/4 group if the rp is INADDR_ANY |
510 | | */ |
511 | 17.1k | if (prefix_same(&rp_all->group, &rp_info->group) && |
512 | 8.32k | pim_rpf_addr_is_inaddr_any(&rp_all->rp)) { |
513 | 8.32k | rp_all->rp.rpf_addr = rp_info->rp.rpf_addr; |
514 | 8.32k | rp_all->rp_src = rp_src_flag; |
515 | 8.32k | XFREE(MTYPE_PIM_RP, rp_info); |
516 | | |
517 | | /* Register addr with Zebra NHT */ |
518 | 8.32k | nht_p = rp_all->rp.rpf_addr; |
519 | 8.32k | if (PIM_DEBUG_PIM_NHT_RP) |
520 | 0 | zlog_debug( |
521 | 8.32k | "%s: NHT Register rp_all addr %pPA grp %pFX ", |
522 | 8.32k | __func__, &nht_p, &rp_all->group); |
523 | | |
524 | 5.90M | frr_each (rb_pim_upstream, &pim->upstream_head, up) { |
525 | | /* Find (*, G) upstream whose RP is not |
526 | | * configured yet |
527 | | */ |
528 | 5.90M | if (pim_addr_is_any(up->upstream_addr) && |
529 | 3.26M | pim_addr_is_any(up->sg.src)) { |
530 | 691k | struct prefix grp; |
531 | 691k | struct rp_info *trp_info; |
532 | | |
533 | 691k | pim_addr_to_prefix(&grp, up->sg.grp); |
534 | 691k | trp_info = pim_rp_find_match_group( |
535 | 691k | pim, &grp); |
536 | 691k | if (trp_info == rp_all) { |
537 | 20.1k | pim_upstream_update(pim, up); |
538 | 20.1k | upstream_updated = true; |
539 | 20.1k | } |
540 | 691k | } |
541 | 5.90M | } |
542 | 8.32k | if (upstream_updated) |
543 | 6.63k | pim_zebra_update_all_interfaces(pim); |
544 | | |
545 | 8.32k | pim_rp_check_interfaces(pim, rp_all); |
546 | 8.32k | pim_rp_refresh_group_to_rp_mapping(pim); |
547 | 8.32k | pim_find_or_track_nexthop(pim, nht_p, NULL, rp_all, |
548 | 8.32k | NULL); |
549 | | |
550 | 8.32k | if (!pim_ecmp_nexthop_lookup(pim, |
551 | 8.32k | &rp_all->rp.source_nexthop, |
552 | 8.32k | nht_p, &rp_all->group, 1)) |
553 | 8.32k | return PIM_RP_NO_PATH; |
554 | 0 | return PIM_SUCCESS; |
555 | 8.32k | } |
556 | | |
557 | | /* |
558 | | * Return if the group is already configured for this RP |
559 | | */ |
560 | 8.80k | tmp_rp_info = pim_rp_find_exact(pim, rp_addr, &rp_info->group); |
561 | 8.80k | if (tmp_rp_info) { |
562 | 0 | if ((tmp_rp_info->rp_src != rp_src_flag) |
563 | 0 | && (rp_src_flag == RP_SRC_STATIC)) |
564 | 0 | tmp_rp_info->rp_src = rp_src_flag; |
565 | 0 | XFREE(MTYPE_PIM_RP, rp_info); |
566 | 0 | return result; |
567 | 0 | } |
568 | | |
569 | | /* |
570 | | * Barf if this group is already covered by some other RP |
571 | | */ |
572 | 8.80k | tmp_rp_info = pim_rp_find_match_group(pim, &rp_info->group); |
573 | | |
574 | 8.80k | if (tmp_rp_info) { |
575 | 8.80k | if (tmp_rp_info->plist) { |
576 | 0 | XFREE(MTYPE_PIM_RP, rp_info); |
577 | 0 | return PIM_GROUP_PFXLIST_OVERLAP; |
578 | 8.80k | } else { |
579 | | /* |
580 | | * If the only RP that covers this group is an |
581 | | * RP configured for |
582 | | * 224.0.0.0/4 that is fine, ignore that one. |
583 | | * For all others |
584 | | * though we must return PIM_GROUP_OVERLAP |
585 | | */ |
586 | 8.80k | if (prefix_same(&rp_info->group, |
587 | 8.80k | &tmp_rp_info->group)) { |
588 | 0 | if ((rp_src_flag == RP_SRC_STATIC) |
589 | 0 | && (tmp_rp_info->rp_src |
590 | 0 | == RP_SRC_STATIC)) { |
591 | 0 | XFREE(MTYPE_PIM_RP, rp_info); |
592 | 0 | return PIM_GROUP_OVERLAP; |
593 | 0 | } |
594 | | |
595 | 0 | result = pim_rp_change( |
596 | 0 | pim, rp_addr, |
597 | 0 | tmp_rp_info->group, |
598 | 0 | rp_src_flag); |
599 | 0 | XFREE(MTYPE_PIM_RP, rp_info); |
600 | 0 | return result; |
601 | 0 | } |
602 | 8.80k | } |
603 | 8.80k | } |
604 | 8.80k | } |
605 | | |
606 | 8.80k | listnode_add_sort(pim->rp_list, rp_info); |
607 | | |
608 | 8.80k | if (!rp_info->plist) { |
609 | 8.80k | rn = route_node_get(pim->rp_table, &rp_info->group); |
610 | 8.80k | rn->info = rp_info; |
611 | 8.80k | } |
612 | | |
613 | 8.80k | if (PIM_DEBUG_PIM_TRACE) |
614 | 0 | zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn, |
615 | 8.80k | rp_info, &rp_info->group, |
616 | 8.80k | rn ? route_node_get_lock_count(rn) : 0); |
617 | | |
618 | 6.01M | frr_each (rb_pim_upstream, &pim->upstream_head, up) { |
619 | 6.01M | if (pim_addr_is_any(up->sg.src)) { |
620 | 1.43M | struct prefix grp; |
621 | 1.43M | struct rp_info *trp_info; |
622 | | |
623 | 1.43M | pim_addr_to_prefix(&grp, up->sg.grp); |
624 | 1.43M | trp_info = pim_rp_find_match_group(pim, &grp); |
625 | | |
626 | 1.43M | if (trp_info == rp_info) { |
627 | 4.37k | pim_upstream_update(pim, up); |
628 | 4.37k | upstream_updated = true; |
629 | 4.37k | } |
630 | 1.43M | } |
631 | 6.01M | } |
632 | | |
633 | 8.80k | if (upstream_updated) |
634 | 4.31k | pim_zebra_update_all_interfaces(pim); |
635 | | |
636 | 8.80k | pim_rp_check_interfaces(pim, rp_info); |
637 | 8.80k | pim_rp_refresh_group_to_rp_mapping(pim); |
638 | | |
639 | | /* Register addr with Zebra NHT */ |
640 | 8.80k | nht_p = rp_info->rp.rpf_addr; |
641 | 8.80k | if (PIM_DEBUG_PIM_NHT_RP) |
642 | 0 | zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ", |
643 | 8.80k | __func__, &nht_p, &rp_info->group); |
644 | 8.80k | pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL); |
645 | 8.80k | if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p, |
646 | 8.80k | &rp_info->group, 1)) |
647 | 8.80k | return PIM_RP_NO_PATH; |
648 | | |
649 | 0 | return PIM_SUCCESS; |
650 | 8.80k | } |
651 | | |
652 | | void pim_rp_del_config(struct pim_instance *pim, pim_addr rp_addr, |
653 | | const char *group_range, const char *plist) |
654 | 0 | { |
655 | 0 | struct prefix group; |
656 | 0 | int result; |
657 | |
658 | 0 | if (group_range == NULL) |
659 | 0 | result = pim_get_all_mcast_group(&group); |
660 | 0 | else |
661 | 0 | result = str2prefix(group_range, &group); |
662 | |
663 | 0 | if (!result) { |
664 | 0 | if (PIM_DEBUG_PIM_TRACE) |
665 | 0 | zlog_debug( |
666 | 0 | "%s: String to prefix failed for %pPAs group", |
667 | 0 | __func__, &rp_addr); |
668 | 0 | return; |
669 | 0 | } |
670 | | |
671 | 0 | pim_rp_del(pim, rp_addr, group, plist, RP_SRC_STATIC); |
672 | 0 | } |
673 | | |
674 | | int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group, |
675 | | const char *plist, enum rp_source rp_src_flag) |
676 | 19.2k | { |
677 | 19.2k | struct prefix g_all; |
678 | 19.2k | struct rp_info *rp_info; |
679 | 19.2k | struct rp_info *rp_all; |
680 | 19.2k | pim_addr nht_p; |
681 | 19.2k | struct route_node *rn; |
682 | 19.2k | bool was_plist = false; |
683 | 19.2k | struct rp_info *trp_info; |
684 | 19.2k | struct pim_upstream *up; |
685 | 19.2k | struct bsgrp_node *bsgrp = NULL; |
686 | 19.2k | struct bsm_rpinfo *bsrp = NULL; |
687 | 19.2k | bool upstream_updated = false; |
688 | | |
689 | 19.2k | if (plist) |
690 | 0 | rp_info = pim_rp_find_prefix_list(pim, rp_addr, plist); |
691 | 19.2k | else |
692 | 19.2k | rp_info = pim_rp_find_exact(pim, rp_addr, &group); |
693 | | |
694 | 19.2k | if (!rp_info) |
695 | 5.07k | return PIM_RP_NOT_FOUND; |
696 | | |
697 | 14.1k | if (rp_info->plist) { |
698 | 0 | XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist); |
699 | 0 | was_plist = true; |
700 | 0 | } |
701 | | |
702 | 14.1k | if (PIM_DEBUG_PIM_TRACE) |
703 | 0 | zlog_debug("%s: Delete RP %pPA for the group %pFX", __func__, |
704 | 14.1k | &rp_addr, &group); |
705 | | |
706 | | /* While a static RP is being deleted, check whether a dynamic RP is
707 | | * present for the same group in the BSM RP table; if so, install that
708 | | * dynamic RP for the group node into the main RP table.
709 | | */ |
710 | 14.1k | if (rp_src_flag == RP_SRC_STATIC) { |
711 | 0 | bsgrp = pim_bsm_get_bsgrp_node(&pim->global_scope, &group); |
712 | |
713 | 0 | if (bsgrp) { |
714 | 0 | bsrp = bsm_rpinfos_first(bsgrp->bsrp_list); |
715 | 0 | if (bsrp) { |
716 | 0 | if (PIM_DEBUG_PIM_TRACE) |
717 | 0 | zlog_debug( |
718 | 0 | "%s: BSM RP %pPA found for the group %pFX", |
719 | 0 | __func__, &bsrp->rp_address, |
720 | 0 | &group); |
721 | 0 | return pim_rp_change(pim, bsrp->rp_address, |
722 | 0 | group, RP_SRC_BSR); |
723 | 0 | } |
724 | 0 | } else { |
725 | 0 | if (PIM_DEBUG_PIM_TRACE) |
726 | 0 | zlog_debug( |
727 | 0 | "%s: BSM RP not found for the group %pFX", |
728 | 0 | __func__, &group); |
729 | 0 | } |
730 | 0 | } |
731 | | |
732 | | /* Deregister addr with Zebra NHT */ |
733 | 14.1k | nht_p = rp_info->rp.rpf_addr; |
734 | 14.1k | if (PIM_DEBUG_PIM_NHT_RP) |
735 | 0 | zlog_debug("%s: Deregister RP addr %pPA with Zebra ", __func__, |
736 | 14.1k | &nht_p); |
737 | 14.1k | pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info); |
738 | | |
739 | 14.1k | if (!pim_get_all_mcast_group(&g_all)) |
740 | 0 | return PIM_RP_BAD_ADDRESS; |
741 | | |
742 | 14.1k | rp_all = pim_rp_find_match_group(pim, &g_all); |
743 | | |
744 | 14.1k | if (rp_all == rp_info) { |
745 | 3.91M | frr_each (rb_pim_upstream, &pim->upstream_head, up) { |
746 | | /* Find the upstream (*, G) whose upstream address is |
747 | | * same as the deleted RP |
748 | | */ |
749 | 3.91M | pim_addr rpf_addr; |
750 | | |
751 | 3.91M | rpf_addr = rp_info->rp.rpf_addr; |
752 | 3.91M | if (!pim_addr_cmp(up->upstream_addr, rpf_addr) && |
753 | 355k | pim_addr_is_any(up->sg.src)) { |
754 | 85.4k | struct prefix grp; |
755 | | |
756 | 85.4k | pim_addr_to_prefix(&grp, up->sg.grp); |
757 | 85.4k | trp_info = pim_rp_find_match_group(pim, &grp); |
758 | 85.4k | if (trp_info == rp_all) { |
759 | 14.8k | pim_upstream_rpf_clear(pim, up); |
760 | 14.8k | up->upstream_addr = PIMADDR_ANY; |
761 | 14.8k | } |
762 | 85.4k | } |
763 | 3.91M | } |
764 | 5.57k | rp_all->rp.rpf_addr = PIMADDR_ANY; |
765 | 5.57k | rp_all->i_am_rp = 0; |
766 | 5.57k | return PIM_SUCCESS; |
767 | 5.57k | } |
768 | | |
769 | 8.59k | listnode_delete(pim->rp_list, rp_info); |
770 | | |
771 | 8.59k | if (!was_plist) { |
772 | 8.59k | rn = route_node_get(pim->rp_table, &rp_info->group); |
773 | 8.59k | if (rn) { |
774 | 8.59k | if (rn->info != rp_info) |
775 | 0 | flog_err( |
776 | 8.59k | EC_LIB_DEVELOPMENT, |
777 | 8.59k | "Expected rn->info to be equal to rp_info"); |
778 | | |
779 | 8.59k | if (PIM_DEBUG_PIM_TRACE) |
780 | 0 | zlog_debug( |
781 | 8.59k | "%s:Found for Freeing: %p for rp_info: %p(%pFX) Lock: %d", |
782 | 8.59k | __func__, rn, rp_info, &rp_info->group, |
783 | 8.59k | route_node_get_lock_count(rn)); |
784 | | |
785 | 8.59k | rn->info = NULL; |
786 | 8.59k | route_unlock_node(rn); |
787 | 8.59k | route_unlock_node(rn); |
788 | 8.59k | } |
789 | 8.59k | } |
790 | | |
791 | 8.59k | pim_rp_refresh_group_to_rp_mapping(pim); |
792 | | |
793 | 5.94M | frr_each (rb_pim_upstream, &pim->upstream_head, up) { |
794 | | /* Find the upstream (*, G) whose upstream address is same as |
795 | | * the deleted RP |
796 | | */ |
797 | 5.94M | pim_addr rpf_addr; |
798 | | |
799 | 5.94M | rpf_addr = rp_info->rp.rpf_addr; |
800 | 5.94M | if (!pim_addr_cmp(up->upstream_addr, rpf_addr) && |
801 | 134k | pim_addr_is_any(up->sg.src)) { |
802 | 132k | struct prefix grp; |
803 | | |
804 | 132k | pim_addr_to_prefix(&grp, up->sg.grp); |
805 | 132k | trp_info = pim_rp_find_match_group(pim, &grp); |
806 | | |
807 | 132k | if (!trp_info) |
808 | 412 | continue; |
809 | | |
810 | | /* RP not found for the group grp */ |
811 | 131k | if (pim_rpf_addr_is_inaddr_any(&trp_info->rp)) { |
812 | 3.38k | pim_upstream_rpf_clear(pim, up); |
813 | 3.38k | pim_rp_set_upstream_addr( |
814 | 3.38k | pim, &up->upstream_addr, up->sg.src, |
815 | 3.38k | up->sg.grp); |
816 | 3.38k | } |
817 | | |
818 | | /* RP found for the group grp */ |
819 | 128k | else { |
820 | 128k | pim_upstream_update(pim, up); |
821 | 128k | upstream_updated = true; |
822 | 128k | } |
823 | 131k | } |
824 | 5.94M | } |
825 | | |
826 | 8.59k | if (upstream_updated) |
827 | 3.76k | pim_zebra_update_all_interfaces(pim); |
828 | | |
829 | 8.59k | XFREE(MTYPE_PIM_RP, rp_info); |
830 | 8.59k | return PIM_SUCCESS; |
831 | 14.1k | } |
832 | | |
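
At a high level, pim_rp_del() removes a mapping in one of three ways: a deleted static RP falls back to a BSR-learned RP for the same group if one exists, the catch-all 224.0.0.0/4 entry is never removed but only has its RP address cleared, and any other entry is unlinked from both the RP list and the RP route table. A hedged sketch of that decision order (the enum and helper are inventions for this illustration, not FRR code):

#include <stdio.h>

/* Illustrative outcomes of removing an RP mapping. */
enum rp_del_action {
	RP_DEL_SWITCH_TO_BSR,  /* static RP removed, BSR-learned RP exists */
	RP_DEL_RESET_CATCHALL, /* the 224.0.0.0/4 entry only loses its address */
	RP_DEL_REMOVE_ENTRY,   /* ordinary entry: unlink from list and table */
};

static enum rp_del_action rp_del_action(int is_static, int bsr_rp_exists,
					int is_catchall_entry)
{
	if (is_static && bsr_rp_exists)
		return RP_DEL_SWITCH_TO_BSR;
	if (is_catchall_entry)
		return RP_DEL_RESET_CATCHALL;
	return RP_DEL_REMOVE_ENTRY;
}

int main(void)
{
	printf("%d\n", rp_del_action(1, 1, 0)); /* 0: fall back to BSR RP */
	printf("%d\n", rp_del_action(1, 0, 1)); /* 1: clear the catch-all */
	printf("%d\n", rp_del_action(0, 0, 0)); /* 2: delete the entry */
	return 0;
}
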
833 | | int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr, |
834 | | struct prefix group, enum rp_source rp_src_flag) |
835 | 15.9k | { |
836 | 15.9k | pim_addr nht_p; |
837 | 15.9k | struct route_node *rn; |
838 | 15.9k | int result = 0; |
839 | 15.9k | struct rp_info *rp_info = NULL; |
840 | 15.9k | struct pim_upstream *up; |
841 | 15.9k | bool upstream_updated = false; |
842 | 15.9k | pim_addr old_rp_addr; |
843 | | |
844 | 15.9k | rn = route_node_lookup(pim->rp_table, &group); |
845 | 15.9k | if (!rn) { |
846 | 0 | result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag); |
847 | 0 | return result; |
848 | 0 | } |
849 | | |
850 | 15.9k | rp_info = rn->info; |
851 | | |
852 | 15.9k | if (!rp_info) { |
853 | 0 | route_unlock_node(rn); |
854 | 0 | result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag); |
855 | 0 | return result; |
856 | 0 | } |
857 | | |
858 | 15.9k | old_rp_addr = rp_info->rp.rpf_addr; |
859 | 15.9k | if (!pim_addr_cmp(new_rp_addr, old_rp_addr)) { |
860 | 2.85k | if (rp_info->rp_src != rp_src_flag) { |
861 | 0 | rp_info->rp_src = rp_src_flag; |
862 | 0 | route_unlock_node(rn); |
863 | 0 | return PIM_SUCCESS; |
864 | 0 | } |
865 | 2.85k | } |
866 | | |
867 | | /* Deregister old RP addr with Zebra NHT */ |
868 | | |
869 | 15.9k | if (!pim_addr_is_any(old_rp_addr)) { |
870 | 12.1k | nht_p = rp_info->rp.rpf_addr; |
871 | 12.1k | if (PIM_DEBUG_PIM_NHT_RP) |
872 | 0 | zlog_debug("%s: Deregister RP addr %pPA with Zebra ", |
873 | 12.1k | __func__, &nht_p); |
874 | 12.1k | pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info); |
875 | 12.1k | } |
876 | | |
877 | 15.9k | pim_rp_nexthop_del(rp_info); |
878 | 15.9k | listnode_delete(pim->rp_list, rp_info); |
879 | | /* Update the new RP address*/ |
880 | | |
881 | 15.9k | rp_info->rp.rpf_addr = new_rp_addr; |
882 | 15.9k | rp_info->rp_src = rp_src_flag; |
883 | 15.9k | rp_info->i_am_rp = 0; |
884 | | |
885 | 15.9k | listnode_add_sort(pim->rp_list, rp_info); |
886 | | |
887 | 11.2M | frr_each (rb_pim_upstream, &pim->upstream_head, up) { |
888 | 11.2M | if (pim_addr_is_any(up->sg.src)) { |
889 | 2.72M | struct prefix grp; |
890 | 2.72M | struct rp_info *trp_info; |
891 | | |
892 | 2.72M | pim_addr_to_prefix(&grp, up->sg.grp); |
893 | 2.72M | trp_info = pim_rp_find_match_group(pim, &grp); |
894 | | |
895 | 2.72M | if (trp_info == rp_info) { |
896 | 30.3k | pim_upstream_update(pim, up); |
897 | 30.3k | upstream_updated = true; |
898 | 30.3k | } |
899 | 2.72M | } |
900 | 11.2M | } |
901 | | |
902 | 15.9k | if (upstream_updated) |
903 | 10.4k | pim_zebra_update_all_interfaces(pim); |
904 | | |
905 | | /* Register new RP addr with Zebra NHT */ |
906 | 15.9k | nht_p = rp_info->rp.rpf_addr; |
907 | 15.9k | if (PIM_DEBUG_PIM_NHT_RP) |
908 | 0 | zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ", |
909 | 15.9k | __func__, &nht_p, &rp_info->group); |
910 | | |
911 | 15.9k | pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL); |
912 | 15.9k | if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p, |
913 | 15.9k | &rp_info->group, 1)) { |
914 | 15.9k | route_unlock_node(rn); |
915 | 15.9k | return PIM_RP_NO_PATH; |
916 | 15.9k | } |
917 | | |
918 | 0 | pim_rp_check_interfaces(pim, rp_info); |
919 | |
920 | 0 | route_unlock_node(rn); |
921 | |
922 | 0 | pim_rp_refresh_group_to_rp_mapping(pim); |
923 | |
924 | 0 | return result; |
925 | 15.9k | } |
926 | | |
927 | | void pim_rp_setup(struct pim_instance *pim) |
928 | 236 | { |
929 | 236 | struct listnode *node; |
930 | 236 | struct rp_info *rp_info; |
931 | 236 | pim_addr nht_p; |
932 | | |
933 | 7.50k | for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) { |
934 | 7.50k | if (pim_rpf_addr_is_inaddr_any(&rp_info->rp)) |
935 | 206 | continue; |
936 | | |
937 | 7.30k | nht_p = rp_info->rp.rpf_addr; |
938 | | |
939 | 7.30k | pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL); |
940 | 7.30k | if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, |
941 | 7.30k | nht_p, &rp_info->group, 1)) { |
942 | 7.30k | if (PIM_DEBUG_PIM_NHT_RP) |
943 | 0 | zlog_debug( |
944 | 7.30k | "Unable to lookup nexthop for rp specified"); |
945 | 7.30k | pim_rp_nexthop_del(rp_info); |
946 | 7.30k | } |
947 | 7.30k | } |
948 | 236 | } |
949 | | |
950 | | /* |
951 | | * Check whether we should elect ourselves as the actual RP when new
952 | | * addresses are added to an interface.
953 | | */ |
954 | | void pim_rp_check_on_if_add(struct pim_interface *pim_ifp) |
955 | 0 | { |
956 | 0 | struct listnode *node; |
957 | 0 | struct rp_info *rp_info; |
958 | 0 | bool i_am_rp_changed = false; |
959 | 0 | struct pim_instance *pim = pim_ifp->pim; |
960 | |
961 | 0 | if (pim->rp_list == NULL) |
962 | 0 | return; |
963 | | |
964 | 0 | for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) { |
965 | 0 | if (pim_rpf_addr_is_inaddr_any(&rp_info->rp)) |
966 | 0 | continue; |
967 | | |
968 | | /* if i_am_rp is already set, nothing needs to be done (adding new
969 | | * addresses |
970 | | * is not going to make a difference). */ |
971 | 0 | if (rp_info->i_am_rp) { |
972 | 0 | continue; |
973 | 0 | } |
974 | | |
975 | 0 | if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) { |
976 | 0 | i_am_rp_changed = true; |
977 | 0 | rp_info->i_am_rp = 1; |
978 | 0 | if (PIM_DEBUG_PIM_NHT_RP) |
979 | 0 | zlog_debug("%s: %pPA: i am rp", __func__, |
980 | 0 | &rp_info->rp.rpf_addr); |
981 | 0 | } |
982 | 0 | } |
983 | |
984 | 0 | if (i_am_rp_changed) { |
985 | 0 | pim_msdp_i_am_rp_changed(pim); |
986 | 0 | pim_upstream_reeval_use_rpt(pim); |
987 | 0 | } |
988 | 0 | } |
989 | | |
990 | | /* Un-optimized re-evaluation of "i_am_rp". This is used when interface
991 | | * addresses are removed. Removing addresses is an uncommon event in an
992 | | * active network, so no attempt has been made to optimize it. */
993 | | void pim_i_am_rp_re_evaluate(struct pim_instance *pim) |
994 | 0 | { |
995 | 0 | struct listnode *node; |
996 | 0 | struct rp_info *rp_info; |
997 | 0 | bool i_am_rp_changed = false; |
998 | 0 | int old_i_am_rp; |
999 | |
1000 | 0 | if (pim->rp_list == NULL) |
1001 | 0 | return; |
1002 | | |
1003 | 0 | for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) { |
1004 | 0 | if (pim_rpf_addr_is_inaddr_any(&rp_info->rp)) |
1005 | 0 | continue; |
1006 | | |
1007 | 0 | old_i_am_rp = rp_info->i_am_rp; |
1008 | 0 | pim_rp_check_interfaces(pim, rp_info); |
1009 | |
1010 | 0 | if (old_i_am_rp != rp_info->i_am_rp) { |
1011 | 0 | i_am_rp_changed = true; |
1012 | 0 | if (PIM_DEBUG_PIM_NHT_RP) { |
1013 | 0 | if (rp_info->i_am_rp) |
1014 | 0 | zlog_debug("%s: %pPA: i am rp", |
1015 | 0 | __func__, |
1016 | 0 | &rp_info->rp.rpf_addr); |
1017 | 0 | else |
1018 | 0 | zlog_debug( |
1019 | 0 | "%s: %pPA: i am no longer rp", |
1020 | 0 | __func__, |
1021 | 0 | &rp_info->rp.rpf_addr); |
1022 | 0 | } |
1023 | 0 | } |
1024 | 0 | } |
1025 | |
1026 | 0 | if (i_am_rp_changed) { |
1027 | 0 | pim_msdp_i_am_rp_changed(pim); |
1028 | 0 | pim_upstream_reeval_use_rpt(pim); |
1029 | 0 | } |
1030 | 0 | } |
1031 | | |
1032 | | /* |
1033 | | * I_am_RP(G) is true if the group-to-RP mapping indicates that |
1034 | | * this router is the RP for the group. |
1035 | | * |
1036 | | * Since we only have static RP, all groups are part of this RP |
1037 | | */ |
1038 | | int pim_rp_i_am_rp(struct pim_instance *pim, pim_addr group) |
1039 | 13.6M | { |
1040 | 13.6M | struct prefix g; |
1041 | 13.6M | struct rp_info *rp_info; |
1042 | | |
1043 | 13.6M | memset(&g, 0, sizeof(g)); |
1044 | 13.6M | pim_addr_to_prefix(&g, group); |
1045 | 13.6M | rp_info = pim_rp_find_match_group(pim, &g); |
1046 | | |
1047 | 13.6M | if (rp_info) |
1048 | 5.85M | return rp_info->i_am_rp; |
1049 | 7.80M | return 0; |
1050 | 13.6M | } |
1051 | | |
1052 | | /* |
1053 | | * RP(G) |
1054 | | * |
1055 | | * Return the RP that the group belongs to.
1056 | | */ |
1057 | | struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group) |
1058 | 1.22k | { |
1059 | 1.22k | struct prefix g; |
1060 | 1.22k | struct rp_info *rp_info; |
1061 | | |
1062 | 1.22k | memset(&g, 0, sizeof(g)); |
1063 | 1.22k | pim_addr_to_prefix(&g, group); |
1064 | | |
1065 | 1.22k | rp_info = pim_rp_find_match_group(pim, &g); |
1066 | | |
1067 | 1.22k | if (rp_info) { |
1068 | 837 | pim_addr nht_p; |
1069 | | |
1070 | 837 | if (pim_addr_is_any(rp_info->rp.rpf_addr)) { |
1071 | 263 | if (PIM_DEBUG_PIM_NHT_RP) |
1072 | 0 | zlog_debug( |
1073 | 263 | "%s: Skipping NHT Register since RP is not configured for the group %pPA", |
1074 | 263 | __func__, &group); |
1075 | 263 | return &rp_info->rp; |
1076 | 263 | } |
1077 | | |
1078 | | /* Register addr with Zebra NHT */ |
1079 | 574 | nht_p = rp_info->rp.rpf_addr; |
1080 | 574 | if (PIM_DEBUG_PIM_NHT_RP) |
1081 | 0 | zlog_debug( |
1082 | 574 | "%s: NHT Register RP addr %pPA grp %pFX with Zebra", |
1083 | 574 | __func__, &nht_p, &rp_info->group); |
1084 | 574 | pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL); |
1085 | 574 | pim_rpf_set_refresh_time(pim); |
1086 | 574 | (void)pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, |
1087 | 574 | nht_p, &rp_info->group, 1); |
1088 | 574 | return (&rp_info->rp); |
1089 | 837 | } |
1090 | | |
1091 | | // About to Go Down |
1092 | 383 | return NULL; |
1093 | 1.22k | } |
1094 | | |
1095 | | /* |
1096 | | * Set the upstream IP address we want to talk to based upon |
1097 | | * the rp configured and the source address |
1098 | | * |
1099 | | * If we don't have an RP configured and the source address is *
1100 | | * then set the upstream addr as INADDR_ANY and return failure. |
1101 | | * |
1102 | | */ |
1103 | | int pim_rp_set_upstream_addr(struct pim_instance *pim, pim_addr *up, |
1104 | | pim_addr source, pim_addr group) |
1105 | 231k | { |
1106 | 231k | struct rp_info *rp_info; |
1107 | 231k | struct prefix g; |
1108 | | |
1109 | 231k | memset(&g, 0, sizeof(g)); |
1110 | | |
1111 | 231k | pim_addr_to_prefix(&g, group); |
1112 | | |
1113 | 231k | rp_info = pim_rp_find_match_group(pim, &g); |
1114 | | |
1115 | 231k | if (!rp_info || ((pim_rpf_addr_is_inaddr_any(&rp_info->rp)) && |
1116 | 33.0k | (pim_addr_is_any(source)))) { |
1117 | 33.0k | if (PIM_DEBUG_PIM_NHT_RP) |
1118 | 0 | zlog_debug("%s: Received a (*,G) with no RP configured", |
1119 | 33.0k | __func__); |
1120 | 33.0k | *up = PIMADDR_ANY; |
1121 | 33.0k | return 0; |
1122 | 33.0k | } |
1123 | | |
1124 | 198k | if (pim_addr_is_any(source)) |
1125 | 168k | *up = rp_info->rp.rpf_addr; |
1126 | 30.2k | else |
1127 | 30.2k | *up = source; |
1128 | | |
1129 | 198k | return 1; |
1130 | 231k | } |
1131 | | |
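
pim_rp_set_upstream_addr() boils down to: an (S,G) entry joins towards the source, a (*,G) entry joins towards RP(G), and a (*,G) entry with no usable RP gets no upstream address and reports failure. A standalone sketch of that decision with uint32_t addresses (TOY_ADDR_ANY and toy_set_upstream() are illustrative stand-ins, not FRR API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_ADDR_ANY 0u

/* Illustrative version of the decision in pim_rp_set_upstream_addr():
 * for (S,G) the upstream neighbour is towards S, for (*,G) it is towards
 * RP(G); with no usable RP a (*,G) entry gets no upstream address at all. */
static bool toy_set_upstream(uint32_t rp_addr, uint32_t source, uint32_t *up)
{
	if (rp_addr == TOY_ADDR_ANY && source == TOY_ADDR_ANY) {
		*up = TOY_ADDR_ANY; /* (*,G) with no RP configured */
		return false;
	}
	*up = (source == TOY_ADDR_ANY) ? rp_addr : source;
	return true;
}

int main(void)
{
	uint32_t up;
	bool ok;

	ok = toy_set_upstream(0xc0000201, TOY_ADDR_ANY, &up); /* (*,G): towards RP */
	printf("%d up=0x%08x\n", ok, (unsigned)up);
	ok = toy_set_upstream(0xc0000201, 0x0a000005, &up);   /* (S,G): towards S */
	printf("%d up=0x%08x\n", ok, (unsigned)up);
	ok = toy_set_upstream(TOY_ADDR_ANY, TOY_ADDR_ANY, &up); /* no RP: failure */
	printf("%d up=0x%08x\n", ok, (unsigned)up);
	return 0;
}
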
1132 | | int pim_rp_config_write(struct pim_instance *pim, struct vty *vty, |
1133 | | const char *spaces) |
1134 | 0 | { |
1135 | 0 | struct listnode *node; |
1136 | 0 | struct rp_info *rp_info; |
1137 | 0 | int count = 0; |
1138 | 0 | pim_addr rp_addr; |
1139 | |
1140 | 0 | for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) { |
1141 | 0 | if (pim_rpf_addr_is_inaddr_any(&rp_info->rp)) |
1142 | 0 | continue; |
1143 | | |
1144 | 0 | if (rp_info->rp_src == RP_SRC_BSR) |
1145 | 0 | continue; |
1146 | | |
1147 | 0 | rp_addr = rp_info->rp.rpf_addr; |
1148 | 0 | if (rp_info->plist) |
1149 | 0 | vty_out(vty, |
1150 | 0 | "%s" PIM_AF_NAME |
1151 | 0 | " pim rp %pPA prefix-list %s\n", |
1152 | 0 | spaces, &rp_addr, rp_info->plist); |
1153 | 0 | else |
1154 | 0 | vty_out(vty, "%s" PIM_AF_NAME " pim rp %pPA %pFX\n", |
1155 | 0 | spaces, &rp_addr, &rp_info->group); |
1156 | 0 | count++; |
1157 | 0 | } |
1158 | |
1159 | 0 | return count; |
1160 | 0 | } |
1161 | | |
1162 | | void pim_rp_show_information(struct pim_instance *pim, struct prefix *range, |
1163 | | struct vty *vty, json_object *json) |
1164 | 0 | { |
1165 | 0 | struct rp_info *rp_info; |
1166 | 0 | struct rp_info *prev_rp_info = NULL; |
1167 | 0 | struct listnode *node; |
1168 | 0 | struct ttable *tt = NULL; |
1169 | 0 | char *table = NULL; |
1170 | 0 | char source[7]; |
1171 | 0 | char grp[INET6_ADDRSTRLEN]; |
1172 | |
1173 | 0 | json_object *json_rp_rows = NULL; |
1174 | 0 | json_object *json_row = NULL; |
1175 | |
1176 | 0 | if (!json) { |
1177 | | /* Prepare table. */ |
1178 | 0 | tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]); |
1179 | 0 | ttable_add_row( |
1180 | 0 | tt, |
1181 | 0 | "RP address|group/prefix-list|OIF|I am RP|Source|Group-Type"); |
1182 | 0 | tt->style.cell.rpad = 2; |
1183 | 0 | tt->style.corner = '+'; |
1184 | 0 | ttable_restyle(tt); |
1185 | 0 | } |
1186 | |
1187 | 0 | for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) { |
1188 | 0 | if (pim_rpf_addr_is_inaddr_any(&rp_info->rp)) |
1189 | 0 | continue; |
1190 | | |
1191 | 0 | #if PIM_IPV == 4 |
1192 | 0 | pim_addr group = rp_info->group.u.prefix4; |
1193 | | #else |
1194 | | pim_addr group = rp_info->group.u.prefix6; |
1195 | | #endif |
1196 | 0 | const char *group_type = |
1197 | 0 | pim_is_grp_ssm(pim, group) ? "SSM" : "ASM"; |
1198 | |
1199 | 0 | if (range && !prefix_match(&rp_info->group, range)) |
1200 | 0 | continue; |
1201 | | |
1202 | 0 | if (rp_info->rp_src == RP_SRC_STATIC) |
1203 | 0 | strlcpy(source, "Static", sizeof(source)); |
1204 | 0 | else if (rp_info->rp_src == RP_SRC_BSR) |
1205 | 0 | strlcpy(source, "BSR", sizeof(source)); |
1206 | 0 | else |
1207 | 0 | strlcpy(source, "None", sizeof(source)); |
1208 | 0 | if (json) { |
1209 | | /* |
1210 | | * If we have moved on to a new RP then add the |
1211 | | * entry for the previous RP |
1212 | | */ |
1213 | 0 | if (prev_rp_info && |
1214 | 0 | (pim_addr_cmp(prev_rp_info->rp.rpf_addr, |
1215 | 0 | rp_info->rp.rpf_addr))) { |
1216 | 0 | json_object_object_addf( |
1217 | 0 | json, json_rp_rows, "%pPA", |
1218 | 0 | &prev_rp_info->rp.rpf_addr); |
1219 | 0 | json_rp_rows = NULL; |
1220 | 0 | } |
1221 | |
1222 | 0 | if (!json_rp_rows) |
1223 | 0 | json_rp_rows = json_object_new_array(); |
1224 | |
1225 | 0 | json_row = json_object_new_object(); |
1226 | 0 | json_object_string_addf(json_row, "rpAddress", "%pPA", |
1227 | 0 | &rp_info->rp.rpf_addr); |
1228 | 0 | if (rp_info->rp.source_nexthop.interface) |
1229 | 0 | json_object_string_add( |
1230 | 0 | json_row, "outboundInterface", |
1231 | 0 | rp_info->rp.source_nexthop |
1232 | 0 | .interface->name); |
1233 | 0 | else |
1234 | 0 | json_object_string_add(json_row, |
1235 | 0 | "outboundInterface", |
1236 | 0 | "Unknown"); |
1237 | 0 | if (rp_info->i_am_rp) |
1238 | 0 | json_object_boolean_true_add(json_row, "iAmRP"); |
1239 | 0 | else |
1240 | 0 | json_object_boolean_false_add(json_row, |
1241 | 0 | "iAmRP"); |
1242 | |
1243 | 0 | if (rp_info->plist) |
1244 | 0 | json_object_string_add(json_row, "prefixList", |
1245 | 0 | rp_info->plist); |
1246 | 0 | else |
1247 | 0 | json_object_string_addf(json_row, "group", |
1248 | 0 | "%pFX", |
1249 | 0 | &rp_info->group); |
1250 | 0 | json_object_string_add(json_row, "source", source); |
1251 | 0 | json_object_string_add(json_row, "groupType", |
1252 | 0 | group_type); |
1253 | |
1254 | 0 | json_object_array_add(json_rp_rows, json_row); |
1255 | 0 | } else { |
1256 | 0 | prefix2str(&rp_info->group, grp, sizeof(grp)); |
1257 | 0 | ttable_add_row(tt, "%pPA|%s|%s|%s|%s|%s", |
1258 | 0 | &rp_info->rp.rpf_addr, |
1259 | 0 | rp_info->plist |
1260 | 0 | ? rp_info->plist |
1261 | 0 | : grp, |
1262 | 0 | rp_info->rp.source_nexthop.interface |
1263 | 0 | ? rp_info->rp.source_nexthop |
1264 | 0 | .interface->name |
1265 | 0 | : "Unknown", |
1266 | 0 | rp_info->i_am_rp |
1267 | 0 | ? "yes" |
1268 | 0 | : "no", |
1269 | 0 | source, group_type); |
1270 | 0 | } |
1271 | 0 | prev_rp_info = rp_info; |
1272 | 0 | } |
1273 | | |
1274 | | /* Dump the generated table. */ |
1275 | 0 | if (!json) { |
1276 | 0 | table = ttable_dump(tt, "\n"); |
1277 | 0 | vty_out(vty, "%s\n", table); |
1278 | 0 | XFREE(MTYPE_TMP, table); |
1279 | 0 | ttable_del(tt); |
1280 | 0 | } else { |
1281 | 0 | if (prev_rp_info && json_rp_rows) |
1282 | 0 | json_object_object_addf(json, json_rp_rows, "%pPA", |
1283 | 0 | &prev_rp_info->rp.rpf_addr); |
1284 | 0 | } |
1285 | 0 | } |
1286 | | |
1287 | | void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr) |
1288 | 236 | { |
1289 | 236 | struct listnode *node = NULL; |
1290 | 236 | struct rp_info *rp_info = NULL; |
1291 | 236 | struct nexthop *nh_node = NULL; |
1292 | 236 | pim_addr nht_p; |
1293 | 236 | struct pim_nexthop_cache pnc; |
1294 | | |
1295 | 7.50k | for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) { |
1296 | 7.50k | if (pim_rpf_addr_is_inaddr_any(&rp_info->rp)) |
1297 | 206 | continue; |
1298 | | |
1299 | 7.30k | nht_p = rp_info->rp.rpf_addr; |
1300 | 7.30k | memset(&pnc, 0, sizeof(struct pim_nexthop_cache)); |
1301 | 7.30k | if (!pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, &pnc)) |
1302 | 7.30k | continue; |
1303 | | |
1304 | 0 | for (nh_node = pnc.nexthop; nh_node; nh_node = nh_node->next) { |
1305 | 0 | #if PIM_IPV == 4 |
1306 | 0 | if (!pim_addr_is_any(nh_node->gate.ipv4)) |
1307 | 0 | continue; |
1308 | | #else |
1309 | | if (!pim_addr_is_any(nh_node->gate.ipv6)) |
1310 | | continue; |
1311 | | #endif |
1312 | | |
1313 | 0 | struct interface *ifp1 = if_lookup_by_index( |
1314 | 0 | nh_node->ifindex, pim->vrf->vrf_id); |
1315 | |
1316 | 0 | if (nbr->interface != ifp1) |
1317 | 0 | continue; |
1318 | | |
1319 | 0 | #if PIM_IPV == 4 |
1320 | 0 | nh_node->gate.ipv4 = nbr->source_addr; |
1321 | | #else |
1322 | | nh_node->gate.ipv6 = nbr->source_addr; |
1323 | | #endif |
1324 | 0 | if (PIM_DEBUG_PIM_NHT_RP) |
1325 | 0 | zlog_debug( |
1326 | 0 | "%s: addr %pPA new nexthop addr %pPAs interface %s", |
1327 | 0 | __func__, &nht_p, &nbr->source_addr, |
1328 | 0 | ifp1->name); |
1329 | 0 | } |
1330 | 0 | } |
1331 | 236 | } |