/src/frr/bgpd/bgp_updgrp_adv.c
Line | Count | Source |
1 | | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | | /** |
3 | | * bgp_updgrp_adv.c: BGP update group advertisement and adjacency |
4 | | * maintenance |
5 | | * |
6 | | * |
7 | | * @copyright Copyright (C) 2014 Cumulus Networks, Inc. |
8 | | * |
9 | | * @author Avneesh Sachdev <avneesh@sproute.net> |
10 | | * @author Rajesh Varadarajan <rajesh@sproute.net> |
11 | | * @author Pradosh Mohapatra <pradosh@sproute.net> |
12 | | */ |
13 | | |
14 | | #include <zebra.h> |
15 | | |
16 | | #include "command.h" |
17 | | #include "memory.h" |
18 | | #include "prefix.h" |
19 | | #include "hash.h" |
20 | | #include "frrevent.h" |
21 | | #include "queue.h" |
22 | | #include "routemap.h" |
23 | | #include "filter.h" |
24 | | |
25 | | #include "bgpd/bgpd.h" |
26 | | #include "bgpd/bgp_table.h" |
27 | | #include "bgpd/bgp_debug.h" |
28 | | #include "bgpd/bgp_route.h" |
29 | | #include "bgpd/bgp_advertise.h" |
30 | | #include "bgpd/bgp_attr.h" |
31 | | #include "bgpd/bgp_aspath.h" |
32 | | #include "bgpd/bgp_packet.h" |
33 | | #include "bgpd/bgp_fsm.h" |
34 | | #include "bgpd/bgp_mplsvpn.h" |
35 | | #include "bgpd/bgp_updgrp.h" |
37 | | #include "bgpd/bgp_addpath.h" |
38 | | |
39 | | |
40 | | /******************** |
41 | | * PRIVATE FUNCTIONS |
42 | | ********************/ |
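| | /* Comparator for the per-destination adj-out RB tree (RB_GENERATE below): |
| | * entries are ordered by owning subgroup, then by addpath_tx_id, so each |
| | * (subgroup, addpath ID) pair has at most one entry per destination. |
| | */ |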
43 | | static int bgp_adj_out_compare(const struct bgp_adj_out *o1, |
44 | | const struct bgp_adj_out *o2) |
45 | 0 | { |
46 | 0 | if (o1->subgroup < o2->subgroup) |
47 | 0 | return -1; |
48 | | |
49 | 0 | if (o1->subgroup > o2->subgroup) |
50 | 0 | return 1; |
51 | | |
52 | 0 | if (o1->addpath_tx_id < o2->addpath_tx_id) |
53 | 0 | return -1; |
54 | | |
55 | 0 | if (o1->addpath_tx_id > o2->addpath_tx_id) |
56 | 0 | return 1; |
57 | | |
58 | 0 | return 0; |
59 | 0 | } |
60 | | RB_GENERATE(bgp_adj_out_rb, bgp_adj_out, adj_entry, bgp_adj_out_compare); |
61 | | |
62 | | static inline struct bgp_adj_out *adj_lookup(struct bgp_dest *dest, |
63 | | struct update_subgroup *subgrp, |
64 | | uint32_t addpath_tx_id) |
65 | 0 | { |
66 | 0 | struct bgp_adj_out lookup; |
67 | |
68 | 0 | if (!dest || !subgrp) |
69 | 0 | return NULL; |
70 | | |
71 | | /* update-groups that do not support addpath will pass 0 for |
72 | | * addpath_tx_id. */ |
73 | 0 | lookup.subgroup = subgrp; |
74 | 0 | lookup.addpath_tx_id = addpath_tx_id; |
75 | |
76 | 0 | return RB_FIND(bgp_adj_out_rb, &dest->adj_out, &lookup); |
77 | 0 | } |
78 | | |
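| | /* Unlink an adj-out entry from its subgroup's adjacency queue and from |
| | * the destination's RB tree, drop the destination lock taken when the |
| | * entry was allocated, and free it. |
| | */ |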
79 | | static void adj_free(struct bgp_adj_out *adj) |
80 | 0 | { |
81 | 0 | TAILQ_REMOVE(&(adj->subgroup->adjq), adj, subgrp_adj_train); |
82 | 0 | SUBGRP_DECR_STAT(adj->subgroup, adj_count); |
83 | |
84 | 0 | RB_REMOVE(bgp_adj_out_rb, &adj->dest->adj_out, adj); |
85 | 0 | bgp_dest_unlock_node(adj->dest); |
86 | |
87 | 0 | XFREE(MTYPE_BGP_ADJ_OUT, adj); |
88 | 0 | } |
89 | | |
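| | /* For an addpath-capable subgroup, withdraw any advertisements of this |
| | * destination whose addpath_tx_id no longer matches a current path. |
| | */ |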
90 | | static void subgrp_withdraw_stale_addpath(struct updwalk_context *ctx, |
91 | | struct update_subgroup *subgrp) |
92 | 0 | { |
93 | 0 | struct bgp_adj_out *adj, *adj_next; |
94 | 0 | uint32_t id; |
95 | 0 | struct bgp_path_info *pi; |
96 | 0 | afi_t afi = SUBGRP_AFI(subgrp); |
97 | 0 | safi_t safi = SUBGRP_SAFI(subgrp); |
98 | 0 | struct peer *peer = SUBGRP_PEER(subgrp); |
99 | | |
100 | | /* Look through all of the paths we have advertised for this rn and send |
101 | | * a withdraw for the ones that are no longer present */ |
102 | 0 | RB_FOREACH_SAFE (adj, bgp_adj_out_rb, &ctx->dest->adj_out, adj_next) { |
103 | 0 | if (adj->subgroup != subgrp) |
104 | 0 | continue; |
105 | | |
106 | 0 | for (pi = bgp_dest_get_bgp_path_info(ctx->dest); pi; |
107 | 0 | pi = pi->next) { |
108 | 0 | id = bgp_addpath_id_for_peer(peer, afi, safi, |
109 | 0 | &pi->tx_addpath); |
110 | |
111 | 0 | if (id == adj->addpath_tx_id) { |
112 | 0 | break; |
113 | 0 | } |
114 | 0 | } |
115 | |
116 | 0 | if (!pi) { |
117 | 0 | subgroup_process_announce_selected(subgrp, NULL, |
118 | 0 | ctx->dest, afi, safi, |
119 | 0 | adj->addpath_tx_id); |
120 | 0 | } |
121 | 0 | } |
122 | 0 | } |
123 | | |
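| | /* update_group_af_walk() callback: for each subgroup whose coalesce |
| | * timer is not running, queue announcements or withdrawals for the |
| | * destination carried in the walk context, handling addpath and |
| | * non-addpath update-groups separately. |
| | */ |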
124 | | static int group_announce_route_walkcb(struct update_group *updgrp, void *arg) |
125 | 0 | { |
126 | 0 | struct updwalk_context *ctx = arg; |
127 | 0 | struct update_subgroup *subgrp; |
128 | 0 | struct bgp_path_info *pi; |
129 | 0 | afi_t afi; |
130 | 0 | safi_t safi; |
131 | 0 | struct peer *peer; |
132 | 0 | struct bgp_adj_out *adj, *adj_next; |
133 | 0 | bool addpath_capable; |
134 | |
135 | 0 | afi = UPDGRP_AFI(updgrp); |
136 | 0 | safi = UPDGRP_SAFI(updgrp); |
137 | 0 | peer = UPDGRP_PEER(updgrp); |
138 | 0 | addpath_capable = bgp_addpath_encode_tx(peer, afi, safi); |
139 | |
140 | 0 | if (BGP_DEBUG(update, UPDATE_OUT)) |
141 | 0 | zlog_debug("%s: afi=%s, safi=%s, p=%pRN", __func__, |
142 | 0 | afi2str(afi), safi2str(safi), |
143 | 0 | bgp_dest_to_rnode(ctx->dest)); |
144 | |
145 | 0 | UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) { |
146 | | |
147 | | /* |
148 | | * Skip the subgroups that have coalesce timer running. We will |
149 | | * walk the entire prefix table for those subgroups when the |
150 | | * coalesce timer fires. |
151 | | */ |
152 | 0 | if (!subgrp->t_coalesce) { |
153 | | |
154 | | /* An update-group that uses addpath */ |
155 | 0 | if (addpath_capable) { |
156 | 0 | subgrp_withdraw_stale_addpath(ctx, subgrp); |
157 | |
158 | 0 | for (pi = bgp_dest_get_bgp_path_info(ctx->dest); |
159 | 0 | pi; pi = pi->next) { |
160 | | /* Skip the bestpath for now */ |
161 | 0 | if (pi == ctx->pi) |
162 | 0 | continue; |
163 | | |
164 | 0 | subgroup_process_announce_selected( |
165 | 0 | subgrp, pi, ctx->dest, afi, |
166 | 0 | safi, |
167 | 0 | bgp_addpath_id_for_peer( |
168 | 0 | peer, afi, safi, |
169 | 0 | &pi->tx_addpath)); |
170 | 0 | } |
171 | | |
172 | | /* Process the bestpath last so the "show [ip] |
173 | | * bgp neighbor x.x.x.x advertised" |
174 | | * output shows the attributes from the bestpath |
175 | | */ |
176 | 0 | if (ctx->pi) |
177 | 0 | subgroup_process_announce_selected( |
178 | 0 | subgrp, ctx->pi, ctx->dest, afi, |
179 | 0 | safi, |
180 | 0 | bgp_addpath_id_for_peer( |
181 | 0 | peer, afi, safi, |
182 | 0 | &ctx->pi->tx_addpath)); |
183 | 0 | } |
184 | | /* An update-group that does not use addpath */ |
185 | 0 | else { |
186 | 0 | if (ctx->pi) { |
187 | 0 | subgroup_process_announce_selected( |
188 | 0 | subgrp, ctx->pi, ctx->dest, afi, |
189 | 0 | safi, |
190 | 0 | bgp_addpath_id_for_peer( |
191 | 0 | peer, afi, safi, |
192 | 0 | &ctx->pi->tx_addpath)); |
193 | 0 | } else { |
194 | | /* Find the addpath_tx_id of the path we |
195 | | * had advertised and |
196 | | * send a withdraw */ |
197 | 0 | RB_FOREACH_SAFE (adj, bgp_adj_out_rb, |
198 | 0 | &ctx->dest->adj_out, |
199 | 0 | adj_next) { |
200 | 0 | if (adj->subgroup == subgrp) { |
201 | 0 | subgroup_process_announce_selected( |
202 | 0 | subgrp, NULL, |
203 | 0 | ctx->dest, afi, |
204 | 0 | safi, |
205 | 0 | adj->addpath_tx_id); |
206 | 0 | } |
207 | 0 | } |
208 | 0 | } |
209 | 0 | } |
210 | 0 | } |
211 | | |
212 | | /* Notify BGP Conditional advertisement */ |
213 | 0 | bgp_notify_conditional_adv_scanner(subgrp); |
214 | 0 | } |
215 | |
216 | 0 | return UPDWALK_CONTINUE; |
217 | 0 | } |
218 | | |
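| | /* Dump a subgroup's adj-out entries to the vty; the flags select the |
| | * advertisement queue and/or the already-advertised attributes. |
| | */ |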
219 | | static void subgrp_show_adjq_vty(struct update_subgroup *subgrp, |
220 | | struct vty *vty, uint8_t flags) |
221 | 0 | { |
222 | 0 | struct bgp_table *table; |
223 | 0 | struct bgp_adj_out *adj; |
224 | 0 | unsigned long output_count; |
225 | 0 | struct bgp_dest *dest; |
226 | 0 | int header1 = 1; |
227 | 0 | struct bgp *bgp; |
228 | 0 | int header2 = 1; |
229 | |
230 | 0 | bgp = SUBGRP_INST(subgrp); |
231 | 0 | if (!bgp) |
232 | 0 | return; |
233 | | |
234 | 0 | table = bgp->rib[SUBGRP_AFI(subgrp)][SUBGRP_SAFI(subgrp)]; |
235 | |
236 | 0 | output_count = 0; |
237 | |
238 | 0 | for (dest = bgp_table_top(table); dest; dest = bgp_route_next(dest)) { |
239 | 0 | const struct prefix *dest_p = bgp_dest_get_prefix(dest); |
240 | |
241 | 0 | RB_FOREACH (adj, bgp_adj_out_rb, &dest->adj_out) { |
242 | 0 | if (adj->subgroup != subgrp) |
243 | 0 | continue; |
244 | | |
245 | 0 | if (header1) { |
246 | 0 | vty_out(vty, |
247 | 0 | "BGP table version is %" PRIu64 |
248 | 0 | ", local router ID is %pI4\n", |
249 | 0 | table->version, &bgp->router_id); |
250 | 0 | vty_out(vty, BGP_SHOW_SCODE_HEADER); |
251 | 0 | vty_out(vty, BGP_SHOW_OCODE_HEADER); |
252 | 0 | header1 = 0; |
253 | 0 | } |
254 | 0 | if (header2) { |
255 | 0 | vty_out(vty, BGP_SHOW_HEADER); |
256 | 0 | header2 = 0; |
257 | 0 | } |
258 | 0 | if ((flags & UPDWALK_FLAGS_ADVQUEUE) && adj->adv && |
259 | 0 | adj->adv->baa) { |
260 | 0 | route_vty_out_tmp( |
261 | 0 | vty, dest, dest_p, adj->adv->baa->attr, |
262 | 0 | SUBGRP_SAFI(subgrp), 0, NULL, false); |
263 | 0 | output_count++; |
264 | 0 | } |
265 | 0 | if ((flags & UPDWALK_FLAGS_ADVERTISED) && adj->attr) { |
266 | 0 | route_vty_out_tmp(vty, dest, dest_p, adj->attr, |
267 | 0 | SUBGRP_SAFI(subgrp), 0, NULL, |
268 | 0 | false); |
269 | 0 | output_count++; |
270 | 0 | } |
271 | 0 | } |
272 | 0 | } |
273 | 0 | if (output_count != 0) |
274 | 0 | vty_out(vty, "\nTotal number of prefixes %ld\n", output_count); |
275 | 0 | } |
276 | | |
277 | | static int updgrp_show_adj_walkcb(struct update_group *updgrp, void *arg) |
278 | 0 | { |
279 | 0 | struct updwalk_context *ctx = arg; |
280 | 0 | struct update_subgroup *subgrp; |
281 | 0 | struct vty *vty; |
282 | |
283 | 0 | vty = ctx->vty; |
284 | 0 | UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) { |
285 | 0 | if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id)) |
286 | 0 | continue; |
287 | 0 | vty_out(vty, "update group %" PRIu64 ", subgroup %" PRIu64 "\n", |
288 | 0 | updgrp->id, subgrp->id); |
289 | 0 | subgrp_show_adjq_vty(subgrp, vty, ctx->flags); |
290 | 0 | } |
291 | 0 | return UPDWALK_CONTINUE; |
292 | 0 | } |
293 | | |
294 | | static void updgrp_show_adj(struct bgp *bgp, afi_t afi, safi_t safi, |
295 | | struct vty *vty, uint64_t id, uint8_t flags) |
296 | 0 | { |
297 | 0 | struct updwalk_context ctx; |
298 | 0 | memset(&ctx, 0, sizeof(ctx)); |
299 | 0 | ctx.vty = vty; |
300 | 0 | ctx.subgrp_id = id; |
301 | 0 | ctx.flags = flags; |
302 | |
303 | 0 | update_group_af_walk(bgp, afi, safi, updgrp_show_adj_walkcb, &ctx); |
304 | 0 | } |
305 | | |
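| | /* Coalesce timer expiry handler: announce the subgroup's routes and, |
| | * unless update-delay or suppress-fib is in effect, trigger an |
| | * immediate route advertisement run for each member peer. |
| | */ |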
306 | | static void subgroup_coalesce_timer(struct event *thread) |
307 | 0 | { |
308 | 0 | struct update_subgroup *subgrp; |
309 | 0 | struct bgp *bgp; |
310 | 0 |
311 | 0 | subgrp = EVENT_ARG(thread); |
312 | 0 | if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0)) |
313 | 0 | zlog_debug("u%" PRIu64 ":s%" PRIu64" announcing routes upon coalesce timer expiry(%u ms)", |
314 | 0 | (SUBGRP_UPDGRP(subgrp))->id, subgrp->id, |
315 | 0 | subgrp->v_coalesce); |
316 | 0 | subgrp->t_coalesce = NULL; |
317 | 0 | subgrp->v_coalesce = 0; |
318 | 0 | bgp = SUBGRP_INST(subgrp); |
319 | 0 | subgroup_announce_route(subgrp); |
320 | 0 |
321 | 0 |
322 | 0 | /* While the announce_route() may kick off the route |
323 | 0 | * advertisement timer for the members of the subgroup, |
324 | 0 | * we'd like to send the initial updates much faster |
325 | 0 | * (i.e., without enforcing MRAI). Also, if there were no |
326 | 0 | * routes to announce, this is the method currently |
327 | 0 | * employed to trigger the EOR. |
328 | 0 | */ |
330 | 0 | if (!bgp_update_delay_active(SUBGRP_INST(subgrp)) && |
331 | 0 | !(BGP_SUPPRESS_FIB_ENABLED(bgp))) { |
332 | 0 | struct peer_af *paf; |
333 | 0 | struct peer *peer; |
334 | 0 |
335 | 0 | SUBGRP_FOREACH_PEER (subgrp, paf) { |
336 | 0 | peer = PAF_PEER(paf); |
337 | 0 | EVENT_OFF(peer->t_routeadv); |
338 | 0 | BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, 0); |
339 | 0 | } |
340 | 0 | } |
341 | 0 | } |
342 | | |
343 | | static int update_group_announce_walkcb(struct update_group *updgrp, void *arg) |
344 | 0 | { |
345 | 0 | struct update_subgroup *subgrp; |
346 | |
347 | 0 | UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) { |
348 | | /* Avoid suppressing duplicate routes later |
349 | | * when processing in subgroup_announce_table(). |
350 | | */ |
351 | 0 | SET_FLAG(subgrp->sflags, SUBGRP_STATUS_FORCE_UPDATES); |
352 | |
353 | 0 | subgroup_announce_all(subgrp); |
354 | 0 | } |
355 | |
356 | 0 | return UPDWALK_CONTINUE; |
357 | 0 | } |
358 | | |
359 | | static int update_group_announce_rrc_walkcb(struct update_group *updgrp, |
360 | | void *arg) |
361 | 0 | { |
362 | 0 | struct update_subgroup *subgrp; |
363 | 0 | afi_t afi; |
364 | 0 | safi_t safi; |
365 | 0 | struct peer *peer; |
366 | |
367 | 0 | afi = UPDGRP_AFI(updgrp); |
368 | 0 | safi = UPDGRP_SAFI(updgrp); |
369 | 0 | peer = UPDGRP_PEER(updgrp); |
370 | | |
371 | | /* Only announce if this is a group of route-reflector-clients */ |
372 | 0 | if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_REFLECTOR_CLIENT)) { |
373 | 0 | UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) { |
374 | 0 | subgroup_announce_all(subgrp); |
375 | 0 | } |
376 | 0 | } |
377 | |
378 | 0 | return UPDWALK_CONTINUE; |
379 | 0 | } |
380 | | |
381 | | /******************** |
382 | | * PUBLIC FUNCTIONS |
383 | | ********************/ |
384 | | |
385 | | /** |
386 | | * Allocate an adj-out object. Do proper initialization of its fields, |
387 | | * primarily its association with the subgroup and the prefix. |
388 | | */ |
389 | | struct bgp_adj_out *bgp_adj_out_alloc(struct update_subgroup *subgrp, |
390 | | struct bgp_dest *dest, |
391 | | uint32_t addpath_tx_id) |
392 | 0 | { |
393 | 0 | struct bgp_adj_out *adj; |
394 | |
395 | 0 | adj = XCALLOC(MTYPE_BGP_ADJ_OUT, sizeof(struct bgp_adj_out)); |
396 | 0 | adj->subgroup = subgrp; |
397 | 0 | adj->addpath_tx_id = addpath_tx_id; |
398 | |
|
399 | 0 | RB_INSERT(bgp_adj_out_rb, &dest->adj_out, adj); |
400 | 0 | bgp_dest_lock_node(dest); |
401 | 0 | adj->dest = dest; |
402 | |
403 | 0 | TAILQ_INSERT_TAIL(&(subgrp->adjq), adj, subgrp_adj_train); |
404 | 0 | SUBGRP_INCR_STAT(subgrp, adj_count); |
405 | 0 | return adj; |
406 | 0 | } |
407 | | |
408 | | |
409 | | struct bgp_advertise * |
410 | | bgp_advertise_clean_subgroup(struct update_subgroup *subgrp, |
411 | | struct bgp_adj_out *adj) |
412 | 0 | { |
413 | 0 | struct bgp_advertise *adv; |
414 | 0 | struct bgp_advertise_attr *baa; |
415 | 0 | struct bgp_advertise *next; |
416 | 0 | struct bgp_adv_fifo_head *fhead; |
417 | |
418 | 0 | adv = adj->adv; |
419 | 0 | baa = adv->baa; |
420 | 0 | next = NULL; |
421 | |
422 | 0 | if (baa) { |
423 | 0 | fhead = &subgrp->sync->update; |
424 | | |
425 | | /* Unlink myself from advertise attribute FIFO. */ |
426 | 0 | bgp_advertise_delete(baa, adv); |
427 | | |
428 | | /* Fetch next advertise candidate. */ |
429 | 0 | next = baa->adv; |
430 | | |
431 | | /* Unintern BGP advertise attribute. */ |
432 | 0 | bgp_advertise_attr_unintern(subgrp->hash, baa); |
433 | 0 | } else |
434 | 0 | fhead = &subgrp->sync->withdraw; |
435 | | |
436 | | |
437 | | /* Unlink myself from advertisement FIFO. */ |
438 | 0 | bgp_adv_fifo_del(fhead, adv); |
439 | | |
440 | | /* Free memory. */ |
441 | 0 | bgp_advertise_free(adj->adv); |
442 | 0 | adj->adv = NULL; |
443 | |
444 | 0 | return next; |
445 | 0 | } |
446 | | |
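| | /* Create or update the adj-out entry for this (destination, subgroup, |
| | * addpath ID) and enqueue an advertisement carrying the given attributes, |
| | * unless duplicate-UPDATE suppression applies. |
| | */ |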
447 | | void bgp_adj_out_set_subgroup(struct bgp_dest *dest, |
448 | | struct update_subgroup *subgrp, struct attr *attr, |
449 | | struct bgp_path_info *path) |
450 | 0 | { |
451 | 0 | struct bgp_adj_out *adj = NULL; |
452 | 0 | struct bgp_advertise *adv; |
453 | 0 | struct peer *peer; |
454 | 0 | afi_t afi; |
455 | 0 | safi_t safi; |
456 | 0 | struct peer *adv_peer; |
457 | 0 | struct peer_af *paf; |
458 | 0 | struct bgp *bgp; |
459 | 0 | uint32_t attr_hash = attrhash_key_make(attr); |
460 | |
461 | 0 | peer = SUBGRP_PEER(subgrp); |
462 | 0 | afi = SUBGRP_AFI(subgrp); |
463 | 0 | safi = SUBGRP_SAFI(subgrp); |
464 | 0 | bgp = SUBGRP_INST(subgrp); |
465 | |
466 | 0 | if (DISABLE_BGP_ANNOUNCE) |
467 | 0 | return; |
468 | | |
469 | | /* Look for adjacency information. */ |
470 | 0 | adj = adj_lookup( |
471 | 0 | dest, subgrp, |
472 | 0 | bgp_addpath_id_for_peer(peer, afi, safi, &path->tx_addpath)); |
473 | |
474 | 0 | if (adj) { |
475 | 0 | if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_TABLE_REPARSING)) |
476 | 0 | subgrp->pscount++; |
477 | 0 | } else { |
478 | 0 | adj = bgp_adj_out_alloc( |
479 | 0 | subgrp, dest, |
480 | 0 | bgp_addpath_id_for_peer(peer, afi, safi, |
481 | 0 | &path->tx_addpath)); |
482 | 0 | if (!adj) |
483 | 0 | return; |
484 | | |
485 | 0 | subgrp->pscount++; |
486 | 0 | } |
487 | | |
488 | | /* Check if we are sending the same route. This is needed to |
489 | | * avoid duplicate UPDATES. For instance, when filtering communities |
490 | | * at egress, neighbors would otherwise see duplicate UPDATES even |
491 | | * though the route wasn't actually changed. |
492 | | * Do not suppress BGP UPDATES for route-refresh. |
493 | | */ |
494 | 0 | if (CHECK_FLAG(bgp->flags, BGP_FLAG_SUPPRESS_DUPLICATES) |
495 | 0 | && !CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_FORCE_UPDATES) |
496 | 0 | && adj->attr_hash == attr_hash) { |
497 | 0 | if (BGP_DEBUG(update, UPDATE_OUT)) { |
498 | 0 | char attr_str[BUFSIZ] = {0}; |
499 | |
500 | 0 | bgp_dump_attr(attr, attr_str, sizeof(attr_str)); |
501 | |
502 | 0 | zlog_debug("%s suppress UPDATE w/ attr: %s", peer->host, |
503 | 0 | attr_str); |
504 | 0 | } |
505 | | |
506 | | /* |
507 | | * If BGP is skipping sending this value to its peers, |
508 | | * the version number should be updated just like it |
509 | | * would be if it had sent the data. Why? Because update |
510 | | * groups will not be coalesced until such time that |
511 | | * the version numbers are the same. |
512 | | * |
513 | | * Imagine a scenario with, say, 2 peers that come |
514 | | * up and are placed in the same update group. Then |
515 | | * a new peer comes up a bit later. Then a prefix that |
516 | | * the first 2 peers are mapped to is flapped and we |
517 | | * decide not to send the data for it. Then, unless more |
518 | | * network changes happen, we will never be able to |
519 | | * coalesce the 3rd peer down into the same update group. |
520 | | */ |
521 | 0 | subgrp->version = MAX(subgrp->version, dest->version); |
522 | 0 | return; |
523 | 0 | } |
524 | | |
525 | 0 | if (adj->adv) |
526 | 0 | bgp_advertise_clean_subgroup(subgrp, adj); |
527 | 0 | adj->adv = bgp_advertise_new(); |
528 | |
529 | 0 | adv = adj->adv; |
530 | 0 | adv->dest = dest; |
531 | 0 | assert(adv->pathi == NULL); |
532 | | /* bgp_path_info adj_out reference */ |
533 | 0 | adv->pathi = bgp_path_info_lock(path); |
534 | |
535 | 0 | adv->baa = bgp_advertise_attr_intern(subgrp->hash, attr); |
536 | 0 | adv->adj = adj; |
537 | 0 | adj->attr_hash = attr_hash; |
538 | | |
539 | | /* Add new advertisement to advertisement attribute list. */ |
540 | 0 | bgp_advertise_add(adv->baa, adv); |
541 | | |
542 | | /* |
543 | | * If the update adv list is empty, trigger the member peers' |
544 | | * mrai timers so the socket writes can happen. |
545 | | */ |
546 | 0 | if (!bgp_adv_fifo_count(&subgrp->sync->update)) { |
547 | 0 | SUBGRP_FOREACH_PEER (subgrp, paf) { |
548 | | /* If there are no routes in the withdraw list, set |
549 | | * the flag PEER_THREAD_SUBGRP_ADV_DELAY, which will allow |
550 | | * more routes to be sent in the update message. |
551 | | */ |
552 | 0 | if (BGP_SUPPRESS_FIB_ENABLED(bgp)) { |
553 | 0 | adv_peer = PAF_PEER(paf); |
554 | 0 | if (!bgp_adv_fifo_count( |
555 | 0 | &subgrp->sync->withdraw)) |
556 | 0 | SET_FLAG(adv_peer->thread_flags, |
557 | 0 | PEER_THREAD_SUBGRP_ADV_DELAY); |
558 | 0 | else |
559 | 0 | UNSET_FLAG(adv_peer->thread_flags, |
560 | 0 | PEER_THREAD_SUBGRP_ADV_DELAY); |
561 | 0 | } |
562 | 0 | bgp_adjust_routeadv(PAF_PEER(paf)); |
563 | 0 | } |
564 | 0 | } |
565 | |
566 | 0 | bgp_adv_fifo_add_tail(&subgrp->sync->update, adv); |
567 | |
568 | 0 | subgrp->version = MAX(subgrp->version, dest->version); |
569 | 0 | } |
570 | | |
571 | | /* The only time 'withdraw' will be false is if we are sending |
572 | | * the "neighbor x.x.x.x default-originate" default and need to clear |
573 | | * bgp_adj_out for the 0.0.0.0/0 route in the BGP table. |
574 | | */ |
575 | | void bgp_adj_out_unset_subgroup(struct bgp_dest *dest, |
576 | | struct update_subgroup *subgrp, char withdraw, |
577 | | uint32_t addpath_tx_id) |
578 | 0 | { |
579 | 0 | struct bgp_adj_out *adj; |
580 | 0 | struct bgp_advertise *adv; |
581 | 0 | bool trigger_write; |
582 | |
583 | 0 | if (DISABLE_BGP_ANNOUNCE) |
584 | 0 | return; |
585 | | |
586 | | /* Lookup existing adjacency */ |
587 | 0 | adj = adj_lookup(dest, subgrp, addpath_tx_id); |
588 | 0 | if (adj != NULL) { |
589 | | /* Clean up previous advertisement. */ |
590 | 0 | if (adj->adv) |
591 | 0 | bgp_advertise_clean_subgroup(subgrp, adj); |
592 | | |
593 | | /* If default originate is enabled and the route is the default |
594 | | * route, do not send a withdraw. This will prevent deletion of |
595 | | * the default route at the peer. |
596 | | */ |
597 | 0 | if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE) |
598 | 0 | && is_default_prefix(bgp_dest_get_prefix(dest))) |
599 | 0 | return; |
600 | | |
601 | 0 | if (adj->attr && withdraw) { |
602 | | /* We need advertisement structure. */ |
603 | 0 | adj->adv = bgp_advertise_new(); |
604 | 0 | adv = adj->adv; |
605 | 0 | adv->dest = dest; |
606 | 0 | adv->adj = adj; |
607 | | |
608 | | /* Note if we need to trigger a packet write */ |
609 | 0 | trigger_write = |
610 | 0 | !bgp_adv_fifo_count(&subgrp->sync->withdraw); |
611 | | |
612 | | /* Add to synchronization entry for withdraw |
613 | | * announcement. */ |
614 | 0 | bgp_adv_fifo_add_tail(&subgrp->sync->withdraw, adv); |
615 | |
616 | 0 | if (trigger_write) |
617 | 0 | subgroup_trigger_write(subgrp); |
618 | 0 | } else { |
619 | | /* Free allocated information. */ |
620 | 0 | adj_free(adj); |
621 | 0 | } |
622 | 0 | if (!CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_TABLE_REPARSING)) |
623 | 0 | subgrp->pscount--; |
624 | 0 | } |
625 | | |
626 | 0 | subgrp->version = MAX(subgrp->version, dest->version); |
627 | 0 | } |
628 | | |
629 | | void bgp_adj_out_remove_subgroup(struct bgp_dest *dest, struct bgp_adj_out *adj, |
630 | | struct update_subgroup *subgrp) |
631 | 0 | { |
632 | 0 | if (adj->attr) |
633 | 0 | bgp_attr_unintern(&adj->attr); |
634 | |
635 | 0 | if (adj->adv) |
636 | 0 | bgp_advertise_clean_subgroup(subgrp, adj); |
637 | |
638 | 0 | adj_free(adj); |
639 | 0 | } |
640 | | |
641 | | /* |
642 | | * Go through all the routes and clean up the adj/adv structures corresponding |
643 | | * to the subgroup. |
644 | | */ |
645 | | void subgroup_clear_table(struct update_subgroup *subgrp) |
646 | 0 | { |
647 | 0 | struct bgp_adj_out *aout, *taout; |
648 | |
649 | 0 | SUBGRP_FOREACH_ADJ_SAFE (subgrp, aout, taout) |
650 | 0 | bgp_adj_out_remove_subgroup(aout->dest, aout, subgrp); |
651 | 0 | } |
652 | | |
653 | | /* |
654 | | * subgroup_announce_table |
655 | | */ |
656 | | void subgroup_announce_table(struct update_subgroup *subgrp, |
657 | | struct bgp_table *table) |
658 | 0 | { |
659 | 0 | struct bgp_dest *dest; |
660 | 0 | struct bgp_path_info *ri; |
661 | 0 | struct peer *peer; |
662 | 0 | afi_t afi; |
663 | 0 | safi_t safi; |
664 | 0 | safi_t safi_rib; |
665 | 0 | bool addpath_capable; |
666 | |
667 | 0 | peer = SUBGRP_PEER(subgrp); |
668 | 0 | afi = SUBGRP_AFI(subgrp); |
669 | 0 | safi = SUBGRP_SAFI(subgrp); |
670 | 0 | addpath_capable = bgp_addpath_encode_tx(peer, afi, safi); |
671 | |
672 | 0 | if (safi == SAFI_LABELED_UNICAST) |
673 | 0 | safi_rib = SAFI_UNICAST; |
674 | 0 | else |
675 | 0 | safi_rib = safi; |
676 | |
677 | 0 | if (!table) |
678 | 0 | table = peer->bgp->rib[afi][safi_rib]; |
679 | |
680 | 0 | if (safi != SAFI_MPLS_VPN && safi != SAFI_ENCAP && safi != SAFI_EVPN |
681 | 0 | && CHECK_FLAG(peer->af_flags[afi][safi], |
682 | 0 | PEER_FLAG_DEFAULT_ORIGINATE)) |
683 | 0 | subgroup_default_originate(subgrp, 0); |
684 | |
685 | 0 | subgrp->pscount = 0; |
686 | 0 | SET_FLAG(subgrp->sflags, SUBGRP_STATUS_TABLE_REPARSING); |
687 | |
688 | 0 | for (dest = bgp_table_top(table); dest; dest = bgp_route_next(dest)) { |
689 | 0 | for (ri = bgp_dest_get_bgp_path_info(dest); ri; ri = ri->next) { |
690 | |
691 | 0 | if (!bgp_check_selected(ri, peer, addpath_capable, afi, |
692 | 0 | safi_rib)) |
693 | 0 | continue; |
694 | | |
695 | | /* If default originate is enabled for |
696 | | * the peer, do not send an explicit |
697 | | * withdraw. This will prevent deletion |
698 | | * of the default route advertised through |
699 | | * default originate. |
700 | | */ |
701 | 0 | if (CHECK_FLAG(peer->af_flags[afi][safi], |
702 | 0 | PEER_FLAG_DEFAULT_ORIGINATE) && |
703 | 0 | is_default_prefix(bgp_dest_get_prefix(dest))) |
704 | 0 | break; |
705 | | |
706 | 0 | subgroup_process_announce_selected( |
707 | 0 | subgrp, ri, dest, afi, safi_rib, |
708 | 0 | bgp_addpath_id_for_peer(peer, afi, safi_rib, |
709 | 0 | &ri->tx_addpath)); |
710 | 0 | } |
711 | 0 | } |
712 | 0 | UNSET_FLAG(subgrp->sflags, SUBGRP_STATUS_TABLE_REPARSING); |
713 | | |
714 | | /* |
715 | | * We walked through the whole table -- make sure our version number |
716 | | * is consistent with the one on the table. This should allow |
717 | | * subgroups to merge sooner if a peer comes up when the route node |
718 | | * with the largest version is no longer in the table. This also |
719 | | * covers the pathological case where all routes in the table have |
720 | | * now been deleted. |
721 | | */ |
722 | 0 | subgrp->version = MAX(subgrp->version, table->version); |
723 | | |
724 | | /* |
725 | | * Start a task to merge the subgroup if necessary. |
726 | | */ |
727 | 0 | update_subgroup_trigger_merge_check(subgrp, 0); |
728 | 0 | } |
729 | | |
730 | | /* |
731 | | * subgroup_announce_route |
732 | | * |
733 | | * Refresh all routes out to a subgroup. |
734 | | */ |
735 | | void subgroup_announce_route(struct update_subgroup *subgrp) |
736 | 0 | { |
737 | 0 | struct bgp_dest *dest; |
738 | 0 | struct bgp_table *table; |
739 | 0 | struct peer *onlypeer; |
740 | |
741 | 0 | if (update_subgroup_needs_refresh(subgrp)) { |
742 | 0 | update_subgroup_set_needs_refresh(subgrp, 0); |
743 | 0 | } |
744 | | |
745 | | /* |
746 | | * First update is deferred until ORF or ROUTE-REFRESH is received |
747 | | */ |
748 | 0 | onlypeer = ((SUBGRP_PCOUNT(subgrp) == 1) ? (SUBGRP_PFIRST(subgrp))->peer |
749 | 0 | : NULL); |
750 | 0 | if (onlypeer && CHECK_FLAG(onlypeer->af_sflags[SUBGRP_AFI(subgrp)] |
751 | 0 | [SUBGRP_SAFI(subgrp)], |
752 | 0 | PEER_STATUS_ORF_WAIT_REFRESH)) |
753 | 0 | return; |
754 | | |
755 | 0 | if (SUBGRP_SAFI(subgrp) != SAFI_MPLS_VPN |
756 | 0 | && SUBGRP_SAFI(subgrp) != SAFI_ENCAP |
757 | 0 | && SUBGRP_SAFI(subgrp) != SAFI_EVPN) |
758 | 0 | subgroup_announce_table(subgrp, NULL); |
759 | 0 | else |
760 | 0 | for (dest = bgp_table_top(update_subgroup_rib(subgrp)); dest; |
761 | 0 | dest = bgp_route_next(dest)) { |
762 | 0 | table = bgp_dest_get_bgp_table_info(dest); |
763 | 0 | if (!table) |
764 | 0 | continue; |
765 | 0 | subgroup_announce_table(subgrp, table); |
766 | 0 | } |
767 | 0 | } |
768 | | |
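| | /* Originate (or withdraw) the default route toward the subgroup. When a |
| | * default-originate route-map is configured, the default is announced |
| | * only if the route-map permits at least one path in the RIB. |
| | */ |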
769 | | void subgroup_default_originate(struct update_subgroup *subgrp, int withdraw) |
770 | 0 | { |
771 | 0 | struct bgp *bgp; |
772 | 0 | struct attr attr; |
773 | 0 | struct attr *new_attr = &attr; |
774 | 0 | struct prefix p; |
775 | 0 | struct peer *from; |
776 | 0 | struct bgp_dest *dest; |
777 | 0 | struct bgp_path_info *pi; |
778 | 0 | struct peer *peer; |
779 | 0 | struct bgp_adj_out *adj; |
780 | 0 | route_map_result_t ret = RMAP_DENYMATCH; |
781 | 0 | route_map_result_t new_ret = RMAP_DENYMATCH; |
782 | 0 | afi_t afi; |
783 | 0 | safi_t safi; |
784 | 0 | safi_t safi_rib; |
785 | 0 | int pref = 65536; |
786 | 0 | int new_pref = 0; |
787 | |
788 | 0 | if (!subgrp) |
789 | 0 | return; |
790 | | |
791 | 0 | peer = SUBGRP_PEER(subgrp); |
792 | 0 | afi = SUBGRP_AFI(subgrp); |
793 | 0 | safi = SUBGRP_SAFI(subgrp); |
794 | |
795 | 0 | if (!(afi == AFI_IP || afi == AFI_IP6)) |
796 | 0 | return; |
797 | | |
798 | 0 | if (safi == SAFI_LABELED_UNICAST) |
799 | 0 | safi_rib = SAFI_UNICAST; |
800 | 0 | else |
801 | 0 | safi_rib = safi; |
802 | |
803 | 0 | bgp = peer->bgp; |
804 | 0 | from = bgp->peer_self; |
805 | |
806 | 0 | bgp_attr_default_set(&attr, bgp, BGP_ORIGIN_IGP); |
807 | | |
808 | | /* make coverity happy */ |
809 | 0 | assert(attr.aspath); |
810 | | |
811 | 0 | attr.med = 0; |
812 | 0 | attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC); |
813 | |
814 | 0 | if ((afi == AFI_IP6) || peer_cap_enhe(peer, afi, safi)) { |
815 | | /* IPv6 global nexthop must be included. */ |
816 | 0 | attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL; |
817 | | |
818 | | /* If the peer is on a shared network and we have a link-local |
819 | | nexthop, set it. */ |
820 | 0 | if (peer->shared_network |
821 | 0 | && !IN6_IS_ADDR_UNSPECIFIED(&peer->nexthop.v6_local)) |
822 | 0 | attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL; |
823 | 0 | } |
824 | |
825 | 0 | if (peer->default_rmap[afi][safi].name) { |
826 | 0 | struct bgp_path_info tmp_pi = {0}; |
827 | |
828 | 0 | tmp_pi.peer = bgp->peer_self; |
829 | |
830 | 0 | SET_FLAG(bgp->peer_self->rmap_type, PEER_RMAP_TYPE_DEFAULT); |
831 | | |
832 | | /* Iterate over the RIB to see if we can announce |
833 | | * the default route. We announce the default |
834 | | * route only if route-map has a match. |
835 | | */ |
836 | 0 | for (dest = bgp_table_top(bgp->rib[afi][safi_rib]); dest; |
837 | 0 | dest = bgp_route_next(dest)) { |
838 | 0 | if (!bgp_dest_has_bgp_path_info_data(dest)) |
839 | 0 | continue; |
840 | | |
841 | 0 | for (pi = bgp_dest_get_bgp_path_info(dest); pi; |
842 | 0 | pi = pi->next) { |
843 | 0 | struct attr tmp_attr = attr; |
844 | |
845 | 0 | tmp_pi.attr = &tmp_attr; |
846 | |
847 | 0 | new_ret = route_map_apply_ext( |
848 | 0 | peer->default_rmap[afi][safi].map, |
849 | 0 | bgp_dest_get_prefix(dest), pi, &tmp_pi, |
850 | 0 | &new_pref); |
851 | |
852 | 0 | if (new_ret == RMAP_PERMITMATCH) { |
853 | 0 | if (new_pref < pref) { |
854 | 0 | pref = new_pref; |
855 | 0 | bgp_attr_flush(new_attr); |
856 | 0 | new_attr = bgp_attr_intern( |
857 | 0 | tmp_pi.attr); |
858 | 0 | bgp_attr_flush(tmp_pi.attr); |
859 | 0 | } |
860 | 0 | subgroup_announce_reset_nhop( |
861 | 0 | (peer_cap_enhe(peer, afi, safi) |
862 | 0 | ? AF_INET6 |
863 | 0 | : AF_INET), |
864 | 0 | new_attr); |
865 | 0 | ret = new_ret; |
866 | 0 | } else |
867 | 0 | bgp_attr_flush(&tmp_attr); |
868 | 0 | } |
869 | 0 | } |
870 | 0 | bgp->peer_self->rmap_type = 0; |
871 | |
872 | 0 | if (ret == RMAP_DENYMATCH) { |
873 | | /* |
874 | | * If it's an implicit withdraw due to a route-map |
875 | | * deny operation, we need to set the flag back. |
876 | | * This is a conversion of the update flow to the |
877 | | * withdraw flow. |
878 | | */ |
879 | 0 | if (!withdraw && |
880 | 0 | (!CHECK_FLAG(subgrp->sflags, |
881 | 0 | SUBGRP_STATUS_DEFAULT_ORIGINATE))) |
882 | 0 | SET_FLAG(subgrp->sflags, |
883 | 0 | SUBGRP_STATUS_DEFAULT_ORIGINATE); |
884 | 0 | withdraw = 1; |
885 | 0 | } |
886 | 0 | } |
887 | | |
888 | | /* Check if the default route is in the local BGP RIB, which is |
889 | | * installed through the redistribute or network command. |
890 | | */ |
891 | 0 | memset(&p, 0, sizeof(p)); |
892 | 0 | p.family = afi2family(afi); |
893 | 0 | p.prefixlen = 0; |
894 | 0 | dest = bgp_safi_node_lookup(bgp->rib[afi][safi_rib], safi_rib, &p, |
895 | 0 | NULL); |
896 | |
897 | 0 | if (withdraw) { |
898 | | /* Withdraw the default route advertised using default |
899 | | * originate |
900 | | */ |
901 | 0 | if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE)) |
902 | 0 | subgroup_default_withdraw_packet(subgrp); |
903 | 0 | UNSET_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE); |
904 | | |
905 | | /* If default route is present in the local RIB, advertise the |
906 | | * route |
907 | | */ |
908 | 0 | if (dest) { |
909 | 0 | for (pi = bgp_dest_get_bgp_path_info(dest); pi; |
910 | 0 | pi = pi->next) { |
911 | 0 | if (CHECK_FLAG(pi->flags, BGP_PATH_SELECTED)) |
912 | 0 | if (subgroup_announce_check( |
913 | 0 | dest, pi, subgrp, |
914 | 0 | bgp_dest_get_prefix(dest), |
915 | 0 | &attr, NULL)) { |
916 | 0 | struct attr *default_attr = |
917 | 0 | bgp_attr_intern(&attr); |
918 | |
919 | 0 | bgp_adj_out_set_subgroup( |
920 | 0 | dest, subgrp, |
921 | 0 | default_attr, pi); |
922 | 0 | } |
923 | 0 | } |
924 | 0 | bgp_dest_unlock_node(dest); |
925 | 0 | } |
926 | 0 | } else { |
927 | 0 | if (!CHECK_FLAG(subgrp->sflags, |
928 | 0 | SUBGRP_STATUS_DEFAULT_ORIGINATE)) { |
929 | | |
930 | | /* The 'neighbor x.x.x.x default-originate' default will |
931 | | * act as an implicit withdraw for any previous UPDATEs |
932 | | * sent for 0.0.0.0/0, so clear adj_out for the 0.0.0.0/0 |
933 | | * prefix in the BGP table. |
934 | | */ |
937 | 0 | if (dest) { |
938 | | /* Remove the adjacency for the previously |
939 | | * advertised default route |
940 | | */ |
941 | 0 | adj = adj_lookup( |
942 | 0 | dest, subgrp, |
943 | 0 | BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE); |
944 | 0 | if (adj != NULL) { |
945 | | /* Clean up previous advertisement. */ |
946 | 0 | if (adj->adv) |
947 | 0 | bgp_advertise_clean_subgroup( |
948 | 0 | subgrp, adj); |
949 | | |
950 | | /* Free allocated information. */ |
951 | 0 | adj_free(adj); |
952 | 0 | } |
953 | 0 | bgp_dest_unlock_node(dest); |
954 | 0 | } |
955 | | |
956 | | /* Advertise the default route */ |
957 | 0 | if (bgp_in_graceful_shutdown(bgp)) |
958 | 0 | bgp_attr_add_gshut_community(new_attr); |
959 | |
960 | 0 | SET_FLAG(subgrp->sflags, |
961 | 0 | SUBGRP_STATUS_DEFAULT_ORIGINATE); |
962 | 0 | subgroup_default_update_packet(subgrp, new_attr, from); |
963 | 0 | } |
964 | 0 | } |
965 | |
966 | 0 | aspath_unintern(&attr.aspath); |
967 | 0 | } |
968 | | |
969 | | /* |
970 | | * Announce the BGP table to a subgroup. |
971 | | * |
972 | | * At startup, we try to optimize route announcement by coalescing the |
973 | | * peer-up events. This is done only the first time - from then on, |
974 | | * subgrp->v_coalesce will be set to zero and the normal logic |
975 | | * prevails. |
976 | | */ |
977 | | void subgroup_announce_all(struct update_subgroup *subgrp) |
978 | 0 | { |
979 | 0 | if (!subgrp) |
980 | 0 | return; |
981 | | |
982 | | /* |
983 | | * If coalesce timer value is not set, announce routes immediately. |
984 | | */ |
985 | 0 | if (!subgrp->v_coalesce) { |
986 | 0 | if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0)) |
987 | 0 | zlog_debug("u%" PRIu64 ":s%" PRIu64" announcing all routes", |
988 | 0 | subgrp->update_group->id, subgrp->id); |
989 | 0 | subgroup_announce_route(subgrp); |
990 | 0 | return; |
991 | 0 | } |
992 | | |
993 | | /* |
994 | | * We should wait for the coalesce timer. Arm the timer if not done. |
995 | | */ |
996 | 0 | if (!subgrp->t_coalesce) { |
997 | 0 | event_add_timer_msec(bm->master, subgroup_coalesce_timer, |
998 | 0 | subgrp, subgrp->v_coalesce, |
999 | 0 | &subgrp->t_coalesce); |
1000 | 0 | } |
1001 | 0 | } |
1002 | | |
1003 | | /* |
1004 | | * Go through all update subgroups and set up the adv queue for the |
1005 | | * input route. |
1006 | | */ |
1007 | | void group_announce_route(struct bgp *bgp, afi_t afi, safi_t safi, |
1008 | | struct bgp_dest *dest, struct bgp_path_info *pi) |
1009 | 0 | { |
1010 | 0 | struct updwalk_context ctx; |
1011 | 0 | ctx.pi = pi; |
1012 | 0 | ctx.dest = dest; |
1013 | | |
1014 | | /* If suppress fib is enabled, the route will be advertised when |
1015 | | * FIB status is received |
1016 | | */ |
1017 | 0 | if (!bgp_check_advertise(bgp, dest)) |
1018 | 0 | return; |
1019 | | |
1020 | 0 | update_group_af_walk(bgp, afi, safi, group_announce_route_walkcb, &ctx); |
1021 | 0 | } |
1022 | | |
1023 | | void update_group_show_adj_queue(struct bgp *bgp, afi_t afi, safi_t safi, |
1024 | | struct vty *vty, uint64_t id) |
1025 | 0 | { |
1026 | 0 | updgrp_show_adj(bgp, afi, safi, vty, id, UPDWALK_FLAGS_ADVQUEUE); |
1027 | 0 | } |
1028 | | |
1029 | | void update_group_show_advertised(struct bgp *bgp, afi_t afi, safi_t safi, |
1030 | | struct vty *vty, uint64_t id) |
1031 | 0 | { |
1032 | 0 | updgrp_show_adj(bgp, afi, safi, vty, id, UPDWALK_FLAGS_ADVERTISED); |
1033 | 0 | } |
1034 | | |
1035 | | void update_group_announce(struct bgp *bgp) |
1036 | 0 | { |
1037 | 0 | update_group_walk(bgp, update_group_announce_walkcb, NULL); |
1038 | 0 | } |
1039 | | |
1040 | | void update_group_announce_rrclients(struct bgp *bgp) |
1041 | 0 | { |
1042 | 0 | update_group_walk(bgp, update_group_announce_rrc_walkcb, NULL); |
1043 | 0 | } |