/src/frr/bgpd/bgp_updgrp.c
Line | Count | Source |
1 | | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | | /** |
3 | | * bgp_updgrp.c: BGP update group structures |
4 | | * |
5 | | * @copyright Copyright (C) 2014 Cumulus Networks, Inc. |
6 | | * |
7 | | * @author Avneesh Sachdev <avneesh@sproute.net> |
8 | | * @author Rajesh Varadarajan <rajesh@sproute.net> |
9 | | * @author Pradosh Mohapatra <pradosh@sproute.net> |
10 | | */ |
11 | | |
12 | | #include <zebra.h> |
13 | | |
14 | | #include "prefix.h" |
15 | | #include "frrevent.h" |
16 | | #include "buffer.h" |
17 | | #include "stream.h" |
18 | | #include "command.h" |
19 | | #include "sockunion.h" |
20 | | #include "network.h" |
21 | | #include "memory.h" |
22 | | #include "filter.h" |
23 | | #include "routemap.h" |
24 | | #include "log.h" |
25 | | #include "plist.h" |
26 | | #include "linklist.h" |
27 | | #include "workqueue.h" |
28 | | #include "hash.h" |
29 | | #include "jhash.h" |
30 | | #include "queue.h" |
31 | | |
32 | | #include "bgpd/bgpd.h" |
33 | | #include "bgpd/bgp_table.h" |
34 | | #include "bgpd/bgp_debug.h" |
35 | | #include "bgpd/bgp_errors.h" |
36 | | #include "bgpd/bgp_fsm.h" |
37 | | #include "bgpd/bgp_addpath.h" |
38 | | #include "bgpd/bgp_advertise.h" |
39 | | #include "bgpd/bgp_packet.h" |
40 | | #include "bgpd/bgp_updgrp.h" |
41 | | #include "bgpd/bgp_route.h" |
42 | | #include "bgpd/bgp_filter.h" |
43 | | #include "bgpd/bgp_io.h" |
44 | | |
45 | | /******************** |
46 | | * PRIVATE FUNCTIONS |
47 | | ********************/ |
48 | | |
49 | | /** |
50 | | * Assign a unique ID to an update group or subgroup, mostly for |
51 | | * display and debugging purposes. The ID space is 64 bits, so it is |
52 | | * used freely with no concern about wrapping or about filling gaps. |
53 | | * The creation time is recorded as well. |
54 | | */ |
55 | | static void update_group_checkin(struct update_group *updgrp) |
56 | 0 | { |
57 | 0 | updgrp->id = ++bm->updgrp_idspace; |
58 | 0 | updgrp->uptime = monotime(NULL); |
59 | 0 | } |
60 | | |
61 | | static void update_subgroup_checkin(struct update_subgroup *subgrp, |
62 | | struct update_group *updgrp) |
63 | 0 | { |
64 | 0 | subgrp->id = ++bm->subgrp_idspace; |
65 | 0 | subgrp->uptime = monotime(NULL); |
66 | 0 | } |
67 | | |
68 | | static void sync_init(struct update_subgroup *subgrp, |
69 | | struct update_group *updgrp) |
70 | 0 | { |
71 | 0 | struct peer *peer = UPDGRP_PEER(updgrp); |
72 | |
|
73 | 0 | subgrp->sync = |
74 | 0 | XCALLOC(MTYPE_BGP_SYNCHRONISE, sizeof(struct bgp_synchronize)); |
75 | 0 | bgp_adv_fifo_init(&subgrp->sync->update); |
76 | 0 | bgp_adv_fifo_init(&subgrp->sync->withdraw); |
77 | |
|
78 | 0 | subgrp->hash = |
79 | 0 | hash_create(bgp_advertise_attr_hash_key, |
80 | 0 | bgp_advertise_attr_hash_cmp, "BGP SubGroup Hash"); |
81 | | |
82 | | /* We use a larger buffer for subgrp->work in the event that: |
83 | | * - We RX a BGP_UPDATE where the attributes alone are just |
84 | | * under 4096 or 65535 (if Extended Message capability negotiated). |
85 | | * - The user configures an outbound route-map that does many as-path |
86 | | * prepends or adds many communities. At most they can have |
87 | | * CMD_ARGC_MAX |
88 | | * args in a route-map so there is a finite limit on how large they |
89 | | * can |
90 | | * make the attributes. |
91 | | * |
92 | | * Having a buffer with BGP_MAX_PACKET_SIZE_OVERFLOW allows us to avoid |
93 | | * bounds |
94 | | * checking for every single attribute as we construct an UPDATE. |
95 | | */ |
96 | 0 | subgrp->work = stream_new(peer->max_packet_size |
97 | 0 | + BGP_MAX_PACKET_SIZE_OVERFLOW); |
98 | 0 | subgrp->scratch = stream_new(peer->max_packet_size); |
99 | 0 | } |
100 | | |
101 | | static void sync_delete(struct update_subgroup *subgrp) |
102 | 0 | { |
103 | 0 | XFREE(MTYPE_BGP_SYNCHRONISE, subgrp->sync); |
104 | 0 | hash_clean_and_free(&subgrp->hash, |
105 | 0 | (void (*)(void *))bgp_advertise_attr_free); |
106 | |
|
107 | 0 | if (subgrp->work) |
108 | 0 | stream_free(subgrp->work); |
109 | 0 | subgrp->work = NULL; |
110 | 0 | if (subgrp->scratch) |
111 | 0 | stream_free(subgrp->scratch); |
112 | 0 | subgrp->scratch = NULL; |
113 | 0 | } |
114 | | |
115 | | /** |
116 | | * conf_copy |
117 | | * |
118 | | * Copy only those fields that are relevant to update-group matching. |
119 | | */ |
120 | | static void conf_copy(struct peer *dst, struct peer *src, afi_t afi, |
121 | | safi_t safi) |
122 | 0 | { |
123 | 0 | struct bgp_filter *srcfilter; |
124 | 0 | struct bgp_filter *dstfilter; |
125 | |
|
126 | 0 | srcfilter = &src->filter[afi][safi]; |
127 | 0 | dstfilter = &dst->filter[afi][safi]; |
128 | |
|
129 | 0 | dst->bgp = src->bgp; |
130 | 0 | dst->sort = src->sort; |
131 | 0 | dst->as = src->as; |
132 | 0 | dst->v_routeadv = src->v_routeadv; |
133 | 0 | dst->flags = src->flags; |
134 | 0 | dst->af_flags[afi][safi] = src->af_flags[afi][safi]; |
135 | 0 | dst->pmax_out[afi][safi] = src->pmax_out[afi][safi]; |
136 | 0 | dst->max_packet_size = src->max_packet_size; |
137 | 0 | XFREE(MTYPE_BGP_PEER_HOST, dst->host); |
138 | |
|
139 | 0 | dst->host = XSTRDUP(MTYPE_BGP_PEER_HOST, src->host); |
140 | 0 | dst->cap = src->cap; |
141 | 0 | dst->af_cap[afi][safi] = src->af_cap[afi][safi]; |
142 | 0 | dst->afc_nego[afi][safi] = src->afc_nego[afi][safi]; |
143 | 0 | dst->orf_plist[afi][safi] = src->orf_plist[afi][safi]; |
144 | 0 | dst->addpath_type[afi][safi] = src->addpath_type[afi][safi]; |
145 | 0 | dst->local_as = src->local_as; |
146 | 0 | dst->change_local_as = src->change_local_as; |
147 | 0 | dst->shared_network = src->shared_network; |
148 | 0 | dst->local_role = src->local_role; |
149 | 0 | dst->as_path_loop_detection = src->as_path_loop_detection; |
150 | |
|
151 | 0 | if (src->soo[afi][safi]) { |
152 | 0 | ecommunity_free(&dst->soo[afi][safi]); |
153 | 0 | dst->soo[afi][safi] = ecommunity_dup(src->soo[afi][safi]); |
154 | 0 | } |
155 | |
|
156 | 0 | memcpy(&(dst->nexthop), &(src->nexthop), sizeof(struct bgp_nexthop)); |
157 | |
|
158 | 0 | dst->group = src->group; |
159 | |
|
160 | 0 | if (src->default_rmap[afi][safi].name) { |
161 | 0 | dst->default_rmap[afi][safi].name = |
162 | 0 | XSTRDUP(MTYPE_ROUTE_MAP_NAME, |
163 | 0 | src->default_rmap[afi][safi].name); |
164 | 0 | dst->default_rmap[afi][safi].map = |
165 | 0 | src->default_rmap[afi][safi].map; |
166 | 0 | } |
167 | |
|
168 | 0 | if (DISTRIBUTE_OUT_NAME(srcfilter)) { |
169 | 0 | DISTRIBUTE_OUT_NAME(dstfilter) = XSTRDUP( |
170 | 0 | MTYPE_BGP_FILTER_NAME, DISTRIBUTE_OUT_NAME(srcfilter)); |
171 | 0 | DISTRIBUTE_OUT(dstfilter) = DISTRIBUTE_OUT(srcfilter); |
172 | 0 | } |
173 | |
|
174 | 0 | if (PREFIX_LIST_OUT_NAME(srcfilter)) { |
175 | 0 | PREFIX_LIST_OUT_NAME(dstfilter) = XSTRDUP( |
176 | 0 | MTYPE_BGP_FILTER_NAME, PREFIX_LIST_OUT_NAME(srcfilter)); |
177 | 0 | PREFIX_LIST_OUT(dstfilter) = PREFIX_LIST_OUT(srcfilter); |
178 | 0 | } |
179 | |
|
180 | 0 | if (FILTER_LIST_OUT_NAME(srcfilter)) { |
181 | 0 | FILTER_LIST_OUT_NAME(dstfilter) = XSTRDUP( |
182 | 0 | MTYPE_BGP_FILTER_NAME, FILTER_LIST_OUT_NAME(srcfilter)); |
183 | 0 | FILTER_LIST_OUT(dstfilter) = FILTER_LIST_OUT(srcfilter); |
184 | 0 | } |
185 | |
|
186 | 0 | if (ROUTE_MAP_OUT_NAME(srcfilter)) { |
187 | 0 | ROUTE_MAP_OUT_NAME(dstfilter) = XSTRDUP( |
188 | 0 | MTYPE_BGP_FILTER_NAME, ROUTE_MAP_OUT_NAME(srcfilter)); |
189 | 0 | ROUTE_MAP_OUT(dstfilter) = ROUTE_MAP_OUT(srcfilter); |
190 | 0 | } |
191 | |
|
192 | 0 | if (UNSUPPRESS_MAP_NAME(srcfilter)) { |
193 | 0 | UNSUPPRESS_MAP_NAME(dstfilter) = XSTRDUP( |
194 | 0 | MTYPE_BGP_FILTER_NAME, UNSUPPRESS_MAP_NAME(srcfilter)); |
195 | 0 | UNSUPPRESS_MAP(dstfilter) = UNSUPPRESS_MAP(srcfilter); |
196 | 0 | } |
197 | |
|
198 | 0 | if (ADVERTISE_MAP_NAME(srcfilter)) { |
199 | 0 | ADVERTISE_MAP_NAME(dstfilter) = XSTRDUP( |
200 | 0 | MTYPE_BGP_FILTER_NAME, ADVERTISE_MAP_NAME(srcfilter)); |
201 | 0 | ADVERTISE_MAP(dstfilter) = ADVERTISE_MAP(srcfilter); |
202 | 0 | ADVERTISE_CONDITION(dstfilter) = ADVERTISE_CONDITION(srcfilter); |
203 | 0 | } |
204 | |
|
205 | 0 | if (CONDITION_MAP_NAME(srcfilter)) { |
206 | 0 | CONDITION_MAP_NAME(dstfilter) = XSTRDUP( |
207 | 0 | MTYPE_BGP_FILTER_NAME, CONDITION_MAP_NAME(srcfilter)); |
208 | 0 | CONDITION_MAP(dstfilter) = CONDITION_MAP(srcfilter); |
209 | 0 | } |
210 | |
|
211 | 0 | dstfilter->advmap.update_type = srcfilter->advmap.update_type; |
212 | 0 | } |
213 | | |
214 | | /** |
215 | | * Free the strings that conf_copy() duplicated with XSTRDUP. |
216 | | */ |
217 | | static void conf_release(struct peer *src, afi_t afi, safi_t safi) |
218 | 0 | { |
219 | 0 | struct bgp_filter *srcfilter; |
220 | |
|
221 | 0 | srcfilter = &src->filter[afi][safi]; |
222 | |
|
223 | 0 | XFREE(MTYPE_ROUTE_MAP_NAME, src->default_rmap[afi][safi].name); |
224 | |
|
225 | 0 | XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->dlist[FILTER_OUT].name); |
226 | |
|
227 | 0 | XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->plist[FILTER_OUT].name); |
228 | |
|
229 | 0 | XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->aslist[FILTER_OUT].name); |
230 | |
|
231 | 0 | XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->map[RMAP_OUT].name); |
232 | |
|
233 | 0 | XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->usmap.name); |
234 | |
|
235 | 0 | XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->advmap.aname); |
236 | |
|
237 | 0 | XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->advmap.cname); |
238 | |
|
239 | 0 | XFREE(MTYPE_BGP_PEER_HOST, src->host); |
240 | |
|
241 | 0 | ecommunity_free(&src->soo[afi][safi]); |
242 | 0 | } |
243 | | |
244 | | static void peer2_updgrp_copy(struct update_group *updgrp, struct peer_af *paf) |
245 | 0 | { |
246 | 0 | struct peer *src; |
247 | 0 | struct peer *dst; |
248 | |
|
249 | 0 | if (!updgrp || !paf) |
250 | 0 | return; |
251 | | |
252 | 0 | src = paf->peer; |
253 | 0 | dst = updgrp->conf; |
254 | 0 | if (!src || !dst) |
255 | 0 | return; |
256 | | |
257 | 0 | updgrp->afi = paf->afi; |
258 | 0 | updgrp->safi = paf->safi; |
259 | 0 | updgrp->afid = paf->afid; |
260 | 0 | updgrp->bgp = src->bgp; |
261 | |
|
262 | 0 | conf_copy(dst, src, paf->afi, paf->safi); |
263 | 0 | } |
264 | | |
265 | | /** |
266 | | * auxiliary functions to maintain the hash table. |
267 | | * - updgrp_hash_alloc - to create a new entry, passed to hash_get |
268 | | * - updgrp_hash_key_make - makes the key for update group search |
269 | | * - updgrp_hash_cmp - compare two update groups. |
270 | | */ |
271 | | static void *updgrp_hash_alloc(void *p) |
272 | 0 | { |
273 | 0 | struct update_group *updgrp; |
274 | 0 | const struct update_group *in; |
275 | |
|
276 | 0 | in = (const struct update_group *)p; |
277 | 0 | updgrp = XCALLOC(MTYPE_BGP_UPDGRP, sizeof(struct update_group)); |
278 | 0 | memcpy(updgrp, in, sizeof(struct update_group)); |
279 | 0 | updgrp->conf = XCALLOC(MTYPE_BGP_PEER, sizeof(struct peer)); |
280 | 0 | conf_copy(updgrp->conf, in->conf, in->afi, in->safi); |
281 | 0 | return updgrp; |
282 | 0 | } |
283 | | |
284 | | /** |
285 | | * The hash value for a peer is computed from the following variables: |
286 | | * v = f( |
287 | | * 1. IBGP (1) or EBGP (2) |
288 | | * 2. FLAGS based on configuration: |
289 | | * LOCAL_AS_NO_PREPEND |
290 | | * LOCAL_AS_REPLACE_AS |
291 | | * 3. AF_FLAGS based on configuration: |
292 | | * Refer to definition in bgp_updgrp.h |
293 | | * 4. (AF-independent) Capability flags: |
294 | | * AS4_RCV capability |
295 | | * 5. (AF-dependent) Capability flags: |
296 | | * ORF_PREFIX_SM_RCV (peer can send prefix ORF) |
297 | | * 6. MRAI |
298 | | * 7. peer-group name |
299 | | * 8. Outbound route-map name (neighbor route-map <> out) |
300 | | * 9. Outbound distribute-list name (neighbor distribute-list <> out) |
301 | | * 10. Outbound prefix-list name (neighbor prefix-list <> out) |
302 | | * 11. Outbound as-list name (neighbor filter-list <> out) |
303 | | * 12. Unsuppress map name (neighbor unsuppress-map <>) |
304 | | * 13. default rmap name (neighbor default-originate route-map <>) |
305 | | * 14. encoding both global and link-local nexthop? |
306 | | * 15. If peer is configured to be a lonesoul, peer ip address |
307 | | * 16. Local-as should match, if configured. |
308 | | * 17. maximum-prefix-out |
309 | | * 18. Local-role should also match, if configured. |
310 | | * ) |
311 | | */ |
312 | | static unsigned int updgrp_hash_key_make(const void *p) |
313 | 0 | { |
314 | 0 | const struct update_group *updgrp; |
315 | 0 | const struct peer *peer; |
316 | 0 | const struct bgp_filter *filter; |
317 | 0 | uint64_t flags; |
318 | 0 | uint32_t key; |
319 | 0 | afi_t afi; |
320 | 0 | safi_t safi; |
321 | | |
322 | | /* |
323 | | * IF YOU ADD ANYTHING TO THE HASH KEY THAT AFFECTS |
324 | | * THE UPDATE GROUP CALCULATION, PLEASE ALSO ADD IT |
325 | | * TO THE DEBUG OUTPUT AT THE BOTTOM |
326 | | */ |
327 | 0 | #define SEED1 999331 |
328 | 0 | #define SEED2 2147483647 |
329 | |
|
330 | 0 | updgrp = p; |
331 | 0 | peer = updgrp->conf; |
332 | 0 | afi = updgrp->afi; |
333 | 0 | safi = updgrp->safi; |
334 | 0 | flags = peer->af_flags[afi][safi]; |
335 | 0 | filter = &peer->filter[afi][safi]; |
336 | |
|
337 | 0 | key = 0; |
338 | |
|
339 | 0 | key = jhash_1word(peer->sort, key); /* EBGP or IBGP */ |
340 | 0 | key = jhash_1word((peer->flags & PEER_UPDGRP_FLAGS), key); |
341 | 0 | key = jhash_1word((flags & PEER_UPDGRP_AF_FLAGS), key); |
342 | 0 | key = jhash_1word((uint32_t)peer->addpath_type[afi][safi], key); |
343 | 0 | key = jhash_1word((peer->cap & PEER_UPDGRP_CAP_FLAGS), key); |
344 | 0 | key = jhash_1word((peer->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS), |
345 | 0 | key); |
346 | 0 | key = jhash_1word(peer->v_routeadv, key); |
347 | 0 | key = jhash_1word(peer->change_local_as, key); |
348 | 0 | key = jhash_1word(peer->max_packet_size, key); |
349 | 0 | key = jhash_1word(peer->pmax_out[afi][safi], key); |
350 | |
|
351 | 0 | if (peer->as_path_loop_detection) |
352 | 0 | key = jhash_2words(peer->as, peer->as_path_loop_detection, key); |
353 | |
|
354 | 0 | if (peer->group) |
355 | 0 | key = jhash_1word(jhash(peer->group->name, |
356 | 0 | strlen(peer->group->name), SEED1), |
357 | 0 | key); |
358 | |
|
359 | 0 | if (filter->map[RMAP_OUT].name) |
360 | 0 | key = jhash_1word(jhash(filter->map[RMAP_OUT].name, |
361 | 0 | strlen(filter->map[RMAP_OUT].name), |
362 | 0 | SEED1), |
363 | 0 | key); |
364 | |
|
365 | 0 | if (filter->dlist[FILTER_OUT].name) |
366 | 0 | key = jhash_1word(jhash(filter->dlist[FILTER_OUT].name, |
367 | 0 | strlen(filter->dlist[FILTER_OUT].name), |
368 | 0 | SEED1), |
369 | 0 | key); |
370 | |
|
371 | 0 | if (filter->plist[FILTER_OUT].name) |
372 | 0 | key = jhash_1word(jhash(filter->plist[FILTER_OUT].name, |
373 | 0 | strlen(filter->plist[FILTER_OUT].name), |
374 | 0 | SEED1), |
375 | 0 | key); |
376 | |
|
377 | 0 | if (filter->aslist[FILTER_OUT].name) |
378 | 0 | key = jhash_1word(jhash(filter->aslist[FILTER_OUT].name, |
379 | 0 | strlen(filter->aslist[FILTER_OUT].name), |
380 | 0 | SEED1), |
381 | 0 | key); |
382 | |
|
383 | 0 | if (filter->usmap.name) |
384 | 0 | key = jhash_1word(jhash(filter->usmap.name, |
385 | 0 | strlen(filter->usmap.name), SEED1), |
386 | 0 | key); |
387 | |
|
388 | 0 | if (filter->advmap.aname) |
389 | 0 | key = jhash_1word(jhash(filter->advmap.aname, |
390 | 0 | strlen(filter->advmap.aname), SEED1), |
391 | 0 | key); |
392 | |
|
393 | 0 | if (filter->advmap.update_type) |
394 | 0 | key = jhash_1word(filter->advmap.update_type, key); |
395 | |
|
396 | 0 | if (peer->default_rmap[afi][safi].name) |
397 | 0 | key = jhash_1word( |
398 | 0 | jhash(peer->default_rmap[afi][safi].name, |
399 | 0 | strlen(peer->default_rmap[afi][safi].name), |
400 | 0 | SEED1), |
401 | 0 | key); |
402 | | |
403 | | /* If a peer is on a shared network and is exchanging IPv6 prefixes, |
404 | | * it needs to include the link-local address. That differs from |
405 | | * non-shared-network peers (nexthop encoded with 32 bytes vs 16 |
406 | | * bytes). We create different update groups to take care of that. |
407 | | */ |
408 | 0 | key = jhash_1word( |
409 | 0 | (peer->shared_network && peer_afi_active_nego(peer, AFI_IP6)), |
410 | 0 | key); |
411 | | /* |
412 | | * There are certain peers that must get their own update-group: |
413 | | * - lonesoul peers |
414 | | * - peers that negotiated ORF |
415 | | * - maximum-prefix-out is set |
416 | | */ |
417 | 0 | if (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL) |
418 | 0 | || CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV) |
419 | 0 | || CHECK_FLAG(peer->af_cap[afi][safi], |
420 | 0 | PEER_CAP_ORF_PREFIX_SM_OLD_RCV) |
421 | 0 | || CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX_OUT)) |
422 | 0 | key = jhash_1word(jhash(peer->host, strlen(peer->host), SEED2), |
423 | 0 | key); |
424 | | /* |
425 | | * Multiple sessions with the same neighbor should get their own |
426 | | * update-group if they have different roles. |
427 | | */ |
428 | 0 | key = jhash_1word(peer->local_role, key); |
429 | | |
430 | | /* Neighbors configured with the AIGP attribute are put in a separate |
431 | | * update group from other neighbors. |
432 | | */ |
433 | 0 | key = jhash_1word((peer->flags & PEER_FLAG_AIGP), key); |
434 | |
|
435 | 0 | if (peer->soo[afi][safi]) { |
436 | 0 | char *soo_str = ecommunity_str(peer->soo[afi][safi]); |
437 | |
|
438 | 0 | key = jhash_1word(jhash(soo_str, strlen(soo_str), SEED1), key); |
439 | 0 | } |
440 | | |
441 | | /* |
442 | | * FOR ANY NEW ITEMS ADDED TO THE key, ENSURE THE DEBUG |
443 | | * STATEMENTS BELOW STAY UP TO DATE |
444 | | */ |
445 | 0 | if (bgp_debug_neighbor_events(peer)) { |
446 | 0 | zlog_debug( |
447 | 0 | "%pBP Update Group Hash: sort: %d UpdGrpFlags: %ju UpdGrpAFFlags: %ju", |
448 | 0 | peer, peer->sort, |
449 | 0 | (intmax_t)CHECK_FLAG(peer->flags, PEER_UPDGRP_FLAGS), |
450 | 0 | (intmax_t)CHECK_FLAG(flags, PEER_UPDGRP_AF_FLAGS)); |
451 | 0 | zlog_debug( |
452 | 0 | "%pBP Update Group Hash: addpath: %u UpdGrpCapFlag: %u UpdGrpCapAFFlag: %u route_adv: %u change local as: %u, as_path_loop_detection: %d", |
453 | 0 | peer, (uint32_t)peer->addpath_type[afi][safi], |
454 | 0 | CHECK_FLAG(peer->cap, PEER_UPDGRP_CAP_FLAGS), |
455 | 0 | CHECK_FLAG(peer->af_cap[afi][safi], |
456 | 0 | PEER_UPDGRP_AF_CAP_FLAGS), |
457 | 0 | peer->v_routeadv, peer->change_local_as, |
458 | 0 | peer->as_path_loop_detection); |
459 | 0 | zlog_debug( |
460 | 0 | "%pBP Update Group Hash: max packet size: %u pmax_out: %u Peer Group: %s rmap out: %s", |
461 | 0 | peer, peer->max_packet_size, peer->pmax_out[afi][safi], |
462 | 0 | peer->group ? peer->group->name : "(NONE)", |
463 | 0 | ROUTE_MAP_OUT_NAME(filter) ? ROUTE_MAP_OUT_NAME(filter) |
464 | 0 | : "(NONE)"); |
465 | 0 | zlog_debug( |
466 | 0 | "%pBP Update Group Hash: dlist out: %s plist out: %s aslist out: %s usmap out: %s advmap: %s %d", |
467 | 0 | peer, |
468 | 0 | DISTRIBUTE_OUT_NAME(filter) |
469 | 0 | ? DISTRIBUTE_OUT_NAME(filter) |
470 | 0 | : "(NONE)", |
471 | 0 | PREFIX_LIST_OUT_NAME(filter) |
472 | 0 | ? PREFIX_LIST_OUT_NAME(filter) |
473 | 0 | : "(NONE)", |
474 | 0 | FILTER_LIST_OUT_NAME(filter) |
475 | 0 | ? FILTER_LIST_OUT_NAME(filter) |
476 | 0 | : "(NONE)", |
477 | 0 | UNSUPPRESS_MAP_NAME(filter) |
478 | 0 | ? UNSUPPRESS_MAP_NAME(filter) |
479 | 0 | : "(NONE)", |
480 | 0 | ADVERTISE_MAP_NAME(filter) ? ADVERTISE_MAP_NAME(filter) |
481 | 0 | : "(NONE)", |
482 | 0 | filter->advmap.update_type); |
483 | 0 | zlog_debug( |
484 | 0 | "%pBP Update Group Hash: default rmap: %s shared network and afi active network: %d", |
485 | 0 | peer, |
486 | 0 | peer->default_rmap[afi][safi].name |
487 | 0 | ? peer->default_rmap[afi][safi].name |
488 | 0 | : "(NONE)", |
489 | 0 | peer->shared_network && |
490 | 0 | peer_afi_active_nego(peer, AFI_IP6)); |
491 | 0 | zlog_debug( |
492 | 0 | "%pBP Update Group Hash: Lonesoul: %d ORF prefix: %u ORF old: %u max prefix out: %ju", |
493 | 0 | peer, !!CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL), |
494 | 0 | CHECK_FLAG(peer->af_cap[afi][safi], |
495 | 0 | PEER_CAP_ORF_PREFIX_SM_RCV), |
496 | 0 | CHECK_FLAG(peer->af_cap[afi][safi], |
497 | 0 | PEER_CAP_ORF_PREFIX_SM_OLD_RCV), |
498 | 0 | (intmax_t)CHECK_FLAG(peer->af_flags[afi][safi], |
499 | 0 | PEER_FLAG_MAX_PREFIX_OUT)); |
500 | 0 | zlog_debug( |
501 | 0 | "%pBP Update Group Hash: local role: %u AIGP: %d SOO: %s", |
502 | 0 | peer, peer->local_role, |
503 | 0 | !!CHECK_FLAG(peer->flags, PEER_FLAG_AIGP), |
504 | 0 | peer->soo[afi][safi] |
505 | 0 | ? ecommunity_str(peer->soo[afi][safi]) |
506 | 0 | : "(NONE)"); |
507 | 0 | zlog_debug("%pBP Update Group Hash key: %u", peer, key); |
508 | 0 | } |
509 | 0 | return key; |
510 | 0 | } |
511 | | |
512 | | static bool updgrp_hash_cmp(const void *p1, const void *p2) |
513 | 0 | { |
514 | 0 | const struct update_group *grp1; |
515 | 0 | const struct update_group *grp2; |
516 | 0 | const struct peer *pe1; |
517 | 0 | const struct peer *pe2; |
518 | 0 | uint64_t flags1; |
519 | 0 | uint64_t flags2; |
520 | 0 | const struct bgp_filter *fl1; |
521 | 0 | const struct bgp_filter *fl2; |
522 | 0 | afi_t afi; |
523 | 0 | safi_t safi; |
524 | |
|
525 | 0 | if (!p1 || !p2) |
526 | 0 | return false; |
527 | | |
528 | 0 | grp1 = p1; |
529 | 0 | grp2 = p2; |
530 | 0 | pe1 = grp1->conf; |
531 | 0 | pe2 = grp2->conf; |
532 | 0 | afi = grp1->afi; |
533 | 0 | safi = grp1->safi; |
534 | 0 | flags1 = pe1->af_flags[afi][safi]; |
535 | 0 | flags2 = pe2->af_flags[afi][safi]; |
536 | 0 | fl1 = &pe1->filter[afi][safi]; |
537 | 0 | fl2 = &pe2->filter[afi][safi]; |
538 | | |
539 | | /* put EBGP and IBGP peers in different update groups */ |
540 | 0 | if (pe1->sort != pe2->sort) |
541 | 0 | return false; |
542 | | |
543 | | /* check peer flags */ |
544 | 0 | if ((pe1->flags & PEER_UPDGRP_FLAGS) |
545 | 0 | != (pe2->flags & PEER_UPDGRP_FLAGS)) |
546 | 0 | return false; |
547 | | |
548 | | /* If there is 'local-as' configured, it should match. */ |
549 | 0 | if (pe1->change_local_as != pe2->change_local_as) |
550 | 0 | return false; |
551 | | |
552 | 0 | if (pe1->pmax_out[afi][safi] != pe2->pmax_out[afi][safi]) |
553 | 0 | return false; |
554 | | |
555 | | /* flags like route reflector client */ |
556 | 0 | if ((flags1 & PEER_UPDGRP_AF_FLAGS) != (flags2 & PEER_UPDGRP_AF_FLAGS)) |
557 | 0 | return false; |
558 | | |
559 | 0 | if (pe1->addpath_type[afi][safi] != pe2->addpath_type[afi][safi]) |
560 | 0 | return false; |
561 | | |
562 | 0 | if ((pe1->cap & PEER_UPDGRP_CAP_FLAGS) |
563 | 0 | != (pe2->cap & PEER_UPDGRP_CAP_FLAGS)) |
564 | 0 | return false; |
565 | | |
566 | 0 | if ((pe1->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS) |
567 | 0 | != (pe2->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS)) |
568 | 0 | return false; |
569 | | |
570 | 0 | if (pe1->v_routeadv != pe2->v_routeadv) |
571 | 0 | return false; |
572 | | |
573 | 0 | if (pe1->group != pe2->group) |
574 | 0 | return false; |
575 | | |
576 | | /* Roles can affect filtering */ |
577 | 0 | if (pe1->local_role != pe2->local_role) |
578 | 0 | return false; |
579 | | |
580 | | /* route-map names should be the same */ |
581 | 0 | if ((fl1->map[RMAP_OUT].name && !fl2->map[RMAP_OUT].name) |
582 | 0 | || (!fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name) |
583 | 0 | || (fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name |
584 | 0 | && strcmp(fl1->map[RMAP_OUT].name, fl2->map[RMAP_OUT].name))) |
585 | 0 | return false; |
586 | | |
587 | 0 | if ((fl1->dlist[FILTER_OUT].name && !fl2->dlist[FILTER_OUT].name) |
588 | 0 | || (!fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name) |
589 | 0 | || (fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name |
590 | 0 | && strcmp(fl1->dlist[FILTER_OUT].name, |
591 | 0 | fl2->dlist[FILTER_OUT].name))) |
592 | 0 | return false; |
593 | | |
594 | 0 | if ((fl1->plist[FILTER_OUT].name && !fl2->plist[FILTER_OUT].name) |
595 | 0 | || (!fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name) |
596 | 0 | || (fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name |
597 | 0 | && strcmp(fl1->plist[FILTER_OUT].name, |
598 | 0 | fl2->plist[FILTER_OUT].name))) |
599 | 0 | return false; |
600 | | |
601 | 0 | if ((fl1->aslist[FILTER_OUT].name && !fl2->aslist[FILTER_OUT].name) |
602 | 0 | || (!fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name) |
603 | 0 | || (fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name |
604 | 0 | && strcmp(fl1->aslist[FILTER_OUT].name, |
605 | 0 | fl2->aslist[FILTER_OUT].name))) |
606 | 0 | return false; |
607 | | |
608 | 0 | if ((fl1->usmap.name && !fl2->usmap.name) |
609 | 0 | || (!fl1->usmap.name && fl2->usmap.name) |
610 | 0 | || (fl1->usmap.name && fl2->usmap.name |
611 | 0 | && strcmp(fl1->usmap.name, fl2->usmap.name))) |
612 | 0 | return false; |
613 | | |
614 | 0 | if ((fl1->advmap.aname && !fl2->advmap.aname) |
615 | 0 | || (!fl1->advmap.aname && fl2->advmap.aname) |
616 | 0 | || (fl1->advmap.aname && fl2->advmap.aname |
617 | 0 | && strcmp(fl1->advmap.aname, fl2->advmap.aname))) |
618 | 0 | return false; |
619 | | |
620 | 0 | if (fl1->advmap.update_type != fl2->advmap.update_type) |
621 | 0 | return false; |
622 | | |
623 | 0 | if ((pe1->default_rmap[afi][safi].name |
624 | 0 | && !pe2->default_rmap[afi][safi].name) |
625 | 0 | || (!pe1->default_rmap[afi][safi].name |
626 | 0 | && pe2->default_rmap[afi][safi].name) |
627 | 0 | || (pe1->default_rmap[afi][safi].name |
628 | 0 | && pe2->default_rmap[afi][safi].name |
629 | 0 | && strcmp(pe1->default_rmap[afi][safi].name, |
630 | 0 | pe2->default_rmap[afi][safi].name))) |
631 | 0 | return false; |
632 | | |
633 | 0 | if ((afi == AFI_IP6) && (pe1->shared_network != pe2->shared_network)) |
634 | 0 | return false; |
635 | | |
636 | 0 | if ((CHECK_FLAG(pe1->flags, PEER_FLAG_LONESOUL) |
637 | 0 | || CHECK_FLAG(pe1->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV) |
638 | 0 | || CHECK_FLAG(pe1->af_cap[afi][safi], |
639 | 0 | PEER_CAP_ORF_PREFIX_SM_OLD_RCV)) |
640 | 0 | && !sockunion_same(&pe1->su, &pe2->su)) |
641 | 0 | return false; |
642 | | |
643 | 0 | return true; |
644 | 0 | } |
645 | | |
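| | /* |
| |  * Set or clear PEER_FLAG_LONESOUL on a peer. On a change, re-evaluate |
| |  * the peer's update-group membership across its address-families. |
| |  */ |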
646 | | static void peer_lonesoul_or_not(struct peer *peer, int set) |
647 | 0 | { |
648 | | /* no change in status? */ |
649 | 0 | if (set == (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL) > 0)) |
650 | 0 | return; |
651 | | |
652 | 0 | if (set) |
653 | 0 | SET_FLAG(peer->flags, PEER_FLAG_LONESOUL); |
654 | 0 | else |
655 | 0 | UNSET_FLAG(peer->flags, PEER_FLAG_LONESOUL); |
656 | |
|
657 | 0 | update_group_adjust_peer_afs(peer); |
658 | 0 | } |
659 | | |
660 | | /* |
661 | | * subgroup_total_packets_enqueued |
662 | | * |
663 | | * Returns the total number of packets enqueued to a subgroup. |
664 | | */ |
665 | | static unsigned int |
666 | | subgroup_total_packets_enqueued(struct update_subgroup *subgrp) |
667 | 0 | { |
668 | 0 | struct bpacket *pkt; |
669 | |
|
670 | 0 | pkt = bpacket_queue_last(SUBGRP_PKTQ(subgrp)); |
671 | |
|
672 | 0 | return pkt->ver - 1; |
673 | 0 | } |
674 | | |
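| | /* |
| |  * Walk callback for displaying update groups: dumps one update group |
| |  * and its subgroups (optionally restricted to ctx->subgrp_id) in |
| |  * either plain text or JSON, depending on ctx->uj. |
| |  */ |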
675 | | static int update_group_show_walkcb(struct update_group *updgrp, void *arg) |
676 | 0 | { |
677 | 0 | struct updwalk_context *ctx = arg; |
678 | 0 | struct vty *vty; |
679 | 0 | struct update_subgroup *subgrp; |
680 | 0 | struct peer_af *paf; |
681 | 0 | struct bgp_filter *filter; |
682 | 0 | struct peer *peer = UPDGRP_PEER(updgrp); |
683 | 0 | int match = 0; |
684 | 0 | json_object *json_updgrp = NULL; |
685 | 0 | json_object *json_subgrps = NULL; |
686 | 0 | json_object *json_subgrp = NULL; |
687 | 0 | json_object *json_time = NULL; |
688 | 0 | json_object *json_subgrp_time = NULL; |
689 | 0 | json_object *json_subgrp_event = NULL; |
690 | 0 | json_object *json_peers = NULL; |
691 | 0 | json_object *json_pkt_info = NULL; |
692 | 0 | time_t epoch_tbuf, tbuf; |
693 | |
|
694 | 0 | if (!ctx) |
695 | 0 | return CMD_SUCCESS; |
696 | | |
697 | 0 | if (ctx->subgrp_id) { |
698 | 0 | UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) { |
699 | 0 | if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id)) |
700 | 0 | continue; |
701 | 0 | else { |
702 | 0 | match = 1; |
703 | 0 | break; |
704 | 0 | } |
705 | 0 | } |
706 | 0 | } else { |
707 | 0 | match = 1; |
708 | 0 | } |
709 | |
|
710 | 0 | if (!match) { |
711 | | /* Since this routine is invoked from a walk, we cannot signal |
712 | | * an error here; we can only return. |
713 | | */ |
714 | 0 | return CMD_SUCCESS; |
715 | 0 | } |
716 | | |
717 | 0 | vty = ctx->vty; |
718 | |
|
719 | 0 | if (ctx->uj) { |
720 | 0 | json_updgrp = json_object_new_object(); |
721 | | /* Display json o/p */ |
722 | 0 | tbuf = monotime(NULL); |
723 | 0 | tbuf -= updgrp->uptime; |
724 | 0 | epoch_tbuf = time(NULL) - tbuf; |
725 | 0 | json_time = json_object_new_object(); |
726 | 0 | json_object_int_add(json_time, "epoch", epoch_tbuf); |
727 | 0 | json_object_string_add(json_time, "epochString", |
728 | 0 | ctime(&epoch_tbuf)); |
729 | 0 | json_object_object_add(json_updgrp, "groupCreateTime", |
730 | 0 | json_time); |
731 | 0 | json_object_string_add(json_updgrp, "afi", |
732 | 0 | afi2str(updgrp->afi)); |
733 | 0 | json_object_string_add(json_updgrp, "safi", |
734 | 0 | safi2str(updgrp->safi)); |
735 | 0 | } else { |
736 | 0 | vty_out(vty, "Update-group %" PRIu64 ":\n", updgrp->id); |
737 | 0 | vty_out(vty, " Created: %s", timestamp_string(updgrp->uptime)); |
738 | 0 | } |
739 | |
|
740 | 0 | filter = &updgrp->conf->filter[updgrp->afi][updgrp->safi]; |
741 | 0 | if (filter->map[RMAP_OUT].name) { |
742 | 0 | if (ctx->uj) |
743 | 0 | json_object_string_add(json_updgrp, "outRouteMap", |
744 | 0 | filter->map[RMAP_OUT].name); |
745 | 0 | else |
746 | 0 | vty_out(vty, " Outgoing route map: %s\n", |
747 | 0 | filter->map[RMAP_OUT].name); |
748 | 0 | } |
749 | |
|
750 | 0 | if (ctx->uj) |
751 | 0 | json_object_int_add(json_updgrp, "minRouteAdvInt", |
752 | 0 | updgrp->conf->v_routeadv); |
753 | 0 | else |
754 | 0 | vty_out(vty, " MRAI value (seconds): %d\n", |
755 | 0 | updgrp->conf->v_routeadv); |
756 | |
|
757 | 0 | if (updgrp->conf->change_local_as) { |
758 | 0 | if (ctx->uj) { |
759 | 0 | json_object_int_add(json_updgrp, "localAs", |
760 | 0 | updgrp->conf->change_local_as); |
761 | 0 | json_object_boolean_add( |
762 | 0 | json_updgrp, "noPrepend", |
763 | 0 | CHECK_FLAG(updgrp->conf->flags, |
764 | 0 | PEER_FLAG_LOCAL_AS_NO_PREPEND)); |
765 | 0 | json_object_boolean_add( |
766 | 0 | json_updgrp, "replaceLocalAs", |
767 | 0 | CHECK_FLAG(updgrp->conf->flags, |
768 | 0 | PEER_FLAG_LOCAL_AS_REPLACE_AS)); |
769 | 0 | } else { |
770 | 0 | vty_out(vty, " Local AS %u%s%s\n", |
771 | 0 | updgrp->conf->change_local_as, |
772 | 0 | CHECK_FLAG(updgrp->conf->flags, |
773 | 0 | PEER_FLAG_LOCAL_AS_NO_PREPEND) |
774 | 0 | ? " no-prepend" |
775 | 0 | : "", |
776 | 0 | CHECK_FLAG(updgrp->conf->flags, |
777 | 0 | PEER_FLAG_LOCAL_AS_REPLACE_AS) |
778 | 0 | ? " replace-as" |
779 | 0 | : ""); |
780 | 0 | } |
781 | 0 | } |
782 | 0 | if (ctx->uj) |
783 | 0 | json_subgrps = json_object_new_array(); |
784 | 0 | UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) { |
785 | 0 | if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id)) |
786 | 0 | continue; |
787 | 0 | if (ctx->uj) { |
788 | 0 | json_subgrp = json_object_new_object(); |
789 | 0 | json_object_int_add(json_subgrp, "subGroupId", |
790 | 0 | subgrp->id); |
791 | 0 | tbuf = monotime(NULL); |
792 | 0 | tbuf -= subgrp->uptime; |
793 | 0 | epoch_tbuf = time(NULL) - tbuf; |
794 | 0 | json_subgrp_time = json_object_new_object(); |
795 | 0 | json_object_int_add(json_subgrp_time, "epoch", |
796 | 0 | epoch_tbuf); |
797 | 0 | json_object_string_add(json_subgrp_time, "epochString", |
798 | 0 | ctime(&epoch_tbuf)); |
799 | 0 | json_object_object_add(json_subgrp, "groupCreateTime", |
800 | 0 | json_subgrp_time); |
801 | 0 | } else { |
802 | 0 | vty_out(vty, "\n"); |
803 | 0 | vty_out(vty, " Update-subgroup %" PRIu64 ":\n", |
804 | 0 | subgrp->id); |
805 | 0 | vty_out(vty, " Created: %s", |
806 | 0 | timestamp_string(subgrp->uptime)); |
807 | 0 | } |
808 | |
|
809 | 0 | if (subgrp->split_from.update_group_id |
810 | 0 | || subgrp->split_from.subgroup_id) { |
811 | 0 | if (ctx->uj) { |
812 | 0 | json_object_int_add( |
813 | 0 | json_subgrp, "splitGroupId", |
814 | 0 | subgrp->split_from.update_group_id); |
815 | 0 | json_object_int_add( |
816 | 0 | json_subgrp, "splitSubGroupId", |
817 | 0 | subgrp->split_from.subgroup_id); |
818 | 0 | } else { |
819 | 0 | vty_out(vty, |
820 | 0 | " Split from group id: %" PRIu64 |
821 | 0 | "\n", |
822 | 0 | subgrp->split_from.update_group_id); |
823 | 0 | vty_out(vty, |
824 | 0 | " Split from subgroup id: %" PRIu64 |
825 | 0 | "\n", |
826 | 0 | subgrp->split_from.subgroup_id); |
827 | 0 | } |
828 | 0 | } |
829 | |
|
830 | 0 | if (ctx->uj) { |
831 | 0 | json_subgrp_event = json_object_new_object(); |
832 | 0 | json_object_int_add(json_subgrp_event, "joinEvents", |
833 | 0 | subgrp->join_events); |
834 | 0 | json_object_int_add(json_subgrp_event, "pruneEvents", |
835 | 0 | subgrp->prune_events); |
836 | 0 | json_object_int_add(json_subgrp_event, "mergeEvents", |
837 | 0 | subgrp->merge_events); |
838 | 0 | json_object_int_add(json_subgrp_event, "splitEvents", |
839 | 0 | subgrp->split_events); |
840 | 0 | json_object_int_add(json_subgrp_event, "switchEvents", |
841 | 0 | subgrp->updgrp_switch_events); |
842 | 0 | json_object_int_add(json_subgrp_event, |
843 | 0 | "peerRefreshEvents", |
844 | 0 | subgrp->peer_refreshes_combined); |
845 | 0 | json_object_int_add(json_subgrp_event, |
846 | 0 | "mergeCheckEvents", |
847 | 0 | subgrp->merge_checks_triggered); |
848 | 0 | json_object_object_add(json_subgrp, "statistics", |
849 | 0 | json_subgrp_event); |
850 | 0 | json_object_int_add(json_subgrp, "coalesceTime", |
851 | 0 | (UPDGRP_INST(subgrp->update_group)) |
852 | 0 | ->coalesce_time); |
853 | 0 | json_object_int_add(json_subgrp, "version", |
854 | 0 | subgrp->version); |
855 | 0 | json_pkt_info = json_object_new_object(); |
856 | 0 | json_object_int_add( |
857 | 0 | json_pkt_info, "qeueueLen", |
858 | 0 | bpacket_queue_length(SUBGRP_PKTQ(subgrp))); |
859 | 0 | json_object_int_add( |
860 | 0 | json_pkt_info, "queuedTotal", |
861 | 0 | subgroup_total_packets_enqueued(subgrp)); |
862 | 0 | json_object_int_add( |
863 | 0 | json_pkt_info, "queueHwmLen", |
864 | 0 | bpacket_queue_hwm_length(SUBGRP_PKTQ(subgrp))); |
865 | 0 | json_object_int_add( |
866 | 0 | json_pkt_info, "totalEnqueued", |
867 | 0 | subgroup_total_packets_enqueued(subgrp)); |
868 | 0 | json_object_object_add(json_subgrp, "packetQueueInfo", |
869 | 0 | json_pkt_info); |
870 | 0 | json_object_int_add(json_subgrp, "adjListCount", |
871 | 0 | subgrp->adj_count); |
872 | 0 | json_object_boolean_add( |
873 | 0 | json_subgrp, "needsRefresh", |
874 | 0 | CHECK_FLAG(subgrp->flags, |
875 | 0 | SUBGRP_FLAG_NEEDS_REFRESH)); |
876 | 0 | } else { |
877 | 0 | vty_out(vty, " Join events: %u\n", |
878 | 0 | subgrp->join_events); |
879 | 0 | vty_out(vty, " Prune events: %u\n", |
880 | 0 | subgrp->prune_events); |
881 | 0 | vty_out(vty, " Merge events: %u\n", |
882 | 0 | subgrp->merge_events); |
883 | 0 | vty_out(vty, " Split events: %u\n", |
884 | 0 | subgrp->split_events); |
885 | 0 | vty_out(vty, " Update group switch events: %u\n", |
886 | 0 | subgrp->updgrp_switch_events); |
887 | 0 | vty_out(vty, " Peer refreshes combined: %u\n", |
888 | 0 | subgrp->peer_refreshes_combined); |
889 | 0 | vty_out(vty, " Merge checks triggered: %u\n", |
890 | 0 | subgrp->merge_checks_triggered); |
891 | 0 | vty_out(vty, " Coalesce Time: %u%s\n", |
892 | 0 | (UPDGRP_INST(subgrp->update_group)) |
893 | 0 | ->coalesce_time, |
894 | 0 | subgrp->t_coalesce ? "(Running)" : ""); |
895 | 0 | vty_out(vty, " Version: %" PRIu64 "\n", |
896 | 0 | subgrp->version); |
897 | 0 | vty_out(vty, " Packet queue length: %d\n", |
898 | 0 | bpacket_queue_length(SUBGRP_PKTQ(subgrp))); |
899 | 0 | vty_out(vty, " Total packets enqueued: %u\n", |
900 | 0 | subgroup_total_packets_enqueued(subgrp)); |
901 | 0 | vty_out(vty, " Packet queue high watermark: %d\n", |
902 | 0 | bpacket_queue_hwm_length(SUBGRP_PKTQ(subgrp))); |
903 | 0 | vty_out(vty, " Adj-out list count: %u\n", |
904 | 0 | subgrp->adj_count); |
905 | 0 | vty_out(vty, " Advertise list: %s\n", |
906 | 0 | advertise_list_is_empty(subgrp) ? "empty" |
907 | 0 | : "not empty"); |
908 | 0 | vty_out(vty, " Flags: %s\n", |
909 | 0 | CHECK_FLAG(subgrp->flags, |
910 | 0 | SUBGRP_FLAG_NEEDS_REFRESH) |
911 | 0 | ? "R" |
912 | 0 | : ""); |
913 | 0 | if (peer) |
914 | 0 | vty_out(vty, " Max packet size: %d\n", |
915 | 0 | peer->max_packet_size); |
916 | 0 | } |
917 | 0 | if (subgrp->peer_count > 0) { |
918 | 0 | if (ctx->uj) { |
919 | 0 | json_peers = json_object_new_array(); |
920 | 0 | SUBGRP_FOREACH_PEER (subgrp, paf) { |
921 | 0 | json_object *peer = |
922 | 0 | json_object_new_string( |
923 | 0 | paf->peer->host); |
924 | 0 | json_object_array_add(json_peers, peer); |
925 | 0 | } |
926 | 0 | json_object_object_add(json_subgrp, "peers", |
927 | 0 | json_peers); |
928 | 0 | } else { |
929 | 0 | vty_out(vty, " Peers:\n"); |
930 | 0 | SUBGRP_FOREACH_PEER (subgrp, paf) |
931 | 0 | vty_out(vty, " - %s\n", |
932 | 0 | paf->peer->host); |
933 | 0 | } |
934 | 0 | } |
935 | |
|
936 | 0 | if (ctx->uj) |
937 | 0 | json_object_array_add(json_subgrps, json_subgrp); |
938 | 0 | } |
939 | |
|
940 | 0 | if (ctx->uj) { |
941 | 0 | json_object_object_add(json_updgrp, "subGroup", json_subgrps); |
942 | 0 | json_object_object_addf(ctx->json_updategrps, json_updgrp, |
943 | 0 | "%" PRIu64, updgrp->id); |
944 | 0 | } |
945 | |
|
946 | 0 | return UPDWALK_CONTINUE; |
947 | 0 | } |
948 | | |
949 | | /* |
950 | | * Helper function to show the packet queue for each subgroup of an update group. |
951 | | * Will be constrained to a particular subgroup id if id != 0. |
952 | | */ |
953 | | static int updgrp_show_packet_queue_walkcb(struct update_group *updgrp, |
954 | | void *arg) |
955 | 0 | { |
956 | 0 | struct updwalk_context *ctx = arg; |
957 | 0 | struct update_subgroup *subgrp; |
958 | 0 | struct vty *vty; |
959 | |
|
960 | 0 | vty = ctx->vty; |
961 | 0 | UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) { |
962 | 0 | if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id)) |
963 | 0 | continue; |
964 | 0 | vty_out(vty, "update group %" PRIu64 ", subgroup %" PRIu64 "\n", |
965 | 0 | updgrp->id, subgrp->id); |
966 | 0 | bpacket_queue_show_vty(SUBGRP_PKTQ(subgrp), vty); |
967 | 0 | } |
968 | 0 | return UPDWALK_CONTINUE; |
969 | 0 | } |
970 | | |
971 | | /* |
972 | | * Show the packet queue for each subgroup of an update group. Will be |
973 | | * constrained to a particular subgroup id if id != 0. |
974 | | */ |
975 | | void update_group_show_packet_queue(struct bgp *bgp, afi_t afi, safi_t safi, |
976 | | struct vty *vty, uint64_t id) |
977 | 0 | { |
978 | 0 | struct updwalk_context ctx; |
979 | |
|
980 | 0 | memset(&ctx, 0, sizeof(ctx)); |
981 | 0 | ctx.vty = vty; |
982 | 0 | ctx.subgrp_id = id; |
983 | 0 | ctx.flags = 0; |
984 | 0 | update_group_af_walk(bgp, afi, safi, updgrp_show_packet_queue_walkcb, |
985 | 0 | &ctx); |
986 | 0 | } |
987 | | |
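| | /* |
| |  * Look up the update group an established peer_af belongs in, using a |
| |  * temporary conf copy as the hash lookup key. Returns NULL if the |
| |  * peer is not established. |
| |  */ |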
988 | | static struct update_group *update_group_find(struct peer_af *paf) |
989 | 0 | { |
990 | 0 | struct update_group *updgrp; |
991 | 0 | struct update_group tmp; |
992 | 0 | struct peer tmp_conf; |
993 | |
|
994 | 0 | if (!peer_established(PAF_PEER(paf))) |
995 | 0 | return NULL; |
996 | | |
997 | 0 | memset(&tmp, 0, sizeof(tmp)); |
998 | 0 | memset(&tmp_conf, 0, sizeof(tmp_conf)); |
999 | 0 | tmp.conf = &tmp_conf; |
1000 | 0 | peer2_updgrp_copy(&tmp, paf); |
1001 | |
|
1002 | 0 | updgrp = hash_lookup(paf->peer->bgp->update_groups[paf->afid], &tmp); |
1003 | 0 | conf_release(&tmp_conf, paf->afi, paf->safi); |
1004 | 0 | return updgrp; |
1005 | 0 | } |
1006 | | |
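| | /* |
| |  * Find or create the update group for a peer_af via hash_get(). A |
| |  * newly allocated group is stamped with an id and uptime by |
| |  * update_group_checkin(). |
| |  */ |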
1007 | | static struct update_group *update_group_create(struct peer_af *paf) |
1008 | 0 | { |
1009 | 0 | struct update_group *updgrp; |
1010 | 0 | struct update_group tmp; |
1011 | 0 | struct peer tmp_conf; |
1012 | |
|
1013 | 0 | memset(&tmp, 0, sizeof(tmp)); |
1014 | 0 | memset(&tmp_conf, 0, sizeof(tmp_conf)); |
1015 | 0 | tmp.conf = &tmp_conf; |
1016 | 0 | peer2_updgrp_copy(&tmp, paf); |
1017 | |
|
1018 | 0 | updgrp = hash_get(paf->peer->bgp->update_groups[paf->afid], &tmp, |
1019 | 0 | updgrp_hash_alloc); |
1020 | 0 | update_group_checkin(updgrp); |
1021 | |
|
1022 | 0 | if (BGP_DEBUG(update_groups, UPDATE_GROUPS)) |
1023 | 0 | zlog_debug("create update group %" PRIu64, updgrp->id); |
1024 | |
|
1025 | 0 | UPDGRP_GLOBAL_STAT(updgrp, updgrps_created) += 1; |
1026 | |
|
1027 | 0 | conf_release(&tmp_conf, paf->afi, paf->safi); |
1028 | 0 | return updgrp; |
1029 | 0 | } |
1030 | | |
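| | /* |
| |  * Remove an update group from its per-AF hash table and free its |
| |  * conf peer and the group structure itself. |
| |  */ |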
1031 | | static void update_group_delete(struct update_group *updgrp) |
1032 | 0 | { |
1033 | 0 | if (BGP_DEBUG(update_groups, UPDATE_GROUPS)) |
1034 | 0 | zlog_debug("delete update group %" PRIu64, updgrp->id); |
1035 | |
|
1036 | 0 | UPDGRP_GLOBAL_STAT(updgrp, updgrps_deleted) += 1; |
1037 | |
|
1038 | 0 | hash_release(updgrp->bgp->update_groups[updgrp->afid], updgrp); |
1039 | 0 | conf_release(updgrp->conf, updgrp->afi, updgrp->safi); |
1040 | |
|
1041 | 0 | XFREE(MTYPE_BGP_PEER_HOST, updgrp->conf->host); |
1042 | |
|
1043 | 0 | XFREE(MTYPE_BGP_PEER_IFNAME, updgrp->conf->ifname); |
1044 | |
|
1045 | 0 | XFREE(MTYPE_BGP_PEER, updgrp->conf); |
1046 | 0 | XFREE(MTYPE_BGP_UPDGRP, updgrp); |
1047 | 0 | } |
1048 | | |
1049 | | static void update_group_add_subgroup(struct update_group *updgrp, |
1050 | | struct update_subgroup *subgrp) |
1051 | 0 | { |
1052 | 0 | if (!updgrp || !subgrp) |
1053 | 0 | return; |
1054 | | |
1055 | 0 | LIST_INSERT_HEAD(&(updgrp->subgrps), subgrp, updgrp_train); |
1056 | 0 | subgrp->update_group = updgrp; |
1057 | 0 | } |
1058 | | |
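| | /* |
| |  * Unlink a subgroup from its update group; if the group has no |
| |  * subgroups left, delete the group as well. |
| |  */ |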
1059 | | static void update_group_remove_subgroup(struct update_group *updgrp, |
1060 | | struct update_subgroup *subgrp) |
1061 | 0 | { |
1062 | 0 | if (!updgrp || !subgrp) |
1063 | 0 | return; |
1064 | | |
1065 | 0 | LIST_REMOVE(subgrp, updgrp_train); |
1066 | 0 | subgrp->update_group = NULL; |
1067 | 0 | if (LIST_EMPTY(&(updgrp->subgrps))) |
1068 | 0 | update_group_delete(updgrp); |
1069 | 0 | } |
1070 | | |
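| | /* |
| |  * Allocate and initialize a new subgroup (sync FIFOs, packet queue |
| |  * with its placeholder packet, adj-out queue) and attach it to the |
| |  * given update group. |
| |  */ |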
1071 | | static struct update_subgroup * |
1072 | | update_subgroup_create(struct update_group *updgrp) |
1073 | 0 | { |
1074 | 0 | struct update_subgroup *subgrp; |
1075 | |
|
1076 | 0 | subgrp = XCALLOC(MTYPE_BGP_UPD_SUBGRP, sizeof(struct update_subgroup)); |
1077 | 0 | update_subgroup_checkin(subgrp, updgrp); |
1078 | 0 | subgrp->v_coalesce = (UPDGRP_INST(updgrp))->coalesce_time; |
1079 | 0 | sync_init(subgrp, updgrp); |
1080 | 0 | bpacket_queue_init(SUBGRP_PKTQ(subgrp)); |
1081 | 0 | bpacket_queue_add(SUBGRP_PKTQ(subgrp), NULL, NULL); |
1082 | 0 | TAILQ_INIT(&(subgrp->adjq)); |
1083 | 0 | if (BGP_DEBUG(update_groups, UPDATE_GROUPS)) |
1084 | 0 | zlog_debug("create subgroup u%" PRIu64 ":s%" PRIu64, updgrp->id, |
1085 | 0 | subgrp->id); |
1086 | |
|
1087 | 0 | update_group_add_subgroup(updgrp, subgrp); |
1088 | |
|
1089 | 0 | UPDGRP_INCR_STAT(updgrp, subgrps_created); |
1090 | |
|
1091 | 0 | return subgrp; |
1092 | 0 | } |
1093 | | |
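| | /* |
| |  * Tear down a subgroup: cancel its timers, clean up its packet queue, |
| |  * adj-out table and sync state, then detach it from its update group |
| |  * and free it. |
| |  */ |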
1094 | | static void update_subgroup_delete(struct update_subgroup *subgrp) |
1095 | 0 | { |
1096 | 0 | if (!subgrp) |
1097 | 0 | return; |
1098 | | |
1099 | 0 | if (subgrp->update_group) |
1100 | 0 | UPDGRP_INCR_STAT(subgrp->update_group, subgrps_deleted); |
1101 | |
|
1102 | 0 | EVENT_OFF(subgrp->t_merge_check); |
1103 | 0 | EVENT_OFF(subgrp->t_coalesce); |
1104 | |
|
1105 | 0 | bpacket_queue_cleanup(SUBGRP_PKTQ(subgrp)); |
1106 | 0 | subgroup_clear_table(subgrp); |
1107 | |
|
1108 | 0 | sync_delete(subgrp); |
1109 | |
|
1110 | 0 | if (BGP_DEBUG(update_groups, UPDATE_GROUPS) && subgrp->update_group) |
1111 | 0 | zlog_debug("delete subgroup u%" PRIu64 ":s%" PRIu64, |
1112 | 0 | subgrp->update_group->id, subgrp->id); |
1113 | |
|
1114 | 0 | update_group_remove_subgroup(subgrp->update_group, subgrp); |
1115 | |
|
1116 | 0 | XFREE(MTYPE_BGP_UPD_SUBGRP, subgrp); |
1117 | 0 | } |
1118 | | |
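| | /* |
| |  * Copy the status flags from one subgroup to another. |
| |  */ |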
1119 | | void update_subgroup_inherit_info(struct update_subgroup *to, |
1120 | | struct update_subgroup *from) |
1121 | 0 | { |
1122 | 0 | if (!to || !from) |
1123 | 0 | return; |
1124 | | |
1125 | 0 | to->sflags = from->sflags; |
1126 | 0 | } |
1127 | | |
1128 | | /* |
1129 | | * update_subgroup_check_delete |
1130 | | * |
1131 | | * Delete a subgroup if it is ready to be deleted. |
1132 | | * |
1133 | | * Returns true if the subgroup was deleted. |
1134 | | */ |
1135 | | static bool update_subgroup_check_delete(struct update_subgroup *subgrp) |
1136 | 0 | { |
1137 | 0 | if (!subgrp) |
1138 | 0 | return false; |
1139 | | |
1140 | 0 | if (!LIST_EMPTY(&(subgrp->peers))) |
1141 | 0 | return false; |
1142 | | |
1143 | 0 | update_subgroup_delete(subgrp); |
1144 | |
|
1145 | 0 | return true; |
1146 | 0 | } |
1147 | | |
1148 | | /* |
1149 | | * update_subgroup_add_peer |
1150 | | * |
1151 | | * @param send_enqueued_pkts If true, all currently enqueued packets will |
1152 | | * also be sent to the peer. |
1153 | | */ |
1154 | | static void update_subgroup_add_peer(struct update_subgroup *subgrp, |
1155 | | struct peer_af *paf, |
1156 | | int send_enqueued_pkts) |
1157 | 0 | { |
1158 | 0 | struct bpacket *pkt; |
1159 | |
|
1160 | 0 | if (!subgrp || !paf) |
1161 | 0 | return; |
1162 | | |
1163 | 0 | LIST_INSERT_HEAD(&(subgrp->peers), paf, subgrp_train); |
1164 | 0 | paf->subgroup = subgrp; |
1165 | 0 | subgrp->peer_count++; |
1166 | |
|
1167 | 0 | if (bgp_debug_peer_updout_enabled(paf->peer->host)) { |
1168 | 0 | UPDGRP_PEER_DBG_EN(subgrp->update_group); |
1169 | 0 | } |
1170 | |
|
1171 | 0 | SUBGRP_INCR_STAT(subgrp, join_events); |
1172 | |
|
1173 | 0 | if (send_enqueued_pkts) { |
1174 | 0 | pkt = bpacket_queue_first(SUBGRP_PKTQ(subgrp)); |
1175 | 0 | } else { |
1176 | | |
1177 | | /* |
1178 | | * Hang the peer off of the last, placeholder, packet in the |
1179 | | * queue. This means it won't see any of the packets that are |
1180 | | * currently in the queue. |
1181 | | */ |
1182 | 0 | pkt = bpacket_queue_last(SUBGRP_PKTQ(subgrp)); |
1183 | 0 | assert(pkt->buffer == NULL); |
1184 | 0 | } |
1185 | | |
1186 | 0 | bpacket_add_peer(pkt, paf); |
1187 | |
|
1188 | 0 | if (BGP_DEBUG(update_groups, UPDATE_GROUPS)) |
1189 | 0 | zlog_debug("peer %s added to subgroup s%" PRIu64, |
1190 | 0 | paf->peer->host, subgrp->id); |
1191 | 0 | } |
1192 | | |
1193 | | /* |
1194 | | * update_subgroup_remove_peer_internal |
1195 | | * |
1196 | | * Internal function that removes a peer from a subgroup, but does not |
1197 | | * delete the subgroup. A call to this function must almost always be |
1198 | | * followed by a call to update_subgroup_check_delete(). |
1199 | | * |
1200 | | * @see update_subgroup_remove_peer |
1201 | | */ |
1202 | | static void update_subgroup_remove_peer_internal(struct update_subgroup *subgrp, |
1203 | | struct peer_af *paf) |
1204 | 0 | { |
1205 | 0 | assert(subgrp && paf && subgrp->update_group); |
1206 | | |
1207 | 0 | if (bgp_debug_peer_updout_enabled(paf->peer->host)) { |
1208 | 0 | UPDGRP_PEER_DBG_DIS(subgrp->update_group); |
1209 | 0 | } |
1210 | |
|
1211 | 0 | bpacket_queue_remove_peer(paf); |
1212 | 0 | LIST_REMOVE(paf, subgrp_train); |
1213 | 0 | paf->subgroup = NULL; |
1214 | 0 | subgrp->peer_count--; |
1215 | |
|
1216 | 0 | if (BGP_DEBUG(update_groups, UPDATE_GROUPS)) |
1217 | 0 | zlog_debug("peer %s deleted from subgroup s%" |
1218 | 0 | PRIu64 " peer cnt %d", |
1219 | 0 | paf->peer->host, subgrp->id, subgrp->peer_count); |
1220 | 0 | SUBGRP_INCR_STAT(subgrp, prune_events); |
1221 | 0 | } |
1222 | | |
1223 | | /* |
1224 | | * update_subgroup_remove_peer |
1225 | | */ |
1226 | | void update_subgroup_remove_peer(struct update_subgroup *subgrp, |
1227 | | struct peer_af *paf) |
1228 | 0 | { |
1229 | 0 | if (!subgrp || !paf) |
1230 | 0 | return; |
1231 | | |
1232 | 0 | update_subgroup_remove_peer_internal(subgrp, paf); |
1233 | |
|
1234 | 0 | if (update_subgroup_check_delete(subgrp)) |
1235 | 0 | return; |
1236 | | |
1237 | | /* |
1238 | | * The deletion of the peer may have caused some packets to be |
1239 | | * deleted from the subgroup packet queue. Check if the subgroup can |
1240 | | * be merged now. |
1241 | | */ |
1242 | 0 | update_subgroup_check_merge(subgrp, "removed peer from subgroup"); |
1243 | 0 | } |
1244 | | |
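| | /* |
| |  * Find a subgroup in 'updgrp' that a newly joining peer_af can be |
| |  * attached to: one still at version 0, not originating a default |
| |  * route and not in need of a refresh. The peer must be established |
| |  * and must not already be in a subgroup. |
| |  */ |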
1245 | | static struct update_subgroup *update_subgroup_find(struct update_group *updgrp, |
1246 | | struct peer_af *paf) |
1247 | 0 | { |
1248 | 0 | struct update_subgroup *subgrp = NULL; |
1249 | 0 | uint64_t version; |
1250 | |
|
1251 | 0 | if (paf->subgroup) { |
1252 | 0 | assert(0); |
1253 | 0 | return NULL; |
1254 | 0 | } else |
1255 | 0 | version = 0; |
1256 | | |
1257 | 0 | if (!peer_established(PAF_PEER(paf))) |
1258 | 0 | return NULL; |
1259 | | |
1260 | 0 | UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) { |
1261 | 0 | if (subgrp->version != version |
1262 | 0 | || CHECK_FLAG(subgrp->sflags, |
1263 | 0 | SUBGRP_STATUS_DEFAULT_ORIGINATE)) |
1264 | 0 | continue; |
1265 | | |
1266 | | /* |
1267 | | * The version number is not meaningful on a subgroup that needs |
1268 | | * a refresh. |
1269 | | */ |
1270 | 0 | if (update_subgroup_needs_refresh(subgrp)) |
1271 | 0 | continue; |
1272 | | |
1273 | 0 | break; |
1274 | 0 | } |
1275 | |
|
1276 | 0 | return subgrp; |
1277 | 0 | } |
1278 | | |
1279 | | /* |
1280 | | * update_subgroup_ready_for_merge |
1281 | | * |
1282 | | * Returns true if this subgroup is in a state that allows it to be |
1283 | | * merged into another subgroup. |
1284 | | */ |
1285 | | static bool update_subgroup_ready_for_merge(struct update_subgroup *subgrp) |
1286 | 0 | { |
1287 | | |
1288 | | /* |
1289 | | * Not ready if there are any encoded packets waiting to be written |
1290 | | * out to peers. |
1291 | | */ |
1292 | 0 | if (!bpacket_queue_is_empty(SUBGRP_PKTQ(subgrp))) |
1293 | 0 | return false; |
1294 | | |
1295 | | /* |
1296 | | * Not ready if there are enqueued updates waiting to be encoded. |
1297 | | */ |
1298 | 0 | if (!advertise_list_is_empty(subgrp)) |
1299 | 0 | return false; |
1300 | | |
1301 | | /* |
1302 | | * Don't attempt to merge a subgroup that needs a refresh. For one, |
1303 | | * we can't determine if the adj_out of such a group matches that of |
1304 | | * another group. |
1305 | | */ |
1306 | 0 | if (update_subgroup_needs_refresh(subgrp)) |
1307 | 0 | return false; |
1308 | | |
1309 | 0 | return true; |
1310 | 0 | } |
1311 | | |
1312 | | /* |
1313 | | * update_subgroup_can_merge_into |
1314 | | * |
1315 | | * Returns true if the first subgroup can merge into the second |
1316 | | * subgroup. |
1317 | | */ |
1318 | | static int update_subgroup_can_merge_into(struct update_subgroup *subgrp, |
1319 | | struct update_subgroup *target) |
1320 | 0 | { |
1321 | |
|
1322 | 0 | if (subgrp == target) |
1323 | 0 | return 0; |
1324 | | |
1325 | | /* |
1326 | | * Both must have processed the BRIB to the same point in order to |
1327 | | * be merged. |
1328 | | */ |
1329 | 0 | if (subgrp->version != target->version) |
1330 | 0 | return 0; |
1331 | | |
1332 | 0 | if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE) |
1333 | 0 | != CHECK_FLAG(target->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE)) |
1334 | 0 | return 0; |
1335 | | |
1336 | 0 | if (subgrp->adj_count != target->adj_count) |
1337 | 0 | return 0; |
1338 | | |
1339 | 0 | return update_subgroup_ready_for_merge(target); |
1340 | 0 | } |
1341 | | |
1342 | | /* |
1343 | | * update_subgroup_merge |
1344 | | * |
1345 | | * Merge the first subgroup into the second one. |
1346 | | */ |
1347 | | static void update_subgroup_merge(struct update_subgroup *subgrp, |
1348 | | struct update_subgroup *target, |
1349 | | const char *reason) |
1350 | 0 | { |
1351 | 0 | struct peer_af *paf; |
1352 | 0 | int result; |
1353 | 0 | int peer_count; |
1354 | |
|
1355 | 0 | assert(subgrp->adj_count == target->adj_count); |
1356 | | |
1357 | 0 | peer_count = subgrp->peer_count; |
1358 | |
|
1359 | 0 | while (1) { |
1360 | 0 | paf = LIST_FIRST(&subgrp->peers); |
1361 | 0 | if (!paf) |
1362 | 0 | break; |
1363 | | |
1364 | 0 | update_subgroup_remove_peer_internal(subgrp, paf); |
1365 | | |
1366 | | /* |
1367 | | * Add the peer to the target subgroup, while making sure that |
1368 | | * any currently enqueued packets won't be sent to it. Enqueued |
1369 | | * packets could, for example, result in an unnecessary withdraw |
1370 | | * followed by an advertise. |
1371 | | */ |
1372 | 0 | update_subgroup_add_peer(target, paf, 0); |
1373 | 0 | } |
1374 | |
|
1375 | 0 | SUBGRP_INCR_STAT(target, merge_events); |
1376 | |
|
1377 | 0 | if (BGP_DEBUG(update_groups, UPDATE_GROUPS)) |
1378 | 0 | zlog_debug("u%" PRIu64 ":s%" PRIu64" (%d peers) merged into u%" PRIu64 ":s%" PRIu64", trigger: %s", |
1379 | 0 | subgrp->update_group->id, subgrp->id, peer_count, |
1380 | 0 | target->update_group->id, target->id, |
1381 | 0 | reason ? reason : "unknown"); |
1382 | |
|
1383 | 0 | result = update_subgroup_check_delete(subgrp); |
1384 | 0 | assert(result); |
1385 | 0 | } |
1386 | | |
1387 | | /* |
1388 | | * update_subgroup_check_merge |
1389 | | * |
1390 | | * Merge this subgroup into another subgroup if possible. |
1391 | | * |
1392 | | * Returns true if the subgroup has been merged. The subgroup pointer |
1393 | | * should not be accessed in this case. |
1394 | | */ |
1395 | | bool update_subgroup_check_merge(struct update_subgroup *subgrp, |
1396 | | const char *reason) |
1397 | 0 | { |
1398 | 0 | struct update_subgroup *target; |
1399 | |
|
1400 | 0 | if (!update_subgroup_ready_for_merge(subgrp)) |
1401 | 0 | return false; |
1402 | | |
1403 | | /* |
1404 | | * Look for a subgroup to merge into. |
1405 | | */ |
1406 | 0 | UPDGRP_FOREACH_SUBGRP (subgrp->update_group, target) { |
1407 | 0 | if (update_subgroup_can_merge_into(subgrp, target)) |
1408 | 0 | break; |
1409 | 0 | } |
1410 | |
|
1411 | 0 | if (!target) |
1412 | 0 | return false; |
1413 | | |
1414 | 0 | update_subgroup_merge(subgrp, target, reason); |
1415 | 0 | return true; |
1416 | 0 | } |
1417 | | |
1418 | | /* |
1419 | | * update_subgroup_merge_check_thread_cb |
1420 | | */ |
1421 | | static void update_subgroup_merge_check_thread_cb(struct event *thread) |
1422 | 0 | { |
1423 | 0 | struct update_subgroup *subgrp; |
1424 | 0 |
|
1425 | 0 | subgrp = EVENT_ARG(thread); |
1426 | 0 |
|
1427 | 0 | subgrp->t_merge_check = NULL; |
1428 | 0 |
|
1429 | 0 | update_subgroup_check_merge(subgrp, "triggered merge check"); |
1430 | 0 | } |
1431 | | |
1432 | | /* |
1433 | | * update_subgroup_trigger_merge_check |
1434 | | * |
1435 | | * Triggers a call to update_subgroup_check_merge() on a clean context. |
1436 | | * |
1437 | | * @param force If true, the merge check will be triggered even if the |
1438 | | * subgroup doesn't currently look ready for a merge. |
1439 | | * |
1440 | | * Returns true if a merge check will be performed shortly. |
1441 | | */ |
1442 | | bool update_subgroup_trigger_merge_check(struct update_subgroup *subgrp, |
1443 | | int force) |
1444 | 0 | { |
1445 | 0 | if (subgrp->t_merge_check) |
1446 | 0 | return true; |
1447 | | |
1448 | 0 | if (!force && !update_subgroup_ready_for_merge(subgrp)) |
1449 | 0 | return false; |
1450 | | |
1451 | 0 | subgrp->t_merge_check = NULL; |
1452 | 0 | event_add_timer_msec(bm->master, update_subgroup_merge_check_thread_cb, |
1453 | 0 | subgrp, 0, &subgrp->t_merge_check); |
1454 | |
|
1455 | 0 | SUBGRP_INCR_STAT(subgrp, merge_checks_triggered); |
1456 | |
|
1457 | 0 | return true; |
1458 | 0 | } |
1459 | | |
1460 | | /* |
1461 | | * update_subgroup_copy_adj_out |
1462 | | * |
1463 | | * Helper function that clones the adj out (state about advertised |
1464 | | * routes) from one subgroup to another. It assumes that the adj out |
1465 | | * of the target subgroup is empty. |
1466 | | */ |
1467 | | static void update_subgroup_copy_adj_out(struct update_subgroup *source, |
1468 | | struct update_subgroup *dest) |
1469 | 0 | { |
1470 | 0 | struct bgp_adj_out *aout, *aout_copy; |
1471 | |
|
1472 | 0 | SUBGRP_FOREACH_ADJ (source, aout) { |
1473 | | /* |
1474 | | * Copy the adj out. |
1475 | | */ |
1476 | 0 | aout_copy = bgp_adj_out_alloc(dest, aout->dest, |
1477 | 0 | aout->addpath_tx_id); |
1478 | 0 | aout_copy->attr = |
1479 | 0 | aout->attr ? bgp_attr_intern(aout->attr) : NULL; |
1480 | 0 | } |
1481 | |
|
1482 | 0 | dest->scount = source->scount; |
1483 | 0 | } |
1484 | | |
1485 | | /* |
1486 | | * update_subgroup_copy_packets |
1487 | | * |
1488 | | * Copy packets after and including the given packet to the subgroup |
1489 | | * 'dest'. |
1490 | | * |
1491 | | * Returns the number of packets copied. |
1492 | | */ |
1493 | | static int update_subgroup_copy_packets(struct update_subgroup *dest, |
1494 | | struct bpacket *pkt) |
1495 | 0 | { |
1496 | 0 | int count; |
1497 | |
|
1498 | 0 | count = 0; |
1499 | 0 | while (pkt && pkt->buffer) { |
1500 | 0 | bpacket_queue_add(SUBGRP_PKTQ(dest), stream_dup(pkt->buffer), |
1501 | 0 | &pkt->arr); |
1502 | 0 | count++; |
1503 | 0 | pkt = bpacket_next(pkt); |
1504 | 0 | } |
1505 | |
|
1506 | 0 | return count; |
1507 | 0 | } |
1508 | | |
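| | /* |
| |  * If the named prefix-list is the update group's outbound prefix-list, |
| |  * re-resolve the cached pointer and return true. |
| |  */ |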
1509 | | static bool updgrp_prefix_list_update(struct update_group *updgrp, |
1510 | | const char *name) |
1511 | 0 | { |
1512 | 0 | struct peer *peer; |
1513 | 0 | struct bgp_filter *filter; |
1514 | |
|
1515 | 0 | peer = UPDGRP_PEER(updgrp); |
1516 | 0 | filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)]; |
1517 | |
|
1518 | 0 | if (PREFIX_LIST_OUT_NAME(filter) |
1519 | 0 | && (strcmp(name, PREFIX_LIST_OUT_NAME(filter)) == 0)) { |
1520 | 0 | PREFIX_LIST_OUT(filter) = prefix_list_lookup( |
1521 | 0 | UPDGRP_AFI(updgrp), PREFIX_LIST_OUT_NAME(filter)); |
1522 | 0 | return true; |
1523 | 0 | } |
1524 | 0 | return false; |
1525 | 0 | } |
1526 | | |
1527 | | static bool updgrp_filter_list_update(struct update_group *updgrp, |
1528 | | const char *name) |
1529 | 0 | { |
1530 | 0 | struct peer *peer; |
1531 | 0 | struct bgp_filter *filter; |
1532 | |
|
1533 | 0 | peer = UPDGRP_PEER(updgrp); |
1534 | 0 | filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)]; |
1535 | |
|
1536 | 0 | if (FILTER_LIST_OUT_NAME(filter) |
1537 | 0 | && (strcmp(name, FILTER_LIST_OUT_NAME(filter)) == 0)) { |
1538 | 0 | FILTER_LIST_OUT(filter) = |
1539 | 0 | as_list_lookup(FILTER_LIST_OUT_NAME(filter)); |
1540 | 0 | return true; |
1541 | 0 | } |
1542 | 0 | return false; |
1543 | 0 | } |
1544 | | |
1545 | | static bool updgrp_distribute_list_update(struct update_group *updgrp, |
1546 | | const char *name) |
1547 | 0 | { |
1548 | 0 | struct peer *peer; |
1549 | 0 | struct bgp_filter *filter; |
1550 | |
|
1551 | 0 | peer = UPDGRP_PEER(updgrp); |
1552 | 0 | filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)]; |
1553 | |
|
1554 | 0 | if (DISTRIBUTE_OUT_NAME(filter) |
1555 | 0 | && (strcmp(name, DISTRIBUTE_OUT_NAME(filter)) == 0)) { |
1556 | 0 | DISTRIBUTE_OUT(filter) = access_list_lookup( |
1557 | 0 | UPDGRP_AFI(updgrp), DISTRIBUTE_OUT_NAME(filter)); |
1558 | 0 | return true; |
1559 | 0 | } |
1560 | 0 | return false; |
1561 | 0 | } |
1562 | | |
1563 | | static int updgrp_route_map_update(struct update_group *updgrp, |
1564 | | const char *name, int *def_rmap_changed) |
1565 | 0 | { |
1566 | 0 | struct peer *peer; |
1567 | 0 | struct bgp_filter *filter; |
1568 | 0 | int changed = 0; |
1569 | 0 | afi_t afi; |
1570 | 0 | safi_t safi; |
1571 | |
|
1572 | 0 | peer = UPDGRP_PEER(updgrp); |
1573 | 0 | afi = UPDGRP_AFI(updgrp); |
1574 | 0 | safi = UPDGRP_SAFI(updgrp); |
1575 | 0 | filter = &peer->filter[afi][safi]; |
1576 | |
|
1577 | 0 | if (ROUTE_MAP_OUT_NAME(filter) |
1578 | 0 | && (strcmp(name, ROUTE_MAP_OUT_NAME(filter)) == 0)) { |
1579 | 0 | ROUTE_MAP_OUT(filter) = route_map_lookup_by_name(name); |
1580 | |
|
1581 | 0 | changed = 1; |
1582 | 0 | } |
1583 | |
|
1584 | 0 | if (UNSUPPRESS_MAP_NAME(filter) |
1585 | 0 | && (strcmp(name, UNSUPPRESS_MAP_NAME(filter)) == 0)) { |
1586 | 0 | UNSUPPRESS_MAP(filter) = route_map_lookup_by_name(name); |
1587 | 0 | changed = 1; |
1588 | 0 | } |
1589 | | |
1590 | | /* process default-originate route-map */ |
1591 | 0 | if (peer->default_rmap[afi][safi].name |
1592 | 0 | && (strcmp(name, peer->default_rmap[afi][safi].name) == 0)) { |
1593 | 0 | peer->default_rmap[afi][safi].map = |
1594 | 0 | route_map_lookup_by_name(name); |
1595 | 0 | if (def_rmap_changed) |
1596 | 0 | *def_rmap_changed = 1; |
1597 | 0 | } |
1598 | 0 | return changed; |
1599 | 0 | } |
1600 | | |
1601 | | /* |
1602 | | * hash iteration callback function to process a policy change for an |
1603 | | * update group. Check if the changed policy matches the updgrp's |
1604 | | * outbound route-map or unsuppress-map or default-originate map or |
1605 | | * filter-list or prefix-list or distribute-list. |
1606 | | * Trigger update generation accordingly. |
1607 | | */ |
1608 | | static int updgrp_policy_update_walkcb(struct update_group *updgrp, void *arg) |
1609 | 0 | { |
1610 | 0 | struct updwalk_context *ctx = arg; |
1611 | 0 | struct update_subgroup *subgrp; |
1612 | 0 | int changed = 0; |
1613 | 0 | int def_changed = 0; |
1614 | |
|
1615 | 0 | if (!updgrp || !ctx || !ctx->policy_name) |
1616 | 0 | return UPDWALK_CONTINUE; |
1617 | | |
1618 | 0 | switch (ctx->policy_type) { |
1619 | 0 | case BGP_POLICY_ROUTE_MAP: |
1620 | 0 | changed = updgrp_route_map_update(updgrp, ctx->policy_name, |
1621 | 0 | &def_changed); |
1622 | 0 | break; |
1623 | 0 | case BGP_POLICY_FILTER_LIST: |
1624 | 0 | changed = updgrp_filter_list_update(updgrp, ctx->policy_name); |
1625 | 0 | break; |
1626 | 0 | case BGP_POLICY_PREFIX_LIST: |
1627 | 0 | changed = updgrp_prefix_list_update(updgrp, ctx->policy_name); |
1628 | 0 | break; |
1629 | 0 | case BGP_POLICY_DISTRIBUTE_LIST: |
1630 | 0 | changed = |
1631 | 0 | updgrp_distribute_list_update(updgrp, ctx->policy_name); |
1632 | 0 | break; |
1633 | 0 | default: |
1634 | 0 | break; |
1635 | 0 | } |
1636 | | |
1637 | | /* If not doing route update, return after updating "config" */ |
1638 | 0 | if (!ctx->policy_route_update) |
1639 | 0 | return UPDWALK_CONTINUE; |
1640 | | |
1641 | | /* If nothing has changed, return after updating "config" */ |
1642 | 0 | if (!changed && !def_changed) |
1643 | 0 | return UPDWALK_CONTINUE; |
1644 | | |
1645 | | /* |
1647 |  | 	 * If something has changed, then at the beginning of a route-map
1648 |  | 	 * modification event, mark each subgroup's needs-refresh bit.
1649 |  | 	 * For one, it signals to later processing that the subgroup
1650 |  | 	 * needs a refresh. Second, it prevents a premature merge of
1651 |  | 	 * this subgroup with another before a complete (outbound)
1652 |  | 	 * refresh has been done.
1653 |  | 	 */
1654 |  | 
1654 | 0 | if (ctx->policy_event_start_flag) { |
1655 | 0 | UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) { |
1656 | 0 | update_subgroup_set_needs_refresh(subgrp, 1); |
1657 | 0 | } |
1658 | 0 | return UPDWALK_CONTINUE; |
1659 | 0 | } |
1660 | | |
1661 | 0 | UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) { |
1662 |  | 		/* Avoid suppressing duplicate routes later
1663 | | * when processing in subgroup_announce_table(). |
1664 | | */ |
1665 | 0 | SET_FLAG(subgrp->sflags, SUBGRP_STATUS_FORCE_UPDATES); |
1666 | |
|
1667 | 0 | if (changed) { |
1668 | 0 | if (bgp_debug_update(NULL, NULL, updgrp, 0)) |
1669 | 0 | zlog_debug( |
1670 | 0 | "u%" PRIu64 ":s%" PRIu64" announcing routes upon policy %s (type %d) change", |
1671 | 0 | updgrp->id, subgrp->id, |
1672 | 0 | ctx->policy_name, ctx->policy_type); |
1673 | 0 | subgroup_announce_route(subgrp); |
1674 | 0 | } |
1675 | 0 | if (def_changed) { |
1676 | 0 | if (bgp_debug_update(NULL, NULL, updgrp, 0)) |
1677 | 0 | zlog_debug( |
1678 | 0 | "u%" PRIu64 ":s%" PRIu64" announcing default upon default routemap %s change", |
1679 | 0 | updgrp->id, subgrp->id, |
1680 | 0 | ctx->policy_name); |
1681 | 0 | if (route_map_lookup_by_name(ctx->policy_name)) { |
1682 | | /* |
1683 |  | 				 * When there is a change in the route-map, this flow
1684 |  | 				 * is triggered. The route-map is still present
1685 |  | 				 * in the route-map lib, hence it's an update flow. The flag
1686 |  | 				 * needs to be unset.
1687 | | */ |
1688 | 0 | UNSET_FLAG(subgrp->sflags, |
1689 | 0 | SUBGRP_STATUS_DEFAULT_ORIGINATE); |
1690 | 0 | subgroup_default_originate(subgrp, 0); |
1691 | 0 | } else { |
1692 | | /* |
1693 |  | 				 * This is an explicit withdraw, since the
1694 |  | 				 * route-map is no longer present in the route-map lib. We
1695 |  | 				 * need to pass 1 for the withdraw arg.
1696 | | */ |
1697 | 0 | subgroup_default_originate(subgrp, 1); |
1698 | 0 | } |
1699 | 0 | } |
1700 | 0 | update_subgroup_set_needs_refresh(subgrp, 0); |
1701 | 0 | } |
1702 | 0 | return UPDWALK_CONTINUE; |
1703 | 0 | } |
1704 | | |
1705 | | static int update_group_walkcb(struct hash_bucket *bucket, void *arg) |
1706 | 0 | { |
1707 | 0 | struct update_group *updgrp = bucket->data; |
1708 | 0 | struct updwalk_context *wctx = arg; |
1709 | 0 | int ret = (*wctx->cb)(updgrp, wctx->context); |
1710 | 0 | return ret; |
1711 | 0 | } |
1712 | | |
1713 | | static int update_group_periodic_merge_walkcb(struct update_group *updgrp, |
1714 | | void *arg) |
1715 | 0 | { |
1716 | 0 | struct update_subgroup *subgrp; |
1717 | 0 | struct update_subgroup *tmp_subgrp; |
1718 | 0 | const char *reason = arg; |
1719 | |
|
1720 | 0 | UPDGRP_FOREACH_SUBGRP_SAFE (updgrp, subgrp, tmp_subgrp) |
1721 | 0 | update_subgroup_check_merge(subgrp, reason); |
1722 | 0 | return UPDWALK_CONTINUE; |
1723 | 0 | } |
1724 | | |
1725 | | /******************** |
1726 | | * PUBLIC FUNCTIONS |
1727 | | ********************/ |
1728 | | |
1729 | | /* |
1730 | | * trigger function when a policy (route-map/filter-list/prefix-list/ |
1731 | | * distribute-list etc.) content changes. Go through all the |
1732 | | * update groups and process the change. |
1733 | | * |
1734 | | * bgp: the bgp instance |
1735 | | * ptype: the type of policy that got modified, see bgpd.h |
1736 | | * pname: name of the policy |
1737 | | * route_update: flag to control if an automatic update generation should |
1738 | | * occur |
1739 | | * start_event: flag that indicates if it's the beginning of the change. |
1740 |  |  *               Especially relevant when the user is changing the content
1741 |  |  *               interactively over multiple statements. Useful to set the
1742 |  |  *               dirty flag on update groups.
1743 | | */ |
1744 | | void update_group_policy_update(struct bgp *bgp, enum bgp_policy_type ptype, |
1745 | | const char *pname, bool route_update, |
1746 | | int start_event) |
1747 | 0 | { |
1748 | 0 | struct updwalk_context ctx; |
1749 | |
|
1750 | 0 | memset(&ctx, 0, sizeof(ctx)); |
1751 | 0 | ctx.policy_type = ptype; |
1752 | 0 | ctx.policy_name = pname; |
1753 | 0 | ctx.policy_route_update = route_update; |
1754 | 0 | ctx.policy_event_start_flag = start_event; |
1755 | 0 | ctx.flags = 0; |
1756 | |
|
1757 | 0 | update_group_walk(bgp, updgrp_policy_update_walkcb, &ctx); |
1758 | 0 | } |
1759 | | |
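As a usage sketch under stated assumptions: my_route_map_changed() is hypothetical, while update_group_policy_update() and BGP_POLICY_ROUTE_MAP are the real identifiers used above. A handler invoked once an outbound route-map edit is complete could regenerate updates like this.

	/* Hypothetical hook run when a route-map edit has finished. */
	static void my_route_map_changed(struct bgp *bgp, const char *rmap_name)
	{
		/* route_update = true: trigger update generation for matching
		 * groups; start_event = 0: this is the end of the change. */
		update_group_policy_update(bgp, BGP_POLICY_ROUTE_MAP, rmap_name,
					   true, 0);
	}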
1760 | | /* |
1761 | | * update_subgroup_split_peer |
1762 | | * |
1763 | | * Ensure that the given peer is in a subgroup of its own in the |
1764 | | * specified update group. |
1765 | | */ |
1766 | | void update_subgroup_split_peer(struct peer_af *paf, |
1767 | | struct update_group *updgrp) |
1768 | 0 | { |
1769 | 0 | struct update_subgroup *old_subgrp, *subgrp; |
1770 | 0 | uint64_t old_id; |
1771 | | |
1772 | |
|
1773 | 0 | old_subgrp = paf->subgroup; |
1774 | |
|
1775 | 0 | if (!updgrp) |
1776 | 0 | updgrp = old_subgrp->update_group; |
1777 | | |
1778 | | /* |
1779 | | * If the peer is alone in its subgroup, reuse the existing |
1780 | | * subgroup. |
1781 | | */ |
1782 | 0 | if (old_subgrp->peer_count == 1) { |
1783 | 0 | if (updgrp == old_subgrp->update_group) |
1784 | 0 | return; |
1785 | | |
1786 | 0 | subgrp = old_subgrp; |
1787 | 0 | old_id = old_subgrp->update_group->id; |
1788 | |
|
1789 | 0 | if (bgp_debug_peer_updout_enabled(paf->peer->host)) { |
1790 | 0 | UPDGRP_PEER_DBG_DIS(old_subgrp->update_group); |
1791 | 0 | } |
1792 | |
|
1793 | 0 | update_group_remove_subgroup(old_subgrp->update_group, |
1794 | 0 | old_subgrp); |
1795 | 0 | update_group_add_subgroup(updgrp, subgrp); |
1796 | |
|
1797 | 0 | if (bgp_debug_peer_updout_enabled(paf->peer->host)) { |
1798 | 0 | UPDGRP_PEER_DBG_EN(updgrp); |
1799 | 0 | } |
1800 | 0 | if (BGP_DEBUG(update_groups, UPDATE_GROUPS)) |
1801 | 0 | zlog_debug("u%" PRIu64 ":s%" PRIu64" peer %s moved to u%" PRIu64 ":s%" PRIu64, |
1802 | 0 | old_id, subgrp->id, paf->peer->host, |
1803 | 0 | updgrp->id, subgrp->id); |
1804 | | |
1805 | | /* |
1806 | | * The state of the subgroup (adj_out, advs, packet queue etc) |
1807 | | * is consistent internally, but may not be identical to other |
1808 | | * subgroups in the new update group even if the version number |
1809 | | * matches up. Make sure a full refresh is done before the |
1810 | | * subgroup is merged with another. |
1811 | | */ |
1812 | 0 | update_subgroup_set_needs_refresh(subgrp, 1); |
1813 | |
|
1814 | 0 | SUBGRP_INCR_STAT(subgrp, updgrp_switch_events); |
1815 | 0 | return; |
1816 | 0 | } |
1817 | | |
1818 | | /* |
1819 | | * Create a new subgroup under the specified update group, and copy |
1820 | | * over relevant state to it. |
1821 | | */ |
1822 | 0 | subgrp = update_subgroup_create(updgrp); |
1823 | 0 | update_subgroup_inherit_info(subgrp, old_subgrp); |
1824 | |
|
1825 | 0 | subgrp->split_from.update_group_id = old_subgrp->update_group->id; |
1826 | 0 | subgrp->split_from.subgroup_id = old_subgrp->id; |
1827 | | |
1828 | | /* |
1829 | | * Copy out relevant state from the old subgroup. |
1830 | | */ |
1831 | 0 | update_subgroup_copy_adj_out(paf->subgroup, subgrp); |
1832 | 0 | update_subgroup_copy_packets(subgrp, paf->next_pkt_to_send); |
1833 | |
|
1834 | 0 | if (BGP_DEBUG(update_groups, UPDATE_GROUPS)) |
1835 | 0 | zlog_debug("u%" PRIu64 ":s%" PRIu64" peer %s split and moved into u%" PRIu64":s%" PRIu64, |
1836 | 0 | paf->subgroup->update_group->id, paf->subgroup->id, |
1837 | 0 | paf->peer->host, updgrp->id, subgrp->id); |
1838 | |
|
1839 | 0 | SUBGRP_INCR_STAT(paf->subgroup, split_events); |
1840 | | |
1841 | | /* |
1842 | | * Since queued advs were left behind, this new subgroup needs a |
1843 | | * refresh. |
1844 | | */ |
1845 | 0 | update_subgroup_set_needs_refresh(subgrp, 1); |
1846 | | |
1847 | | /* |
1848 | | * Remove peer from old subgroup, and add it to the new one. |
1849 | | */ |
1850 | 0 | update_subgroup_remove_peer(paf->subgroup, paf); |
1851 | |
|
1852 | 0 | update_subgroup_add_peer(subgrp, paf, 1); |
1853 | 0 | } |
1854 | | |
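A brief illustrative sketch (hypothetical helper, real callee): passing NULL for the update group keeps the peer in its current group but gives it a private subgroup, which is how peer_af_announce_route() further below prepares a peer-specific refresh.

	/* Hypothetical helper around update_subgroup_split_peer(). */
	static void my_isolate_peer(struct peer_af *paf)
	{
		/* NULL: remain in the current update group, just split out. */
		update_subgroup_split_peer(paf, NULL);
	}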
1855 | | void update_bgp_group_init(struct bgp *bgp) |
1856 | 0 | { |
1857 | 0 | int afid; |
1858 | |
|
1859 | 0 | AF_FOREACH (afid) |
1860 | 0 | bgp->update_groups[afid] = |
1861 | 0 | hash_create(updgrp_hash_key_make, updgrp_hash_cmp, |
1862 | 0 | "BGP Update Group Hash"); |
1863 | 0 | } |
1864 | | |
1865 | | void update_bgp_group_free(struct bgp *bgp) |
1866 | 0 | { |
1867 | 0 | int afid; |
1868 | |
|
1869 | 0 | AF_FOREACH (afid) { |
1870 | 0 | if (bgp->update_groups[afid]) { |
1871 | 0 | hash_free(bgp->update_groups[afid]); |
1872 | 0 | bgp->update_groups[afid] = NULL; |
1873 | 0 | } |
1874 | 0 | } |
1875 | 0 | } |
1876 | | |
1877 | | void update_group_show(struct bgp *bgp, afi_t afi, safi_t safi, struct vty *vty, |
1878 | | uint64_t subgrp_id, bool uj) |
1879 | 0 | { |
1880 | 0 | struct updwalk_context ctx; |
1881 | 0 | json_object *json_vrf_obj = NULL; |
1882 | |
|
1883 | 0 | memset(&ctx, 0, sizeof(ctx)); |
1884 | 0 | ctx.vty = vty; |
1885 | 0 | ctx.subgrp_id = subgrp_id; |
1886 | 0 | ctx.uj = uj; |
1887 | |
|
1888 | 0 | if (uj) { |
1889 | 0 | ctx.json_updategrps = json_object_new_object(); |
1890 | 0 | json_vrf_obj = json_object_new_object(); |
1891 | 0 | } |
1892 | |
|
1893 | 0 | update_group_af_walk(bgp, afi, safi, update_group_show_walkcb, &ctx); |
1894 | |
|
1895 | 0 | if (uj) { |
1896 | 0 | const char *vname; |
1897 | |
|
1898 | 0 | if (bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT) |
1899 | 0 | vname = VRF_DEFAULT_NAME; |
1900 | 0 | else |
1901 | 0 | vname = bgp->name; |
1902 | 0 | json_object_object_add(json_vrf_obj, vname, |
1903 | 0 | ctx.json_updategrps); |
1904 | 0 | vty_json(vty, json_vrf_obj); |
1905 | 0 | } |
1906 | 0 | } |
1907 | | |
1908 | | /* |
1909 | | * update_group_show_stats |
1910 | | * |
1911 | | * Show global statistics about update groups. |
1912 | | */ |
1913 | | void update_group_show_stats(struct bgp *bgp, struct vty *vty) |
1914 | 0 | { |
1915 | 0 | vty_out(vty, "Update groups created: %u\n", |
1916 | 0 | bgp->update_group_stats.updgrps_created); |
1917 | 0 | vty_out(vty, "Update groups deleted: %u\n", |
1918 | 0 | bgp->update_group_stats.updgrps_deleted); |
1919 | 0 | vty_out(vty, "Update subgroups created: %u\n", |
1920 | 0 | bgp->update_group_stats.subgrps_created); |
1921 | 0 | vty_out(vty, "Update subgroups deleted: %u\n", |
1922 | 0 | bgp->update_group_stats.subgrps_deleted); |
1923 | 0 | vty_out(vty, "Join events: %u\n", bgp->update_group_stats.join_events); |
1924 | 0 | vty_out(vty, "Prune events: %u\n", |
1925 | 0 | bgp->update_group_stats.prune_events); |
1926 | 0 | vty_out(vty, "Merge events: %u\n", |
1927 | 0 | bgp->update_group_stats.merge_events); |
1928 | 0 | vty_out(vty, "Split events: %u\n", |
1929 | 0 | bgp->update_group_stats.split_events); |
1930 | 0 | vty_out(vty, "Update group switch events: %u\n", |
1931 | 0 | bgp->update_group_stats.updgrp_switch_events); |
1932 | 0 | vty_out(vty, "Peer route refreshes combined: %u\n", |
1933 | 0 | bgp->update_group_stats.peer_refreshes_combined); |
1934 | 0 | vty_out(vty, "Merge checks triggered: %u\n", |
1935 | 0 | bgp->update_group_stats.merge_checks_triggered); |
1936 | 0 | } |
1937 | | |
1938 | | /* |
1939 | | * update_group_adjust_peer |
1940 | | */ |
1941 | | void update_group_adjust_peer(struct peer_af *paf) |
1942 | 0 | { |
1943 | 0 | struct update_group *updgrp; |
1944 | 0 | struct update_subgroup *subgrp, *old_subgrp; |
1945 | 0 | struct peer *peer; |
1946 | |
|
1947 | 0 | if (!paf) |
1948 | 0 | return; |
1949 | | |
1950 | 0 | peer = PAF_PEER(paf); |
1951 | 0 | if (!peer_established(peer)) { |
1952 | 0 | return; |
1953 | 0 | } |
1954 | | |
1955 | 0 | if (!CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE)) { |
1956 | 0 | return; |
1957 | 0 | } |
1958 | | |
1959 | 0 | if (!peer->afc_nego[paf->afi][paf->safi]) { |
1960 | 0 | return; |
1961 | 0 | } |
1962 | | |
1963 | 0 | updgrp = update_group_find(paf); |
1964 | 0 | if (!updgrp) |
1965 | 0 | updgrp = update_group_create(paf); |
1966 | |
|
1967 | 0 | old_subgrp = paf->subgroup; |
1968 | |
|
1969 | 0 | if (old_subgrp) { |
1970 | | |
1971 | | /* |
1972 | | * If the update group of the peer is unchanged, the peer can |
1973 | | * stay |
1974 | | * in its existing subgroup and we're done. |
1975 | | */ |
1976 | 0 | if (old_subgrp->update_group == updgrp) |
1977 | 0 | return; |
1978 | | |
1979 | | /* |
1980 | | * The peer is switching between update groups. Put it in its |
1981 | | * own subgroup under the new update group. |
1982 | | */ |
1983 | 0 | update_subgroup_split_peer(paf, updgrp); |
1984 | 0 | return; |
1985 | 0 | } |
1986 | | |
1987 | 0 | subgrp = update_subgroup_find(updgrp, paf); |
1988 | 0 | if (!subgrp) |
1989 | 0 | subgrp = update_subgroup_create(updgrp); |
1990 | |
|
1991 | 0 | update_subgroup_add_peer(subgrp, paf, 1); |
1992 | 0 | if (BGP_DEBUG(update_groups, UPDATE_GROUPS)) |
1993 | 0 | zlog_debug("u%" PRIu64 ":s%" PRIu64 " add peer %s", updgrp->id, |
1994 | 0 | subgrp->id, paf->peer->host); |
1995 | |
|
1996 | 0 | return; |
1997 | 0 | } |
1998 | | |
1999 | | int update_group_adjust_soloness(struct peer *peer, int set) |
2000 | 0 | { |
2001 | 0 | struct peer_group *group; |
2002 | 0 | struct listnode *node, *nnode; |
2003 | |
|
2004 | 0 | if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { |
2005 | 0 | peer_lonesoul_or_not(peer, set); |
2006 | 0 | if (peer_established(peer)) |
2007 | 0 | bgp_announce_route_all(peer); |
2008 | 0 | } else { |
2009 | 0 | group = peer->group; |
2010 | 0 | for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) { |
2011 | 0 | peer_lonesoul_or_not(peer, set); |
2012 | 0 | if (peer_established(peer)) |
2013 | 0 | bgp_announce_route_all(peer); |
2014 | 0 | } |
2015 | 0 | } |
2016 | 0 | return 0; |
2017 | 0 | } |
2018 | | |
2019 | | /* |
2020 | | * update_subgroup_rib |
2021 | | */ |
2022 | | struct bgp_table *update_subgroup_rib(struct update_subgroup *subgrp) |
2023 | 0 | { |
2024 | 0 | struct bgp *bgp; |
2025 | |
|
2026 | 0 | bgp = SUBGRP_INST(subgrp); |
2027 | 0 | if (!bgp) |
2028 | 0 | return NULL; |
2029 | | |
2030 | 0 | return bgp->rib[SUBGRP_AFI(subgrp)][SUBGRP_SAFI(subgrp)]; |
2031 | 0 | } |
2032 | | |
2033 | | void update_group_af_walk(struct bgp *bgp, afi_t afi, safi_t safi, |
2034 | | updgrp_walkcb cb, void *ctx) |
2035 | 0 | { |
2036 | 0 | struct updwalk_context wctx; |
2037 | 0 | int afid; |
2038 | |
|
2039 | 0 | if (!bgp) |
2040 | 0 | return; |
2041 | 0 | afid = afindex(afi, safi); |
2042 | 0 | if (afid >= BGP_AF_MAX) |
2043 | 0 | return; |
2044 | | |
2045 | 0 | memset(&wctx, 0, sizeof(wctx)); |
2046 | 0 | wctx.cb = cb; |
2047 | 0 | wctx.context = ctx; |
2048 | |
|
2049 | 0 | if (bgp->update_groups[afid]) |
2050 | 0 | hash_walk(bgp->update_groups[afid], update_group_walkcb, &wctx); |
2051 | 0 | } |
2052 | | |
2053 | | void update_group_walk(struct bgp *bgp, updgrp_walkcb cb, void *ctx) |
2054 | 0 | { |
2055 | 0 | afi_t afi; |
2056 | 0 | safi_t safi; |
2057 | |
|
2058 | 0 | FOREACH_AFI_SAFI (afi, safi) { |
2059 | 0 | update_group_af_walk(bgp, afi, safi, cb, ctx); |
2060 | 0 | } |
2061 | 0 | } |
2062 | | |
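To illustrate the walker API, a small sketch using only identifiers from this file plus two hypothetical helpers (my_count_subgrps_walkcb, my_count_subgrps). The callback contract is the same one updgrp_policy_update_walkcb() follows: return UPDWALK_CONTINUE to keep iterating.

	/* Hypothetical walker: count subgroups across every update group. */
	static int my_count_subgrps_walkcb(struct update_group *updgrp, void *arg)
	{
		unsigned int *count = arg;
		struct update_subgroup *subgrp;

		UPDGRP_FOREACH_SUBGRP (updgrp, subgrp)
			(*count)++;

		return UPDWALK_CONTINUE;
	}

	static unsigned int my_count_subgrps(struct bgp *bgp)
	{
		unsigned int count = 0;

		update_group_walk(bgp, my_count_subgrps_walkcb, &count);
		return count;
	}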
2063 | | void update_group_periodic_merge(struct bgp *bgp) |
2064 | 0 | { |
2065 | 0 | char reason[] = "periodic merge check"; |
2066 | |
|
2067 | 0 | update_group_walk(bgp, update_group_periodic_merge_walkcb, |
2068 | 0 | (void *)reason); |
2069 | 0 | } |
2070 | | |
2071 | | static int |
2072 | | update_group_default_originate_route_map_walkcb(struct update_group *updgrp, |
2073 | | void *arg) |
2074 | 0 | { |
2075 | 0 | struct update_subgroup *subgrp; |
2076 | 0 | struct peer *peer; |
2077 | 0 | afi_t afi; |
2078 | 0 | safi_t safi; |
2079 | |
|
2080 | 0 | UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) { |
2081 | 0 | peer = SUBGRP_PEER(subgrp); |
2082 | 0 | afi = SUBGRP_AFI(subgrp); |
2083 | 0 | safi = SUBGRP_SAFI(subgrp); |
2084 | |
|
2085 | 0 | if (peer->default_rmap[afi][safi].name) { |
2086 | | /* |
2087 |  | 			 * When there is a change in the route-map this flow will
2088 |  | 			 * be triggered. We need to unset the flag to ensure
2089 | | * the update flow gets triggered. |
2090 | | */ |
2091 | 0 | UNSET_FLAG(subgrp->sflags, |
2092 | 0 | SUBGRP_STATUS_DEFAULT_ORIGINATE); |
2093 | 0 | subgroup_default_originate(subgrp, 0); |
2094 | 0 | } |
2095 | 0 | } |
2096 | |
|
2097 | 0 | return UPDWALK_CONTINUE; |
2098 | 0 | } |
2099 | | |
2100 | | void update_group_refresh_default_originate_route_map(struct event *thread) |
2101 | 0 | { |
2102 | 0 | struct bgp *bgp; |
2103 | 0 | char reason[] = "refresh default-originate route-map"; |
2104 | |
|
2105 | 0 | bgp = EVENT_ARG(thread); |
2106 | 0 | update_group_walk(bgp, update_group_default_originate_route_map_walkcb, |
2107 | 0 | reason); |
2108 | 0 | EVENT_OFF(bgp->t_rmap_def_originate_eval); |
2109 | 0 | bgp_unlock(bgp); |
2110 | 0 | } |
2111 | | |
2112 | | /* |
2113 | | * peer_af_announce_route |
2114 | | * |
2115 | | * Refreshes routes out to a peer_af immediately. |
2116 | | * |
2117 | | * If the combine parameter is true, then this function will try to |
2118 | | * gather other peers in the subgroup for which a route announcement |
2119 |  |  * is pending and efficiently announce routes to all of them.
2120 | | * |
2121 | | * For now, the 'combine' option has an effect only if all peers in |
2122 | | * the subgroup have a route announcement pending. |
2123 | | */ |
2124 | | void peer_af_announce_route(struct peer_af *paf, int combine) |
2125 | 0 | { |
2126 | 0 | struct update_subgroup *subgrp; |
2127 | 0 | struct peer_af *cur_paf; |
2128 | 0 | int all_pending; |
2129 | |
|
2130 | 0 | subgrp = paf->subgroup; |
2131 | 0 | all_pending = 0; |
2132 | |
|
2133 | 0 | if (combine) { |
2134 | | /* |
2135 | | * If there are other peers in the old subgroup that also need |
2136 | | * routes to be announced, pull them into the peer's new |
2137 | | * subgroup. |
2138 | | * Combine route announcement with other peers if possible. |
2139 | | * |
2140 | | * For now, we combine only if all peers in the subgroup have an |
2141 | | * announcement pending. |
2142 | | */ |
2143 | 0 | all_pending = 1; |
2144 | |
|
2145 | 0 | SUBGRP_FOREACH_PEER (subgrp, cur_paf) { |
2146 | 0 | if (cur_paf == paf) |
2147 | 0 | continue; |
2148 | | |
2149 | 0 | if (cur_paf->t_announce_route) |
2150 | 0 | continue; |
2151 | | |
2152 | 0 | all_pending = 0; |
2153 | 0 | break; |
2154 | 0 | } |
2155 | 0 | } |
2156 | | /* |
2157 | | * Announce to the peer alone if we were not asked to combine peers, |
2158 |  | 	 * or if some peers don't have a route announcement pending.
2159 | | */ |
2160 | 0 | if (!combine || !all_pending) { |
2161 | 0 | update_subgroup_split_peer(paf, NULL); |
2162 | 0 | subgrp = paf->subgroup; |
2163 | |
|
2164 | 0 | assert(subgrp && subgrp->update_group); |
2165 | 0 | if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0)) |
2166 | 0 | zlog_debug("u%" PRIu64 ":s%" PRIu64" %s announcing routes", |
2167 | 0 | subgrp->update_group->id, subgrp->id, |
2168 | 0 | paf->peer->host); |
2169 | |
|
2170 | 0 | subgroup_announce_route(paf->subgroup); |
2171 | 0 | return; |
2172 | 0 | } |
2173 | | |
2174 | | /* |
2175 |  | 	 * We will announce routes to the entire subgroup.
2176 | | * |
2177 | | * First stop refresh timers on all the other peers. |
2178 | | */ |
2179 | 0 | SUBGRP_FOREACH_PEER (subgrp, cur_paf) { |
2180 | 0 | if (cur_paf == paf) |
2181 | 0 | continue; |
2182 | | |
2183 | 0 | bgp_stop_announce_route_timer(cur_paf); |
2184 | 0 | } |
2185 | |
|
2186 | 0 | if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0)) |
2187 | 0 | zlog_debug("u%" PRIu64 ":s%" PRIu64" announcing routes to %s, combined into %d peers", |
2188 | 0 | subgrp->update_group->id, subgrp->id, |
2189 | 0 | paf->peer->host, subgrp->peer_count); |
2190 | |
|
2191 | 0 | subgroup_announce_route(subgrp); |
2192 | |
|
2193 | 0 | SUBGRP_INCR_STAT_BY(subgrp, peer_refreshes_combined, |
2194 | 0 | subgrp->peer_count - 1); |
2195 | 0 | } |
2196 | | |
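As a hedged usage sketch: my_announce_timer_expired() is hypothetical, peer_af_announce_route() is defined above. A per-peer announce-route timer handler could refresh the peer and opportunistically batch with the rest of its subgroup.

	static void my_announce_timer_expired(struct peer_af *paf)
	{
		/* combine = 1: announce to the whole subgroup at once when every
		 * member peer also has an announcement pending. */
		peer_af_announce_route(paf, 1);
	}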
2197 | | void subgroup_trigger_write(struct update_subgroup *subgrp) |
2198 | 0 | { |
2199 | 0 | struct peer_af *paf; |
2200 | | |
2201 | | /* |
2202 | | * For each peer in the subgroup, schedule a job to pull packets from |
2203 |  | 	 * the subgroup output queue into its own output queue. This action
2204 | | * will trigger a write job on the I/O thread. |
2205 | | */ |
2206 | 0 | SUBGRP_FOREACH_PEER (subgrp, paf) |
2207 | 0 | if (peer_established(paf->peer)) |
2208 | 0 | event_add_timer_msec( |
2209 | 0 | bm->master, bgp_generate_updgrp_packets, |
2210 | 0 | paf->peer, 0, |
2211 | 0 | &paf->peer->t_generate_updgrp_packets); |
2212 | 0 | } |
2213 | | |
2214 | | int update_group_clear_update_dbg(struct update_group *updgrp, void *arg) |
2215 | 0 | { |
2216 | 0 | UPDGRP_PEER_DBG_OFF(updgrp); |
2217 | 0 | return UPDWALK_CONTINUE; |
2218 | 0 | } |
2219 | | |
2220 | | /* Return true if we should addpath encode NLRI to this peer */ |
2221 | | bool bgp_addpath_encode_tx(struct peer *peer, afi_t afi, safi_t safi) |
2222 | 0 | { |
2223 | 0 | return (CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ADDPATH_AF_TX_ADV) |
2224 | 0 | && CHECK_FLAG(peer->af_cap[afi][safi], |
2225 | 0 | PEER_CAP_ADDPATH_AF_RX_RCV)); |
2226 | 0 | } |
2227 | | |
2228 | | bool bgp_addpath_capable(struct bgp_path_info *bpi, struct peer *peer, |
2229 | | afi_t afi, safi_t safi) |
2230 | 0 | { |
2231 | 0 | return (bgp_addpath_tx_path(peer->addpath_type[afi][safi], bpi) || |
2232 | 0 | (safi == SAFI_LABELED_UNICAST && |
2233 | 0 | bgp_addpath_tx_path(peer->addpath_type[afi][SAFI_UNICAST], |
2234 | 0 | bpi))); |
2235 | 0 | } |
2236 | | |
2237 | | bool bgp_check_selected(struct bgp_path_info *bpi, struct peer *peer, |
2238 | | bool addpath_capable, afi_t afi, safi_t safi) |
2239 | 0 | { |
2240 | 0 | return (CHECK_FLAG(bpi->flags, BGP_PATH_SELECTED) || |
2241 | 0 | (addpath_capable && bgp_addpath_capable(bpi, peer, afi, safi))); |
2242 | 0 | } |
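A closing sketch tying the last two helpers together; my_should_announce() is hypothetical, the callees are from this file. An advertisement decision typically first asks whether addpath transmission was negotiated, then whether this particular path qualifies.

	static bool my_should_announce(struct bgp_path_info *bpi, struct peer *peer,
				       afi_t afi, safi_t safi)
	{
		bool addpath_capable = bgp_addpath_encode_tx(peer, afi, safi);

		/* The best path is always eligible; with addpath, additional paths
		 * chosen by the configured addpath strategy are eligible too. */
		return bgp_check_selected(bpi, peer, addpath_capable, afi, safi);
	}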