Coverage Report

Created: 2025-10-23 06:55

/src/frr/bgpd/bgp_addpath.c
  Line | Count | Source
     1 |       | // SPDX-License-Identifier: GPL-2.0-or-later
     2 |       | /*
     3 |       |  * Addpath TX ID selection, and related utilities
     4 |       |  * Copyright (C) 2018  Amazon.com, Inc. or its affiliates
     5 |       |  */
     6 |       |
     7 |       | #ifdef HAVE_CONFIG_H
     8 |       | #include "config.h"
     9 |       | #endif
    10 |       |
    11 |       | #include "bgp_addpath.h"
    12 |       | #include "bgp_route.h"
    13 |       |
    14 |       | static const struct bgp_addpath_strategy_names strat_names[BGP_ADDPATH_MAX] = {
    15 |       |   {
    16 |       |     .config_name = "addpath-tx-all-paths",
    17 |       |     .human_name = "All",
    18 |       |     .human_description = "Advertise all paths via addpath",
    19 |       |     .type_json_name = "addpathTxAllPaths",
    20 |       |     .id_json_name = "addpathTxIdAll"
    21 |       |   },
    22 |       |   {
    23 |       |     .config_name = "addpath-tx-bestpath-per-AS",
    24 |       |     .human_name = "Best-Per-AS",
    25 |       |     .human_description = "Advertise bestpath per AS via addpath",
    26 |       |     .type_json_name = "addpathTxBestpathPerAS",
    27 |       |     .id_json_name = "addpathTxIdBestPerAS"
    28 |       |   }
    29 |       | };
    30 |       |
    31 |       | static const struct bgp_addpath_strategy_names unknown_names = {
    32 |       |   .config_name = "addpath-tx-unknown",
    33 |       |   .human_name = "Unknown-Addpath-Strategy",
    34 |       |   .human_description = "Unknown Addpath Strategy",
    35 |       |   .type_json_name = "addpathTxUnknown",
    36 |       |   .id_json_name = "addpathTxIdUnknown"
    37 |       | };
    38 |       |
    39 |       | /*
    40 |       |  * Returns a structure full of strings associated with an addpath type. Will
    41 |       |  * never return null.
    42 |       |  */
    43 |       | const struct bgp_addpath_strategy_names *
    44 |       | bgp_addpath_names(enum bgp_addpath_strat strat)
    45 |     0 | {
    46 |     0 |   if (strat < BGP_ADDPATH_MAX)
    47 |     0 |     return &(strat_names[strat]);
    48 |     0 |   else
    49 |     0 |     return &unknown_names;
    50 |     0 | };
    51 |       |
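Note: because bgp_addpath_names() falls back to unknown_names instead of returning NULL, callers may format output without a guard. A minimal hypothetical caller sketch, assuming FRR's lib/vty.h is in scope (show_strategy itself is illustrative, not part of the measured file):

        #include "vty.h"        /* assumed: FRR's vty_out() */

        static void show_strategy(struct vty *vty, enum bgp_addpath_strat strat)
        {
                /* Safe to dereference: an out-of-range strat yields the
                 * "Unknown-Addpath-Strategy" entry, never NULL. */
                const struct bgp_addpath_strategy_names *names =
                        bgp_addpath_names(strat);

                vty_out(vty, "TX strategy: %s (%s)\n", names->human_name,
                        names->config_name);
        }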
    52 |       | /*
    53 |       |  * Returns whether any peer is transmitting addpaths for a given afi/safi.
    54 |       |  */
    55 |       | bool bgp_addpath_is_addpath_used(struct bgp_addpath_bgp_data *d, afi_t afi,
    56 |       |          safi_t safi)
    57 |     0 | {
    58 |     0 |   return d->total_peercount[afi][safi] > 0;
    59 |     0 | }
    60 |       |
    61 |       | /*
    62 |       |  * Initialize the BGP instance level data for addpath.
    63 |       |  */
    64 |       | void bgp_addpath_init_bgp_data(struct bgp_addpath_bgp_data *d)
    65 |     1 | {
    66 |     1 |   safi_t safi;
    67 |     1 |   afi_t afi;
    68 |     1 |   int i;
    69 |       |
    70 |    21 |   FOREACH_AFI_SAFI (afi, safi) {
    71 |    63 |     for (i = 0; i < BGP_ADDPATH_MAX; i++) {
    72 |    42 |       d->id_allocators[afi][safi][i] = NULL;
    73 |    42 |       d->peercount[afi][safi][i] = 0;
    74 |    42 |     }
    75 |    21 |     d->total_peercount[afi][safi] = 0;
    76 |    21 |   }
    77 |     1 | }
    78 |       |
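Note on the counts: bgp_addpath_init_bgp_data() ran once in this capture (count 1 on lines 65-68 and 77). That single call produced 21 iterations of FOREACH_AFI_SAFI (presumably 3 AFIs × 7 SAFIs in this build), 63 evaluations of the inner loop line (21 × 3: the tests for i = 0, i = 1, and the failing i = 2 test, with BGP_ADDPATH_MAX = 2), and 42 executions of the loop body (21 × 2).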
    79 |       | /*
    80 |       |  * Free up resources associated with BGP route info structures.
    81 |       |  */
    82 |       | void bgp_addpath_free_info_data(struct bgp_addpath_info_data *d,
    83 |       |             struct bgp_addpath_node_data *nd)
    84 |     0 | {
    85 |     0 |   int i;
    86 |       |
    87 |     0 |   for (i = 0; i < BGP_ADDPATH_MAX; i++) {
    88 |     0 |     if (d->addpath_tx_id[i] != IDALLOC_INVALID)
    89 |     0 |       idalloc_free_to_pool(&nd->free_ids[i],
    90 |     0 |                d->addpath_tx_id[i]);
    91 |     0 |   }
    92 |     0 | }
    93 |       |
    94 |       | /*
    95 |       |  * Return the addpath ID used to send a particular route to a particular peer,
    96 |       |  * in a particular AFI/SAFI.
    97 |       |  */
    98 |       | uint32_t bgp_addpath_id_for_peer(struct peer *peer, afi_t afi, safi_t safi,
    99 |       |         struct bgp_addpath_info_data *d)
   100 |     0 | {
   101 |     0 |   if (safi == SAFI_LABELED_UNICAST)
   102 |     0 |     safi = SAFI_UNICAST;
   103 |       |
   104 |     0 |   if (peer->addpath_type[afi][safi] < BGP_ADDPATH_MAX)
   105 |     0 |     return d->addpath_tx_id[peer->addpath_type[afi][safi]];
   106 |     0 |   else
   107 |     0 |     return IDALLOC_INVALID;
   108 |     0 | }
   109 |       |
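Note: callers use the returned ID when encoding the NLRI for an addpath-enabled session. A hypothetical fragment, with peer, afi, safi, pi, and the output stream s assumed in scope (stream_putl() is FRR's 32-bit network-order stream writer):

        uint32_t tx_id = bgp_addpath_id_for_peer(peer, afi, safi,
                                                 &pi->tx_addpath);

        if (tx_id != IDALLOC_INVALID)
                /* RFC 7911: the 4-octet path identifier precedes the prefix. */
                stream_putl(s, tx_id);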
   110 |       | /*
   111 |       |  * Returns true if the path has an assigned addpath ID for any of the addpath
   112 |       |  * strategies.
   113 |       |  */
   114 |       | bool bgp_addpath_info_has_ids(struct bgp_addpath_info_data *d)
   115 |     0 | {
   116 |     0 |   int i;
   117 |       |
   118 |     0 |   for (i = 0; i < BGP_ADDPATH_MAX; i++)
   119 |     0 |     if (d->addpath_tx_id[i] != 0)
   120 |     0 |       return true;
   121 |       |
   122 |     0 |   return false;
   123 |     0 | }
   124 |       |
   125 |       | /*
   126 |       |  * Releases any IDs associated with the BGP prefix.
   127 |       |  */
   128 |       | void bgp_addpath_free_node_data(struct bgp_addpath_bgp_data *bd,
   129 |       |             struct bgp_addpath_node_data *nd, afi_t afi,
   130 |       |             safi_t safi)
   131 | 2.67k | {
   132 | 2.67k |   int i;
   133 |       |
   134 | 8.01k |   for (i = 0; i < BGP_ADDPATH_MAX; i++) {
   135 | 5.34k |     idalloc_drain_pool(bd->id_allocators[afi][safi][i],
   136 | 5.34k |            &(nd->free_ids[i]));
   137 | 5.34k |   }
   138 | 2.67k | }
   139 |       |
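Note on the counts: bgp_addpath_free_node_data() is the only hot path in this capture: 2.67k route-node teardowns × BGP_ADDPATH_MAX (2) = 5.34k pool drains, with 8.01k (2.67k × 3) evaluations of the loop condition. Together with the single init call above, these are the only non-zero lines in the file, consistent with a run in which no peer ever had an addpath-tx strategy configured.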
   140 |       | /*
   141 |       |  * Check to see if the addpath strategy requires DMED to be configured to work.
   142 |       |  */
   143 |       | bool bgp_addpath_dmed_required(int strategy)
   144 |     0 | {
   145 |     0 |   return strategy == BGP_ADDPATH_BEST_PER_AS;
   146 |     0 | }
   147 |       |
   148 |       | /*
   149 |       |  * Return true if this is a path we should advertise due to a
   150 |       |  * configured addpath-tx knob
   151 |       |  */
   152 |       | bool bgp_addpath_tx_path(enum bgp_addpath_strat strat, struct bgp_path_info *pi)
   153 |     0 | {
   154 |     0 |   switch (strat) {
   155 |     0 |   case BGP_ADDPATH_NONE:
   156 |     0 |     return false;
   157 |     0 |   case BGP_ADDPATH_ALL:
   158 |     0 |     return true;
   159 |     0 |   case BGP_ADDPATH_BEST_PER_AS:
   160 |     0 |     if (CHECK_FLAG(pi->flags, BGP_PATH_DMED_SELECTED))
   161 |     0 |       return true;
   162 |     0 |     else
   163 |     0 |       return false;
   164 |     0 |   case BGP_ADDPATH_MAX:
   165 |     0 |     return false;
   166 |     0 |   }
   167 |       |
   168 |     0 |   assert(!"Reached end of function we should never hit");
   169 |     0 | }
   170 |       |
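Note: a peer with an addpath strategy receives its classic bestpath plus every path this predicate accepts. A hypothetical selection loop, with dest, peer, afi, and safi assumed in scope (BGP_PATH_SELECTED marks the bestpath; the real advertisement logic in bgpd applies further policy):

        struct bgp_path_info *pi;

        for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) {
                if (CHECK_FLAG(pi->flags, BGP_PATH_SELECTED) ||
                    bgp_addpath_tx_path(peer->addpath_type[afi][safi], pi)) {
                        /* queue pi for advertisement to this peer */
                }
        }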
   171 |       | static void bgp_addpath_flush_type_rn(struct bgp *bgp, afi_t afi, safi_t safi,
   172 |       |               enum bgp_addpath_strat addpath_type,
   173 |       |               struct bgp_dest *dest)
   174 |     0 | {
   175 |     0 |   struct bgp_path_info *pi;
   176 |       |
   177 |     0 |   if (safi == SAFI_LABELED_UNICAST)
   178 |     0 |     safi = SAFI_UNICAST;
   179 |       |
   180 |     0 |   idalloc_drain_pool(
   181 |     0 |     bgp->tx_addpath.id_allocators[afi][safi][addpath_type],
   182 |     0 |     &(dest->tx_addpath.free_ids[addpath_type]));
   183 |     0 |   for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) {
   184 |     0 |     if (pi->tx_addpath.addpath_tx_id[addpath_type]
   185 |     0 |         != IDALLOC_INVALID) {
   186 |     0 |       idalloc_free(
   187 |     0 |         bgp->tx_addpath
   188 |     0 |           .id_allocators[afi][safi][addpath_type],
   189 |     0 |         pi->tx_addpath.addpath_tx_id[addpath_type]);
   190 |     0 |       pi->tx_addpath.addpath_tx_id[addpath_type] =
   191 |     0 |         IDALLOC_INVALID;
   192 |     0 |     }
   193 |     0 |   }
   194 |     0 | }
   195 |       |
   196 |       | /*
   197 |       |  * Purge all addpath IDs on a BGP instance associated with the addpath
   198 |       |  * strategy, and afi/safi combination. This releases all memory held to
   199 |       |  * track ID numbers associated with an addpath type not in use. Since
   200 |       |  * post-bestpath ID processing is skipped for types not used, this is the only
   201 |       |  * chance to free this data.
   202 |       |  */
   203 |       | static void bgp_addpath_flush_type(struct bgp *bgp, afi_t afi, safi_t safi,
   204 |       |            enum bgp_addpath_strat addpath_type)
   205 |     0 | {
   206 |     0 |   struct bgp_dest *dest, *ndest;
   207 |       |
   208 |     0 |   if (safi == SAFI_LABELED_UNICAST)
   209 |     0 |     safi = SAFI_UNICAST;
   210 |       |
   211 |     0 |   for (dest = bgp_table_top(bgp->rib[afi][safi]); dest;
   212 |     0 |        dest = bgp_route_next(dest)) {
   213 |     0 |     if (safi == SAFI_MPLS_VPN) {
   214 |     0 |       struct bgp_table *table;
   215 |       |
   216 |     0 |       table = bgp_dest_get_bgp_table_info(dest);
   217 |     0 |       if (!table)
   218 |     0 |         continue;
   219 |       |
   220 |     0 |       for (ndest = bgp_table_top(table); ndest;
   221 |     0 |            ndest = bgp_route_next(ndest))
   222 |     0 |         bgp_addpath_flush_type_rn(bgp, afi, safi,
   223 |     0 |                 addpath_type, ndest);
   224 |     0 |     } else {
   225 |     0 |       bgp_addpath_flush_type_rn(bgp, afi, safi, addpath_type,
   226 |     0 |               dest);
   227 |     0 |     }
   228 |     0 |   }
   229 |       |
   230 |     0 |   idalloc_destroy(bgp->tx_addpath.id_allocators[afi][safi][addpath_type]);
   231 |     0 |   bgp->tx_addpath.id_allocators[afi][safi][addpath_type] = NULL;
   232 |     0 | }
   233 |       |
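Note: the SAFI_MPLS_VPN branch exists because the VPN RIB is two levels deep: the top-level table is keyed by route distinguisher, and each entry carries a nested per-RD table of prefixes (returned by bgp_dest_get_bgp_table_info()), so the flush descends one level before touching path info.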
   234 |       | /*
   235 |       |  * Allocate an Addpath ID for the given type on a path, if necessary.
   236 |       |  */
   237 |       | static void bgp_addpath_populate_path(struct id_alloc *allocator,
   238 |       |               struct bgp_path_info *path,
   239 |       |               enum bgp_addpath_strat addpath_type)
   240 |     0 | {
   241 |     0 |   if (bgp_addpath_tx_path(addpath_type, path)) {
   242 |     0 |     path->tx_addpath.addpath_tx_id[addpath_type] =
   243 |     0 |       idalloc_allocate(allocator);
   244 |     0 |   }
   245 |     0 | }
   246 |       |
   247 |       | /*
   248 |       |  * Compute addpath IDs on a BGP instance associated with the addpath strategy,
   249 |       |  * and afi/safi combination. Since we won't waste the time computing addpath IDs
   250 |       |  * for unused strategies, the first time a peer is configured to use a strategy,
   251 |       |  * we have to backfill the data.
   252 |       |  * In labeled-unicast, addpath allocations SHOULD be done in unicast SAFI.
   253 |       |  */
   254 |       | static void bgp_addpath_populate_type(struct bgp *bgp, afi_t afi, safi_t safi,
   255 |       |             enum bgp_addpath_strat addpath_type)
   256 |     0 | {
   257 |     0 |   struct bgp_dest *dest, *ndest;
   258 |     0 |   char buf[200];
   259 |     0 |   struct id_alloc *allocator;
   260 |       |
   261 |     0 |   if (safi == SAFI_LABELED_UNICAST)
   262 |     0 |     safi = SAFI_UNICAST;
   263 |       |
   264 |     0 |   snprintf(buf, sizeof(buf), "Addpath ID Allocator %s:%d/%d",
   265 |     0 |      bgp_addpath_names(addpath_type)->config_name, (int)afi,
   266 |     0 |      (int)safi);
   267 |     0 |   buf[sizeof(buf) - 1] = '\0';
   268 |     0 |   zlog_info("Computing addpath IDs for addpath type %s",
   269 |     0 |     bgp_addpath_names(addpath_type)->human_name);
   270 |       |
   271 |     0 |   bgp->tx_addpath.id_allocators[afi][safi][addpath_type] =
   272 |     0 |     idalloc_new(buf);
   273 |       |
   274 |     0 |   idalloc_reserve(bgp->tx_addpath.id_allocators[afi][safi][addpath_type],
   275 |     0 |     BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
   276 |       |
   277 |     0 |   allocator = bgp->tx_addpath.id_allocators[afi][safi][addpath_type];
   278 |       |
   279 |     0 |   for (dest = bgp_table_top(bgp->rib[afi][safi]); dest;
   280 |     0 |        dest = bgp_route_next(dest)) {
   281 |     0 |     struct bgp_path_info *bi;
   282 |       |
   283 |     0 |     if (safi == SAFI_MPLS_VPN) {
   284 |     0 |       struct bgp_table *table;
   285 |       |
   286 |     0 |       table = bgp_dest_get_bgp_table_info(dest);
   287 |     0 |       if (!table)
   288 |     0 |         continue;
   289 |       |
   290 |     0 |       for (ndest = bgp_table_top(table); ndest;
   291 |     0 |            ndest = bgp_route_next(ndest))
   292 |     0 |         for (bi = bgp_dest_get_bgp_path_info(ndest); bi;
   293 |     0 |              bi = bi->next)
   294 |     0 |           bgp_addpath_populate_path(allocator, bi,
   295 |     0 |                   addpath_type);
   296 |     0 |     } else {
   297 |     0 |       for (bi = bgp_dest_get_bgp_path_info(dest); bi;
   298 |     0 |            bi = bi->next)
   299 |     0 |         bgp_addpath_populate_path(allocator, bi,
   300 |     0 |                 addpath_type);
   301 |     0 |     }
   302 |     0 |   }
   303 |     0 | }
   304 |       |
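Note: the allocator calls above come from FRR's lib/id_alloc. A condensed sketch of the lifecycle this file relies on (the sequence is illustrative; all names appear in the listing):

        struct id_alloc *alloc = idalloc_new("Addpath ID Allocator example");

        /* Keep the well-known default-originate ID out of circulation. */
        idalloc_reserve(alloc, BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);

        uint32_t id = idalloc_allocate(alloc); /* assign a TX ID to a path */
        idalloc_free(alloc, id);               /* path no longer advertised */
        idalloc_destroy(alloc);                /* strategy no longer in use */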
   305 |       | /*
   306 |       |  * Handle updates to a peer or group's addpath strategy. If after adjusting
   307 |       |  * counts an addpath strategy is in use for the first time, or no longer in use,
   308 |       |  * the IDs for that strategy will be populated or flushed.
   309 |       |  */
   310 |       | void bgp_addpath_type_changed(struct bgp *bgp)
   311 |     0 | {
   312 |     0 |   afi_t afi;
   313 |     0 |   safi_t safi;
   314 |     0 |   struct listnode *node, *nnode;
   315 |     0 |   struct peer *peer;
   316 |     0 |   int peer_count[AFI_MAX][SAFI_MAX][BGP_ADDPATH_MAX];
   317 |     0 |   enum bgp_addpath_strat type;
   318 |       |
   319 |     0 |   FOREACH_AFI_SAFI(afi, safi) {
   320 |     0 |     for (type = 0; type < BGP_ADDPATH_MAX; type++) {
   321 |     0 |       peer_count[afi][safi][type] = 0;
   322 |     0 |     }
   323 |     0 |     bgp->tx_addpath.total_peercount[afi][safi] = 0;
   324 |     0 |   }
   325 |       |
   326 |     0 |   for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
   327 |     0 |     FOREACH_AFI_SAFI(afi, safi) {
   328 |     0 |       type = peer->addpath_type[afi][safi];
   329 |     0 |       if (type != BGP_ADDPATH_NONE) {
   330 |     0 |         peer_count[afi][safi][type] += 1;
   331 |     0 |         bgp->tx_addpath.total_peercount[afi][safi] += 1;
   332 |     0 |       }
   333 |     0 |     }
   334 |     0 |   }
   335 |       |
   336 |     0 |   FOREACH_AFI_SAFI(afi, safi) {
   337 |     0 |     for (type = 0; type < BGP_ADDPATH_MAX; type++) {
   338 |     0 |       int old = bgp->tx_addpath.peercount[afi][safi][type];
   339 |     0 |       int new = peer_count[afi][safi][type];
   340 |       |
   341 |     0 |       bgp->tx_addpath.peercount[afi][safi][type] = new;
   342 |       |
   343 |     0 |       if (old == 0 && new != 0) {
   344 |     0 |         bgp_addpath_populate_type(bgp, afi, safi,
   345 |     0 |           type);
   346 |     0 |       } else if (old != 0 && new == 0) {
   347 |     0 |         bgp_addpath_flush_type(bgp, afi, safi, type);
   348 |     0 |       }
   349 |     0 |     }
   350 |     0 |   }
   351 |     0 | }
   352 |       |
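Note: the populate/flush pair is edge-triggered on the per-strategy peer count. For example, configuring the first addpath-tx-all-paths peer in an AFI/SAFI moves that count 0 -> 1 and backfills IDs for every existing path via bgp_addpath_populate_type(); dropping from two such peers to one changes nothing; removing the last one (1 -> 0) releases the allocator and all IDs via bgp_addpath_flush_type().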
   353 |       | /*
   354 |       |  * Change the addpath type assigned to a peer, or peer group. In addition to
   355 |       |  * adjusting the counts, peer sessions will be reset as needed to make the
   356 |       |  * change take effect.
   357 |       |  */
   358 |       | void bgp_addpath_set_peer_type(struct peer *peer, afi_t afi, safi_t safi,
   359 |       |             enum bgp_addpath_strat addpath_type)
   360 |     0 | {
   361 |     0 |   struct bgp *bgp = peer->bgp;
   362 |     0 |   enum bgp_addpath_strat old_type;
   363 |     0 |   struct listnode *node, *nnode;
   364 |     0 |   struct peer *tmp_peer;
   365 |     0 |   struct peer_group *group;
   366 |       |
   367 |     0 |   if (safi == SAFI_LABELED_UNICAST)
   368 |     0 |     safi = SAFI_UNICAST;
   369 |       |
   370 |     0 |   old_type = peer->addpath_type[afi][safi];
   371 |     0 |   if (addpath_type == old_type)
   372 |     0 |     return;
   373 |       |
   374 |     0 |   if (addpath_type == BGP_ADDPATH_NONE && peer->group &&
   375 |     0 |       !CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
   376 |       |     /* A "no" config on a group member inherits group */
   377 |     0 |     addpath_type = peer->group->conf->addpath_type[afi][safi];
   378 |     0 |   }
   379 |       |
   380 |     0 |   peer->addpath_type[afi][safi] = addpath_type;
   381 |       |
   382 |     0 |   bgp_addpath_type_changed(bgp);
   383 |       |
   384 |     0 |   if (addpath_type != BGP_ADDPATH_NONE) {
   385 |     0 |     if (bgp_addpath_dmed_required(addpath_type)) {
   386 |     0 |       if (!CHECK_FLAG(bgp->flags,
   387 |     0 |           BGP_FLAG_DETERMINISTIC_MED)) {
   388 |     0 |         zlog_warn(
   389 |     0 |           "%s: enabling bgp deterministic-med, this is required for addpath-tx-bestpath-per-AS",
   390 |     0 |           peer->host);
   391 |     0 |         SET_FLAG(bgp->flags,
   392 |     0 |            BGP_FLAG_DETERMINISTIC_MED);
   393 |     0 |         bgp_recalculate_all_bestpaths(bgp);
   394 |     0 |       }
   395 |     0 |     }
   396 |     0 |   }
   397 |       |
   398 |     0 |   zlog_info("Resetting peer %s%pBP due to change in addpath config",
   399 |     0 |       CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP) ? "group " : "",
   400 |     0 |       peer);
   401 |       |
   402 |     0 |   if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
   403 |     0 |     group = peer->group;
   404 |       |
   405 |       |     /* group will be null as peer_group_delete calls peer_delete on
   406 |       |      * group->conf. That peer_delete will eventually end up here
   407 |       |      * if the group was configured to tx addpaths.
   408 |       |      */
   409 |     0 |     if (group != NULL) {
   410 |     0 |       for (ALL_LIST_ELEMENTS(group->peer, node, nnode,
   411 |     0 |            tmp_peer)) {
   412 |     0 |         if (tmp_peer->addpath_type[afi][safi] ==
   413 |     0 |             old_type) {
   414 |     0 |           bgp_addpath_set_peer_type(tmp_peer,
   415 |     0 |                  afi,
   416 |     0 |                  safi,
   417 |     0 |                  addpath_type);
   418 |     0 |         }
   419 |     0 |       }
   420 |     0 |     }
   421 |     0 |   } else {
   422 |     0 |     peer_change_action(peer, afi, safi, peer_change_reset);
   423 |     0 |   }
   424 |       |
   425 |     0 | }
   426 |       |
   427 |       | /*
   428 |       |  * Intended to run after bestpath. This function will take TX IDs from paths
   429 |       |  * that no longer need them, and give them to paths that do. This prevents
   430 |       |  * best-per-as updates from needing to do a separate withdraw and update just to
   431 |       |  * swap out which path is sent.
   432 |       |  */
   433 |       | void bgp_addpath_update_ids(struct bgp *bgp, struct bgp_dest *bn, afi_t afi,
   434 |       |           safi_t safi)
   435 |     0 | {
   436 |     0 |   int i;
   437 |     0 |   struct bgp_path_info *pi;
   438 |     0 |   struct id_alloc_pool **pool_ptr;
   439 |       |
   440 |     0 |   if (safi == SAFI_LABELED_UNICAST)
   441 |     0 |     safi = SAFI_UNICAST;
   442 |       |
   443 |     0 |   for (i = 0; i < BGP_ADDPATH_MAX; i++) {
   444 |     0 |     struct id_alloc *alloc =
   445 |     0 |       bgp->tx_addpath.id_allocators[afi][safi][i];
   446 |     0 |     pool_ptr = &(bn->tx_addpath.free_ids[i]);
   447 |       |
   448 |     0 |     if (bgp->tx_addpath.peercount[afi][safi][i] == 0)
   449 |     0 |       continue;
   450 |       |
   451 |       |     /* Free Unused IDs back to the pool. */
   452 |     0 |     for (pi = bgp_dest_get_bgp_path_info(bn); pi; pi = pi->next) {
   453 |     0 |       if (pi->tx_addpath.addpath_tx_id[i] != IDALLOC_INVALID
   454 |     0 |           && !bgp_addpath_tx_path(i, pi)) {
   455 |     0 |         idalloc_free_to_pool(pool_ptr,
   456 |     0 |           pi->tx_addpath.addpath_tx_id[i]);
   457 |     0 |         pi->tx_addpath.addpath_tx_id[i] =
   458 |     0 |           IDALLOC_INVALID;
   459 |     0 |       }
   460 |     0 |     }
   461 |       |
   462 |       |     /* Give IDs to paths that need them (pulling from the pool) */
   463 |     0 |     for (pi = bgp_dest_get_bgp_path_info(bn); pi; pi = pi->next) {
   464 |     0 |       if (pi->tx_addpath.addpath_tx_id[i] == IDALLOC_INVALID
   465 |     0 |           && bgp_addpath_tx_path(i, pi)) {
   466 |     0 |         pi->tx_addpath.addpath_tx_id[i] =
   467 |     0 |           idalloc_allocate_prefer_pool(
   468 |     0 |             alloc, pool_ptr);
   469 |     0 |       }
   470 |     0 |     }
   471 |       |
   472 |       |     /* Free any IDs left in the pool to the main allocator */
   473 |     0 |     idalloc_drain_pool(alloc, pool_ptr);
   474 |     0 |   }
   475 |     0 | }
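Note: the node-local pool is what makes best-per-AS changes cheap on the wire. When the best path within an AS moves from path A to path B, the first loop frees A's TX ID into the pool, the second hands the same ID to B via idalloc_allocate_prefer_pool(), and the receiver sees a single UPDATE replacing the (prefix, path-id) tuple rather than a withdraw followed by a fresh advertisement.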