Coverage Report

Created: 2025-08-26 06:20

/src/frr/bgpd/bgp_labelpool.c
Line
Count
Source (jump to first uncovered line)
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
3
 * BGP Label Pool - Manage label chunk allocations from zebra asynchronously
4
 *
5
 * Copyright (C) 2018 LabN Consulting, L.L.C.
6
 */
7
8
#include <zebra.h>
9
10
#include "log.h"
11
#include "memory.h"
12
#include "stream.h"
13
#include "mpls.h"
14
#include "vty.h"
15
#include "linklist.h"
16
#include "skiplist.h"
17
#include "workqueue.h"
18
#include "zclient.h"
19
#include "mpls.h"
20
21
#include "bgpd/bgpd.h"
22
#include "bgpd/bgp_labelpool.h"
23
#include "bgpd/bgp_debug.h"
24
#include "bgpd/bgp_errors.h"
25
#include "bgpd/bgp_route.h"
26
#include "bgpd/bgp_zebra.h"
27
#include "bgpd/bgp_vty.h"
28
#include "bgpd/bgp_rd.h"
29
30
#define BGP_LABELPOOL_ENABLE_TESTS 0
31
32
#include "bgpd/bgp_labelpool_clippy.c"
33
34
35
/*
36
 * Definitions and external declarations.
37
 */
38
extern struct zclient *zclient;
39
40
#if BGP_LABELPOOL_ENABLE_TESTS
41
static void lptest_init(void);
42
static void lptest_finish(void);
43
#endif
44
45
/*
46
 * Remember where pool data are kept
47
 */
48
static struct labelpool *lp;
49
50
/*
51
 * Number of labels requested at a time from the zebra label manager.
52
 * We start small but double the request size each time up to a
53
 * maximum size.
54
 *
55
 * The label space is 20 bits which is shared with other FRR processes
56
 * on this host, so to avoid greedily requesting a mostly wasted chunk,
57
 * we limit the chunk size to 1/16 of the label space (that's the -4 bits
58
 * in the definition below). This limit slightly increases our cost of
59
 * finding free labels in our allocated chunks.
60
 */
61
1
#define LP_CHUNK_SIZE_MIN 128
62
0
#define LP_CHUNK_SIZE_MAX (1 << (20 - 4))
63
64
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CHUNK, "BGP Label Chunk");
65
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_FIFO, "BGP Label FIFO item");
66
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CB, "BGP Dynamic Label Assignment");
67
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CBQ, "BGP Dynamic Label Callback");
68
69
struct lp_chunk {
70
  uint32_t  first;
71
  uint32_t  last;
72
  uint32_t nfree;        /* un-allocated count */
73
  uint32_t idx_last_allocated; /* start looking here */
74
  bitfield_t allocated_map;
75
};
76
77
/*
78
 * label control block
79
 */
80
struct lp_lcb {
81
  mpls_label_t  label;    /* MPLS_LABEL_NONE = not allocated */
82
  int   type;
83
  void    *labelid; /* unique ID */
84
  /*
85
   * callback for label allocation and loss
86
   *
87
   * allocated: false = lost
88
   */
89
  int   (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
90
};
91
92
struct lp_fifo {
93
  struct lp_fifo_item fifo;
94
  struct lp_lcb lcb;
95
};
96
97
DECLARE_LIST(lp_fifo, struct lp_fifo, fifo);
98
99
struct lp_cbq_item {
100
  int   (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
101
  int   type;
102
  mpls_label_t  label;
103
  void    *labelid;
104
  bool    allocated;  /* false = lost */
105
};
106
107
/*
 * Work-queue handler: deliver one queued label-assignment (or loss)
 * notification to the requestor's callback.
 *
 * If the callback returns non-zero for an allocation, the requestor has
 * refused the label (e.g., the request was cancelled while the response
 * was queued), so the label is returned to the pool: it is removed from
 * lp->inuse and, if the ledger LCB still maps this labelid to this label,
 * the ledger entry is deleted as well.
 *
 * Always returns WQ_SUCCESS so the work queue never retries the item.
 */
static wq_item_status lp_cbq_docallback(struct work_queue *wq, void *data)
{
	struct lp_cbq_item *lcbq = data;
	int rc;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	if (debug)
		zlog_debug("%s: calling callback with labelid=%p label=%u allocated=%d",
			__func__, lcbq->labelid, lcbq->label, lcbq->allocated);

	if (lcbq->label == MPLS_LABEL_NONE) {
		/* shouldn't happen */
		flog_err(EC_BGP_LABEL, "%s: error: label==MPLS_LABEL_NONE",
			 __func__);
		return WQ_SUCCESS;
	}

	rc = (*(lcbq->cbfunc))(lcbq->label, lcbq->labelid, lcbq->allocated);

	if (lcbq->allocated && rc) {
		/*
		 * Callback rejected allocation. This situation could arise
		 * if there was a label request followed by the requestor
		 * deciding it didn't need the assignment (e.g., config
		 * change) while the reply to the original request (with
		 * label) was in the work queue.
		 */
		if (debug)
			zlog_debug("%s: callback rejected allocation, releasing labelid=%p label=%u",
				__func__, lcbq->labelid, lcbq->label);

		/* skiplist keys are pointer-sized; carry label as uintptr_t */
		uintptr_t lbl = lcbq->label;
		void *labelid;
		struct lp_lcb *lcb;

		/*
		 * If the rejected label was marked inuse by this labelid,
		 * release the label back to the pool.
		 *
		 * Further, if the rejected label was still assigned to
		 * this labelid in the LCB, delete the LCB.
		 */
		if (!skiplist_search(lp->inuse, (void *)lbl, &labelid)) {
			if (labelid == lcbq->labelid) {
				if (!skiplist_search(lp->ledger, labelid,
					(void **)&lcb)) {
					if (lcbq->label == lcb->label)
						skiplist_delete(lp->ledger,
							labelid, NULL);
				}
				skiplist_delete(lp->inuse, (void *)lbl, NULL);
			}
		}
	}

	return WQ_SUCCESS;
}
164
165
/* Work-queue item destructor: free a queued callback item. */
static void lp_cbq_item_free(struct work_queue *wq, void *data)
{
	XFREE(MTYPE_BGP_LABEL_CBQ, data);
}
169
170
/* Skiplist value destructor for lp->ledger entries (struct lp_lcb). */
static void lp_lcb_free(void *goner)
{
	XFREE(MTYPE_BGP_LABEL_CB, goner);
}
174
175
/*
 * List destructor for lp->chunks entries: release the chunk's
 * allocation bitmap before freeing the chunk itself.
 */
static void lp_chunk_free(void *goner)
{
	struct lp_chunk *chunk = (struct lp_chunk *)goner;

	bf_free(chunk->allocated_map);
	XFREE(MTYPE_BGP_LABEL_CHUNK, goner);
}
182
183
/*
 * Initialize the label pool module.
 *
 * master: event loop used to drive the callback work queue.
 * pool:   caller-owned storage for pool state; remembered in the
 *         module-static 'lp' pointer used by all other entry points.
 */
void bgp_lp_init(struct event_loop *master, struct labelpool *pool)
{
	if (BGP_DEBUG(labelpool, LABELPOOL))
		zlog_debug("%s: entry", __func__);

	lp = pool;	/* Set module pointer to pool data */

	lp->ledger = skiplist_new(0, NULL, lp_lcb_free);
	lp->inuse = skiplist_new(0, NULL, NULL);
	lp->chunks = list_new();
	lp->chunks->del = lp_chunk_free;
	lp_fifo_init(&lp->requests);
	lp->callback_q = work_queue_new(master, "label callbacks");

	lp->callback_q->spec.workfunc = lp_cbq_docallback;
	lp->callback_q->spec.del_item_data = lp_cbq_item_free;
	/* no retries: lp_cbq_docallback always reports WQ_SUCCESS */
	lp->callback_q->spec.max_retries = 0;

	/* first zebra chunk request starts small; doubles on demand */
	lp->next_chunksize = LP_CHUNK_SIZE_MIN;

#if BGP_LABELPOOL_ENABLE_TESTS
	lptest_init();
#endif
}
207
208
/* check if a label callback was for a BGP LU node, and if so, unlock it */
209
/* check if a label callback was for a BGP LU node, and if so, unlock it */
static void check_bgp_lu_cb_unlock(struct lp_lcb *lcb)
{
	/* for LP_TYPE_BGP_LU, labelid is a locked struct bgp_dest * */
	if (lcb->type == LP_TYPE_BGP_LU)
		bgp_dest_unlock_node(lcb->labelid);
}
214
215
/* check if a label callback was for a BGP LU node, and if so, lock it */
216
/* check if a label callback was for a BGP LU node, and if so, lock it */
static void check_bgp_lu_cb_lock(struct lp_lcb *lcb)
{
	/* keep the bgp_dest alive while a queued item references it */
	if (lcb->type == LP_TYPE_BGP_LU)
		bgp_dest_lock_node(lcb->labelid);
}
221
222
/*
 * Tear down the label pool module: free ledger/inuse skiplists, chunk
 * list, pending request FIFO and the callback work queue. Safe to call
 * when the pool was never initialized (lp == NULL). BGP-LU bgp_dest
 * locks held by queued items are dropped manually here — see comment
 * below for why the work queue's own destructor cannot do it.
 */
void bgp_lp_finish(void)
{
	struct lp_fifo *lf;
	struct work_queue_item *item, *titem;

#if BGP_LABELPOOL_ENABLE_TESTS
	lptest_finish();
#endif
	if (!lp)
		return;

	skiplist_free(lp->ledger);
	lp->ledger = NULL;

	skiplist_free(lp->inuse);
	lp->inuse = NULL;

	list_delete(&lp->chunks);

	/* drain outstanding requests, dropping any LU node locks */
	while ((lf = lp_fifo_pop(&lp->requests))) {
		check_bgp_lu_cb_unlock(&lf->lcb);
		XFREE(MTYPE_BGP_LABEL_FIFO, lf);
	}
	lp_fifo_fini(&lp->requests);

	/* we must unlock path infos for LU callbacks; but we cannot do that
	 * in the deletion callback of the workqueue, as that is also called
	 * to remove an element from the queue after it has been run, resulting
	 * in a double unlock. Hence we need to iterate over our queues and
	 * lists and manually perform the unlocking (ugh)
	 */
	STAILQ_FOREACH_SAFE (item, &lp->callback_q->items, wq, titem)
		check_bgp_lu_cb_unlock(item->data);

	work_queue_free_and_null(&lp->callback_q);

	lp = NULL;
}
260
261
/*
 * Allocate one label from the local chunk pool and record it in
 * lp->inuse keyed by the label value, mapping to 'labelid'.
 *
 * Returns the label, or MPLS_LABEL_NONE if every chunk is exhausted
 * (or the inuse insert fails, which indicates internal inconsistency).
 */
static mpls_label_t get_label_from_pool(void *labelid)
{
	struct listnode *node;
	struct lp_chunk *chunk;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	/*
	 * Find a free label
	 */
	for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
		uintptr_t lbl;
		unsigned int index;

		if (debug)
			zlog_debug("%s: chunk first=%u last=%u",
				__func__, chunk->first, chunk->last);

		/*
		 * don't look in chunks with no available labels
		 */
		if (!chunk->nfree)
			continue;

		/*
		 * roll through bitfield starting where we stopped
		 * last time
		 */
		index = bf_find_next_clear_bit_wrap(
			&chunk->allocated_map, chunk->idx_last_allocated + 1,
			0);

		/*
		 * since chunk->nfree is non-zero, we should always get
		 * a valid index
		 */
		assert(index != WORD_MAX);

		lbl = chunk->first + index;
		if (skiplist_insert(lp->inuse, (void *)lbl, labelid)) {
			/* something is very wrong */
			zlog_err("%s: unable to insert inuse label %u (id %p)",
				 __func__, (uint32_t)lbl, labelid);
			return MPLS_LABEL_NONE;
		}

		/*
		 * Success
		 */
		bf_set_bit(chunk->allocated_map, index);
		chunk->idx_last_allocated = index;
		chunk->nfree -= 1;

		return lbl;
	}

	return MPLS_LABEL_NONE;
}
318
319
/*
320
 * Success indicated by value of "label" field in returned LCB
321
 */
322
static struct lp_lcb *lcb_alloc(
323
  int type,
324
  void  *labelid,
325
  int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
326
0
{
327
  /*
328
   * Set up label control block
329
   */
330
0
  struct lp_lcb *new = XCALLOC(MTYPE_BGP_LABEL_CB,
331
0
    sizeof(struct lp_lcb));
332
333
0
  new->label = get_label_from_pool(labelid);
334
0
  new->type = type;
335
0
  new->labelid = labelid;
336
0
  new->cbfunc = cbfunc;
337
338
0
  return new;
339
0
}
340
341
/*
342
 * Callers who need labels must supply a type, labelid, and callback.
343
 * The type is a value defined in bgp_labelpool.h (add types as needed).
344
 * The callback is for asynchronous notification of label allocation.
345
 * The labelid is passed as an argument to the callback. It should be unique
346
 * to the requested label instance.
347
 *
348
 * If zebra is not connected, callbacks with labels will be delayed
349
 * until connection is established. If zebra connection is lost after
350
 * labels have been assigned, existing assignments via this labelpool
351
 * module will continue until reconnection.
352
 *
353
 * When connection to zebra is reestablished, previous label assignments
354
 * will be invalidated (via callbacks having the "allocated" parameter unset)
355
 * and new labels will be automatically reassigned by this labelpool module
356
 * (that is, a requestor does not need to call bgp_lp_get() again if it is
357
 * notified via callback that its label has been lost: it will eventually
358
 * get another callback with a new label assignment).
359
 *
360
 * The callback function should return 0 to accept the allocation
361
 * and non-zero to refuse it. The callback function return value is
362
 * ignored for invalidations (i.e., when the "allocated" parameter is false)
363
 *
364
 * Prior requests for a given labelid are detected so that requests and
365
 * assignments are not duplicated.
366
 */
367
/*
 * Request a label for 'labelid'. See the block comment above for the
 * full contract. Fast path: a label is available locally (or was
 * already assigned) and the callback is queued immediately. Slow path:
 * the request is FIFO-queued and, if outstanding requests exceed the
 * labels already on order, another chunk is requested from zebra.
 */
void bgp_lp_get(
	int type,
	void	*labelid,
	int	(*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
{
	struct lp_lcb *lcb;
	int requested = 0;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	if (debug)
		zlog_debug("%s: labelid=%p", __func__, labelid);

	/*
	 * Have we seen this request before?
	 */
	if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
		requested = 1;
	} else {
		lcb = lcb_alloc(type, labelid, cbfunc);
		if (debug)
			zlog_debug("%s: inserting lcb=%p label=%u",
				__func__, lcb, lcb->label);
		int rc = skiplist_insert(lp->ledger, labelid, lcb);

		if (rc) {
			/* shouldn't happen */
			flog_err(EC_BGP_LABEL,
				 "%s: can't insert new LCB into ledger list",
				 __func__);
			XFREE(MTYPE_BGP_LABEL_CB, lcb);
			return;
		}
	}

	if (lcb->label != MPLS_LABEL_NONE) {
		/*
		 * Fast path: we filled the request from local pool (or
		 * this is a duplicate request that we filled already).
		 * Enqueue response work item with new label.
		 */
		struct lp_cbq_item *q;

		q = XCALLOC(MTYPE_BGP_LABEL_CBQ, sizeof(struct lp_cbq_item));

		q->cbfunc = lcb->cbfunc;
		q->type = lcb->type;
		q->label = lcb->label;
		q->labelid = lcb->labelid;
		q->allocated = true;

		/* if this is a LU request, lock node before queueing */
		check_bgp_lu_cb_lock(lcb);

		work_queue_add(lp->callback_q, q);

		return;
	}

	/* no label yet; if already queued, the pending request suffices */
	if (requested)
		return;

	if (debug)
		zlog_debug("%s: slow path. lcb=%p label=%u",
			__func__, lcb, lcb->label);

	/*
	 * Slow path: we are out of labels in the local pool,
	 * so remember the request and also get another chunk from
	 * the label manager.
	 *
	 * We track number of outstanding label requests: don't
	 * need to get a chunk for each one.
	 */

	struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
		sizeof(struct lp_fifo));

	/* FIFO entry carries a snapshot copy of the LCB */
	lf->lcb = *lcb;
	/* if this is a LU request, lock node before queueing */
	check_bgp_lu_cb_lock(lcb);

	lp_fifo_add_tail(&lp->requests, lf);

	/* only order more labels when requests outnumber labels on order */
	if (lp_fifo_count(&lp->requests) > lp->pending_count) {
		if (!zclient || zclient->sock < 0)
			return;
		if (zclient_send_get_label_chunk(zclient, 0, lp->next_chunksize,
						 MPLS_LABEL_BASE_ANY) !=
		    ZCLIENT_SEND_FAILURE) {
			lp->pending_count += lp->next_chunksize;
			/* grow next request geometrically up to the cap */
			if ((lp->next_chunksize << 1) <= LP_CHUNK_SIZE_MAX)
				lp->next_chunksize <<= 1;
		}
	}
}
462
463
void bgp_lp_release(
464
  int   type,
465
  void    *labelid,
466
  mpls_label_t  label)
467
0
{
468
0
  struct lp_lcb *lcb;
469
470
0
  if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
471
0
    if (label == lcb->label && type == lcb->type) {
472
0
      struct listnode *node;
473
0
      struct lp_chunk *chunk;
474
0
      uintptr_t lbl = label;
475
0
      bool deallocated = false;
476
477
      /* no longer in use */
478
0
      skiplist_delete(lp->inuse, (void *)lbl, NULL);
479
480
      /* no longer requested */
481
0
      skiplist_delete(lp->ledger, labelid, NULL);
482
483
      /*
484
       * Find the chunk this label belongs to and
485
       * deallocate the label
486
       */
487
0
      for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
488
0
        uint32_t index;
489
490
0
        if ((label < chunk->first) ||
491
0
            (label > chunk->last))
492
0
          continue;
493
494
0
        index = label - chunk->first;
495
0
        assert(bf_test_index(chunk->allocated_map,
496
0
                 index));
497
0
        bf_release_index(chunk->allocated_map, index);
498
0
        chunk->nfree += 1;
499
0
        deallocated = true;
500
0
      }
501
0
      assert(deallocated);
502
0
    }
503
0
  }
504
0
}
505
506
/*
507
 * zebra response giving us a chunk of labels
508
 */
509
/*
 * Handle a label chunk [first, last] granted by zebra: register the
 * chunk and then satisfy as many queued requests as the new labels
 * allow, queueing a callback for each.
 *
 * Bug fix: in the "request no longer in effect" branch the ledger
 * lookup has just FAILED, so the local 'lcb' pointer is uninitialized;
 * the original code passed it to check_bgp_lu_cb_unlock() (undefined
 * behavior). The valid LCB in that branch is the snapshot stored in
 * the FIFO entry, so unlock via &lf->lcb instead.
 */
void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
{
	struct lp_chunk *chunk;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);
	struct lp_fifo *lf;
	uint32_t labelcount;

	if (last < first) {
		flog_err(EC_BGP_LABEL,
			 "%s: zebra label chunk invalid: first=%u, last=%u",
			 __func__, first, last);
		return;
	}

	chunk = XCALLOC(MTYPE_BGP_LABEL_CHUNK, sizeof(struct lp_chunk));

	labelcount = last - first + 1;

	chunk->first = first;
	chunk->last = last;
	chunk->nfree = labelcount;
	bf_init(chunk->allocated_map, labelcount);

	/*
	 * Optimize for allocation by adding the new (presumably larger)
	 * chunk at the head of the list so it is examined first.
	 */
	listnode_add_head(lp->chunks, chunk);

	lp->pending_count -= labelcount;

	if (debug) {
		zlog_debug("%s: %zu pending requests", __func__,
			lp_fifo_count(&lp->requests));
	}

	/* service queued requests until labels or requests run out */
	while (labelcount && (lf = lp_fifo_first(&lp->requests))) {

		struct lp_lcb *lcb;
		void *labelid = lf->lcb.labelid;

		if (skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
			/* request no longer in effect */

			if (debug) {
				zlog_debug("%s: labelid %p: request no longer in effect",
					   __func__, labelid);
			}
			/* if this was a BGP_LU request, unlock node.
			 * Use the FIFO's LCB copy: 'lcb' was not set by
			 * the failed ledger lookup above.
			 */
			check_bgp_lu_cb_unlock(&lf->lcb);
			goto finishedrequest;
		}

		/* have LCB */
		if (lcb->label != MPLS_LABEL_NONE) {
			/* request already has a label */
			if (debug) {
				zlog_debug("%s: labelid %p: request already has a label: %u=0x%x, lcb=%p",
					   __func__, labelid,
					   lcb->label, lcb->label, lcb);
			}
			/* if this was a BGP_LU request, unlock node
			 */
			check_bgp_lu_cb_unlock(lcb);

			goto finishedrequest;
		}

		lcb->label = get_label_from_pool(lcb->labelid);

		if (lcb->label == MPLS_LABEL_NONE) {
			/*
			 * Out of labels in local pool, await next chunk
			 */
			if (debug) {
				zlog_debug("%s: out of labels, await more",
					   __func__);
			}
			break;
		}

		labelcount -= 1;

		/*
		 * we filled the request from local pool.
		 * Enqueue response work item with new label.
		 */
		struct lp_cbq_item *q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
			sizeof(struct lp_cbq_item));

		q->cbfunc = lcb->cbfunc;
		q->type = lcb->type;
		q->label = lcb->label;
		q->labelid = lcb->labelid;
		q->allocated = true;

		if (debug)
			zlog_debug("%s: assigning label %u to labelid %p",
				__func__, q->label, q->labelid);

		work_queue_add(lp->callback_q, q);

finishedrequest:
		lp_fifo_del(&lp->requests, lf);
		XFREE(MTYPE_BGP_LABEL_FIFO, lf);
	}
}
617
618
/*
619
 * continue using allocated labels until zebra returns
620
 */
621
/*
 * Zebra connection lost: intentionally do nothing — existing label
 * assignments keep working until reconnect (see bgp_lp_event_zebra_up).
 */
void bgp_lp_event_zebra_down(void)
{
	/* rats. */
}
625
626
/*
627
 * Inform owners of previously-allocated labels that their labels
628
 * are not valid. Request chunk from zebra large enough to satisfy
629
 * previously-allocated labels plus any outstanding requests.
630
 */
631
/*
 * Zebra (re)connected: reconnect to the label manager, request a chunk
 * large enough for everything outstanding, discard the now-stale local
 * chunks, and invalidate every in-use label — each owner gets a "lost"
 * callback and its request is re-queued for automatic reassignment.
 */
void bgp_lp_event_zebra_up(void)
{
	unsigned int labels_needed;
	unsigned int chunks_needed;
	void *labelid;
	struct lp_lcb *lcb;
	int lm_init_ok;

	lp->reconnect_count++;
	/*
	 * Get label chunk allocation request dispatched to zebra
	 */
	labels_needed = lp_fifo_count(&lp->requests) +
		skiplist_count(lp->inuse);

	/* grow chunk size (capped) until one chunk covers the need */
	if (labels_needed > lp->next_chunksize) {
		while ((lp->next_chunksize < labels_needed) &&
		       (lp->next_chunksize << 1 <= LP_CHUNK_SIZE_MAX))

			lp->next_chunksize <<= 1;
	}

	/* round up */
	chunks_needed = (labels_needed / lp->next_chunksize) + 1;
	labels_needed = chunks_needed * lp->next_chunksize;

	lm_init_ok = lm_label_manager_connect(zclient, 1) == 0;

	if (!lm_init_ok) {
		zlog_err("%s: label manager connection error", __func__);
		return;
	}

	zclient_send_get_label_chunk(zclient, 0, labels_needed,
				     MPLS_LABEL_BASE_ANY);
	lp->pending_count = labels_needed;

	/*
	 * Invalidate current list of chunks
	 */
	list_delete_all_node(lp->chunks);

	/*
	 * Invalidate any existing labels and requeue them as requests
	 */
	while (!skiplist_first(lp->inuse, NULL, &labelid)) {

		/*
		 * Get LCB
		 */
		if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {

			if (lcb->label != MPLS_LABEL_NONE) {
				/*
				 * invalidate: notify owner the label is lost
				 */
				struct lp_cbq_item *q;

				q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
					sizeof(struct lp_cbq_item));
				q->cbfunc = lcb->cbfunc;
				q->type = lcb->type;
				q->label = lcb->label;
				q->labelid = lcb->labelid;
				q->allocated = false;
				/* hold LU node for the queued callback */
				check_bgp_lu_cb_lock(lcb);
				work_queue_add(lp->callback_q, q);

				lcb->label = MPLS_LABEL_NONE;
			}

			/*
			 * request queue: re-request a label for this id
			 */
			struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
				sizeof(struct lp_fifo));

			lf->lcb = *lcb;
			/* hold LU node for the queued request too */
			check_bgp_lu_cb_lock(lcb);
			lp_fifo_add_tail(&lp->requests, lf);
		}

		skiplist_delete_first(lp->inuse);
	}
}
716
717
/* vty: "show bgp labelpool summary [json]" — pool-wide counters. */
DEFUN(show_bgp_labelpool_summary, show_bgp_labelpool_summary_cmd,
      "show bgp labelpool summary [json]",
      SHOW_STR BGP_STR
      "BGP Labelpool information\n"
      "BGP Labelpool summary\n" JSON_STR)
{
	bool uj = use_json(argc, argv);
	json_object *json = NULL;

	if (!lp) {
		if (uj)
			vty_out(vty, "{}\n");
		else
			vty_out(vty, "No existing BGP labelpool\n");
		return (CMD_WARNING);
	}

	if (uj) {
		json = json_object_new_object();
		json_object_int_add(json, "ledger", skiplist_count(lp->ledger));
		json_object_int_add(json, "inUse", skiplist_count(lp->inuse));
		json_object_int_add(json, "requests",
				    lp_fifo_count(&lp->requests));
		json_object_int_add(json, "labelChunks", listcount(lp->chunks));
		json_object_int_add(json, "pending", lp->pending_count);
		json_object_int_add(json, "reconnects", lp->reconnect_count);
		/* vty_json() prints and frees json */
		vty_json(vty, json);
	} else {
		vty_out(vty, "Labelpool Summary\n");
		vty_out(vty, "-----------------\n");
		vty_out(vty, "%-13s %d\n",
			"Ledger:", skiplist_count(lp->ledger));
		vty_out(vty, "%-13s %d\n", "InUse:", skiplist_count(lp->inuse));
		vty_out(vty, "%-13s %zu\n",
			"Requests:", lp_fifo_count(&lp->requests));
		vty_out(vty, "%-13s %d\n",
			"LabelChunks:", listcount(lp->chunks));
		vty_out(vty, "%-13s %d\n", "Pending:", lp->pending_count);
		vty_out(vty, "%-13s %d\n", "Reconnects:", lp->reconnect_count);
	}
	return CMD_SUCCESS;
}
759
760
/*
 * vty: "show bgp labelpool ledger [json]" — walk lp->ledger and print
 * one line/object per LCB. For LP_TYPE_BGP_LU the labelid is a
 * struct bgp_dest * whose prefix is shown; VRF/nexthop entries print a
 * fixed tag instead of a prefix.
 */
DEFUN(show_bgp_labelpool_ledger, show_bgp_labelpool_ledger_cmd,
      "show bgp labelpool ledger [json]",
      SHOW_STR BGP_STR
      "BGP Labelpool information\n"
      "BGP Labelpool ledger\n" JSON_STR)
{
	bool uj = use_json(argc, argv);
	json_object *json = NULL, *json_elem = NULL;
	struct lp_lcb *lcb = NULL;
	struct bgp_dest *dest;
	void *cursor = NULL;
	const struct prefix *p;
	int rc, count;

	if (!lp) {
		if (uj)
			vty_out(vty, "{}\n");
		else
			vty_out(vty, "No existing BGP labelpool\n");
		return (CMD_WARNING);
	}

	if (uj) {
		count = skiplist_count(lp->ledger);
		if (!count) {
			vty_out(vty, "{}\n");
			return CMD_SUCCESS;
		}
		json = json_object_new_array();
	} else {
		vty_out(vty, "Prefix                Label\n");
		vty_out(vty, "---------------------------\n");
	}

	/* cursor-based walk of the ledger skiplist */
	for (rc = skiplist_next(lp->ledger, (void **)&dest, (void **)&lcb,
				&cursor);
	     !rc; rc = skiplist_next(lp->ledger, (void **)&dest, (void **)&lcb,
				     &cursor)) {
		if (uj) {
			json_elem = json_object_new_object();
			json_object_array_add(json, json_elem);
		}
		switch (lcb->type) {
		case LP_TYPE_BGP_LU:
			/* dest without the flag is stale — flag as INVALID */
			if (!CHECK_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED))
				if (uj) {
					json_object_string_add(
						json_elem, "prefix", "INVALID");
					json_object_int_add(json_elem, "label",
							    lcb->label);
				} else
					vty_out(vty, "%-18s         %u\n",
						"INVALID", lcb->label);
			else {
				p = bgp_dest_get_prefix(dest);
				if (uj) {
					json_object_string_addf(
						json_elem, "prefix", "%pFX", p);
					json_object_int_add(json_elem, "label",
							    lcb->label);
				} else
					vty_out(vty, "%-18pFX    %u\n", p,
						lcb->label);
			}
			break;
		case LP_TYPE_VRF:
			if (uj) {
				json_object_string_add(json_elem, "prefix",
						       "VRF");
				json_object_int_add(json_elem, "label",
						    lcb->label);
			} else
				vty_out(vty, "%-18s         %u\n", "VRF",
					lcb->label);

			break;
		case LP_TYPE_NEXTHOP:
			if (uj) {
				json_object_string_add(json_elem, "prefix",
						       "nexthop");
				json_object_int_add(json_elem, "label",
						    lcb->label);
			} else
				vty_out(vty, "%-18s         %u\n", "nexthop",
					lcb->label);
			break;
		}
	}
	if (uj)
		vty_json(vty, json);
	return CMD_SUCCESS;
}
852
853
DEFUN(show_bgp_labelpool_inuse, show_bgp_labelpool_inuse_cmd,
854
      "show bgp labelpool inuse [json]",
855
      SHOW_STR BGP_STR
856
      "BGP Labelpool information\n"
857
      "BGP Labelpool inuse\n" JSON_STR)
858
0
{
859
0
  bool uj = use_json(argc, argv);
860
0
  json_object *json = NULL, *json_elem = NULL;
861
0
  struct bgp_dest *dest;
862
0
  mpls_label_t label;
863
0
  struct lp_lcb *lcb;
864
0
  void *cursor = NULL;
865
0
  const struct prefix *p;
866
0
  int rc, count;
867
868
0
  if (!lp) {
869
0
    vty_out(vty, "No existing BGP labelpool\n");
870
0
    return (CMD_WARNING);
871
0
  }
872
0
  if (!lp) {
873
0
    if (uj)
874
0
      vty_out(vty, "{}\n");
875
0
    else
876
0
      vty_out(vty, "No existing BGP labelpool\n");
877
0
    return (CMD_WARNING);
878
0
  }
879
880
0
  if (uj) {
881
0
    count = skiplist_count(lp->inuse);
882
0
    if (!count) {
883
0
      vty_out(vty, "{}\n");
884
0
      return CMD_SUCCESS;
885
0
    }
886
0
    json = json_object_new_array();
887
0
  } else {
888
0
    vty_out(vty, "Prefix                Label\n");
889
0
    vty_out(vty, "---------------------------\n");
890
0
  }
891
0
  for (rc = skiplist_next(lp->inuse, (void **)&label, (void **)&dest,
892
0
        &cursor);
893
0
       !rc; rc = skiplist_next(lp->ledger, (void **)&label,
894
0
             (void **)&dest, &cursor)) {
895
0
    if (skiplist_search(lp->ledger, dest, (void **)&lcb))
896
0
      continue;
897
898
0
    if (uj) {
899
0
      json_elem = json_object_new_object();
900
0
      json_object_array_add(json, json_elem);
901
0
    }
902
903
0
    switch (lcb->type) {
904
0
    case LP_TYPE_BGP_LU:
905
0
      if (!CHECK_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED))
906
0
        if (uj) {
907
0
          json_object_string_add(
908
0
            json_elem, "prefix", "INVALID");
909
0
          json_object_int_add(json_elem, "label",
910
0
                  label);
911
0
        } else
912
0
          vty_out(vty, "INVALID         %u\n",
913
0
            label);
914
0
      else {
915
0
        p = bgp_dest_get_prefix(dest);
916
0
        if (uj) {
917
0
          json_object_string_addf(
918
0
            json_elem, "prefix", "%pFX", p);
919
0
          json_object_int_add(json_elem, "label",
920
0
                  label);
921
0
        } else
922
0
          vty_out(vty, "%-18pFX    %u\n", p,
923
0
            label);
924
0
      }
925
0
      break;
926
0
    case LP_TYPE_VRF:
927
0
      if (uj) {
928
0
        json_object_string_add(json_elem, "prefix",
929
0
                   "VRF");
930
0
        json_object_int_add(json_elem, "label", label);
931
0
      } else
932
0
        vty_out(vty, "%-18s         %u\n", "VRF",
933
0
          label);
934
0
      break;
935
0
    case LP_TYPE_NEXTHOP:
936
0
      if (uj) {
937
0
        json_object_string_add(json_elem, "prefix",
938
0
                   "nexthop");
939
0
        json_object_int_add(json_elem, "label", label);
940
0
      } else
941
0
        vty_out(vty, "%-18s         %u\n", "nexthop",
942
0
          label);
943
0
      break;
944
0
    }
945
0
  }
946
0
  if (uj)
947
0
    vty_json(vty, json);
948
0
  return CMD_SUCCESS;
949
0
}
950
951
/*
 * vty: "show bgp labelpool requests [json]" — list the labelids of all
 * requests still waiting in the FIFO for a label from zebra.
 */
DEFUN(show_bgp_labelpool_requests, show_bgp_labelpool_requests_cmd,
      "show bgp labelpool requests [json]",
      SHOW_STR BGP_STR
      "BGP Labelpool information\n"
      "BGP Labelpool requests\n" JSON_STR)
{
	bool uj = use_json(argc, argv);
	json_object *json = NULL, *json_elem = NULL;
	struct bgp_dest *dest;
	const struct prefix *p;
	struct lp_fifo *item, *next;
	int count;

	if (!lp) {
		if (uj)
			vty_out(vty, "{}\n");
		else
			vty_out(vty, "No existing BGP labelpool\n");
		return (CMD_WARNING);
	}

	if (uj) {
		count = lp_fifo_count(&lp->requests);
		if (!count) {
			vty_out(vty, "{}\n");
			return CMD_SUCCESS;
		}
		json = json_object_new_array();
	} else {
		vty_out(vty, "Prefix         \n");
		vty_out(vty, "----------------\n");
	}

	/* safe iteration: fetch next before examining the current item */
	for (item = lp_fifo_first(&lp->requests); item; item = next) {
		next = lp_fifo_next_safe(&lp->requests, item);
		/* for BGP_LU entries, labelid is the bgp_dest */
		dest = item->lcb.labelid;
		if (uj) {
			json_elem = json_object_new_object();
			json_object_array_add(json, json_elem);
		}
		switch (item->lcb.type) {
		case LP_TYPE_BGP_LU:
			if (!CHECK_FLAG(dest->flags,
					BGP_NODE_LABEL_REQUESTED)) {
				if (uj)
					json_object_string_add(
						json_elem, "prefix", "INVALID");
				else
					vty_out(vty, "INVALID\n");
			} else {
				p = bgp_dest_get_prefix(dest);
				if (uj)
					json_object_string_addf(
						json_elem, "prefix", "%pFX", p);
				else
					vty_out(vty, "%-18pFX\n", p);
			}
			break;
		case LP_TYPE_VRF:
			if (uj)
				json_object_string_add(json_elem, "prefix",
						       "VRF");
			else
				vty_out(vty, "VRF\n");
			break;
		case LP_TYPE_NEXTHOP:
			if (uj)
				json_object_string_add(json_elem, "prefix",
						       "nexthop");
			else
				vty_out(vty, "Nexthop\n");
			break;
		}
	}
	if (uj)
		vty_json(vty, json);
	return CMD_SUCCESS;
}
1029
1030
/*
 * vty: "show bgp labelpool chunks [json]" — list every chunk granted
 * by zebra with its label range, size, and free-label count.
 */
DEFUN(show_bgp_labelpool_chunks, show_bgp_labelpool_chunks_cmd,
      "show bgp labelpool chunks [json]",
      SHOW_STR BGP_STR
      "BGP Labelpool information\n"
      "BGP Labelpool chunks\n" JSON_STR)
{
	bool uj = use_json(argc, argv);
	json_object *json = NULL, *json_elem;
	struct listnode *node;
	struct lp_chunk *chunk;
	int count;

	if (!lp) {
		if (uj)
			vty_out(vty, "{}\n");
		else
			vty_out(vty, "No existing BGP labelpool\n");
		return (CMD_WARNING);
	}

	if (uj) {
		count = listcount(lp->chunks);
		if (!count) {
			vty_out(vty, "{}\n");
			return CMD_SUCCESS;
		}
		json = json_object_new_array();
	} else {
		vty_out(vty, "%10s %10s %10s %10s\n", "First", "Last", "Size",
			"nfree");
		vty_out(vty, "-------------------------------------------\n");
	}

	for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
		uint32_t size;

		/* inclusive range */
		size = chunk->last - chunk->first + 1;

		if (uj) {
			json_elem = json_object_new_object();
			json_object_array_add(json, json_elem);
			json_object_int_add(json_elem, "first", chunk->first);
			json_object_int_add(json_elem, "last", chunk->last);
			json_object_int_add(json_elem, "size", size);
			json_object_int_add(json_elem, "numberFree",
					    chunk->nfree);
		} else
			vty_out(vty, "%10u %10u %10u %10u\n", chunk->first,
				chunk->last, size, chunk->nfree);
	}
	if (uj)
		vty_json(vty, json);
	return CMD_SUCCESS;
}
1084
1085
/*
 * Print the label-per-nexthop cache for one AFI of the given bgp instance.
 * With 'detail', also list the paths attached to each cache entry.
 *
 * Fixes vs. previous version:
 * - the 'afi' parameter was overwritten inside the per-path loop
 *   (afi = family2afi(...)), so subsequent cache-entry iterations could
 *   call inet_ntop() with the wrong address family; a path-local afi is
 *   used instead.
 * - assert(dest && table) ran only after bgp_dest_table(dest) had already
 *   dereferenced dest; the checks now precede the uses.
 */
static void show_bgp_nexthop_label_afi(struct vty *vty, afi_t afi,
				       struct bgp *bgp, bool detail)
{
	struct bgp_label_per_nexthop_cache_head *tree;
	struct bgp_label_per_nexthop_cache *iter;
	void *src;
	char buf[PREFIX2STR_BUFFER];
	char labelstr[MPLS_LABEL_STRLEN];
	struct bgp_dest *dest;
	struct bgp_path_info *path;
	struct bgp *bgp_path;
	struct bgp_table *table;
	time_t tbuf;

	vty_out(vty, "Current BGP label nexthop cache for %s, VRF %s\n",
		afi2str(afi), bgp->name_pretty);

	tree = &bgp->mpls_labels_per_nexthop[afi];
	frr_each (bgp_label_per_nexthop_cache, tree, iter) {
		if (afi2family(afi) == AF_INET)
			src = (void *)&iter->nexthop.u.prefix4;
		else
			src = (void *)&iter->nexthop.u.prefix6;

		vty_out(vty, " %s, label %s #paths %u\n",
			inet_ntop(afi2family(afi), src, buf, sizeof(buf)),
			mpls_label2str(1, &iter->label, labelstr,
				       sizeof(labelstr), 0, true),
			iter->path_count);
		if (iter->nh)
			vty_out(vty, "  if %s\n",
				ifindex2ifname(iter->nh->ifindex,
					       iter->nh->vrf_id));
		/* last_update is monotonic; convert to wall-clock for ctime */
		tbuf = time(NULL) - (monotime(NULL) - iter->last_update);
		vty_out(vty, "  Last update: %s", ctime(&tbuf));
		if (!detail)
			continue;
		vty_out(vty, "  Paths:\n");
		LIST_FOREACH (path, &(iter->paths), label_nh_thread) {
			afi_t path_afi;
			safi_t safi;

			dest = path->net;
			assert(dest);
			table = bgp_dest_table(dest);
			assert(table);
			/* per-path afi/safi: do not clobber this
			 * function's 'afi' parameter
			 */
			path_afi =
				family2afi(bgp_dest_get_prefix(dest)->family);
			safi = table->safi;
			bgp_path = table->bgp;

			if (dest->pdest) {
				vty_out(vty, "    %d/%d %pBD RD ", path_afi,
					safi, dest);
				vty_out(vty, BGP_RD_AS_FORMAT(bgp->asnotation),
					(struct prefix_rd *)bgp_dest_get_prefix(
						dest->pdest));
				vty_out(vty, " %s flags 0x%x\n",
					bgp_path->name_pretty, path->flags);
			} else
				vty_out(vty, "    %d/%d %pBD %s flags 0x%x\n",
					path_afi, safi, dest,
					bgp_path->name_pretty, path->flags);
		}
	}
}
1148
1149
DEFPY(show_bgp_nexthop_label, show_bgp_nexthop_label_cmd,
1150
      "show bgp [<view|vrf> VIEWVRFNAME] label-nexthop [detail]",
1151
      SHOW_STR BGP_STR BGP_INSTANCE_HELP_STR
1152
      "BGP label per-nexthop table\n"
1153
      "Show detailed information\n")
1154
0
{
1155
0
  int idx = 0;
1156
0
  char *vrf = NULL;
1157
0
  struct bgp *bgp;
1158
0
  bool detail = false;
1159
0
  int afi;
1160
1161
0
  if (argv_find(argv, argc, "vrf", &idx)) {
1162
0
    vrf = argv[++idx]->arg;
1163
0
    bgp = bgp_lookup_by_name(vrf);
1164
0
  } else
1165
0
    bgp = bgp_get_default();
1166
1167
0
  if (!bgp)
1168
0
    return CMD_SUCCESS;
1169
1170
0
  if (argv_find(argv, argc, "detail", &idx))
1171
0
    detail = true;
1172
1173
0
  for (afi = AFI_IP; afi <= AFI_IP6; afi++)
1174
0
    show_bgp_nexthop_label_afi(vty, afi, bgp, detail);
1175
0
  return CMD_SUCCESS;
1176
0
}
1177
1178
#if BGP_LABELPOOL_ENABLE_TESTS
1179
/*------------------------------------------------------------------------
1180
 *      Testing code start
1181
 *------------------------------------------------------------------------*/
1182
1183
DEFINE_MTYPE_STATIC(BGPD, LABELPOOL_TEST, "Label pool test");
1184
1185
#define LPT_STAT_INSERT_FAIL 0
1186
#define LPT_STAT_DELETE_FAIL 1
1187
#define LPT_STAT_ALLOCATED 2
1188
#define LPT_STAT_DEALLOCATED 3
1189
#define LPT_STAT_MAX 4
1190
1191
const char *lpt_counter_names[] = {
1192
  "sl insert failures",
1193
  "sl delete failures",
1194
  "labels allocated",
1195
  "labels deallocated",
1196
};
1197
1198
static uint8_t lpt_generation;
1199
static bool lpt_inprogress;
1200
static struct skiplist *lp_tests;
1201
static unsigned int lpt_test_cb_tcb_lookup_fails;
1202
static unsigned int lpt_release_tcb_lookup_fails;
1203
static unsigned int lpt_test_event_tcb_lookup_fails;
1204
static unsigned int lpt_stop_tcb_lookup_fails;
1205
1206
struct lp_test {
1207
  uint8_t generation;
1208
  unsigned int request_maximum;
1209
  unsigned int request_blocksize;
1210
  uintptr_t request_count; /* match type of labelid */
1211
  int label_type;
1212
  struct skiplist *labels;
1213
  struct timeval starttime;
1214
  struct skiplist *timestamps_alloc;
1215
  struct skiplist *timestamps_dealloc;
1216
  struct event *event_thread;
1217
  unsigned int counter[LPT_STAT_MAX];
1218
};
1219
1220
/* test parameters */
1221
#define LPT_MAX_COUNT 500000  /* get this many labels in all */
1222
#define LPT_BLKSIZE 10000     /* this many at a time, then yield */
1223
#define LPT_TS_INTERVAL 10000 /* timestamp every this many labels */
1224
1225
1226
/*
 * Label allocation/release callback used by the performance test.
 * The labelid encodes the test generation (top byte) and request number.
 * Returns 0 on success, -1 to reject the allocation / flag an error.
 *
 * Fix: the deallocation timestamp skiplist was keyed by the ALLOCATED
 * counter (copy-paste from the alloc branch); it is now keyed by the
 * DEALLOCATED counter it actually samples.
 */
static int test_cb(mpls_label_t label, void *labelid, bool allocated)
{
	uintptr_t generation;
	struct lp_test *tcb;

	/* generation lives in bits 24..31 of the labelid */
	generation = ((uintptr_t)labelid >> 24) & 0xff;

	if (skiplist_search(lp_tests, (void *)generation, (void **)&tcb)) {

		/* couldn't find current test in progress */
		++lpt_test_cb_tcb_lookup_fails;
		return -1; /* reject allocation */
	}

	if (allocated) {
		++tcb->counter[LPT_STAT_ALLOCATED];
		if (!(tcb->counter[LPT_STAT_ALLOCATED] % LPT_TS_INTERVAL)) {
			uintptr_t time_ms;

			time_ms = monotime_since(&tcb->starttime, NULL) / 1000;
			skiplist_insert(tcb->timestamps_alloc,
					(void *)(uintptr_t)tcb
						->counter[LPT_STAT_ALLOCATED],
					(void *)time_ms);
		}
		if (skiplist_insert(tcb->labels, labelid,
				    (void *)(uintptr_t)label)) {
			++tcb->counter[LPT_STAT_INSERT_FAIL];
			return -1;
		}
	} else {
		++tcb->counter[LPT_STAT_DEALLOCATED];
		if (!(tcb->counter[LPT_STAT_DEALLOCATED] % LPT_TS_INTERVAL)) {
			uintptr_t time_ms;

			time_ms = monotime_since(&tcb->starttime, NULL) / 1000;
			/* key by the dealloc counter, not the alloc counter */
			skiplist_insert(tcb->timestamps_dealloc,
					(void *)(uintptr_t)tcb
						->counter[LPT_STAT_DEALLOCATED],
					(void *)time_ms);
		}
		if (skiplist_delete(tcb->labels, labelid, 0)) {
			++tcb->counter[LPT_STAT_DELETE_FAIL];
			return -1;
		}
	}
	return 0;
}
1274
1275
static void labelpool_test_event_handler(struct event *thread)
1276
{
1277
  struct lp_test *tcb;
1278
1279
  if (skiplist_search(lp_tests, (void *)(uintptr_t)(lpt_generation),
1280
          (void **)&tcb)) {
1281
1282
    /* couldn't find current test in progress */
1283
    ++lpt_test_event_tcb_lookup_fails;
1284
    return;
1285
  }
1286
1287
  /*
1288
   * request a bunch of labels
1289
   */
1290
  for (unsigned int i = 0; (i < tcb->request_blocksize) &&
1291
         (tcb->request_count < tcb->request_maximum);
1292
       ++i) {
1293
1294
    uintptr_t id;
1295
1296
    ++tcb->request_count;
1297
1298
    /*
1299
     * construct 32-bit id from request_count and generation
1300
     */
1301
    id = ((uintptr_t)tcb->generation << 24) |
1302
         (tcb->request_count & 0x00ffffff);
1303
    bgp_lp_get(LP_TYPE_VRF, (void *)id, test_cb);
1304
  }
1305
1306
  if (tcb->request_count < tcb->request_maximum)
1307
    thread_add_event(bm->master, labelpool_test_event_handler, NULL,
1308
         0, &tcb->event_thread);
1309
}
1310
1311
static void lptest_stop(void)
1312
{
1313
  struct lp_test *tcb;
1314
1315
  if (!lpt_inprogress)
1316
    return;
1317
1318
  if (skiplist_search(lp_tests, (void *)(uintptr_t)(lpt_generation),
1319
          (void **)&tcb)) {
1320
1321
    /* couldn't find current test in progress */
1322
    ++lpt_stop_tcb_lookup_fails;
1323
    return;
1324
  }
1325
1326
  if (tcb->event_thread)
1327
    event_cancel(&tcb->event_thread);
1328
1329
  lpt_inprogress = false;
1330
}
1331
1332
static int lptest_start(struct vty *vty)
1333
{
1334
  struct lp_test *tcb;
1335
1336
  if (lpt_inprogress) {
1337
    vty_out(vty, "test already in progress\n");
1338
    return -1;
1339
  }
1340
1341
  if (skiplist_count(lp_tests) >=
1342
      (1 << (8 * sizeof(lpt_generation))) - 1) {
1343
    /*
1344
     * Too many test runs
1345
     */
1346
    vty_out(vty, "too many tests: clear first\n");
1347
    return -1;
1348
  }
1349
1350
  /*
1351
   * We pack the generation and request number into the labelid;
1352
   * make sure they fit.
1353
   */
1354
  unsigned int n1 = LPT_MAX_COUNT;
1355
  unsigned int sh = 0;
1356
  unsigned int label_bits;
1357
1358
  label_bits = 8 * (sizeof(tcb->request_count) - sizeof(lpt_generation));
1359
1360
  /* n1 should be same type as tcb->request_maximum */
1361
  assert(sizeof(n1) == sizeof(tcb->request_maximum));
1362
1363
  while (n1 >>= 1)
1364
    ++sh;
1365
  sh += 1; /* number of bits needed to hold LPT_MAX_COUNT */
1366
1367
  if (sh > label_bits) {
1368
    vty_out(vty,
1369
      "Sorry, test iteration count too big on this platform (LPT_MAX_COUNT %u, need %u bits, but label_bits is only %u)\n",
1370
      LPT_MAX_COUNT, sh, label_bits);
1371
    return -1;
1372
  }
1373
1374
  lpt_inprogress = true;
1375
  ++lpt_generation;
1376
1377
  tcb = XCALLOC(MTYPE_LABELPOOL_TEST, sizeof(*tcb));
1378
1379
  tcb->generation = lpt_generation;
1380
  tcb->label_type = LP_TYPE_VRF;
1381
  tcb->request_maximum = LPT_MAX_COUNT;
1382
  tcb->request_blocksize = LPT_BLKSIZE;
1383
  tcb->labels = skiplist_new(0, NULL, NULL);
1384
  tcb->timestamps_alloc = skiplist_new(0, NULL, NULL);
1385
  tcb->timestamps_dealloc = skiplist_new(0, NULL, NULL);
1386
  thread_add_event(bm->master, labelpool_test_event_handler, NULL, 0,
1387
       &tcb->event_thread);
1388
  monotime(&tcb->starttime);
1389
1390
  skiplist_insert(lp_tests, (void *)(uintptr_t)tcb->generation, tcb);
1391
  return 0;
1392
}
1393
1394
DEFPY(start_labelpool_perf_test, start_labelpool_perf_test_cmd,
1395
      "debug bgp lptest start",
1396
      DEBUG_STR BGP_STR
1397
      "label pool test\n"
1398
      "start\n")
1399
{
1400
  lptest_start(vty);
1401
  return CMD_SUCCESS;
1402
}
1403
1404
static void lptest_print_stats(struct vty *vty, struct lp_test *tcb)
1405
{
1406
  unsigned int i;
1407
1408
  vty_out(vty, "Global Lookup Failures in test_cb: %5u\n",
1409
    lpt_test_cb_tcb_lookup_fails);
1410
  vty_out(vty, "Global Lookup Failures in release: %5u\n",
1411
    lpt_release_tcb_lookup_fails);
1412
  vty_out(vty, "Global Lookup Failures in event:   %5u\n",
1413
    lpt_test_event_tcb_lookup_fails);
1414
  vty_out(vty, "Global Lookup Failures in stop:    %5u\n",
1415
    lpt_stop_tcb_lookup_fails);
1416
  vty_out(vty, "\n");
1417
1418
  if (!tcb) {
1419
    if (skiplist_search(lp_tests, (void *)(uintptr_t)lpt_generation,
1420
            (void **)&tcb)) {
1421
      vty_out(vty, "Error: can't find test %u\n",
1422
        lpt_generation);
1423
      return;
1424
    }
1425
  }
1426
1427
  vty_out(vty, "Test Generation %u:\n", tcb->generation);
1428
1429
  vty_out(vty, "Counter   Value\n");
1430
  for (i = 0; i < LPT_STAT_MAX; ++i) {
1431
    vty_out(vty, "%20s: %10u\n", lpt_counter_names[i],
1432
      tcb->counter[i]);
1433
  }
1434
  vty_out(vty, "\n");
1435
1436
  if (tcb->timestamps_alloc) {
1437
    void *Key;
1438
    void *Value;
1439
    void *cursor;
1440
1441
    float elapsed;
1442
1443
    vty_out(vty, "%10s %10s\n", "Count", "Seconds");
1444
1445
    cursor = NULL;
1446
    while (!skiplist_next(tcb->timestamps_alloc, &Key, &Value,
1447
              &cursor)) {
1448
1449
      elapsed = ((float)(uintptr_t)Value) / 1000;
1450
1451
      vty_out(vty, "%10llu %10.3f\n",
1452
        (unsigned long long)(uintptr_t)Key, elapsed);
1453
    }
1454
    vty_out(vty, "\n");
1455
  }
1456
}
1457
1458
DEFPY(show_labelpool_perf_test, show_labelpool_perf_test_cmd,
1459
      "debug bgp lptest show",
1460
      DEBUG_STR BGP_STR
1461
      "label pool test\n"
1462
      "show\n")
1463
{
1464
1465
  if (lp_tests) {
1466
    void *Key;
1467
    void *Value;
1468
    void *cursor;
1469
1470
    cursor = NULL;
1471
    while (!skiplist_next(lp_tests, &Key, &Value, &cursor)) {
1472
      lptest_print_stats(vty, (struct lp_test *)Value);
1473
    }
1474
  } else {
1475
    vty_out(vty, "no test results\n");
1476
  }
1477
  return CMD_SUCCESS;
1478
}
1479
1480
DEFPY(stop_labelpool_perf_test, stop_labelpool_perf_test_cmd,
1481
      "debug bgp lptest stop",
1482
      DEBUG_STR BGP_STR
1483
      "label pool test\n"
1484
      "stop\n")
1485
{
1486
1487
  if (lpt_inprogress) {
1488
    lptest_stop();
1489
    lptest_print_stats(vty, NULL);
1490
  } else {
1491
    vty_out(vty, "no test in progress\n");
1492
  }
1493
  return CMD_SUCCESS;
1494
}
1495
1496
DEFPY(clear_labelpool_perf_test, clear_labelpool_perf_test_cmd,
1497
      "debug bgp lptest clear",
1498
      DEBUG_STR BGP_STR
1499
      "label pool test\n"
1500
      "clear\n")
1501
{
1502
1503
  if (lpt_inprogress) {
1504
    lptest_stop();
1505
  }
1506
  if (lp_tests) {
1507
    while (!skiplist_first(lp_tests, NULL, NULL))
1508
      /* del function of skiplist cleans up tcbs */
1509
      skiplist_delete_first(lp_tests);
1510
  }
1511
  return CMD_SUCCESS;
1512
}
1513
1514
/*
1515
 * With the "release" command, we can release labels at intervals through
1516
 * the ID space. Thus we can to exercise the bitfield-wrapping behavior
1517
 * of the allocator in a subsequent test.
1518
 */
1519
/* clang-format off */
1520
DEFPY(release_labelpool_perf_test, release_labelpool_perf_test_cmd,
1521
      "debug bgp lptest release test GENERATION$generation every (1-5)$every_nth",
1522
      DEBUG_STR
1523
      BGP_STR
1524
      "label pool test\n"
1525
      "release labels\n"
1526
      "\"test\"\n"
1527
      "test number\n"
1528
      "\"every\"\n"
1529
      "label fraction denominator\n")
1530
{
1531
  /* clang-format on */
1532
1533
  unsigned long testnum;
1534
  char *end;
1535
  struct lp_test *tcb;
1536
1537
  testnum = strtoul(generation, &end, 0);
1538
  if (*end) {
1539
    vty_out(vty, "Invalid test number: \"%s\"\n", generation);
1540
    return CMD_SUCCESS;
1541
  }
1542
  if (lpt_inprogress && (testnum == lpt_generation)) {
1543
    vty_out(vty,
1544
      "Error: Test %lu is still in progress (stop first)\n",
1545
      testnum);
1546
    return CMD_SUCCESS;
1547
  }
1548
1549
  if (skiplist_search(lp_tests, (void *)(uintptr_t)testnum,
1550
          (void **)&tcb)) {
1551
1552
    /* couldn't find current test in progress */
1553
    vty_out(vty, "Error: Can't look up test number: \"%lu\"\n",
1554
      testnum);
1555
    ++lpt_release_tcb_lookup_fails;
1556
    return CMD_SUCCESS;
1557
  }
1558
1559
  void *Key, *cKey;
1560
  void *Value, *cValue;
1561
  void *cursor;
1562
  unsigned int iteration;
1563
  int rc;
1564
1565
  cursor = NULL;
1566
  iteration = 0;
1567
  rc = skiplist_next(tcb->labels, &Key, &Value, &cursor);
1568
1569
  while (!rc) {
1570
    cKey = Key;
1571
    cValue = Value;
1572
1573
    /* find next item before we delete this one */
1574
    rc = skiplist_next(tcb->labels, &Key, &Value, &cursor);
1575
1576
    if (!(iteration % every_nth)) {
1577
      bgp_lp_release(tcb->label_type, cKey,
1578
               (mpls_label_t)(uintptr_t)cValue);
1579
      skiplist_delete(tcb->labels, cKey, NULL);
1580
      ++tcb->counter[LPT_STAT_DEALLOCATED];
1581
    }
1582
    ++iteration;
1583
  }
1584
1585
  return CMD_SUCCESS;
1586
}
1587
1588
static void lptest_delete(void *val)
1589
{
1590
  struct lp_test *tcb = (struct lp_test *)val;
1591
  void *Key;
1592
  void *Value;
1593
  void *cursor;
1594
1595
  if (tcb->labels) {
1596
    cursor = NULL;
1597
    while (!skiplist_next(tcb->labels, &Key, &Value, &cursor))
1598
      bgp_lp_release(tcb->label_type, Key,
1599
               (mpls_label_t)(uintptr_t)Value);
1600
    skiplist_free(tcb->labels);
1601
    tcb->labels = NULL;
1602
  }
1603
  if (tcb->timestamps_alloc) {
1604
    cursor = NULL;
1605
    skiplist_free(tcb->timestamps_alloc);
1606
    tcb->timestamps_alloc = NULL;
1607
  }
1608
1609
  if (tcb->timestamps_dealloc) {
1610
    cursor = NULL;
1611
    skiplist_free(tcb->timestamps_dealloc);
1612
    tcb->timestamps_dealloc = NULL;
1613
  }
1614
1615
  if (tcb->event_thread)
1616
    event_cancel(&tcb->event_thread);
1617
1618
  memset(tcb, 0, sizeof(*tcb));
1619
1620
  XFREE(MTYPE_LABELPOOL_TEST, tcb);
1621
}
1622
1623
static void lptest_init(void)
1624
{
1625
  lp_tests = skiplist_new(0, NULL, lptest_delete);
1626
}
1627
1628
static void lptest_finish(void)
1629
{
1630
  if (lp_tests) {
1631
    skiplist_free(lp_tests);
1632
    lp_tests = NULL;
1633
  }
1634
}
1635
1636
/*------------------------------------------------------------------------
1637
 *      Testing code end
1638
 *------------------------------------------------------------------------*/
1639
#endif /* BGP_LABELPOOL_ENABLE_TESTS */
1640
1641
void bgp_lp_vty_init(void)
1642
0
{
1643
0
  install_element(VIEW_NODE, &show_bgp_labelpool_summary_cmd);
1644
0
  install_element(VIEW_NODE, &show_bgp_labelpool_ledger_cmd);
1645
0
  install_element(VIEW_NODE, &show_bgp_labelpool_inuse_cmd);
1646
0
  install_element(VIEW_NODE, &show_bgp_labelpool_requests_cmd);
1647
0
  install_element(VIEW_NODE, &show_bgp_labelpool_chunks_cmd);
1648
1649
#if BGP_LABELPOOL_ENABLE_TESTS
1650
  install_element(ENABLE_NODE, &start_labelpool_perf_test_cmd);
1651
  install_element(ENABLE_NODE, &show_labelpool_perf_test_cmd);
1652
  install_element(ENABLE_NODE, &stop_labelpool_perf_test_cmd);
1653
  install_element(ENABLE_NODE, &release_labelpool_perf_test_cmd);
1654
  install_element(ENABLE_NODE, &clear_labelpool_perf_test_cmd);
1655
#endif /* BGP_LABELPOOL_ENABLE_TESTS */
1656
0
}
1657
1658
DEFINE_MTYPE_STATIC(BGPD, LABEL_PER_NEXTHOP_CACHE,
1659
        "BGP Label Per Nexthop entry");
1660
1661
/* The nexthops values are compared to
1662
 * find in the tree the appropriate cache entry
1663
 */
1664
int bgp_label_per_nexthop_cache_cmp(const struct bgp_label_per_nexthop_cache *a,
1665
            const struct bgp_label_per_nexthop_cache *b)
1666
0
{
1667
0
  return prefix_cmp(&a->nexthop, &b->nexthop);
1668
0
}
1669
1670
struct bgp_label_per_nexthop_cache *
1671
bgp_label_per_nexthop_new(struct bgp_label_per_nexthop_cache_head *tree,
1672
        struct prefix *nexthop)
1673
0
{
1674
0
  struct bgp_label_per_nexthop_cache *blnc;
1675
1676
0
  blnc = XCALLOC(MTYPE_LABEL_PER_NEXTHOP_CACHE,
1677
0
           sizeof(struct bgp_label_per_nexthop_cache));
1678
0
  blnc->tree = tree;
1679
0
  blnc->label = MPLS_INVALID_LABEL;
1680
0
  prefix_copy(&blnc->nexthop, nexthop);
1681
0
  LIST_INIT(&(blnc->paths));
1682
0
  bgp_label_per_nexthop_cache_add(tree, blnc);
1683
1684
0
  return blnc;
1685
0
}
1686
1687
struct bgp_label_per_nexthop_cache *
1688
bgp_label_per_nexthop_find(struct bgp_label_per_nexthop_cache_head *tree,
1689
         struct prefix *nexthop)
1690
0
{
1691
0
  struct bgp_label_per_nexthop_cache blnc = {};
1692
1693
0
  if (!tree)
1694
0
    return NULL;
1695
1696
0
  memcpy(&blnc.nexthop, nexthop, sizeof(struct prefix));
1697
0
  return bgp_label_per_nexthop_cache_find(tree, &blnc);
1698
0
}
1699
1700
void bgp_label_per_nexthop_free(struct bgp_label_per_nexthop_cache *blnc)
1701
0
{
1702
0
  if (blnc->label != MPLS_INVALID_LABEL) {
1703
0
    bgp_zebra_send_nexthop_label(ZEBRA_MPLS_LABELS_DELETE,
1704
0
               blnc->label, blnc->nh->ifindex,
1705
0
               blnc->nh->vrf_id, ZEBRA_LSP_BGP,
1706
0
               &blnc->nexthop);
1707
0
    bgp_lp_release(LP_TYPE_NEXTHOP, blnc, blnc->label);
1708
0
  }
1709
0
  bgp_label_per_nexthop_cache_del(blnc->tree, blnc);
1710
0
  if (blnc->nh)
1711
0
    nexthop_free(blnc->nh);
1712
0
  blnc->nh = NULL;
1713
0
  XFREE(MTYPE_LABEL_PER_NEXTHOP_CACHE, blnc);
1714
0
}
1715
1716
void bgp_label_per_nexthop_init(void)
1717
1
{
1718
1
  install_element(VIEW_NODE, &show_bgp_nexthop_label_cmd);
1719
1
}