Coverage Report

Created: 2025-08-26 06:59

/src/bind9/lib/isc/quota.c
Every instrumented line in this file reports an execution count of 0, i.e. nothing in quota.c was exercised by this run. The per-line Line/Count columns are therefore omitted below and the source is shown as a plain listing.

/*
 * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
 *
 * SPDX-License-Identifier: MPL-2.0
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, you can obtain one at https://mozilla.org/MPL/2.0/.
 *
 * See the COPYRIGHT file distributed with this work for additional
 * information regarding copyright ownership.
 */

/*! \file */

#include <stddef.h>

#include <isc/atomic.h>
#include <isc/quota.h>
#include <isc/urcu.h>
#include <isc/util.h>

#define QUOTA_MAGIC    ISC_MAGIC('Q', 'U', 'O', 'T')
#define VALID_QUOTA(p) ISC_MAGIC_VALID(p, QUOTA_MAGIC)

void
isc_quota_init(isc_quota_t *quota, unsigned int max) {
  atomic_init(&quota->max, max);
  atomic_init(&quota->used, 0);
  atomic_init(&quota->soft, 0);
  cds_wfcq_init(&quota->jobs.head, &quota->jobs.tail);
  ISC_LINK_INIT(quota, link);
  quota->magic = QUOTA_MAGIC;
}

void
isc_quota_soft(isc_quota_t *quota, unsigned int soft) {
  REQUIRE(VALID_QUOTA(quota));
  atomic_store_relaxed(&quota->soft, soft);
}

void
isc_quota_max(isc_quota_t *quota, unsigned int max) {
  REQUIRE(VALID_QUOTA(quota));
  atomic_store_relaxed(&quota->max, max);
}

unsigned int
isc_quota_getmax(isc_quota_t *quota) {
  REQUIRE(VALID_QUOTA(quota));
  return atomic_load_relaxed(&quota->max);
}

unsigned int
isc_quota_getsoft(isc_quota_t *quota) {
  REQUIRE(VALID_QUOTA(quota));
  return atomic_load_relaxed(&quota->soft);
}

unsigned int
isc_quota_getused(isc_quota_t *quota) {
  REQUIRE(VALID_QUOTA(quota));
  return atomic_load_acquire(&quota->used);
}

void
isc_quota_release(isc_quota_t *quota) {
  struct cds_wfcq_node *node;
  /*
   * We are using the cds_wfcq_dequeue_blocking() variant here that
   * has an internal mutex because we need synchronization on
   * multiple dequeues running from different threads.
   *
   * NOTE: cds_wfcq_dequeue_blocking() checks whether the queue is
   * empty with cds_wfcq_empty() before acquiring the internal lock,
   * so if there's nothing queued, the call should be very lightweight.
   */
again:
  node = cds_wfcq_dequeue_blocking(&quota->jobs.head, &quota->jobs.tail);
  if (node == NULL) {
    uint_fast32_t used = atomic_fetch_sub_acq_rel(&quota->used, 1);
    INSIST(used > 0);

    /*
     * If this was the last quota released and in the meantime a
     * new job has appeared in the queue, then give it a chance
     * to run, otherwise it could get stuck there until a new quota
     * is acquired and released again.
     */
    if (used == 1 &&
        !cds_wfcq_empty(&quota->jobs.head, &quota->jobs.tail))
    {
      atomic_fetch_add_acq_rel(&quota->used, 1);
      goto again;
    }

    return;
  }

  isc_job_t *job = caa_container_of(node, isc_job_t, wfcq_node);
  job->cb(job->cbarg);
}
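
One behaviour of isc_quota_release() worth spelling out: when it dequeues a waiting job, it does not decrement quota->used; the slot being released is handed to the dequeued job, whose callback runs synchronously in the releasing thread. A queued callback therefore already owns a quota slot and must release it itself when its work is done. Below is a minimal sketch of such a callback, assuming the headers from the listing above; do_deferred_work() is a hypothetical placeholder.

/*
 * Runs from inside isc_quota_release() on the releasing thread.  The
 * slot given up by the releaser has been transferred to this job, so
 * the callback must eventually call isc_quota_release() itself.
 */
static void
deferred_cb(void *cbarg) {
  isc_quota_t *quota = cbarg;

  do_deferred_work();           /* hypothetical work */
  isc_quota_release(quota);     /* pass the slot on or free it */
}
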
isc_result_t
isc_quota_acquire_cb(isc_quota_t *quota, isc_job_t *job, isc_job_cb cb,
         void *cbarg) {
  REQUIRE(VALID_QUOTA(quota));
  REQUIRE(job == NULL || cb != NULL);

  uint_fast32_t used = atomic_fetch_add_acq_rel(&quota->used, 1);
  uint_fast32_t max = atomic_load_relaxed(&quota->max);
  if (max != 0 && used >= max) {
    (void)atomic_fetch_sub_acq_rel(&quota->used, 1);
    if (job != NULL) {
      job->cb = cb;
      job->cbarg = cbarg;
      cds_wfcq_node_init(&job->wfcq_node);

      /*
       * The cds_wfcq_enqueue() is non-blocking (no internal
       * mutex involved), so it offers a slight advantage.
       */
      cds_wfcq_enqueue(&quota->jobs.head, &quota->jobs.tail,
           &job->wfcq_node);

      /*
       * While we were initializing and enqueuing a new node,
       * quotas might have been released, and if no quota is
       * used any more, then our newly enqueued job won't
       * have a chance to get running until a new quota is
       * acquired and released. To avoid a hangup, check
       * quota->used again, if it's 0 then simulate a quota
       * acquire/release for the current job to run as soon as
       * possible, although we will still return ISC_R_QUOTA
       * to the caller.
       */
      if (atomic_compare_exchange_strong_acq_rel(
            &quota->used, &(uint_fast32_t){ 0 }, 1))
      {
        isc_quota_release(quota);
      }
    }
    return ISC_R_QUOTA;
  }

  uint_fast32_t soft = atomic_load_relaxed(&quota->soft);
  if (soft != 0 && used >= soft) {
    return ISC_R_SOFTQUOTA;
  }

  return ISC_R_SUCCESS;
}
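
To make the three return values concrete, here is a hedged caller-side sketch of how isc_quota_acquire_cb() might be used, for example when accepting connections. Only the quota functions, isc_job_t/isc_job_cb and the result codes come from the code above; connection_t, quota_job, handle_connection() and on_slot_free() are hypothetical names introduced for illustration.

/* Hypothetical per-connection state carrying storage for a queued job. */
typedef struct connection {
  isc_job_t quota_job;
  /* ... other per-connection state ... */
} connection_t;

static void
on_slot_free(void *cbarg) {
  connection_t *conn = cbarg;   /* hypothetical caller context */

  /* We now hold the quota slot handed over by isc_quota_release(). */
  handle_connection(conn);      /* hypothetical */
}

static void
try_accept(isc_quota_t *quota, connection_t *conn) {
  switch (isc_quota_acquire_cb(quota, &conn->quota_job, on_slot_free, conn)) {
  case ISC_R_SUCCESS:
    /* A slot was acquired; proceed and release it when finished. */
    handle_connection(conn);
    break;
  case ISC_R_SOFTQUOTA:
    /* A slot was still acquired, but usage had reached the soft
     * limit; proceed, though the caller may want to shed load. */
    handle_connection(conn);
    break;
  case ISC_R_QUOTA:
    /* The hard limit was reached.  Because a non-NULL job was passed,
     * the request was queued and on_slot_free() will run from a later
     * isc_quota_release(); do nothing here. */
    break;
  default:
    break;
  }
}
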
void
isc_quota_destroy(isc_quota_t *quota) {
  REQUIRE(VALID_QUOTA(quota));
  quota->magic = 0;

  INSIST(atomic_load_acquire(&quota->used) == 0);
  INSIST(cds_wfcq_empty(&quota->jobs.head, &quota->jobs.tail));

  cds_wfcq_destroy(&quota->jobs.head, &quota->jobs.tail);
}
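
Finally, a minimal lifecycle sketch under the same assumptions; the limits (100 hard, 90 soft) and the listen_quota/setup/teardown names are arbitrary choices for illustration.

static isc_quota_t listen_quota;        /* hypothetical global */

static void
setup(void) {
  isc_quota_init(&listen_quota, 100);   /* hard limit: at most 100 concurrent holders */
  isc_quota_soft(&listen_quota, 90);    /* soft limit: acquisitions still succeed,
                                           but return ISC_R_SOFTQUOTA from here on */
}

static void
teardown(void) {
  /*
   * isc_quota_destroy() INSISTs that quota->used is 0 and that the job
   * queue is empty, so every acquisition must have been released (and
   * every queued job must have run) before this point.
   */
  isc_quota_destroy(&listen_quota);
}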