Coverage Report

Created: 2025-07-01 06:50

/src/openvswitch/lib/id-fpool.c
Execution count: 0 for every instrumented line (the file is entirely uncovered).
/*
 * Copyright (c) 2021 NVIDIA Corporation.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <config.h>

#include "openvswitch/list.h"
#include "openvswitch/thread.h"
#include "openvswitch/util.h"
#include "ovs-atomic.h"
#include "id-fpool.h"
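/* The critical sections below are short, so prefer pthread spinlocks when
 * the platform provides them, falling back to OVS mutexes otherwise. */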
#ifdef HAVE_PTHREAD_SPIN_LOCK
#define id_fpool_lock_type ovs_spin
#define id_fpool_lock_init(l) do { ovs_spin_init(l); } while (0)
#define id_fpool_lock_destroy(l) do { ovs_spin_destroy(l); } while (0)
#define id_fpool_lock(l) do { ovs_spin_lock(l); } while (0)
#define id_fpool_unlock(l) do { ovs_spin_unlock(l); } while (0)
#else
#define id_fpool_lock_type ovs_mutex
#define id_fpool_lock_init(l) do { ovs_mutex_init(l); } while (0)
#define id_fpool_lock_destroy(l) do { ovs_mutex_destroy(l); } while (0)
#define id_fpool_lock(l) do { ovs_mutex_lock(l); } while (0)
#define id_fpool_unlock(l) do { ovs_mutex_unlock(l); } while (0)
#endif
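/* A slab holds a batch of up to ID_FPOOL_CACHE_SIZE free IDs. Slabs move
 * wholesale between the per-user caches and the shared free list, which
 * amortizes the cost of taking the global pool lock. */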
struct id_slab {
    struct ovs_list node;
    uint32_t pos;
    uint32_t ids[ID_FPOOL_CACHE_SIZE];
};
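/* Per-user ID cache: one slab guarded by its own lock, padded to a full
 * cache line to avoid false sharing between users. */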
struct per_user {
    PADDED_MEMBERS(CACHE_LINE_SIZE,
        struct id_fpool_lock_type user_lock;
        struct id_slab *slab;
    );
};
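/* The pool itself: a fixed ID range, a shared list of free slabs, and one
 * per-user cache allocated inline at the end of the structure. */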
struct id_fpool {
    /* Constants */
    uint32_t floor; /* IDs are in the range of [floor, ceiling). */
    uint32_t ceiling;
    size_t nb_user; /* Number of concurrent users. */

    /* Shared mutable data protected by global lock. */
    struct id_fpool_lock_type pool_lock;
    struct ovs_list free_slabs;
    uint32_t next_id;

    /* Per-user mutable data protected by user locks. */
    struct per_user per_users[0];
};
/* Lock precedence is
 * 1: per_users.user_lock
 * 2: pool_lock
 */
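/* Allocates a new slab and fills it with up to ID_FPOOL_CACHE_SIZE IDs
 * taken from '*next_id', advancing '*next_id' past the consumed range.
 * IDs are stored in reverse so that they pop out in ascending order.
 * Returns NULL if '*next_id' has already reached 'max'. */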
static struct id_slab *
id_slab_create(uint32_t *next_id, uint32_t max)
{
    struct id_slab *slab;
    size_t n_ids;
    size_t pos;

    if (next_id[0] == max) {
        return NULL;
    }

    n_ids = max - next_id[0];
    slab = xmalloc(sizeof *slab);
    ovs_list_init(&slab->node);
    slab->pos = 0;

    for (pos = MIN(n_ids, ARRAY_SIZE(slab->ids)); pos > 0; pos--) {
        slab->ids[pos - 1] = next_id[0];
        next_id[0]++;
        slab->pos++;
    }

    return slab;
}
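/* Pushes 'id' onto 'slab'. Returns false if 'slab' is NULL or full. */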
static bool
id_slab_insert(struct id_slab *slab, uint32_t id)
{
    if (slab == NULL) {
        return false;
    }
    if (slab->pos >= ARRAY_SIZE(slab->ids)) {
        return false;
    }
    slab->ids[slab->pos++] = id;
    return true;
}
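/* Pops an ID from 'slab' into '*id'. Returns false if 'slab' is NULL
 * or empty. */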
static bool
id_slab_remove(struct id_slab *slab, uint32_t *id)
{
    if (slab == NULL) {
        return false;
    }
    if (slab->pos == 0) {
        return false;
    }
    *id = slab->ids[--slab->pos];
    return true;
}
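/* Initializes a user cache and seeds its slab from the shared counter. */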
static void
per_user_init(struct per_user *pu, uint32_t *next_id, uint32_t max)
{
    id_fpool_lock_init(&pu->user_lock);
    pu->slab = id_slab_create(next_id, max);
}
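/* Frees the remaining slab of a user cache and destroys its lock. */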
static void
per_user_destroy(struct per_user *pu)
{
    id_fpool_lock(&pu->user_lock);
    free(pu->slab);
    pu->slab = NULL;
    id_fpool_unlock(&pu->user_lock);
    id_fpool_lock_destroy(&pu->user_lock);
}
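/* Creates a pool of 'n_ids' IDs starting at 'floor', to be shared by
 * 'nb_user' concurrent users. Each user cache is seeded with one slab. */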
struct id_fpool *
id_fpool_create(unsigned int nb_user, uint32_t floor, uint32_t n_ids)
{
    struct id_fpool *pool;
    size_t i;

    ovs_assert(nb_user != 0);
    ovs_assert(floor <= UINT32_MAX - n_ids);

    pool = xmalloc(sizeof *pool + nb_user * sizeof(struct per_user));
    pool->next_id = floor;
    pool->floor = floor;
    pool->ceiling = floor + n_ids;

    for (i = 0; i < nb_user; i++) {
        per_user_init(&pool->per_users[i],
                      &pool->next_id, pool->ceiling);
    }
    pool->nb_user = nb_user;

    id_fpool_lock_init(&pool->pool_lock);
    ovs_list_init(&pool->free_slabs);

    return pool;
}
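/* Frees every slab and the pool itself. The caller must ensure that no
 * other thread is still using 'pool'. */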
void
id_fpool_destroy(struct id_fpool *pool)
{
    struct id_slab *slab;
    size_t i;

    id_fpool_lock(&pool->pool_lock);
    LIST_FOR_EACH_SAFE (slab, node, &pool->free_slabs) {
        free(slab);
    }
    ovs_list_poison(&pool->free_slabs);
    id_fpool_unlock(&pool->pool_lock);
    id_fpool_lock_destroy(&pool->pool_lock);

    for (i = 0; i < pool->nb_user; i++) {
        per_user_destroy(&pool->per_users[i]);
    }
    free(pool);
}
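/* Acquires a free ID into '*id' on behalf of user 'uid'. The user slab
 * is tried first, then the shared free list, then the shared counter,
 * and as a last resort the other users' slabs. Returns false only when
 * every source is exhausted. */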
bool
id_fpool_new_id(struct id_fpool *pool, unsigned int uid, uint32_t *id)
{
    struct per_user *pu;
    unsigned int uid2;
    bool res = false;

    ovs_assert(uid < pool->nb_user);
    pu = &pool->per_users[uid];

    id_fpool_lock(&pu->user_lock);

    if (id_slab_remove(pu->slab, id)) {
        res = true;
        goto unlock_and_ret;
    }
    free(pu->slab);

    id_fpool_lock(&pool->pool_lock);
    if (!ovs_list_is_empty(&pool->free_slabs)) {
        pu->slab = CONTAINER_OF(ovs_list_pop_front(&pool->free_slabs),
                                struct id_slab, node);
    } else {
        pu->slab = id_slab_create(&pool->next_id, pool->ceiling);
    }
    id_fpool_unlock(&pool->pool_lock);

    if (pu->slab != NULL) {
        res = id_slab_remove(pu->slab, id);
        goto unlock_and_ret;
    }

    id_fpool_unlock(&pu->user_lock);

    /* No ID available in local slab, no slab available in shared list.
     * The shared counter is maxed out. Attempt to steal an ID from another
     * user slab. */

    for (uid2 = 0; uid2 < pool->nb_user; uid2++) {
        struct per_user *pu2 = &pool->per_users[uid2];

        if (uid == uid2) {
            continue;
        }
        id_fpool_lock(&pu2->user_lock);
        res = id_slab_remove(pu2->slab, id);
        id_fpool_unlock(&pu2->user_lock);
        if (res) {
            break;
        }
    }

    goto out;

unlock_and_ret:
    id_fpool_unlock(&pu->user_lock);
out:
    return res;
}
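/* Returns 'id' to the pool on behalf of user 'uid'. IDs outside of
 * [floor, ceiling) are silently ignored. When the user slab is full it
 * is pushed onto the shared free list and a fresh slab is started
 * around 'id'. */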
void
id_fpool_free_id(struct id_fpool *pool, unsigned int uid, uint32_t id)
{
    struct per_user *pu;

    if (id < pool->floor || id >= pool->ceiling) {
        return;
    }

    ovs_assert(uid < pool->nb_user);
    pu = &pool->per_users[uid];

    id_fpool_lock(&pu->user_lock);

    if (pu->slab == NULL) {
        /* Create local slab with a single ID. */
        pu->slab = id_slab_create(&id, id + 1);
        goto unlock;
    }

    if (id_slab_insert(pu->slab, id)) {
        goto unlock;
    }

    id_fpool_lock(&pool->pool_lock);
    ovs_list_push_back(&pool->free_slabs, &pu->slab->node);
    id_fpool_unlock(&pool->pool_lock);

    /* Create local slab with a single ID. */
    pu->slab = id_slab_create(&id, id + 1);

unlock:
    id_fpool_unlock(&pu->user_lock);
}
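/* Illustrative usage sketch (not part of the original file): one pool
 * shared by several worker threads, each passing its own index as 'uid'.
 * 'n_threads' and 'thread_idx' are placeholder names.
 *
 *     struct id_fpool *pool = id_fpool_create(n_threads, 1000, 4096);
 *     uint32_t id;
 *
 *     if (id_fpool_new_id(pool, thread_idx, &id)) {
 *         ...use 'id', unique within [1000, 5096) until freed...
 *         id_fpool_free_id(pool, thread_idx, id);
 *     }
 *     id_fpool_destroy(pool);
 */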