Coverage Report

Created: 2025-11-24 06:48

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/openvswitch/lib/fat-rwlock.c
Line
Count
Source
1
/*
2
 * Copyright (c) 2013, 2014 Nicira, Inc.
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at:
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 */
16
17
#include <config.h>
18
19
#include "fat-rwlock.h"
20
21
#include <errno.h>
22
23
#include "openvswitch/hmap.h"
24
#include "openvswitch/list.h"
25
#include "ovs-thread.h"
26
#include "random.h"
27
28
/* Per-thread state for one fat_rwlock.  One slot exists for each thread that
 * has ever touched the rwlock; readers only ever touch their own slot, which
 * is what makes the read path cheap. */
struct fat_rwlock_slot {
    /* Membership in rwlock's list of "struct fat_rwlock_slot"s.
     *
     * fat_rwlock_destroy() sets 'rwlock' to NULL to indicate that this
     * slot may be destroyed. */
    struct ovs_list list_node;  /* In struct rwlock's 'threads' list. */
    struct fat_rwlock *rwlock;  /* Owner. */

    /* Mutex.
     *
     * A thread holding the read-lock holds its own mutex.
     *
     * A thread holding the write-lock holds every thread's mutex, plus
     * 'rwlock->mutex'. */
    struct ovs_mutex mutex;

    /* This thread's locking status for 'rwlock':
     *
     *     - 0: This thread does not have any lock on 'rwlock'.  This thread
     *       does not have 'mutex' locked.
     *
     *     - 1: This thread has a read-lock on 'rwlock' and holds 'mutex'.
     *
     *     - 2...UINT_MAX-1: This thread has recursively taken the read-lock on
     *       'rwlock' to the level of 'depth'.  This thread holds 'mutex'.
     *
     *     - UINT_MAX: This thread has the write-lock on 'rwlock' and holds
     *       'mutex' (plus the 'mutex' of all of 'rwlock''s other slots).
     *
     * Accessed only by the slot's own thread, so no synchronization is
     * needed. */
    unsigned int depth;
};
61
62
static void
63
free_slot(struct fat_rwlock_slot *slot)
64
0
{
65
0
    if (slot->depth) {
66
0
        abort();
67
0
    }
68
69
0
    ovs_list_remove(&slot->list_node);
70
0
    free_cacheline(slot);
71
0
}
72
73
static void
74
slot_destructor(void *slot_)
75
0
{
76
0
    struct fat_rwlock_slot *slot = slot_;
77
0
    struct fat_rwlock *rwlock = slot->rwlock;
78
79
0
    ovs_mutex_lock(&rwlock->mutex);
80
0
    free_slot(slot);
81
0
    ovs_mutex_unlock(&rwlock->mutex);
82
0
}
83
84
/* Initialize 'rwlock' as a new fat_rwlock. */
void
fat_rwlock_init(struct fat_rwlock *rwlock)
{
    /* 'slot_destructor' runs at thread exit to reclaim that thread's slot. */
    ovsthread_key_create(&rwlock->key, slot_destructor);
    ovs_mutex_init(&rwlock->mutex);
    /* Take the mutex around the list init so thread-safety analysis sees the
     * 'threads' list only touched under 'mutex'. */
    ovs_mutex_lock(&rwlock->mutex);
    ovs_list_init(&rwlock->threads);
    ovs_mutex_unlock(&rwlock->mutex);
}
94
95
/* Destroys 'rwlock', which must not be locked or otherwise in use by any
 * thread. */
void
fat_rwlock_destroy(struct fat_rwlock *rwlock)
{
    struct fat_rwlock_slot *slot;

    /* Order is important here.  By destroying the thread-specific data first,
     * before we destroy the slots, we ensure that the thread-specific
     * data destructor can't race with our loop below. */
    ovsthread_key_delete(rwlock->key);

    /* Free every remaining per-thread slot.  The SAFE list variant is
     * required because free_slot() unlinks the node we are standing on. */
    ovs_mutex_lock(&rwlock->mutex);
    LIST_FOR_EACH_SAFE (slot, list_node, &rwlock->threads) {
        free_slot(slot);
    }
    ovs_mutex_unlock(&rwlock->mutex);
    ovs_mutex_destroy(&rwlock->mutex);
}
114
115
static struct fat_rwlock_slot *
116
fat_rwlock_get_slot__(struct fat_rwlock *rwlock)
117
0
{
118
0
    struct fat_rwlock_slot *slot;
119
120
    /* Fast path. */
121
0
    slot = ovsthread_getspecific(rwlock->key);
122
0
    if (slot) {
123
0
        return slot;
124
0
    }
125
126
    /* Slow path: create a new slot for 'rwlock' in this thread. */
127
128
0
    slot = xmalloc_cacheline(sizeof *slot);
129
0
    slot->rwlock = rwlock;
130
0
    ovs_mutex_init(&slot->mutex);
131
0
    slot->depth = 0;
132
133
0
    ovs_mutex_lock(&rwlock->mutex);
134
0
    ovs_list_push_back(&rwlock->threads, &slot->list_node);
135
0
    ovs_mutex_unlock(&rwlock->mutex);
136
137
0
    ovsthread_setspecific(rwlock->key, slot);
138
139
0
    return slot;
140
0
}
141
142
/* Locks 'rwlock' for reading.  The read-lock is recursive: it may be acquired
143
 * any number of times by a single thread (which must then release it the same
144
 * number of times for it to truly be released). */
145
void
146
fat_rwlock_rdlock(const struct fat_rwlock *rwlock_)
147
    OVS_ACQ_RDLOCK(rwlock_)
148
    OVS_NO_THREAD_SAFETY_ANALYSIS
149
0
{
150
0
    struct fat_rwlock *rwlock = CONST_CAST(struct fat_rwlock *, rwlock_);
151
0
    struct fat_rwlock_slot *this = fat_rwlock_get_slot__(rwlock);
152
153
0
    switch (this->depth) {
154
0
    case UINT_MAX:
155
        /* This thread already holds the write-lock. */
156
0
        abort();
157
158
0
    case 0:
159
0
        ovs_mutex_lock(&this->mutex);
160
        /* fall through */
161
0
    default:
162
0
        this->depth++;
163
0
        break;
164
0
    }
165
0
}
166
167
static struct fat_rwlock_slot *
168
fat_rwlock_try_get_slot__(struct fat_rwlock *rwlock)
169
0
{
170
0
    struct fat_rwlock_slot *slot;
171
172
    /* Fast path. */
173
0
    slot = ovsthread_getspecific(rwlock->key);
174
0
    if (slot) {
175
0
        return slot;
176
0
    }
177
178
    /* Slow path: create a new slot for 'rwlock' in this thread. */
179
180
0
    if (!ovs_mutex_trylock(&rwlock->mutex)) {
181
0
        slot = xmalloc_cacheline(sizeof *slot);
182
0
        slot->rwlock = rwlock;
183
0
        ovs_mutex_init(&slot->mutex);
184
0
        slot->depth = 0;
185
186
0
        ovs_list_push_back(&rwlock->threads, &slot->list_node);
187
0
        ovs_mutex_unlock(&rwlock->mutex);
188
0
        ovsthread_setspecific(rwlock->key, slot);
189
0
    }
190
191
0
    return slot;
192
0
}
193
194
/* Tries to lock 'rwlock' for reading.  If successful, returns 0.  If taking
195
 * the lock would require blocking, returns EBUSY (without blocking). */
196
int
197
fat_rwlock_tryrdlock(const struct fat_rwlock *rwlock_)
198
    OVS_TRY_RDLOCK(0, rwlock_)
199
    OVS_NO_THREAD_SAFETY_ANALYSIS
200
0
{
201
0
    struct fat_rwlock *rwlock = CONST_CAST(struct fat_rwlock *, rwlock_);
202
0
    struct fat_rwlock_slot *this = fat_rwlock_try_get_slot__(rwlock);
203
0
    int error;
204
205
0
    if (!this) {
206
0
        return EBUSY;
207
0
    }
208
209
0
    switch (this->depth) {
210
0
    case UINT_MAX:
211
0
        return EBUSY;
212
213
0
    case 0:
214
0
        error = ovs_mutex_trylock(&this->mutex);
215
0
        if (error) {
216
0
            return error;
217
0
        }
218
        /* fall through */
219
0
    default:
220
0
        this->depth++;
221
0
        break;
222
0
    }
223
224
0
    return 0;
225
0
}
226
227
/* Locks 'rwlock' for writing.
228
 *
229
 * The write lock is not recursive. */
230
void
231
fat_rwlock_wrlock(const struct fat_rwlock *rwlock_)
232
    OVS_ACQ_WRLOCK(rwlock_)
233
    OVS_NO_THREAD_SAFETY_ANALYSIS
234
0
{
235
0
    struct fat_rwlock *rwlock = CONST_CAST(struct fat_rwlock *, rwlock_);
236
0
    struct fat_rwlock_slot *this = fat_rwlock_get_slot__(rwlock);
237
0
    struct fat_rwlock_slot *slot;
238
239
0
    ovs_assert(!this->depth);
240
0
    this->depth = UINT_MAX;
241
242
0
    ovs_mutex_lock(&rwlock->mutex);
243
0
    LIST_FOR_EACH (slot, list_node, &rwlock->threads) {
244
0
        ovs_mutex_lock(&slot->mutex);
245
0
    }
246
0
}
247
248
/* Unlocks 'rwlock', which the current thread must have locked for reading or
249
 * for writing.  If the read lock has been taken recursively, it must be
250
 * released the same number of times to be truly released. */
251
void
252
fat_rwlock_unlock(const struct fat_rwlock *rwlock_)
253
    OVS_RELEASES(rwlock_)
254
    OVS_NO_THREAD_SAFETY_ANALYSIS
255
0
{
256
0
    struct fat_rwlock *rwlock = CONST_CAST(struct fat_rwlock *, rwlock_);
257
0
    struct fat_rwlock_slot *this = fat_rwlock_get_slot__(rwlock);
258
0
    struct fat_rwlock_slot *slot;
259
260
0
    switch (this->depth) {
261
0
    case UINT_MAX:
262
0
        LIST_FOR_EACH (slot, list_node, &rwlock->threads) {
263
0
            ovs_mutex_unlock(&slot->mutex);
264
0
        }
265
0
        ovs_mutex_unlock(&rwlock->mutex);
266
0
        this->depth = 0;
267
0
        break;
268
269
0
    case 0:
270
        /* This thread doesn't hold any lock. */
271
0
        abort();
272
273
0
    case 1:
274
0
        ovs_mutex_unlock(&this->mutex);
275
        /* fall through */
276
0
    default:
277
0
        this->depth--;
278
0
        break;
279
0
    }
280
0
}