/src/opensips/rw_locking.h
/*
 * Copyright (C) 2016 OpenSIPS Project
 *
 * This file is part of opensips, a free SIP server.
 *
 * opensips is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version
 *
 * opensips is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#ifndef _rw_locking_h
#define _rw_locking_h

#include <unistd.h>
#include "locking.h"

/* busy-wait interval (in microseconds) used while spinning on the flags */
#define LOCK_WAIT 10

typedef struct rw_lock_t {
	gen_lock_t *lock;  /* mutex protecting the flags and counter below */
	int w_flag;        /* set while a writer holds (or waits for) the lock */
	int sw_flag;       /* set while a "switchable" reader holds the lock */
	int r_count;       /* number of active readers */
} rw_lock_t;

inline static rw_lock_t * lock_init_rw(void)
{
	rw_lock_t * new_lock;

	/* the structure lives in shared memory, so the lock can be used
	 * by all OpenSIPS processes */
	new_lock = (rw_lock_t*)shm_malloc(sizeof(rw_lock_t));
	if (!new_lock)
		goto error;
	memset(new_lock, 0, sizeof(rw_lock_t));

	new_lock->lock = lock_alloc();
	if (!new_lock->lock)
		goto error;
	if (!lock_init(new_lock->lock))
		goto error;

	return new_lock;
error:
	if (new_lock != NULL && new_lock->lock)
		lock_dealloc(new_lock->lock);
	if (new_lock)
		shm_free(new_lock);
	return NULL;
}

inline static void lock_destroy_rw(rw_lock_t *_lock)
{
	if (!_lock)
		return;

	if (_lock->lock) {
		lock_destroy(_lock->lock);
		lock_dealloc(_lock->lock);
	}
	shm_free(_lock);
}
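
/*
 * Typical lifecycle (illustrative sketch only -- the variable and function
 * names below are hypothetical, not part of this header): allocate the lock
 * once at startup, share it across processes, destroy it at shutdown.
 *
 *     static rw_lock_t *data_lock;   // hypothetical module-global lock
 *
 *     int my_mod_init(void)
 *     {
 *         data_lock = lock_init_rw();
 *         if (!data_lock)
 *             return -1;
 *         return 0;
 *     }
 *
 *     void my_mod_destroy(void)
 *     {
 *         lock_destroy_rw(data_lock);
 *         data_lock = NULL;
 *     }
 */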

#define lock_start_write(_lock) \
	do { \
		__label__ again; \
	again: \
		lock_get((_lock)->lock); \
		/* wait for the other writers */ \
		if ((_lock)->w_flag) { \
			lock_release((_lock)->lock); \
			usleep(LOCK_WAIT); \
			goto again; \
		} \
		(_lock)->w_flag = 1; \
		lock_release((_lock)->lock); \
		/* wait for readers */ \
		while ((_lock)->r_count) \
			usleep(LOCK_WAIT); \
	} while (0)

#define lock_stop_write(_lock) \
	do { \
		(_lock)->w_flag = 0; \
	} while(0)
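
/*
 * Typical writer usage (illustrative sketch; "data_lock" is a hypothetical
 * rw_lock_t * created with lock_init_rw()):
 *
 *     lock_start_write(data_lock);
 *     // ... modify the shared data; all readers and writers are blocked out ...
 *     lock_stop_write(data_lock);
 */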

#define lock_start_read(_lock) \
	do { \
		__label__ again; \
	again: \
		lock_get((_lock)->lock); \
		/* wait for any writer */ \
		if ((_lock)->w_flag) { \
			lock_release((_lock)->lock); \
			usleep(LOCK_WAIT); \
			goto again; \
		} \
		(_lock)->r_count++; \
		lock_release((_lock)->lock); \
	} while (0)

#define lock_stop_read(_lock) \
	do { \
		lock_get((_lock)->lock); \
		(_lock)->r_count--; \
		lock_release((_lock)->lock); \
	} while (0)
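
/*
 * Typical reader usage (illustrative sketch; "data_lock" is the same
 * hypothetical rw_lock_t * as above). Multiple readers may run in parallel;
 * they only exclude writers:
 *
 *     lock_start_read(data_lock);
 *     // ... read the shared data ...
 *     lock_stop_read(data_lock);
 */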

/* processes using the "switchable R/W" macros will run serially with each
 * other; however, they will still run in parallel with processes using the
 * lock_start_read() macro above!
 */
#define lock_start_sw_read(_lock) \
	do { \
		__label__ again; \
	again: \
		lock_get((_lock)->lock); \
		if ((_lock)->w_flag || (_lock)->sw_flag) { \
			lock_release((_lock)->lock); \
			usleep(LOCK_WAIT); \
			goto again; \
		} \
		(_lock)->r_count++; \
		(_lock)->sw_flag = 1; \
		lock_release((_lock)->lock); \
	} while (0)

/* to be declared in each function making use of the re-entrant macros */
#define DEFS_RW_LOCKING_R \
	int __r_read_changed = 0;

/**
 * Re-entrant versions of the reader start/stop macros.
 * @_r_read_acq: a process-local global variable used to detect re-entrance
 * Note: these macros *cannot* be called in a nested fashion
 *       within the same function!
 */
#define lock_start_read_r(_lock, _r_read_acq) \
	do { \
		if (!(_r_read_acq)) { \
			(_r_read_acq) = 1; \
			__r_read_changed = 1; \
			lock_start_read(_lock); \
		} \
	} while (0)

#define lock_stop_read_r(_lock, _r_read_acq) \
	do { \
		if (__r_read_changed) { \
			lock_stop_read(_lock); \
			__r_read_changed = 0; \
			(_r_read_acq) = 0; \
		} \
	} while (0)
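
/*
 * Illustrative sketch of the re-entrant read macros (function and variable
 * names are hypothetical). "read_acq" must be a process-local flag shared by
 * all functions that may call each other while holding the read lock:
 *
 *     static int read_acq;            // hypothetical process-local flag
 *
 *     void inner(void)
 *     {
 *         DEFS_RW_LOCKING_R
 *         lock_start_read_r(data_lock, read_acq);  // no-op if caller already holds it
 *         // ... read the shared data ...
 *         lock_stop_read_r(data_lock, read_acq);   // releases only if acquired here
 *     }
 *
 *     void outer(void)
 *     {
 *         DEFS_RW_LOCKING_R
 *         lock_start_read_r(data_lock, read_acq);
 *         inner();                                 // safe: will not re-acquire
 *         lock_stop_read_r(data_lock, read_acq);
 *     }
 */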

#define lock_stop_sw_read(_lock) \
	do { \
		lock_get((_lock)->lock); \
		(_lock)->r_count--; \
		lock_release((_lock)->lock); \
		(_lock)->sw_flag = 0; \
	} while (0)

/* switch to write access on a lock previously acquired for switchable reading.
 * Note: you must switch back to reading before releasing the lock!
 */
#define lock_switch_write(_lock, __old) \
	do { \
		lock_get((_lock)->lock); \
		__old = (_lock)->w_flag; \
		(_lock)->w_flag = 1; \
		lock_release((_lock)->lock); \
		/* wait for the other readers (we still hold one read reference) */ \
		while ((_lock)->r_count > 1) \
			usleep(LOCK_WAIT); \
	} while (0)

/* switch back to reading access if previously switched to writing */
#define lock_switch_read(_lock, __old) \
	do { \
		lock_get((_lock)->lock); \
		(_lock)->w_flag = __old; \
		lock_release((_lock)->lock); \
	} while (0)
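
/*
 * Illustrative sketch of the "switchable" read macros (names such as
 * "data_lock" and "need_to_modify" are hypothetical). A switchable reader
 * can temporarily upgrade to write access and must downgrade again before
 * releasing the lock:
 *
 *     int old_flag;
 *
 *     lock_start_sw_read(data_lock);
 *     // ... read the shared data; plain readers may still run in parallel ...
 *     if (need_to_modify) {
 *         lock_switch_write(data_lock, old_flag);
 *         // ... modify the shared data; all other readers have drained ...
 *         lock_switch_read(data_lock, old_flag);
 *     }
 *     lock_stop_sw_read(data_lock);
 */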

#endif