/src/nspr/pr/src/misc/pratom.c
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/*
** PR Atomic operations
*/

#include "pratom.h"
#include "primpl.h"

#include <string.h>

/*
 * The following is a fallback implementation that emulates atomic
 * operations on platforms that lack native atomic support. A platform
 * that does have atomic operations should define the macro
 * _PR_HAVE_ATOMIC_OPS, in which case the code below is not compiled in.
 */
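
/*
 * For illustration only (hypothetical, not part of this file): on a
 * platform that does define _PR_HAVE_ATOMIC_OPS, the machine-dependent
 * layer maps these operations onto native primitives. A minimal sketch,
 * assuming GCC-style __sync builtins, might read:
 *
 *   PRInt32 _PR_MD_ATOMIC_INCREMENT(PRInt32* val) {
 *     return __sync_add_and_fetch(val, 1);          // new value
 *   }
 *   PRInt32 _PR_MD_ATOMIC_DECREMENT(PRInt32* val) {
 *     return __sync_sub_and_fetch(val, 1);          // new value
 *   }
 *   PRInt32 _PR_MD_ATOMIC_ADD(PRInt32* ptr, PRInt32 val) {
 *     return __sync_add_and_fetch(ptr, val);        // new value
 *   }
 *   PRInt32 _PR_MD_ATOMIC_SET(PRInt32* val, PRInt32 newval) {
 *     return __sync_lock_test_and_set(val, newval); // previous value
 *   }
 */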

#if !defined(_PR_HAVE_ATOMIC_OPS)

# if defined(_PR_PTHREADS)
/*
 * PR_AtomicDecrement() is used in NSPR's thread-specific data
 * destructor. Because thread-specific data destructors may be
 * invoked after a PR_Cleanup() call, we need an implementation
 * of the atomic routines that doesn't need NSPR to be initialized.
 */

/*
 * We use a set of locks for all the emulated atomic operations.
 * Hashing on the address of the integer to be modified spreads the
 * operations across the locks, which should lessen contention
 * between threads.
 *
 * The number of atomic locks can be set with the environment variable
 * NSPR_ATOMIC_HASH_LOCKS.
 */
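
/*
 * Example (hypothetical invocation): request 100 hash locks; the value
 * is clamped to [1, MAX_ATOMIC_LOCKS] and rounded down to a power of
 * two, so the process actually runs with 64 locks:
 *
 *   $ NSPR_ATOMIC_HASH_LOCKS=100 ./myprogram
 */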

/*
 * lock counts should be a power of 2
 */
# define DEFAULT_ATOMIC_LOCKS \
    16 /* should be in sync with the number of initializers below */
# define MAX_ATOMIC_LOCKS (4 * 1024)

static pthread_mutex_t static_atomic_locks[DEFAULT_ATOMIC_LOCKS] = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER};

# ifdef DEBUG
static PRInt32 static_hash_lock_counts[DEFAULT_ATOMIC_LOCKS];
static PRInt32* hash_lock_counts = static_hash_lock_counts;
# endif

static PRUint32 num_atomic_locks = DEFAULT_ATOMIC_LOCKS;
static pthread_mutex_t* atomic_locks = static_atomic_locks;
static PRUint32 atomic_hash_mask = DEFAULT_ATOMIC_LOCKS - 1;

# define _PR_HASH_FOR_LOCK(ptr)                                       \
    ((PRUint32)(((PRUptrdiff)(ptr) >> 2) ^ ((PRUptrdiff)(ptr) >> 8)) & \
     atomic_hash_mask)

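/*
 * Worked example: with the default 16 locks (mask 0xF), an integer at
 * address 0x1004 hashes to
 *
 *   ((0x1004 >> 2) ^ (0x1004 >> 8)) & 0xF = (0x401 ^ 0x10) & 0xF = 1
 *
 * so every operation on that integer serializes on atomic_locks[1].
 */
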
void _PR_MD_INIT_ATOMIC(void) {
  char* eval;
  int index;

  /* PR_FloorLog2 == PR_CeilingLog2 only for powers of two. */
  PR_ASSERT(PR_FloorLog2(MAX_ATOMIC_LOCKS) == PR_CeilingLog2(MAX_ATOMIC_LOCKS));

  PR_ASSERT(PR_FloorLog2(DEFAULT_ATOMIC_LOCKS) ==
            PR_CeilingLog2(DEFAULT_ATOMIC_LOCKS));

  if (((eval = getenv("NSPR_ATOMIC_HASH_LOCKS")) != NULL) &&
      ((num_atomic_locks = atoi(eval)) != DEFAULT_ATOMIC_LOCKS)) {
    if (num_atomic_locks > MAX_ATOMIC_LOCKS) {
      num_atomic_locks = MAX_ATOMIC_LOCKS;
    } else if (num_atomic_locks < 1) {
      num_atomic_locks = 1;
    } else {
      /* Round the requested count down to a power of two. */
      num_atomic_locks = PR_FloorLog2(num_atomic_locks);
      num_atomic_locks = 1L << num_atomic_locks;
    }
    atomic_locks =
        (pthread_mutex_t*)PR_Malloc(sizeof(pthread_mutex_t) * num_atomic_locks);
    if (atomic_locks) {
      for (index = 0; index < num_atomic_locks; index++) {
        if (pthread_mutex_init(&atomic_locks[index], NULL)) {
          PR_DELETE(atomic_locks);
          atomic_locks = NULL;
          break;
        }
      }
    }
# ifdef DEBUG
    if (atomic_locks) {
      hash_lock_counts = PR_CALLOC(num_atomic_locks * sizeof(PRInt32));
      if (hash_lock_counts == NULL) {
        PR_DELETE(atomic_locks);
        atomic_locks = NULL;
      }
    }
# endif
    if (atomic_locks == NULL) {
      /*
       * Fall back to the statically allocated locks.
       */
      atomic_locks = static_atomic_locks;
      num_atomic_locks = DEFAULT_ATOMIC_LOCKS;
# ifdef DEBUG
      hash_lock_counts = static_hash_lock_counts;
# endif
    }
    atomic_hash_mask = num_atomic_locks - 1;
  }
  PR_ASSERT(PR_FloorLog2(num_atomic_locks) == PR_CeilingLog2(num_atomic_locks));
}

PRInt32 _PR_MD_ATOMIC_INCREMENT(PRInt32* val) {
  PRInt32 rv;
  PRInt32 idx = _PR_HASH_FOR_LOCK(val);

  pthread_mutex_lock(&atomic_locks[idx]);
  rv = ++(*val);
# ifdef DEBUG
  hash_lock_counts[idx]++;
# endif
  pthread_mutex_unlock(&atomic_locks[idx]);
  return rv;
}

PRInt32 _PR_MD_ATOMIC_ADD(PRInt32* ptr, PRInt32 val) {
  PRInt32 rv;
  PRInt32 idx = _PR_HASH_FOR_LOCK(ptr);

  pthread_mutex_lock(&atomic_locks[idx]);
  rv = ((*ptr) += val);
# ifdef DEBUG
  hash_lock_counts[idx]++;
# endif
  pthread_mutex_unlock(&atomic_locks[idx]);
  return rv;
}

PRInt32 _PR_MD_ATOMIC_DECREMENT(PRInt32* val) {
  PRInt32 rv;
  PRInt32 idx = _PR_HASH_FOR_LOCK(val);

  pthread_mutex_lock(&atomic_locks[idx]);
  rv = --(*val);
# ifdef DEBUG
  hash_lock_counts[idx]++;
# endif
  pthread_mutex_unlock(&atomic_locks[idx]);
  return rv;
}

PRInt32 _PR_MD_ATOMIC_SET(PRInt32* val, PRInt32 newval) {
  PRInt32 rv;
  PRInt32 idx = _PR_HASH_FOR_LOCK(val);

  pthread_mutex_lock(&atomic_locks[idx]);
  rv = *val;
  *val = newval;
# ifdef DEBUG
  hash_lock_counts[idx]++;
# endif
  pthread_mutex_unlock(&atomic_locks[idx]);
  return rv;
}
# else /* _PR_PTHREADS */
/*
 * We use a single lock for all the emulated atomic operations.
 * The lock contention should be acceptable.
 */
static PRLock* atomic_lock = NULL;
void _PR_MD_INIT_ATOMIC(void) {
  if (atomic_lock == NULL) {
    atomic_lock = PR_NewLock();
  }
}

PRInt32 _PR_MD_ATOMIC_INCREMENT(PRInt32* val) {
  PRInt32 rv;

  if (!_pr_initialized) {
    _PR_ImplicitInitialization();
  }
  PR_Lock(atomic_lock);
  rv = ++(*val);
  PR_Unlock(atomic_lock);
  return rv;
}

PRInt32 _PR_MD_ATOMIC_ADD(PRInt32* ptr, PRInt32 val) {
  PRInt32 rv;

  if (!_pr_initialized) {
    _PR_ImplicitInitialization();
  }
  PR_Lock(atomic_lock);
  rv = ((*ptr) += val);
  PR_Unlock(atomic_lock);
  return rv;
}

PRInt32 _PR_MD_ATOMIC_DECREMENT(PRInt32* val) {
  PRInt32 rv;

  if (!_pr_initialized) {
    _PR_ImplicitInitialization();
  }
  PR_Lock(atomic_lock);
  rv = --(*val);
  PR_Unlock(atomic_lock);
  return rv;
}

PRInt32 _PR_MD_ATOMIC_SET(PRInt32* val, PRInt32 newval) {
  PRInt32 rv;

  if (!_pr_initialized) {
    _PR_ImplicitInitialization();
  }
  PR_Lock(atomic_lock);
  rv = *val;
  *val = newval;
  PR_Unlock(atomic_lock);
  return rv;
}
# endif /* _PR_PTHREADS */

#endif /* !_PR_HAVE_ATOMIC_OPS */

void _PR_InitAtomic(void) { _PR_MD_INIT_ATOMIC(); }

PR_IMPLEMENT(PRInt32)
PR_AtomicIncrement(PRInt32* val) { return _PR_MD_ATOMIC_INCREMENT(val); }

PR_IMPLEMENT(PRInt32)
PR_AtomicDecrement(PRInt32* val) { return _PR_MD_ATOMIC_DECREMENT(val); }

PR_IMPLEMENT(PRInt32)
PR_AtomicSet(PRInt32* val, PRInt32 newval) {
  return _PR_MD_ATOMIC_SET(val, newval);
}

PR_IMPLEMENT(PRInt32)
PR_AtomicAdd(PRInt32* ptr, PRInt32 val) { return _PR_MD_ATOMIC_ADD(ptr, val); }
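
/*
 * Illustrative usage (hypothetical code, not part of NSPR): a simple
 * reference count driven by the public atomic API.
 *
 *   static PRInt32 refcnt = 0;
 *
 *   void hold(void) { PR_AtomicIncrement(&refcnt); }
 *   void release(void) {
 *     if (PR_AtomicDecrement(&refcnt) == 0) {
 *       // last reference dropped; safe to tear down
 *     }
 *   }
 *
 * Note that PR_AtomicSet() returns the previous value, so it also
 * serves as an atomic exchange.
 */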

/*
 * On platforms that don't support a CAS (compare-and-swap) instruction
 * (or an equivalent), the stack operations are implemented with a PRLock.
 */

PR_IMPLEMENT(PRStack*)
PR_CreateStack(const char* stack_name) {
  PRStack* stack;

  if (!_pr_initialized) {
    _PR_ImplicitInitialization();
  }

  if ((stack = PR_NEW(PRStack)) == NULL) {
    return NULL;
  }
  if (stack_name) {
    stack->prstk_name = (char*)PR_Malloc(strlen(stack_name) + 1);
    if (stack->prstk_name == NULL) {
      PR_DELETE(stack);
      return NULL;
    }
    strcpy(stack->prstk_name, stack_name);
  } else {
    stack->prstk_name = NULL;
  }

#ifndef _PR_HAVE_ATOMIC_CAS
  stack->prstk_lock = PR_NewLock();
  if (stack->prstk_lock == NULL) {
    PR_Free(stack->prstk_name);
    PR_DELETE(stack);
    return NULL;
  }
#endif /* !_PR_HAVE_ATOMIC_CAS */

  stack->prstk_head.prstk_elem_next = NULL;

  return stack;
}

PR_IMPLEMENT(PRStatus)
PR_DestroyStack(PRStack* stack) {
  if (stack->prstk_head.prstk_elem_next != NULL) {
    PR_SetError(PR_INVALID_STATE_ERROR, 0);
    return PR_FAILURE;
  }

  if (stack->prstk_name) {
    PR_Free(stack->prstk_name);
  }
#ifndef _PR_HAVE_ATOMIC_CAS
  PR_DestroyLock(stack->prstk_lock);
#endif /* !_PR_HAVE_ATOMIC_CAS */
  PR_DELETE(stack);

  return PR_SUCCESS;
}

#ifndef _PR_HAVE_ATOMIC_CAS

PR_IMPLEMENT(void)
PR_StackPush(PRStack* stack, PRStackElem* stack_elem) {
  PR_Lock(stack->prstk_lock);
  stack_elem->prstk_elem_next = stack->prstk_head.prstk_elem_next;
  stack->prstk_head.prstk_elem_next = stack_elem;
  PR_Unlock(stack->prstk_lock);
  return;
}

PR_IMPLEMENT(PRStackElem*)
PR_StackPop(PRStack* stack) {
  PRStackElem* element;

  PR_Lock(stack->prstk_lock);
  element = stack->prstk_head.prstk_elem_next;
  if (element != NULL) {
    stack->prstk_head.prstk_elem_next = element->prstk_elem_next;
    element->prstk_elem_next = NULL; /* debugging aid */
  }
  PR_Unlock(stack->prstk_lock);
  return element;
}
#endif /* !_PR_HAVE_ATOMIC_CAS */
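
/*
 * Illustrative usage (hypothetical types, not part of NSPR): callers
 * typically embed a PRStackElem at the start of their own node type and
 * cast the element returned by PR_StackPop() back to that type.
 *
 *   typedef struct MyNode {
 *     PRStackElem elem;   // must come first for the cast below
 *     int payload;
 *   } MyNode;
 *
 *   PRStack* s = PR_CreateStack("my-stack");
 *   MyNode* n = PR_NEW(MyNode);
 *   n->payload = 42;
 *   PR_StackPush(s, &n->elem);
 *   MyNode* top = (MyNode*)PR_StackPop(s); // NULL when the stack is empty
 *   PR_DELETE(top);
 *   PR_DestroyStack(s);                    // fails if the stack is non-empty
 */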