/src/nspr/pr/src/pthreads/ptsynch.c
Line | Count | Source |
1 | | /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
3 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
4 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
5 | | |
6 | | /* |
7 | | ** File: ptsynch.c |
8 | | ** Description: Implementation for thread synchronization using pthreads |
9 | | ** Exports: prlock.h, prcvar.h, prmon.h, prcmon.h |
10 | | */ |
11 | | |
12 | | #if defined(_PR_PTHREADS) |
13 | | |
14 | | # include "primpl.h" |
15 | | # include "obsolete/prsem.h" |
16 | | |
17 | | # include <string.h> |
18 | | # include <pthread.h> |
19 | | # include <sys/time.h> |
20 | | |
21 | | static pthread_mutexattr_t _pt_mattr; |
22 | | static pthread_condattr_t _pt_cvar_attr; |
23 | | |
24 | | # if defined(DEBUG) |
25 | | extern PTDebug pt_debug; /* this is shared between several modules */ |
26 | | # endif /* defined(DEBUG) */ |
27 | | |
28 | | # if defined(FREEBSD) |
29 | | /* |
30 | | * On older versions of FreeBSD, pthread_mutex_trylock returns EDEADLK. |
31 | | * Newer versions return EBUSY. We still need to support both. |
32 | | */ |
33 | | static int pt_pthread_mutex_is_locked(pthread_mutex_t* m) { |
34 | | int rv = pthread_mutex_trylock(m); |
35 | | return (EBUSY == rv || EDEADLK == rv); |
36 | | } |
37 | | # endif |
38 | | |
39 | | /**************************************************************/ |
40 | | /**************************************************************/ |
41 | | /*****************************LOCKS****************************/ |
42 | | /**************************************************************/ |
43 | | /**************************************************************/ |
44 | | |
45 | 1 | void _PR_InitLocks(void) { |
46 | 1 | int rv; |
47 | 1 | rv = _PT_PTHREAD_MUTEXATTR_INIT(&_pt_mattr); |
48 | 1 | PR_ASSERT(0 == rv); |
49 | | |
50 | 1 | # if (defined(LINUX) && \ |
51 | 1 | (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 2))) || \ |
52 | 1 | (defined(FREEBSD) && __FreeBSD_version > 700055) |
53 | 1 | rv = pthread_mutexattr_settype(&_pt_mattr, PTHREAD_MUTEX_ADAPTIVE_NP); |
54 | 1 | PR_ASSERT(0 == rv); |
55 | 1 | # endif |
56 | | |
57 | 1 | rv = _PT_PTHREAD_CONDATTR_INIT(&_pt_cvar_attr); |
58 | 1 | PR_ASSERT(0 == rv); |
59 | 1 | } |
60 | | |
61 | 9 | static void pt_PostNotifies(PRLock* lock, PRBool unlock) { |
62 | 9 | PRIntn index, rv; |
63 | 9 | _PT_Notified post; |
64 | 9 | _PT_Notified *notified, *prev = NULL; |
65 | | /* |
66 | | * Time to actually notify any conditions that were affected |
67 | | * while the lock was held. Get a copy of the list that's in |
68 | | * the lock structure and then zero the original. If it's |
69 | | * linked to other such structures, we own that storage. |
70 | | */ |
71 | 9 | post = lock->notified; /* a safe copy; we own the lock */ |
72 | | |
73 | 9 | # if defined(DEBUG) |
74 | 9 | memset(&lock->notified, 0, sizeof(_PT_Notified)); /* reset */ |
75 | | # else |
76 | | lock->notified.length = 0; /* these are really sufficient */ |
77 | | lock->notified.link = NULL; |
78 | | # endif |
79 | | |
80 | | /* should (may) we release lock before notifying? */ |
81 | 9 | if (unlock) { |
82 | 9 | rv = pthread_mutex_unlock(&lock->mutex); |
83 | 9 | PR_ASSERT(0 == rv); |
84 | 9 | } |
85 | | |
86 | 9 | notified = &post; /* this is where we start */ |
87 | 9 | do { |
88 | 18 | for (index = 0; index < notified->length; ++index) { |
89 | 9 | PRCondVar* cv = notified->cv[index].cv; |
90 | 9 | PR_ASSERT(NULL != cv); |
91 | 9 | PR_ASSERT(0 != notified->cv[index].times); |
92 | 9 | if (-1 == notified->cv[index].times) { |
93 | 9 | rv = pthread_cond_broadcast(&cv->cv); |
94 | 9 | PR_ASSERT(0 == rv); |
95 | 9 | } else { |
96 | 0 | while (notified->cv[index].times-- > 0) { |
97 | 0 | rv = pthread_cond_signal(&cv->cv); |
98 | 0 | PR_ASSERT(0 == rv); |
99 | 0 | } |
100 | 0 | } |
101 | 9 | # if defined(DEBUG) |
102 | 9 | pt_debug.cvars_notified += 1; |
103 | 9 | if (0 > PR_ATOMIC_DECREMENT(&cv->notify_pending)) { |
104 | 0 | pt_debug.delayed_cv_deletes += 1; |
105 | 0 | PR_DestroyCondVar(cv); |
106 | 0 | } |
107 | | # else /* defined(DEBUG) */ |
108 | | if (0 > PR_ATOMIC_DECREMENT(&cv->notify_pending)) { |
109 | | PR_DestroyCondVar(cv); |
110 | | } |
111 | | # endif /* defined(DEBUG) */ |
112 | 9 | } |
113 | 9 | prev = notified; |
114 | 9 | notified = notified->link; |
115 | 9 | if (&post != prev) { |
116 | 0 | PR_DELETE(prev); |
117 | 0 | } |
118 | 9 | } while (NULL != notified); |
119 | 9 | } /* pt_PostNotifies */ |
120 | | |
121 | 24.2k | PR_IMPLEMENT(PRLock*) PR_NewLock(void) { |
122 | 24.2k | PRIntn rv; |
123 | 24.2k | PRLock* lock; |
124 | | |
125 | 24.2k | if (!_pr_initialized) { |
126 | 0 | _PR_ImplicitInitialization(); |
127 | 0 | } |
128 | | |
129 | 24.2k | lock = PR_NEWZAP(PRLock); |
130 | 24.2k | if (lock != NULL) { |
131 | 24.2k | rv = _PT_PTHREAD_MUTEX_INIT(lock->mutex, _pt_mattr); |
132 | 24.2k | PR_ASSERT(0 == rv); |
133 | 24.2k | } |
134 | 24.2k | # if defined(DEBUG) |
135 | 24.2k | pt_debug.locks_created += 1; |
136 | 24.2k | # endif |
137 | 24.2k | return lock; |
138 | 24.2k | } /* PR_NewLock */ |
139 | | |
140 | 24.2k | PR_IMPLEMENT(void) PR_DestroyLock(PRLock* lock) { |
141 | 24.2k | PRIntn rv; |
142 | 24.2k | PR_ASSERT(NULL != lock); |
143 | 24.2k | PR_ASSERT(PR_FALSE == lock->locked); |
144 | 24.2k | PR_ASSERT(0 == lock->notified.length); |
145 | 24.2k | PR_ASSERT(NULL == lock->notified.link); |
146 | 24.2k | rv = pthread_mutex_destroy(&lock->mutex); |
147 | 24.2k | PR_ASSERT(0 == rv); |
148 | 24.2k | # if defined(DEBUG) |
149 | 24.2k | memset(lock, 0xaf, sizeof(PRLock)); |
150 | 24.2k | pt_debug.locks_destroyed += 1; |
151 | 24.2k | # endif |
152 | 24.2k | PR_Free(lock); |
153 | 24.2k | } /* PR_DestroyLock */ |
154 | | |
155 | 30.8M | PR_IMPLEMENT(void) PR_Lock(PRLock* lock) { |
156 | | /* Nb: PR_Lock must not call PR_GetCurrentThread to access the |id| or |
157 | | * |tid| field of the current thread's PRThread structure because |
158 | | * _pt_root calls PR_Lock before setting thred->id and thred->tid. */ |
159 | 30.8M | PRIntn rv; |
160 | 30.8M | PR_ASSERT(lock != NULL); |
161 | 30.8M | rv = pthread_mutex_lock(&lock->mutex); |
162 | 30.8M | PR_ASSERT(0 == rv); |
163 | 30.8M | PR_ASSERT(0 == lock->notified.length); |
164 | 30.8M | PR_ASSERT(NULL == lock->notified.link); |
165 | 30.8M | PR_ASSERT(PR_FALSE == lock->locked); |
166 | | /* Nb: the order of the next two statements is not critical to |
167 | | * the correctness of PR_AssertCurrentThreadOwnsLock(), but |
168 | | * this particular order makes the assertion more likely to |
169 | | * catch errors. */ |
170 | 30.8M | lock->owner = pthread_self(); |
171 | 30.8M | lock->locked = PR_TRUE; |
172 | 30.8M | # if defined(DEBUG) |
173 | 30.8M | pt_debug.locks_acquired += 1; |
174 | 30.8M | # endif |
175 | 30.8M | } /* PR_Lock */ |
176 | | |
177 | 30.8M | PR_IMPLEMENT(PRStatus) PR_Unlock(PRLock* lock) { |
178 | 30.8M | pthread_t self = pthread_self(); |
179 | 30.8M | PRIntn rv; |
180 | | |
181 | 30.8M | PR_ASSERT(lock != NULL); |
182 | 30.8M | PR_ASSERT(_PT_PTHREAD_MUTEX_IS_LOCKED(lock->mutex)); |
183 | 30.8M | PR_ASSERT(PR_TRUE == lock->locked); |
184 | 30.8M | PR_ASSERT(pthread_equal(lock->owner, self)); |
185 | | |
186 | 30.8M | if (!lock->locked || !pthread_equal(lock->owner, self)) { |
187 | 0 | return PR_FAILURE; |
188 | 0 | } |
189 | | |
190 | 30.8M | lock->locked = PR_FALSE; |
191 | 30.8M | if (0 == lock->notified.length) /* shortcut */ |
192 | 30.8M | { |
193 | 30.8M | rv = pthread_mutex_unlock(&lock->mutex); |
194 | 30.8M | PR_ASSERT(0 == rv); |
195 | 30.8M | } else { |
196 | 9 | pt_PostNotifies(lock, PR_TRUE); |
197 | 9 | } |
198 | | |
199 | 30.8M | # if defined(DEBUG) |
200 | 30.8M | pt_debug.locks_released += 1; |
201 | 30.8M | # endif |
202 | 30.8M | return PR_SUCCESS; |
203 | 30.8M | } /* PR_Unlock */ |
204 | | |
205 | 0 | PR_IMPLEMENT(void) PR_AssertCurrentThreadOwnsLock(PRLock* lock) { |
206 | | /* Nb: the order of the |locked| and |owner==me| checks is not critical |
207 | | * to the correctness of PR_AssertCurrentThreadOwnsLock(), but |
208 | | * this particular order makes the assertion more likely to |
209 | | * catch errors. */ |
210 | 0 | PR_ASSERT(lock->locked && pthread_equal(lock->owner, pthread_self())); |
211 | 0 | } |
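
Usage sketch for the lock API above (illustrative only; the names and the counter are hypothetical, not part of this file):

    #include "prlock.h"

    static PRLock* ml;      /* hypothetical module lock */
    static int counter;     /* state protected by ml */

    void counter_init(void) {
      ml = PR_NewLock();    /* wraps _PT_PTHREAD_MUTEX_INIT on lock->mutex */
    }

    void counter_bump(void) {
      PR_Lock(ml);          /* pthread_mutex_lock plus owner/locked bookkeeping */
      PR_AssertCurrentThreadOwnsLock(ml);
      counter += 1;
      PR_Unlock(ml);        /* returns PR_FAILURE if the caller does not own ml */
    }

    void counter_fini(void) {
      PR_DestroyLock(ml);   /* asserts ml is unlocked and has no pending notifies */
    }
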
212 | | |
213 | | /**************************************************************/ |
214 | | /**************************************************************/ |
215 | | /***************************CONDITIONS*************************/ |
216 | | /**************************************************************/ |
217 | | /**************************************************************/ |
218 | | |
219 | | /* |
220 | | * This code is used to compute the absolute time for the wakeup. |
221 | | * It's moderately ugly, so it's defined here and called in a |
222 | | * couple of places. |
223 | | */ |
224 | 0 | # define PT_NANOPERMICRO 1000UL |
225 | 0 | # define PT_BILLION 1000000000UL |
226 | | |
227 | | static PRIntn pt_TimedWait(pthread_cond_t* cv, pthread_mutex_t* ml, |
228 | 0 | PRIntervalTime timeout) { |
229 | 0 | int rv; |
230 | 0 | struct timeval now; |
231 | 0 | struct timespec tmo; |
232 | 0 | PRUint32 ticks = PR_TicksPerSecond(); |
233 | |
234 | 0 | tmo.tv_sec = (PRInt32)(timeout / ticks); |
235 | 0 | tmo.tv_nsec = (PRInt32)(timeout - (tmo.tv_sec * ticks)); |
236 | 0 | tmo.tv_nsec = |
237 | 0 | (PRInt32)PR_IntervalToMicroseconds(PT_NANOPERMICRO * tmo.tv_nsec); |
238 | | |
239 | | /* pthreads wants this in absolute time, off we go ... */ |
240 | 0 | (void)GETTIMEOFDAY(&now); |
241 | | /* that one's usecs, this one's nsecs - grrrr! */ |
242 | 0 | tmo.tv_sec += now.tv_sec; |
243 | 0 | tmo.tv_nsec += (PT_NANOPERMICRO * now.tv_usec); |
244 | 0 | tmo.tv_sec += tmo.tv_nsec / PT_BILLION; |
245 | 0 | tmo.tv_nsec %= PT_BILLION; |
246 | |
247 | 0 | rv = pthread_cond_timedwait(cv, ml, &tmo); |
248 | | |
249 | | /* NSPR doesn't report timeouts */ |
250 | 0 | return (rv == ETIMEDOUT) ? 0 : rv; |
251 | 0 | } /* pt_TimedWait */ |
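
Caller-side sketch of how a timeout reaches pt_TimedWait() (illustrative; 'cv' is a hypothetical PRCondVar whose lock the caller already holds, as PR_WaitCondVar requires):

    #include "prinrval.h"
    #include "prcvar.h"

    static PRStatus wait_briefly(PRCondVar* cv) {
      /* The caller passes a relative PRIntervalTime; pt_TimedWait() converts
       * it into the absolute timespec that pthread_cond_timedwait() expects.
       * A timeout is not an error (ETIMEDOUT is mapped to 0), so the caller
       * must re-check its predicate after the call returns. */
      PRIntervalTime limit = PR_MillisecondsToInterval(250);
      return PR_WaitCondVar(cv, limit);
    }
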
252 | | |
253 | | /* |
254 | | * Notifies just get posted to the protecting mutex. The |
255 | | * actual notification is done when the lock is released so that |
256 | | * MP systems don't contend for a lock that they can't have. |
257 | | */ |
258 | 9 | static void pt_PostNotifyToCvar(PRCondVar* cvar, PRBool broadcast) { |
259 | 9 | PRIntn index = 0; |
260 | 9 | _PT_Notified* notified = &cvar->lock->notified; |
261 | | |
262 | 9 | PR_ASSERT(PR_TRUE == cvar->lock->locked); |
263 | 9 | PR_ASSERT(pthread_equal(cvar->lock->owner, pthread_self())); |
264 | 9 | PR_ASSERT(_PT_PTHREAD_MUTEX_IS_LOCKED(cvar->lock->mutex)); |
265 | | |
266 | 9 | while (1) { |
267 | 9 | for (index = 0; index < notified->length; ++index) { |
268 | 0 | if (notified->cv[index].cv == cvar) { |
269 | 0 | if (broadcast) { |
270 | 0 | notified->cv[index].times = -1; |
271 | 0 | } else if (-1 != notified->cv[index].times) { |
272 | 0 | notified->cv[index].times += 1; |
273 | 0 | } |
274 | 0 | return; /* we're finished */ |
275 | 0 | } |
276 | 0 | } |
277 | | /* if not full, enter new CV in this array */ |
278 | 9 | if (notified->length < PT_CV_NOTIFIED_LENGTH) { |
279 | 9 | break; |
280 | 9 | } |
281 | | |
282 | | /* if there's no link, create an empty array and link it */ |
283 | 0 | if (NULL == notified->link) { |
284 | 0 | notified->link = PR_NEWZAP(_PT_Notified); |
285 | 0 | } |
286 | 0 | notified = notified->link; |
287 | 0 | } |
288 | | |
289 | | /* A brand new entry in the array */ |
290 | 9 | (void)PR_ATOMIC_INCREMENT(&cvar->notify_pending); |
291 | 9 | notified->cv[index].times = (broadcast) ? -1 : 1; |
292 | 9 | notified->cv[index].cv = cvar; |
293 | 9 | notified->length += 1; |
294 | 9 | } /* pt_PostNotifyToCvar */ |
295 | | |
296 | 13 | PR_IMPLEMENT(PRCondVar*) PR_NewCondVar(PRLock* lock) { |
297 | 13 | PRCondVar* cv = PR_NEW(PRCondVar); |
298 | 13 | PR_ASSERT(lock != NULL); |
299 | 13 | if (cv != NULL) { |
300 | 13 | int rv = _PT_PTHREAD_COND_INIT(cv->cv, _pt_cvar_attr); |
301 | 13 | PR_ASSERT(0 == rv); |
302 | 13 | if (0 == rv) { |
303 | 13 | cv->lock = lock; |
304 | 13 | cv->notify_pending = 0; |
305 | 13 | # if defined(DEBUG) |
306 | 13 | pt_debug.cvars_created += 1; |
307 | 13 | # endif |
308 | 13 | } else { |
309 | 0 | PR_DELETE(cv); |
310 | 0 | cv = NULL; |
311 | 0 | } |
312 | 13 | } |
313 | 13 | return cv; |
314 | 13 | } /* PR_NewCondVar */ |
315 | | |
316 | 9 | PR_IMPLEMENT(void) PR_DestroyCondVar(PRCondVar* cvar) { |
317 | 9 | if (0 > PR_ATOMIC_DECREMENT(&cvar->notify_pending)) { |
318 | 9 | PRIntn rv = pthread_cond_destroy(&cvar->cv); |
319 | 9 | # if defined(DEBUG) |
320 | 9 | PR_ASSERT(0 == rv); |
321 | 9 | memset(cvar, 0xaf, sizeof(PRCondVar)); |
322 | 9 | pt_debug.cvars_destroyed += 1; |
323 | | # else |
324 | | (void)rv; |
325 | | # endif |
326 | 9 | PR_Free(cvar); |
327 | 9 | } |
328 | 9 | } /* PR_DestroyCondVar */ |
329 | | |
330 | 0 | PR_IMPLEMENT(PRStatus) PR_WaitCondVar(PRCondVar* cvar, PRIntervalTime timeout) { |
331 | 0 | PRIntn rv; |
332 | 0 | PRThread* thred = PR_GetCurrentThread(); |
333 | |
334 | 0 | PR_ASSERT(cvar != NULL); |
335 | | /* We'd better be locked */ |
336 | 0 | PR_ASSERT(_PT_PTHREAD_MUTEX_IS_LOCKED(cvar->lock->mutex)); |
337 | 0 | PR_ASSERT(PR_TRUE == cvar->lock->locked); |
338 | | /* and it better be by us */ |
339 | 0 | PR_ASSERT(pthread_equal(cvar->lock->owner, pthread_self())); |
340 | |
341 | 0 | if (_PT_THREAD_INTERRUPTED(thred)) { |
342 | 0 | goto aborted; |
343 | 0 | } |
344 | | |
345 | | /* |
346 | | * The thread waiting is used for PR_Interrupt |
347 | | */ |
348 | 0 | thred->waiting = cvar; /* this is where we're waiting */ |
349 | | |
350 | | /* |
351 | | * If we have pending notifies, post them now. |
352 | | * |
353 | | * This is not optimal. We're going to post these notifies |
354 | | * while we're holding the lock. That means on MP systems |
355 | | * that they are going to collide for the lock that we will |
356 | | * hold until we actually wait. |
357 | | */ |
358 | 0 | if (0 != cvar->lock->notified.length) { |
359 | 0 | pt_PostNotifies(cvar->lock, PR_FALSE); |
360 | 0 | } |
361 | | |
362 | | /* |
363 | | * We're surrendering the lock, so clear out the locked field. |
364 | | */ |
365 | 0 | cvar->lock->locked = PR_FALSE; |
366 | |
367 | 0 | if (timeout == PR_INTERVAL_NO_TIMEOUT) { |
368 | 0 | rv = pthread_cond_wait(&cvar->cv, &cvar->lock->mutex); |
369 | 0 | } else { |
370 | 0 | rv = pt_TimedWait(&cvar->cv, &cvar->lock->mutex, timeout); |
371 | 0 | } |
372 | | |
373 | | /* We just got the lock back - this better be empty */ |
374 | 0 | PR_ASSERT(PR_FALSE == cvar->lock->locked); |
375 | 0 | cvar->lock->locked = PR_TRUE; |
376 | 0 | cvar->lock->owner = pthread_self(); |
377 | |
378 | 0 | PR_ASSERT(0 == cvar->lock->notified.length); |
379 | 0 | thred->waiting = NULL; /* and now we're not */ |
380 | 0 | if (_PT_THREAD_INTERRUPTED(thred)) { |
381 | 0 | goto aborted; |
382 | 0 | } |
383 | 0 | if (rv != 0) { |
384 | 0 | _PR_MD_MAP_DEFAULT_ERROR(rv); |
385 | 0 | return PR_FAILURE; |
386 | 0 | } |
387 | 0 | return PR_SUCCESS; |
388 | | |
389 | 0 | aborted: |
390 | 0 | PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); |
391 | 0 | thred->state &= ~PT_THREAD_ABORTED; |
392 | 0 | return PR_FAILURE; |
393 | 0 | } /* PR_WaitCondVar */ |
394 | | |
395 | 0 | PR_IMPLEMENT(PRStatus) PR_NotifyCondVar(PRCondVar* cvar) { |
396 | 0 | PR_ASSERT(cvar != NULL); |
397 | 0 | pt_PostNotifyToCvar(cvar, PR_FALSE); |
398 | 0 | return PR_SUCCESS; |
399 | 0 | } /* PR_NotifyCondVar */ |
400 | | |
401 | 9 | PR_IMPLEMENT(PRStatus) PR_NotifyAllCondVar(PRCondVar* cvar) { |
402 | 9 | PR_ASSERT(cvar != NULL); |
403 | 9 | pt_PostNotifyToCvar(cvar, PR_TRUE); |
404 | 9 | return PR_SUCCESS; |
405 | 9 | } /* PR_NotifyAllCondVar */ |
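
A producer/consumer sketch of the condition-variable API above (illustrative; the queue and names are hypothetical). Note that PR_NotifyCondVar() only posts the notify to the lock's _PT_Notified list; the underlying pthread_cond_signal/broadcast is issued when PR_Unlock() (or PR_WaitCondVar()) runs pt_PostNotifies():

    #include "prlock.h"
    #include "prcvar.h"

    static PRLock* qlock;
    static PRCondVar* qcv;
    static int qdepth;              /* protected by qlock */

    void q_init(void) {
      qlock = PR_NewLock();
      qcv = PR_NewCondVar(qlock);   /* the cvar is bound to qlock for life */
    }

    void q_put(void) {
      PR_Lock(qlock);
      qdepth += 1;
      PR_NotifyCondVar(qcv);        /* deferred: recorded in qlock->notified */
      PR_Unlock(qlock);             /* the actual signal is delivered here */
    }

    void q_get(void) {
      PR_Lock(qlock);
      while (qdepth == 0) {         /* guard against spurious wakeups */
        PR_WaitCondVar(qcv, PR_INTERVAL_NO_TIMEOUT);
      }
      qdepth -= 1;
      PR_Unlock(qlock);
    }
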
406 | | |
407 | | /**************************************************************/ |
408 | | /**************************************************************/ |
409 | | /***************************MONITORS***************************/ |
410 | | /**************************************************************/ |
411 | | /**************************************************************/ |
412 | | |
413 | | /* |
414 | | * Notifies just get posted to the monitor. The actual notification is done |
415 | | * when the monitor is fully exited so that MP systems don't contend for a |
416 | | * monitor that they can't enter. |
417 | | */ |
418 | 0 | static void pt_PostNotifyToMonitor(PRMonitor* mon, PRBool broadcast) { |
419 | 0 | PR_ASSERT(NULL != mon); |
420 | 0 | PR_ASSERT_CURRENT_THREAD_IN_MONITOR(mon); |
421 | | |
422 | | /* mon->notifyTimes is protected by the monitor, so we don't need to |
423 | | * acquire mon->lock. |
424 | | */ |
425 | 0 | if (broadcast) { |
426 | 0 | mon->notifyTimes = -1; |
427 | 0 | } else if (-1 != mon->notifyTimes) { |
428 | 0 | mon->notifyTimes += 1; |
429 | 0 | } |
430 | 0 | } /* pt_PostNotifyToMonitor */ |
431 | | |
432 | 0 | static void pt_PostNotifiesFromMonitor(pthread_cond_t* cv, PRIntn times) { |
433 | 0 | PRIntn rv; |
434 | | |
435 | | /* |
436 | | * Time to actually notify any waits that were affected while the monitor |
437 | | * was entered. |
438 | | */ |
439 | 0 | PR_ASSERT(NULL != cv); |
440 | 0 | PR_ASSERT(0 != times); |
441 | 0 | if (-1 == times) { |
442 | 0 | rv = pthread_cond_broadcast(cv); |
443 | 0 | PR_ASSERT(0 == rv); |
444 | 0 | } else { |
445 | 0 | while (times-- > 0) { |
446 | 0 | rv = pthread_cond_signal(cv); |
447 | 0 | PR_ASSERT(0 == rv); |
448 | 0 | } |
449 | 0 | } |
450 | 0 | } /* pt_PostNotifiesFromMonitor */ |
451 | | |
452 | 11 | PR_IMPLEMENT(PRMonitor*) PR_NewMonitor(void) { |
453 | 11 | PRMonitor* mon; |
454 | 11 | int rv; |
455 | | |
456 | 11 | if (!_pr_initialized) { |
457 | 0 | _PR_ImplicitInitialization(); |
458 | 0 | } |
459 | | |
460 | 11 | mon = PR_NEWZAP(PRMonitor); |
461 | 11 | if (mon == NULL) { |
462 | 0 | PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0); |
463 | 0 | return NULL; |
464 | 0 | } |
465 | | |
466 | 11 | rv = _PT_PTHREAD_MUTEX_INIT(mon->lock, _pt_mattr); |
467 | 11 | PR_ASSERT(0 == rv); |
468 | 11 | if (0 != rv) { |
469 | 0 | goto error1; |
470 | 0 | } |
471 | | |
472 | 11 | _PT_PTHREAD_INVALIDATE_THR_HANDLE(mon->owner); |
473 | | |
474 | 11 | rv = _PT_PTHREAD_COND_INIT(mon->entryCV, _pt_cvar_attr); |
475 | 11 | PR_ASSERT(0 == rv); |
476 | 11 | if (0 != rv) { |
477 | 0 | goto error2; |
478 | 0 | } |
479 | | |
480 | 11 | rv = _PT_PTHREAD_COND_INIT(mon->waitCV, _pt_cvar_attr); |
481 | 11 | PR_ASSERT(0 == rv); |
482 | 11 | if (0 != rv) { |
483 | 0 | goto error3; |
484 | 0 | } |
485 | | |
486 | 11 | mon->notifyTimes = 0; |
487 | 11 | mon->entryCount = 0; |
488 | 11 | mon->refCount = 1; |
489 | 11 | mon->name = NULL; |
490 | 11 | return mon; |
491 | | |
492 | 0 | error3: |
493 | 0 | pthread_cond_destroy(&mon->entryCV); |
494 | 0 | error2: |
495 | 0 | pthread_mutex_destroy(&mon->lock); |
496 | 0 | error1: |
497 | 0 | PR_Free(mon); |
498 | 0 | _PR_MD_MAP_DEFAULT_ERROR(rv); |
499 | 0 | return NULL; |
500 | 0 | } /* PR_NewMonitor */ |
501 | | |
502 | 1 | PR_IMPLEMENT(PRMonitor*) PR_NewNamedMonitor(const char* name) { |
503 | 1 | PRMonitor* mon = PR_NewMonitor(); |
504 | 1 | if (mon) { |
505 | 1 | mon->name = name; |
506 | 1 | } |
507 | 1 | return mon; |
508 | 1 | } |
509 | | |
510 | 4 | PR_IMPLEMENT(void) PR_DestroyMonitor(PRMonitor* mon) { |
511 | 4 | int rv; |
512 | | |
513 | 4 | PR_ASSERT(mon != NULL); |
514 | 4 | if (PR_ATOMIC_DECREMENT(&mon->refCount) == 0) { |
515 | 1 | rv = pthread_cond_destroy(&mon->waitCV); |
516 | 1 | PR_ASSERT(0 == rv); |
517 | 1 | rv = pthread_cond_destroy(&mon->entryCV); |
518 | 1 | PR_ASSERT(0 == rv); |
519 | 1 | rv = pthread_mutex_destroy(&mon->lock); |
520 | 1 | PR_ASSERT(0 == rv); |
521 | 1 | # if defined(DEBUG) |
522 | 1 | memset(mon, 0xaf, sizeof(PRMonitor)); |
523 | 1 | # endif |
524 | 1 | PR_Free(mon); |
525 | 1 | } |
526 | 4 | } /* PR_DestroyMonitor */ |
527 | | |
528 | | /* The GC uses this; it is quite arguably a bad interface. I'm just |
529 | | * duplicating it for now - XXXMB |
530 | | */ |
531 | 0 | PR_IMPLEMENT(PRIntn) PR_GetMonitorEntryCount(PRMonitor* mon) { |
532 | 0 | pthread_t self = pthread_self(); |
533 | 0 | PRIntn rv; |
534 | 0 | PRIntn count = 0; |
535 | |
|
536 | 0 | rv = pthread_mutex_lock(&mon->lock); |
537 | 0 | PR_ASSERT(0 == rv); |
538 | 0 | if (pthread_equal(mon->owner, self)) { |
539 | 0 | count = mon->entryCount; |
540 | 0 | } |
541 | 0 | rv = pthread_mutex_unlock(&mon->lock); |
542 | 0 | PR_ASSERT(0 == rv); |
543 | 0 | return count; |
544 | 0 | } |
545 | | |
546 | 0 | PR_IMPLEMENT(void) PR_AssertCurrentThreadInMonitor(PRMonitor* mon) { |
547 | 0 | # if defined(DEBUG) || defined(FORCE_PR_ASSERT) |
548 | 0 | PRIntn rv; |
549 | |
550 | 0 | rv = pthread_mutex_lock(&mon->lock); |
551 | 0 | PR_ASSERT(0 == rv); |
552 | 0 | PR_ASSERT(mon->entryCount != 0 && pthread_equal(mon->owner, pthread_self())); |
553 | 0 | rv = pthread_mutex_unlock(&mon->lock); |
554 | 0 | PR_ASSERT(0 == rv); |
555 | 0 | # endif |
556 | 0 | } |
557 | | |
558 | 4 | PR_IMPLEMENT(void) PR_EnterMonitor(PRMonitor* mon) { |
559 | 4 | pthread_t self = pthread_self(); |
560 | 4 | PRIntn rv; |
561 | | |
562 | 4 | PR_ASSERT(mon != NULL); |
563 | 4 | rv = pthread_mutex_lock(&mon->lock); |
564 | 4 | PR_ASSERT(0 == rv); |
565 | 4 | if (mon->entryCount != 0) { |
566 | 1 | if (pthread_equal(mon->owner, self)) { |
567 | 1 | goto done; |
568 | 1 | } |
569 | 0 | while (mon->entryCount != 0) { |
570 | 0 | rv = pthread_cond_wait(&mon->entryCV, &mon->lock); |
571 | 0 | PR_ASSERT(0 == rv); |
572 | 0 | } |
573 | 0 | } |
574 | | /* and now I have the monitor */ |
575 | 3 | PR_ASSERT(0 == mon->notifyTimes); |
576 | 3 | PR_ASSERT(_PT_PTHREAD_THR_HANDLE_IS_INVALID(mon->owner)); |
577 | 3 | _PT_PTHREAD_COPY_THR_HANDLE(self, mon->owner); |
578 | | |
579 | 4 | done: |
580 | 4 | mon->entryCount += 1; |
581 | 4 | rv = pthread_mutex_unlock(&mon->lock); |
582 | 4 | PR_ASSERT(0 == rv); |
583 | 4 | } /* PR_EnterMonitor */ |
584 | | |
585 | 4 | PR_IMPLEMENT(PRStatus) PR_ExitMonitor(PRMonitor* mon) { |
586 | 4 | pthread_t self = pthread_self(); |
587 | 4 | PRIntn rv; |
588 | 4 | PRBool notifyEntryWaiter = PR_FALSE; |
589 | 4 | PRIntn notifyTimes = 0; |
590 | | |
591 | 4 | PR_ASSERT(mon != NULL); |
592 | 4 | rv = pthread_mutex_lock(&mon->lock); |
593 | 4 | PR_ASSERT(0 == rv); |
594 | | /* the entries should be > 0 and we'd better be the owner */ |
595 | 4 | PR_ASSERT(mon->entryCount > 0); |
596 | 4 | PR_ASSERT(pthread_equal(mon->owner, self)); |
597 | 4 | if (mon->entryCount == 0 || !pthread_equal(mon->owner, self)) { |
598 | 0 | rv = pthread_mutex_unlock(&mon->lock); |
599 | 0 | PR_ASSERT(0 == rv); |
600 | 0 | return PR_FAILURE; |
601 | 0 | } |
602 | | |
603 | 4 | mon->entryCount -= 1; /* reduce by one */ |
604 | 4 | if (mon->entryCount == 0) { |
605 | | /* and if it transitioned to zero - notify an entry waiter */ |
606 | | /* make the owner unknown */ |
607 | 3 | _PT_PTHREAD_INVALIDATE_THR_HANDLE(mon->owner); |
608 | 3 | notifyEntryWaiter = PR_TRUE; |
609 | 3 | notifyTimes = mon->notifyTimes; |
610 | 3 | mon->notifyTimes = 0; |
611 | | /* We will access the members of 'mon' after unlocking mon->lock. |
612 | | * Add a reference. */ |
613 | 3 | PR_ATOMIC_INCREMENT(&mon->refCount); |
614 | 3 | } |
615 | 4 | rv = pthread_mutex_unlock(&mon->lock); |
616 | 4 | PR_ASSERT(0 == rv); |
617 | 4 | if (notifyEntryWaiter) { |
618 | 3 | if (notifyTimes) { |
619 | 0 | pt_PostNotifiesFromMonitor(&mon->waitCV, notifyTimes); |
620 | 0 | } |
621 | 3 | rv = pthread_cond_signal(&mon->entryCV); |
622 | 3 | PR_ASSERT(0 == rv); |
623 | | /* We are done accessing the members of 'mon'. Release the |
624 | | * reference. */ |
625 | 3 | PR_DestroyMonitor(mon); |
626 | 3 | } |
627 | 4 | return PR_SUCCESS; |
628 | 4 | } /* PR_ExitMonitor */ |
629 | | |
630 | 0 | PR_IMPLEMENT(PRStatus) PR_Wait(PRMonitor* mon, PRIntervalTime timeout) { |
631 | 0 | PRStatus rv; |
632 | 0 | PRUint32 saved_entries; |
633 | 0 | pthread_t saved_owner; |
634 | |
635 | 0 | PR_ASSERT(mon != NULL); |
636 | 0 | rv = pthread_mutex_lock(&mon->lock); |
637 | 0 | PR_ASSERT(0 == rv); |
638 | | /* the entries better be positive */ |
639 | 0 | PR_ASSERT(mon->entryCount > 0); |
640 | | /* and it better be owned by us */ |
641 | 0 | PR_ASSERT(pthread_equal(mon->owner, pthread_self())); |
642 | | |
643 | | /* tuck these away 'till later */ |
644 | 0 | saved_entries = mon->entryCount; |
645 | 0 | mon->entryCount = 0; |
646 | 0 | _PT_PTHREAD_COPY_THR_HANDLE(mon->owner, saved_owner); |
647 | 0 | _PT_PTHREAD_INVALIDATE_THR_HANDLE(mon->owner); |
648 | | /* |
649 | | * If we have pending notifies, post them now. |
650 | | * |
651 | | * This is not optimal. We're going to post these notifies |
652 | | * while we're holding the lock. That means on MP systems |
653 | | * that they are going to collide for the lock that we will |
654 | | * hold until we actually wait. |
655 | | */ |
656 | 0 | if (0 != mon->notifyTimes) { |
657 | 0 | pt_PostNotifiesFromMonitor(&mon->waitCV, mon->notifyTimes); |
658 | 0 | mon->notifyTimes = 0; |
659 | 0 | } |
660 | 0 | rv = pthread_cond_signal(&mon->entryCV); |
661 | 0 | PR_ASSERT(0 == rv); |
662 | |
663 | 0 | if (timeout == PR_INTERVAL_NO_TIMEOUT) { |
664 | 0 | rv = pthread_cond_wait(&mon->waitCV, &mon->lock); |
665 | 0 | } else { |
666 | 0 | rv = pt_TimedWait(&mon->waitCV, &mon->lock, timeout); |
667 | 0 | } |
668 | 0 | PR_ASSERT(0 == rv); |
669 | |
670 | 0 | while (mon->entryCount != 0) { |
671 | 0 | rv = pthread_cond_wait(&mon->entryCV, &mon->lock); |
672 | 0 | PR_ASSERT(0 == rv); |
673 | 0 | } |
674 | 0 | PR_ASSERT(0 == mon->notifyTimes); |
675 | | /* reinstate the interesting information */ |
676 | 0 | mon->entryCount = saved_entries; |
677 | 0 | _PT_PTHREAD_COPY_THR_HANDLE(saved_owner, mon->owner); |
678 | |
679 | 0 | rv = pthread_mutex_unlock(&mon->lock); |
680 | 0 | PR_ASSERT(0 == rv); |
681 | 0 | return rv; |
682 | 0 | } /* PR_Wait */ |
683 | | |
684 | 0 | PR_IMPLEMENT(PRStatus) PR_Notify(PRMonitor* mon) { |
685 | 0 | pt_PostNotifyToMonitor(mon, PR_FALSE); |
686 | 0 | return PR_SUCCESS; |
687 | 0 | } /* PR_Notify */ |
688 | | |
689 | 0 | PR_IMPLEMENT(PRStatus) PR_NotifyAll(PRMonitor* mon) { |
690 | 0 | pt_PostNotifyToMonitor(mon, PR_TRUE); |
691 | 0 | return PR_SUCCESS; |
692 | 0 | } /* PR_NotifyAll */ |
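
A sketch of the monitor API above (illustrative; names are hypothetical). PR_EnterMonitor() is re-entrant for the owning thread, and, as with condition variables, PR_Notify()/PR_NotifyAll() only bump mon->notifyTimes; the signal is posted when the monitor is fully exited (or inside PR_Wait()):

    #include "prmon.h"

    static PRMonitor* mon;
    static PRBool ready;            /* protected by the monitor */

    void mon_init(void) { mon = PR_NewMonitor(); }

    void waiter(void) {
      PR_EnterMonitor(mon);
      while (!ready) {
        PR_Wait(mon, PR_INTERVAL_NO_TIMEOUT);   /* releases, then re-enters */
      }
      PR_ExitMonitor(mon);
    }

    void signaller(void) {
      PR_EnterMonitor(mon);
      PR_EnterMonitor(mon);         /* nested entry: entryCount becomes 2 */
      ready = PR_TRUE;
      PR_Notify(mon);               /* recorded in notifyTimes */
      PR_ExitMonitor(mon);          /* still owned: entryCount back to 1 */
      PR_ExitMonitor(mon);          /* count hits 0: waitCV/entryCV are signaled */
    }
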
693 | | |
694 | | /**************************************************************/ |
695 | | /**************************************************************/ |
696 | | /**************************SEMAPHORES**************************/ |
697 | | /**************************************************************/ |
698 | | /**************************************************************/ |
699 | 0 | PR_IMPLEMENT(void) PR_PostSem(PRSemaphore* semaphore) { |
700 | 0 | static PRBool unwarned = PR_TRUE; |
701 | 0 | if (unwarned) |
702 | 0 | unwarned = _PR_Obsolete("PR_PostSem", "locks & condition variables"); |
703 | 0 | PR_Lock(semaphore->cvar->lock); |
704 | 0 | PR_NotifyCondVar(semaphore->cvar); |
705 | 0 | semaphore->count += 1; |
706 | 0 | PR_Unlock(semaphore->cvar->lock); |
707 | 0 | } /* PR_PostSem */ |
708 | | |
709 | 0 | PR_IMPLEMENT(PRStatus) PR_WaitSem(PRSemaphore* semaphore) { |
710 | 0 | PRStatus status = PR_SUCCESS; |
711 | 0 | static PRBool unwarned = PR_TRUE; |
712 | 0 | if (unwarned) |
713 | 0 | unwarned = _PR_Obsolete("PR_WaitSem", "locks & condition variables"); |
714 | 0 | PR_Lock(semaphore->cvar->lock); |
715 | 0 | while ((semaphore->count == 0) && (PR_SUCCESS == status)) { |
716 | 0 | status = PR_WaitCondVar(semaphore->cvar, PR_INTERVAL_NO_TIMEOUT); |
717 | 0 | } |
718 | 0 | if (PR_SUCCESS == status) { |
719 | 0 | semaphore->count -= 1; |
720 | 0 | } |
721 | 0 | PR_Unlock(semaphore->cvar->lock); |
722 | 0 | return status; |
723 | 0 | } /* PR_WaitSem */ |
724 | | |
725 | 0 | PR_IMPLEMENT(void) PR_DestroySem(PRSemaphore* semaphore) { |
726 | 0 | static PRBool unwarned = PR_TRUE; |
727 | 0 | if (unwarned) |
728 | 0 | unwarned = _PR_Obsolete("PR_DestroySem", "locks & condition variables"); |
729 | 0 | PR_DestroyLock(semaphore->cvar->lock); |
730 | 0 | PR_DestroyCondVar(semaphore->cvar); |
731 | 0 | PR_Free(semaphore); |
732 | 0 | } /* PR_DestroySem */ |
733 | | |
734 | 0 | PR_IMPLEMENT(PRSemaphore*) PR_NewSem(PRUintn value) { |
735 | 0 | PRSemaphore* semaphore; |
736 | 0 | static PRBool unwarned = PR_TRUE; |
737 | 0 | if (!_pr_initialized) { |
738 | 0 | _PR_ImplicitInitialization(); |
739 | 0 | } |
740 | |
741 | 0 | if (unwarned) |
742 | 0 | unwarned = _PR_Obsolete("PR_NewSem", "locks & condition variables"); |
743 | |
744 | 0 | semaphore = PR_NEWZAP(PRSemaphore); |
745 | 0 | if (NULL != semaphore) { |
746 | 0 | PRLock* lock = PR_NewLock(); |
747 | 0 | if (NULL != lock) { |
748 | 0 | semaphore->cvar = PR_NewCondVar(lock); |
749 | 0 | if (NULL != semaphore->cvar) { |
750 | 0 | semaphore->count = value; |
751 | 0 | return semaphore; |
752 | 0 | } |
753 | 0 | PR_DestroyLock(lock); |
754 | 0 | } |
755 | 0 | PR_Free(semaphore); |
756 | 0 | } |
757 | 0 | return NULL; |
758 | 0 | } |
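
The counting-semaphore API above is obsolete (every entry point funnels through _PR_Obsolete()), but for completeness, a minimal sketch of how it was used, with a hypothetical producer/consumer pair:

    #include "obsolete/prsem.h"

    static PRSemaphore* sem;

    void sem_setup(void) { sem = PR_NewSem(0); }        /* initial count 0 */

    void producer(void) {
      PR_PostSem(sem);              /* count += 1, notifies the internal cvar */
    }

    void consumer(void) {
      if (PR_WaitSem(sem) == PR_SUCCESS) {              /* blocks until count > 0 */
        /* ... consume one unit ... */
      }
    }

    void sem_teardown(void) { PR_DestroySem(sem); }
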
759 | | |
760 | | /* |
761 | | * Define the interprocess named semaphore functions. |
762 | | * There are three implementations: |
763 | | * 1. POSIX semaphore based; |
764 | | * 2. System V semaphore based; |
765 | | * 3. unsupported (fails with PR_NOT_IMPLEMENTED_ERROR). |
766 | | */ |
767 | | |
768 | | # ifdef _PR_HAVE_POSIX_SEMAPHORES |
769 | | # include <fcntl.h> |
770 | | |
771 | | PR_IMPLEMENT(PRSem*) |
772 | | PR_OpenSemaphore(const char* name, PRIntn flags, PRIntn mode, PRUintn value) { |
773 | | PRSem* sem; |
774 | | char osname[PR_IPC_NAME_SIZE]; |
775 | | |
776 | | if (_PR_MakeNativeIPCName(name, osname, sizeof(osname), _PRIPCSem) == |
777 | | PR_FAILURE) { |
778 | | return NULL; |
779 | | } |
780 | | |
781 | | sem = PR_NEW(PRSem); |
782 | | if (NULL == sem) { |
783 | | PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0); |
784 | | return NULL; |
785 | | } |
786 | | |
787 | | if (flags & PR_SEM_CREATE) { |
788 | | int oflag = O_CREAT; |
789 | | |
790 | | if (flags & PR_SEM_EXCL) { |
791 | | oflag |= O_EXCL; |
792 | | } |
793 | | sem->sem = sem_open(osname, oflag, mode, value); |
794 | | } else { |
795 | | # ifdef HPUX |
796 | | /* Pass 0 as the mode and value arguments to work around a bug. */ |
797 | | sem->sem = sem_open(osname, 0, 0, 0); |
798 | | # else |
799 | | sem->sem = sem_open(osname, 0); |
800 | | # endif |
801 | | } |
802 | | if ((sem_t*)-1 == sem->sem) { |
803 | | _PR_MD_MAP_DEFAULT_ERROR(errno); |
804 | | PR_Free(sem); |
805 | | return NULL; |
806 | | } |
807 | | return sem; |
808 | | } |
809 | | |
810 | | PR_IMPLEMENT(PRStatus) PR_WaitSemaphore(PRSem* sem) { |
811 | | int rv; |
812 | | rv = sem_wait(sem->sem); |
813 | | if (0 != rv) { |
814 | | _PR_MD_MAP_DEFAULT_ERROR(errno); |
815 | | return PR_FAILURE; |
816 | | } |
817 | | return PR_SUCCESS; |
818 | | } |
819 | | |
820 | | PR_IMPLEMENT(PRStatus) PR_PostSemaphore(PRSem* sem) { |
821 | | int rv; |
822 | | rv = sem_post(sem->sem); |
823 | | if (0 != rv) { |
824 | | _PR_MD_MAP_DEFAULT_ERROR(errno); |
825 | | return PR_FAILURE; |
826 | | } |
827 | | return PR_SUCCESS; |
828 | | } |
829 | | |
830 | | PR_IMPLEMENT(PRStatus) PR_CloseSemaphore(PRSem* sem) { |
831 | | int rv; |
832 | | rv = sem_close(sem->sem); |
833 | | if (0 != rv) { |
834 | | _PR_MD_MAP_DEFAULT_ERROR(errno); |
835 | | return PR_FAILURE; |
836 | | } |
837 | | PR_Free(sem); |
838 | | return PR_SUCCESS; |
839 | | } |
840 | | |
841 | | PR_IMPLEMENT(PRStatus) PR_DeleteSemaphore(const char* name) { |
842 | | int rv; |
843 | | char osname[PR_IPC_NAME_SIZE]; |
844 | | |
845 | | if (_PR_MakeNativeIPCName(name, osname, sizeof(osname), _PRIPCSem) == |
846 | | PR_FAILURE) { |
847 | | return PR_FAILURE; |
848 | | } |
849 | | rv = sem_unlink(osname); |
850 | | if (0 != rv) { |
851 | | _PR_MD_MAP_DEFAULT_ERROR(errno); |
852 | | return PR_FAILURE; |
853 | | } |
854 | | return PR_SUCCESS; |
855 | | } |
856 | | |
857 | | # elif defined(_PR_HAVE_SYSV_SEMAPHORES) |
858 | | |
859 | | # include <fcntl.h> |
860 | | # include <sys/sem.h> |
861 | | |
862 | | /* |
863 | | * From the semctl(2) man page in glibc 2.0 |
864 | | */ |
865 | | # if (defined(__GNU_LIBRARY__) && !defined(_SEM_SEMUN_UNDEFINED)) || \ |
866 | | (defined(FREEBSD) && __FreeBSD_version < 1200059) || \ |
867 | | defined(OPENBSD) || defined(DARWIN) |
868 | | /* union semun is defined by including <sys/sem.h> */ |
869 | | # else |
870 | | /* according to X/OPEN we have to define it ourselves */ |
871 | | union semun { |
872 | | int val; |
873 | | struct semid_ds* buf; |
874 | | unsigned short* array; |
875 | | }; |
876 | | # endif |
877 | | |
878 | | /* |
879 | | * 'a' (97) is the final closing price of NSCP stock. |
880 | | */ |
881 | 0 | # define NSPR_IPC_KEY_ID 'a' /* the id argument for ftok() */ |
882 | | |
883 | 0 | # define NSPR_SEM_MODE 0666 |
884 | | |
885 | | PR_IMPLEMENT(PRSem*) |
886 | 0 | PR_OpenSemaphore(const char* name, PRIntn flags, PRIntn mode, PRUintn value) { |
887 | 0 | PRSem* sem; |
888 | 0 | key_t key; |
889 | 0 | union semun arg; |
890 | 0 | struct sembuf sop; |
891 | 0 | struct semid_ds seminfo; |
892 | 0 | # define MAX_TRIES 60 |
893 | 0 | PRIntn i; |
894 | 0 | char osname[PR_IPC_NAME_SIZE]; |
895 | |
896 | 0 | if (_PR_MakeNativeIPCName(name, osname, sizeof(osname), _PRIPCSem) == |
897 | 0 | PR_FAILURE) { |
898 | 0 | return NULL; |
899 | 0 | } |
900 | | |
901 | | /* Make sure the file exists before calling ftok. */ |
902 | 0 | if (flags & PR_SEM_CREATE) { |
903 | 0 | int osfd = open(osname, O_RDWR | O_CREAT, mode); |
904 | 0 | if (-1 == osfd) { |
905 | 0 | _PR_MD_MAP_OPEN_ERROR(errno); |
906 | 0 | return NULL; |
907 | 0 | } |
908 | 0 | if (close(osfd) == -1) { |
909 | 0 | _PR_MD_MAP_CLOSE_ERROR(errno); |
910 | 0 | return NULL; |
911 | 0 | } |
912 | 0 | } |
913 | 0 | key = ftok(osname, NSPR_IPC_KEY_ID); |
914 | 0 | if ((key_t)-1 == key) { |
915 | 0 | _PR_MD_MAP_DEFAULT_ERROR(errno); |
916 | 0 | return NULL; |
917 | 0 | } |
918 | | |
919 | 0 | sem = PR_NEW(PRSem); |
920 | 0 | if (NULL == sem) { |
921 | 0 | PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0); |
922 | 0 | return NULL; |
923 | 0 | } |
924 | | |
925 | 0 | if (flags & PR_SEM_CREATE) { |
926 | 0 | sem->semid = semget(key, 1, mode | IPC_CREAT | IPC_EXCL); |
927 | 0 | if (sem->semid >= 0) { |
928 | | /* creator of a semaphore is responsible for initializing it */ |
929 | 0 | arg.val = 0; |
930 | 0 | if (semctl(sem->semid, 0, SETVAL, arg) == -1) { |
931 | 0 | _PR_MD_MAP_DEFAULT_ERROR(errno); |
932 | 0 | PR_Free(sem); |
933 | 0 | return NULL; |
934 | 0 | } |
935 | | /* call semop to set sem_otime to nonzero */ |
936 | 0 | sop.sem_num = 0; |
937 | 0 | sop.sem_op = value; |
938 | 0 | sop.sem_flg = 0; |
939 | 0 | if (semop(sem->semid, &sop, 1) == -1) { |
940 | 0 | _PR_MD_MAP_DEFAULT_ERROR(errno); |
941 | 0 | PR_Free(sem); |
942 | 0 | return NULL; |
943 | 0 | } |
944 | 0 | return sem; |
945 | 0 | } |
946 | | |
947 | 0 | if (errno != EEXIST || flags & PR_SEM_EXCL) { |
948 | 0 | _PR_MD_MAP_DEFAULT_ERROR(errno); |
949 | 0 | PR_Free(sem); |
950 | 0 | return NULL; |
951 | 0 | } |
952 | 0 | } |
953 | | |
954 | 0 | sem->semid = semget(key, 1, NSPR_SEM_MODE); |
955 | 0 | if (sem->semid == -1) { |
956 | 0 | _PR_MD_MAP_DEFAULT_ERROR(errno); |
957 | 0 | PR_Free(sem); |
958 | 0 | return NULL; |
959 | 0 | } |
960 | 0 | for (i = 0; i < MAX_TRIES; i++) { |
961 | 0 | arg.buf = &seminfo; |
962 | 0 | semctl(sem->semid, 0, IPC_STAT, arg); |
963 | 0 | if (seminfo.sem_otime != 0) { |
964 | 0 | break; |
965 | 0 | } |
966 | 0 | sleep(1); |
967 | 0 | } |
968 | 0 | if (i == MAX_TRIES) { |
969 | 0 | PR_SetError(PR_IO_TIMEOUT_ERROR, 0); |
970 | 0 | PR_Free(sem); |
971 | 0 | return NULL; |
972 | 0 | } |
973 | 0 | return sem; |
974 | 0 | } |
975 | | |
976 | 0 | PR_IMPLEMENT(PRStatus) PR_WaitSemaphore(PRSem* sem) { |
977 | 0 | struct sembuf sop; |
978 | |
979 | 0 | sop.sem_num = 0; |
980 | 0 | sop.sem_op = -1; |
981 | 0 | sop.sem_flg = 0; |
982 | 0 | if (semop(sem->semid, &sop, 1) == -1) { |
983 | 0 | _PR_MD_MAP_DEFAULT_ERROR(errno); |
984 | 0 | return PR_FAILURE; |
985 | 0 | } |
986 | 0 | return PR_SUCCESS; |
987 | 0 | } |
988 | | |
989 | 0 | PR_IMPLEMENT(PRStatus) PR_PostSemaphore(PRSem* sem) { |
990 | 0 | struct sembuf sop; |
991 | |
992 | 0 | sop.sem_num = 0; |
993 | 0 | sop.sem_op = 1; |
994 | 0 | sop.sem_flg = 0; |
995 | 0 | if (semop(sem->semid, &sop, 1) == -1) { |
996 | 0 | _PR_MD_MAP_DEFAULT_ERROR(errno); |
997 | 0 | return PR_FAILURE; |
998 | 0 | } |
999 | 0 | return PR_SUCCESS; |
1000 | 0 | } |
1001 | | |
1002 | 0 | PR_IMPLEMENT(PRStatus) PR_CloseSemaphore(PRSem* sem) { |
1003 | 0 | PR_Free(sem); |
1004 | 0 | return PR_SUCCESS; |
1005 | 0 | } |
1006 | | |
1007 | 0 | PR_IMPLEMENT(PRStatus) PR_DeleteSemaphore(const char* name) { |
1008 | 0 | key_t key; |
1009 | 0 | int semid; |
1010 | | /* On some systems (e.g., glibc 2.0) semctl requires a fourth argument */ |
1011 | 0 | union semun unused; |
1012 | 0 | char osname[PR_IPC_NAME_SIZE]; |
1013 | |
1014 | 0 | if (_PR_MakeNativeIPCName(name, osname, sizeof(osname), _PRIPCSem) == |
1015 | 0 | PR_FAILURE) { |
1016 | 0 | return PR_FAILURE; |
1017 | 0 | } |
1018 | 0 | key = ftok(osname, NSPR_IPC_KEY_ID); |
1019 | 0 | if ((key_t)-1 == key) { |
1020 | 0 | _PR_MD_MAP_DEFAULT_ERROR(errno); |
1021 | 0 | return PR_FAILURE; |
1022 | 0 | } |
1023 | 0 | if (unlink(osname) == -1) { |
1024 | 0 | _PR_MD_MAP_UNLINK_ERROR(errno); |
1025 | 0 | return PR_FAILURE; |
1026 | 0 | } |
1027 | 0 | semid = semget(key, 1, NSPR_SEM_MODE); |
1028 | 0 | if (-1 == semid) { |
1029 | 0 | _PR_MD_MAP_DEFAULT_ERROR(errno); |
1030 | 0 | return PR_FAILURE; |
1031 | 0 | } |
1032 | 0 | unused.val = 0; |
1033 | 0 | if (semctl(semid, 0, IPC_RMID, unused) == -1) { |
1034 | 0 | _PR_MD_MAP_DEFAULT_ERROR(errno); |
1035 | 0 | return PR_FAILURE; |
1036 | 0 | } |
1037 | 0 | return PR_SUCCESS; |
1038 | 0 | } |
1039 | | |
1040 | | # else /* neither POSIX nor System V semaphores are available */ |
1041 | | |
1042 | | PR_IMPLEMENT(PRSem*) |
1043 | | PR_OpenSemaphore(const char* name, PRIntn flags, PRIntn mode, PRUintn value) { |
1044 | | PR_SetError(PR_NOT_IMPLEMENTED_ERROR, 0); |
1045 | | return NULL; |
1046 | | } |
1047 | | |
1048 | | PR_IMPLEMENT(PRStatus) PR_WaitSemaphore(PRSem* sem) { |
1049 | | PR_SetError(PR_NOT_IMPLEMENTED_ERROR, 0); |
1050 | | return PR_FAILURE; |
1051 | | } |
1052 | | |
1053 | | PR_IMPLEMENT(PRStatus) PR_PostSemaphore(PRSem* sem) { |
1054 | | PR_SetError(PR_NOT_IMPLEMENTED_ERROR, 0); |
1055 | | return PR_FAILURE; |
1056 | | } |
1057 | | |
1058 | | PR_IMPLEMENT(PRStatus) PR_CloseSemaphore(PRSem* sem) { |
1059 | | PR_SetError(PR_NOT_IMPLEMENTED_ERROR, 0); |
1060 | | return PR_FAILURE; |
1061 | | } |
1062 | | |
1063 | | PR_IMPLEMENT(PRStatus) PR_DeleteSemaphore(const char* name) { |
1064 | | PR_SetError(PR_NOT_IMPLEMENTED_ERROR, 0); |
1065 | | return PR_FAILURE; |
1066 | | } |
1067 | | |
1068 | | # endif /* end of interprocess named semaphore functions */ |
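
A usage sketch for the interprocess named-semaphore API defined above, assuming one of the POSIX or System V backends is compiled in; the header name and the semaphore name are assumptions, not taken from this file:

    #include "pripcsem.h"   /* assumed NSPR header declaring PR_OpenSemaphore et al. */

    void ipc_example(void) {
      /* Create (or open) the semaphore with an initial value of 1. */
      PRSem* sem = PR_OpenSemaphore("my-app-sem", PR_SEM_CREATE, 0600, 1);
      if (NULL == sem) {
        return;                     /* the error was already mapped via _PR_MD_MAP_* */
      }
      if (PR_WaitSemaphore(sem) == PR_SUCCESS) {
        /* ... interprocess critical section ... */
        PR_PostSemaphore(sem);
      }
      PR_CloseSemaphore(sem);
      PR_DeleteSemaphore("my-app-sem");   /* remove the name once all users are done */
    }
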
1069 | | |
1070 | | /**************************************************************/ |
1071 | | /**************************************************************/ |
1072 | | /******************ROUTINES FOR DCE EMULATION******************/ |
1073 | | /**************************************************************/ |
1074 | | /**************************************************************/ |
1075 | | |
1076 | | # include "prpdce.h" |
1077 | | |
1078 | 0 | PR_IMPLEMENT(PRStatus) PRP_TryLock(PRLock* lock) { |
1079 | 0 | PRIntn rv = pthread_mutex_trylock(&lock->mutex); |
1080 | 0 | if (rv == 0) { |
1081 | 0 | PR_ASSERT(PR_FALSE == lock->locked); |
1082 | 0 | lock->locked = PR_TRUE; |
1083 | 0 | lock->owner = pthread_self(); |
1084 | 0 | } |
1085 | | /* XXX set error code? */ |
1086 | 0 | return (0 == rv) ? PR_SUCCESS : PR_FAILURE; |
1087 | 0 | } /* PRP_TryLock */ |
1088 | | |
1089 | 0 | PR_IMPLEMENT(PRCondVar*) PRP_NewNakedCondVar(void) { |
1090 | 0 | PRCondVar* cv; |
1091 | |
1092 | 0 | if (!_pr_initialized) { |
1093 | 0 | _PR_ImplicitInitialization(); |
1094 | 0 | } |
1095 | |
1096 | 0 | cv = PR_NEW(PRCondVar); |
1097 | 0 | if (cv != NULL) { |
1098 | 0 | int rv; |
1099 | 0 | rv = _PT_PTHREAD_COND_INIT(cv->cv, _pt_cvar_attr); |
1100 | 0 | PR_ASSERT(0 == rv); |
1101 | 0 | if (0 == rv) { |
1102 | 0 | cv->lock = _PR_NAKED_CV_LOCK; |
1103 | 0 | } else { |
1104 | 0 | PR_DELETE(cv); |
1105 | 0 | cv = NULL; |
1106 | 0 | } |
1107 | 0 | } |
1108 | 0 | return cv; |
1109 | 0 | } /* PRP_NewNakedCondVar */ |
1110 | | |
1111 | 0 | PR_IMPLEMENT(void) PRP_DestroyNakedCondVar(PRCondVar* cvar) { |
1112 | 0 | int rv; |
1113 | 0 | rv = pthread_cond_destroy(&cvar->cv); |
1114 | 0 | PR_ASSERT(0 == rv); |
1115 | 0 | # if defined(DEBUG) |
1116 | 0 | memset(cvar, 0xaf, sizeof(PRCondVar)); |
1117 | 0 | # endif |
1118 | 0 | PR_Free(cvar); |
1119 | 0 | } /* PRP_DestroyNakedCondVar */ |
1120 | | |
1121 | | PR_IMPLEMENT(PRStatus) |
1122 | 0 | PRP_NakedWait(PRCondVar* cvar, PRLock* ml, PRIntervalTime timeout) { |
1123 | 0 | PRIntn rv; |
1124 | 0 | PR_ASSERT(cvar != NULL); |
1125 | | /* XXX do we really want to assert this in a naked wait? */ |
1126 | 0 | PR_ASSERT(_PT_PTHREAD_MUTEX_IS_LOCKED(ml->mutex)); |
1127 | 0 | if (timeout == PR_INTERVAL_NO_TIMEOUT) { |
1128 | 0 | rv = pthread_cond_wait(&cvar->cv, &ml->mutex); |
1129 | 0 | } else { |
1130 | 0 | rv = pt_TimedWait(&cvar->cv, &ml->mutex, timeout); |
1131 | 0 | } |
1132 | 0 | if (rv != 0) { |
1133 | 0 | _PR_MD_MAP_DEFAULT_ERROR(rv); |
1134 | 0 | return PR_FAILURE; |
1135 | 0 | } |
1136 | 0 | return PR_SUCCESS; |
1137 | 0 | } /* PRP_NakedWait */ |
1138 | | |
1139 | 0 | PR_IMPLEMENT(PRStatus) PRP_NakedNotify(PRCondVar* cvar) { |
1140 | 0 | int rv; |
1141 | 0 | PR_ASSERT(cvar != NULL); |
1142 | 0 | rv = pthread_cond_signal(&cvar->cv); |
1143 | 0 | PR_ASSERT(0 == rv); |
1144 | 0 | return PR_SUCCESS; |
1145 | 0 | } /* PRP_NakedNotify */ |
1146 | | |
1147 | 0 | PR_IMPLEMENT(PRStatus) PRP_NakedBroadcast(PRCondVar* cvar) { |
1148 | 0 | int rv; |
1149 | 0 | PR_ASSERT(cvar != NULL); |
1150 | 0 | rv = pthread_cond_broadcast(&cvar->cv); |
1151 | 0 | PR_ASSERT(0 == rv); |
1152 | 0 | return PR_SUCCESS; |
1153 | 0 | } /* PRP_NakedBroadcast */ |
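
A sketch of the DCE-emulation ("naked") primitives above (illustrative only; in real use the wait and the notify come from different threads). A naked condition variable is not bound to a PRLock at creation; the caller supplies the lock at wait time, and notification is immediate rather than deferred:

    #include "prpdce.h"
    #include "prinrval.h"

    void dce_example(void) {
      PRLock* dlock = PR_NewLock();
      PRCondVar* dcv = PRP_NewNakedCondVar();   /* dcv->lock is _PR_NAKED_CV_LOCK */

      if (PRP_TryLock(dlock) == PR_SUCCESS) {   /* non-blocking acquire */
        /* Waits at most ~10 ms; a timeout still returns PR_SUCCESS. */
        PRP_NakedWait(dcv, dlock, PR_MillisecondsToInterval(10));
        PRP_NakedNotify(dcv);                   /* immediate pthread_cond_signal */
        PR_Unlock(dlock);
      }

      PRP_DestroyNakedCondVar(dcv);
      PR_DestroyLock(dlock);
    }
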
1154 | | |
1155 | | #endif /* defined(_PR_PTHREADS) */ |
1156 | | |
1157 | | /* ptsynch.c */ |