/src/nspr/pr/src/pthreads/ptthread.c
Line | Count | Source |
1 | | /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
3 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
4 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
5 | | |
6 | | /* |
7 | | ** File: ptthread.c |
8 | | ** Description: Implementation for threads using pthreads |
9 | | ** Exports: ptthread.h |
10 | | */ |
11 | | |
12 | | #if defined(_PR_PTHREADS) |
13 | | |
14 | | # include "prlog.h" |
15 | | # include "primpl.h" |
16 | | # include "prpdce.h" |
17 | | |
18 | | # include <pthread.h> |
19 | | # include <unistd.h> |
20 | | # include <string.h> |
21 | | # include <signal.h> |
22 | | # include <dlfcn.h> |
23 | | |
24 | | # if defined(OPENBSD) || defined(FREEBSD) || defined(DRAGONFLY) |
25 | | # include <pthread_np.h> |
26 | | # endif |
27 | | |
28 | | # if defined(ANDROID) |
29 | | # include <sys/prctl.h> |
30 | | # endif |
31 | | |
32 | | # ifdef _PR_NICE_PRIORITY_SCHEDULING |
33 | | # undef _POSIX_THREAD_PRIORITY_SCHEDULING |
34 | | # include <sys/resource.h> |
35 | | # ifndef HAVE_GETTID |
36 | | # define gettid() (syscall(SYS_gettid)) |
37 | | # endif |
38 | | # endif |
39 | | |
40 | | /* |
41 | | * Record whether or not we have the privilege to set the scheduling |
42 | | * policy and priority of threads. 0 means that privilege is available. |
43 | | * EPERM means that privilege is not available. |
44 | | */ |
45 | | |
46 | | static PRIntn pt_schedpriv = 0; |
47 | | extern PRLock* _pr_sleeplock; |
48 | | |
49 | | static struct _PT_Bookeeping { |
50 | | PRLock* ml; /* a lock to protect ourselves */ |
51 | | PRCondVar* cv; /* used to signal global things */ |
52 | | PRInt32 system, user; /* a count of the two different types */ |
53 | | PRUintn this_many; /* number of threads allowed for exit */ |
54 | | pthread_key_t key; /* thread private data key */ |
55 | | PRBool keyCreated; /* whether 'key' should be deleted */ |
56 | | PRThread *first, *last; /* list of threads we know about */ |
57 | | # if _POSIX_THREAD_PRIORITY_SCHEDULING > 0 |
58 | | PRInt32 minPrio, maxPrio; /* range of scheduling priorities */ |
59 | | # endif |
60 | | } pt_book = {0}; |
61 | | |
62 | | static void _pt_thread_death(void* arg); |
63 | | static void _pt_thread_death_internal(void* arg, PRBool callDestructors); |
64 | | static void init_pthread_gc_support(void); |
65 | | |
66 | | # if _POSIX_THREAD_PRIORITY_SCHEDULING > 0 |
67 | | static PRIntn pt_PriorityMap(PRThreadPriority pri) { |
68 | | # ifdef NTO |
69 | | /* This priority algorithm causes lots of problems on Neutrino; |
70 | | * for now I have just hard coded everything to run at priority 10 |
71 | | * until I can come up with a new algorithm. |
72 | | * Jerry.Kirk@Nexwarecorp.com |
73 | | */ |
74 | | return 10; |
75 | | # else |
76 | | return pt_book.minPrio + |
77 | | pri * (pt_book.maxPrio - pt_book.minPrio) / PR_PRIORITY_LAST; |
78 | | # endif |
79 | | } |
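 | | /* |
 | | ** Worked example (editorial addition, not in the original source): |
 | | ** pt_PriorityMap() interpolates an NSPR priority linearly into the pthread |
 | | ** scheduling range [minPrio, maxPrio]. Assuming hypothetical bounds |
 | | ** minPrio = 1, maxPrio = 99 and the usual PR_PRIORITY_LAST == 3: |
 | | ** |
 | | **   PR_PRIORITY_LOW    (0) -> 1 + 0 * 98 / 3 =  1 |
 | | **   PR_PRIORITY_NORMAL (1) -> 1 + 1 * 98 / 3 = 33 |
 | | **   PR_PRIORITY_HIGH   (2) -> 1 + 2 * 98 / 3 = 66 |
 | | **   PR_PRIORITY_URGENT (3) -> 1 + 3 * 98 / 3 = 99 |
 | | ** |
 | | ** The real bounds come from sched_get_priority_min()/_max() or the |
 | | ** PT_PRIO_MIN/PT_PRIO_MAX macros used in _PR_InitThreads(). |
 | | */ |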
80 | | # elif defined(_PR_NICE_PRIORITY_SCHEDULING) |
81 | | /* |
82 | | * This function maps higher priorities to lower nice values relative to the |
83 | | * nice value specified in the |nice| parameter. The corresponding relative |
84 | | * adjustments are: |
85 | | * |
86 | | * PR_PRIORITY_LOW +1 |
87 | | * PR_PRIORITY_NORMAL 0 |
88 | | * PR_PRIORITY_HIGH -1 |
89 | | * PR_PRIORITY_URGENT -2 |
90 | | */ |
91 | 0 | static int pt_RelativePriority(int nice, PRThreadPriority pri) { |
92 | 0 | return nice + (1 - pri); |
93 | 0 | } |
94 | | # endif |
95 | | |
96 | | /* |
97 | | ** Initialize a stack for a native pthread thread |
98 | | */ |
99 | 12 | static void _PR_InitializeStack(PRThreadStack* ts) { |
100 | 12 | if (ts && (ts->stackTop == 0)) { |
101 | 12 | ts->allocBase = (char*)&ts; |
102 | 12 | ts->allocSize = ts->stackSize; |
103 | | |
104 | | /* |
105 | | ** Set up stackTop and stackBottom values. |
106 | | */ |
107 | | # ifdef HAVE_STACK_GROWING_UP |
108 | | ts->stackBottom = ts->allocBase + ts->stackSize; |
109 | | ts->stackTop = ts->allocBase; |
110 | | # else |
111 | 12 | ts->stackTop = ts->allocBase; |
112 | 12 | ts->stackBottom = ts->allocBase - ts->stackSize; |
113 | 12 | # endif |
114 | 12 | } |
115 | 12 | } |
116 | | |
117 | 0 | static void* _pt_root(void* arg) { |
118 | 0 | PRIntn rv; |
119 | 0 | PRThread* thred = (PRThread*)arg; |
120 | 0 | PRBool detached = (thred->state & PT_THREAD_DETACHED) ? PR_TRUE : PR_FALSE; |
121 | 0 | pthread_t id = pthread_self(); |
122 | 0 | # ifdef _PR_NICE_PRIORITY_SCHEDULING |
123 | 0 | pid_t tid; |
124 | 0 | # endif |
125 | |
|
126 | 0 | # ifdef _PR_NICE_PRIORITY_SCHEDULING |
127 | | /* |
128 | | * We need to know the kernel thread ID of each thread in order to |
129 | | * set its nice value, hence we do it here instead of at creation time. |
130 | | */ |
131 | 0 | tid = gettid(); |
132 | 0 | errno = 0; |
133 | 0 | rv = getpriority(PRIO_PROCESS, 0); |
134 | | |
135 | | /* If we cannot read the main thread's nice value, don't try to change the |
136 | | * new thread's nice value. */ |
137 | 0 | if (errno == 0) { |
138 | 0 | setpriority(PRIO_PROCESS, tid, pt_RelativePriority(rv, thred->priority)); |
139 | 0 | } |
140 | 0 | # endif |
141 | | |
142 | | /* Set up the thread stack information */ |
143 | 0 | _PR_InitializeStack(thred->stack); |
144 | | |
145 | | /* |
146 | | * Set within the current thread the pointer to our object. |
147 | | * This object will be deleted when the thread terminates, |
148 | | * whether in a join or detached (see _PR_InitThreads()). |
149 | | */ |
150 | 0 | rv = pthread_setspecific(pt_book.key, thred); |
151 | 0 | PR_ASSERT(0 == rv); |
152 | | |
153 | | /* make the thread visible to the rest of the runtime */ |
154 | 0 | PR_Lock(pt_book.ml); |
155 | | /* |
156 | | * Both the parent thread and this new thread set thred->id. |
157 | | * The new thread must ensure that thred->id is set before |
158 | | * it executes its startFunc. The parent thread must ensure |
159 | | * that thred->id is set before PR_CreateThread() returns. |
160 | | * Both threads set thred->id while holding pt_book.ml and |
161 | | * use thred->idSet to ensure thred->id is written only once. |
162 | | */ |
163 | 0 | if (!thred->idSet) { |
164 | 0 | thred->id = id; |
165 | 0 | thred->idSet = PR_TRUE; |
166 | 0 | } else { |
167 | 0 | PR_ASSERT(pthread_equal(thred->id, id)); |
168 | 0 | } |
169 | |
|
170 | 0 | # ifdef _PR_NICE_PRIORITY_SCHEDULING |
171 | 0 | thred->tid = tid; |
172 | 0 | PR_NotifyAllCondVar(pt_book.cv); |
173 | 0 | # endif |
174 | | |
175 | | /* If this is a GCABLE thread, set its state appropriately */ |
176 | 0 | if (thred->suspend & PT_THREAD_SETGCABLE) { |
177 | 0 | thred->state |= PT_THREAD_GCABLE; |
178 | 0 | } |
179 | 0 | thred->suspend = 0; |
180 | |
|
181 | 0 | thred->prev = pt_book.last; |
182 | 0 | if (pt_book.last) { |
183 | 0 | pt_book.last->next = thred; |
184 | 0 | } else { |
185 | 0 | pt_book.first = thred; |
186 | 0 | } |
187 | 0 | thred->next = NULL; |
188 | 0 | pt_book.last = thred; |
189 | 0 | PR_Unlock(pt_book.ml); |
190 | |
|
191 | 0 | thred->startFunc(thred->arg); /* make visible to the client */ |
192 | | |
193 | | /* unhook the thread from the runtime */ |
194 | 0 | PR_Lock(pt_book.ml); |
195 | | /* |
196 | | * At this moment, PR_CreateThread() may not have set thred->id yet. |
197 | | * It is safe for a detached thread to free thred only after |
198 | | * PR_CreateThread() has accessed thred->id and thred->idSet. |
199 | | */ |
200 | 0 | if (detached) { |
201 | 0 | while (!thred->okToDelete) { |
202 | 0 | PR_WaitCondVar(pt_book.cv, PR_INTERVAL_NO_TIMEOUT); |
203 | 0 | } |
204 | 0 | } |
205 | |
|
206 | 0 | if (thred->state & PT_THREAD_SYSTEM) { |
207 | 0 | pt_book.system -= 1; |
208 | 0 | } else if (--pt_book.user == pt_book.this_many) { |
209 | 0 | PR_NotifyAllCondVar(pt_book.cv); |
210 | 0 | } |
211 | 0 | if (NULL == thred->prev) { |
212 | 0 | pt_book.first = thred->next; |
213 | 0 | } else { |
214 | 0 | thred->prev->next = thred->next; |
215 | 0 | } |
216 | 0 | if (NULL == thred->next) { |
217 | 0 | pt_book.last = thred->prev; |
218 | 0 | } else { |
219 | 0 | thred->next->prev = thred->prev; |
220 | 0 | } |
221 | 0 | PR_Unlock(pt_book.ml); |
222 | | |
223 | | /* |
224 | | * Here we set the pthread's backpointer to the PRThread to NULL. |
225 | | * Otherwise the destructor would get called eagerly as the thread |
226 | | * returns to the pthread runtime. The joining thread would then be |
227 | | * the proud possessor of a dangling reference. However, this is the |
228 | | * last chance to delete the object if the thread is detached, so |
229 | | * just let the destructor do the work. |
230 | | */ |
231 | 0 | if (PR_FALSE == detached) { |
232 | | /* Call TPD destructors on this thread. */ |
233 | 0 | _PR_DestroyThreadPrivate(thred); |
234 | 0 | rv = pthread_setspecific(pt_book.key, NULL); |
235 | 0 | PR_ASSERT(0 == rv); |
236 | 0 | } |
237 | |
|
238 | 0 | return NULL; |
239 | 0 | } /* _pt_root */ |
240 | | |
241 | 0 | static PRThread* pt_AttachThread(void) { |
242 | 0 | PRThread* thred = NULL; |
243 | | |
244 | | /* |
245 | | * NSPR must have been initialized when PR_AttachThread is called. |
246 | | * We cannot have PR_AttachThread call implicit initialization |
247 | | * because if multiple threads call PR_AttachThread simultaneously, |
248 | | * NSPR may be initialized more than once. |
249 | | * We can't call any function that calls PR_GetCurrentThread() |
250 | | * either (e.g., PR_SetError()) as that will result in infinite |
251 | | * recursion. |
252 | | */ |
253 | 0 | if (!_pr_initialized) { |
254 | 0 | return NULL; |
255 | 0 | } |
256 | | |
257 | | /* PR_NEWZAP must not call PR_GetCurrentThread() */ |
258 | 0 | thred = PR_NEWZAP(PRThread); |
259 | 0 | if (NULL != thred) { |
260 | 0 | int rv; |
261 | |
|
262 | 0 | thred->priority = PR_PRIORITY_NORMAL; |
263 | 0 | thred->id = pthread_self(); |
264 | 0 | thred->idSet = PR_TRUE; |
265 | 0 | # ifdef _PR_NICE_PRIORITY_SCHEDULING |
266 | 0 | thred->tid = gettid(); |
267 | 0 | # endif |
268 | 0 | rv = pthread_setspecific(pt_book.key, thred); |
269 | 0 | PR_ASSERT(0 == rv); |
270 | |
|
271 | 0 | thred->state = PT_THREAD_GLOBAL | PT_THREAD_FOREIGN; |
272 | 0 | PR_Lock(pt_book.ml); |
273 | | |
274 | | /* then put it into the list */ |
275 | 0 | thred->prev = pt_book.last; |
276 | 0 | if (pt_book.last) { |
277 | 0 | pt_book.last->next = thred; |
278 | 0 | } else { |
279 | 0 | pt_book.first = thred; |
280 | 0 | } |
281 | 0 | thred->next = NULL; |
282 | 0 | pt_book.last = thred; |
283 | 0 | PR_Unlock(pt_book.ml); |
284 | 0 | } |
285 | 0 | return thred; /* may be NULL */ |
286 | 0 | } /* pt_AttachThread */ |
287 | | |
288 | | static PRThread* _PR_CreateThread(PRThreadType type, void (*start)(void* arg), |
289 | | void* arg, PRThreadPriority priority, |
290 | | PRThreadScope scope, PRThreadState state, |
291 | 0 | PRUint32 stackSize, PRBool isGCAble) { |
292 | 0 | int rv; |
293 | 0 | PRThread* thred; |
294 | 0 | pthread_attr_t tattr; |
295 | |
|
296 | 0 | if (!_pr_initialized) { |
297 | 0 | _PR_ImplicitInitialization(); |
298 | 0 | } |
299 | |
|
300 | 0 | if ((PRIntn)PR_PRIORITY_FIRST > (PRIntn)priority) { |
301 | 0 | priority = PR_PRIORITY_FIRST; |
302 | 0 | } else if ((PRIntn)PR_PRIORITY_LAST < (PRIntn)priority) { |
303 | 0 | priority = PR_PRIORITY_LAST; |
304 | 0 | } |
305 | |
|
306 | 0 | rv = _PT_PTHREAD_ATTR_INIT(&tattr); |
307 | 0 | PR_ASSERT(0 == rv); |
308 | |
|
309 | 0 | if (EPERM != pt_schedpriv) { |
310 | | # if _POSIX_THREAD_PRIORITY_SCHEDULING > 0 |
311 | | struct sched_param schedule; |
312 | | # endif |
313 | |
|
314 | | # if _POSIX_THREAD_PRIORITY_SCHEDULING > 0 |
315 | | rv = pthread_attr_setinheritsched(&tattr, PTHREAD_EXPLICIT_SCHED); |
316 | | PR_ASSERT(0 == rv); |
317 | | # endif |
318 | | |
319 | | /* Use the default scheduling policy */ |
320 | |
|
321 | | # if _POSIX_THREAD_PRIORITY_SCHEDULING > 0 |
322 | | rv = pthread_attr_getschedparam(&tattr, &schedule); |
323 | | PR_ASSERT(0 == rv); |
324 | | schedule.sched_priority = pt_PriorityMap(priority); |
325 | | rv = pthread_attr_setschedparam(&tattr, &schedule); |
326 | | PR_ASSERT(0 == rv); |
327 | | # ifdef NTO |
328 | | rv = pthread_attr_setschedpolicy(&tattr, SCHED_RR); /* Round Robin */ |
329 | | PR_ASSERT(0 == rv); |
330 | | # endif |
331 | | # endif /* _POSIX_THREAD_PRIORITY_SCHEDULING > 0 */ |
332 | 0 | } |
333 | |
|
334 | 0 | rv = pthread_attr_setdetachstate( |
335 | 0 | &tattr, ((PR_JOINABLE_THREAD == state) ? PTHREAD_CREATE_JOINABLE |
336 | 0 | : PTHREAD_CREATE_DETACHED)); |
337 | 0 | PR_ASSERT(0 == rv); |
338 | | |
339 | | /* |
340 | | * If stackSize is 0, we use the default pthread stack size. |
341 | | */ |
342 | 0 | if (stackSize) { |
343 | | # ifdef _MD_MINIMUM_STACK_SIZE |
344 | | if (stackSize < _MD_MINIMUM_STACK_SIZE) { |
345 | | stackSize = _MD_MINIMUM_STACK_SIZE; |
346 | | } |
347 | | # endif |
348 | 0 | rv = pthread_attr_setstacksize(&tattr, stackSize); |
349 | 0 | PR_ASSERT(0 == rv); |
350 | 0 | } |
351 | |
|
352 | 0 | thred = PR_NEWZAP(PRThread); |
353 | 0 | if (NULL == thred) { |
354 | 0 | PR_SetError(PR_OUT_OF_MEMORY_ERROR, errno); |
355 | 0 | goto done; |
356 | 0 | } else { |
357 | 0 | pthread_t id; |
358 | |
|
359 | 0 | thred->arg = arg; |
360 | 0 | thred->startFunc = start; |
361 | 0 | thred->priority = priority; |
362 | 0 | if (PR_UNJOINABLE_THREAD == state) { |
363 | 0 | thred->state |= PT_THREAD_DETACHED; |
364 | 0 | } |
365 | |
|
366 | 0 | if (PR_LOCAL_THREAD == scope) { |
367 | 0 | scope = PR_GLOBAL_THREAD; |
368 | 0 | } |
369 | |
|
370 | 0 | if (PR_GLOBAL_BOUND_THREAD == scope) { |
371 | | # if _POSIX_THREAD_PRIORITY_SCHEDULING > 0 |
372 | | rv = pthread_attr_setscope(&tattr, PTHREAD_SCOPE_SYSTEM); |
373 | | if (rv) { |
374 | | /* |
375 | | * system scope not supported |
376 | | */ |
377 | | scope = PR_GLOBAL_THREAD; |
378 | | /* |
379 | | * reset scope |
380 | | */ |
381 | | rv = pthread_attr_setscope(&tattr, PTHREAD_SCOPE_PROCESS); |
382 | | PR_ASSERT(0 == rv); |
383 | | } |
384 | | # endif |
385 | 0 | } |
386 | 0 | if (PR_GLOBAL_THREAD == scope) { |
387 | 0 | thred->state |= PT_THREAD_GLOBAL; |
388 | 0 | } else if (PR_GLOBAL_BOUND_THREAD == scope) { |
389 | 0 | thred->state |= (PT_THREAD_GLOBAL | PT_THREAD_BOUND); |
390 | 0 | } else { /* force it global */ |
391 | 0 | thred->state |= PT_THREAD_GLOBAL; |
392 | 0 | } |
393 | 0 | if (PR_SYSTEM_THREAD == type) { |
394 | 0 | thred->state |= PT_THREAD_SYSTEM; |
395 | 0 | } |
396 | |
|
397 | 0 | thred->suspend = (isGCAble) ? PT_THREAD_SETGCABLE : 0; |
398 | |
|
399 | 0 | thred->stack = PR_NEWZAP(PRThreadStack); |
400 | 0 | if (thred->stack == NULL) { |
401 | 0 | PRIntn oserr = errno; |
402 | 0 | PR_Free(thred); /* all that work ... poof! */ |
403 | 0 | PR_SetError(PR_OUT_OF_MEMORY_ERROR, oserr); |
404 | 0 | thred = NULL; /* and for what? */ |
405 | 0 | goto done; |
406 | 0 | } |
407 | 0 | thred->stack->stackSize = stackSize; |
408 | 0 | thred->stack->thr = thred; |
409 | |
|
410 | 0 | # ifdef PT_NO_SIGTIMEDWAIT |
411 | 0 | pthread_mutex_init(&thred->suspendResumeMutex, NULL); |
412 | 0 | pthread_cond_init(&thred->suspendResumeCV, NULL); |
413 | 0 | # endif |
414 | | |
415 | | /* make the thread counted by the rest of the runtime */ |
416 | 0 | PR_Lock(pt_book.ml); |
417 | 0 | if (PR_SYSTEM_THREAD == type) { |
418 | 0 | pt_book.system += 1; |
419 | 0 | } else { |
420 | 0 | pt_book.user += 1; |
421 | 0 | } |
422 | 0 | PR_Unlock(pt_book.ml); |
423 | | |
424 | | /* |
425 | | * We pass a pointer to a local copy (instead of thred->id) |
426 | | * to pthread_create() because who knows what wacky things |
427 | | * pthread_create() may be doing to its argument. |
428 | | */ |
429 | 0 | rv = _PT_PTHREAD_CREATE(&id, tattr, _pt_root, thred); |
430 | |
|
431 | 0 | if (EPERM == rv) { |
432 | | /* Remember that we don't have thread scheduling privilege. */ |
433 | 0 | pt_schedpriv = EPERM; |
434 | 0 | PR_LOG(_pr_thread_lm, PR_LOG_MIN, |
435 | 0 | ("_PR_CreateThread: no thread scheduling privilege")); |
436 | | /* Try creating the thread again without setting priority. */ |
437 | | # if _POSIX_THREAD_PRIORITY_SCHEDULING > 0 |
438 | | rv = pthread_attr_setinheritsched(&tattr, PTHREAD_INHERIT_SCHED); |
439 | | PR_ASSERT(0 == rv); |
440 | | # endif |
441 | 0 | rv = _PT_PTHREAD_CREATE(&id, tattr, _pt_root, thred); |
442 | 0 | } |
443 | |
|
444 | 0 | if (0 != rv) { |
445 | 0 | PRIntn oserr = rv; |
446 | 0 | PR_Lock(pt_book.ml); |
447 | 0 | if (thred->state & PT_THREAD_SYSTEM) { |
448 | 0 | pt_book.system -= 1; |
449 | 0 | } else if (--pt_book.user == pt_book.this_many) { |
450 | 0 | PR_NotifyAllCondVar(pt_book.cv); |
451 | 0 | } |
452 | 0 | PR_Unlock(pt_book.ml); |
453 | |
|
454 | 0 | PR_Free(thred->stack); |
455 | 0 | PR_Free(thred); /* all that work ... poof! */ |
456 | 0 | PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, oserr); |
457 | 0 | thred = NULL; /* and for what? */ |
458 | 0 | goto done; |
459 | 0 | } |
460 | | |
461 | 0 | PR_Lock(pt_book.ml); |
462 | | /* |
463 | | * Both the parent thread and this new thread set thred->id. |
464 | | * The parent thread must ensure that thred->id is set before |
465 | | * PR_CreateThread() returns. (See comments in _pt_root().) |
466 | | */ |
467 | 0 | if (!thred->idSet) { |
468 | 0 | thred->id = id; |
469 | 0 | thred->idSet = PR_TRUE; |
470 | 0 | } else { |
471 | 0 | PR_ASSERT(pthread_equal(thred->id, id)); |
472 | 0 | } |
473 | | |
474 | | /* |
475 | | * If the new thread is detached, tell it that PR_CreateThread() has |
476 | | * accessed thred->id and thred->idSet so it's ok to delete thred. |
477 | | */ |
478 | 0 | if (PR_UNJOINABLE_THREAD == state) { |
479 | 0 | thred->okToDelete = PR_TRUE; |
480 | 0 | PR_NotifyAllCondVar(pt_book.cv); |
481 | 0 | } |
482 | 0 | PR_Unlock(pt_book.ml); |
483 | 0 | } |
484 | | |
485 | 0 | done: |
486 | 0 | rv = _PT_PTHREAD_ATTR_DESTROY(&tattr); |
487 | 0 | PR_ASSERT(0 == rv); |
488 | |
|
489 | 0 | return thred; |
490 | 0 | } /* _PR_CreateThread */ |
491 | | |
492 | | PR_IMPLEMENT(PRThread*) |
493 | | PR_CreateThread(PRThreadType type, void (*start)(void* arg), void* arg, |
494 | | PRThreadPriority priority, PRThreadScope scope, |
495 | 0 | PRThreadState state, PRUint32 stackSize) { |
496 | 0 | return _PR_CreateThread(type, start, arg, priority, scope, state, stackSize, |
497 | 0 | PR_FALSE); |
498 | 0 | } /* PR_CreateThread */ |
499 | | |
500 | | PR_IMPLEMENT(PRThread*) |
501 | | PR_CreateThreadGCAble(PRThreadType type, void (*start)(void* arg), void* arg, |
502 | | PRThreadPriority priority, PRThreadScope scope, |
503 | 0 | PRThreadState state, PRUint32 stackSize) { |
504 | 0 | return _PR_CreateThread(type, start, arg, priority, scope, state, stackSize, |
505 | 0 | PR_TRUE); |
506 | 0 | } /* PR_CreateThreadGCAble */ |
507 | | |
508 | 0 | PR_IMPLEMENT(void*) GetExecutionEnvironment(PRThread* thred) { |
509 | 0 | return thred->environment; |
510 | 0 | } /* GetExecutionEnvironment */ |
511 | | |
512 | 0 | PR_IMPLEMENT(void) SetExecutionEnvironment(PRThread* thred, void* env) { |
513 | 0 | thred->environment = env; |
514 | 0 | } /* SetExecutionEnvironment */ |
515 | | |
516 | | PR_IMPLEMENT(PRThread*) |
517 | | PR_AttachThread(PRThreadType type, PRThreadPriority priority, |
518 | 0 | PRThreadStack* stack) { |
519 | 0 | return PR_GetCurrentThread(); |
520 | 0 | } /* PR_AttachThread */ |
521 | | |
522 | 0 | PR_IMPLEMENT(PRStatus) PR_JoinThread(PRThread* thred) { |
523 | 0 | int rv = -1; |
524 | 0 | void* result = NULL; |
525 | 0 | PR_ASSERT(thred != NULL); |
526 | |
|
527 | 0 | if ((0xafafafaf == thred->state) || |
528 | 0 | (PT_THREAD_DETACHED == (PT_THREAD_DETACHED & thred->state)) || |
529 | 0 | (PT_THREAD_FOREIGN == (PT_THREAD_FOREIGN & thred->state))) { |
530 | | /* |
531 | | * This might be a bad address, but if it isn't, the thread is |
532 | | * either unjoinable or its object has already been deleted. |
533 | | * Either way, the client that called join on a detached |
534 | | * thread deserves all the wrath I can muster.... |
535 | | */ |
536 | 0 | PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); |
537 | 0 | PR_LogPrint("PR_JoinThread: %p not joinable | already smashed\n", thred); |
538 | 0 | } else { |
539 | 0 | pthread_t id = thred->id; |
540 | 0 | rv = pthread_join(id, &result); |
541 | 0 | PR_ASSERT(rv == 0 && result == NULL); |
542 | 0 | if (0 == rv) { |
543 | | /* |
544 | | * PR_FALSE, because the thread already called the TPD |
545 | | * destructors before exiting _pt_root. |
546 | | */ |
547 | 0 | _pt_thread_death_internal(thred, PR_FALSE); |
548 | 0 | } else { |
549 | 0 | PRErrorCode prerror; |
550 | 0 | switch (rv) { |
551 | 0 | case EINVAL: /* not a joinable thread */ |
552 | 0 | case ESRCH: /* no thread with given ID */ |
553 | 0 | prerror = PR_INVALID_ARGUMENT_ERROR; |
554 | 0 | break; |
555 | 0 | case EDEADLK: /* a thread joining with itself */ |
556 | 0 | prerror = PR_DEADLOCK_ERROR; |
557 | 0 | break; |
558 | 0 | default: |
559 | 0 | prerror = PR_UNKNOWN_ERROR; |
560 | 0 | break; |
561 | 0 | } |
562 | 0 | PR_SetError(prerror, rv); |
563 | 0 | } |
564 | 0 | } |
565 | 0 | return (0 == rv) ? PR_SUCCESS : PR_FAILURE; |
566 | 0 | } /* PR_JoinThread */ |
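 | | /* |
 | | ** Usage sketch (editorial addition, not in the original source): a caller |
 | | ** typically pairs PR_CreateThread() with PR_JoinThread(), here with a |
 | | ** hypothetical worker() routine matching the void (*)(void*) signature and |
 | | ** a stackSize of 0 to get the default stack: |
 | | ** |
 | | **   PRThread* t = PR_CreateThread(PR_USER_THREAD, worker, NULL, |
 | | **                                 PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, |
 | | **                                 PR_JOINABLE_THREAD, 0); |
 | | **   if (t != NULL) { |
 | | **     (void)PR_JoinThread(t); |
 | | **   } |
 | | ** |
 | | ** PR_JoinThread() reaps the thread and frees its PRThread; threads created |
 | | ** PR_UNJOINABLE_THREAD must not be joined and, as the code above shows, |
 | | ** such a call fails with PR_INVALID_ARGUMENT_ERROR. |
 | | */ |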
567 | | |
568 | 0 | PR_IMPLEMENT(void) PR_DetachThread(void) { |
569 | 0 | void* thred; |
570 | 0 | int rv; |
571 | |
|
572 | 0 | _PT_PTHREAD_GETSPECIFIC(pt_book.key, thred); |
573 | 0 | if (NULL == thred) { |
574 | 0 | return; |
575 | 0 | } |
576 | 0 | _pt_thread_death(thred); |
577 | 0 | rv = pthread_setspecific(pt_book.key, NULL); |
578 | 0 | PR_ASSERT(0 == rv); |
579 | 0 | } /* PR_DetachThread */ |
580 | | |
581 | 128M | PR_IMPLEMENT(PRThread*) PR_GetCurrentThread(void) { |
582 | 128M | void* thred; |
583 | | |
584 | 128M | if (!_pr_initialized) { |
585 | 1 | _PR_ImplicitInitialization(); |
586 | 1 | } |
587 | | |
588 | 128M | _PT_PTHREAD_GETSPECIFIC(pt_book.key, thred); |
589 | 128M | if (NULL == thred) { |
590 | 0 | thred = pt_AttachThread(); |
591 | 0 | } |
592 | 128M | PR_ASSERT(NULL != thred); |
593 | 128M | return (PRThread*)thred; |
594 | 128M | } /* PR_GetCurrentThread */ |
595 | | |
596 | 0 | PR_IMPLEMENT(PRThreadScope) PR_GetThreadScope(const PRThread* thred) { |
597 | 0 | return (thred->state & PT_THREAD_BOUND) ? PR_GLOBAL_BOUND_THREAD |
598 | 0 | : PR_GLOBAL_THREAD; |
599 | 0 | } /* PR_GetThreadScope() */ |
600 | | |
601 | 0 | PR_IMPLEMENT(PRThreadType) PR_GetThreadType(const PRThread* thred) { |
602 | 0 | return (thred->state & PT_THREAD_SYSTEM) ? PR_SYSTEM_THREAD : PR_USER_THREAD; |
603 | 0 | } |
604 | | |
605 | 0 | PR_IMPLEMENT(PRThreadState) PR_GetThreadState(const PRThread* thred) { |
606 | 0 | return (thred->state & PT_THREAD_DETACHED) ? PR_UNJOINABLE_THREAD |
607 | 0 | : PR_JOINABLE_THREAD; |
608 | 0 | } /* PR_GetThreadState */ |
609 | | |
610 | 0 | PR_IMPLEMENT(PRThreadPriority) PR_GetThreadPriority(const PRThread* thred) { |
611 | 0 | PR_ASSERT(thred != NULL); |
612 | 0 | return thred->priority; |
613 | 0 | } /* PR_GetThreadPriority */ |
614 | | |
615 | | PR_IMPLEMENT(void) |
616 | 0 | PR_SetThreadPriority(PRThread* thred, PRThreadPriority newPri) { |
617 | 0 | PRIntn rv; |
618 | |
|
619 | 0 | PR_ASSERT(NULL != thred); |
620 | |
|
621 | 0 | if ((PRIntn)PR_PRIORITY_FIRST > (PRIntn)newPri) { |
622 | 0 | newPri = PR_PRIORITY_FIRST; |
623 | 0 | } else if ((PRIntn)PR_PRIORITY_LAST < (PRIntn)newPri) { |
624 | 0 | newPri = PR_PRIORITY_LAST; |
625 | 0 | } |
626 | |
|
627 | | # if _POSIX_THREAD_PRIORITY_SCHEDULING > 0 |
628 | | if (EPERM != pt_schedpriv) { |
629 | | int policy; |
630 | | struct sched_param schedule; |
631 | | |
632 | | rv = pthread_getschedparam(thred->id, &policy, &schedule); |
633 | | if (0 == rv) { |
634 | | schedule.sched_priority = pt_PriorityMap(newPri); |
635 | | rv = pthread_setschedparam(thred->id, policy, &schedule); |
636 | | if (EPERM == rv) { |
637 | | pt_schedpriv = EPERM; |
638 | | PR_LOG(_pr_thread_lm, PR_LOG_MIN, |
639 | | ("PR_SetThreadPriority: no thread scheduling privilege")); |
640 | | } |
641 | | } |
642 | | if (rv != 0) { |
643 | | rv = -1; |
644 | | } |
645 | | } |
646 | | # elif defined(_PR_NICE_PRIORITY_SCHEDULING) |
647 | | PR_Lock(pt_book.ml); |
648 | 0 | while (thred->tid == 0) { |
649 | 0 | PR_WaitCondVar(pt_book.cv, PR_INTERVAL_NO_TIMEOUT); |
650 | 0 | } |
651 | 0 | PR_Unlock(pt_book.ml); |
652 | |
|
653 | 0 | errno = 0; |
654 | 0 | rv = getpriority(PRIO_PROCESS, 0); |
655 | | |
656 | | /* Do not proceed unless we know the main thread's nice value. */ |
657 | 0 | if (errno == 0) { |
658 | 0 | rv = setpriority(PRIO_PROCESS, thred->tid, pt_RelativePriority(rv, newPri)); |
659 | |
|
660 | 0 | if (rv == -1) { |
661 | | /* We don't set pt_schedpriv to EPERM in case errno == EPERM |
662 | | * because adjusting the nice value might be permitted for certain |
663 | | * ranges but not for others. */ |
664 | 0 | PR_LOG(_pr_thread_lm, PR_LOG_MIN, |
665 | 0 | ("PR_SetThreadPriority: setpriority failed with error %d", errno)); |
666 | 0 | } |
667 | 0 | } |
668 | | # else |
669 | | (void)rv; /* rv is unused */ |
670 | | # endif |
671 | |
|
672 | 0 | thred->priority = newPri; |
673 | 0 | } /* PR_SetThreadPriority */ |
674 | | |
675 | 0 | PR_IMPLEMENT(PRStatus) PR_Interrupt(PRThread* thred) { |
676 | | /* |
677 | | ** If the target thread indicates that it's waiting, |
678 | | ** find the condition and broadcast to it. Broadcast |
679 | | ** since we don't know which thread (if there are more |
680 | | ** than one). This sounds risky, but clients must |
681 | | ** test their invariants when resumed from a wait. I don't |
682 | | ** expect very many threads to be waiting on a single |
683 | | ** condition, nor do I expect interrupt to |
684 | | ** be used very often. |
685 | | ** |
686 | | ** I don't know why I thought this would work. Must have |
687 | | ** been one of those weaker moments after I'd been |
688 | | ** smelling the vapors. |
689 | | ** |
690 | | ** Even with the following changes it is possible that |
691 | | ** the pointer to the condition variable is pointing |
692 | | ** at a bogus value. Will the underlying code detect |
693 | | ** that? |
694 | | */ |
695 | 0 | PRCondVar* cv; |
696 | 0 | PR_ASSERT(NULL != thred); |
697 | 0 | if (NULL == thred) { |
698 | 0 | return PR_FAILURE; |
699 | 0 | } |
700 | | |
701 | 0 | thred->state |= PT_THREAD_ABORTED; |
702 | |
|
703 | 0 | cv = thred->waiting; |
704 | 0 | if ((NULL != cv) && !thred->interrupt_blocked) { |
705 | 0 | PRIntn rv; |
706 | 0 | (void)PR_ATOMIC_INCREMENT(&cv->notify_pending); |
707 | 0 | rv = pthread_cond_broadcast(&cv->cv); |
708 | 0 | PR_ASSERT(0 == rv); |
709 | 0 | if (0 > PR_ATOMIC_DECREMENT(&cv->notify_pending)) { |
710 | 0 | PR_DestroyCondVar(cv); |
711 | 0 | } |
712 | 0 | } |
713 | 0 | return PR_SUCCESS; |
714 | 0 | } /* PR_Interrupt */ |
715 | | |
716 | 0 | PR_IMPLEMENT(void) PR_ClearInterrupt(void) { |
717 | 0 | PRThread* me = PR_GetCurrentThread(); |
718 | 0 | me->state &= ~PT_THREAD_ABORTED; |
719 | 0 | } /* PR_ClearInterrupt */ |
720 | | |
721 | 0 | PR_IMPLEMENT(void) PR_BlockInterrupt(void) { |
722 | 0 | PRThread* me = PR_GetCurrentThread(); |
723 | 0 | _PT_THREAD_BLOCK_INTERRUPT(me); |
724 | 0 | } /* PR_BlockInterrupt */ |
725 | | |
726 | 0 | PR_IMPLEMENT(void) PR_UnblockInterrupt(void) { |
727 | 0 | PRThread* me = PR_GetCurrentThread(); |
728 | 0 | _PT_THREAD_UNBLOCK_INTERRUPT(me); |
729 | 0 | } /* PR_UnblockInterrupt */ |
730 | | |
731 | 0 | PR_IMPLEMENT(PRStatus) PR_Yield(void) { |
732 | 0 | static PRBool warning = PR_TRUE; |
733 | 0 | if (warning) |
734 | 0 | warning = _PR_Obsolete("PR_Yield()", "PR_Sleep(PR_INTERVAL_NO_WAIT)"); |
735 | 0 | return PR_Sleep(PR_INTERVAL_NO_WAIT); |
736 | 0 | } |
737 | | |
738 | 1.75k | PR_IMPLEMENT(PRStatus) PR_Sleep(PRIntervalTime ticks) { |
739 | 1.75k | PRStatus rv = PR_SUCCESS; |
740 | | |
741 | 1.75k | if (!_pr_initialized) { |
742 | 0 | _PR_ImplicitInitialization(); |
743 | 0 | } |
744 | | |
745 | 1.75k | if (PR_INTERVAL_NO_WAIT == ticks) { |
746 | 1.75k | _PT_PTHREAD_YIELD(); |
747 | 1.75k | } else { |
748 | 0 | PRCondVar* cv; |
749 | 0 | PRIntervalTime timein; |
750 | |
|
751 | 0 | timein = PR_IntervalNow(); |
752 | 0 | cv = PR_NewCondVar(_pr_sleeplock); |
753 | 0 | PR_ASSERT(cv != NULL); |
754 | 0 | PR_Lock(_pr_sleeplock); |
755 | 0 | do { |
756 | 0 | PRIntervalTime now = PR_IntervalNow(); |
757 | 0 | PRIntervalTime delta = now - timein; |
758 | 0 | if (delta > ticks) { |
759 | 0 | break; |
760 | 0 | } |
761 | 0 | rv = PR_WaitCondVar(cv, ticks - delta); |
762 | 0 | } while (PR_SUCCESS == rv); |
763 | 0 | PR_Unlock(_pr_sleeplock); |
764 | 0 | PR_DestroyCondVar(cv); |
765 | 0 | } |
766 | 0 | return rv; |
767 | 1.75k | } /* PR_Sleep */ |
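 | | /* |
 | | ** Usage note (editorial addition, not in the original source): |
 | | ** PR_Sleep(PR_INTERVAL_NO_WAIT) merely yields the processor, while a |
 | | ** non-zero interval blocks the caller on a condition variable tied to the |
 | | ** private sleep lock, e.g. using the usual interval converter: |
 | | ** |
 | | **   PR_Sleep(PR_MillisecondsToInterval(250)); |
 | | ** |
 | | ** The loop above retries until the requested interval has elapsed, but |
 | | ** PR_WaitCondVar() can fail (e.g. on interrupt), in which case PR_Sleep() |
 | | ** returns PR_FAILURE early. |
 | | */ |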
768 | | |
769 | 0 | static void _pt_thread_death(void* arg) { |
770 | 0 | void* thred; |
771 | 0 | int rv; |
772 | |
|
773 | 0 | _PT_PTHREAD_GETSPECIFIC(pt_book.key, thred); |
774 | 0 | if (NULL == thred) { |
775 | | /* |
776 | | * Have PR_GetCurrentThread return the expected value to the |
777 | | * destructors. |
778 | | */ |
779 | 0 | rv = pthread_setspecific(pt_book.key, arg); |
780 | 0 | PR_ASSERT(0 == rv); |
781 | 0 | } |
782 | | |
783 | | /* PR_TRUE for: call destructors */ |
784 | 0 | _pt_thread_death_internal(arg, PR_TRUE); |
785 | |
|
786 | 0 | if (NULL == thred) { |
787 | 0 | rv = pthread_setspecific(pt_book.key, NULL); |
788 | 0 | PR_ASSERT(0 == rv); |
789 | 0 | } |
790 | 0 | } |
791 | | |
792 | 0 | static void _pt_thread_death_internal(void* arg, PRBool callDestructors) { |
793 | 0 | PRThread* thred = (PRThread*)arg; |
794 | |
|
795 | 0 | if (thred->state & (PT_THREAD_FOREIGN | PT_THREAD_PRIMORD)) { |
796 | 0 | PR_Lock(pt_book.ml); |
797 | 0 | if (NULL == thred->prev) { |
798 | 0 | pt_book.first = thred->next; |
799 | 0 | } else { |
800 | 0 | thred->prev->next = thred->next; |
801 | 0 | } |
802 | 0 | if (NULL == thred->next) { |
803 | 0 | pt_book.last = thred->prev; |
804 | 0 | } else { |
805 | 0 | thred->next->prev = thred->prev; |
806 | 0 | } |
807 | 0 | PR_Unlock(pt_book.ml); |
808 | 0 | } |
809 | 0 | if (callDestructors) { |
810 | 0 | _PR_DestroyThreadPrivate(thred); |
811 | 0 | } |
812 | 0 | PR_Free(thred->privateData); |
813 | 0 | if (NULL != thred->errorString) { |
814 | 0 | PR_Free(thred->errorString); |
815 | 0 | } |
816 | 0 | if (NULL != thred->name) { |
817 | 0 | PR_Free(thred->name); |
818 | 0 | } |
819 | 0 | PR_Free(thred->stack); |
820 | 0 | if (NULL != thred->syspoll_list) { |
821 | 0 | PR_Free(thred->syspoll_list); |
822 | 0 | } |
823 | | # if defined(_PR_POLL_WITH_SELECT) |
824 | | if (NULL != thred->selectfd_list) { |
825 | | PR_Free(thred->selectfd_list); |
826 | | } |
827 | | # endif |
828 | 0 | # if defined(DEBUG) |
829 | 0 | memset(thred, 0xaf, sizeof(PRThread)); |
830 | 0 | # endif /* defined(DEBUG) */ |
831 | 0 | PR_Free(thred); |
832 | 0 | } /* _pt_thread_death */ |
833 | | |
834 | | void _PR_InitThreads(PRThreadType type, PRThreadPriority priority, |
835 | 12 | PRUintn maxPTDs) { |
836 | 12 | int rv; |
837 | 12 | PRThread* thred; |
838 | | |
839 | 12 | PR_ASSERT(priority == PR_PRIORITY_NORMAL); |
840 | | |
841 | | # ifdef _PR_NEED_PTHREAD_INIT |
842 | | /* |
843 | | * On BSD/OS (3.1 and 4.0), the pthread subsystem is lazily |
844 | | * initialized, but pthread_self() fails to initialize |
845 | | * pthreads and hence returns a null thread ID if invoked |
846 | | * by the primordial thread before any other pthread call. |
847 | | * So we explicitly initialize pthreads here. |
848 | | */ |
849 | | pthread_init(); |
850 | | # endif |
851 | | |
852 | | # if _POSIX_THREAD_PRIORITY_SCHEDULING > 0 |
853 | | # if defined(FREEBSD) |
854 | | { |
855 | | pthread_attr_t attr; |
856 | | int policy; |
857 | | /* get the min and max priorities of the default policy */ |
858 | | pthread_attr_init(&attr); |
859 | | pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED); |
860 | | pthread_attr_getschedpolicy(&attr, &policy); |
861 | | pt_book.minPrio = sched_get_priority_min(policy); |
862 | | PR_ASSERT(-1 != pt_book.minPrio); |
863 | | pt_book.maxPrio = sched_get_priority_max(policy); |
864 | | PR_ASSERT(-1 != pt_book.maxPrio); |
865 | | pthread_attr_destroy(&attr); |
866 | | } |
867 | | # else |
868 | | /* |
869 | | ** These might be function evaluations |
870 | | */ |
871 | | pt_book.minPrio = PT_PRIO_MIN; |
872 | | pt_book.maxPrio = PT_PRIO_MAX; |
873 | | # endif |
874 | | # endif |
875 | | |
876 | 12 | PR_ASSERT(NULL == pt_book.ml); |
877 | 12 | pt_book.ml = PR_NewLock(); |
878 | 12 | PR_ASSERT(NULL != pt_book.ml); |
879 | 12 | pt_book.cv = PR_NewCondVar(pt_book.ml); |
880 | 12 | PR_ASSERT(NULL != pt_book.cv); |
881 | 12 | thred = PR_NEWZAP(PRThread); |
882 | 12 | PR_ASSERT(NULL != thred); |
883 | 12 | thred->arg = NULL; |
884 | 12 | thred->startFunc = NULL; |
885 | 12 | thred->priority = priority; |
886 | 12 | thred->id = pthread_self(); |
887 | 12 | thred->idSet = PR_TRUE; |
888 | 12 | # ifdef _PR_NICE_PRIORITY_SCHEDULING |
889 | 12 | thred->tid = gettid(); |
890 | 12 | # endif |
891 | | |
892 | 12 | thred->state = (PT_THREAD_DETACHED | PT_THREAD_PRIMORD); |
893 | 12 | if (PR_SYSTEM_THREAD == type) { |
894 | 0 | thred->state |= PT_THREAD_SYSTEM; |
895 | 0 | pt_book.system += 1; |
896 | 0 | pt_book.this_many = 0; |
897 | 12 | } else { |
898 | 12 | pt_book.user += 1; |
899 | 12 | pt_book.this_many = 1; |
900 | 12 | } |
901 | 12 | thred->next = thred->prev = NULL; |
902 | 12 | pt_book.first = pt_book.last = thred; |
903 | | |
904 | 12 | thred->stack = PR_NEWZAP(PRThreadStack); |
905 | 12 | PR_ASSERT(thred->stack != NULL); |
906 | 12 | thred->stack->stackSize = 0; |
907 | 12 | thred->stack->thr = thred; |
908 | 12 | _PR_InitializeStack(thred->stack); |
909 | | |
910 | | /* |
911 | | * Create a key for our use to store a backpointer in the pthread |
912 | | * to our PRThread object. This object gets deleted when the thread |
913 | | * returns from its root in the case of a detached thread. Other |
914 | | * threads delete the objects in Join. |
915 | | * |
916 | | * NB: The destructor logic seems to have a bug so it isn't used. |
917 | | * NBB: Oh really? I'm going to give it a spin - AOF 19 June 1998. |
918 | | * More info - the problem is that pthreads calls the destructor |
919 | | * eagerly as the thread returns from its root, rather than lazily |
920 | | * after the thread is joined. Therefore, threads that are joining |
921 | | * and holding PRThread references are actually holding pointers to |
922 | | * nothing. |
923 | | */ |
924 | 12 | rv = _PT_PTHREAD_KEY_CREATE(&pt_book.key, _pt_thread_death); |
925 | 12 | if (0 != rv) { |
926 | 0 | PR_Assert("0 == rv", __FILE__, __LINE__); |
927 | 0 | } |
928 | 12 | pt_book.keyCreated = PR_TRUE; |
929 | 12 | rv = pthread_setspecific(pt_book.key, thred); |
930 | 12 | PR_ASSERT(0 == rv); |
931 | 12 | } /* _PR_InitThreads */ |
932 | | |
933 | | # ifdef __GNUC__ |
934 | | /* |
935 | | * GCC supports the constructor and destructor attributes as of |
936 | | * version 2.5. |
937 | | */ |
938 | | # if defined(DARWIN) |
939 | | /* |
940 | | * The dynamic linker on OSX doesn't execute __attribute__((destructor)) |
941 | | * destructors in the right order wrt non-__attribute((destructor)) destructors |
942 | | * in other libraries. So use atexit() instead, which does. |
943 | | * See https://bugzilla.mozilla.org/show_bug.cgi?id=1399746#c99 |
944 | | */ |
945 | | static void _PR_Fini(void); |
946 | | |
947 | | __attribute__((constructor)) static void _register_PR_Fini() { |
948 | | atexit(_PR_Fini); |
949 | | } |
950 | | # else |
951 | | static void _PR_Fini(void) __attribute__((destructor)); |
952 | | # endif |
953 | | |
954 | | # elif defined(__SUNPRO_C) |
955 | | /* |
956 | | * Sun Studio compiler |
957 | | */ |
958 | | # pragma fini(_PR_Fini) |
959 | | static void _PR_Fini(void); |
960 | | # elif defined(HPUX) |
961 | | /* |
962 | | * Current versions of HP C compiler define __HP_cc. |
963 | | * HP C compiler A.11.01.20 doesn't define __HP_cc. |
964 | | */ |
965 | | # if defined(__ia64) || defined(_LP64) |
966 | | # pragma FINI "_PR_Fini" |
967 | | static void _PR_Fini(void); |
968 | | # else |
969 | | /* |
970 | | * Only HP-UX 10.x style initializers are supported in 32-bit links. |
971 | | * Need to use the +I PR_HPUX10xInit linker option. |
972 | | */ |
973 | | # include <dl.h> |
974 | | |
975 | | static void _PR_Fini(void); |
976 | | |
977 | | void PR_HPUX10xInit(shl_t handle, int loading) { |
978 | | /* |
979 | | * This function is called when a shared library is loaded as well |
980 | | * as when the shared library is unloaded. Note that it may not |
981 | | * be called when the user's program terminates. |
982 | | * |
983 | | * handle is the shl_load API handle for the shared library being |
984 | | * initialized. |
985 | | * |
986 | | * loading is non-zero at startup and zero at termination. |
987 | | */ |
988 | | if (loading) { |
989 | | /* ... do some initializations ... */ |
990 | | } else { |
991 | | _PR_Fini(); |
992 | | } |
993 | | } |
994 | | # endif |
995 | | # elif defined(AIX) |
996 | | /* Need to use the -binitfini::_PR_Fini linker option. */ |
997 | | # endif |
998 | | |
999 | 0 | void _PR_Fini(void) { |
1000 | 0 | void* thred; |
1001 | 0 | int rv; |
1002 | |
|
1003 | 0 | if (!_pr_initialized) { |
1004 | | /* Either NSPR was never successfully initialized or |
1005 | | * PR_Cleanup has been called already. */ |
1006 | 0 | if (pt_book.keyCreated) { |
1007 | 0 | rv = pthread_key_delete(pt_book.key); |
1008 | 0 | PR_ASSERT(0 == rv); |
1009 | 0 | pt_book.keyCreated = PR_FALSE; |
1010 | 0 | } |
1011 | 0 | return; |
1012 | 0 | } |
1013 | | |
1014 | 0 | _PT_PTHREAD_GETSPECIFIC(pt_book.key, thred); |
1015 | 0 | if (NULL != thred) { |
1016 | | /* |
1017 | | * PR_FALSE, because it is unsafe to call back to the |
1018 | | * thread private data destructors at final cleanup. |
1019 | | */ |
1020 | 0 | _pt_thread_death_internal(thred, PR_FALSE); |
1021 | 0 | rv = pthread_setspecific(pt_book.key, NULL); |
1022 | 0 | PR_ASSERT(0 == rv); |
1023 | 0 | } |
1024 | 0 | rv = pthread_key_delete(pt_book.key); |
1025 | 0 | PR_ASSERT(0 == rv); |
1026 | 0 | pt_book.keyCreated = PR_FALSE; |
1027 | | /* TODO: free other resources used by NSPR */ |
1028 | | /* _pr_initialized = PR_FALSE; */ |
1029 | 0 | } /* _PR_Fini */ |
1030 | | |
1031 | 0 | PR_IMPLEMENT(PRStatus) PR_Cleanup(void) { |
1032 | 0 | PRThread* me = PR_GetCurrentThread(); |
1033 | 0 | int rv; |
1034 | 0 | PR_LOG(_pr_thread_lm, PR_LOG_MIN, ("PR_Cleanup: shutting down NSPR")); |
1035 | 0 | PR_ASSERT(me->state & PT_THREAD_PRIMORD); |
1036 | 0 | if (me->state & PT_THREAD_PRIMORD) { |
1037 | 0 | PR_Lock(pt_book.ml); |
1038 | 0 | while (pt_book.user > pt_book.this_many) { |
1039 | 0 | PR_WaitCondVar(pt_book.cv, PR_INTERVAL_NO_TIMEOUT); |
1040 | 0 | } |
1041 | 0 | if (me->state & PT_THREAD_SYSTEM) { |
1042 | 0 | pt_book.system -= 1; |
1043 | 0 | } else { |
1044 | 0 | pt_book.user -= 1; |
1045 | 0 | } |
1046 | 0 | PR_Unlock(pt_book.ml); |
1047 | |
|
1048 | 0 | _PR_MD_EARLY_CLEANUP(); |
1049 | |
|
1050 | 0 | _PR_CleanupMW(); |
1051 | 0 | _PR_CleanupTime(); |
1052 | 0 | _PR_CleanupDtoa(); |
1053 | 0 | _PR_CleanupCallOnce(); |
1054 | 0 | _PR_ShutdownLinker(); |
1055 | 0 | _PR_LogCleanup(); |
1056 | 0 | _PR_CleanupNet(); |
1057 | | /* Close all the fd's before calling _PR_CleanupIO */ |
1058 | 0 | _PR_CleanupIO(); |
1059 | 0 | _PR_CleanupCMon(); |
1060 | |
|
1061 | 0 | _pt_thread_death(me); |
1062 | 0 | rv = pthread_setspecific(pt_book.key, NULL); |
1063 | 0 | PR_ASSERT(0 == rv); |
1064 | | /* |
1065 | | * I am not sure if it's safe to delete the cv and lock here, |
1066 | | * since there may still be "system" threads around. If this |
1067 | | * call isn't immediately prior to exiting, then there's a |
1068 | | * problem. |
1069 | | */ |
1070 | 0 | if (0 == pt_book.system) { |
1071 | 0 | PR_DestroyCondVar(pt_book.cv); |
1072 | 0 | pt_book.cv = NULL; |
1073 | 0 | PR_DestroyLock(pt_book.ml); |
1074 | 0 | pt_book.ml = NULL; |
1075 | 0 | } |
1076 | 0 | PR_DestroyLock(_pr_sleeplock); |
1077 | 0 | _pr_sleeplock = NULL; |
1078 | 0 | _PR_CleanupLayerCache(); |
1079 | 0 | _PR_CleanupEnv(); |
1080 | 0 | # ifdef _PR_ZONE_ALLOCATOR |
1081 | 0 | _PR_DestroyZones(); |
1082 | 0 | # endif |
1083 | 0 | _pr_initialized = PR_FALSE; |
1084 | 0 | return PR_SUCCESS; |
1085 | 0 | } |
1086 | 0 | return PR_FAILURE; |
1087 | 0 | } /* PR_Cleanup */ |
1088 | | |
1089 | 0 | PR_IMPLEMENT(void) PR_ProcessExit(PRIntn status) { _exit(status); } |
1090 | | |
1091 | 0 | PR_IMPLEMENT(PRUint32) PR_GetThreadID(PRThread* thred) { |
1092 | 0 | return (PRUint32)thred->id; /* and I don't know what they will do with it */ |
1093 | 0 | } |
1094 | | |
1095 | | /* |
1096 | | * $$$ |
1097 | | * The following two thread-to-processor affinity functions are not |
1098 | | * yet implemented for pthreads. By the way, these functions should return |
1099 | | * PRStatus rather than PRInt32 to indicate the success/failure status. |
1100 | | * $$$ |
1101 | | */ |
1102 | | |
1103 | | PR_IMPLEMENT(PRInt32) |
1104 | 0 | PR_GetThreadAffinityMask(PRThread* thread, PRUint32* mask) { |
1105 | 0 | return 0; /* not implemented */ |
1106 | 0 | } |
1107 | | |
1108 | | PR_IMPLEMENT(PRInt32) |
1109 | 0 | PR_SetThreadAffinityMask(PRThread* thread, PRUint32 mask) { |
1110 | 0 | return 0; /* not implemented */ |
1111 | 0 | } |
1112 | | |
1113 | | PR_IMPLEMENT(void) |
1114 | 0 | PR_SetThreadDumpProc(PRThread* thread, PRThreadDumpProc dump, void* arg) { |
1115 | 0 | thread->dump = dump; |
1116 | 0 | thread->dumpArg = arg; |
1117 | 0 | } |
1118 | | |
1119 | | /* |
1120 | | * Garbage collection support follows. |
1121 | | */ |
1122 | | |
1123 | | /* a bogus signal mask for forcing a timed wait */ |
1124 | | /* Not so bogus on AIX as we really do a sigwait */ |
1125 | | static sigset_t sigwait_set; |
1126 | | |
1127 | | static struct timespec onemillisec = {0, 1000000L}; |
1128 | | # ifndef PT_NO_SIGTIMEDWAIT |
1129 | | static struct timespec hundredmillisec = {0, 100000000L}; |
1130 | | # endif |
1131 | | |
1132 | | static void suspend_signal_handler(PRIntn sig); |
1133 | | |
1134 | | # ifdef PT_NO_SIGTIMEDWAIT |
1135 | | static void null_signal_handler(PRIntn sig); |
1136 | | # endif |
1137 | | |
1138 | | /* |
1139 | | * Linux pthreads use SIGUSR1 and SIGUSR2 internally, which |
1140 | | * conflict with the use of these two signals in our GC support. |
1141 | | * So we don't know how to support GC on Linux pthreads. |
1142 | | */ |
1143 | 0 | static void init_pthread_gc_support(void) { |
1144 | 0 | PRIntn rv; |
1145 | |
|
1146 | 0 | { |
1147 | 0 | struct sigaction sigact_usr2; |
1148 | |
|
1149 | 0 | sigact_usr2.sa_handler = suspend_signal_handler; |
1150 | 0 | sigact_usr2.sa_flags = SA_RESTART; |
1151 | 0 | sigemptyset(&sigact_usr2.sa_mask); |
1152 | |
|
1153 | 0 | rv = sigaction(SIGUSR2, &sigact_usr2, NULL); |
1154 | 0 | PR_ASSERT(0 == rv); |
1155 | |
|
1156 | 0 | sigemptyset(&sigwait_set); |
1157 | 0 | # if defined(PT_NO_SIGTIMEDWAIT) |
1158 | 0 | sigaddset(&sigwait_set, SIGUSR1); |
1159 | | # else |
1160 | | sigaddset(&sigwait_set, SIGUSR2); |
1161 | | # endif /* defined(PT_NO_SIGTIMEDWAIT) */ |
1162 | 0 | } |
1163 | 0 | # if defined(PT_NO_SIGTIMEDWAIT) |
1164 | 0 | { |
1165 | 0 | struct sigaction sigact_null; |
1166 | 0 | sigact_null.sa_handler = null_signal_handler; |
1167 | 0 | sigact_null.sa_flags = SA_RESTART; |
1168 | 0 | sigemptyset(&sigact_null.sa_mask); |
1169 | 0 | rv = sigaction(SIGUSR1, &sigact_null, NULL); |
1170 | 0 | PR_ASSERT(0 == rv); |
1171 | 0 | } |
1172 | 0 | # endif /* defined(PT_NO_SIGTIMEDWAIT) */ |
1173 | 0 | } |
1174 | | |
1175 | 0 | PR_IMPLEMENT(void) PR_SetThreadGCAble(void) { |
1176 | 0 | PR_Lock(pt_book.ml); |
1177 | 0 | PR_GetCurrentThread()->state |= PT_THREAD_GCABLE; |
1178 | 0 | PR_Unlock(pt_book.ml); |
1179 | 0 | } |
1180 | | |
1181 | 0 | PR_IMPLEMENT(void) PR_ClearThreadGCAble(void) { |
1182 | 0 | PR_Lock(pt_book.ml); |
1183 | 0 | PR_GetCurrentThread()->state &= (~PT_THREAD_GCABLE); |
1184 | 0 | PR_Unlock(pt_book.ml); |
1185 | 0 | } |
1186 | | |
1187 | | # if defined(DEBUG) |
1188 | | static PRBool suspendAllOn = PR_FALSE; |
1189 | | # endif |
1190 | | |
1191 | | static PRBool suspendAllSuspended = PR_FALSE; |
1192 | | |
1193 | 0 | PR_IMPLEMENT(PRStatus) PR_EnumerateThreads(PREnumerator func, void* arg) { |
1194 | 0 | PRIntn count = 0; |
1195 | 0 | PRStatus rv = PR_SUCCESS; |
1196 | 0 | PRThread* thred = pt_book.first; |
1197 | |
|
1198 | 0 | # if defined(DEBUG) || defined(FORCE_PR_ASSERT) |
1199 | 0 | PRThread* me = PR_GetCurrentThread(); |
1200 | 0 | # endif |
1201 | |
|
1202 | 0 | PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, ("Begin PR_EnumerateThreads\n")); |
1203 | | /* |
1204 | | * $$$ |
1205 | | * Need to suspend all threads other than me before doing this. |
1206 | | * This is really a gross and disgusting thing to do. The only |
1207 | | * good thing is that since all other threads are suspended, holding |
1208 | | * the lock during a callback seems like child's play. |
1209 | | * $$$ |
1210 | | */ |
1211 | 0 | PR_ASSERT(suspendAllOn); |
1212 | |
|
1213 | 0 | while (thred != NULL) { |
1214 | | /* Steve Morse, 4-23-97: Note that we can't walk a queue by taking |
1215 | | * qp->next after applying the function "func". In particular, "func" |
1216 | | * might remove the thread from the queue and put it into another one in |
1217 | | * which case qp->next no longer points to the next entry in the original |
1218 | | * queue. |
1219 | | * |
1220 | | * To get around this problem, we save qp->next in qp_next before applying |
1221 | | * "func" and use that saved value as the next value after applying "func". |
1222 | | */ |
1223 | 0 | PRThread* next = thred->next; |
1224 | |
|
1225 | 0 | if (_PT_IS_GCABLE_THREAD(thred)) { |
1226 | 0 | PR_ASSERT((thred == me) || (thred->suspend & PT_THREAD_SUSPENDED)); |
1227 | 0 | PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, |
1228 | 0 | ("In PR_EnumerateThreads callback thread %p thid = %X\n", thred, |
1229 | 0 | thred->id)); |
1230 | |
|
1231 | 0 | rv = func(thred, count++, arg); |
1232 | 0 | if (rv != PR_SUCCESS) { |
1233 | 0 | return rv; |
1234 | 0 | } |
1235 | 0 | } |
1236 | 0 | thred = next; |
1237 | 0 | } |
1238 | 0 | PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, |
1239 | 0 | ("End PR_EnumerateThreads count = %d \n", count)); |
1240 | 0 | return rv; |
1241 | 0 | } /* PR_EnumerateThreads */ |
1242 | | |
1243 | | /* |
1244 | | * PR_SuspendAll and PR_ResumeAll are called during garbage collection. The |
1245 | | * strategy we use is to send a SIGUSR2 signal to every gc able thread that we |
1246 | | * intend to suspend. The signal handler will record the stack pointer and will |
1247 | | * block until resumed by the resume call. Since the signal handler is the last |
1248 | | * routine called for the suspended thread, the stack pointer will also serve as |
1249 | | * a place where all the registers have been saved on the stack for the |
1250 | | * previously executing routines. |
1251 | | * |
1252 | | * Through global variables, we also make sure that PR_Suspend and PR_Resume |
1253 | | * do not proceed until the thread is suspended or resumed. |
1254 | | */ |
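 | | /* |
 | | ** Illustrative sequence (editorial addition, not in the original source): |
 | | ** a collector built on this support would typically do something like |
 | | ** |
 | | **   PR_SuspendAll();                       stop every GC-able thread |
 | | **   PR_EnumerateThreads(scan_one, NULL);   scan each stack from PR_GetSP() |
 | | **   PR_ResumeAll();                        let the world run again |
 | | ** |
 | | ** where scan_one is a hypothetical PREnumerator callback supplied by the |
 | | ** collector; PR_SuspendAll() holds pt_book.ml until PR_ResumeAll() releases |
 | | ** it, so the thread list cannot change in between. |
 | | */ |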
1255 | | |
1256 | | /* |
1257 | | * In the signal handler, we cannot use condition variable notify or wait; |
1258 | | * that does not work consistently across all pthread platforms. We also |
1259 | | * cannot use locking, since that does not seem to work reliably across |
1260 | | * platforms. The only thing we can do is yield while testing for a global |
1261 | | * condition to change. This does work on the supported pthread platforms. |
1262 | | * We may have to play with priorities if any problems are detected. |
1263 | | */ |
1264 | | |
1265 | | /* |
1266 | | * On AIX, you cannot use ANY pthread calls in the signal handler except perhaps |
1267 | | * pthread_yield, but that is horribly inefficient. Hence we use only sigwait; |
1268 | | * no sigtimedwait is available. We need to use another user signal, SIGUSR1. |
1269 | | * Actually, SIGUSR1 is also used by exec in Java, so our usage here breaks |
1270 | | * exec in Java on AIX. You cannot use pthread_cond_wait or pthread_delay_np |
1271 | | * in the signal handler, as all synchronization mechanisms just break down. |
1272 | | */ |
1273 | | |
1274 | | # if defined(PT_NO_SIGTIMEDWAIT) |
1275 | 0 | static void null_signal_handler(PRIntn sig) { return; } |
1276 | | # endif |
1277 | | |
1278 | 0 | static void suspend_signal_handler(PRIntn sig) { |
1279 | 0 | PRThread* me = PR_GetCurrentThread(); |
1280 | |
|
1281 | 0 | PR_ASSERT(me != NULL); |
1282 | 0 | PR_ASSERT(_PT_IS_GCABLE_THREAD(me)); |
1283 | 0 | PR_ASSERT((me->suspend & PT_THREAD_SUSPENDED) == 0); |
1284 | |
|
1285 | 0 | PR_LOG( |
1286 | 0 | _pr_gc_lm, PR_LOG_ALWAYS, |
1287 | 0 | ("Begin suspend_signal_handler thred %p thread id = %X\n", me, me->id)); |
1288 | | |
1289 | | /* |
1290 | | * save stack pointer |
1291 | | */ |
1292 | 0 | me->sp = &me; |
1293 | | |
1294 | | /* |
1295 | | At this point, the thread's stack pointer has been saved, |
1296 | | and it is going to enter a wait loop until it is resumed. |
1297 | | So it is _really_ suspended. |
1298 | | */ |
1299 | |
|
1300 | 0 | me->suspend |= PT_THREAD_SUSPENDED; |
1301 | | |
1302 | | /* |
1303 | | * now, block current thread |
1304 | | */ |
1305 | 0 | # if defined(PT_NO_SIGTIMEDWAIT) |
1306 | 0 | pthread_cond_signal(&me->suspendResumeCV); |
1307 | 0 | while (me->suspend & PT_THREAD_SUSPENDED) { |
1308 | 0 | # if !defined(FREEBSD) && !defined(NETBSD) && !defined(OPENBSD) && \ |
1309 | 0 | !defined(DARWIN) && !defined(RISCOS) |
1310 | 0 | PRIntn rv; |
1311 | 0 | sigwait(&sigwait_set, &rv); |
1312 | 0 | # endif |
1313 | 0 | } |
1314 | 0 | me->suspend |= PT_THREAD_RESUMED; |
1315 | 0 | pthread_cond_signal(&me->suspendResumeCV); |
1316 | | # else /* defined(PT_NO_SIGTIMEDWAIT) */ |
1317 | | while (me->suspend & PT_THREAD_SUSPENDED) { |
1318 | | PRIntn rv = sigtimedwait(&sigwait_set, NULL, &hundredmillisec); |
1319 | | PR_ASSERT(-1 == rv); |
1320 | | } |
1321 | | me->suspend |= PT_THREAD_RESUMED; |
1322 | | # endif |
1323 | | |
1324 | | /* |
1325 | | * At this point, the thread has been resumed, so set a global condition. |
1326 | | * PR_ResumeAll needs to know that this thread has really been resumed, |
1327 | | * so the signal handler sets a flag here which PR_ResumeAll must |
1328 | | * then reset. |
1329 | | */ |
1330 | |
|
1331 | 0 | PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, |
1332 | 0 | ("End suspend_signal_handler thred = %p tid = %X\n", me, me->id)); |
1333 | 0 | } /* suspend_signal_handler */ |
1334 | | |
1335 | 0 | static void pt_SuspendSet(PRThread* thred) { |
1336 | 0 | PRIntn rv; |
1337 | |
|
1338 | 0 | PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, |
1339 | 0 | ("pt_SuspendSet thred %p thread id = %X\n", thred, thred->id)); |
1340 | | |
1341 | | /* |
1342 | | * Check the thread state and signal the thread to suspend |
1343 | | */ |
1344 | |
|
1345 | 0 | PR_ASSERT((thred->suspend & PT_THREAD_SUSPENDED) == 0); |
1346 | |
|
1347 | 0 | PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, |
1348 | 0 | ("doing pthread_kill in pt_SuspendSet thred %p tid = %X\n", thred, |
1349 | 0 | thred->id)); |
1350 | 0 | rv = pthread_kill(thred->id, SIGUSR2); |
1351 | 0 | PR_ASSERT(0 == rv); |
1352 | 0 | } |
1353 | | |
1354 | 0 | static void pt_SuspendTest(PRThread* thred) { |
1355 | 0 | PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, |
1356 | 0 | ("Begin pt_SuspendTest thred %p thread id = %X\n", thred, thred->id)); |
1357 | | |
1358 | | /* |
1359 | | * Wait for the thread to be really suspended. This happens when the |
1360 | | * suspend signal handler stores the stack pointer and sets the state |
1361 | | * to suspended. |
1362 | | */ |
1363 | |
|
1364 | 0 | # if defined(PT_NO_SIGTIMEDWAIT) |
1365 | 0 | pthread_mutex_lock(&thred->suspendResumeMutex); |
1366 | 0 | while ((thred->suspend & PT_THREAD_SUSPENDED) == 0) { |
1367 | 0 | pthread_cond_timedwait(&thred->suspendResumeCV, &thred->suspendResumeMutex, |
1368 | 0 | &onemillisec); |
1369 | 0 | } |
1370 | 0 | pthread_mutex_unlock(&thred->suspendResumeMutex); |
1371 | | # else |
1372 | | while ((thred->suspend & PT_THREAD_SUSPENDED) == 0) { |
1373 | | PRIntn rv = sigtimedwait(&sigwait_set, NULL, &onemillisec); |
1374 | | PR_ASSERT(-1 == rv); |
1375 | | } |
1376 | | # endif |
1377 | |
|
1378 | 0 | PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, |
1379 | 0 | ("End pt_SuspendTest thred %p tid %X\n", thred, thred->id)); |
1380 | 0 | } /* pt_SuspendTest */ |
1381 | | |
1382 | 0 | static void pt_ResumeSet(PRThread* thred) { |
1383 | 0 | PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, |
1384 | 0 | ("pt_ResumeSet thred %p thread id = %X\n", thred, thred->id)); |
1385 | | |
1386 | | /* |
1387 | | * Clear the global state and set the thread state so that it will |
1388 | | * continue past the yield loop in the suspend signal handler. |
1389 | | */ |
1390 | |
|
1391 | 0 | PR_ASSERT(thred->suspend & PT_THREAD_SUSPENDED); |
1392 | |
|
1393 | 0 | thred->suspend &= ~PT_THREAD_SUSPENDED; |
1394 | |
|
1395 | 0 | # if defined(PT_NO_SIGTIMEDWAIT) |
1396 | 0 | pthread_kill(thred->id, SIGUSR1); |
1397 | 0 | # endif |
1398 | |
|
1399 | 0 | } /* pt_ResumeSet */ |
1400 | | |
1401 | 0 | static void pt_ResumeTest(PRThread* thred) { |
1402 | 0 | PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, |
1403 | 0 | ("Begin pt_ResumeTest thred %p thread id = %X\n", thred, thred->id)); |
1404 | | |
1405 | | /* |
1406 | | * Wait for the thread's resume state to change |
1407 | | * to indicate it is really resumed |
1408 | | */ |
1409 | 0 | # if defined(PT_NO_SIGTIMEDWAIT) |
1410 | 0 | pthread_mutex_lock(&thred->suspendResumeMutex); |
1411 | 0 | while ((thred->suspend & PT_THREAD_RESUMED) == 0) { |
1412 | 0 | pthread_cond_timedwait(&thred->suspendResumeCV, &thred->suspendResumeMutex, |
1413 | 0 | &onemillisec); |
1414 | 0 | } |
1415 | 0 | pthread_mutex_unlock(&thred->suspendResumeMutex); |
1416 | | # else |
1417 | | while ((thred->suspend & PT_THREAD_RESUMED) == 0) { |
1418 | | PRIntn rv = sigtimedwait(&sigwait_set, NULL, &onemillisec); |
1419 | | PR_ASSERT(-1 == rv); |
1420 | | } |
1421 | | # endif |
1422 | |
|
1423 | 0 | thred->suspend &= ~PT_THREAD_RESUMED; |
1424 | |
|
1425 | 0 | PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, |
1426 | 0 | ("End pt_ResumeTest thred %p tid %X\n", thred, thred->id)); |
1427 | 0 | } /* pt_ResumeTest */ |
1428 | | |
1429 | | static pthread_once_t pt_gc_support_control = PTHREAD_ONCE_INIT; |
1430 | | |
1431 | 0 | PR_IMPLEMENT(void) PR_SuspendAll(void) { |
1432 | 0 | # ifdef DEBUG |
1433 | 0 | PRIntervalTime stime, etime; |
1434 | 0 | # endif |
1435 | 0 | PRThread* thred = pt_book.first; |
1436 | 0 | PRThread* me = PR_GetCurrentThread(); |
1437 | 0 | int rv; |
1438 | |
|
1439 | 0 | rv = pthread_once(&pt_gc_support_control, init_pthread_gc_support); |
1440 | 0 | PR_ASSERT(0 == rv); |
1441 | 0 | PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, ("Begin PR_SuspendAll\n")); |
1442 | | /* |
1443 | | * Stop all threads which are marked GC able. |
1444 | | */ |
1445 | 0 | PR_Lock(pt_book.ml); |
1446 | 0 | # ifdef DEBUG |
1447 | 0 | suspendAllOn = PR_TRUE; |
1448 | 0 | stime = PR_IntervalNow(); |
1449 | 0 | # endif |
1450 | 0 | while (thred != NULL) { |
1451 | 0 | if ((thred != me) && _PT_IS_GCABLE_THREAD(thred)) { |
1452 | 0 | pt_SuspendSet(thred); |
1453 | 0 | } |
1454 | 0 | thred = thred->next; |
1455 | 0 | } |
1456 | | |
1457 | | /* Wait till they are really suspended */ |
1458 | 0 | thred = pt_book.first; |
1459 | 0 | while (thred != NULL) { |
1460 | 0 | if ((thred != me) && _PT_IS_GCABLE_THREAD(thred)) { |
1461 | 0 | pt_SuspendTest(thred); |
1462 | 0 | } |
1463 | 0 | thred = thred->next; |
1464 | 0 | } |
1465 | |
|
1466 | 0 | suspendAllSuspended = PR_TRUE; |
1467 | |
|
1468 | 0 | # ifdef DEBUG |
1469 | 0 | etime = PR_IntervalNow(); |
1470 | 0 | PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, |
1471 | 0 | ("End PR_SuspendAll (time %dms)\n", |
1472 | 0 | PR_IntervalToMilliseconds(etime - stime))); |
1473 | 0 | # endif |
1474 | 0 | } /* PR_SuspendAll */ |
1475 | | |
1476 | 0 | PR_IMPLEMENT(void) PR_ResumeAll(void) { |
1477 | 0 | # ifdef DEBUG |
1478 | 0 | PRIntervalTime stime, etime; |
1479 | 0 | # endif |
1480 | 0 | PRThread* thred = pt_book.first; |
1481 | 0 | PRThread* me = PR_GetCurrentThread(); |
1482 | 0 | PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, ("Begin PR_ResumeAll\n")); |
1483 | | /* |
1484 | | * Resume all previously suspended GC able threads. |
1485 | | */ |
1486 | 0 | suspendAllSuspended = PR_FALSE; |
1487 | 0 | # ifdef DEBUG |
1488 | 0 | stime = PR_IntervalNow(); |
1489 | 0 | # endif |
1490 | |
|
1491 | 0 | while (thred != NULL) { |
1492 | 0 | if ((thred != me) && _PT_IS_GCABLE_THREAD(thred)) { |
1493 | 0 | pt_ResumeSet(thred); |
1494 | 0 | } |
1495 | 0 | thred = thred->next; |
1496 | 0 | } |
1497 | |
|
1498 | 0 | thred = pt_book.first; |
1499 | 0 | while (thred != NULL) { |
1500 | 0 | if ((thred != me) && _PT_IS_GCABLE_THREAD(thred)) { |
1501 | 0 | pt_ResumeTest(thred); |
1502 | 0 | } |
1503 | 0 | thred = thred->next; |
1504 | 0 | } |
1505 | |
|
1506 | 0 | PR_Unlock(pt_book.ml); |
1507 | 0 | # ifdef DEBUG |
1508 | 0 | suspendAllOn = PR_FALSE; |
1509 | 0 | etime = PR_IntervalNow(); |
1510 | 0 | PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, |
1511 | 0 | ("End PR_ResumeAll (time %dms)\n", |
1512 | 0 | PR_IntervalToMilliseconds(etime - stime))); |
1513 | 0 | # endif |
1514 | 0 | } /* PR_ResumeAll */ |
1515 | | |
1516 | | /* Return the stack pointer for the given thread - used by the GC */ |
1517 | 0 | PR_IMPLEMENT(void*) PR_GetSP(PRThread* thred) { |
1518 | 0 | PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, |
1519 | 0 | ("in PR_GetSP thred %p thid = %X, sp = %p\n", thred, thred->id, |
1520 | 0 | thred->sp)); |
1521 | 0 | return thred->sp; |
1522 | 0 | } /* PR_GetSP */ |
1523 | | |
1524 | 0 | PR_IMPLEMENT(PRStatus) PR_SetCurrentThreadName(const char* name) { |
1525 | 0 | PRThread* thread; |
1526 | 0 | size_t nameLen; |
1527 | 0 | int result = 0; |
1528 | |
|
1529 | 0 | if (!name) { |
1530 | 0 | PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); |
1531 | 0 | return PR_FAILURE; |
1532 | 0 | } |
1533 | | |
1534 | 0 | thread = PR_GetCurrentThread(); |
1535 | 0 | if (!thread) { |
1536 | 0 | return PR_FAILURE; |
1537 | 0 | } |
1538 | | |
1539 | 0 | PR_Free(thread->name); |
1540 | 0 | nameLen = strlen(name); |
1541 | 0 | thread->name = (char*)PR_Malloc(nameLen + 1); |
1542 | 0 | if (!thread->name) { |
1543 | 0 | return PR_FAILURE; |
1544 | 0 | } |
1545 | 0 | memcpy(thread->name, name, nameLen + 1); |
1546 | |
|
1547 | | # if defined(OPENBSD) || defined(FREEBSD) || defined(DRAGONFLY) |
1548 | | pthread_set_name_np(thread->id, name); |
1549 | | # elif defined(ANDROID) |
1550 | | prctl(PR_SET_NAME, (unsigned long)(name)); |
1551 | | # elif defined(NETBSD) |
1552 | | result = pthread_setname_np(thread->id, "%s", (void*)name); |
1553 | | # else /* not BSD */ |
1554 | | /* |
1555 | | * On OSX, pthread_setname_np is only available in 10.6 or later, so test |
1556 | | * for it at runtime. It also may not be available on all linux distros. |
1557 | | */ |
1558 | | # if defined(DARWIN) |
1559 | | int (*dynamic_pthread_setname_np)(const char*); |
1560 | | # else |
1561 | 0 | int (*dynamic_pthread_setname_np)(pthread_t, const char*); |
1562 | 0 | # endif |
1563 | |
|
1564 | 0 | *(void**)(&dynamic_pthread_setname_np) = |
1565 | 0 | dlsym(RTLD_DEFAULT, "pthread_setname_np"); |
1566 | 0 | if (!dynamic_pthread_setname_np) { |
1567 | 0 | return PR_SUCCESS; |
1568 | 0 | } |
1569 | | |
1570 | | # if defined(DARWIN) |
1571 | | /* Mac OS X has a length limit of 63 characters, but there is no API |
1572 | | * exposing it. |
1573 | | */ |
1574 | | # define SETNAME_LENGTH_CONSTRAINT 63 |
1575 | | # else |
1576 | | /* |
1577 | | * The 15-character name length limit is an experimentally determined |
1578 | | * length of a null-terminated string that most linux distros accept |
1579 | | * as an argument to pthread_setname_np. Otherwise the E2BIG |
1580 | | * error is returned by the function. |
1581 | | */ |
1582 | 0 | # define SETNAME_LENGTH_CONSTRAINT 15 |
1583 | 0 | # endif |
1584 | 0 | # define SETNAME_FRAGMENT1_LENGTH (SETNAME_LENGTH_CONSTRAINT >> 1) |
1585 | 0 | # define SETNAME_FRAGMENT2_LENGTH \ |
1586 | 0 | (SETNAME_LENGTH_CONSTRAINT - SETNAME_FRAGMENT1_LENGTH - 1) |
1587 | 0 | char name_dup[SETNAME_LENGTH_CONSTRAINT + 1]; |
1588 | 0 | if (nameLen > SETNAME_LENGTH_CONSTRAINT) { |
1589 | 0 | memcpy(name_dup, name, SETNAME_FRAGMENT1_LENGTH); |
1590 | 0 | name_dup[SETNAME_FRAGMENT1_LENGTH] = '~'; |
1591 | | /* Note that this also copies the null terminator. */ |
1592 | 0 | memcpy(name_dup + SETNAME_FRAGMENT1_LENGTH + 1, |
1593 | 0 | name + nameLen - SETNAME_FRAGMENT2_LENGTH, |
1594 | 0 | SETNAME_FRAGMENT2_LENGTH + 1); |
1595 | 0 | name = name_dup; |
1596 | 0 | } |
1597 | |
|
1598 | | # if defined(DARWIN) |
1599 | | result = dynamic_pthread_setname_np(name); |
1600 | | # else |
1601 | 0 | result = dynamic_pthread_setname_np(thread->id, name); |
1602 | 0 | # endif |
1603 | 0 | # endif /* not BSD */ |
1604 | |
|
1605 | 0 | if (result) { |
1606 | 0 | PR_SetError(PR_UNKNOWN_ERROR, result); |
1607 | 0 | return PR_FAILURE; |
1608 | 0 | } |
1609 | 0 | return PR_SUCCESS; |
1610 | 0 | } |
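 | | /* |
 | | ** Worked example (editorial addition, not in the original source): with the |
 | | ** 15-character limit used on the non-Darwin path above, a long name is cut |
 | | ** to its first 7 characters, a '~', and its last 7 characters before being |
 | | ** passed to pthread_setname_np, e.g. |
 | | ** |
 | | **   "SocketTransportService"  ->  "SocketT~Service" |
 | | ** |
 | | ** The full name is still what PR_GetThreadName() returns, because |
 | | ** thread->name was copied before the truncation. |
 | | */ |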
1611 | | |
1612 | 0 | PR_IMPLEMENT(const char*) PR_GetThreadName(const PRThread* thread) { |
1613 | 0 | if (!thread) { |
1614 | 0 | return NULL; |
1615 | 0 | } |
1616 | 0 | return thread->name; |
1617 | 0 | } |
1618 | | |
1619 | | #endif /* defined(_PR_PTHREADS) */ |
1620 | | |
1621 | | /* ptthread.c */ |