/src/nspr/pr/src/threads/prcmon.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
3 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
4 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
5 | | |
6 | | #include "primpl.h" |
7 | | |
8 | | #include <stdlib.h> |
9 | | #include <stddef.h> |
10 | | |
/*
** Lock used to lock the monitor cache.
**
** Three configurations:
**  - _PR_NO_PREEMPT: single-threaded, no locking needed at all.
**  - _PR_LOCAL_THREADS_ONLY: user-level threads; disabling software
**    interrupts suffices.  NOTE: _PR_LOCK_MCACHE deliberately opens a
**    brace (and declares _is) that _PR_UNLOCK_MCACHE closes, so the two
**    must always appear as a matched pair in the same scope.
**  - otherwise: native threads; a real PRLock guards the cache.
*/
#ifdef _PR_NO_PREEMPT
#  define _PR_NEW_LOCK_MCACHE()
#  define _PR_DESTROY_LOCK_MCACHE()
#  define _PR_LOCK_MCACHE()
#  define _PR_UNLOCK_MCACHE()
#else
#  ifdef _PR_LOCAL_THREADS_ONLY
#    define _PR_NEW_LOCK_MCACHE()
#    define _PR_DESTROY_LOCK_MCACHE()
#    define _PR_LOCK_MCACHE() \
      {                       \
        PRIntn _is;           \
        _PR_INTSOFF(_is)
#    define _PR_UNLOCK_MCACHE() \
      _PR_INTSON(_is);          \
      }
#  else
PRLock* _pr_mcacheLock;
#    define _PR_NEW_LOCK_MCACHE() (_pr_mcacheLock = PR_NewLock())
#    define _PR_DESTROY_LOCK_MCACHE() \
      PR_BEGIN_MACRO                  \
      if (_pr_mcacheLock) {           \
        PR_DestroyLock(_pr_mcacheLock); \
        _pr_mcacheLock = NULL;        \
      }                               \
      PR_END_MACRO
#    define _PR_LOCK_MCACHE() PR_Lock(_pr_mcacheLock)
#    define _PR_UNLOCK_MCACHE() PR_Unlock(_pr_mcacheLock)
#  endif
#endif
42 | | |
43 | | /************************************************************************/ |
44 | | |
typedef struct MonitorCacheEntryStr MonitorCacheEntry;

/* One cache slot: associates a client address with a system monitor. */
struct MonitorCacheEntryStr {
  MonitorCacheEntry* next;  /* hash-chain link, or free-list link when free */
  void* address;            /* client address this monitor guards; 0 when free */
  PRMonitor* mon;           /* the underlying system monitor */
  long cacheEntryCount;     /* outstanding PR_CEnterMonitor count; 0 => free */
};

/*
** An array of MonitorCacheEntry's, plus a pointer to link these
** arrays together.
*/

typedef struct MonitorCacheEntryBlockStr MonitorCacheEntryBlock;

struct MonitorCacheEntryBlockStr {
  MonitorCacheEntryBlock* next;
  /* Old-style variable-length trailing array: blocks are allocated with
  ** room for N entries -- see the "(entries - 1) * sizeof" math in
  ** ExpandMonitorCache, which accounts for this built-in first entry. */
  MonitorCacheEntry entries[1];
};
65 | | |
/* Monitor-cache state.  All of these are protected by the mcache lock
** (see _PR_LOCK_MCACHE above). */
static PRUint32 hash_mask;            /* num_hash_buckets - 1, used by HASH() */
static PRUintn num_hash_buckets;      /* current hash table size */
static PRUintn num_hash_buckets_log2; /* log2 of the current table size */
static MonitorCacheEntry** hash_buckets; /* chained hash table of live entries */
static MonitorCacheEntry* free_entries;  /* singly-linked free list */
static PRUintn num_free_entries;         /* length of free_entries */
static PRBool expanding;       /* true while ExpandMonitorCache is running */
static MonitorCacheEntryBlock* mcache_blocks; /* all allocated entry blocks */

/* Optional client callback, fired (with the mcache lock held) when an
** entry holding an old address is recycled; see CreateMonitor and
** PR_CSetOnMonitorRecycle. */
static void (*OnMonitorRecycle)(void* address);
76 | | |
/* Map an address to a bucket index.  XORing the >>2 and >>10 shifts mixes
** bits above the (typically aligned) low bits; hash_mask keeps the result
** inside the current table. */
#define HASH(address) \
  ((PRUint32)(((PRUptrdiff)(address) >> 2) ^ ((PRUptrdiff)(address) >> 10)) & \
   hash_mask)
80 | | |
81 | | /* |
82 | | ** Expand the monitor cache. This grows the hash buckets and allocates a |
83 | | ** new chunk of cache entries and throws them on the free list. We keep |
84 | | ** as many hash buckets as there are entries. |
85 | | ** |
86 | | ** Because we call malloc and malloc may need the monitor cache, we must |
87 | | ** ensure that there are several free monitor cache entries available for |
88 | | ** malloc to get. FREE_THRESHOLD is used to prevent monitor cache |
89 | | ** starvation during monitor cache expansion. |
90 | | */ |
91 | | |
92 | 0 | #define FREE_THRESHOLD 5 |
93 | | |
/*
** Grow the monitor cache to 2^new_size_log2 entries.  Called with the
** mcache lock held (or from _PR_InitCMon before other threads exist).
**
** Adds up to 2^new_size_log2 new entries (each with its own system
** monitor) to the free list, then attempts to rebuild the hash table at
** the new size.  Returns PR_FAILURE only when no forward progress was
** possible at all (no memory for the entry block, or zero system
** monitors could be created); a failed hash-table allocation is treated
** as success with degraded lookup performance.
*/
static PRStatus ExpandMonitorCache(PRUintn new_size_log2) {
  MonitorCacheEntry **old_hash_buckets, *p;
  PRUintn i, entries, old_num_hash_buckets, added;
  MonitorCacheEntry** new_hash_buckets;
  MonitorCacheEntryBlock* new_block;

  entries = 1L << new_size_log2;

  /*
  ** Expand the monitor-cache-entry free list
  */
  /* "entries - 1" because MonitorCacheEntryBlock already contains one
  ** entry in its declared entries[1] array. */
  new_block = (MonitorCacheEntryBlock*)PR_CALLOC(
      sizeof(MonitorCacheEntryBlock) +
      (entries - 1) * sizeof(MonitorCacheEntry));
  if (NULL == new_block) {
    return PR_FAILURE;
  }

  /*
  ** Allocate system monitors for the new monitor cache entries. If we
  ** run out of system monitors, break out of the loop.
  */
  for (i = 0, p = new_block->entries; i < entries; i++, p++) {
    p->mon = PR_NewMonitor();
    if (!p->mon) {
      break;
    }
  }
  added = i;
  if (added != entries) {
    MonitorCacheEntryBlock* realloc_block;

    if (added == 0) {
      /* Totally out of system monitors. Lossage abounds */
      PR_DELETE(new_block);
      return PR_FAILURE;
    }

    /*
    ** We were able to allocate some of the system monitors. Use
    ** realloc to shrink down the new_block memory. If that fails,
    ** carry on with the too-large new_block.
    */
    realloc_block = (MonitorCacheEntryBlock*)PR_REALLOC(
        new_block, sizeof(MonitorCacheEntryBlock) +
                       (added - 1) * sizeof(MonitorCacheEntry));
    if (realloc_block) {
      new_block = realloc_block;
    }
  }

  /*
  ** Now that we have allocated all of the system monitors, build up
  ** the new free list. We can just update the free_list because we own
  ** the mcache-lock and we aren't calling anyone who might want to use
  ** it.
  */
  /* added >= 1 here, so after the loop p points at the last new entry
  ** even when the loop body never runs (added == 1). */
  for (i = 0, p = new_block->entries; i < added - 1; i++, p++) {
    p->next = p + 1;
  }
  p->next = free_entries;
  free_entries = new_block->entries;
  num_free_entries += added;
  new_block->next = mcache_blocks;
  mcache_blocks = new_block;

  /* Try to expand the hash table */
  new_hash_buckets =
      (MonitorCacheEntry**)PR_CALLOC(entries * sizeof(MonitorCacheEntry*));
  if (NULL == new_hash_buckets) {
    /*
    ** Partial lossage. In this situation we don't get any more hash
    ** buckets, which just means that the table lookups will take
    ** longer. This is bad, but not fatal
    */
    PR_LOG(_pr_cmon_lm, PR_LOG_WARNING,
           ("unable to grow monitor cache hash buckets"));
    return PR_SUCCESS;
  }

  /*
  ** Compute new hash mask value. This value is used to mask an address
  ** until it's bits are in the right spot for indexing into the hash
  ** table.
  */
  hash_mask = entries - 1;

  /*
  ** Expand the hash table. We have to rehash everything in the old
  ** table into the new table.
  */
  old_hash_buckets = hash_buckets;
  old_num_hash_buckets = num_hash_buckets;
  for (i = 0; i < old_num_hash_buckets; i++) {
    p = old_hash_buckets[i];
    while (p) {
      MonitorCacheEntry* next = p->next;

      /* Hash based on new table size, and then put p in the new table */
      PRUintn hash = HASH(p->address);
      p->next = new_hash_buckets[hash];
      new_hash_buckets[hash] = p;

      p = next;
    }
  }

  /*
  ** Switch over to new hash table and THEN call free of the old
  ** table. Since free might re-enter _pr_mcache_lock, things would
  ** break terribly if it used the old hash table.
  */
  hash_buckets = new_hash_buckets;
  num_hash_buckets = entries;
  num_hash_buckets_log2 = new_size_log2;
  PR_DELETE(old_hash_buckets);

  PR_LOG(
      _pr_cmon_lm, PR_LOG_NOTICE,
      ("expanded monitor cache to %d (buckets %d)", num_free_entries, entries));

  return PR_SUCCESS;
} /* ExpandMonitorCache */
217 | | |
218 | | /* |
219 | | ** Lookup a monitor cache entry by address. Return a pointer to the |
220 | | ** pointer to the monitor cache entry on success, null on failure. |
221 | | */ |
222 | 0 | static MonitorCacheEntry** LookupMonitorCacheEntry(void* address) { |
223 | 0 | PRUintn hash; |
224 | 0 | MonitorCacheEntry **pp, *p; |
225 | |
|
226 | 0 | hash = HASH(address); |
227 | 0 | pp = hash_buckets + hash; |
228 | 0 | while ((p = *pp) != 0) { |
229 | 0 | if (p->address == address) { |
230 | 0 | if (p->cacheEntryCount > 0) { |
231 | 0 | return pp; |
232 | 0 | } |
233 | 0 | return NULL; |
234 | 0 | } |
235 | 0 | pp = &p->next; |
236 | 0 | } |
237 | 0 | return NULL; |
238 | 0 | } |
239 | | |
240 | | /* |
241 | | ** Try to create a new cached monitor. If it's already in the cache, |
242 | | ** great - return it. Otherwise get a new free cache entry and set it |
243 | | ** up. If the cache free space is getting low, expand the cache. |
244 | | */ |
/*
** Try to create a new cached monitor. If it's already in the cache,
** great - return it. Otherwise get a new free cache entry and set it
** up. If the cache free space is getting low, expand the cache.
**
** Caller must hold the mcache lock.  Returns NULL only when the cache
** needed expansion and ExpandMonitorCache failed.
*/
static PRMonitor* CreateMonitor(void* address) {
  PRUintn hash;
  MonitorCacheEntry **pp, *p;

  hash = HASH(address);
  pp = hash_buckets + hash;
  while ((p = *pp) != 0) {
    if (p->address == address) {
      goto gotit;
    }

    pp = &p->next;
  }

  /* Expand the monitor cache if we have run out of free slots in the table */
  if (num_free_entries < FREE_THRESHOLD) {
    /* Expand monitor cache */

    /*
    ** This function is called with the lock held. So what's the 'expanding'
    ** boolean all about? Seems a bit redundant.
    */
    if (!expanding) {
      PRStatus rv;

      expanding = PR_TRUE;
      rv = ExpandMonitorCache(num_hash_buckets_log2 + 1);
      expanding = PR_FALSE;
      if (PR_FAILURE == rv) {
        return NULL;
      }

      /* redo the hash because it'll be different now */
      hash = HASH(address);
    } else {
      /*
      ** We are in process of expanding and we need a cache
      ** monitor. Make sure we have enough!
      */
      /* NOTE(review): PR_ASSERT is a no-op in release builds; if the
      ** free list were actually empty here, the pop below would
      ** dereference NULL. */
      PR_ASSERT(num_free_entries > 0);
    }
  }

  /* Make a new monitor */
  /* Pop the head of the free list.  If the entry defensively still
  ** carries an old address, tell the client it is being recycled. */
  p = free_entries;
  free_entries = p->next;
  num_free_entries--;
  if (OnMonitorRecycle && p->address) {
    OnMonitorRecycle(p->address);
  }
  /* Link the entry into the (possibly freshly rebuilt) hash table. */
  p->address = address;
  p->next = hash_buckets[hash];
  hash_buckets[hash] = p;
  PR_ASSERT(p->cacheEntryCount == 0);

gotit:
  p->cacheEntryCount++;
  return p->mon;
}
304 | | |
305 | | /* |
306 | | ** Initialize the monitor cache |
307 | | */ |
/*
** Initialize the monitor cache: create the cache lock and seed the
** cache with 2^3 = 8 entries/buckets.
**
** NOTE(review): ExpandMonitorCache's return value is ignored; if the
** initial allocation fails, the cache starts empty and CreateMonitor
** will retry expansion on first use (num_free_entries < FREE_THRESHOLD).
*/
void _PR_InitCMon(void) {
  _PR_NEW_LOCK_MCACHE();
  ExpandMonitorCache(3);
}
312 | | |
313 | | /* |
314 | | ** Destroy the monitor cache |
315 | | */ |
/*
** Destroy the monitor cache: tear down the cache lock, destroy the
** system monitors on the free list, release every entry block and the
** hash table, and reset all module state.
**
** NOTE(review): only monitors on the free list are destroyed; an entry
** still linked in hash_buckets (cacheEntryCount > 0) would leak its
** system monitor.  Presumably callers guarantee the cache is idle at
** shutdown -- confirm against _PR_CleanupCMon's callers.
*/
void _PR_CleanupCMon(void) {
  _PR_DESTROY_LOCK_MCACHE();

  while (free_entries) {
    PR_DestroyMonitor(free_entries->mon);
    free_entries = free_entries->next;
  }
  num_free_entries = 0;

  /* The entries themselves live inside these blocks, so free the blocks
  ** only after the free list has been walked. */
  while (mcache_blocks) {
    MonitorCacheEntryBlock* block;

    block = mcache_blocks;
    mcache_blocks = block->next;
    PR_DELETE(block);
  }

  PR_DELETE(hash_buckets);
  hash_mask = 0;
  num_hash_buckets = 0;
  num_hash_buckets_log2 = 0;

  expanding = PR_FALSE;
  OnMonitorRecycle = NULL;
}
341 | | |
342 | | /* |
343 | | ** Create monitor for address. Don't enter the monitor while we have the |
344 | | ** mcache locked because we might block! |
345 | | */ |
346 | 0 | PR_IMPLEMENT(PRMonitor*) PR_CEnterMonitor(void* address) { |
347 | 0 | PRMonitor* mon; |
348 | |
|
349 | 0 | if (!_pr_initialized) { |
350 | 0 | _PR_ImplicitInitialization(); |
351 | 0 | } |
352 | |
|
353 | 0 | _PR_LOCK_MCACHE(); |
354 | 0 | mon = CreateMonitor(address); |
355 | 0 | _PR_UNLOCK_MCACHE(); |
356 | |
|
357 | 0 | if (!mon) { |
358 | 0 | return NULL; |
359 | 0 | } |
360 | | |
361 | 0 | PR_EnterMonitor(mon); |
362 | 0 | return mon; |
363 | 0 | } |
364 | | |
/*
** Exit the cached monitor for address.  Decrements the entry's use
** count; when it reaches zero the entry is unlinked from the hash table
** and returned to the free list before the underlying monitor is
** exited.  Returns PR_FAILURE when no in-use entry exists for address;
** otherwise returns whatever PR_ExitMonitor reports.
*/
PR_IMPLEMENT(PRStatus) PR_CExitMonitor(void* address) {
  MonitorCacheEntry **pp, *p;
  PRStatus status = PR_SUCCESS;

  _PR_LOCK_MCACHE();
  pp = LookupMonitorCacheEntry(address);
  if (pp != NULL) {
    p = *pp;
    if (--p->cacheEntryCount == 0) {
      /*
      ** Nobody is using the system monitor. Put it on the cached free
      ** list. We are safe from somebody trying to use it because we
      ** have the mcache locked.
      */
      p->address = 0;         /* defensive move */
      *pp = p->next;          /* unlink from hash_buckets */
      p->next = free_entries; /* link into free list */
      free_entries = p;
      num_free_entries++; /* count it as free */
    }
    /* Exit the system monitor even if the entry was just recycled; we
    ** still hold the mcache lock, so nobody else can grab it yet. */
    status = PR_ExitMonitor(p->mon);
  } else {
    status = PR_FAILURE;
  }
  _PR_UNLOCK_MCACHE();

  return status;
}
393 | | |
394 | 0 | PR_IMPLEMENT(PRStatus) PR_CWait(void* address, PRIntervalTime ticks) { |
395 | 0 | MonitorCacheEntry** pp; |
396 | 0 | PRMonitor* mon; |
397 | |
|
398 | 0 | _PR_LOCK_MCACHE(); |
399 | 0 | pp = LookupMonitorCacheEntry(address); |
400 | 0 | mon = pp ? ((*pp)->mon) : NULL; |
401 | 0 | _PR_UNLOCK_MCACHE(); |
402 | |
|
403 | 0 | if (mon == NULL) { |
404 | 0 | return PR_FAILURE; |
405 | 0 | } |
406 | 0 | return PR_Wait(mon, ticks); |
407 | 0 | } |
408 | | |
409 | 0 | PR_IMPLEMENT(PRStatus) PR_CNotify(void* address) { |
410 | 0 | MonitorCacheEntry** pp; |
411 | 0 | PRMonitor* mon; |
412 | |
|
413 | 0 | _PR_LOCK_MCACHE(); |
414 | 0 | pp = LookupMonitorCacheEntry(address); |
415 | 0 | mon = pp ? ((*pp)->mon) : NULL; |
416 | 0 | _PR_UNLOCK_MCACHE(); |
417 | |
|
418 | 0 | if (mon == NULL) { |
419 | 0 | return PR_FAILURE; |
420 | 0 | } |
421 | 0 | return PR_Notify(mon); |
422 | 0 | } |
423 | | |
424 | 0 | PR_IMPLEMENT(PRStatus) PR_CNotifyAll(void* address) { |
425 | 0 | MonitorCacheEntry** pp; |
426 | 0 | PRMonitor* mon; |
427 | |
|
428 | 0 | _PR_LOCK_MCACHE(); |
429 | 0 | pp = LookupMonitorCacheEntry(address); |
430 | 0 | mon = pp ? ((*pp)->mon) : NULL; |
431 | 0 | _PR_UNLOCK_MCACHE(); |
432 | |
|
433 | 0 | if (mon == NULL) { |
434 | 0 | return PR_FAILURE; |
435 | 0 | } |
436 | 0 | return PR_NotifyAll(mon); |
437 | 0 | } |
438 | | |
/*
** Register (or clear, with NULL) the callback invoked when a cache
** entry still carrying an old address is recycled for a new one.  The
** callback runs with the mcache lock held (see CreateMonitor), so it
** must not re-enter the cached-monitor API.
*/
PR_IMPLEMENT(void)
PR_CSetOnMonitorRecycle(void (*callback)(void* address)) {
  OnMonitorRecycle = callback;
}