/src/nss-nspr/nspr/pr/src/malloc/prmem.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
3 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
4 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
5 | | |
6 | | /* |
7 | | ** Thread safe versions of malloc, free, realloc, calloc and cfree. |
8 | | */ |
9 | | |
10 | | #include "primpl.h" |
11 | | |
12 | | #ifdef _PR_ZONE_ALLOCATOR |
13 | | |
14 | | /* |
15 | | ** The zone allocator code must use native mutexes and cannot |
16 | | ** use PRLocks because PR_NewLock calls PR_Calloc, resulting |
17 | | ** in cyclic dependency of initialization. |
18 | | */ |
19 | | |
20 | | # include <string.h> |
21 | | |
union memBlkHdrUn;

/*
** Per-(size-class, thread-pool) free list.  One instance lives in the
** static zones[][] table; the mutex protects head/elements, while the
** remaining counters are advisory statistics.
*/
typedef struct MemoryZoneStr {
  union memBlkHdrUn* head; /* free list */
  pthread_mutex_t lock;
  size_t blockSize; /* size of blocks on this free list */
  PRUint32 locked;  /* current state of lock */
  PRUint32 contention; /* counter: had to wait for lock */
  PRUint32 hits;   /* allocated from free list */
  PRUint32 misses; /* had to call malloc */
  PRUint32 elements; /* on free list */
} MemoryZone;

/*
** Header stored immediately before (and, as a guard copy, immediately
** after) each allocated block.  The union with filler[48] pads the
** header to a fixed 48 bytes so the user area keeps a stable offset
** and alignment regardless of the struct's natural size.
*/
typedef union memBlkHdrUn {
  unsigned char filler[48]; /* fix the size of this beast */
  struct memBlkHdrStr {
    union memBlkHdrUn* next;   /* next block on the zone's free list */
    MemoryZone* zone;          /* owning zone, or NULL for oversize blocks */
    size_t blockSize;          /* usable capacity of the block */
    size_t requestedSize;      /* size the caller actually asked for */
    PRUint32 magic;            /* ZONE_MAGIC; validates block provenance */
  } s;
} MemBlockHdr;
45 | | |
/* Number of size classes: zone z serves blocks of 16 << (2*z) bytes,
** i.e. 16, 64, 256, 1024, 4096, 16384, 65536. */
# define MEM_ZONES 7
# define THREAD_POOLS 11 /* prime number for modulus */
# define ZONE_MAGIC 0x0BADC0DE

/* [size class][thread pool] free lists; threads hash to a pool by id. */
static MemoryZone zones[MEM_ZONES][THREAD_POOLS];

/* Selected once in _PR_InitZones and never flipped on afterwards. */
static PRBool use_zone_allocator = PR_FALSE;

static void pr_ZoneFree(void* ptr);
55 | | |
56 | 0 | void _PR_DestroyZones(void) { |
57 | 0 | int i, j; |
58 | |
|
59 | 0 | if (!use_zone_allocator) { |
60 | 0 | return; |
61 | 0 | } |
62 | | |
63 | 0 | for (j = 0; j < THREAD_POOLS; j++) { |
64 | 0 | for (i = 0; i < MEM_ZONES; i++) { |
65 | 0 | MemoryZone* mz = &zones[i][j]; |
66 | 0 | pthread_mutex_destroy(&mz->lock); |
67 | 0 | while (mz->head) { |
68 | 0 | MemBlockHdr* hdr = mz->head; |
69 | 0 | mz->head = hdr->s.next; /* unlink it */ |
70 | 0 | free(hdr); |
71 | 0 | mz->elements--; |
72 | 0 | } |
73 | 0 | } |
74 | 0 | } |
75 | 0 | use_zone_allocator = PR_FALSE; |
76 | 0 | } |
77 | | |
78 | | /* |
79 | | ** pr_FindSymbolInProg |
80 | | ** |
81 | | ** Find the specified data symbol in the program and return |
82 | | ** its address. |
83 | | */ |
84 | | |
85 | | # ifdef HAVE_DLL |
86 | | |
87 | | # if defined(USE_DLFCN) && !defined(NO_DLOPEN_NULL) |
88 | | |
89 | | # include <dlfcn.h> |
90 | | |
91 | 2 | static void* pr_FindSymbolInProg(const char* name) { |
92 | 2 | void* h; |
93 | 2 | void* sym; |
94 | | |
95 | 2 | h = dlopen(0, RTLD_LAZY); |
96 | 2 | if (h == NULL) { |
97 | 0 | return NULL; |
98 | 0 | } |
99 | 2 | sym = dlsym(h, name); |
100 | 2 | (void)dlclose(h); |
101 | 2 | return sym; |
102 | 2 | } |
103 | | |
104 | | # elif defined(USE_HPSHL) |
105 | | |
106 | | # include <dl.h> |
107 | | |
/*
** HP-UX variant: search the program for a data symbol with shl_findsym.
** Passing a NULL handle tells shl_findsym to search all loaded shared
** libraries.  Returns the symbol's address or NULL if not found.
*/
static void* pr_FindSymbolInProg(const char* name) {
  shl_t h = NULL;
  void* sym;

  if (shl_findsym(&h, name, TYPE_DATA, &sym) == -1) {
    return NULL;
  }
  return sym;
}
117 | | |
118 | | # elif defined(USE_MACH_DYLD) || defined(NO_DLOPEN_NULL) |
119 | | |
120 | | static void* pr_FindSymbolInProg(const char* name) { |
121 | | /* FIXME: not implemented */ |
122 | | return NULL; |
123 | | } |
124 | | |
125 | | # else |
126 | | |
127 | | # error "The zone allocator is not supported on this platform" |
128 | | |
129 | | # endif |
130 | | |
131 | | # else /* !defined(HAVE_DLL) */ |
132 | | |
133 | | static void* pr_FindSymbolInProg(const char* name) { |
134 | | /* can't be implemented */ |
135 | | return NULL; |
136 | | } |
137 | | |
138 | | # endif /* HAVE_DLL */ |
139 | | |
140 | 2 | void _PR_InitZones(void) { |
141 | 2 | int i, j; |
142 | 2 | char* envp; |
143 | 2 | PRBool* sym; |
144 | | |
145 | 2 | if ((sym = (PRBool*)pr_FindSymbolInProg("nspr_use_zone_allocator")) != NULL) { |
146 | 0 | use_zone_allocator = *sym; |
147 | 2 | } else if ((envp = getenv("NSPR_USE_ZONE_ALLOCATOR")) != NULL) { |
148 | 0 | use_zone_allocator = (atoi(envp) == 1); |
149 | 0 | } |
150 | | |
151 | 2 | if (!use_zone_allocator) { |
152 | 2 | return; |
153 | 2 | } |
154 | | |
155 | 0 | for (j = 0; j < THREAD_POOLS; j++) { |
156 | 0 | for (i = 0; i < MEM_ZONES; i++) { |
157 | 0 | MemoryZone* mz = &zones[i][j]; |
158 | 0 | int rv = pthread_mutex_init(&mz->lock, NULL); |
159 | 0 | PR_ASSERT(0 == rv); |
160 | 0 | if (rv != 0) { |
161 | 0 | goto loser; |
162 | 0 | } |
163 | 0 | mz->blockSize = 16 << (2 * i); |
164 | 0 | } |
165 | 0 | } |
166 | 0 | return; |
167 | | |
168 | 0 | loser: |
169 | 0 | _PR_DestroyZones(); |
170 | 0 | return; |
171 | 0 | } |
172 | | |
173 | | PR_IMPLEMENT(void) |
174 | 0 | PR_FPrintZoneStats(PRFileDesc* debug_out) { |
175 | 0 | int i, j; |
176 | |
|
177 | 0 | for (j = 0; j < THREAD_POOLS; j++) { |
178 | 0 | for (i = 0; i < MEM_ZONES; i++) { |
179 | 0 | MemoryZone* mz = &zones[i][j]; |
180 | 0 | MemoryZone zone = *mz; |
181 | 0 | if (zone.elements || zone.misses || zone.hits) { |
182 | 0 | PR_fprintf(debug_out, |
183 | 0 | "pool: %d, zone: %d, size: %d, free: %d, hit: %d, miss: %d, " |
184 | 0 | "contend: %d\n", |
185 | 0 | j, i, zone.blockSize, zone.elements, zone.hits, zone.misses, |
186 | 0 | zone.contention); |
187 | 0 | } |
188 | 0 | } |
189 | 0 | } |
190 | 0 | } |
191 | | |
/*
** Allocate at least `size` bytes.  Requests up to 65536 bytes are
** served from the per-(size-class, thread-pool) free lists, falling
** back to malloc on a miss; larger requests get a zone-less block
** (s.zone == NULL) that pr_ZoneFree releases directly.  Every block is
** laid out as [header][user area][trailing header copy]; the trailing
** copy lets the free/realloc paths assert against buffer overruns.
** Returns NULL with PR_OUT_OF_MEMORY_ERROR set on malloc failure.
*/
static void* pr_ZoneMalloc(PRUint32 size) {
  void* rv;
  unsigned int zone;
  size_t blockSize;
  MemBlockHdr *mb, *mt;
  MemoryZone* mz;

  /* Always allocate a non-zero amount of bytes */
  if (size < 1) {
    size = 1;
  }
  /* Pick the smallest size class that fits: zone z holds blocks of
  ** 16 << (2*z) bytes. */
  for (zone = 0, blockSize = 16; zone < MEM_ZONES; ++zone, blockSize <<= 2) {
    if (size <= blockSize) {
      break;
    }
  }
  if (zone < MEM_ZONES) {
    /* Hash the calling thread onto one of THREAD_POOLS pools so
    ** concurrent threads usually hit different locks. */
    pthread_t me = pthread_self();
    unsigned int pool = (PRUptrdiff)me % THREAD_POOLS;
    PRUint32 wasLocked;
    mz = &zones[zone][pool];
    /* Sample `locked` before acquiring the mutex: if it was set, some
    ** thread held the lock and we count contention.  The read is
    ** unsynchronized -- the counter is advisory only. */
    wasLocked = mz->locked;
    pthread_mutex_lock(&mz->lock);
    mz->locked = 1;
    if (wasLocked) {
      mz->contention++;
    }
    if (mz->head) {
      /* Fast path: recycle the block at the head of the free list. */
      mb = mz->head;
      PR_ASSERT(mb->s.magic == ZONE_MAGIC);
      PR_ASSERT(mb->s.zone == mz);
      PR_ASSERT(mb->s.blockSize == blockSize);
      PR_ASSERT(mz->blockSize == blockSize);

      /* Validate the trailing header copy as well (overrun guard). */
      mt = (MemBlockHdr*)(((char*)(mb + 1)) + blockSize);
      PR_ASSERT(mt->s.magic == ZONE_MAGIC);
      PR_ASSERT(mt->s.zone == mz);
      PR_ASSERT(mt->s.blockSize == blockSize);

      mz->hits++;
      mz->elements--;
      mz->head = mb->s.next; /* take off free list */
      mz->locked = 0;
      pthread_mutex_unlock(&mz->lock);

      /* Update both header copies outside the lock; the block is now
      ** exclusively ours. */
      mt->s.next = mb->s.next = NULL;
      mt->s.requestedSize = mb->s.requestedSize = size;

      /* User data begins right after the leading header. */
      rv = (void*)(mb + 1);
      return rv;
    }

    /* Free list empty: record the miss, drop the lock, go to malloc. */
    mz->misses++;
    mz->locked = 0;
    pthread_mutex_unlock(&mz->lock);

    /* Room for the leading header, the user area, and the trailer. */
    mb = (MemBlockHdr*)malloc(blockSize + 2 * (sizeof *mb));
    if (!mb) {
      PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
      return NULL;
    }
    mb->s.next = NULL;
    mb->s.zone = mz;
    mb->s.magic = ZONE_MAGIC;
    mb->s.blockSize = blockSize;
    mb->s.requestedSize = size;

    /* Mirror the header into the trailing guard copy. */
    mt = (MemBlockHdr*)(((char*)(mb + 1)) + blockSize);
    memcpy(mt, mb, sizeof *mb);

    rv = (void*)(mb + 1);
    return rv;
  }

  /* size was too big. Create a block with no zone */
  /* Round the user area up to a 16-byte multiple. */
  blockSize = (size & 15) ? size + 16 - (size & 15) : size;
  mb = (MemBlockHdr*)malloc(blockSize + 2 * (sizeof *mb));
  if (!mb) {
    PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
    return NULL;
  }
  mb->s.next = NULL;
  mb->s.zone = NULL; /* NULL zone tells pr_ZoneFree to free() directly */
  mb->s.magic = ZONE_MAGIC;
  mb->s.blockSize = blockSize;
  mb->s.requestedSize = size;

  mt = (MemBlockHdr*)(((char*)(mb + 1)) + blockSize);
  memcpy(mt, mb, sizeof *mb);

  rv = (void*)(mb + 1);
  return rv;
}
285 | | |
286 | 0 | static void* pr_ZoneCalloc(PRUint32 nelem, PRUint32 elsize) { |
287 | 0 | PRUint32 size = nelem * elsize; |
288 | 0 | void* p = pr_ZoneMalloc(size); |
289 | 0 | if (p) { |
290 | 0 | memset(p, 0, size); |
291 | 0 | } |
292 | 0 | return p; |
293 | 0 | } |
294 | | |
/*
** Resize a block previously returned by pr_ZoneMalloc/pr_ZoneCalloc.
** Blocks whose current size class already fits are resized in place;
** otherwise a new zone block is allocated, the old contents copied,
** and the old block freed.  Pointers that lack the zone magic are
** assumed to come from ordinary malloc and are migrated into the zone
** allocator.  Returns NULL on allocation failure (oldptr stays valid
** for the caller in that case).
*/
static void* pr_ZoneRealloc(void* oldptr, PRUint32 bytes) {
  void* rv;
  MemBlockHdr* mb;
  int ours; /* 1 if oldptr is a zone block, 0 if it came from malloc */
  MemBlockHdr phony; /* stand-in header for foreign (malloc) blocks */

  if (!oldptr) {
    /* realloc(NULL, n) == malloc(n) */
    return pr_ZoneMalloc(bytes);
  }
  /* The leading header sits immediately before the user pointer. */
  mb = (MemBlockHdr*)((char*)oldptr - (sizeof *mb));
  if (mb->s.magic != ZONE_MAGIC) {
    /* Maybe this just came from ordinary malloc */
# ifdef DEBUG
    fprintf(stderr,
            "Warning: reallocing memory block %p from ordinary malloc\n",
            oldptr);
# endif
    /*
     * We are going to realloc oldptr. If realloc succeeds, the
     * original value of oldptr will point to freed memory. So this
     * function must not fail after a successful realloc call. We
     * must perform any operation that may fail before the realloc
     * call.
     */
    rv = pr_ZoneMalloc(bytes); /* this may fail */
    if (!rv) {
      return rv;
    }

    /* We don't know how big it is. But we can fix that. */
    oldptr = realloc(oldptr, bytes);
    /*
     * If realloc returns NULL, this function loses the original
     * value of oldptr. This isn't a leak because the caller of
     * this function still has the original value of oldptr.
     */
    if (!oldptr) {
      if (bytes) {
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        pr_ZoneFree(rv);
        return oldptr;
      }
    }
    /* Fake up a header so the common copy path below can read the
    ** (now known) size from mb->s.requestedSize. */
    phony.s.requestedSize = bytes;
    mb = &phony;
    ours = 0;
  } else {
    size_t blockSize = mb->s.blockSize;
    MemBlockHdr* mt = (MemBlockHdr*)(((char*)(mb + 1)) + blockSize);

    /* Validate the trailing guard header before trusting the block. */
    PR_ASSERT(mt->s.magic == ZONE_MAGIC);
    PR_ASSERT(mt->s.zone == mb->s.zone);
    PR_ASSERT(mt->s.blockSize == blockSize);

    if (bytes <= blockSize) {
      /* The block is already big enough. */
      mt->s.requestedSize = mb->s.requestedSize = bytes;
      return oldptr;
    }
    ours = 1;
    rv = pr_ZoneMalloc(bytes);
    if (!rv) {
      return rv;
    }
  }

  /* Copy the old contents into the new block, then release the old
  ** one by whichever mechanism owns it. */
  if (oldptr && mb->s.requestedSize) {
    memcpy(rv, oldptr, mb->s.requestedSize);
  }
  if (ours) {
    pr_ZoneFree(oldptr);
  } else if (oldptr) {
    free(oldptr);
  }
  return rv;
}
371 | | |
/*
** Release a block obtained from the zone allocator.  Zone blocks are
** pushed back onto their owning zone's free list (they are cached, not
** returned to libc); oversize blocks (s.zone == NULL) and pointers
** without the zone magic go straight to free().  free(NULL) is a no-op.
*/
static void pr_ZoneFree(void* ptr) {
  MemBlockHdr *mb, *mt;
  MemoryZone* mz;
  size_t blockSize;
  PRUint32 wasLocked;

  if (!ptr) {
    return;
  }

  /* Recover the leading header that precedes the user area. */
  mb = (MemBlockHdr*)((char*)ptr - (sizeof *mb));

  if (mb->s.magic != ZONE_MAGIC) {
    /* maybe this came from ordinary malloc */
# ifdef DEBUG
    fprintf(stderr, "Warning: freeing memory block %p from ordinary malloc\n",
            ptr);
# endif
    free(ptr);
    return;
  }

  blockSize = mb->s.blockSize;
  mz = mb->s.zone;
  /* Check the trailing guard copy for overruns of the user area. */
  mt = (MemBlockHdr*)(((char*)(mb + 1)) + blockSize);
  PR_ASSERT(mt->s.magic == ZONE_MAGIC);
  PR_ASSERT(mt->s.zone == mz);
  PR_ASSERT(mt->s.blockSize == blockSize);
  if (!mz) {
    /* Oversize blocks are never cached; largest zone class is 65536. */
    PR_ASSERT(blockSize > 65536);
    /* This block was not in any zone. Just free it. */
    free(mb);
    return;
  }
  PR_ASSERT(mz->blockSize == blockSize);
  /* Unsynchronized contention sampling; see pr_ZoneMalloc. */
  wasLocked = mz->locked;
  pthread_mutex_lock(&mz->lock);
  mz->locked = 1;
  if (wasLocked) {
    mz->contention++;
  }
  mt->s.next = mb->s.next = mz->head; /* put on head of list */
  mz->head = mb;
  mz->elements++;
  mz->locked = 0;
  pthread_mutex_unlock(&mz->lock);
}
419 | | |
420 | 6.55k | PR_IMPLEMENT(void*) PR_Malloc(PRUint32 size) { |
421 | 6.55k | if (!_pr_initialized) { |
422 | 0 | _PR_ImplicitInitialization(); |
423 | 0 | } |
424 | | |
425 | 6.55k | return use_zone_allocator ? pr_ZoneMalloc(size) : malloc(size); |
426 | 6.55k | } |
427 | | |
428 | 6.66k | PR_IMPLEMENT(void*) PR_Calloc(PRUint32 nelem, PRUint32 elsize) { |
429 | 6.66k | if (!_pr_initialized) { |
430 | 0 | _PR_ImplicitInitialization(); |
431 | 0 | } |
432 | | |
433 | 6.66k | return use_zone_allocator ? pr_ZoneCalloc(nelem, elsize) |
434 | 6.66k | : calloc(nelem, elsize); |
435 | 6.66k | } |
436 | | |
437 | 26 | PR_IMPLEMENT(void*) PR_Realloc(void* ptr, PRUint32 size) { |
438 | 26 | if (!_pr_initialized) { |
439 | 0 | _PR_ImplicitInitialization(); |
440 | 0 | } |
441 | | |
442 | 26 | return use_zone_allocator ? pr_ZoneRealloc(ptr, size) : realloc(ptr, size); |
443 | 26 | } |
444 | | |
445 | 11.5k | PR_IMPLEMENT(void) PR_Free(void* ptr) { |
446 | 11.5k | if (use_zone_allocator) { |
447 | 0 | pr_ZoneFree(ptr); |
448 | 11.5k | } else { |
449 | 11.5k | free(ptr); |
450 | 11.5k | } |
451 | 11.5k | } |
452 | | |
453 | | #else /* !defined(_PR_ZONE_ALLOCATOR) */ |
454 | | |
455 | | /* |
456 | | ** The PR_Malloc, PR_Calloc, PR_Realloc, and PR_Free functions simply |
457 | | ** call their libc equivalents now. This may seem redundant, but it |
458 | | ** ensures that we are calling into the same runtime library. On |
459 | | ** Win32, it is possible to have multiple runtime libraries (e.g., |
460 | | ** objects compiled with /MD and /MDd) in the same process, and |
461 | | ** they maintain separate heaps, which cannot be mixed. |
462 | | */ |
463 | | PR_IMPLEMENT(void*) PR_Malloc(PRUint32 size) { return malloc(size); } |
464 | | |
465 | | PR_IMPLEMENT(void*) PR_Calloc(PRUint32 nelem, PRUint32 elsize) { |
466 | | return calloc(nelem, elsize); |
467 | | } |
468 | | |
469 | | PR_IMPLEMENT(void*) PR_Realloc(void* ptr, PRUint32 size) { |
470 | | return realloc(ptr, size); |
471 | | } |
472 | | |
473 | | PR_IMPLEMENT(void) PR_Free(void* ptr) { free(ptr); } |
474 | | |
475 | | #endif /* _PR_ZONE_ALLOCATOR */ |
476 | | |
477 | | /* |
478 | | ** Complexity alert! |
479 | | ** |
480 | | ** If malloc/calloc/free (etc.) were implemented to use pr lock's then |
481 | | ** the entry points could block when called if some other thread had the |
482 | | ** lock. |
483 | | ** |
484 | | ** Most of the time this isn't a problem. However, in the case that we |
485 | | ** are using the thread safe malloc code after PR_Init but before |
486 | | ** PR_AttachThread has been called (on a native thread that nspr has yet |
487 | | ** to be told about) we could get royally screwed if the lock was busy |
488 | | ** and we tried to context switch the thread away. In this scenario |
489 | | ** PR_CURRENT_THREAD() == NULL |
490 | | ** |
491 | | ** To avoid this unfortunate case, we use the low level locking |
492 | | ** facilities for malloc protection instead of the slightly higher level |
493 | | ** locking. This makes malloc somewhat faster so maybe it's a good thing |
494 | | ** anyway. |
495 | | */ |
496 | | #ifdef _PR_OVERRIDE_MALLOC |
497 | | |
/* Imports */
extern void* _PR_UnlockedMalloc(size_t size);
extern void* _PR_UnlockedMemalign(size_t alignment, size_t size);
extern void _PR_UnlockedFree(void* ptr);
extern void* _PR_UnlockedRealloc(void* ptr, size_t size);
extern void* _PR_UnlockedCalloc(size_t n, size_t elsize);

/* Set once by _PR_MallocInit; until then the lock macros are no-ops so
** allocation works before NSPR is initialized. */
static PRBool _PR_malloc_initialised = PR_FALSE;

# ifdef _PR_PTHREADS
static pthread_mutex_t _PR_MD_malloc_crustylock;

/* NOTE: _PR_Lock_Malloc opens a brace that the matching
** _PR_Unlock_Malloc closes.  The pair must always be used together in
** the same function, bracketing the critical section; neither macro is
** well-formed on its own. */
# define _PR_Lock_Malloc() \
  { \
    if (PR_TRUE == _PR_malloc_initialised) { \
      PRStatus rv; \
      rv = pthread_mutex_lock(&_PR_MD_malloc_crustylock); \
      PR_ASSERT(0 == rv); \
    }

# define _PR_Unlock_Malloc() \
  if (PR_TRUE == _PR_malloc_initialised) { \
    PRStatus rv; \
    rv = pthread_mutex_unlock(&_PR_MD_malloc_crustylock); \
    PR_ASSERT(0 == rv); \
  } \
  }
# else /* _PR_PTHREADS */
static _MDLock _PR_MD_malloc_crustylock;

/* Same unbalanced-brace pairing as the pthreads variant above.  Local
** user threads additionally disable software interrupts (_PR_INTSOFF)
** while holding the lock; `_is` is shared between the two macros. */
# define _PR_Lock_Malloc() \
  { \
    PRIntn _is; \
    if (PR_TRUE == _PR_malloc_initialised) { \
      if (_PR_MD_CURRENT_THREAD() && \
          !_PR_IS_NATIVE_THREAD(_PR_MD_CURRENT_THREAD())) \
        _PR_INTSOFF(_is); \
      _PR_MD_LOCK(&_PR_MD_malloc_crustylock); \
    }

# define _PR_Unlock_Malloc() \
  if (PR_TRUE == _PR_malloc_initialised) { \
    _PR_MD_UNLOCK(&_PR_MD_malloc_crustylock); \
    if (_PR_MD_CURRENT_THREAD() && \
        !_PR_IS_NATIVE_THREAD(_PR_MD_CURRENT_THREAD())) \
      _PR_INTSON(_is); \
  } \
  }
# endif /* _PR_PTHREADS */
547 | | |
/*
** Initialize the low-level malloc lock used by the malloc/free/realloc
** overrides below.  Idempotent: repeat calls after success return
** PR_SUCCESS immediately.  Until this runs, the lock macros are no-ops
** and the overrides are effectively single-threaded.
*/
PR_IMPLEMENT(PRStatus) _PR_MallocInit(void) {
  PRStatus rv = PR_SUCCESS;

  if (PR_TRUE == _PR_malloc_initialised) {
    return PR_SUCCESS;
  }

# ifdef _PR_PTHREADS
  {
    int status;
    pthread_mutexattr_t mattr;

    /* Failures are asserted but not propagated; rv stays PR_SUCCESS. */
    status = _PT_PTHREAD_MUTEXATTR_INIT(&mattr);
    PR_ASSERT(0 == status);
    status = _PT_PTHREAD_MUTEX_INIT(_PR_MD_malloc_crustylock, mattr);
    PR_ASSERT(0 == status);
    status = _PT_PTHREAD_MUTEXATTR_DESTROY(&mattr);
    PR_ASSERT(0 == status);
  }
# else /* _PR_PTHREADS */
  _MD_NEW_LOCK(&_PR_MD_malloc_crustylock);
# endif /* _PR_PTHREADS */

  if (PR_SUCCESS == rv) {
    _PR_malloc_initialised = PR_TRUE;
  }

  return rv;
}
577 | | |
/*
** Thread-safe libc malloc override: serializes the unlocked allocator
** behind the crusty malloc lock.  The lock/unlock macros bracket the
** call with an implicit brace pair -- do not restructure.
*/
void* malloc(size_t size) {
  void* p;
  _PR_Lock_Malloc();
  p = _PR_UnlockedMalloc(size);
  _PR_Unlock_Malloc();
  return p;
}
585 | | |
/*
** Thread-safe libc free override; see malloc above for the locking
** convention.
*/
void free(void* ptr) {
  _PR_Lock_Malloc();
  _PR_UnlockedFree(ptr);
  _PR_Unlock_Malloc();
}
591 | | |
/*
** Thread-safe libc realloc override; see malloc above for the locking
** convention.
*/
void* realloc(void* ptr, size_t size) {
  void* p;
  _PR_Lock_Malloc();
  p = _PR_UnlockedRealloc(ptr, size);
  _PR_Unlock_Malloc();
  return p;
}
599 | | |
/*
** Thread-safe libc calloc override; see malloc above for the locking
** convention.
*/
void* calloc(size_t n, size_t elsize) {
  void* p;
  _PR_Lock_Malloc();
  p = _PR_UnlockedCalloc(n, elsize);
  _PR_Unlock_Malloc();
  return p;
}
607 | | |
/*
** Legacy cfree entry point (historical companion to calloc); behaves
** exactly like the free override above.
*/
void cfree(void* p) {
  _PR_Lock_Malloc();
  _PR_UnlockedFree(p);
  _PR_Unlock_Malloc();
}
613 | | |
/*
** NSPR startup hook: create the malloc lock.  Initialization failure
** is fatal in debug builds (asserted) and ignored otherwise.
*/
void _PR_InitMem(void) {
  PRStatus rv;
  rv = _PR_MallocInit();
  PR_ASSERT(PR_SUCCESS == rv);
}
619 | | |
620 | | #endif /* _PR_OVERRIDE_MALLOC */ |