/src/httpd/srclib/apr/memory/unix/apr_pools.c
Line | Count | Source |
1 | | /* Licensed to the Apache Software Foundation (ASF) under one or more |
2 | | * contributor license agreements. See the NOTICE file distributed with |
3 | | * this work for additional information regarding copyright ownership. |
4 | | * The ASF licenses this file to You under the Apache License, Version 2.0 |
5 | | * (the "License"); you may not use this file except in compliance with |
6 | | * the License. You may obtain a copy of the License at |
7 | | * |
8 | | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | | * |
10 | | * Unless required by applicable law or agreed to in writing, software |
11 | | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | | * See the License for the specific language governing permissions and |
14 | | * limitations under the License. |
15 | | */ |
16 | | |
17 | | #include "apr.h" |
18 | | #include "apr_private.h" |
19 | | |
20 | | #include "apr_atomic.h" |
21 | | #include "apr_portable.h" /* for get_os_proc */ |
22 | | #include "apr_strings.h" |
23 | | #include "apr_general.h" |
24 | | #include "apr_pools.h" |
25 | | #include "apr_allocator.h" |
26 | | #include "apr_lib.h" |
27 | | #include "apr_thread_mutex.h" |
28 | | #include "apr_hash.h" |
29 | | #include "apr_time.h" |
30 | | #include "apr_support.h" |
31 | | #define APR_WANT_MEMFUNC |
32 | | #include "apr_want.h" |
33 | | #include "apr_env.h" |
34 | | |
35 | | #if APR_HAVE_STDLIB_H |
36 | | #include <stdlib.h> /* for malloc, free and abort */ |
37 | | #endif |
38 | | |
39 | | #if APR_HAVE_UNISTD_H |
40 | | #include <unistd.h> /* for getpid and sysconf */ |
41 | | #endif |
42 | | |
43 | | #if APR_ALLOCATOR_GUARD_PAGES && !APR_ALLOCATOR_USES_MMAP |
44 | | #define APR_ALLOCATOR_USES_MMAP 1 |
45 | | #endif |
46 | | |
47 | | #if APR_ALLOCATOR_USES_MMAP |
48 | | #include <sys/mman.h> |
49 | | #endif |
50 | | |
51 | | #if HAVE_VALGRIND |
52 | | #define REDZONE APR_ALIGN_DEFAULT(8) |
53 | | int apr_running_on_valgrind = 0; |
54 | | #endif |
55 | | |
56 | | #if APR_POOL_CONCURRENCY_CHECK && !APR_HAS_THREADS |
57 | | #error pool-concurrency-check does not make sense without threads |
58 | | #endif |
59 | | |
60 | | |
61 | | /* |
62 | | * Magic numbers |
63 | | */ |
64 | | |
65 | | /* |
66 | | * Recycle up to MAX_INDEX in slots, larger indexes go to the |
67 | | * sink slot at MAX_INDEX, and allocate at least MIN_ALLOC |
68 | | * bytes (2^order boundaries/pages). |
69 | | */ |
70 | 0 | #define MAX_INDEX 20 |
71 | 0 | #define MAX_ORDER 9 |
72 | | static unsigned int min_order = 1; |
73 | 0 | #define MIN_ALLOC (BOUNDARY_SIZE << min_order) |
74 | | |
75 | | /* |
76 | | * Determines the boundary/page size. |
77 | | */ |
78 | | #if defined(_SC_PAGESIZE) || defined(WIN32) |
79 | | static unsigned int boundary_index; |
80 | | static unsigned int boundary_size; |
81 | 0 | #define BOUNDARY_INDEX boundary_index |
82 | 0 | #define BOUNDARY_SIZE boundary_size |
83 | | #else /* Assume 4K pages */ |
84 | | #define BOUNDARY_INDEX 12 |
85 | | #define BOUNDARY_SIZE (1 << BOUNDARY_INDEX) |
86 | | #endif |
87 | | |
88 | | #if APR_ALLOCATOR_GUARD_PAGES |
89 | | #if defined(_SC_PAGESIZE) |
90 | | #define GUARDPAGE_SIZE boundary_size |
91 | | #else |
92 | | #error Cannot determine page size |
93 | | #endif /* _SC_PAGESIZE */ |
94 | | #else |
95 | | #define GUARDPAGE_SIZE 0 |
96 | | #endif /* APR_ALLOCATOR_GUARD_PAGES */ |
97 | | |
98 | | /* |
99 | | * Timing constants for killing subprocesses |
100 | | * There is a total 3-second delay between sending a SIGINT |
101 | | * and sending the final SIGKILL. |
102 | | * TIMEOUT_INTERVAL should be set to TIMEOUT_USECS / 64 |
103 | | * for the exponential timeout algorithm. |
104 | | */ |
105 | 0 | #define TIMEOUT_USECS 3000000 |
106 | 0 | #define TIMEOUT_INTERVAL 46875 |
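As a quick sanity check of the comment above: TIMEOUT_USECS / 64 = 3,000,000 / 64 = 46,875, which is exactly the TIMEOUT_INTERVAL value defined here.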
107 | | |
108 | | /* |
109 | | * Allocator |
110 | | * |
111 | | * @note The max_free_index and current_free_index fields are not really |
112 | | * indices, but counts of BOUNDARY_SIZE-sized memory blocks. |
113 | | */ |
114 | | |
115 | | struct apr_allocator_t { |
116 | | /** largest used index into free[], always < MAX_INDEX */ |
117 | | apr_size_t max_index; |
118 | | /** Total size (in BOUNDARY_SIZE multiples) of unused memory before |
119 | | * blocks are given back. @see apr_allocator_max_free_set(). |
120 | | * @note Initialized to APR_ALLOCATOR_MAX_FREE_UNLIMITED, |
121 | | * which means to never give back blocks. |
122 | | */ |
123 | | apr_size_t max_free_index; |
124 | | /** |
125 | | * Memory size (in BOUNDARY_SIZE multiples) that currently must be freed |
126 | | * before blocks are given back. Range: 0..max_free_index |
127 | | */ |
128 | | apr_size_t current_free_index; |
129 | | #if APR_HAS_THREADS |
130 | | apr_thread_mutex_t *mutex; |
131 | | #endif /* APR_HAS_THREADS */ |
132 | | apr_pool_t *owner; |
133 | | /** |
134 | | * Lists of free nodes. Slot MAX_INDEX is used for oversized nodes, |
135 | | * and the slots 0..MAX_INDEX-1 contain nodes of sizes |
136 | | * (i+1) * BOUNDARY_SIZE. Example for BOUNDARY_INDEX == 12: |
137 | | * slot 0: size 4096 |
138 | | * slot 1: size 8192 |
139 | | * slot 2: size 12288 |
140 | | * ... |
141 | | * slot 19: size 81920 |
142 | | * slot 20: nodes larger than 81920 |
143 | | */ |
144 | | apr_memnode_t *free[MAX_INDEX + 1]; |
145 | | }; |
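The slot arithmetic described in the comment above can be reproduced with a small standalone sketch (this assumes the common BOUNDARY_INDEX of 12, i.e. 4096-byte boundaries, and is not part of apr_pools.c):

    #include <stdio.h>
    #include <stddef.h>

    /* Mirrors the index computation used by allocator_alloc()/allocator_free():
     * a node of (index + 1) * BOUNDARY_SIZE bytes is kept in free[index], and
     * anything with index >= MAX_INDEX ends up in the free[MAX_INDEX] sink.
     */
    #define BOUNDARY_INDEX 12                 /* assumed 4 KB boundary */
    #define BOUNDARY_SIZE  (1 << BOUNDARY_INDEX)
    #define MAX_INDEX      20

    static unsigned slot_for(size_t node_size)
    {
        unsigned index = (unsigned)((node_size >> BOUNDARY_INDEX) - 1);
        return index < MAX_INDEX ? index : MAX_INDEX;   /* oversized -> sink */
    }

    int main(void)
    {
        printf("%u %u %u %u\n",
               slot_for(4096),      /* slot 0  */
               slot_for(12288),     /* slot 2  */
               slot_for(81920),     /* slot 19 */
               slot_for(262144));   /* slot 20, the sink */
        return 0;
    }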
146 | | |
147 | 0 | #define SIZEOF_ALLOCATOR_T APR_ALIGN_DEFAULT(sizeof(apr_allocator_t)) |
148 | | |
149 | | |
150 | | /* |
151 | | * Allocator |
152 | | */ |
153 | | |
154 | | static APR_INLINE |
155 | | void allocator_lock(apr_allocator_t *allocator) |
156 | 0 | { |
157 | 0 | #if APR_HAS_THREADS |
158 | 0 | if (allocator->mutex) |
159 | 0 | apr_thread_mutex_lock(allocator->mutex); |
160 | 0 | #endif /* APR_HAS_THREADS */ |
161 | 0 | } |
162 | | |
163 | | static APR_INLINE |
164 | | void allocator_unlock(apr_allocator_t *allocator) |
165 | 0 | { |
166 | 0 | #if APR_HAS_THREADS |
167 | 0 | if (allocator->mutex) |
168 | 0 | apr_thread_mutex_unlock(allocator->mutex); |
169 | 0 | #endif /* APR_HAS_THREADS */ |
170 | 0 | } |
171 | | |
172 | | APR_DECLARE(apr_status_t) apr_allocator_create(apr_allocator_t **allocator) |
173 | 0 | { |
174 | 0 | apr_allocator_t *new_allocator; |
175 | |
176 | 0 | *allocator = NULL; |
177 | |
178 | 0 | if ((new_allocator = malloc(SIZEOF_ALLOCATOR_T)) == NULL) |
179 | 0 | return APR_ENOMEM; |
180 | | |
181 | 0 | memset(new_allocator, 0, SIZEOF_ALLOCATOR_T); |
182 | 0 | new_allocator->max_free_index = APR_ALLOCATOR_MAX_FREE_UNLIMITED; |
183 | |
184 | 0 | *allocator = new_allocator; |
185 | |
186 | 0 | return APR_SUCCESS; |
187 | 0 | } |
188 | | |
189 | | APR_DECLARE(void) apr_allocator_destroy(apr_allocator_t *allocator) |
190 | 0 | { |
191 | 0 | apr_size_t index; |
192 | 0 | apr_memnode_t *node, **ref; |
193 | |
194 | 0 | for (index = 0; index <= MAX_INDEX; index++) { |
195 | 0 | ref = &allocator->free[index]; |
196 | 0 | while ((node = *ref) != NULL) { |
197 | 0 | *ref = node->next; |
198 | | #if APR_ALLOCATOR_USES_MMAP |
199 | | munmap((char *)node - GUARDPAGE_SIZE, |
200 | | 2 * GUARDPAGE_SIZE + ((node->index+1) << BOUNDARY_INDEX)); |
201 | | #else |
202 | 0 | free(node); |
203 | 0 | #endif |
204 | 0 | } |
205 | 0 | } |
206 | |
207 | 0 | free(allocator); |
208 | 0 | } |
209 | | |
210 | | #if APR_HAS_THREADS |
211 | | APR_DECLARE(void) apr_allocator_mutex_set(apr_allocator_t *allocator, |
212 | | apr_thread_mutex_t *mutex) |
213 | 0 | { |
214 | 0 | allocator->mutex = mutex; |
215 | 0 | } |
216 | | |
217 | | APR_DECLARE(apr_thread_mutex_t *) apr_allocator_mutex_get( |
218 | | apr_allocator_t *allocator) |
219 | 0 | { |
220 | 0 | return allocator->mutex; |
221 | 0 | } |
222 | | #endif /* APR_HAS_THREADS */ |
223 | | |
224 | | APR_DECLARE(void) apr_allocator_owner_set(apr_allocator_t *allocator, |
225 | | apr_pool_t *pool) |
226 | 0 | { |
227 | 0 | allocator->owner = pool; |
228 | 0 | } |
229 | | |
230 | | APR_DECLARE(apr_pool_t *) apr_allocator_owner_get(apr_allocator_t *allocator) |
231 | 0 | { |
232 | 0 | return allocator->owner; |
233 | 0 | } |
234 | | |
235 | | APR_DECLARE(void) apr_allocator_max_free_set(apr_allocator_t *allocator, |
236 | | apr_size_t in_size) |
237 | 0 | { |
238 | 0 | apr_size_t max_free_index; |
239 | 0 | apr_size_t size = in_size; |
240 | |
241 | 0 | allocator_lock(allocator); |
242 | |
243 | 0 | max_free_index = APR_ALIGN(size, BOUNDARY_SIZE) >> BOUNDARY_INDEX; |
244 | 0 | allocator->current_free_index += max_free_index; |
245 | 0 | allocator->current_free_index -= allocator->max_free_index; |
246 | 0 | allocator->max_free_index = max_free_index; |
247 | 0 | if (allocator->current_free_index > max_free_index) |
248 | 0 | allocator->current_free_index = max_free_index; |
249 | |
250 | 0 | allocator_unlock(allocator); |
251 | 0 | } |
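Taken together, apr_allocator_create(), apr_allocator_max_free_set() and apr_allocator_owner_set() are typically used from application code roughly as follows (a sketch with minimal error handling; the 1 MB threshold is only an illustrative value):

    #include <apr_general.h>
    #include <apr_allocator.h>
    #include <apr_pools.h>

    int main(void)
    {
        apr_allocator_t *allocator;
        apr_pool_t *pool;

        if (apr_initialize() != APR_SUCCESS)
            return 1;

        /* Private allocator with a bounded free list instead of the
         * default "never give memory back" behaviour. */
        if (apr_allocator_create(&allocator) != APR_SUCCESS)
            return 1;
        apr_allocator_max_free_set(allocator, 1024 * 1024);

        if (apr_pool_create_ex(&pool, NULL, NULL, allocator) != APR_SUCCESS) {
            apr_allocator_destroy(allocator);
            return 1;
        }
        /* Making the pool the owner means destroying the pool also
         * destroys the allocator. */
        apr_allocator_owner_set(allocator, pool);

        apr_pool_destroy(pool);
        apr_terminate();
        return 0;
    }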
252 | | |
253 | | static APR_INLINE |
254 | | apr_size_t allocator_align(apr_size_t in_size) |
255 | 0 | { |
256 | 0 | apr_size_t size = in_size; |
257 | | |
258 | | /* Round up the block size to the next boundary, but always |
259 | | * allocate at least a certain size (MIN_ALLOC). |
260 | | */ |
261 | 0 | size = APR_ALIGN(size + APR_MEMNODE_T_SIZE, BOUNDARY_SIZE); |
262 | 0 | if (size < in_size) { |
263 | 0 | return 0; |
264 | 0 | } |
265 | 0 | if (size < MIN_ALLOC) { |
266 | 0 | size = MIN_ALLOC; |
267 | 0 | } |
268 | |
269 | 0 | return size; |
270 | 0 | } |
271 | | |
272 | | APR_DECLARE(apr_size_t) apr_allocator_align(apr_allocator_t *allocator, |
273 | | apr_size_t size) |
274 | 0 | { |
275 | 0 | (void)allocator; |
276 | 0 | return allocator_align(size); |
277 | 0 | } |
278 | | |
279 | | static APR_INLINE |
280 | | apr_memnode_t *allocator_alloc(apr_allocator_t *allocator, apr_size_t in_size) |
281 | 0 | { |
282 | 0 | apr_memnode_t *node, **ref; |
283 | 0 | apr_size_t max_index, upper_index; |
284 | 0 | apr_size_t size, i, index; |
285 | | |
286 | | /* Round up the block size to the next boundary, but always |
287 | | * allocate at least a certain size (MIN_ALLOC). |
288 | | */ |
289 | 0 | size = allocator_align(in_size); |
290 | 0 | if (!size) { |
291 | 0 | return NULL; |
292 | 0 | } |
293 | | |
294 | | /* Find the index for this node size by |
295 | | * dividing its size by the boundary size |
296 | | */ |
297 | 0 | index = (size >> BOUNDARY_INDEX) - 1; |
298 | |
299 | 0 | if (index > APR_UINT32_MAX) { |
300 | 0 | return NULL; |
301 | 0 | } |
302 | | |
303 | | /* First see if there are any nodes in the area we know |
304 | | * our node will fit into. |
305 | | */ |
306 | 0 | if (index <= allocator->max_index) { |
307 | 0 | allocator_lock(allocator); |
308 | | |
309 | | /* Walk the free list to see if there are |
310 | | * any nodes on it of the requested size |
311 | | * |
312 | | * If there is no exact match, look for nodes |
313 | | * of up to twice the requested size, so we |
314 | | * won't unnecessarily allocate more memory |
315 | | * nor waste too much of what we have. |
316 | | * |
317 | | * NOTE: an optimization would be to check |
318 | | * allocator->free[index] first and if no |
319 | | * node is present, directly use |
320 | | * allocator->free[max_index]. This seems |
321 | | * like overkill though and could cause |
322 | | * memory waste. |
323 | | */ |
324 | 0 | max_index = allocator->max_index; |
325 | 0 | upper_index = 2 * index < max_index ? 2 * index : max_index; |
326 | 0 | ref = &allocator->free[index]; |
327 | 0 | i = index; |
328 | 0 | while (*ref == NULL && i < upper_index) { |
329 | 0 | ref++; |
330 | 0 | i++; |
331 | 0 | } |
332 | |
333 | 0 | if ((node = *ref) != NULL) { |
334 | | /* If we have found a node and it doesn't have any |
335 | | * nodes waiting in line behind it _and_ we are on |
336 | | * the highest available index, find the new highest |
337 | | * available index |
338 | | */ |
339 | 0 | if ((*ref = node->next) == NULL && i >= max_index) { |
340 | 0 | do { |
341 | 0 | ref--; |
342 | 0 | max_index--; |
343 | 0 | } |
344 | 0 | while (*ref == NULL && max_index); |
345 | |
346 | 0 | allocator->max_index = max_index; |
347 | 0 | } |
348 | |
349 | 0 | allocator->current_free_index += node->index + 1; |
350 | 0 | if (allocator->current_free_index > allocator->max_free_index) |
351 | 0 | allocator->current_free_index = allocator->max_free_index; |
352 | |
353 | 0 | allocator_unlock(allocator); |
354 | |
355 | 0 | goto have_node; |
356 | 0 | } |
357 | | |
358 | 0 | allocator_unlock(allocator); |
359 | 0 | } |
360 | | |
361 | | /* If we found nothing, seek the sink (at index MAX_INDEX), if |
362 | | * it is not empty. |
363 | | */ |
364 | 0 | else if (allocator->free[MAX_INDEX]) { |
365 | 0 | allocator_lock(allocator); |
366 | | |
367 | | /* Walk the free list to see if there are |
368 | | * any nodes on it of the requested size |
369 | | */ |
370 | 0 | ref = &allocator->free[MAX_INDEX]; |
371 | 0 | while ((node = *ref) != NULL && index > node->index) |
372 | 0 | ref = &node->next; |
373 | |
374 | 0 | if (node) { |
375 | 0 | *ref = node->next; |
376 | |
377 | 0 | allocator->current_free_index += node->index + 1; |
378 | 0 | if (allocator->current_free_index > allocator->max_free_index) |
379 | 0 | allocator->current_free_index = allocator->max_free_index; |
380 | |
381 | 0 | allocator_unlock(allocator); |
382 | |
383 | 0 | goto have_node; |
384 | 0 | } |
385 | | |
386 | 0 | allocator_unlock(allocator); |
387 | 0 | } |
388 | | |
389 | | /* If we haven't got a suitable node, malloc a new one |
390 | | * and initialize it. |
391 | | */ |
392 | | #if APR_ALLOCATOR_GUARD_PAGES |
393 | | if ((node = mmap(NULL, size + 2 * GUARDPAGE_SIZE, PROT_NONE, |
394 | | MAP_PRIVATE|MAP_ANON, -1, 0)) == MAP_FAILED) |
395 | | #elif APR_ALLOCATOR_USES_MMAP |
396 | | if ((node = mmap(NULL, size, PROT_READ|PROT_WRITE, |
397 | | MAP_PRIVATE|MAP_ANON, -1, 0)) == MAP_FAILED) |
398 | | #else |
399 | 0 | if ((node = malloc(size)) == NULL) |
400 | 0 | #endif |
401 | 0 | return NULL; |
402 | | |
403 | | #if APR_ALLOCATOR_GUARD_PAGES |
404 | | node = (apr_memnode_t *)((char *)node + GUARDPAGE_SIZE); |
405 | | if (mprotect(node, size, PROT_READ|PROT_WRITE) != 0) { |
406 | | munmap((char *)node - GUARDPAGE_SIZE, size + 2 * GUARDPAGE_SIZE); |
407 | | return NULL; |
408 | | } |
409 | | #endif |
410 | 0 | node->index = (apr_uint32_t)index; |
411 | 0 | node->endp = (char *)node + size; |
412 | |
413 | 0 | have_node: |
414 | 0 | node->next = NULL; |
415 | 0 | node->first_avail = (char *)node + APR_MEMNODE_T_SIZE; |
416 | |
417 | 0 | APR_VALGRIND_UNDEFINED(node->first_avail, size - APR_MEMNODE_T_SIZE); |
418 | |
419 | 0 | return node; |
420 | 0 | } |
421 | | |
422 | | static APR_INLINE |
423 | | void allocator_free(apr_allocator_t *allocator, apr_memnode_t *node) |
424 | 0 | { |
425 | 0 | apr_memnode_t *next, *freelist = NULL; |
426 | 0 | apr_size_t index, max_index; |
427 | 0 | apr_size_t max_free_index, current_free_index; |
428 | |
429 | 0 | allocator_lock(allocator); |
430 | |
431 | 0 | max_index = allocator->max_index; |
432 | 0 | max_free_index = allocator->max_free_index; |
433 | 0 | current_free_index = allocator->current_free_index; |
434 | | |
435 | | /* Walk the list of submitted nodes and free them one by one, |
436 | | * shoving them in the right 'size' buckets as we go. |
437 | | */ |
438 | 0 | do { |
439 | 0 | next = node->next; |
440 | 0 | index = node->index; |
441 | |
442 | 0 | APR_VALGRIND_NOACCESS((char *)node + APR_MEMNODE_T_SIZE, |
443 | 0 | (node->index+1) << BOUNDARY_INDEX); |
444 | |
445 | 0 | if (max_free_index != APR_ALLOCATOR_MAX_FREE_UNLIMITED |
446 | 0 | && index + 1 > current_free_index) { |
447 | 0 | node->next = freelist; |
448 | 0 | freelist = node; |
449 | 0 | } |
450 | 0 | else if (index < MAX_INDEX) { |
451 | | /* Add the node to the appropriate 'size' bucket. Adjust |
452 | | * the max_index when appropriate. |
453 | | */ |
454 | 0 | if ((node->next = allocator->free[index]) == NULL |
455 | 0 | && index > max_index) { |
456 | 0 | max_index = index; |
457 | 0 | } |
458 | 0 | allocator->free[index] = node; |
459 | 0 | if (current_free_index >= index + 1) |
460 | 0 | current_free_index -= index + 1; |
461 | 0 | else |
462 | 0 | current_free_index = 0; |
463 | 0 | } |
464 | 0 | else { |
465 | | /* This node is too large to keep in a specific size bucket, |
466 | | * just add it to the sink (at index MAX_INDEX). |
467 | | */ |
468 | 0 | node->next = allocator->free[MAX_INDEX]; |
469 | 0 | allocator->free[MAX_INDEX] = node; |
470 | 0 | if (current_free_index >= index + 1) |
471 | 0 | current_free_index -= index + 1; |
472 | 0 | else |
473 | 0 | current_free_index = 0; |
474 | 0 | } |
475 | 0 | } while ((node = next) != NULL); |
476 | |
477 | 0 | allocator->max_index = max_index; |
478 | 0 | allocator->current_free_index = current_free_index; |
479 | |
480 | 0 | allocator_unlock(allocator); |
481 | |
482 | 0 | while (freelist != NULL) { |
483 | 0 | node = freelist; |
484 | 0 | freelist = node->next; |
485 | | #if APR_ALLOCATOR_USES_MMAP |
486 | | munmap((char *)node - GUARDPAGE_SIZE, |
487 | | 2 * GUARDPAGE_SIZE + ((node->index+1) << BOUNDARY_INDEX)); |
488 | | #else |
489 | 0 | free(node); |
490 | 0 | #endif |
491 | 0 | } |
492 | 0 | } |
493 | | |
494 | | APR_DECLARE(apr_memnode_t *) apr_allocator_alloc(apr_allocator_t *allocator, |
495 | | apr_size_t size) |
496 | 0 | { |
497 | 0 | return allocator_alloc(allocator, size); |
498 | 0 | } |
499 | | |
500 | | APR_DECLARE(void) apr_allocator_free(apr_allocator_t *allocator, |
501 | | apr_memnode_t *node) |
502 | 0 | { |
503 | 0 | allocator_free(allocator, node); |
504 | 0 | } |
505 | | |
506 | | APR_DECLARE(apr_size_t) apr_allocator_page_size(void) |
507 | 0 | { |
508 | 0 | return boundary_size; |
509 | 0 | } |
510 | | |
511 | | APR_DECLARE(apr_status_t) apr_allocator_min_order_set(unsigned int order) |
512 | 0 | { |
513 | 0 | if (order > MAX_ORDER) { |
514 | 0 | return APR_EINVAL; |
515 | 0 | } |
516 | 0 | min_order = order; |
517 | 0 | return APR_SUCCESS; |
518 | 0 | } |
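With the defaults above (min_order of 1) and a 4 KB BOUNDARY_SIZE, MIN_ALLOC works out to 8192 bytes; calling apr_allocator_min_order_set(3) before the first allocation would raise that floor to 32768 bytes, while any order above MAX_ORDER (9) is rejected with APR_EINVAL.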
519 | | |
520 | | |
521 | | /* |
522 | | * Debug level |
523 | | */ |
524 | | |
525 | | #define APR_POOL_DEBUG_GENERAL 0x01 |
526 | | #define APR_POOL_DEBUG_VERBOSE 0x02 |
527 | | #define APR_POOL_DEBUG_LIFETIME 0x04 |
528 | | #define APR_POOL_DEBUG_OWNER 0x08 |
529 | | #define APR_POOL_DEBUG_VERBOSE_ALLOC 0x10 |
530 | | |
531 | | #define APR_POOL_DEBUG_VERBOSE_ALL (APR_POOL_DEBUG_VERBOSE \ |
532 | | | APR_POOL_DEBUG_VERBOSE_ALLOC) |
533 | | |
534 | | |
535 | | /* |
536 | | * Structures |
537 | | */ |
538 | | |
539 | | typedef struct cleanup_t cleanup_t; |
540 | | |
541 | | /** A list of processes */ |
542 | | struct process_chain { |
543 | | /** The process ID */ |
544 | | apr_proc_t *proc; |
545 | | apr_kill_conditions_e kill_how; |
546 | | /** The next process in the list */ |
547 | | struct process_chain *next; |
548 | | }; |
549 | | |
550 | | |
551 | | #if APR_POOL_DEBUG |
552 | | |
553 | | typedef struct debug_node_t debug_node_t; |
554 | | |
555 | | struct debug_node_t { |
556 | | debug_node_t *next; |
557 | | apr_size_t index; |
558 | | void *beginp[64]; |
559 | | void *endp[64]; |
560 | | }; |
561 | | |
562 | 1.84k | #define SIZEOF_DEBUG_NODE_T APR_ALIGN_DEFAULT(sizeof(debug_node_t)) |
563 | | |
564 | | #endif /* APR_POOL_DEBUG */ |
565 | | |
566 | | /* The ref field in the apr_pool_t struct holds a |
567 | | * pointer to the pointer referencing this pool. |
568 | | * It is used for parent, child, sibling management. |
569 | | * Look at apr_pool_create_ex() and apr_pool_destroy() |
570 | | * to see how it is used. |
571 | | */ |
572 | | struct apr_pool_t { |
573 | | apr_pool_t *parent; |
574 | | apr_pool_t *child; |
575 | | apr_pool_t *sibling; |
576 | | apr_pool_t **ref; |
577 | | cleanup_t *cleanups; |
578 | | cleanup_t *free_cleanups; |
579 | | apr_allocator_t *allocator; |
580 | | struct process_chain *subprocesses; |
581 | | apr_abortfunc_t abort_fn; |
582 | | apr_hash_t *user_data; |
583 | | const char *tag; |
584 | | |
585 | | #if !APR_POOL_DEBUG |
586 | | apr_memnode_t *active; |
587 | | apr_memnode_t *self; /* The node containing the pool itself */ |
588 | | char *self_first_avail; |
589 | | |
590 | | #else /* APR_POOL_DEBUG */ |
591 | | apr_pool_t *joined; /* the caller has guaranteed that this pool |
592 | | * will survive as long as ->joined */ |
593 | | debug_node_t *nodes; |
594 | | const char *file_line; |
595 | | apr_uint32_t creation_flags; |
596 | | unsigned int stat_alloc; |
597 | | unsigned int stat_total_alloc; |
598 | | unsigned int stat_clear; |
599 | | #if APR_HAS_THREADS |
600 | | apr_os_thread_t owner; |
601 | | apr_thread_mutex_t *mutex; |
602 | | #endif /* APR_HAS_THREADS */ |
603 | | #endif /* APR_POOL_DEBUG */ |
604 | | #ifdef NETWARE |
605 | | apr_os_proc_t owner_proc; |
606 | | #endif /* defined(NETWARE) */ |
607 | | cleanup_t *pre_cleanups; |
608 | | #if APR_POOL_CONCURRENCY_CHECK |
609 | | |
610 | | #define IDLE 0 |
611 | | #define IN_USE 1 |
612 | | #define DESTROYED 2 |
613 | | volatile apr_uint32_t in_use; |
614 | | apr_os_thread_t in_use_by; |
615 | | #endif /* APR_POOL_CONCURRENCY_CHECK */ |
616 | | }; |
617 | | |
618 | 1.64k | #define SIZEOF_POOL_T APR_ALIGN_DEFAULT(sizeof(apr_pool_t)) |
619 | | |
620 | | |
621 | | /* |
622 | | * Variables |
623 | | */ |
624 | | |
625 | | static apr_byte_t apr_pools_initialized = 0; |
626 | | static apr_pool_t *global_pool = NULL; |
627 | | |
628 | | #if !APR_POOL_DEBUG |
629 | | static apr_allocator_t *global_allocator = NULL; |
630 | | #endif /* !APR_POOL_DEBUG */ |
631 | | |
632 | | #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) |
633 | | static apr_file_t *file_stderr = NULL; |
634 | | static apr_status_t apr_pool_cleanup_file_stderr(void *data) |
635 | | { |
636 | | file_stderr = NULL; |
637 | | return APR_SUCCESS; |
638 | | } |
639 | | |
640 | | #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */ |
641 | | |
642 | | /* |
643 | | * Local functions |
644 | | */ |
645 | | |
646 | | static void run_cleanups(cleanup_t **c); |
647 | | static void free_proc_chain(struct process_chain *procs); |
648 | | |
649 | | #if APR_POOL_DEBUG |
650 | | static void pool_destroy_debug(apr_pool_t *pool, const char *file_line); |
651 | | #endif |
652 | | |
653 | | #if !APR_POOL_DEBUG |
654 | | /* |
655 | | * Initialization |
656 | | */ |
657 | | |
658 | | APR_DECLARE(apr_status_t) apr_pool_initialize(void) |
659 | | { |
660 | | apr_status_t rv; |
661 | | |
662 | | if (apr_pools_initialized++) |
663 | | return APR_SUCCESS; |
664 | | |
665 | | #if HAVE_VALGRIND |
666 | | apr_running_on_valgrind = RUNNING_ON_VALGRIND; |
667 | | #endif |
668 | | |
669 | | #if defined(_SC_PAGESIZE) |
670 | | boundary_size = sysconf(_SC_PAGESIZE); |
671 | | #elif defined(WIN32) |
672 | | { |
673 | | SYSTEM_INFO si; |
674 | | GetSystemInfo(&si); |
675 | | boundary_size = si.dwPageSize; |
676 | | } |
677 | | #endif |
678 | | boundary_index = 12; |
679 | | while ( (1u << boundary_index) < boundary_size) |
680 | | boundary_index++; |
681 | | boundary_size = (1u << boundary_index); |
682 | | |
683 | | if ((rv = apr_allocator_create(&global_allocator)) != APR_SUCCESS) { |
684 | | apr_pools_initialized = 0; |
685 | | return rv; |
686 | | } |
687 | | |
688 | | if ((rv = apr_pool_create_ex(&global_pool, NULL, NULL, |
689 | | global_allocator)) != APR_SUCCESS) { |
690 | | apr_allocator_destroy(global_allocator); |
691 | | global_allocator = NULL; |
692 | | apr_pools_initialized = 0; |
693 | | return rv; |
694 | | } |
695 | | |
696 | | apr_pool_tag(global_pool, "apr_global_pool"); |
697 | | |
698 | | /* This has to happen here because mutexes might be backed by |
699 | | * atomics. It used to be snug and safe in apr_initialize(). |
700 | | * |
701 | | * Warning: apr_atomic_init() must always be called, by any |
702 | | * means possible, from apr_initialize(). |
703 | | */ |
704 | | if ((rv = apr_atomic_init(global_pool)) != APR_SUCCESS) { |
705 | | return rv; |
706 | | } |
707 | | |
708 | | #if APR_HAS_THREADS |
709 | | { |
710 | | apr_thread_mutex_t *mutex; |
711 | | |
712 | | if ((rv = apr_thread_mutex_create(&mutex, |
713 | | APR_THREAD_MUTEX_DEFAULT, |
714 | | global_pool)) != APR_SUCCESS) { |
715 | | return rv; |
716 | | } |
717 | | |
718 | | apr_allocator_mutex_set(global_allocator, mutex); |
719 | | } |
720 | | #endif /* APR_HAS_THREADS */ |
721 | | |
722 | | apr_allocator_owner_set(global_allocator, global_pool); |
723 | | |
724 | | return APR_SUCCESS; |
725 | | } |
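For example, when sysconf(_SC_PAGESIZE) reports 4096, the page-size loop near the top of this function leaves boundary_index at 12 and boundary_size at 4096; on a system with 16 KB pages it would settle at 14 and 16384 instead.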
726 | | |
727 | | APR_DECLARE(void) apr_pool_terminate(void) |
728 | | { |
729 | | if (!apr_pools_initialized) |
730 | | return; |
731 | | |
732 | | if (--apr_pools_initialized) |
733 | | return; |
734 | | |
735 | | apr_pool_destroy(global_pool); /* This will also destroy the mutex */ |
736 | | global_pool = NULL; |
737 | | |
738 | | global_allocator = NULL; |
739 | | } |
740 | | |
741 | | |
742 | | /* Node list management helper macros; list_insert() inserts 'node' |
743 | | * before 'point'. */ |
744 | | #define list_insert(node, point) do { \ |
745 | | node->ref = point->ref; \ |
746 | | *node->ref = node; \ |
747 | | node->next = point; \ |
748 | | point->ref = &node->next; \ |
749 | | } while (0) |
750 | | |
751 | | /* list_remove() removes 'node' from its list. */ |
752 | | #define list_remove(node) do { \ |
753 | | *node->ref = node->next; \ |
754 | | node->next->ref = node->ref; \ |
755 | | } while (0) |
756 | | |
757 | | /* Returns the amount of free space in the given node. */ |
758 | | #define node_free_space(node_) ((apr_size_t)(node_->endp - node_->first_avail)) |
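list_insert() and list_remove() above implement a circular list in which every element also stores a pointer to the pointer that references it, so an element can be unlinked without walking the ring. The same idea in a standalone sketch (hypothetical node type, not APR code):

    #include <stdio.h>

    struct ring {
        struct ring  *next;
        struct ring **ref;   /* points at whatever pointer currently points at us */
    };

    /* Same shape as the list_insert()/list_remove() macros above. */
    #define ring_insert(node, point) do {  \
            (node)->ref = (point)->ref;    \
            *(node)->ref = (node);         \
            (node)->next = (point);        \
            (point)->ref = &(node)->next;  \
        } while (0)

    #define ring_remove(node) do {             \
            *(node)->ref = (node)->next;       \
            (node)->next->ref = (node)->ref;   \
        } while (0)

    int main(void)
    {
        struct ring a, b;

        /* One-element ring that references itself, like a fresh pool's
         * self node (node->next = node; node->ref = &node->next). */
        a.next = &a;
        a.ref  = &a.next;

        ring_insert(&b, &a);                              /* insert b before a */
        printf("%d\n", a.next == &b && b.next == &a);     /* prints 1 */

        ring_remove(&b);                                  /* back to just a */
        printf("%d\n", a.next == &a && a.ref == &a.next); /* prints 1 */
        return 0;
    }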
759 | | |
760 | | /* |
761 | | * Helpers to mark pool as in-use/free. Used for finding thread-unsafe |
762 | | * concurrent accesses from different threads. |
763 | | */ |
764 | | #if APR_POOL_CONCURRENCY_CHECK |
765 | | |
766 | | static const char * const in_use_string[] = { "idle", "in use", "destroyed" }; |
767 | | |
768 | | static void pool_concurrency_abort(apr_pool_t *pool, apr_uint32_t new, apr_uint32_t old) |
769 | | { |
770 | | fprintf(stderr, "pool concurrency check: pool %p(%s), thread cur %lx " |
771 | | "in use by %lx, state %s -> %s \n", |
772 | | pool, pool->tag, (unsigned long)apr_os_thread_current(), |
773 | | (unsigned long)pool->in_use_by, |
774 | | in_use_string[old], in_use_string[new]); |
775 | | abort(); |
776 | | } |
777 | | |
778 | | static APR_INLINE void pool_concurrency_set_used(apr_pool_t *pool) |
779 | | { |
780 | | apr_uint32_t old; |
781 | | |
782 | | old = apr_atomic_cas32(&pool->in_use, IN_USE, IDLE); |
783 | | |
784 | | if (old != IDLE) |
785 | | pool_concurrency_abort(pool, IN_USE, old); |
786 | | |
787 | | pool->in_use_by = apr_os_thread_current(); |
788 | | } |
789 | | |
790 | | static APR_INLINE void pool_concurrency_set_idle(apr_pool_t *pool) |
791 | | { |
792 | | apr_uint32_t old; |
793 | | |
794 | | old = apr_atomic_cas32(&pool->in_use, IDLE, IN_USE); |
795 | | |
796 | | if (old != IN_USE) |
797 | | pool_concurrency_abort(pool, IDLE, old); |
798 | | } |
799 | | |
800 | | static APR_INLINE void pool_concurrency_init(apr_pool_t *pool) |
801 | | { |
802 | | pool->in_use = IDLE; |
803 | | } |
804 | | |
805 | | static APR_INLINE void pool_concurrency_set_destroyed(apr_pool_t *pool) |
806 | | { |
807 | | apr_uint32_t old; |
808 | | |
809 | | old = apr_atomic_cas32(&pool->in_use, DESTROYED, IDLE); |
810 | | |
811 | | if (old != IDLE) |
812 | | pool_concurrency_abort(pool, DESTROYED, old); |
813 | | pool->in_use_by = apr_os_thread_current(); |
814 | | } |
815 | | #else |
816 | | static APR_INLINE void pool_concurrency_init(apr_pool_t *pool) { } |
817 | | static APR_INLINE void pool_concurrency_set_used(apr_pool_t *pool) { } |
818 | | static APR_INLINE void pool_concurrency_set_idle(apr_pool_t *pool) { } |
819 | | static APR_INLINE void pool_concurrency_set_destroyed(apr_pool_t *pool) { } |
820 | | #endif /* APR_POOL_CONCURRENCY_CHECK */ |
821 | | |
822 | | /* |
823 | | * Memory allocation |
824 | | */ |
825 | | |
826 | | APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t in_size) |
827 | | { |
828 | | apr_memnode_t *active, *node; |
829 | | void *mem; |
830 | | apr_size_t size, free_index; |
831 | | |
832 | | pool_concurrency_set_used(pool); |
833 | | size = APR_ALIGN_DEFAULT(in_size); |
834 | | #if HAVE_VALGRIND |
835 | | if (apr_running_on_valgrind) |
836 | | size += 2 * REDZONE; |
837 | | #endif |
838 | | if (size < in_size) { |
839 | | pool_concurrency_set_idle(pool); |
840 | | if (pool->abort_fn) |
841 | | pool->abort_fn(APR_ENOMEM); |
842 | | |
843 | | return NULL; |
844 | | } |
845 | | active = pool->active; |
846 | | |
847 | | /* If the active node has enough bytes left, use it. */ |
848 | | if (size <= node_free_space(active)) { |
849 | | mem = active->first_avail; |
850 | | active->first_avail += size; |
851 | | goto have_mem; |
852 | | } |
853 | | |
854 | | node = active->next; |
855 | | if (size <= node_free_space(node)) { |
856 | | list_remove(node); |
857 | | } |
858 | | else { |
859 | | if ((node = allocator_alloc(pool->allocator, size)) == NULL) { |
860 | | pool_concurrency_set_idle(pool); |
861 | | if (pool->abort_fn) |
862 | | pool->abort_fn(APR_ENOMEM); |
863 | | |
864 | | return NULL; |
865 | | } |
866 | | } |
867 | | |
868 | | node->free_index = 0; |
869 | | |
870 | | mem = node->first_avail; |
871 | | node->first_avail += size; |
872 | | |
873 | | list_insert(node, active); |
874 | | |
875 | | pool->active = node; |
876 | | |
877 | | free_index = (APR_ALIGN(active->endp - active->first_avail + 1, |
878 | | BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX; |
879 | | |
880 | | active->free_index = (apr_uint32_t)free_index; |
881 | | node = active->next; |
882 | | if (free_index >= node->free_index) |
883 | | goto have_mem; |
884 | | |
885 | | do { |
886 | | node = node->next; |
887 | | } |
888 | | while (free_index < node->free_index); |
889 | | |
890 | | list_remove(active); |
891 | | list_insert(active, node); |
892 | | |
893 | | have_mem: |
894 | | #if HAVE_VALGRIND |
895 | | if (!apr_running_on_valgrind) { |
896 | | pool_concurrency_set_idle(pool); |
897 | | return mem; |
898 | | } |
899 | | else { |
900 | | mem = (char *)mem + REDZONE; |
901 | | VALGRIND_MEMPOOL_ALLOC(pool, mem, in_size); |
902 | | pool_concurrency_set_idle(pool); |
903 | | return mem; |
904 | | } |
905 | | #else |
906 | | pool_concurrency_set_idle(pool); |
907 | | return mem; |
908 | | #endif |
909 | | } |
910 | | |
911 | | /* Provide an implementation of apr_pcalloc for backward compatibility |
912 | | * with code built before apr_pcalloc was a macro |
913 | | */ |
914 | | |
915 | | #ifdef apr_pcalloc |
916 | | #undef apr_pcalloc |
917 | | #endif |
918 | | |
919 | | APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size); |
920 | | APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size) |
921 | | { |
922 | | void *mem; |
923 | | |
924 | | if ((mem = apr_palloc(pool, size)) != NULL) { |
925 | | memset(mem, 0, size); |
926 | | } |
927 | | |
928 | | return mem; |
929 | | } |
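Typical callers of the two entry points above look roughly like this (a sketch that assumes APR initializes successfully and skips most error handling):

    #include <string.h>
    #include <apr_general.h>
    #include <apr_pools.h>

    struct request {
        const char *method;
        int         status;
    };

    int main(void)
    {
        apr_pool_t *pool;
        char *buf;
        struct request *req;

        if (apr_initialize() != APR_SUCCESS)
            return 1;
        if (apr_pool_create(&pool, NULL) != APR_SUCCESS)
            return 1;

        /* apr_palloc() hands out aligned but uninitialized memory. */
        buf = apr_palloc(pool, 64);
        memcpy(buf, "GET", 4);

        /* apr_pcalloc() is apr_palloc() plus memset(0), as defined above. */
        req = apr_pcalloc(pool, sizeof(*req));
        req->method = buf;               /* req->status is already 0 */

        apr_pool_destroy(pool);          /* releases both allocations at once */
        apr_terminate();
        return 0;
    }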
930 | | |
931 | | |
932 | | /* |
933 | | * Pool creation/destruction |
934 | | */ |
935 | | |
936 | | APR_DECLARE(void) apr_pool_clear(apr_pool_t *pool) |
937 | | { |
938 | | apr_memnode_t *active; |
939 | | |
940 | | /* Run pre destroy cleanups */ |
941 | | run_cleanups(&pool->pre_cleanups); |
942 | | |
943 | | pool_concurrency_set_used(pool); |
944 | | pool->pre_cleanups = NULL; |
945 | | pool_concurrency_set_idle(pool); |
946 | | |
947 | | /* Destroy the subpools. The subpools will detach themselves from |
948 | | * this pool thus this loop is safe and easy. |
949 | | */ |
950 | | while (pool->child) |
951 | | apr_pool_destroy(pool->child); |
952 | | |
953 | | /* Run cleanups */ |
954 | | run_cleanups(&pool->cleanups); |
955 | | |
956 | | pool_concurrency_set_used(pool); |
957 | | pool->cleanups = NULL; |
958 | | pool->free_cleanups = NULL; |
959 | | |
960 | | /* Free subprocesses */ |
961 | | free_proc_chain(pool->subprocesses); |
962 | | pool->subprocesses = NULL; |
963 | | |
964 | | /* Clear the user data. */ |
965 | | pool->user_data = NULL; |
966 | | |
967 | | /* Find the node attached to the pool structure, reset it, make |
968 | | * it the active node and free the rest of the nodes. |
969 | | */ |
970 | | active = pool->active = pool->self; |
971 | | active->first_avail = pool->self_first_avail; |
972 | | |
973 | | APR_IF_VALGRIND(VALGRIND_MEMPOOL_TRIM(pool, pool, 1)); |
974 | | |
975 | | if (active->next == active) { |
976 | | pool_concurrency_set_idle(pool); |
977 | | return; |
978 | | } |
979 | | |
980 | | *active->ref = NULL; |
981 | | allocator_free(pool->allocator, active->next); |
982 | | active->next = active; |
983 | | active->ref = &active->next; |
984 | | |
985 | | pool_concurrency_set_idle(pool); |
986 | | } |
987 | | |
988 | | APR_DECLARE(void) apr_pool_destroy(apr_pool_t *pool) |
989 | | { |
990 | | apr_memnode_t *active; |
991 | | apr_allocator_t *allocator; |
992 | | |
993 | | /* Run pre destroy cleanups */ |
994 | | run_cleanups(&pool->pre_cleanups); |
995 | | |
996 | | pool_concurrency_set_used(pool); |
997 | | pool->pre_cleanups = NULL; |
998 | | pool_concurrency_set_idle(pool); |
999 | | |
1000 | | /* Destroy the subpools. The subpools will detach themselves from |
1001 | | * this pool thus this loop is safe and easy. |
1002 | | */ |
1003 | | while (pool->child) |
1004 | | apr_pool_destroy(pool->child); |
1005 | | |
1006 | | /* Run cleanups */ |
1007 | | run_cleanups(&pool->cleanups); |
1008 | | pool_concurrency_set_destroyed(pool); |
1009 | | |
1010 | | /* Free subprocesses */ |
1011 | | free_proc_chain(pool->subprocesses); |
1012 | | |
1013 | | /* Remove the pool from the parents child list */ |
1014 | | if (pool->parent) { |
1015 | | allocator_lock(pool->parent->allocator); |
1016 | | |
1017 | | if ((*pool->ref = pool->sibling) != NULL) |
1018 | | pool->sibling->ref = pool->ref; |
1019 | | |
1020 | | allocator_unlock(pool->parent->allocator); |
1021 | | } |
1022 | | |
1023 | | /* Find the block attached to the pool structure. Save a copy of the |
1024 | | * allocator pointer, because the pool struct soon will be no more. |
1025 | | */ |
1026 | | allocator = pool->allocator; |
1027 | | active = pool->self; |
1028 | | *active->ref = NULL; |
1029 | | |
1030 | | #if APR_HAS_THREADS |
1031 | | if (apr_allocator_owner_get(allocator) == pool) { |
1032 | | /* Make sure to remove the lock, since it is highly likely to |
1033 | | * be invalid now. |
1034 | | */ |
1035 | | apr_allocator_mutex_set(allocator, NULL); |
1036 | | } |
1037 | | #endif /* APR_HAS_THREADS */ |
1038 | | |
1039 | | /* Free all the nodes in the pool (including the node holding the |
1040 | | * pool struct), by giving them back to the allocator. |
1041 | | */ |
1042 | | allocator_free(allocator, active); |
1043 | | |
1044 | | /* If this pool happens to be the owner of the allocator, free |
1045 | | * everything in the allocator (that includes the pool struct |
1046 | | * and the allocator). Don't worry about destroying the optional mutex |
1047 | | * in the allocator, it will have been destroyed by the cleanup function. |
1048 | | */ |
1049 | | if (apr_allocator_owner_get(allocator) == pool) { |
1050 | | apr_allocator_destroy(allocator); |
1051 | | } |
1052 | | APR_IF_VALGRIND(VALGRIND_DESTROY_MEMPOOL(pool)); |
1053 | | } |
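The clear/destroy pair above is usually driven by code along these lines (a sketch; the cleanup callback and the loop bound are purely illustrative):

    #include <stdio.h>
    #include <apr_general.h>
    #include <apr_pools.h>

    /* Illustrative cleanup: runs when its pool is cleared or destroyed. */
    static apr_status_t report_cleanup(void *data)
    {
        printf("cleanup: %s\n", (const char *)data);
        return APR_SUCCESS;
    }

    int main(void)
    {
        apr_pool_t *root, *scratch;
        int i;

        if (apr_initialize() != APR_SUCCESS)
            return 1;
        if (apr_pool_create(&root, NULL) != APR_SUCCESS)
            return 1;
        if (apr_pool_create(&scratch, root) != APR_SUCCESS)
            return 1;

        apr_pool_cleanup_register(root, "root pool", report_cleanup,
                                  apr_pool_cleanup_null);

        for (i = 0; i < 3; i++) {
            void *tmp = apr_palloc(scratch, 1024);
            (void)tmp;                 /* ... per-iteration work ... */
            apr_pool_clear(scratch);   /* recycle the memory, keep the pool */
        }

        apr_pool_destroy(root);        /* destroys 'scratch' too and runs cleanups */
        apr_terminate();
        return 0;
    }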
1054 | | |
1055 | | APR_DECLARE(apr_status_t) apr_pool_create_ex(apr_pool_t **newpool, |
1056 | | apr_pool_t *parent, |
1057 | | apr_abortfunc_t abort_fn, |
1058 | | apr_allocator_t *allocator) |
1059 | | { |
1060 | | apr_pool_t *pool; |
1061 | | apr_memnode_t *node; |
1062 | | |
1063 | | *newpool = NULL; |
1064 | | |
1065 | | if (!parent) |
1066 | | parent = global_pool; |
1067 | | |
1068 | | /* parent will always be non-NULL here except the first time a |
1069 | | * pool is created, in which case allocator is guaranteed to be |
1070 | | * non-NULL. */ |
1071 | | |
1072 | | if (!abort_fn && parent) |
1073 | | abort_fn = parent->abort_fn; |
1074 | | |
1075 | | if (allocator == NULL) |
1076 | | allocator = parent->allocator; |
1077 | | |
1078 | | if ((node = allocator_alloc(allocator, |
1079 | | MIN_ALLOC - APR_MEMNODE_T_SIZE)) == NULL) { |
1080 | | if (abort_fn) |
1081 | | abort_fn(APR_ENOMEM); |
1082 | | |
1083 | | return APR_ENOMEM; |
1084 | | } |
1085 | | |
1086 | | node->next = node; |
1087 | | node->ref = &node->next; |
1088 | | |
1089 | | #if HAVE_VALGRIND |
1090 | | if (!apr_running_on_valgrind) { |
1091 | | pool = (apr_pool_t *)node->first_avail; |
1092 | | pool->self_first_avail = (char *)pool + SIZEOF_POOL_T; |
1093 | | } |
1094 | | else { |
1095 | | pool = (apr_pool_t *)(node->first_avail + REDZONE); |
1096 | | pool->self_first_avail = (char *)pool + SIZEOF_POOL_T + 2 * REDZONE; |
1097 | | VALGRIND_MAKE_MEM_NOACCESS(pool->self_first_avail, |
1098 | | node->endp - pool->self_first_avail); |
1099 | | VALGRIND_CREATE_MEMPOOL(pool, REDZONE, 0); |
1100 | | } |
1101 | | #else |
1102 | | pool = (apr_pool_t *)node->first_avail; |
1103 | | pool->self_first_avail = (char *)pool + SIZEOF_POOL_T; |
1104 | | #endif |
1105 | | node->first_avail = pool->self_first_avail; |
1106 | | |
1107 | | pool->allocator = allocator; |
1108 | | pool->active = pool->self = node; |
1109 | | pool->abort_fn = abort_fn; |
1110 | | pool->child = NULL; |
1111 | | pool->cleanups = NULL; |
1112 | | pool->free_cleanups = NULL; |
1113 | | pool->pre_cleanups = NULL; |
1114 | | pool->subprocesses = NULL; |
1115 | | pool->user_data = NULL; |
1116 | | pool->tag = NULL; |
1117 | | |
1118 | | #ifdef NETWARE |
1119 | | pool->owner_proc = (apr_os_proc_t)getnlmhandle(); |
1120 | | #endif /* defined(NETWARE) */ |
1121 | | |
1122 | | if ((pool->parent = parent) != NULL) { |
1123 | | allocator_lock(parent->allocator); |
1124 | | |
1125 | | if ((pool->sibling = parent->child) != NULL) |
1126 | | pool->sibling->ref = &pool->sibling; |
1127 | | |
1128 | | parent->child = pool; |
1129 | | pool->ref = &parent->child; |
1130 | | |
1131 | | allocator_unlock(parent->allocator); |
1132 | | } |
1133 | | else { |
1134 | | pool->sibling = NULL; |
1135 | | pool->ref = NULL; |
1136 | | } |
1137 | | |
1138 | | pool_concurrency_init(pool); |
1139 | | |
1140 | | *newpool = pool; |
1141 | | |
1142 | | return APR_SUCCESS; |
1143 | | } |
1144 | | |
1145 | | APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex(apr_pool_t **newpool, |
1146 | | apr_abortfunc_t abort_fn, |
1147 | | apr_allocator_t *allocator) |
1148 | | { |
1149 | | apr_pool_t *pool; |
1150 | | apr_memnode_t *node; |
1151 | | apr_allocator_t *pool_allocator; |
1152 | | |
1153 | | *newpool = NULL; |
1154 | | |
1155 | | if (!apr_pools_initialized) |
1156 | | return APR_ENOPOOL; |
1157 | | if ((pool_allocator = allocator) == NULL) { |
1158 | | if (apr_allocator_create(&pool_allocator) != APR_SUCCESS) { |
1159 | | if (abort_fn) |
1160 | | abort_fn(APR_ENOMEM); |
1161 | | |
1162 | | return APR_ENOMEM; |
1163 | | } |
1164 | | if ((node = allocator_alloc(pool_allocator, |
1165 | | MIN_ALLOC - APR_MEMNODE_T_SIZE)) == NULL) { |
1166 | | if (abort_fn) |
1167 | | abort_fn(APR_ENOMEM); |
1168 | | |
1169 | | apr_allocator_destroy(pool_allocator); |
1170 | | |
1171 | | return APR_ENOMEM; |
1172 | | } |
1173 | | } |
1174 | | else if ((node = allocator_alloc(pool_allocator, |
1175 | | MIN_ALLOC - APR_MEMNODE_T_SIZE)) == NULL) { |
1176 | | if (abort_fn) |
1177 | | abort_fn(APR_ENOMEM); |
1178 | | |
1179 | | return APR_ENOMEM; |
1180 | | } |
1181 | | |
1182 | | node->next = node; |
1183 | | node->ref = &node->next; |
1184 | | |
1185 | | pool = (apr_pool_t *)node->first_avail; |
1186 | | node->first_avail = pool->self_first_avail = (char *)pool + SIZEOF_POOL_T; |
1187 | | |
1188 | | pool->allocator = pool_allocator; |
1189 | | pool->active = pool->self = node; |
1190 | | pool->abort_fn = abort_fn; |
1191 | | pool->child = NULL; |
1192 | | pool->cleanups = NULL; |
1193 | | pool->free_cleanups = NULL; |
1194 | | pool->pre_cleanups = NULL; |
1195 | | pool->subprocesses = NULL; |
1196 | | pool->user_data = NULL; |
1197 | | pool->tag = NULL; |
1198 | | pool->parent = NULL; |
1199 | | pool->sibling = NULL; |
1200 | | pool->ref = NULL; |
1201 | | |
1202 | | #ifdef NETWARE |
1203 | | pool->owner_proc = (apr_os_proc_t)getnlmhandle(); |
1204 | | #endif /* defined(NETWARE) */ |
1205 | | if (!allocator) |
1206 | | pool_allocator->owner = pool; |
1207 | | |
1208 | | pool_concurrency_init(pool); |
1209 | | *newpool = pool; |
1210 | | |
1211 | | return APR_SUCCESS; |
1212 | | } |
1213 | | |
1214 | | /* |
1215 | | * "Print" functions |
1216 | | */ |
1217 | | |
1218 | | /* |
1219 | | * apr_psprintf is implemented by writing directly into the current |
1220 | | * block of the pool, starting right at first_avail. If there's |
1221 | | * insufficient room, then a new block is allocated and the earlier |
1222 | | * output is copied over. The new block isn't linked into the pool |
1223 | | * until all the output is done. |
1224 | | * |
1225 | | * Note that this is completely safe because nothing else can |
1226 | | * allocate in this apr_pool_t while apr_psprintf is running. alarms are |
1227 | | * blocked, and the only thing outside of apr_pools.c that's invoked |
1228 | | * is apr_vformatter -- which was purposefully written to be |
1229 | | * self-contained with no callouts. |
1230 | | */ |
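From the caller's point of view all of this machinery is hidden behind apr_psprintf()/apr_pvsprintf(); a minimal use looks like the following (a sketch assuming an already-created pool):

    #include <apr_pools.h>
    #include <apr_strings.h>

    /* Builds a pool-allocated string; it stays valid until 'pool' is
     * cleared or destroyed, so the caller never calls free() on it. */
    static char *describe_status(apr_pool_t *pool, const char *host, int status)
    {
        return apr_psprintf(pool, "host %s answered with status %d",
                            host, status);
    }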
1231 | | |
1232 | | struct psprintf_data { |
1233 | | apr_vformatter_buff_t vbuff; |
1234 | | apr_memnode_t *node; |
1235 | | apr_pool_t *pool; |
1236 | | apr_byte_t got_a_new_node; |
1237 | | apr_memnode_t *free; |
1238 | | }; |
1239 | | |
1240 | | #define APR_PSPRINTF_MIN_STRINGSIZE 32 |
1241 | | |
1242 | | static int psprintf_flush(apr_vformatter_buff_t *vbuff) |
1243 | | { |
1244 | | struct psprintf_data *ps = (struct psprintf_data *)vbuff; |
1245 | | apr_memnode_t *node, *active; |
1246 | | apr_size_t cur_len, size; |
1247 | | char *strp; |
1248 | | apr_pool_t *pool; |
1249 | | apr_size_t free_index; |
1250 | | |
1251 | | pool = ps->pool; |
1252 | | active = ps->node; |
1253 | | strp = ps->vbuff.curpos; |
1254 | | cur_len = strp - active->first_avail; |
1255 | | size = cur_len << 1; |
1256 | | |
1257 | | /* Make sure that we don't try to use a block that has less |
1258 | | * than APR_PSPRINTF_MIN_STRINGSIZE bytes left in it. This |
1259 | | * also catches the case where size == 0, which would result |
1260 | | * in reusing a block that can't even hold the NUL byte. |
1261 | | */ |
1262 | | if (size < APR_PSPRINTF_MIN_STRINGSIZE) |
1263 | | size = APR_PSPRINTF_MIN_STRINGSIZE; |
1264 | | |
1265 | | node = active->next; |
1266 | | if (!ps->got_a_new_node && size <= node_free_space(node)) { |
1267 | | |
1268 | | list_remove(node); |
1269 | | list_insert(node, active); |
1270 | | |
1271 | | node->free_index = 0; |
1272 | | |
1273 | | pool->active = node; |
1274 | | |
1275 | | free_index = (APR_ALIGN(active->endp - active->first_avail + 1, |
1276 | | BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX; |
1277 | | |
1278 | | active->free_index = (apr_uint32_t)free_index; |
1279 | | node = active->next; |
1280 | | if (free_index < node->free_index) { |
1281 | | do { |
1282 | | node = node->next; |
1283 | | } |
1284 | | while (free_index < node->free_index); |
1285 | | |
1286 | | list_remove(active); |
1287 | | list_insert(active, node); |
1288 | | } |
1289 | | |
1290 | | node = pool->active; |
1291 | | } |
1292 | | else { |
1293 | | if ((node = allocator_alloc(pool->allocator, size)) == NULL) |
1294 | | return -1; |
1295 | | |
1296 | | if (ps->got_a_new_node) { |
1297 | | active->next = ps->free; |
1298 | | ps->free = active; |
1299 | | } |
1300 | | |
1301 | | ps->got_a_new_node = 1; |
1302 | | } |
1303 | | |
1304 | | APR_VALGRIND_UNDEFINED(node->first_avail, |
1305 | | node->endp - node->first_avail); |
1306 | | memcpy(node->first_avail, active->first_avail, cur_len); |
1307 | | APR_VALGRIND_NOACCESS(active->first_avail, |
1308 | | active->endp - active->first_avail); |
1309 | | |
1310 | | ps->node = node; |
1311 | | ps->vbuff.curpos = node->first_avail + cur_len; |
1312 | | ps->vbuff.endpos = node->endp - 1; /* Save a byte for NUL terminator */ |
1313 | | |
1314 | | return 0; |
1315 | | } |
1316 | | |
1317 | | #if HAVE_VALGRIND |
1318 | | static int add_redzone(int (*flush_func)(apr_vformatter_buff_t *b), |
1319 | | struct psprintf_data *ps) |
1320 | | { |
1321 | | apr_size_t len = ps->vbuff.curpos - ps->node->first_avail + REDZONE; |
1322 | | |
1323 | | while (ps->vbuff.curpos - ps->node->first_avail < len) { |
1324 | | if (ps->vbuff.endpos - ps->node->first_avail >= len) |
1325 | | ps->vbuff.curpos = ps->node->first_avail + len; |
1326 | | else |
1327 | | ps->vbuff.curpos = ps->vbuff.endpos; |
1328 | | |
1329 | | /* |
1330 | | * Prevent valgrind from complaining when psprintf_flush() |
1331 | | * does a memcpy(). The VALGRIND_MEMPOOL_ALLOC() will reset |
1332 | | * the redzone to NOACCESS. |
1333 | | */ |
1334 | | if (ps->vbuff.curpos != ps->node->first_avail) |
1335 | | VALGRIND_MAKE_MEM_DEFINED(ps->node->first_avail, |
1336 | | ps->vbuff.curpos - ps->node->first_avail); |
1337 | | if (ps->vbuff.curpos == ps->vbuff.endpos) { |
1338 | | if (psprintf_flush(&ps->vbuff) == -1) |
1339 | | return -1; |
1340 | | } |
1341 | | } |
1342 | | return 0; |
1343 | | } |
1344 | | #endif |
1345 | | |
1346 | | APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list ap) |
1347 | | { |
1348 | | struct psprintf_data ps; |
1349 | | char *strp; |
1350 | | apr_size_t size; |
1351 | | apr_memnode_t *active, *node; |
1352 | | apr_size_t free_index; |
1353 | | |
1354 | | pool_concurrency_set_used(pool); |
1355 | | ps.node = pool->active; |
1356 | | ps.pool = pool; |
1357 | | ps.vbuff.curpos = ps.node->first_avail; |
1358 | | |
1359 | | /* Save a byte for the NUL terminator */ |
1360 | | ps.vbuff.endpos = ps.node->endp - 1; |
1361 | | ps.got_a_new_node = 0; |
1362 | | ps.free = NULL; |
1363 | | |
1364 | | /* Make sure that the first node passed to apr_vformatter has at least |
1365 | | * room to hold the NUL terminator. |
1366 | | */ |
1367 | | if (ps.node->first_avail == ps.node->endp) { |
1368 | | if (psprintf_flush(&ps.vbuff) == -1) |
1369 | | goto error; |
1370 | | } |
1371 | | #if HAVE_VALGRIND |
1372 | | if (apr_running_on_valgrind) { |
1373 | | if (add_redzone(psprintf_flush, &ps) == -1) |
1374 | | goto error; |
1375 | | if (!ps.got_a_new_node) { |
1376 | | /* psprintf_flush() has not been called, allow access to our node */ |
1377 | | VALGRIND_MAKE_MEM_UNDEFINED(ps.vbuff.curpos, |
1378 | | ps.node->endp - ps.vbuff.curpos); |
1379 | | } |
1380 | | } |
1381 | | #endif /* HAVE_VALGRIND */ |
1382 | | |
1383 | | if (apr_vformatter(psprintf_flush, &ps.vbuff, fmt, ap) == -1) |
1384 | | goto error; |
1385 | | |
1386 | | *ps.vbuff.curpos++ = '\0'; |
1387 | | |
1388 | | #if HAVE_VALGRIND |
1389 | | if (!apr_running_on_valgrind) { |
1390 | | strp = ps.node->first_avail; |
1391 | | } |
1392 | | else { |
1393 | | if (add_redzone(psprintf_flush, &ps) == -1) |
1394 | | goto error; |
1395 | | if (ps.node->endp != ps.vbuff.curpos) |
1396 | | APR_VALGRIND_NOACCESS(ps.vbuff.curpos, |
1397 | | ps.node->endp - ps.vbuff.curpos); |
1398 | | strp = ps.node->first_avail + REDZONE; |
1399 | | size = ps.vbuff.curpos - strp; |
1400 | | VALGRIND_MEMPOOL_ALLOC(pool, strp, size); |
1401 | | VALGRIND_MAKE_MEM_DEFINED(strp, size); |
1402 | | } |
1403 | | #else |
1404 | | strp = ps.node->first_avail; |
1405 | | #endif |
1406 | | |
1407 | | size = ps.vbuff.curpos - ps.node->first_avail; |
1408 | | size = APR_ALIGN_DEFAULT(size); |
1409 | | ps.node->first_avail += size; |
1410 | | |
1411 | | if (ps.free) |
1412 | | allocator_free(pool->allocator, ps.free); |
1413 | | |
1414 | | /* |
1415 | | * Link the node in if it's a new one |
1416 | | */ |
1417 | | if (!ps.got_a_new_node) { |
1418 | | pool_concurrency_set_idle(pool); |
1419 | | return strp; |
1420 | | } |
1421 | | |
1422 | | active = pool->active; |
1423 | | node = ps.node; |
1424 | | |
1425 | | node->free_index = 0; |
1426 | | |
1427 | | list_insert(node, active); |
1428 | | |
1429 | | pool->active = node; |
1430 | | |
1431 | | free_index = (APR_ALIGN(active->endp - active->first_avail + 1, |
1432 | | BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX; |
1433 | | |
1434 | | active->free_index = (apr_uint32_t)free_index; |
1435 | | node = active->next; |
1436 | | |
1437 | | if (free_index >= node->free_index) { |
1438 | | pool_concurrency_set_idle(pool); |
1439 | | return strp; |
1440 | | } |
1441 | | |
1442 | | do { |
1443 | | node = node->next; |
1444 | | } |
1445 | | while (free_index < node->free_index); |
1446 | | |
1447 | | list_remove(active); |
1448 | | list_insert(active, node); |
1449 | | |
1450 | | pool_concurrency_set_idle(pool); |
1451 | | return strp; |
1452 | | |
1453 | | error: |
1454 | | pool_concurrency_set_idle(pool); |
1455 | | if (pool->abort_fn) |
1456 | | pool->abort_fn(APR_ENOMEM); |
1457 | | if (ps.got_a_new_node) { |
1458 | | ps.node->next = ps.free; |
1459 | | allocator_free(pool->allocator, ps.node); |
1460 | | } |
1461 | | APR_VALGRIND_NOACCESS(pool->active->first_avail, |
1462 | | pool->active->endp - pool->active->first_avail); |
1463 | | return NULL; |
1464 | | } |
1465 | | |
1466 | | |
1467 | | #else /* APR_POOL_DEBUG */ |
1468 | | /* |
1469 | | * Debug helper functions |
1470 | | */ |
1471 | | |
1472 | | static APR_INLINE |
1473 | | void pool_lock(apr_pool_t *pool) |
1474 | 411 | { |
1475 | 411 | #if APR_HAS_THREADS |
1476 | 411 | apr_thread_mutex_lock(pool->mutex); |
1477 | 411 | #endif /* APR_HAS_THREADS */ |
1478 | 411 | } |
1479 | | |
1480 | | static APR_INLINE |
1481 | | void pool_unlock(apr_pool_t *pool) |
1482 | 411 | { |
1483 | 411 | #if APR_HAS_THREADS |
1484 | 411 | apr_thread_mutex_unlock(pool->mutex); |
1485 | 411 | #endif /* APR_HAS_THREADS */ |
1486 | 411 | } |
1487 | | |
1488 | | #if APR_HAS_THREADS |
1489 | | static APR_INLINE |
1490 | | apr_thread_mutex_t *parent_lock(apr_pool_t *pool) |
1491 | 822 | { |
1492 | 822 | if (pool->parent) { |
1493 | 411 | apr_thread_mutex_lock(pool->parent->mutex); |
1494 | 411 | return pool->parent->mutex; |
1495 | 411 | } |
1496 | 411 | return NULL; |
1497 | 822 | } |
1498 | | |
1499 | | static APR_INLINE |
1500 | | void parent_unlock(apr_thread_mutex_t *mutex) |
1501 | 822 | { |
1502 | 822 | if (mutex) { |
1503 | 411 | apr_thread_mutex_unlock(mutex); |
1504 | 411 | } |
1505 | 822 | } |
1506 | | #endif /* APR_HAS_THREADS */ |
1507 | | |
1508 | | /* |
1509 | | * Walk the pool tree rooted at pool, depth first. When fn returns |
1510 | | * anything other than 0, abort the traversal and return the value |
1511 | | * returned by fn. |
1512 | | */ |
1513 | | static int apr_pool_walk_tree(apr_pool_t *pool, |
1514 | | int (*fn)(apr_pool_t *pool, void *data), |
1515 | | void *data) |
1516 | 0 | { |
1517 | 0 | int rv; |
1518 | 0 | apr_pool_t *child; |
1519 | |
1520 | 0 | rv = fn(pool, data); |
1521 | 0 | if (rv) |
1522 | 0 | return rv; |
1523 | | |
1524 | 0 | pool_lock(pool); |
1525 | |
1526 | 0 | child = pool->child; |
1527 | 0 | while (child) { |
1528 | 0 | rv = apr_pool_walk_tree(child, fn, data); |
1529 | 0 | if (rv) |
1530 | 0 | break; |
1531 | | |
1532 | 0 | child = child->sibling; |
1533 | 0 | } |
1534 | |
1535 | 0 | pool_unlock(pool); |
1536 | |
1537 | 0 | return rv; |
1538 | 0 | } |
1539 | | |
1540 | | #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) |
1541 | | static void apr_pool_log_event(apr_pool_t *pool, const char *event, |
1542 | | const char *file_line, int deref) |
1543 | | { |
1544 | | if (file_stderr) { |
1545 | | if (deref) { |
1546 | | apr_file_printf(file_stderr, |
1547 | | "POOL DEBUG: " |
1548 | | "[%lu" |
1549 | | #if APR_HAS_THREADS |
1550 | | "/%lu" |
1551 | | #endif /* APR_HAS_THREADS */ |
1552 | | "] " |
1553 | | "%7s " |
1554 | | "(%10lu/%10lu/%10lu) " |
1555 | | "0x%pp \"%s\" " |
1556 | | "<%s> " |
1557 | | "0x%pp " |
1558 | | "(%u/%u/%u) " |
1559 | | "\n", |
1560 | | (unsigned long)getpid(), |
1561 | | #if APR_HAS_THREADS |
1562 | | (unsigned long)apr_os_thread_current(), |
1563 | | #endif /* APR_HAS_THREADS */ |
1564 | | event, |
1565 | | (unsigned long)apr_pool_num_bytes(pool, 0), |
1566 | | (unsigned long)apr_pool_num_bytes(pool, 1), |
1567 | | (unsigned long)apr_pool_num_bytes(global_pool, 1), |
1568 | | pool, pool->tag, |
1569 | | file_line, |
1570 | | pool->parent, |
1571 | | pool->stat_alloc, pool->stat_total_alloc, pool->stat_clear); |
1572 | | } |
1573 | | else { |
1574 | | apr_file_printf(file_stderr, |
1575 | | "POOL DEBUG: " |
1576 | | "[%lu" |
1577 | | #if APR_HAS_THREADS |
1578 | | "/%lu" |
1579 | | #endif /* APR_HAS_THREADS */ |
1580 | | "] " |
1581 | | "%7s " |
1582 | | " " |
1583 | | "0x%pp " |
1584 | | "<%s> " |
1585 | | "\n", |
1586 | | (unsigned long)getpid(), |
1587 | | #if APR_HAS_THREADS |
1588 | | (unsigned long)apr_os_thread_current(), |
1589 | | #endif /* APR_HAS_THREADS */ |
1590 | | event, |
1591 | | pool, |
1592 | | file_line); |
1593 | | } |
1594 | | } |
1595 | | } |
1596 | | #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */ |
1597 | | |
1598 | | #if (APR_POOL_DEBUG & APR_POOL_DEBUG_LIFETIME) |
1599 | | static int pool_is_child_of(apr_pool_t *parent, void *data) |
1600 | | { |
1601 | | apr_pool_t *pool = (apr_pool_t *)data; |
1602 | | |
1603 | | return (pool == parent); |
1604 | | } |
1605 | | |
1606 | | static int apr_pool_is_child_of(apr_pool_t *pool, apr_pool_t *parent) |
1607 | | { |
1608 | | if (parent == NULL) |
1609 | | return 0; |
1610 | | |
1611 | | return apr_pool_walk_tree(parent, pool_is_child_of, pool); |
1612 | | } |
1613 | | #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_LIFETIME) */ |
1614 | | |
1615 | | static void apr_pool_check_lifetime(apr_pool_t *pool) |
1616 | 2.26k | { |
1617 | | /* Rule of thumb: use of the global pool is always |
1618 | | * ok, since the only user is apr_pools.c. Unless |
1619 | | * people have searched for the top level parent and |
1620 | | * started to use that... |
1621 | | */ |
1622 | 2.26k | if (pool == global_pool || global_pool == NULL) |
1623 | 1.64k | return; |
1624 | | |
1625 | | /* Lifetime |
1626 | | * This basically checks to see if the pool being used is still |
1627 | | * a relative of the global pool. If not, it was previously |
1628 | | * destroyed, in which case we abort(). |
1629 | | */ |
1630 | | #if (APR_POOL_DEBUG & APR_POOL_DEBUG_LIFETIME) |
1631 | | if (!apr_pool_is_child_of(pool, global_pool)) { |
1632 | | #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) |
1633 | | apr_pool_log_event(pool, "LIFE", |
1634 | | __FILE__ ":apr_pool_integrity check [lifetime]", 0); |
1635 | | #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */ |
1636 | | abort(); |
1637 | | } |
1638 | | #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_LIFETIME) */ |
1639 | 2.26k | } |
1640 | | |
1641 | | static void apr_pool_check_owner(apr_pool_t *pool) |
1642 | 2.26k | { |
1643 | | #if (APR_POOL_DEBUG & APR_POOL_DEBUG_OWNER) |
1644 | | #if APR_HAS_THREADS |
1645 | | if (!apr_os_thread_equal(pool->owner, apr_os_thread_current())) { |
1646 | | #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) |
1647 | | apr_pool_log_event(pool, "THREAD", |
1648 | | __FILE__ ":apr_pool_integrity check [owner]", 0); |
1649 | | #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */ |
1650 | | abort(); |
1651 | | } |
1652 | | #endif /* APR_HAS_THREADS */ |
1653 | | #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_OWNER) */ |
1654 | 2.26k | } |
1655 | | |
1656 | | static void apr_pool_check_integrity(apr_pool_t *pool) |
1657 | 1.43k | { |
1658 | 1.43k | apr_pool_check_lifetime(pool); |
1659 | 1.43k | apr_pool_check_owner(pool); |
1660 | 1.43k | } |
1661 | | |
1662 | | APR_DECLARE(void) apr_pool_owner_set(apr_pool_t *pool, apr_uint32_t flags) |
1663 | 0 | { |
1664 | | #if APR_HAS_THREADS && (APR_POOL_DEBUG & APR_POOL_DEBUG_OWNER) |
1665 | | pool->owner = apr_os_thread_current(); |
1666 | | #endif |
1667 | 0 | } |
1668 | | |
1669 | | /* |
1670 | | * Initialization (debug) |
1671 | | */ |
1672 | | |
1673 | | APR_DECLARE(apr_status_t) apr_pool_initialize(void) |
1674 | 411 | { |
1675 | 411 | apr_status_t rv; |
1676 | | #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) |
1677 | | char *logpath; |
1678 | | apr_file_t *debug_log = NULL; |
1679 | | #endif |
1680 | | |
1681 | 411 | if (apr_pools_initialized++) |
1682 | 0 | return APR_SUCCESS; |
1683 | | |
1684 | 411 | #if defined(_SC_PAGESIZE) |
1685 | 411 | boundary_size = sysconf(_SC_PAGESIZE); |
1686 | | #elif defined(WIN32) |
1687 | | { |
1688 | | SYSTEM_INFO si; |
1689 | | GetSystemInfo(&si); |
1690 | | boundary_size = si.dwPageSize; |
1691 | | } |
1692 | | #endif |
1693 | 411 | boundary_index = 12; |
1694 | 411 | while ( (1 << boundary_index) < boundary_size) |
1695 | 0 | boundary_index++; |
1696 | 411 | boundary_size = (1 << boundary_index); |
1697 | | |
1698 | | /* Since the debug code works a bit differently than the |
1699 | | * regular pools code, we ask for a lock here. The regular |
1700 | | * pools code has this lock embedded in the global |
1701 | | * allocator, a concept unknown to debug mode. |
1702 | | */ |
1703 | 411 | if ((rv = apr_pool_create_ex(&global_pool, NULL, NULL, |
1704 | 411 | NULL)) != APR_SUCCESS) { |
1705 | 0 | return rv; |
1706 | 0 | } |
1707 | | |
1708 | 411 | apr_pool_tag(global_pool, "APR global pool"); |
1709 | | |
1710 | 411 | apr_pools_initialized = 1; |
1711 | | |
1712 | | /* This has to happen here because mutexes might be backed by |
1713 | | * atomics. It used to be snug and safe in apr_initialize(). |
1714 | | */ |
1715 | 411 | if ((rv = apr_atomic_init(global_pool)) != APR_SUCCESS) { |
1716 | 0 | return rv; |
1717 | 0 | } |
1718 | | |
1719 | | #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) |
1720 | | rv = apr_env_get(&logpath, "APR_POOL_DEBUG_LOG", global_pool); |
1721 | | |
1722 | | /* Don't pass file_stderr directly to apr_file_open() here, since |
1723 | | * apr_file_open() can call back to apr_pool_log_event() and that |
1724 | | * may attempt to use the then non-NULL but partially set up file
1725 | | * object. */ |
1726 | | if (rv == APR_SUCCESS) { |
1727 | | apr_file_open(&debug_log, logpath, |
1728 | | APR_FOPEN_APPEND|APR_FOPEN_WRITE|APR_FOPEN_CREATE, |
1729 | | APR_FPROT_OS_DEFAULT, global_pool); |
1730 | | } |
1731 | | else { |
1732 | | apr_file_open_stderr(&debug_log, global_pool); |
1733 | | } |
1734 | | |
1735 | | /* debug_log is now a file handle. */ |
1736 | | file_stderr = debug_log; |
1737 | | |
1738 | | if (file_stderr) { |
1739 | | apr_file_printf(file_stderr, |
1740 | | "POOL DEBUG: [PID" |
1741 | | #if APR_HAS_THREADS |
1742 | | "/TID" |
1743 | | #endif /* APR_HAS_THREADS */ |
1744 | | "] ACTION (SIZE /POOL SIZE /TOTAL SIZE) " |
1745 | | "POOL \"TAG\" <__FILE__:__LINE__> PARENT (ALLOCS/TOTAL ALLOCS/CLEARS)\n"); |
1746 | | |
1747 | | apr_pool_log_event(global_pool, "GLOBAL", __FILE__ ":apr_pool_initialize", 0); |
1748 | | |
1749 | | /* Add a cleanup handler that sets the debug log file handle |
1750 | | * to NULL, otherwise we'll try to log the global pool |
1751 | | * destruction event with predictably disastrous results. */ |
1752 | | apr_pool_cleanup_register(global_pool, NULL, |
1753 | | apr_pool_cleanup_file_stderr, |
1754 | | apr_pool_cleanup_null); |
1755 | | } |
1756 | | #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */ |
1757 | | |
1758 | 411 | return APR_SUCCESS; |
1759 | 411 | } |
1760 | | |
1761 | | APR_DECLARE(void) apr_pool_terminate(void) |
1762 | 411 | { |
1763 | 411 | if (!apr_pools_initialized) |
1764 | 0 | return; |
1765 | | |
1766 | 411 | if (--apr_pools_initialized) |
1767 | 0 | return; |
1768 | | |
1769 | 411 | apr_pool_destroy(global_pool); /* This will also destroy the mutex */ |
1770 | 411 | global_pool = NULL; |
1771 | | |
1772 | | #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) |
1773 | | file_stderr = NULL; |
1774 | | #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */ |
1775 | 411 | } |
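| |
| | /* Usage sketch (illustrative, not part of apr_pools.c): initialization
| |  * is reference counted, so every apr_pool_initialize() must be matched
| |  * by an apr_pool_terminate(); only the last call tears down global_pool.
| |  * Applications typically get this pairing via apr_initialize() and
| |  * apr_terminate().
| |  *
| |  *     apr_pool_initialize();
| |  *     apr_pool_initialize();    nested init: just bumps the counter
| |  *     apr_pool_terminate();     counter drops to 1, nothing destroyed
| |  *     apr_pool_terminate();     counter reaches 0, global_pool destroyed
| |  */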
1776 | | |
1777 | | |
1778 | | /* |
1779 | | * Memory allocation (debug) |
1780 | | */ |
1781 | | |
1782 | | static void *pool_alloc(apr_pool_t *pool, apr_size_t size) |
1783 | 1.02k | { |
1784 | 1.02k | debug_node_t *node; |
1785 | 1.02k | void *mem; |
1786 | | |
1787 | 1.02k | if ((mem = malloc(size)) == NULL) { |
1788 | 0 | if (pool->abort_fn) |
1789 | 0 | pool->abort_fn(APR_ENOMEM); |
1790 | |
|
1791 | 0 | return NULL; |
1792 | 0 | } |
1793 | | |
1794 | 1.02k | node = pool->nodes; |
1795 | 1.02k | if (node == NULL || node->index == 64) { |
1796 | 615 | if ((node = malloc(SIZEOF_DEBUG_NODE_T)) == NULL) { |
1797 | 0 | free(mem); |
1798 | 0 | if (pool->abort_fn) |
1799 | 0 | pool->abort_fn(APR_ENOMEM); |
1800 | |
|
1801 | 0 | return NULL; |
1802 | 0 | } |
1803 | | |
1804 | 615 | memset(node, 0, SIZEOF_DEBUG_NODE_T); |
1805 | | |
1806 | 615 | node->next = pool->nodes; |
1807 | 615 | pool->nodes = node; |
1808 | 615 | node->index = 0; |
1809 | 615 | } |
1810 | | |
1811 | 1.02k | node->beginp[node->index] = mem; |
1812 | 1.02k | node->endp[node->index] = (char *)mem + size; |
1813 | 1.02k | node->index++; |
1814 | | |
1815 | 1.02k | pool->stat_alloc++; |
1816 | 1.02k | pool->stat_total_alloc++; |
1817 | | |
1818 | 1.02k | return mem; |
1819 | 1.02k | } |
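| |
| | /* Illustrative note: in debug mode every allocation is an individual
| |  * malloc(), and the pool only keeps bookkeeping. Each debug_node_t
| |  * records up to 64 allocations as [beginp, endp) pairs; when the head
| |  * node is full, a fresh node is pushed onto pool->nodes. A pool that
| |  * has served 150 allocations therefore holds three nodes with
| |  * 64 + 64 + 22 entries, newest node first.
| |  */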
1820 | | |
1821 | | APR_DECLARE(void *) apr_palloc_debug(apr_pool_t *pool, apr_size_t size, |
1822 | | const char *file_line) |
1823 | 616 | { |
1824 | 616 | void *mem; |
1825 | | |
1826 | 616 | apr_pool_check_integrity(pool); |
1827 | | |
1828 | 616 | mem = pool_alloc(pool, size); |
1829 | | |
1830 | | #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALLOC) |
1831 | | apr_pool_log_event(pool, "PALLOC", file_line, 1); |
1832 | | #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALLOC) */ |
1833 | | |
1834 | 616 | return mem; |
1835 | 616 | } |
1836 | | |
1837 | | APR_DECLARE(void *) apr_pcalloc_debug(apr_pool_t *pool, apr_size_t size, |
1838 | | const char *file_line) |
1839 | 411 | { |
1840 | 411 | void *mem; |
1841 | | |
1842 | 411 | apr_pool_check_integrity(pool); |
1843 | | |
1844 | 411 | mem = pool_alloc(pool, size); |
1845 | 411 | memset(mem, 0, size); |
1846 | | |
1847 | | #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALLOC) |
1848 | | apr_pool_log_event(pool, "PCALLOC", file_line, 1); |
1849 | | #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALLOC) */ |
1850 | | |
1851 | 411 | return mem; |
1852 | 411 | } |
1853 | | |
1854 | | |
1855 | | /* |
1856 | | * Pool creation/destruction (debug) |
1857 | | */ |
1858 | | |
1859 | 1.64k | #define POOL_POISON_BYTE 'A' |
1860 | | |
1861 | | static void pool_clear_debug(apr_pool_t *pool, const char *file_line) |
1862 | 822 | { |
1863 | 822 | debug_node_t *node; |
1864 | 822 | apr_size_t index; |
1865 | | |
1866 | | /* Run pre destroy cleanups */ |
1867 | 822 | run_cleanups(&pool->pre_cleanups); |
1868 | 822 | pool->pre_cleanups = NULL; |
1869 | | |
1870 | | /* |
1871 | | * Now that we have given the pre-cleanups the chance to kill off any
1872 | | * threads using the pool, the owner must be correct. |
1873 | | */ |
1874 | 822 | apr_pool_check_owner(pool); |
1875 | | |
1876 | | /* Destroy the subpools. The subpools will detach themselves from |
1877 | | * this pool, thus this loop is safe and easy.
1878 | | */ |
1879 | 822 | while (pool->child) |
1880 | 0 | pool_destroy_debug(pool->child, file_line); |
1881 | | |
1882 | | /* Run cleanups */ |
1883 | 822 | run_cleanups(&pool->cleanups); |
1884 | 822 | pool->free_cleanups = NULL; |
1885 | 822 | pool->cleanups = NULL; |
1886 | | |
1887 | | /* If new child pools showed up, this is a reason to raise a flag */ |
1888 | 822 | if (pool->child) |
1889 | 0 | abort(); |
1890 | | |
1891 | | /* Free subprocesses */ |
1892 | 822 | free_proc_chain(pool->subprocesses); |
1893 | 822 | pool->subprocesses = NULL; |
1894 | | |
1895 | | /* Clear the user data. */ |
1896 | 822 | pool->user_data = NULL; |
1897 | | |
1898 | | /* Free the blocks, scribbling over them first to help highlight |
1899 | | * use-after-free issues. */ |
1900 | 1.43k | while ((node = pool->nodes) != NULL) { |
1901 | 615 | pool->nodes = node->next; |
1902 | | |
1903 | 1.64k | for (index = 0; index < node->index; index++) { |
1904 | 1.02k | memset(node->beginp[index], POOL_POISON_BYTE, |
1905 | 1.02k | (char *)node->endp[index] - (char *)node->beginp[index]); |
1906 | 1.02k | free(node->beginp[index]); |
1907 | 1.02k | } |
1908 | | |
1909 | 615 | memset(node, POOL_POISON_BYTE, SIZEOF_DEBUG_NODE_T); |
1910 | 615 | free(node); |
1911 | 615 | } |
1912 | | |
1913 | 822 | pool->stat_alloc = 0; |
1914 | 822 | pool->stat_clear++; |
1915 | | |
1916 | | #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) |
1917 | | apr_pool_log_event(pool, "CLEARED", file_line, 1); |
1918 | | #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) */ |
1919 | 822 | } |
1920 | | |
1921 | | APR_DECLARE(void) apr_pool_clear_debug(apr_pool_t *pool, |
1922 | | const char *file_line) |
1923 | 0 | { |
1924 | 0 | #if APR_HAS_THREADS |
1925 | 0 | apr_thread_mutex_t *mutex; |
1926 | 0 | #endif |
1927 | |
|
1928 | 0 | apr_pool_check_lifetime(pool); |
1929 | |
|
1930 | 0 | #if APR_HAS_THREADS |
1931 | | /* Lock the parent mutex before clearing so that if we have our |
1932 | | * own mutex it won't be accessed by apr_pool_walk_tree after |
1933 | | * it has been destroyed. |
1934 | | */ |
1935 | 0 | mutex = parent_lock(pool); |
1936 | 0 | #endif |
1937 | |
|
1938 | | #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) |
1939 | | apr_pool_log_event(pool, "CLEAR", file_line, 1); |
1940 | | #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) */ |
1941 | |
|
1942 | 0 | pool_clear_debug(pool, file_line); |
1943 | |
|
1944 | 0 | #if APR_HAS_THREADS |
1945 | | /* If we had our own mutex, it will have been destroyed by |
1946 | | * the registered cleanups. Recreate it. |
1947 | | */ |
1948 | 0 | if (mutex != pool->mutex) { |
1949 | | /* |
1950 | | * Prevent apr_palloc() in apr_thread_mutex_create() from trying to |
1951 | | * use the destroyed mutex. |
1952 | | */ |
1953 | 0 | pool->mutex = NULL; |
1954 | 0 | (void)apr_thread_mutex_create(&pool->mutex, |
1955 | 0 | APR_THREAD_MUTEX_NESTED, pool); |
1956 | 0 | } |
1957 | | |
1958 | | /* Unlock the mutex we obtained above */ |
1959 | 0 | parent_unlock(mutex); |
1960 | 0 | #endif /* APR_HAS_THREADS */ |
1961 | 0 | } |
1962 | | |
1963 | | static void pool_destroy_debug(apr_pool_t *pool, const char *file_line) |
1964 | 822 | { |
1965 | 822 | pool_clear_debug(pool, file_line); |
1966 | | |
1967 | | /* Remove the pool from the parent's child list */ |
1968 | 822 | if (pool->parent != NULL |
1969 | 822 | && (*pool->ref = pool->sibling) != NULL) { |
1970 | 0 | pool->sibling->ref = pool->ref; |
1971 | 0 | } |
1972 | | |
1973 | | /* Destroy the allocator if the pool owns it */ |
1974 | 822 | if (pool->allocator != NULL |
1975 | 822 | && apr_allocator_owner_get(pool->allocator) == pool) { |
1976 | 0 | apr_allocator_destroy(pool->allocator); |
1977 | 0 | } |
1978 | | |
1979 | | /* Free the pool itself */ |
1980 | 822 | free(pool); |
1981 | 822 | } |
1982 | | |
1983 | | APR_DECLARE(void) apr_pool_destroy_debug(apr_pool_t *pool, |
1984 | | const char *file_line) |
1985 | 822 | { |
1986 | 822 | #if APR_HAS_THREADS |
1987 | 822 | apr_thread_mutex_t *mutex; |
1988 | 822 | #endif |
1989 | | |
1990 | 822 | apr_pool_check_lifetime(pool); |
1991 | | |
1992 | 822 | if (pool->joined) { |
1993 | | /* Joined pools must not be explicitly destroyed; the caller |
1994 | | * has broken the guarantee. */ |
1995 | | #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) |
1996 | | apr_pool_log_event(pool, "LIFE", |
1997 | | __FILE__ ":apr_pool_destroy abort on joined", 0); |
1998 | | #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */ |
1999 | |
|
2000 | 0 | abort(); |
2001 | 0 | } |
2002 | | |
2003 | 822 | #if APR_HAS_THREADS |
2004 | | /* Lock the parent mutex before destroying so that it's not accessed |
2005 | | * concurrently by apr_pool_walk_tree. |
2006 | | */ |
2007 | 822 | mutex = parent_lock(pool); |
2008 | 822 | #endif |
2009 | | |
2010 | | #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) |
2011 | | apr_pool_log_event(pool, "DESTROY", file_line, 1); |
2012 | | #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) */ |
2013 | | |
2014 | 822 | pool_destroy_debug(pool, file_line); |
2015 | | |
2016 | 822 | #if APR_HAS_THREADS |
2017 | | /* Unlock the mutex we obtained above */ |
2018 | 822 | parent_unlock(mutex); |
2019 | 822 | #endif /* APR_HAS_THREADS */ |
2020 | 822 | } |
2021 | | |
2022 | | APR_DECLARE(apr_status_t) apr_pool_create_ex_debug(apr_pool_t **newpool, |
2023 | | apr_pool_t *parent, |
2024 | | apr_abortfunc_t abort_fn, |
2025 | | apr_allocator_t *allocator, |
2026 | | const char *file_line) |
2027 | 822 | { |
2028 | 822 | apr_pool_t *pool; |
2029 | | |
2030 | 822 | *newpool = NULL; |
2031 | | |
2032 | 822 | if (!parent) { |
2033 | 822 | parent = global_pool; |
2034 | 822 | } |
2035 | 0 | else { |
2036 | 0 | apr_pool_check_lifetime(parent); |
2037 | |
|
2038 | 0 | if (!allocator) |
2039 | 0 | allocator = parent->allocator; |
2040 | 0 | } |
2041 | | |
2042 | 822 | if (!abort_fn && parent) |
2043 | 411 | abort_fn = parent->abort_fn; |
2044 | | |
2045 | 822 | if ((pool = malloc(SIZEOF_POOL_T)) == NULL) { |
2046 | 0 | if (abort_fn) |
2047 | 0 | abort_fn(APR_ENOMEM); |
2048 | |
|
2049 | 0 | return APR_ENOMEM; |
2050 | 0 | } |
2051 | | |
2052 | 822 | memset(pool, 0, SIZEOF_POOL_T); |
2053 | | |
2054 | 822 | pool->allocator = allocator; |
2055 | 822 | pool->abort_fn = abort_fn; |
2056 | 822 | pool->tag = file_line; |
2057 | 822 | pool->file_line = file_line; |
2058 | | |
2059 | 822 | #if APR_HAS_THREADS |
2060 | 822 | if (parent == NULL || parent->allocator != allocator) { |
2061 | 411 | apr_status_t rv; |
2062 | | |
2063 | | /* No matter what the creation flags say, always create |
2064 | | * a lock. Without it, integrity_check and apr_pool_num_bytes
2065 | | * blow up (because they traverse pools' child lists that
2066 | | * possibly belong to another thread, in combination with |
2067 | | * the pool having no lock). However, this might actually |
2068 | | * hide problems like creating a child pool of a pool |
2069 | | * belonging to another thread. |
2070 | | */ |
2071 | 411 | if ((rv = apr_thread_mutex_create(&pool->mutex, |
2072 | 411 | APR_THREAD_MUTEX_NESTED, pool)) != APR_SUCCESS) { |
2073 | 0 | free(pool); |
2074 | 0 | return rv; |
2075 | 0 | } |
2076 | 411 | } |
2077 | 411 | else { |
2078 | 411 | pool->mutex = parent->mutex; |
2079 | 411 | } |
2080 | 822 | #endif /* APR_HAS_THREADS */ |
2081 | | |
2082 | 822 | if ((pool->parent = parent) != NULL) { |
2083 | 411 | pool_lock(parent); |
2084 | | |
2085 | 411 | if ((pool->sibling = parent->child) != NULL) |
2086 | 0 | pool->sibling->ref = &pool->sibling; |
2087 | | |
2088 | 411 | parent->child = pool; |
2089 | 411 | pool->ref = &parent->child; |
2090 | | |
2091 | 411 | pool_unlock(parent); |
2092 | 411 | } |
2093 | 411 | else { |
2094 | 411 | pool->sibling = NULL; |
2095 | 411 | pool->ref = NULL; |
2096 | 411 | } |
2097 | | |
2098 | 822 | #if APR_HAS_THREADS |
2099 | 822 | pool->owner = apr_os_thread_current(); |
2100 | 822 | #endif /* APR_HAS_THREADS */ |
2101 | | #ifdef NETWARE |
2102 | | pool->owner_proc = (apr_os_proc_t)getnlmhandle(); |
2103 | | #endif /* defined(NETWARE) */ |
2104 | | |
2105 | | #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) |
2106 | | apr_pool_log_event(pool, "CREATE", file_line, 1); |
2107 | | #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) */ |
2108 | | |
2109 | 822 | *newpool = pool; |
2110 | | |
2111 | 822 | return APR_SUCCESS; |
2112 | 822 | } |
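| |
| | /* Usage sketch (illustrative, not part of apr_pools.c): creating a
| |  * child pool and tearing it down. Variable names are hypothetical.
| |  *
| |  *     apr_pool_t *parent, *child;
| |  *
| |  *     apr_pool_create(&parent, NULL);      child of global_pool
| |  *     apr_pool_create(&child, parent);     shares parent's allocator
| |  *     ... allocate with apr_palloc()/apr_pcalloc() ...
| |  *     apr_pool_destroy(parent);            also destroys child
| |  */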
2113 | | |
2114 | | APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex_debug(apr_pool_t **newpool, |
2115 | | apr_abortfunc_t abort_fn, |
2116 | | apr_allocator_t *allocator, |
2117 | | const char *file_line) |
2118 | 0 | { |
2119 | 0 | apr_pool_t *pool; |
2120 | 0 | apr_allocator_t *pool_allocator; |
2121 | |
|
2122 | 0 | *newpool = NULL; |
2123 | |
|
2124 | 0 | if ((pool = malloc(SIZEOF_POOL_T)) == NULL) { |
2125 | 0 | if (abort_fn) |
2126 | 0 | abort_fn(APR_ENOMEM); |
2127 | |
|
2128 | 0 | return APR_ENOMEM; |
2129 | 0 | } |
2130 | | |
2131 | 0 | memset(pool, 0, SIZEOF_POOL_T); |
2132 | |
|
2133 | 0 | pool->abort_fn = abort_fn; |
2134 | 0 | pool->tag = file_line; |
2135 | 0 | pool->file_line = file_line; |
2136 | |
|
2137 | 0 | #if APR_HAS_THREADS |
2138 | 0 | { |
2139 | 0 | apr_status_t rv; |
2140 | | |
2141 | | /* No matter what the creation flags say, always create |
2142 | | * a lock. Without it integrity_check and apr_pool_num_bytes |
2143 | | * blow up (because they traverse pools child lists that |
2144 | | * possibly belong to another thread, in combination with |
2145 | | * the pool having no lock). However, this might actually |
2146 | | * hide problems like creating a child pool of a pool |
2147 | | * belonging to another thread. |
2148 | | */ |
2149 | 0 | if ((rv = apr_thread_mutex_create(&pool->mutex, |
2150 | 0 | APR_THREAD_MUTEX_NESTED, pool)) != APR_SUCCESS) { |
2151 | 0 | free(pool); |
2152 | 0 | return rv; |
2153 | 0 | } |
2154 | 0 | } |
2155 | 0 | #endif /* APR_HAS_THREADS */ |
2156 | | |
2157 | 0 | #if APR_HAS_THREADS |
2158 | 0 | pool->owner = apr_os_thread_current(); |
2159 | 0 | #endif /* APR_HAS_THREADS */ |
2160 | | #ifdef NETWARE |
2161 | | pool->owner_proc = (apr_os_proc_t)getnlmhandle(); |
2162 | | #endif /* defined(NETWARE) */ |
2163 | |
|
2164 | 0 | if ((pool_allocator = allocator) == NULL) { |
2165 | 0 | apr_status_t rv; |
2166 | 0 | if ((rv = apr_allocator_create(&pool_allocator)) != APR_SUCCESS) { |
2167 | 0 | if (abort_fn) |
2168 | 0 | abort_fn(rv); |
2169 | 0 | return rv; |
2170 | 0 | } |
2171 | 0 | pool_allocator->owner = pool; |
2172 | 0 | } |
2173 | 0 | pool->allocator = pool_allocator; |
2174 | |
|
2175 | | #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) |
2176 | | apr_pool_log_event(pool, "CREATEU", file_line, 1); |
2177 | | #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) */ |
2178 | |
|
2179 | 0 | *newpool = pool; |
2180 | |
|
2181 | 0 | return APR_SUCCESS; |
2182 | 0 | } |
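| |
| | /* Illustrative note: an "unmanaged" pool has no parent and is not
| |  * linked into the global pool tree, so it is not swept up when an
| |  * ancestor is cleared or destroyed; the caller must destroy it
| |  * explicitly with apr_pool_destroy(). If no allocator is supplied,
| |  * the pool creates one and owns it, as in the code above.
| |  */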
2183 | | |
2184 | | /* |
2185 | | * "Print" functions (debug) |
2186 | | */ |
2187 | | |
2188 | | struct psprintf_data { |
2189 | | apr_vformatter_buff_t vbuff; |
2190 | | char *mem; |
2191 | | apr_size_t size; |
2192 | | }; |
2193 | | |
2194 | | static int psprintf_flush(apr_vformatter_buff_t *vbuff) |
2195 | 0 | { |
2196 | 0 | struct psprintf_data *ps = (struct psprintf_data *)vbuff; |
2197 | 0 | apr_size_t size; |
2198 | |
|
2199 | 0 | size = ps->vbuff.curpos - ps->mem; |
2200 | |
|
2201 | 0 | ps->size <<= 1; |
2202 | 0 | if ((ps->mem = realloc(ps->mem, ps->size)) == NULL) |
2203 | 0 | return -1; |
2204 | | |
2205 | 0 | ps->vbuff.curpos = ps->mem + size; |
2206 | 0 | ps->vbuff.endpos = ps->mem + ps->size - 1; |
2207 | |
|
2208 | 0 | return 0; |
2209 | 0 | } |
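| |
| | /* Worked example (illustrative): apr_pvsprintf() below starts with a
| |  * 64-byte buffer, and each time apr_vformatter() runs out of room this
| |  * flush callback doubles it: 64 -> 128 -> 256 -> ... A 300-character
| |  * result therefore triggers three reallocs and ends up in a 512-byte
| |  * buffer before the terminating NUL is appended.
| |  */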
2210 | | |
2211 | | APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list ap) |
2212 | 0 | { |
2213 | 0 | struct psprintf_data ps; |
2214 | 0 | debug_node_t *node; |
2215 | |
|
2216 | 0 | apr_pool_check_integrity(pool); |
2217 | |
|
2218 | 0 | ps.size = 64; |
2219 | 0 | ps.mem = malloc(ps.size); |
2220 | 0 | ps.vbuff.curpos = ps.mem; |
2221 | | |
2222 | | /* Save a byte for the NUL terminator */ |
2223 | 0 | ps.vbuff.endpos = ps.mem + ps.size - 1; |
2224 | |
|
2225 | 0 | if (apr_vformatter(psprintf_flush, &ps.vbuff, fmt, ap) == -1) { |
2226 | 0 | if (pool->abort_fn) |
2227 | 0 | pool->abort_fn(APR_ENOMEM); |
2228 | |
|
2229 | 0 | return NULL; |
2230 | 0 | } |
2231 | | |
2232 | 0 | *ps.vbuff.curpos++ = '\0'; |
2233 | | |
2234 | | /* |
2235 | | * Link the node in |
2236 | | */ |
2237 | 0 | node = pool->nodes; |
2238 | 0 | if (node == NULL || node->index == 64) { |
2239 | 0 | if ((node = malloc(SIZEOF_DEBUG_NODE_T)) == NULL) { |
2240 | 0 | if (pool->abort_fn) |
2241 | 0 | pool->abort_fn(APR_ENOMEM); |
2242 | |
|
2243 | 0 | return NULL; |
2244 | 0 | } |
2245 | | |
2246 | 0 | node->next = pool->nodes; |
2247 | 0 | pool->nodes = node; |
2248 | 0 | node->index = 0; |
2249 | 0 | } |
2250 | | |
2251 | 0 | node->beginp[node->index] = ps.mem; |
2252 | 0 | node->endp[node->index] = ps.mem + ps.size; |
2253 | 0 | node->index++; |
2254 | |
|
2255 | 0 | return ps.mem; |
2256 | 0 | } |
2257 | | |
2258 | | |
2259 | | /* |
2260 | | * Debug functions |
2261 | | */ |
2262 | | |
2263 | | APR_DECLARE(void) apr_pool_join(apr_pool_t *p, apr_pool_t *sub) |
2264 | 0 | { |
2265 | 0 | #if APR_POOL_DEBUG |
2266 | 0 | if (sub->parent != p) { |
2267 | 0 | abort(); |
2268 | 0 | } |
2269 | 0 | sub->joined = p; |
2270 | 0 | #endif |
2271 | 0 | } |
2272 | | |
2273 | | static int pool_find(apr_pool_t *pool, void *data) |
2274 | 0 | { |
2275 | 0 | void **pmem = (void **)data; |
2276 | 0 | debug_node_t *node; |
2277 | 0 | apr_size_t index; |
2278 | |
|
2279 | 0 | node = pool->nodes; |
2280 | |
|
2281 | 0 | while (node) { |
2282 | 0 | for (index = 0; index < node->index; index++) { |
2283 | 0 | if (node->beginp[index] <= *pmem |
2284 | 0 | && node->endp[index] > *pmem) { |
2285 | 0 | *pmem = pool; |
2286 | 0 | return 1; |
2287 | 0 | } |
2288 | 0 | } |
2289 | | |
2290 | 0 | node = node->next; |
2291 | 0 | } |
2292 | | |
2293 | 0 | return 0; |
2294 | 0 | } |
2295 | | |
2296 | | APR_DECLARE(apr_pool_t *) apr_pool_find(const void *mem) |
2297 | 0 | { |
2298 | 0 | void *pool = (void *)mem; |
2299 | |
|
2300 | 0 | if (apr_pool_walk_tree(global_pool, pool_find, &pool)) |
2301 | 0 | return pool; |
2302 | | |
2303 | 0 | return NULL; |
2304 | 0 | } |
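| |
| | /* Usage sketch (illustrative): in debug mode, apr_pool_find() walks the
| |  * whole pool tree and reports which pool a pointer was allocated from.
| |  * some_pool below is hypothetical.
| |  *
| |  *     void *mem = apr_palloc(some_pool, 100);
| |  *     apr_pool_t *owner = apr_pool_find(mem);      owner == some_pool
| |  *     apr_pool_t *none  = apr_pool_find(&owner);   a stack address: NULL
| |  */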
2305 | | |
2306 | | static int pool_num_bytes(apr_pool_t *pool, void *data) |
2307 | 0 | { |
2308 | 0 | apr_size_t *psize = (apr_size_t *)data; |
2309 | 0 | debug_node_t *node; |
2310 | 0 | apr_size_t index; |
2311 | |
|
2312 | 0 | node = pool->nodes; |
2313 | |
|
2314 | 0 | while (node) { |
2315 | 0 | for (index = 0; index < node->index; index++) { |
2316 | 0 | *psize += (char *)node->endp[index] - (char *)node->beginp[index]; |
2317 | 0 | } |
2318 | |
|
2319 | 0 | node = node->next; |
2320 | 0 | } |
2321 | |
|
2322 | 0 | return 0; |
2323 | 0 | } |
2324 | | |
2325 | | APR_DECLARE(apr_size_t) apr_pool_num_bytes(apr_pool_t *pool, int recurse) |
2326 | 0 | { |
2327 | 0 | apr_size_t size = 0; |
2328 | |
|
2329 | 0 | if (!recurse) { |
2330 | 0 | pool_num_bytes(pool, &size); |
2331 | |
|
2332 | 0 | return size; |
2333 | 0 | } |
2334 | | |
2335 | 0 | apr_pool_walk_tree(pool, pool_num_bytes, &size); |
2336 | |
|
2337 | 0 | return size; |
2338 | 0 | } |
2339 | | |
2340 | | APR_DECLARE(void) apr_pool_lock(apr_pool_t *pool, int flag) |
2341 | 0 | { |
2342 | 0 | } |
2343 | | |
2344 | | #endif /* !APR_POOL_DEBUG */ |
2345 | | |
2346 | | #ifdef NETWARE |
2347 | | void netware_pool_proc_cleanup () |
2348 | | { |
2349 | | apr_pool_t *pool = global_pool->child; |
2350 | | apr_os_proc_t owner_proc = (apr_os_proc_t)getnlmhandle(); |
2351 | | |
2352 | | while (pool) { |
2353 | | if (pool->owner_proc == owner_proc) { |
2354 | | apr_pool_destroy (pool); |
2355 | | pool = global_pool->child; |
2356 | | } |
2357 | | else { |
2358 | | pool = pool->sibling; |
2359 | | } |
2360 | | } |
2361 | | return; |
2362 | | } |
2363 | | #endif /* defined(NETWARE) */ |
2364 | | |
2365 | | |
2366 | | /* |
2367 | | * "Print" functions (common) |
2368 | | */ |
2369 | | |
2370 | | APR_DECLARE_NONSTD(char *) apr_psprintf(apr_pool_t *p, const char *fmt, ...) |
2371 | 0 | { |
2372 | 0 | va_list ap; |
2373 | 0 | char *res; |
2374 | |
|
2375 | 0 | va_start(ap, fmt); |
2376 | 0 | res = apr_pvsprintf(p, fmt, ap); |
2377 | 0 | va_end(ap); |
2378 | 0 | return res; |
2379 | 0 | } |
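| |
| | /* Usage sketch (illustrative): apr_psprintf() is the pool-allocating
| |  * counterpart of sprintf(); the result lives as long as the pool.
| |  * dir, name and p below are hypothetical.
| |  *
| |  *     const char *path = apr_psprintf(p, "%s/%s.%d", dir, name, 42);
| |  *
| |  * No buffer sizing or freeing is needed; the string is reclaimed when
| |  * p is cleared or destroyed.
| |  */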
2380 | | |
2381 | | /* |
2382 | | * Pool Properties |
2383 | | */ |
2384 | | |
2385 | | APR_DECLARE(void) apr_pool_abort_set(apr_abortfunc_t abort_fn, |
2386 | | apr_pool_t *pool) |
2387 | 0 | { |
2388 | 0 | pool->abort_fn = abort_fn; |
2389 | 0 | } |
2390 | | |
2391 | | APR_DECLARE(apr_abortfunc_t) apr_pool_abort_get(apr_pool_t *pool) |
2392 | 0 | { |
2393 | 0 | return pool->abort_fn; |
2394 | 0 | } |
2395 | | |
2396 | | APR_DECLARE(apr_pool_t *) apr_pool_parent_get(apr_pool_t *pool) |
2397 | 0 | { |
2398 | | #ifdef NETWARE |
2399 | | /* On NetWare, don't return the global_pool, return the application pool |
2400 | | as the top most pool */ |
2401 | | if (pool->parent == global_pool) |
2402 | | return pool; |
2403 | | else |
2404 | | #endif |
2405 | 0 | return pool->parent; |
2406 | 0 | } |
2407 | | |
2408 | | APR_DECLARE(apr_allocator_t *) apr_pool_allocator_get(apr_pool_t *pool) |
2409 | 0 | { |
2410 | 0 | return pool->allocator; |
2411 | 0 | } |
2412 | | |
2413 | | /* return TRUE if a is an ancestor of b |
2414 | | * NULL is considered an ancestor of all pools |
2415 | | */ |
2416 | | APR_DECLARE(int) apr_pool_is_ancestor(apr_pool_t *a, apr_pool_t *b) |
2417 | 0 | { |
2418 | 0 | if (a == NULL) |
2419 | 0 | return 1; |
2420 | | |
2421 | 0 | #if APR_POOL_DEBUG |
2422 | | /* Find the pool with the longest lifetime guaranteed by the |
2423 | | * caller: */ |
2424 | 0 | while (a->joined) { |
2425 | 0 | a = a->joined; |
2426 | 0 | } |
2427 | 0 | #endif |
2428 | |
|
2429 | 0 | while (b) { |
2430 | 0 | if (a == b) |
2431 | 0 | return 1; |
2432 | | |
2433 | 0 | b = b->parent; |
2434 | 0 | } |
2435 | | |
2436 | 0 | return 0; |
2437 | 0 | } |
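| |
| | /* Worked example (illustrative): with the hypothetical chain
| |  * global_pool -> conf_pool -> req_pool,
| |  * apr_pool_is_ancestor(conf_pool, req_pool) == 1,
| |  * apr_pool_is_ancestor(req_pool, conf_pool) == 0, and
| |  * apr_pool_is_ancestor(NULL, req_pool) == 1 (NULL matches everything).
| |  */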
2438 | | |
2439 | | APR_DECLARE(void) apr_pool_tag(apr_pool_t *pool, const char *tag) |
2440 | 411 | { |
2441 | 411 | pool->tag = tag; |
2442 | 411 | } |
2443 | | |
2444 | | APR_DECLARE(const char *) apr_pool_get_tag(apr_pool_t *pool) |
2445 | 0 | { |
2446 | 0 | return pool->tag; |
2447 | 0 | } |
2448 | | |
2449 | | /* |
2450 | | * User data management |
2451 | | */ |
2452 | | |
2453 | | APR_DECLARE(apr_status_t) apr_pool_userdata_set(const void *data, const char *key, |
2454 | | apr_status_t (*cleanup) (void *), |
2455 | | apr_pool_t *pool) |
2456 | 0 | { |
2457 | 0 | #if APR_POOL_DEBUG |
2458 | 0 | apr_pool_check_integrity(pool); |
2459 | 0 | #endif /* APR_POOL_DEBUG */ |
2460 | |
|
2461 | 0 | if (pool->user_data == NULL) |
2462 | 0 | pool->user_data = apr_hash_make(pool); |
2463 | |
|
2464 | 0 | if (apr_hash_get(pool->user_data, key, APR_HASH_KEY_STRING) == NULL) { |
2465 | 0 | char *new_key = apr_pstrdup(pool, key); |
2466 | 0 | apr_hash_set(pool->user_data, new_key, APR_HASH_KEY_STRING, data); |
2467 | 0 | } |
2468 | 0 | else { |
2469 | 0 | apr_hash_set(pool->user_data, key, APR_HASH_KEY_STRING, data); |
2470 | 0 | } |
2471 | |
|
2472 | 0 | if (cleanup) |
2473 | 0 | apr_pool_cleanup_register(pool, data, cleanup, cleanup); |
2474 | |
|
2475 | 0 | return APR_SUCCESS; |
2476 | 0 | } |
2477 | | |
2478 | | APR_DECLARE(apr_status_t) apr_pool_userdata_setn(const void *data, |
2479 | | const char *key, |
2480 | | apr_status_t (*cleanup)(void *), |
2481 | | apr_pool_t *pool) |
2482 | 0 | { |
2483 | 0 | #if APR_POOL_DEBUG |
2484 | 0 | apr_pool_check_integrity(pool); |
2485 | 0 | #endif /* APR_POOL_DEBUG */ |
2486 | |
|
2487 | 0 | if (pool->user_data == NULL) |
2488 | 0 | pool->user_data = apr_hash_make(pool); |
2489 | |
|
2490 | 0 | apr_hash_set(pool->user_data, key, APR_HASH_KEY_STRING, data); |
2491 | |
|
2492 | 0 | if (cleanup) |
2493 | 0 | apr_pool_cleanup_register(pool, data, cleanup, cleanup); |
2494 | |
|
2495 | 0 | return APR_SUCCESS; |
2496 | 0 | } |
2497 | | |
2498 | | APR_DECLARE(apr_status_t) apr_pool_userdata_get(void **data, const char *key, |
2499 | | apr_pool_t *pool) |
2500 | 0 | { |
2501 | 0 | #if APR_POOL_DEBUG |
2502 | 0 | apr_pool_check_integrity(pool); |
2503 | 0 | #endif /* APR_POOL_DEBUG */ |
2504 | |
|
2505 | 0 | if (pool->user_data == NULL) { |
2506 | 0 | *data = NULL; |
2507 | 0 | } |
2508 | 0 | else { |
2509 | 0 | *data = apr_hash_get(pool->user_data, key, APR_HASH_KEY_STRING); |
2510 | 0 | } |
2511 | |
|
2512 | 0 | return APR_SUCCESS; |
2513 | 0 | } |
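| |
| | /* Usage sketch (illustrative, not part of apr_pools.c): attaching data
| |  * to a pool by key. "my_module_key", my_ctx_t and ctx are hypothetical.
| |  *
| |  *     my_ctx_t *ctx = apr_pcalloc(p, sizeof(*ctx));
| |  *     apr_pool_userdata_set(ctx, "my_module_key", apr_pool_cleanup_null, p);
| |  *
| |  *     void *got = NULL;
| |  *     apr_pool_userdata_get(&got, "my_module_key", p);    got == ctx
| |  *
| |  * apr_pool_userdata_set() duplicates the key into the pool the first
| |  * time it is seen; apr_pool_userdata_setn() stores the key pointer
| |  * as-is, so the caller must ensure it stays valid (e.g. a string
| |  * literal).
| |  */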
2514 | | |
2515 | | |
2516 | | /* |
2517 | | * Cleanup |
2518 | | */ |
2519 | | |
2520 | | struct cleanup_t { |
2521 | | struct cleanup_t *next; |
2522 | | const void *data; |
2523 | | apr_status_t (*plain_cleanup_fn)(void *data); |
2524 | | apr_status_t (*child_cleanup_fn)(void *data); |
2525 | | }; |
2526 | | |
2527 | | APR_DECLARE(void) apr_pool_cleanup_register(apr_pool_t *p, const void *data, |
2528 | | apr_status_t (*plain_cleanup_fn)(void *data), |
2529 | | apr_status_t (*child_cleanup_fn)(void *data)) |
2530 | 411 | { |
2531 | 411 | cleanup_t *c = NULL; |
2532 | | |
2533 | 411 | #if APR_POOL_DEBUG |
2534 | 411 | apr_pool_check_integrity(p); |
2535 | 411 | #endif /* APR_POOL_DEBUG */ |
2536 | | |
2537 | 411 | if (p != NULL) { |
2538 | 411 | if (p->free_cleanups) { |
2539 | | /* reuse a cleanup structure */ |
2540 | 0 | c = p->free_cleanups; |
2541 | 0 | p->free_cleanups = c->next; |
2542 | 411 | } else { |
2543 | 411 | c = apr_palloc(p, sizeof(cleanup_t)); |
2544 | 411 | } |
2545 | 411 | c->data = data; |
2546 | 411 | c->plain_cleanup_fn = plain_cleanup_fn; |
2547 | 411 | c->child_cleanup_fn = child_cleanup_fn; |
2548 | 411 | c->next = p->cleanups; |
2549 | 411 | p->cleanups = c; |
2550 | 411 | } |
2551 | | |
2552 | 411 | #if APR_POOL_DEBUG |
2553 | 411 | if (!c || !c->plain_cleanup_fn || !c->child_cleanup_fn) { |
2554 | 0 | abort(); |
2555 | 0 | } |
2556 | 411 | #endif /* APR_POOL_DEBUG */ |
2557 | 411 | } |
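| |
| | /* Usage sketch (illustrative): registering a cleanup that runs when the
| |  * pool is cleared or destroyed. thing and close_thing are hypothetical;
| |  * apr_pool_cleanup_null is the stock no-op used when no child-process
| |  * variant is needed.
| |  *
| |  *     apr_pool_cleanup_register(p, thing, close_thing, apr_pool_cleanup_null);
| |  *     ...
| |  *     apr_pool_cleanup_run(p, thing, close_thing);   unregister and run now
| |  */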
2558 | | |
2559 | | APR_DECLARE(void) apr_pool_pre_cleanup_register(apr_pool_t *p, const void *data, |
2560 | | apr_status_t (*plain_cleanup_fn)(void *data)) |
2561 | 0 | { |
2562 | 0 | cleanup_t *c = NULL; |
2563 | |
|
2564 | 0 | #if APR_POOL_DEBUG |
2565 | 0 | apr_pool_check_integrity(p); |
2566 | 0 | #endif /* APR_POOL_DEBUG */ |
2567 | |
|
2568 | 0 | if (p != NULL) { |
2569 | 0 | if (p->free_cleanups) { |
2570 | | /* reuse a cleanup structure */ |
2571 | 0 | c = p->free_cleanups; |
2572 | 0 | p->free_cleanups = c->next; |
2573 | 0 | } else { |
2574 | 0 | c = apr_palloc(p, sizeof(cleanup_t)); |
2575 | 0 | } |
2576 | 0 | c->data = data; |
2577 | 0 | c->plain_cleanup_fn = plain_cleanup_fn; |
2578 | 0 | c->next = p->pre_cleanups; |
2579 | 0 | p->pre_cleanups = c; |
2580 | 0 | } |
2581 | |
|
2582 | 0 | #if APR_POOL_DEBUG |
2583 | 0 | if (!c || !c->plain_cleanup_fn) { |
2584 | 0 | abort(); |
2585 | 0 | } |
2586 | 0 | #endif /* APR_POOL_DEBUG */ |
2587 | 0 | } |
2588 | | |
2589 | | APR_DECLARE(void) apr_pool_cleanup_kill(apr_pool_t *p, const void *data, |
2590 | | apr_status_t (*cleanup_fn)(void *)) |
2591 | 0 | { |
2592 | 0 | cleanup_t *c, **lastp; |
2593 | |
|
2594 | 0 | #if APR_POOL_DEBUG |
2595 | 0 | apr_pool_check_integrity(p); |
2596 | 0 | #endif /* APR_POOL_DEBUG */ |
2597 | |
|
2598 | 0 | if (p == NULL) |
2599 | 0 | return; |
2600 | | |
2601 | 0 | c = p->cleanups; |
2602 | 0 | lastp = &p->cleanups; |
2603 | 0 | while (c) { |
2604 | 0 | #if APR_POOL_DEBUG |
2605 | | /* Some cheap loop detection to catch a corrupt list: */ |
2606 | 0 | if (c == c->next |
2607 | 0 | || (c->next && c == c->next->next) |
2608 | 0 | || (c->next && c->next->next && c == c->next->next->next)) { |
2609 | 0 | abort(); |
2610 | 0 | } |
2611 | 0 | #endif |
2612 | | |
2613 | 0 | if (c->data == data && c->plain_cleanup_fn == cleanup_fn) { |
2614 | 0 | *lastp = c->next; |
2615 | | /* move to freelist */ |
2616 | 0 | c->next = p->free_cleanups; |
2617 | 0 | p->free_cleanups = c; |
2618 | 0 | break; |
2619 | 0 | } |
2620 | | |
2621 | 0 | lastp = &c->next; |
2622 | 0 | c = c->next; |
2623 | 0 | } |
2624 | | |
2625 | | /* Remove any pre-cleanup as well */ |
2626 | 0 | c = p->pre_cleanups; |
2627 | 0 | lastp = &p->pre_cleanups; |
2628 | 0 | while (c) { |
2629 | 0 | #if APR_POOL_DEBUG |
2630 | | /* Some cheap loop detection to catch a corrupt list: */ |
2631 | 0 | if (c == c->next |
2632 | 0 | || (c->next && c == c->next->next) |
2633 | 0 | || (c->next && c->next->next && c == c->next->next->next)) { |
2634 | 0 | abort(); |
2635 | 0 | } |
2636 | 0 | #endif |
2637 | | |
2638 | 0 | if (c->data == data && c->plain_cleanup_fn == cleanup_fn) { |
2639 | 0 | *lastp = c->next; |
2640 | | /* move to freelist */ |
2641 | 0 | c->next = p->free_cleanups; |
2642 | 0 | p->free_cleanups = c; |
2643 | 0 | break; |
2644 | 0 | } |
2645 | | |
2646 | 0 | lastp = &c->next; |
2647 | 0 | c = c->next; |
2648 | 0 | } |
2649 | |
|
2650 | 0 | } |
2651 | | |
2652 | | APR_DECLARE(void) apr_pool_child_cleanup_set(apr_pool_t *p, const void *data, |
2653 | | apr_status_t (*plain_cleanup_fn)(void *), |
2654 | | apr_status_t (*child_cleanup_fn)(void *)) |
2655 | 0 | { |
2656 | 0 | cleanup_t *c; |
2657 | |
|
2658 | 0 | #if APR_POOL_DEBUG |
2659 | 0 | apr_pool_check_integrity(p); |
2660 | 0 | #endif /* APR_POOL_DEBUG */ |
2661 | |
|
2662 | 0 | if (p == NULL) |
2663 | 0 | return; |
2664 | | |
2665 | 0 | c = p->cleanups; |
2666 | 0 | while (c) { |
2667 | 0 | if (c->data == data && c->plain_cleanup_fn == plain_cleanup_fn) { |
2668 | 0 | c->child_cleanup_fn = child_cleanup_fn; |
2669 | 0 | break; |
2670 | 0 | } |
2671 | | |
2672 | 0 | c = c->next; |
2673 | 0 | } |
2674 | 0 | } |
2675 | | |
2676 | | APR_DECLARE(apr_status_t) apr_pool_cleanup_run(apr_pool_t *p, void *data, |
2677 | | apr_status_t (*cleanup_fn)(void *)) |
2678 | 0 | { |
2679 | 0 | apr_pool_cleanup_kill(p, data, cleanup_fn); |
2680 | 0 | return (*cleanup_fn)(data); |
2681 | 0 | } |
2682 | | |
2683 | | static void run_cleanups(cleanup_t **cref) |
2684 | 1.64k | { |
2685 | 1.64k | cleanup_t *c = *cref; |
2686 | | |
2687 | 2.05k | while (c) { |
2688 | 411 | *cref = c->next; |
2689 | 411 | (*c->plain_cleanup_fn)((void *)c->data); |
2690 | 411 | c = *cref; |
2691 | 411 | } |
2692 | 1.64k | } |
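| |
| | /* Illustrative note: because apr_pool_cleanup_register() pushes new
| |  * entries onto the head of the list, run_cleanups() executes them in
| |  * LIFO order -- the most recently registered cleanup runs first, so
| |  * later-acquired resources are released before the ones they depend on.
| |  */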
2693 | | |
2694 | | #if !defined(WIN32) && !defined(OS2) |
2695 | | |
2696 | | static void run_child_cleanups(cleanup_t **cref) |
2697 | 0 | { |
2698 | 0 | cleanup_t *c = *cref; |
2699 | |
|
2700 | 0 | while (c) { |
2701 | 0 | *cref = c->next; |
2702 | 0 | (*c->child_cleanup_fn)((void *)c->data); |
2703 | 0 | c = *cref; |
2704 | 0 | } |
2705 | 0 | } |
2706 | | |
2707 | | static void cleanup_pool_for_exec(apr_pool_t *p) |
2708 | 0 | { |
2709 | 0 | run_child_cleanups(&p->cleanups); |
2710 | |
|
2711 | 0 | for (p = p->child; p; p = p->sibling) |
2712 | 0 | cleanup_pool_for_exec(p); |
2713 | 0 | } |
2714 | | |
2715 | | APR_DECLARE(void) apr_pool_cleanup_for_exec(void) |
2716 | 0 | { |
2717 | 0 | cleanup_pool_for_exec(global_pool); |
2718 | 0 | } |
2719 | | |
2720 | | #else /* !defined(WIN32) && !defined(OS2) */ |
2721 | | |
2722 | | APR_DECLARE(void) apr_pool_cleanup_for_exec(void) |
2723 | | { |
2724 | | /* |
2725 | | * Don't need to do anything on NT or OS/2, because |
2726 | | * these platforms will spawn the new process - not |
2727 | | * fork for exec. All handles that are not inheritable
2728 | | * will be automagically closed. The only problem is
2729 | | * with file handles that are open, but there isn't |
2730 | | * much that can be done about that (except if the |
2731 | | * child decides to go out and close them, or the |
2732 | | * developer quits opening them shared) |
2733 | | */ |
2734 | | return; |
2735 | | } |
2736 | | |
2737 | | #endif /* !defined(WIN32) && !defined(OS2) */ |
2738 | | |
2739 | | APR_DECLARE_NONSTD(apr_status_t) apr_pool_cleanup_null(void *data) |
2740 | 0 | { |
2741 | | /* do nothing cleanup routine */ |
2742 | 0 | return APR_SUCCESS; |
2743 | 0 | } |
2744 | | |
2745 | | /* Subprocesses don't use the generic cleanup interface because |
2746 | | * we don't want multiple subprocesses to result in multiple |
2747 | | * three-second pauses; the subprocesses have to be "freed" all |
2748 | | * at once. If other resources are introduced with the same property, |
2749 | | * we might want to fold support for that into the generic interface. |
2750 | | * For now, it's a special case. |
2751 | | */ |
2752 | | APR_DECLARE(void) apr_pool_note_subprocess(apr_pool_t *pool, apr_proc_t *proc, |
2753 | | apr_kill_conditions_e how) |
2754 | 0 | { |
2755 | 0 | struct process_chain *pc = apr_palloc(pool, sizeof(struct process_chain)); |
2756 | |
|
2757 | 0 | pc->proc = proc; |
2758 | 0 | pc->kill_how = how; |
2759 | 0 | pc->next = pool->subprocesses; |
2760 | 0 | pool->subprocesses = pc; |
2761 | 0 | } |
2762 | | |
2763 | | static void free_proc_chain(struct process_chain *procs) |
2764 | 822 | { |
2765 | | /* Dispose of the subprocesses we've spawned off in the course of |
2766 | | * whatever it was we're cleaning up now. This may involve killing |
2767 | | * some of them off... |
2768 | | */ |
2769 | 822 | struct process_chain *pc; |
2770 | 822 | int need_timeout = 0; |
2771 | 822 | apr_time_t timeout_interval; |
2772 | | |
2773 | 822 | if (!procs) |
2774 | 822 | return; /* No work. Whew! */ |
2775 | | |
2776 | | /* First, check to see if we need to do the SIGTERM, sleep, SIGKILL |
2777 | | * dance with any of the processes we're cleaning up. If we've got |
2778 | | * any kill-on-sight subprocesses, ditch them now as well, so they |
2779 | | * don't waste any more cycles doing whatever it is that they shouldn't |
2780 | | * be doing anymore. |
2781 | | */ |
2782 | | |
2783 | 0 | #ifndef NEED_WAITPID |
2784 | | /* Pick up all defunct processes */ |
2785 | 0 | for (pc = procs; pc; pc = pc->next) { |
2786 | 0 | if (apr_proc_wait(pc->proc, NULL, NULL, APR_NOWAIT) != APR_CHILD_NOTDONE) |
2787 | 0 | pc->kill_how = APR_KILL_NEVER; |
2788 | 0 | } |
2789 | 0 | #endif /* !defined(NEED_WAITPID) */ |
2790 | |
|
2791 | 0 | for (pc = procs; pc; pc = pc->next) { |
2792 | 0 | #ifndef WIN32 |
2793 | 0 | if ((pc->kill_how == APR_KILL_AFTER_TIMEOUT) |
2794 | 0 | || (pc->kill_how == APR_KILL_ONLY_ONCE)) { |
2795 | | /* |
2796 | | * Subprocess may be dead already. Only need the timeout if not. |
2797 | | * Note: apr_proc_kill on Windows is TerminateProcess(), which is |
2798 | | * similar to a SIGKILL, so always give the process a timeout |
2799 | | * under Windows before killing it. |
2800 | | */ |
2801 | 0 | if (apr_proc_kill(pc->proc, SIGTERM) == APR_SUCCESS) |
2802 | 0 | need_timeout = 1; |
2803 | 0 | } |
2804 | 0 | else if (pc->kill_how == APR_KILL_ALWAYS) { |
2805 | | #else /* WIN32 knows only one fast, clean method of killing processes today */ |
2806 | | if (pc->kill_how != APR_KILL_NEVER) { |
2807 | | need_timeout = 1; |
2808 | | pc->kill_how = APR_KILL_ALWAYS; |
2809 | | #endif |
2810 | 0 | apr_proc_kill(pc->proc, SIGKILL); |
2811 | 0 | } |
2812 | 0 | } |
2813 | | |
2814 | | /* Sleep only if we have to. The sleep algorithm grows |
2815 | | * by a factor of two on each iteration. TIMEOUT_INTERVAL |
2816 | | * is equal to TIMEOUT_USECS / 64. |
2817 | | */ |
2818 | 0 | if (need_timeout) { |
2819 | 0 | timeout_interval = TIMEOUT_INTERVAL; |
2820 | 0 | apr_sleep(timeout_interval); |
2821 | |
|
2822 | 0 | do { |
2823 | | /* check the status of the subprocesses */ |
2824 | 0 | need_timeout = 0; |
2825 | 0 | for (pc = procs; pc; pc = pc->next) { |
2826 | 0 | if (pc->kill_how == APR_KILL_AFTER_TIMEOUT) { |
2827 | 0 | if (apr_proc_wait(pc->proc, NULL, NULL, APR_NOWAIT) |
2828 | 0 | == APR_CHILD_NOTDONE) |
2829 | 0 | need_timeout = 1; /* subprocess is still active */ |
2830 | 0 | else |
2831 | 0 | pc->kill_how = APR_KILL_NEVER; /* subprocess has exited */ |
2832 | 0 | } |
2833 | 0 | } |
2834 | 0 | if (need_timeout) { |
2835 | 0 | if (timeout_interval >= TIMEOUT_USECS) { |
2836 | 0 | break; |
2837 | 0 | } |
2838 | 0 | apr_sleep(timeout_interval); |
2839 | 0 | timeout_interval *= 2; |
2840 | 0 | } |
2841 | 0 | } while (need_timeout); |
2842 | 0 | } |
2843 | | |
2844 | | /* OK, the scripts we just timed out for have had a chance to clean up |
2845 | | * --- now, just get rid of them, and also clean up the system accounting |
2846 | | * goop... |
2847 | | */ |
2848 | 0 | for (pc = procs; pc; pc = pc->next) { |
2849 | 0 | if (pc->kill_how == APR_KILL_AFTER_TIMEOUT) |
2850 | 0 | apr_proc_kill(pc->proc, SIGKILL); |
2851 | 0 | } |
2852 | | |
2853 | | /* Now wait for all the signaled processes to die */ |
2854 | 0 | for (pc = procs; pc; pc = pc->next) { |
2855 | 0 | if (pc->kill_how != APR_KILL_NEVER) |
2856 | 0 | (void)apr_proc_wait(pc->proc, NULL, NULL, APR_WAIT); |
2857 | 0 | } |
2858 | 0 | } |
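| |
| | /* Worked example (illustrative): with TIMEOUT_INTERVAL defined as
| |  * TIMEOUT_USECS / 64, the sleeps above add up to
| |  * T/64 + T/64 + T/32 + T/16 + T/8 + T/4 + T/2, i.e. roughly
| |  * TIMEOUT_USECS in total, before SIGKILL is sent to any
| |  * APR_KILL_AFTER_TIMEOUT subprocess that is still running -- the
| |  * "three-second pauses" mentioned in the comment above.
| |  */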
2859 | | |
2860 | | #if !APR_POOL_DEBUG |
2861 | | |
2862 | | /* |
2863 | | * Pool creation/destruction stubs, for people who are running |
2864 | | * mixed release/debug environments.
2865 | | */ |
2866 | | |
2867 | | APR_DECLARE(void *) apr_palloc_debug(apr_pool_t *pool, apr_size_t size, |
2868 | | const char *file_line) |
2869 | | { |
2870 | | return apr_palloc(pool, size); |
2871 | | } |
2872 | | |
2873 | | APR_DECLARE(void *) apr_pcalloc_debug(apr_pool_t *pool, apr_size_t size, |
2874 | | const char *file_line) |
2875 | | { |
2876 | | return apr_pcalloc(pool, size); |
2877 | | } |
2878 | | |
2879 | | APR_DECLARE(void) apr_pool_clear_debug(apr_pool_t *pool, |
2880 | | const char *file_line) |
2881 | | { |
2882 | | apr_pool_clear(pool); |
2883 | | } |
2884 | | |
2885 | | APR_DECLARE(void) apr_pool_destroy_debug(apr_pool_t *pool, |
2886 | | const char *file_line) |
2887 | | { |
2888 | | apr_pool_destroy(pool); |
2889 | | } |
2890 | | |
2891 | | APR_DECLARE(apr_status_t) apr_pool_create_ex_debug(apr_pool_t **newpool, |
2892 | | apr_pool_t *parent, |
2893 | | apr_abortfunc_t abort_fn, |
2894 | | apr_allocator_t *allocator, |
2895 | | const char *file_line) |
2896 | | { |
2897 | | return apr_pool_create_ex(newpool, parent, abort_fn, allocator); |
2898 | | } |
2899 | | |
2900 | | APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex_debug(apr_pool_t **newpool, |
2901 | | apr_abortfunc_t abort_fn, |
2902 | | apr_allocator_t *allocator, |
2903 | | const char *file_line) |
2904 | | { |
2905 | | return apr_pool_create_unmanaged_ex(newpool, abort_fn, allocator); |
2906 | | } |
2907 | | |
2908 | | /* |
2909 | | * Other stubs, for people who are running |
2910 | | * mixed release/debug environments.
2911 | | */ |
2912 | | |
2913 | | APR_DECLARE(void) apr_pool_owner_set(apr_pool_t *pool, apr_uint32_t flags) |
2914 | | { |
2915 | | } |
2916 | | |
2917 | | APR_DECLARE(void) apr_pool_join(apr_pool_t *p, apr_pool_t *sub) |
2918 | | { |
2919 | | } |
2920 | | |
2921 | | APR_DECLARE(apr_pool_t *) apr_pool_find(const void *mem) |
2922 | | { |
2923 | | return NULL; |
2924 | | } |
2925 | | |
2926 | | APR_DECLARE(apr_size_t) apr_pool_num_bytes(apr_pool_t *pool, int recurse) |
2927 | | { |
2928 | | return 0; |
2929 | | } |
2930 | | |
2931 | | APR_DECLARE(void) apr_pool_lock(apr_pool_t *pool, int flag) |
2932 | | { |
2933 | | } |
2934 | | |
2935 | | #else /* APR_POOL_DEBUG */ |
2936 | | |
2937 | | #undef apr_palloc |
2938 | | APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size); |
2939 | | |
2940 | | APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size) |
2941 | 0 | { |
2942 | 0 | return apr_palloc_debug(pool, size, "undefined"); |
2943 | 0 | } |
2944 | | |
2945 | | #undef apr_pcalloc |
2946 | | APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size); |
2947 | | |
2948 | | APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size) |
2949 | 0 | { |
2950 | 0 | return apr_pcalloc_debug(pool, size, "undefined"); |
2951 | 0 | } |
2952 | | |
2953 | | #undef apr_pool_clear |
2954 | | APR_DECLARE(void) apr_pool_clear(apr_pool_t *pool); |
2955 | | |
2956 | | APR_DECLARE(void) apr_pool_clear(apr_pool_t *pool) |
2957 | 0 | { |
2958 | 0 | apr_pool_clear_debug(pool, "undefined"); |
2959 | 0 | } |
2960 | | |
2961 | | #undef apr_pool_destroy |
2962 | | APR_DECLARE(void) apr_pool_destroy(apr_pool_t *pool); |
2963 | | |
2964 | | APR_DECLARE(void) apr_pool_destroy(apr_pool_t *pool) |
2965 | 411 | { |
2966 | 411 | apr_pool_destroy_debug(pool, "undefined"); |
2967 | 411 | } |
2968 | | |
2969 | | #undef apr_pool_create_ex |
2970 | | APR_DECLARE(apr_status_t) apr_pool_create_ex(apr_pool_t **newpool, |
2971 | | apr_pool_t *parent, |
2972 | | apr_abortfunc_t abort_fn, |
2973 | | apr_allocator_t *allocator); |
2974 | | |
2975 | | APR_DECLARE(apr_status_t) apr_pool_create_ex(apr_pool_t **newpool, |
2976 | | apr_pool_t *parent, |
2977 | | apr_abortfunc_t abort_fn, |
2978 | | apr_allocator_t *allocator) |
2979 | 411 | { |
2980 | 411 | return apr_pool_create_ex_debug(newpool, parent, |
2981 | 411 | abort_fn, allocator, |
2982 | 411 | "undefined"); |
2983 | 411 | } |
2984 | | |
2985 | | #undef apr_pool_create_unmanaged_ex |
2986 | | APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex(apr_pool_t **newpool, |
2987 | | apr_abortfunc_t abort_fn, |
2988 | | apr_allocator_t *allocator); |
2989 | | |
2990 | | APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex(apr_pool_t **newpool, |
2991 | | apr_abortfunc_t abort_fn, |
2992 | | apr_allocator_t *allocator) |
2993 | 0 | { |
2994 | 0 | return apr_pool_create_unmanaged_ex_debug(newpool, abort_fn, |
2995 | 0 | allocator, "undefined"); |
2996 | 0 | } |
2997 | | |
2998 | | #endif /* APR_POOL_DEBUG */ |