Coverage Report

Created: 2025-12-20 06:38

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/proftpd/src/pool.c
Line
Count
Source
1
/*
2
 * ProFTPD - FTP server daemon
3
 * Copyright (c) 1997, 1998 Public Flood Software
4
 * Copyright (c) 1999, 2000 MacGyver aka Habeeb J. Dihu <macgyver@tos.net>
5
 * Copyright (c) 2001-2025 The ProFTPD Project team
6
 *
7
 * This program is free software; you can redistribute it and/or modify
8
 * it under the terms of the GNU General Public License as published by
9
 * the Free Software Foundation; either version 2 of the License, or
10
 * (at your option) any later version.
11
 *
12
 * This program is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15
 * GNU General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU General Public License
18
 * along with this program; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA.
20
 *
21
 * As a special exemption, Public Flood Software/MacGyver aka Habeeb J. Dihu
22
 * and other respective copyright holders give permission to link this program
23
 * with OpenSSL, and distribute the resulting executable, without including
24
 * the source code for OpenSSL in the source distribution.
25
 */
26
27
/* Resource allocation code */
28
29
#include "conf.h"
30
31
/* Manage free storage blocks */
32
33
/* Union whose size and alignment are at least as strict as any scalar
 * type's; its size defines the allocation granularity (CLICK_SZ).
 */
union align {
  char *cp;          /* object pointer alignment */
  void (*f)(void);   /* function pointer alignment */
  long l;            /* integer alignment */
  FILE *fp;
  double d;          /* floating-point alignment */
};
40
41
18.2k
#define CLICK_SZ (sizeof(union align))
42
43
/* Header prepended to every allocated block.  The union with `align`
 * (and the LP64 padding) guarantees that the memory immediately after
 * the header is suitably aligned for any type.
 */
union block_hdr {
  union align a;

  /* Padding */
#if defined(_LP64) || defined(__LP64__)
  char pad[32];
#endif

  /* Actual header */
  struct {
    void *endp;             /* one past the last usable byte of the block */
    union block_hdr *next;  /* next block in a pool's chain, or free list */
    void *first_avail;      /* start of the unallocated space */
  } h;
};
58
59
static union block_hdr *block_freelist = NULL;
60
61
/* Statistics */
62
static unsigned int stat_malloc = 0;  /* incr when malloc required */
63
static unsigned int stat_freehit = 0; /* incr when freelist used */
64
65
static const char *trace_channel = "pool";
66
67
/* Debug flags */
68
static int debug_flags = 0;
69
70
#ifdef PR_USE_DEVEL
71
static void oom_printf(const char *fmt, ...) {
72
  char buf[PR_TUNABLE_BUFFER_SIZE];
73
  va_list msg;
74
75
  memset(buf, '\0', sizeof(buf));
76
77
  va_start(msg, fmt);
78
  pr_vsnprintf(buf, sizeof(buf), fmt, msg);
79
  va_end(msg);
80
81
  buf[sizeof(buf)-1] = '\0';
82
  fprintf(stderr, "%s\n", buf);
83
}
84
#endif /* PR_USE_DEVEL */
85
86
/* Lowest level memory allocation functions
87
 */
88
89
0
/* Fatal out-of-memory handler: log an alert, optionally dump the pool
 * allocations (devel builds with the OOM debug flag set), then exit.
 * Never returns.
 */
static void null_alloc(void) {
  pr_log_pri(PR_LOG_ALERT, "Out of memory!");
#ifdef PR_USE_DEVEL
  if (debug_flags & PR_POOL_DEBUG_FL_OOM_DUMP_POOLS) {
    pr_pool_debug_memory(oom_printf);
  }
#endif

  exit(1);
}
99
100
2
/* malloc(3) wrapper that never returns NULL: both a zero-length request
 * and an allocation failure are treated as fatal via null_alloc().
 */
static void *smalloc(size_t size) {
  void *ptr;

  /* Avoid zero-length malloc(); on non-POSIX systems, the behavior is
   * not dependable.  And on POSIX systems, malloc(3) might still return
   * a "unique pointer" for a zero-length allocation (or NULL).
   *
   * Either way, a zero-length allocation request here means that someone
   * is doing something they should not be doing.
   */
  if (size == 0) {
    null_alloc();
  }

  ptr = malloc(size);
  if (ptr == NULL) {
    null_alloc();
  }

  return ptr;
}
121
122
/* Grab a completely new block from the system pool.  Relies on malloc()
123
 * to return truly aligned memory.
124
 */
125
2
/* Grab a brand-new block from the system, sized to hold `size` payload
 * bytes after the header.  Relies on malloc() returning aligned memory.
 */
static union block_hdr *malloc_block(size_t size) {
  union block_hdr *blok;

  blok = smalloc(size + sizeof(union block_hdr));
  blok->h.next = NULL;
  blok->h.first_avail = (char *) (blok + 1);
  blok->h.endp = ((char *) blok->h.first_avail) + size;

  return blok;
}
135
136
/* Devel-build sanity check: scan the free list `free_blk` for `blok`, and
 * abort the process if found (i.e. the block is being freed twice).  In
 * non-devel builds this is a no-op.
 */
static void chk_on_blk_list(union block_hdr *blok, union block_hdr *free_blk,
    const char *pool_tag) {

#ifdef PR_USE_DEVEL
  /* Debug code */

  while (free_blk) {
    if (free_blk != blok) {
      free_blk = free_blk->h.next;
      continue;
    }

    pr_log_pri(PR_LOG_WARNING, "fatal: DEBUG: Attempt to free already free "
     "block in pool '%s'", pool_tag ? pool_tag : "<unnamed>");
    exit(1);
  }
#endif /* PR_USE_DEVEL */
}
154
155
/* Free a chain of blocks -- _must_ call with alarms blocked. */
156
157
10.9k
static void free_blocks(union block_hdr *blok, const char *pool_tag) {
  /* Puts new blocks at head of block list, point next pointer of
   * last block in chain to free blocks we already had.
   *
   * Note: blocks are never returned to the system here; the whole chain
   * is pushed onto the global block_freelist for reuse, with each
   * block's first_avail rewound so its full payload is available again.
   */

  union block_hdr *old_free_list = block_freelist;

  if (blok == NULL) {
    /* Don't free an empty pool. */
    return;
  }

  block_freelist = blok;

  /* Adjust first_avail pointers */

  while (blok->h.next) {
    /* Devel builds abort here on a double free of this block. */
    chk_on_blk_list(blok, old_free_list, pool_tag);
    blok->h.first_avail = (char *) (blok + 1);
    blok = blok->h.next;
  }

  /* Last block of the freed chain: rewind it too, then splice the
   * previous free list onto its tail.
   */
  chk_on_blk_list(blok, old_free_list, pool_tag);
  blok->h.first_avail = (char *) (blok + 1);
  blok->h.next = old_free_list;
}
183
184
/* Get a new block, from the free list if possible, otherwise malloc a new
185
 * one.  minsz is the requested size of the block to be allocated.
186
 * If exact is TRUE, then minsz is the exact size of the allocated block;
187
 * otherwise, the allocated size will be rounded up from minsz to the nearest
188
 * multiple of BLOCK_MINFREE.
189
 *
190
 * Important: BLOCK ALARMS BEFORE CALLING
191
 */
192
193
5.46k
static union block_hdr *new_block(int minsz, int exact) {
  union block_hdr **lastptr = &block_freelist;
  union block_hdr *blok = block_freelist;

  if (!exact) {
    /* Round minsz up to the nearest multiple of BLOCK_MINFREE. */
    minsz = 1 + ((minsz - 1) / BLOCK_MINFREE);
    minsz *= BLOCK_MINFREE;
  }

  /* Check if we have anything of the requested size on our free list first...
   */
  while (blok) {
    if (minsz <= ((char *) blok->h.endp - (char *) blok->h.first_avail)) {
      /* Big enough: unlink it from the free list and hand it out. */
      *lastptr = blok->h.next;
      blok->h.next = NULL;

      stat_freehit++;
      return blok;
    }

    lastptr = &blok->h.next;
    blok = blok->h.next;
  }

  /* Nope...damn.  Have to malloc() a new one. */
  stat_malloc++;
  return malloc_block(minsz);
}
221
222
struct cleanup;
223
224
static void run_cleanups(struct cleanup *c);
225
226
/* Pool internal and management */
227
228
struct pool_rec {
229
  union block_hdr *first;
230
  union block_hdr *last;
231
  struct cleanup *cleanups;
232
  struct pool_rec *sub_pools;
233
  struct pool_rec *sub_next;
234
  struct pool_rec *sub_prev;
235
  struct pool_rec *parent;
236
  char *free_first_avail;
237
  const char *tag;
238
};
239
240
pool *permanent_pool = NULL;
241
pool *global_config_pool = NULL;
242
243
/* Each pool structure is allocated in the start of it's own first block,
244
 * so there is a need to know how many bytes that is (once properly
245
 * aligned).
246
 */
247
248
5.46k
#define POOL_HDR_CLICKS (1 + ((sizeof(struct pool_rec) - 1) / CLICK_SZ))
249
5.46k
#define POOL_HDR_BYTES (POOL_HDR_CLICKS * CLICK_SZ)
250
251
0
static unsigned long blocks_in_block_list(union block_hdr *blok) {
252
0
  unsigned long count = 0;
253
254
0
  while (blok) {
255
0
    count++;
256
0
    blok = blok->h.next;
257
0
  }
258
259
0
  return count;
260
0
}
261
262
0
static unsigned long bytes_in_block_list(union block_hdr *blok) {
263
0
  unsigned long size = 0;
264
265
0
  while (blok) {
266
0
    size += ((char *) blok->h.endp - (char *) (blok + 1));
267
0
    blok = blok->h.next;
268
0
  }
269
270
0
  return size;
271
0
}
272
273
0
/* Recursively count all subpools beneath the given pool (the pool itself
 * is not counted).
 */
static unsigned int subpools_in_pool(pool *p) {
  unsigned int nsubpools = 0;
  pool *sub;

  if (p->sub_pools == NULL) {
    return 0;
  }

  for (sub = p->sub_pools; sub != NULL; sub = sub->sub_next) {
    /* One for this subpool, plus everything beneath it. */
    nsubpools += subpools_in_pool(sub) + 1;
  }

  return nsubpools;
}
288
289
/* Visit all pools, starting with the top-level permanent pool, walking the
290
 * hierarchy.
291
 */
292
static unsigned long visit_pools(pool *p, unsigned long level,
    void (*visit)(const pr_pool_info_t *, void *), void *user_data) {
  unsigned long total_bytes = 0;

  if (p == NULL) {
    return 0;
  }

  /* Visit each sibling at this level, then recurse into its children;
   * `level` records the depth for indentation purposes.
   */
  for (; p; p = p->sub_next) {
    unsigned long byte_count = 0, block_count = 0;
    unsigned int subpool_count = 0;
    pr_pool_info_t pinfo;

    byte_count = bytes_in_block_list(p->first);
    block_count = blocks_in_block_list(p->first);
    subpool_count = subpools_in_pool(p);

    total_bytes += byte_count;

    memset(&pinfo, 0, sizeof(pinfo));
    pinfo.have_pool_info = TRUE;
    pinfo.tag = p->tag;
    pinfo.ptr = p;
    pinfo.byte_count = byte_count;
    pinfo.block_count = block_count;
    pinfo.subpool_count = subpool_count;
    pinfo.level = level;

    visit(&pinfo, user_data);

    /* Recurse */
    if (p->sub_pools) {
      total_bytes += visit_pools(p->sub_pools, level + 1, visit, user_data);
    }
  }

  /* Byte count of this subtree; callers accumulate it into the total. */
  return total_bytes;
}
330
331
0
static void pool_printf(const char *fmt, ...) {
332
0
  char buf[PR_TUNABLE_BUFFER_SIZE];
333
0
  va_list msg;
334
335
0
  memset(buf, '\0', sizeof(buf));
336
337
0
  va_start(msg, fmt);
338
0
  pr_vsnprintf(buf, sizeof(buf), fmt, msg);
339
0
  va_end(msg);
340
341
0
  buf[sizeof(buf)-1] = '\0';
342
0
  pr_trace_msg(trace_channel, 5, "%s", buf);
343
0
}
344
345
0
/* Visitor callback used by pr_pool_debug_memory(): formats the given pool
 * info record via the debugf function passed as user_data.
 */
static void pool_visitf(const pr_pool_info_t *pinfo, void *user_data) {
  void (*debugf)(const char *, ...) = user_data;

  if (pinfo->have_pool_info) {

    /* The emitted message is:
     *
     *  <pool-tag> [pool-ptr] (n B, m L, r P)
     *
     * where n is the number of bytes (B), m is the number of allocated blocks
     * in the pool list (L), and r is the number of sub-pools (P).
     */

    if (pinfo->level == 0) {
      debugf("%s [%p] (%lu B, %lu L, %u P)",
        pinfo->tag ? pinfo->tag : "<unnamed>", pinfo->ptr,
        pinfo->byte_count, pinfo->block_count, pinfo->subpool_count);

    } else {
      char indent_text[80] = "";

      /* Indent nested pools by 3 spaces per level (beyond the first),
       * capped at the size of indent_text.
       */
      if (pinfo->level > 1) {
        memset(indent_text, ' ', sizeof(indent_text)-1);

        if ((pinfo->level - 1) * 3 >= sizeof(indent_text)) {
          /* Too deep to indent proportionally; use the full buffer. */
          indent_text[sizeof(indent_text)-1] = 0;

        } else {
          indent_text[(pinfo->level - 1) * 3] = '\0';
        }
      }

      debugf("%s + %s [%p] (%lu B, %lu L, %u P)", indent_text,
        pinfo->tag ? pinfo->tag : "<unnamed>", pinfo->ptr,
        pinfo->byte_count, pinfo->block_count, pinfo->subpool_count);
    }
  }

  if (pinfo->have_freelist_info) {
    debugf("Free block list: %lu bytes", pinfo->freelist_byte_count);
  }

  if (pinfo->have_total_info) {
    debugf("Total %lu bytes allocated", pinfo->total_byte_count);
    debugf("%lu blocks allocated", pinfo->total_blocks_allocated);
    debugf("%lu blocks reused", pinfo->total_blocks_reused);
  }
}
393
394
0
void pr_pool_debug_memory(void (*debugf)(const char *, ...)) {
395
0
  if (debugf == NULL) {
396
0
    debugf = pool_printf;
397
0
  }
398
399
0
  debugf("Memory pool allocation:");
400
0
  pr_pool_debug_memory2(pool_visitf, debugf);
401
0
}
402
403
void pr_pool_debug_memory2(void (*visit)(const pr_pool_info_t *, void *),
404
0
    void *user_data) {
405
0
  unsigned long freelist_byte_count = 0, freelist_block_count = 0,
406
0
    total_byte_count = 0;
407
0
  pr_pool_info_t pinfo;
408
409
0
  if (visit == NULL) {
410
0
    return;
411
0
  }
412
413
  /* Per pool */
414
0
  total_byte_count = visit_pools(permanent_pool, 0, visit, user_data);
415
416
  /* Free list */
417
0
  if (block_freelist) {
418
0
    freelist_byte_count = bytes_in_block_list(block_freelist);
419
0
    freelist_block_count = blocks_in_block_list(block_freelist);
420
0
  }
421
422
0
  memset(&pinfo, 0, sizeof(pinfo));
423
0
  pinfo.have_freelist_info = TRUE;
424
0
  pinfo.freelist_byte_count = freelist_byte_count;
425
0
  pinfo.freelist_block_count = freelist_block_count;
426
427
0
  visit(&pinfo, user_data);
428
429
  /* Totals */
430
0
  memset(&pinfo, 0, sizeof(pinfo));
431
0
  pinfo.have_total_info = TRUE;
432
0
  pinfo.total_byte_count = total_byte_count;
433
0
  pinfo.total_blocks_allocated = stat_malloc;
434
0
  pinfo.total_blocks_reused = stat_freehit;
435
436
0
  visit(&pinfo, user_data);
437
0
}
438
439
0
int pr_pool_debug_set_flags(int flags) {
440
0
  if (flags < 0) {
441
0
    errno = EINVAL;
442
0
    return -1;
443
0
  }
444
445
0
  debug_flags = flags;
446
0
  return 0;
447
0
}
448
449
3.68k
/* Label the pool with a tag (for debugging dumps).  The tag string is NOT
 * copied; the caller must keep it alive for the pool's lifetime.
 */
void pr_pool_tag(pool *p, const char *tag) {
  if (p != NULL &&
      tag != NULL) {
    p->tag = tag;
  }
}
457
458
0
/* Return the pool's tag (may be NULL if untagged), or NULL with errno set
 * to EINVAL for a NULL pool.
 */
const char *pr_pool_get_tag(pool *p) {
  if (p != NULL) {
    return p->tag;
  }

  errno = EINVAL;
  return NULL;
}
466
467
/* Release the entire free block list */
468
0
static void pool_release_free_block_list(void) {
469
0
  union block_hdr *blok = NULL, *next = NULL;
470
471
0
  pr_alarms_block();
472
473
0
  for (blok = block_freelist; blok; blok = next) {
474
0
    next = blok->h.next;
475
0
    free(blok);
476
0
  }
477
0
  block_freelist = NULL;
478
479
0
  pr_alarms_unblock();
480
0
}
481
482
5.46k
/* Create a new pool as a child of p (or a top-level pool when p is NULL).
 * The pool record itself lives at the start of the pool's own first block.
 */
struct pool_rec *make_sub_pool(struct pool_rec *p) {
  union block_hdr *blok;
  pool *new_pool;

  pr_alarms_block();

  blok = new_block(0, FALSE);

  /* Carve the pool record out of the front of the block's payload. */
  new_pool = (pool *) blok->h.first_avail;
  blok->h.first_avail = POOL_HDR_BYTES + (char *) blok->h.first_avail;

  memset(new_pool, 0, sizeof(struct pool_rec));
  /* Remember where usable space starts, so clear_pool() can rewind. */
  new_pool->free_first_avail = blok->h.first_avail;
  new_pool->first = new_pool->last = blok;

  if (p != NULL) {
    /* Link the new pool at the head of the parent's subpool list. */
    new_pool->parent = p;
    new_pool->sub_next = p->sub_pools;

    if (new_pool->sub_next != NULL) {
      new_pool->sub_next->sub_prev = new_pool;
    }

    p->sub_pools = new_pool;
  }

  pr_alarms_unblock();

  return new_pool;
}
512
513
0
/* Like make_sub_pool(), but the pool's first block is allocated with an
 * exact payload of sz bytes (plus the pool header), rather than being
 * rounded up to BLOCK_MINFREE.
 */
struct pool_rec *pr_pool_create_sz(struct pool_rec *p, size_t sz) {
  union block_hdr *blok;
  pool *new_pool;

  pr_alarms_block();

  blok = new_block(sz + POOL_HDR_BYTES, TRUE);

  /* Carve the pool record out of the front of the block's payload. */
  new_pool = (pool *) blok->h.first_avail;
  blok->h.first_avail = POOL_HDR_BYTES + (char *) blok->h.first_avail;

  memset(new_pool, 0, sizeof(struct pool_rec));
  /* Remember where usable space starts, so clear_pool() can rewind. */
  new_pool->free_first_avail = blok->h.first_avail;
  new_pool->first = new_pool->last = blok;

  if (p != NULL) {
    /* Link the new pool at the head of the parent's subpool list. */
    new_pool->parent = p;
    new_pool->sub_next = p->sub_pools;

    if (new_pool->sub_next != NULL) {
      new_pool->sub_next->sub_prev = new_pool;
    }

    p->sub_pools = new_pool;
  }

  pr_alarms_unblock();

  return new_pool;
}
543
544
/* Initialize the pool system by creating the base permanent_pool. */
545
546
0
void init_pools(void) {
547
0
  if (permanent_pool == NULL) {
548
0
    permanent_pool = make_sub_pool(NULL);
549
0
  }
550
551
0
  pr_pool_tag(permanent_pool, "permanent_pool");
552
0
}
553
554
0
void free_pools(void) {
555
0
  destroy_pool(permanent_pool);
556
0
  permanent_pool = NULL;
557
0
  pool_release_free_block_list();
558
0
}
559
560
5.46k
/* Empty the pool without releasing its first block: run cleanups, destroy
 * all subpools, push every block but the first onto the free list, and
 * rewind the first block so the pool can be reused.
 */
static void clear_pool(struct pool_rec *p) {

  /* Sanity check. */
  if (p == NULL) {
    return;
  }

  pr_alarms_block();

  /* Run through any cleanups. */
  run_cleanups(p->cleanups);
  p->cleanups = NULL;

  /* Destroy subpools. */
  while (p->sub_pools != NULL) {
    /* destroy_pool() unlinks the subpool from p->sub_pools, so this
     * loop terminates.
     */
    destroy_pool(p->sub_pools);
  }

  p->sub_pools = NULL;

  /* The first block holds the pool record itself, so it is kept. */
  free_blocks(p->first->h.next, p->tag);
  p->first->h.next = NULL;

  p->last = p->first;
  p->first->h.first_avail = p->free_first_avail;

  p->tag = NULL;
  pr_alarms_unblock();
}
589
590
5.46k
/* Destroy the pool entirely: unlink it from its parent, clear it (running
 * cleanups and destroying subpools), and push all its blocks -- including
 * the one holding the pool record -- onto the free list.
 */
void destroy_pool(pool *p) {
  if (p == NULL) {
    return;
  }

  pr_alarms_block();

  /* Unlink this pool from its parent's doubly-linked subpool list. */
  if (p->parent != NULL) {
    if (p->parent->sub_pools == p) {
      p->parent->sub_pools = p->sub_next;
    }

    if (p->sub_prev != NULL) {
      p->sub_prev->sub_next = p->sub_next;
    }

    if (p->sub_next != NULL) {
      p->sub_next->sub_prev = p->sub_prev;
    }
  }

  /* NOTE: clear_pool() sets p->tag to NULL, so the tag passed below is
   * always NULL here.
   */
  clear_pool(p);
  free_blocks(p->first, p->tag);

  pr_alarms_unblock();

#if defined(PR_DEVEL_NO_POOL_FREELIST)
  /* If configured explicitly to do so, call free(3) on the freelist after
   * a pool is destroyed.  This can be useful for tracking down use-after-free
   * and other memory issues using libraries such as dmalloc.
   */
  pool_release_free_block_list();
#endif /* PR_DEVEL_NO_POOL_FREELIST */
}
624
625
/* Allocation interface...
626
 */
627
628
3.68k
/* Allocate reqsz bytes from pool p, rounded up to a whole number of
 * alignment units (CLICK_SZ).  If the most recently allocated block lacks
 * room, a new block is chained onto the pool (sized exactly when `exact`
 * is TRUE, otherwise rounded up to BLOCK_MINFREE by new_block()).
 *
 * Returns NULL with errno set to EINVAL for a NULL pool, a pool with no
 * blocks, or a zero-length request.
 */
static void *alloc_pool(struct pool_rec *p, size_t reqsz, int exact) {
  size_t nclicks, sz;
  union block_hdr *blok;
  char *first_avail, *new_first_avail;

  if (p == NULL) {
    errno = EINVAL;
    return NULL;
  }

  /* Don't try to allocate memory of zero length.
   *
   * This should NOT happen normally; if it does, by returning NULL we
   * almost guarantee a null pointer dereference.
   *
   * Check this BEFORE computing the rounded size: with a reqsz of zero,
   * the unsigned (reqsz - 1) below would wrap around to SIZE_MAX.
   */
  if (reqsz == 0) {
    errno = EINVAL;
    return NULL;
  }

  /* Round up requested size to an even number of aligned units. */
  nclicks = 1 + ((reqsz - 1) / CLICK_SZ);
  sz = nclicks * CLICK_SZ;

  /* For performance, see if space is available in the most recently
   * allocated block.
   */
  blok = p->last;
  if (blok == NULL) {
    errno = EINVAL;
    return NULL;
  }

  first_avail = blok->h.first_avail;
  new_first_avail = first_avail + sz;

  if (new_first_avail <= (char *) blok->h.endp) {
    blok->h.first_avail = new_first_avail;
    return (void *) first_avail;
  }

  /* Need a new block that's big enough. */
  pr_alarms_block();

  blok = new_block(sz, exact);
  p->last->h.next = blok;
  p->last = blok;

  first_avail = blok->h.first_avail;
  blok->h.first_avail = sz + (char *) blok->h.first_avail;

  pr_alarms_unblock();
  return (void *) first_avail;
}
682
683
3.68k
/* Allocate sz bytes from pool p; any new block needed is rounded up to a
 * multiple of BLOCK_MINFREE.
 */
void *palloc(struct pool_rec *p, size_t sz) {
  return alloc_pool(p, sz, FALSE);
}
686
687
0
/* Allocate sz bytes from pool p; any new block needed is sized exactly,
 * not rounded up to BLOCK_MINFREE.
 */
void *pallocsz(struct pool_rec *p, size_t sz) {
  return alloc_pool(p, sz, TRUE);
}
690
691
3.68k
/* Allocate sz zero-filled bytes from pool p.  Returns NULL with errno set
 * if the pool is NULL or the allocation fails (e.g. a zero-length
 * request).
 */
void *pcalloc(struct pool_rec *p, size_t sz) {
  void *res;

  if (p == NULL) {
    errno = EINVAL;
    return NULL;
  }

  res = palloc(p, sz);
  if (res == NULL) {
    /* palloc() fails for zero-length requests; don't memset(NULL, ...). */
    return NULL;
  }

  memset(res, '\0', sz);
  return res;
}
704
705
0
/* Allocate sz zero-filled bytes from pool p, with the new block (if any)
 * sized exactly.  Returns NULL with errno set if the pool is NULL or the
 * allocation fails (e.g. a zero-length request).
 */
void *pcallocsz(struct pool_rec *p, size_t sz) {
  void *res;

  if (p == NULL) {
    errno = EINVAL;
    return NULL;
  }

  res = pallocsz(p, sz);
  if (res == NULL) {
    /* pallocsz() fails for zero-length requests; don't memset(NULL, ...). */
    return NULL;
  }

  memset(res, '\0', sz);
  return res;
}
718
719
/* Array functions */
720
721
0
/* Create a dynamic array in pool p with room for nelts elements of
 * elt_size bytes each (at least one element is always reserved).
 *
 * Returns NULL with errno set to EINVAL for a NULL pool, a zero element
 * size, or an initial size whose byte count would overflow size_t.
 */
array_header *make_array(pool *p, unsigned int nelts, size_t elt_size) {
  array_header *res;

  if (p == NULL ||
      elt_size == 0) {
    errno = EINVAL;
    return NULL;
  }

  if (nelts < 1) {
    nelts = 1;
  }

  /* Reject requests whose total byte count would overflow size_t
   * (CERT INT30-C).
   */
  if (nelts > ((size_t) -1) / elt_size) {
    errno = EINVAL;
    return NULL;
  }

  res = palloc(p, sizeof(array_header));

  res->elts = pcalloc(p, nelts * elt_size);
  res->pool = p;
  res->elt_size = elt_size;
  res->nelts = 0;
  res->nalloc = nelts;

  return res;
}
744
745
0
/* Empty the array, discarding its elements by swapping in a fresh zeroed
 * buffer of the same capacity from the array's pool.
 */
void clear_array(array_header *arr) {
  if (arr == NULL) {
    return;
  }

  arr->nelts = 0;
  arr->elts = pcalloc(arr->pool, arr->nalloc * arr->elt_size);
}
753
754
0
/* Reserve space for one more element at the end of the array, growing the
 * backing store (doubling) when full.  Returns a pointer to the new
 * (uninitialized-to-zero) element slot, or NULL with errno set to EINVAL
 * for a NULL array.
 */
void *push_array(array_header *arr) {
  if (arr == NULL) {
    errno = EINVAL;
    return NULL;
  }

  if (arr->nelts == arr->nalloc) {
    char *new_data;

    /* Guard against a zero capacity (e.g. an array obtained via
     * copy_array_hdr() of an empty array): doubling zero stays zero, and
     * pcalloc() would fail on the zero-length request.
     */
    if (arr->nalloc == 0) {
      arr->nalloc = 1;

    } else {
      arr->nalloc *= 2;
    }

    new_data = pcalloc(arr->pool, arr->nalloc * arr->elt_size);

    /* Only copy when there are existing elements; arr->elts may be a
     * NULL/zero-length buffer otherwise.
     */
    if (arr->nelts > 0) {
      memcpy(new_data, arr->elts, arr->nelts * arr->elt_size);
    }

    arr->elts = new_data;
  }

  ++arr->nelts;
  return ((char *) arr->elts) + (arr->elt_size * (arr->nelts - 1));
}
771
772
0
/* Append all of src's elements to dst, growing dst (by doubling) as
 * needed.  Returns 0 on success, or -1 with errno set to EINVAL when
 * either array is NULL.  The element sizes are assumed to match.
 */
int array_cat2(array_header *dst, const array_header *src) {
  size_t elt_size;

  if (dst == NULL ||
      src == NULL) {
    errno = EINVAL;
    return -1;
  }

  elt_size = dst->elt_size;

  /* Grow dst until it can hold both element sets. */
  if (dst->nelts + src->nelts > dst->nalloc) {
    size_t new_size = dst->nalloc * 2;
    char *new_data;

    if (new_size == 0) {
      new_size = 1;
    }

    while ((dst->nelts + src->nelts) > new_size) {
      new_size *= 2;
    }

    new_data = pcalloc(dst->pool, elt_size * new_size);
    memcpy(new_data, dst->elts, dst->nalloc * elt_size);

    dst->elts = new_data;
    dst->nalloc = new_size;
  }

  /* Copy src's elements in after dst's existing ones. */
  memcpy(((char *) dst->elts) + (dst->nelts * elt_size), (char *) src->elts,
         elt_size * src->nelts);
  dst->nelts += src->nelts;

  return 0;
}
809
810
0
/* Legacy wrapper around array_cat2(); any error is deliberately ignored. */
void array_cat(array_header *dst, const array_header *src) {
  (void) array_cat2(dst, src);
}
813
814
0
/* Make a shallow copy of arr in pool p: a new array of the same capacity
 * with the element bytes copied over.  Returns NULL with errno set to
 * EINVAL when either argument is NULL.
 */
array_header *copy_array(pool *p, const array_header *arr) {
  array_header *dup;

  if (p == NULL ||
      arr == NULL) {
    errno = EINVAL;
    return NULL;
  }

  dup = make_array(p, arr->nalloc, arr->elt_size);

  if (arr->nelts > 0) {
    memcpy(dup->elts, arr->elts, arr->elt_size * arr->nelts);
  }

  dup->nelts = arr->nelts;
  return dup;
}
832
833
/* copy an array that is assumed to consist solely of strings */
834
0
/* Copy an array assumed to consist solely of strings, duplicating each
 * string into pool p so the copy owns its own element storage.
 */
array_header *copy_array_str(pool *p, const array_header *arr) {
  register unsigned int i;
  array_header *dup;

  if (p == NULL ||
      arr == NULL) {
    errno = EINVAL;
    return NULL;
  }

  dup = copy_array(p, arr);

  /* Deep-copy each string element. */
  for (i = 0; i < arr->nelts; i++) {
    ((char **) dup->elts)[i] = pstrdup(p, ((char **) dup->elts)[i]);
  }

  return dup;
}
852
853
0
/* Copy only the array header, sharing the element storage with the
 * source.  Setting nalloc equal to nelts forces a reallocation on the
 * next push, so the shared storage is never written through the copy.
 */
array_header *copy_array_hdr(pool *p, const array_header *arr) {
  array_header *hdr;

  if (p == NULL ||
      arr == NULL) {
    errno = EINVAL;
    return NULL;
  }

  hdr = palloc(p, sizeof(array_header));

  hdr->elts = arr->elts;
  hdr->pool = p;
  hdr->elt_size = arr->elt_size;
  hdr->nelts = arr->nelts;
  hdr->nalloc = arr->nelts;   /* Force overflow on push */

  return hdr;
}
872
873
/* Build a new array in pool p holding first's elements followed by
 * second's.  Implemented as a shallow header copy of first, onto which
 * second is concatenated (forcing the copy to reallocate).
 */
array_header *append_arrays(pool *p, const array_header *first,
    const array_header *second) {
  array_header *combined;

  if (p == NULL ||
      first == NULL ||
      second == NULL) {
    errno = EINVAL;
    return NULL;
  }

  combined = copy_array_hdr(p, first);
  array_cat(combined, second);

  return combined;
}
889
890
/* Generic cleanups */
891
892
/* A pool cleanup: a callback plus its argument, run when the owning pool
 * is cleared or destroyed.  Cleanups form a singly-linked list on the
 * pool, newest first.
 */
typedef struct cleanup {
  void *user_data;
  void (*cleanup_cb)(void *);
  struct cleanup *next;

} cleanup_t;
898
899
0
/* Register a cleanup callback on pool p; it will be invoked with
 * user_data when the pool is cleared or destroyed.  The record is
 * allocated from the pool itself.
 */
void register_cleanup2(pool *p, void *user_data, void (*cleanup_cb)(void*)) {
  cleanup_t *c;

  if (p == NULL) {
    return;
  }

  c = pcalloc(p, sizeof(cleanup_t));
  c->user_data = user_data;
  c->cleanup_cb = cleanup_cb;

  /* Push onto the head of the pool's cleanup list. */
  c->next = p->cleanups;
  p->cleanups = c;
}
914
915
/* Legacy registration API; the child cleanup callback is no longer used
 * and is ignored.
 */
void register_cleanup(pool *p, void *user_data, void (*plain_cleanup_cb)(void*),
    void (*child_cleanup_cb)(void *)) {
  (void) child_cleanup_cb;

  register_cleanup2(p, user_data, plain_cleanup_cb);
}
920
921
0
/* Remove the first registered cleanup matching both user_data and
 * cleanup_cb from pool p's list.  A NULL cleanup_cb matches any callback
 * registered with that user_data.
 */
void unregister_cleanup(pool *p, void *user_data, void (*cleanup_cb)(void *)) {
  cleanup_t **prevp;

  if (p == NULL) {
    return;
  }

  for (prevp = &p->cleanups; *prevp != NULL; prevp = &(*prevp)->next) {
    cleanup_t *c = *prevp;

    if (c->user_data == user_data &&
        (c->cleanup_cb == cleanup_cb || cleanup_cb == NULL)) {

      /* Unlink the matching cleanup and stop. */
      *prevp = c->next;
      break;
    }
  }
}
946
947
5.46k
/* Invoke every cleanup callback in the list, in list order (i.e. newest
 * registration first).
 */
static void run_cleanups(cleanup_t *c) {
  for (; c != NULL; c = c->next) {
    if (c->cleanup_cb != NULL) {
      (*c->cleanup_cb)(c->user_data);
    }
  }
}