Coverage Report

Created: 2025-07-23 07:04

/src/samba/lib/talloc/talloc.c
Line
Count
Source (jump to first uncovered line)
1
/*
2
   Samba Unix SMB/CIFS implementation.
3
4
   Samba trivial allocation library - new interface
5
6
   NOTE: Please read talloc_guide.txt for full documentation
7
8
   Copyright (C) Andrew Tridgell 2004
9
   Copyright (C) Stefan Metzmacher 2006
10
11
     ** NOTE! The following LGPL license applies to the talloc
12
     ** library. This does NOT imply that all of Samba is released
13
     ** under the LGPL
14
15
   This library is free software; you can redistribute it and/or
16
   modify it under the terms of the GNU Lesser General Public
17
   License as published by the Free Software Foundation; either
18
   version 3 of the License, or (at your option) any later version.
19
20
   This library is distributed in the hope that it will be useful,
21
   but WITHOUT ANY WARRANTY; without even the implied warranty of
22
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
23
   Lesser General Public License for more details.
24
25
   You should have received a copy of the GNU Lesser General Public
26
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
27
*/
28
29
/*
30
  inspired by http://swapped.cc/halloc/
31
*/
32
33
#include "replace.h"
34
#include "talloc.h"
35
36
#ifdef HAVE_SYS_AUXV_H
37
#include <sys/auxv.h>
38
#endif
39
40
#if (TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR)
41
#error "TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR"
42
#endif
43
44
#if (TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR)
45
#error "TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR"
46
#endif
47
48
/* Special macros that are no-ops except when run under Valgrind on
49
 * x86.  They've moved a little bit from valgrind 1.0.4 to 1.9.4 */
50
#ifdef HAVE_VALGRIND_MEMCHECK_H
51
        /* memcheck.h includes valgrind.h */
52
#include <valgrind/memcheck.h>
53
#elif defined(HAVE_VALGRIND_H)
54
#include <valgrind.h>
55
#endif
56
57
83.5M
#define MAX_TALLOC_SIZE 0x10000000
58
59
174M
#define TALLOC_FLAG_FREE 0x01
60
122M
#define TALLOC_FLAG_LOOP 0x02
61
237M
#define TALLOC_FLAG_POOL 0x04    /* This is a talloc pool */
62
237M
#define TALLOC_FLAG_POOLMEM 0x08  /* This is allocated in a pool */
63
64
/*
65
 * Bits above this are random, used to make it harder to fake talloc
66
 * headers during an attack.  Try not to change this without good reason.
67
 */
68
322M
#define TALLOC_FLAG_MASK 0x0F
69
70
0
#define TALLOC_MAGIC_REFERENCE ((const char *)1)
71
72
148M
#define TALLOC_MAGIC_BASE 0xe814ec70
73
148M
#define TALLOC_MAGIC_NON_RANDOM ( \
74
148M
  ~TALLOC_FLAG_MASK & ( \
75
148M
    TALLOC_MAGIC_BASE + \
76
148M
    (TALLOC_BUILD_VERSION_MAJOR << 24) + \
77
148M
    (TALLOC_BUILD_VERSION_MINOR << 16) + \
78
148M
    (TALLOC_BUILD_VERSION_RELEASE << 8)))
79
static unsigned int talloc_magic = TALLOC_MAGIC_NON_RANDOM;
80
81
/* by default we abort when given a bad pointer (such as when talloc_free() is called
82
   on a pointer that came from malloc() */
83
#ifndef TALLOC_ABORT
84
0
#define TALLOC_ABORT(reason) abort()
85
#endif
86
87
#ifndef discard_const_p
88
#if defined(__intptr_t_defined) || defined(HAVE_INTPTR_T)
89
# define discard_const_p(type, ptr) ((type *)((intptr_t)(ptr)))
90
#else
91
# define discard_const_p(type, ptr) ((type *)(ptr))
92
#endif
93
#endif
94
95
/* these macros gain us a few percent of speed on gcc */
96
#if (__GNUC__ >= 3)
97
/* the strange !! is to ensure that __builtin_expect() takes either 0 or 1
98
   as its first argument */
99
#ifndef likely
100
#define likely(x)   __builtin_expect(!!(x), 1)
101
#endif
102
#ifndef unlikely
103
#define unlikely(x) __builtin_expect(!!(x), 0)
104
#endif
105
#else
106
#ifndef likely
107
#define likely(x) (x)
108
#endif
109
#ifndef unlikely
110
#define unlikely(x) (x)
111
#endif
112
#endif
113
114
/* this null_context is only used if talloc_enable_leak_report() or
115
   talloc_enable_leak_report_full() is called, otherwise it remains
116
   NULL
117
*/
118
static void *null_context;
119
static bool talloc_report_null;
120
static bool talloc_report_null_full;
121
static void *autofree_context;
122
123
static void talloc_setup_atexit(void);
124
125
/* used to enable fill of memory on free, which can be useful for
126
 * catching use after free errors when valgrind is too slow
127
 */
128
static struct {
129
  bool initialised;
130
  bool enabled;
131
  uint8_t fill_value;
132
} talloc_fill;
133
134
159
#define TALLOC_FILL_ENV "TALLOC_FREE_FILL"
135
136
/*
137
 * do not wipe the header, to allow the
138
 * double-free logic to still work
139
 */
140
121M
#define TC_INVALIDATE_FULL_FILL_CHUNK(_tc) do { \
141
121M
  if (unlikely(talloc_fill.enabled)) { \
142
0
    size_t _flen = (_tc)->size; \
143
0
    char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
144
0
    memset(_fptr, talloc_fill.fill_value, _flen); \
145
0
  } \
146
121M
} while (0)
147
148
#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
149
/* Mark the whole chunk as not accessible */
150
#define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { \
151
  size_t _flen = TC_HDR_SIZE + (_tc)->size; \
152
  char *_fptr = (char *)(_tc); \
153
  VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
154
} while(0)
155
#else
156
121M
#define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { } while (0)
157
#endif
158
159
121M
#define TC_INVALIDATE_FULL_CHUNK(_tc) do { \
160
121M
  TC_INVALIDATE_FULL_FILL_CHUNK(_tc); \
161
121M
  TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc); \
162
121M
} while (0)
163
164
0
#define TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
165
0
  if (unlikely(talloc_fill.enabled)) { \
166
0
    size_t _flen = (_tc)->size - (_new_size); \
167
0
    char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
168
0
    _fptr += (_new_size); \
169
0
    memset(_fptr, talloc_fill.fill_value, _flen); \
170
0
  } \
171
0
} while (0)
172
173
#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
174
/* Mark the unused bytes not accessible */
175
#define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
176
  size_t _flen = (_tc)->size - (_new_size); \
177
  char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
178
  _fptr += (_new_size); \
179
  VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
180
} while (0)
181
#else
182
0
#define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
183
#endif
184
185
0
#define TC_INVALIDATE_SHRINK_CHUNK(_tc, _new_size) do { \
186
0
  TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size); \
187
0
  TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
188
0
} while (0)
189
190
24.4k
#define TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
191
24.4k
  if (unlikely(talloc_fill.enabled)) { \
192
0
    size_t _flen = (_tc)->size - (_new_size); \
193
0
    char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
194
0
    _fptr += (_new_size); \
195
0
    memset(_fptr, talloc_fill.fill_value, _flen); \
196
0
  } \
197
24.4k
} while (0)
198
199
#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
200
/* Mark the unused bytes as undefined */
201
#define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
202
  size_t _flen = (_tc)->size - (_new_size); \
203
  char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
204
  _fptr += (_new_size); \
205
  VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
206
} while (0)
207
#else
208
24.4k
#define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
209
#endif
210
211
24.4k
#define TC_UNDEFINE_SHRINK_CHUNK(_tc, _new_size) do { \
212
24.4k
  TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size); \
213
24.4k
  TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
214
24.4k
} while (0)
215
216
#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
217
/* Mark the new bytes as undefined */
218
#define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { \
219
  size_t _old_used = TC_HDR_SIZE + (_tc)->size; \
220
  size_t _new_used = TC_HDR_SIZE + (_new_size); \
221
  size_t _flen = _new_used - _old_used; \
222
  char *_fptr = _old_used + (char *)(_tc); \
223
  VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
224
} while (0)
225
#else
226
0
#define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
227
#endif
228
229
0
#define TC_UNDEFINE_GROW_CHUNK(_tc, _new_size) do { \
230
0
  TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size); \
231
0
} while (0)
232
233
struct talloc_reference_handle {
234
  struct talloc_reference_handle *next, *prev;
235
  void *ptr;
236
  const char *location;
237
};
238
239
struct talloc_memlimit {
240
  struct talloc_chunk *parent;
241
  struct talloc_memlimit *upper;
242
  size_t max_size;
243
  size_t cur_size;
244
};
245
246
static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size);
247
static inline void talloc_memlimit_grow(struct talloc_memlimit *limit,
248
        size_t size);
249
static inline void talloc_memlimit_shrink(struct talloc_memlimit *limit,
250
        size_t size);
251
static inline void tc_memlimit_update_on_free(struct talloc_chunk *tc);
252
253
static inline void _tc_set_name_const(struct talloc_chunk *tc,
254
        const char *name);
255
static struct talloc_chunk *_vasprintf_tc(const void *t,
256
        const char *fmt,
257
        va_list ap);
258
259
typedef int (*talloc_destructor_t)(void *);
260
261
struct talloc_pool_hdr;
262
263
struct talloc_chunk {
264
  /*
265
   * flags includes the talloc magic, which is randomised to
266
   * make overwrite attacks harder
267
   */
268
  unsigned flags;
269
270
  /*
271
   * If you have a logical tree like:
272
   *
273
   *           <parent>
274
   *           /   |   \
275
   *          /    |    \
276
   *         /     |     \
277
   * <child 1> <child 2> <child 3>
278
   *
279
   * The actual talloc tree is:
280
   *
281
   *  <parent>
282
   *     |
283
   *  <child 1> - <child 2> - <child 3>
284
   *
285
   * The children are linked with next/prev pointers, and
286
   * child 1 is linked to the parent with parent/child
287
   * pointers.
288
   */
289
290
  struct talloc_chunk *next, *prev;
291
  struct talloc_chunk *parent, *child;
292
  struct talloc_reference_handle *refs;
293
  talloc_destructor_t destructor;
294
  const char *name;
295
  size_t size;
296
297
  /*
298
   * limit semantics:
299
   * if 'limit' is set it means all *new* children of the context will
300
   * be limited to a total aggregate size ox max_size for memory
301
   * allocations.
302
   * cur_size is used to keep track of the current use
303
   */
304
  struct talloc_memlimit *limit;
305
306
  /*
307
   * For members of a pool (i.e. TALLOC_FLAG_POOLMEM is set), "pool"
308
   * is a pointer to the struct talloc_chunk of the pool that it was
309
   * allocated from. This way children can quickly find the pool to chew
310
   * from.
311
   */
312
  struct talloc_pool_hdr *pool;
313
};
314
315
union talloc_chunk_cast_u {
316
  uint8_t *ptr;
317
  struct talloc_chunk *chunk;
318
};
319
320
/* 16 byte alignment seems to keep everyone happy */
321
640M
#define TC_ALIGN16(s) (((s)+15)&~15)
322
640M
#define TC_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_chunk))
323
376M
#define TC_PTR_FROM_CHUNK(tc) ((void *)(TC_HDR_SIZE + (char*)tc))
324
325
_PUBLIC_ int talloc_version_major(void)
326
0
{
327
0
  return TALLOC_VERSION_MAJOR;
328
0
}
329
330
_PUBLIC_ int talloc_version_minor(void)
331
0
{
332
0
  return TALLOC_VERSION_MINOR;
333
0
}
334
335
_PUBLIC_ int talloc_test_get_magic(void)
336
0
{
337
0
  return talloc_magic;
338
0
}
339
340
static inline void _talloc_chunk_set_free(struct talloc_chunk *tc,
341
            const char *location)
342
148M
{
343
  /*
344
   * Mark this memory as free, and also over-stamp the talloc
345
   * magic with the old-style magic.
346
   *
347
   * Why?  This tries to avoid a memory read use-after-free from
348
   * disclosing our talloc magic, which would then allow an
349
   * attacker to prepare a valid header and so run a destructor.
350
   *
351
   */
352
148M
  tc->flags = TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE
353
148M
    | (tc->flags & TALLOC_FLAG_MASK);
354
355
  /* we mark the freed memory with where we called the free
356
   * from. This means on a double free error we can report where
357
   * the first free came from
358
   */
359
148M
  if (location) {
360
121M
    tc->name = location;
361
121M
  }
362
148M
}
363
364
static inline void _talloc_chunk_set_not_free(struct talloc_chunk *tc)
365
26.3M
{
366
  /*
367
   * Mark this memory as not free.
368
   *
369
   * Why? This is memory either in a pool (and so available for
370
   * talloc's re-use or after the realloc().  We need to mark
371
   * the memory as free() before any realloc() call as we can't
372
   * write to the memory after that.
373
   *
374
   * We put back the normal magic instead of the 'not random'
375
   * magic.
376
   */
377
378
26.3M
  tc->flags = talloc_magic |
379
26.3M
    ((tc->flags & TALLOC_FLAG_MASK) & ~TALLOC_FLAG_FREE);
380
26.3M
}
381
382
static void (*talloc_log_fn)(const char *message);
383
384
_PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message))
385
17
{
386
17
  talloc_log_fn = log_fn;
387
17
}
388
389
#ifdef HAVE_CONSTRUCTOR_ATTRIBUTE
390
#define CONSTRUCTOR __attribute__((constructor))
391
#elif defined(HAVE_PRAGMA_INIT)
392
#define CONSTRUCTOR
393
#pragma init (talloc_lib_init)
394
#endif
395
#if defined(HAVE_CONSTRUCTOR_ATTRIBUTE) || defined(HAVE_PRAGMA_INIT)
396
void talloc_lib_init(void) CONSTRUCTOR;
397
void talloc_lib_init(void)
398
376
{
399
376
  uint32_t random_value;
400
376
#if defined(HAVE_GETAUXVAL) && defined(AT_RANDOM)
401
376
  uint8_t *p;
402
  /*
403
   * Use the kernel-provided random values used for
404
   * ASLR.  This won't change per-exec, which is ideal for us
405
   */
406
376
  p = (uint8_t *) getauxval(AT_RANDOM);
407
376
  if (p) {
408
    /*
409
     * We get 16 bytes from getauxval.  By calling rand(),
410
     * a totally insecure PRNG, but one that will
411
     * deterministically have a different value when called
412
     * twice, we ensure that if two talloc-like libraries
413
     * are somehow loaded in the same address space, that
414
     * because we choose different bytes, we will keep the
415
     * protection against collision of multiple talloc
416
     * libs.
417
     *
418
     * This protection is important because the effects of
419
     * passing a talloc pointer from one to the other may
420
     * be very hard to determine.
421
     */
422
376
    int offset = rand() % (16 - sizeof(random_value));
423
376
    memcpy(&random_value, p + offset, sizeof(random_value));
424
376
  } else
425
0
#endif
426
0
  {
427
    /*
428
     * Otherwise, hope the location we are loaded in
429
     * memory is randomised by someone else
430
     */
431
0
    random_value = ((uintptr_t)talloc_lib_init & 0xFFFFFFFF);
432
0
  }
433
376
  talloc_magic = random_value & ~TALLOC_FLAG_MASK;
434
376
}
435
#else
436
#warning "No __attribute__((constructor)) support found on this platform, additional talloc security measures not available"
437
#endif
438
439
static void talloc_lib_atexit(void)
440
0
{
441
0
  TALLOC_FREE(autofree_context);
442
443
0
  if (talloc_total_size(null_context) == 0) {
444
0
    return;
445
0
  }
446
447
0
  if (talloc_report_null_full) {
448
0
    talloc_report_full(null_context, stderr);
449
0
  } else if (talloc_report_null) {
450
0
    talloc_report(null_context, stderr);
451
0
  }
452
0
}
453
454
static void talloc_setup_atexit(void)
455
0
{
456
0
  static bool done;
457
458
0
  if (done) {
459
0
    return;
460
0
  }
461
462
0
  atexit(talloc_lib_atexit);
463
0
  done = true;
464
0
}
465
466
static void talloc_log(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2);
467
static void talloc_log(const char *fmt, ...)
468
0
{
469
0
  va_list ap;
470
0
  char *message;
471
472
0
  if (!talloc_log_fn) {
473
0
    return;
474
0
  }
475
476
0
  va_start(ap, fmt);
477
0
  message = talloc_vasprintf(NULL, fmt, ap);
478
0
  va_end(ap);
479
480
0
  talloc_log_fn(message);
481
0
  talloc_free(message);
482
0
}
483
484
static void talloc_log_stderr(const char *message)
485
0
{
486
0
  fprintf(stderr, "%s", message);
487
0
}
488
489
_PUBLIC_ void talloc_set_log_stderr(void)
490
0
{
491
0
  talloc_set_log_fn(talloc_log_stderr);
492
0
}
493
494
static void (*talloc_abort_fn)(const char *reason);
495
496
_PUBLIC_ void talloc_set_abort_fn(void (*abort_fn)(const char *reason))
497
0
{
498
0
  talloc_abort_fn = abort_fn;
499
0
}
500
501
static void talloc_abort(const char *reason)
502
0
{
503
0
  talloc_log("%s\n", reason);
504
505
0
  if (!talloc_abort_fn) {
506
0
    TALLOC_ABORT(reason);
507
0
  }
508
509
0
  talloc_abort_fn(reason);
510
0
}
511
512
static void talloc_abort_access_after_free(void)
513
0
{
514
0
  talloc_abort("Bad talloc magic value - access after free");
515
0
}
516
517
static void talloc_abort_unknown_value(void)
518
0
{
519
0
  talloc_abort("Bad talloc magic value - unknown value");
520
0
}
521
522
/* panic if we get a bad magic value */
523
static inline struct talloc_chunk *talloc_chunk_from_ptr(const void *ptr)
524
1.45G
{
525
1.45G
  const char *pp = (const char *)ptr;
526
1.45G
  struct talloc_chunk *tc = discard_const_p(struct talloc_chunk, pp - TC_HDR_SIZE);
527
1.45G
  if (unlikely((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK)) != talloc_magic)) {
528
0
    if ((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK))
529
0
        == (TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE)) {
530
0
      talloc_log("talloc: access after free error - first free may be at %s\n", tc->name);
531
0
      talloc_abort_access_after_free();
532
0
      return NULL;
533
0
    }
534
535
0
    talloc_abort_unknown_value();
536
0
    return NULL;
537
0
  }
538
1.45G
  return tc;
539
1.45G
}
540
541
/* hook into the front of the list */
542
170k
#define _TLIST_ADD(list, p) \
543
170k
do { \
544
170k
        if (!(list)) { \
545
27.9k
    (list) = (p); \
546
27.9k
    (p)->next = (p)->prev = NULL; \
547
142k
  } else { \
548
142k
    (list)->prev = (p); \
549
142k
    (p)->next = (list); \
550
142k
    (p)->prev = NULL; \
551
142k
    (list) = (p); \
552
142k
  }\
553
170k
} while (0)
554
555
/* remove an element from a list - element doesn't have to be in list. */
556
113M
#define _TLIST_REMOVE(list, p) \
557
113M
do { \
558
113M
  if ((p) == (list)) { \
559
113M
    (list) = (p)->next; \
560
113M
    if (list) (list)->prev = NULL; \
561
113M
  } else { \
562
0
    if ((p)->prev) (p)->prev->next = (p)->next; \
563
0
    if ((p)->next) (p)->next->prev = (p)->prev; \
564
0
  } \
565
113M
  if ((p) && ((p) != (list))) (p)->next = (p)->prev = NULL; \
566
113M
} while (0)
567
568
569
/*
570
  return the parent chunk of a pointer
571
*/
572
static inline struct talloc_chunk *talloc_parent_chunk(const void *ptr)
573
54.0k
{
574
54.0k
  struct talloc_chunk *tc;
575
576
54.0k
  if (unlikely(ptr == NULL)) {
577
0
    return NULL;
578
0
  }
579
580
54.0k
  tc = talloc_chunk_from_ptr(ptr);
581
76.8k
  while (tc->prev) tc=tc->prev;
582
583
54.0k
  return tc->parent;
584
54.0k
}
585
586
_PUBLIC_ void *talloc_parent(const void *ptr)
587
54.0k
{
588
54.0k
  struct talloc_chunk *tc = talloc_parent_chunk(ptr);
589
54.0k
  return tc? TC_PTR_FROM_CHUNK(tc) : NULL;
590
54.0k
}
591
592
/*
593
  find parents name
594
*/
595
_PUBLIC_ const char *talloc_parent_name(const void *ptr)
596
0
{
597
0
  struct talloc_chunk *tc = talloc_parent_chunk(ptr);
598
0
  return tc? tc->name : NULL;
599
0
}
600
601
/*
602
  A pool carries an in-pool object count count in the first 16 bytes.
603
  bytes. This is done to support talloc_steal() to a parent outside of the
604
  pool. The count includes the pool itself, so a talloc_free() on a pool will
605
  only destroy the pool if the count has dropped to zero. A talloc_free() of a
606
  pool member will reduce the count, and eventually also call free(3) on the
607
  pool memory.
608
609
  The object count is not put into "struct talloc_chunk" because it is only
610
  relevant for talloc pools and the alignment to 16 bytes would increase the
611
  memory footprint of each talloc chunk by those 16 bytes.
612
*/
613
614
struct talloc_pool_hdr {
615
  void *end;
616
  unsigned int object_count;
617
  size_t poolsize;
618
};
619
620
union talloc_pool_hdr_cast_u {
621
  uint8_t *ptr;
622
  struct talloc_pool_hdr *hdr;
623
};
624
625
0
#define TP_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_pool_hdr))
626
627
static inline struct talloc_pool_hdr *talloc_pool_from_chunk(struct talloc_chunk *c)
628
0
{
629
0
  union talloc_chunk_cast_u tcc = { .chunk = c };
630
0
  union talloc_pool_hdr_cast_u tphc = { tcc.ptr - TP_HDR_SIZE };
631
0
  return tphc.hdr;
632
0
}
633
634
static inline struct talloc_chunk *talloc_chunk_from_pool(struct talloc_pool_hdr *h)
635
0
{
636
0
  union talloc_pool_hdr_cast_u tphc = { .hdr = h };
637
0
  union talloc_chunk_cast_u tcc = { .ptr = tphc.ptr + TP_HDR_SIZE };
638
0
  return tcc.chunk;
639
0
}
640
641
static inline void *tc_pool_end(struct talloc_pool_hdr *pool_hdr)
642
0
{
643
0
  struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
644
0
  return (char *)tc + TC_HDR_SIZE + pool_hdr->poolsize;
645
0
}
646
647
static inline size_t tc_pool_space_left(struct talloc_pool_hdr *pool_hdr)
648
0
{
649
0
  return (char *)tc_pool_end(pool_hdr) - (char *)pool_hdr->end;
650
0
}
651
652
/* If tc is inside a pool, this gives the next neighbour. */
653
static inline void *tc_next_chunk(struct talloc_chunk *tc)
654
0
{
655
0
  return (char *)tc + TC_ALIGN16(TC_HDR_SIZE + tc->size);
656
0
}
657
658
static inline void *tc_pool_first_chunk(struct talloc_pool_hdr *pool_hdr)
659
0
{
660
0
  struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
661
0
  return tc_next_chunk(tc);
662
0
}
663
664
/* Mark the whole remaining pool as not accessible */
665
static inline void tc_invalidate_pool(struct talloc_pool_hdr *pool_hdr)
666
0
{
667
0
  size_t flen = tc_pool_space_left(pool_hdr);
668
669
0
  if (unlikely(talloc_fill.enabled)) {
670
0
    memset(pool_hdr->end, talloc_fill.fill_value, flen);
671
0
  }
672
673
#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
674
  VALGRIND_MAKE_MEM_NOACCESS(pool_hdr->end, flen);
675
#endif
676
0
}
677
678
/*
679
  Allocate from a pool
680
*/
681
682
static inline struct talloc_chunk *tc_alloc_pool(struct talloc_chunk *parent,
683
                 size_t size, size_t prefix_len)
684
115M
{
685
115M
  struct talloc_pool_hdr *pool_hdr = NULL;
686
115M
  union talloc_chunk_cast_u tcc;
687
115M
  size_t space_left;
688
115M
  struct talloc_chunk *result;
689
115M
  size_t chunk_size;
690
691
115M
  if (parent == NULL) {
692
0
    return NULL;
693
0
  }
694
695
115M
  if (parent->flags & TALLOC_FLAG_POOL) {
696
0
    pool_hdr = talloc_pool_from_chunk(parent);
697
0
  }
698
115M
  else if (parent->flags & TALLOC_FLAG_POOLMEM) {
699
0
    pool_hdr = parent->pool;
700
0
  }
701
702
115M
  if (pool_hdr == NULL) {
703
115M
    return NULL;
704
115M
  }
705
706
0
  space_left = tc_pool_space_left(pool_hdr);
707
708
  /*
709
   * Align size to 16 bytes
710
   */
711
0
  chunk_size = TC_ALIGN16(size + prefix_len);
712
713
0
  if (space_left < chunk_size) {
714
0
    return NULL;
715
0
  }
716
717
0
  tcc = (union talloc_chunk_cast_u) {
718
0
    .ptr = ((uint8_t *)pool_hdr->end) + prefix_len
719
0
  };
720
0
  result = tcc.chunk;
721
722
#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
723
  VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, chunk_size);
724
#endif
725
726
0
  pool_hdr->end = (void *)((char *)pool_hdr->end + chunk_size);
727
728
0
  result->flags = talloc_magic | TALLOC_FLAG_POOLMEM;
729
0
  result->pool = pool_hdr;
730
731
0
  pool_hdr->object_count++;
732
733
0
  return result;
734
0
}
735
736
/*
737
   Allocate a bit of memory as a child of an existing pointer
738
*/
739
static inline void *__talloc_with_prefix(const void *context,
740
          size_t size,
741
          size_t prefix_len,
742
          struct talloc_chunk **tc_ret)
743
121M
{
744
121M
  struct talloc_chunk *tc = NULL;
745
121M
  struct talloc_memlimit *limit = NULL;
746
121M
  size_t total_len = TC_HDR_SIZE + size + prefix_len;
747
121M
  struct talloc_chunk *parent = NULL;
748
749
121M
  if (unlikely(context == NULL)) {
750
6.57M
    context = null_context;
751
6.57M
  }
752
753
121M
  if (unlikely(size >= MAX_TALLOC_SIZE)) {
754
0
    return NULL;
755
0
  }
756
757
121M
  if (unlikely(total_len < TC_HDR_SIZE)) {
758
0
    return NULL;
759
0
  }
760
761
121M
  if (likely(context != NULL)) {
762
115M
    parent = talloc_chunk_from_ptr(context);
763
764
115M
    if (parent->limit != NULL) {
765
0
      limit = parent->limit;
766
0
    }
767
768
115M
    tc = tc_alloc_pool(parent, TC_HDR_SIZE+size, prefix_len);
769
115M
  }
770
771
121M
  if (tc == NULL) {
772
121M
    uint8_t *ptr = NULL;
773
121M
    union talloc_chunk_cast_u tcc;
774
775
    /*
776
     * Only do the memlimit check/update on actual allocation.
777
     */
778
121M
    if (!talloc_memlimit_check(limit, total_len)) {
779
0
      errno = ENOMEM;
780
0
      return NULL;
781
0
    }
782
783
121M
    ptr = malloc(total_len);
784
121M
    if (unlikely(ptr == NULL)) {
785
0
      return NULL;
786
0
    }
787
121M
    tcc = (union talloc_chunk_cast_u) { .ptr = ptr + prefix_len };
788
121M
    tc = tcc.chunk;
789
121M
    tc->flags = talloc_magic;
790
121M
    tc->pool  = NULL;
791
792
121M
    talloc_memlimit_grow(limit, total_len);
793
121M
  }
794
795
121M
  tc->limit = limit;
796
121M
  tc->size = size;
797
121M
  tc->destructor = NULL;
798
121M
  tc->child = NULL;
799
121M
  tc->name = NULL;
800
121M
  tc->refs = NULL;
801
802
121M
  if (likely(context != NULL)) {
803
115M
    if (parent->child) {
804
98.4M
      parent->child->parent = NULL;
805
98.4M
      tc->next = parent->child;
806
98.4M
      tc->next->prev = tc;
807
98.4M
    } else {
808
16.8M
      tc->next = NULL;
809
16.8M
    }
810
115M
    tc->parent = parent;
811
115M
    tc->prev = NULL;
812
115M
    parent->child = tc;
813
115M
  } else {
814
6.57M
    tc->next = tc->prev = tc->parent = NULL;
815
6.57M
  }
816
817
121M
  *tc_ret = tc;
818
121M
  return TC_PTR_FROM_CHUNK(tc);
819
121M
}
820
821
static inline void *__talloc(const void *context,
822
      size_t size,
823
      struct talloc_chunk **tc)
824
121M
{
825
121M
  return __talloc_with_prefix(context, size, 0, tc);
826
121M
}
827
828
/*
829
 * Create a talloc pool
830
 */
831
832
static inline void *_talloc_pool(const void *context, size_t size)
833
0
{
834
0
  struct talloc_chunk *tc = NULL;
835
0
  struct talloc_pool_hdr *pool_hdr;
836
0
  void *result;
837
838
0
  result = __talloc_with_prefix(context, size, TP_HDR_SIZE, &tc);
839
840
0
  if (unlikely(result == NULL)) {
841
0
    return NULL;
842
0
  }
843
844
0
  pool_hdr = talloc_pool_from_chunk(tc);
845
846
0
  tc->flags |= TALLOC_FLAG_POOL;
847
0
  tc->size = 0;
848
849
0
  pool_hdr->object_count = 1;
850
0
  pool_hdr->end = result;
851
0
  pool_hdr->poolsize = size;
852
853
0
  tc_invalidate_pool(pool_hdr);
854
855
0
  return result;
856
0
}
857
858
_PUBLIC_ void *talloc_pool(const void *context, size_t size)
859
0
{
860
0
  return _talloc_pool(context, size);
861
0
}
862
863
/*
864
 * Create a talloc pool correctly sized for a basic size plus
865
 * a number of subobjects whose total size is given. Essentially
866
 * a custom allocator for talloc to reduce fragmentation.
867
 */
868
869
_PUBLIC_ void *_talloc_pooled_object(const void *ctx,
870
             size_t type_size,
871
             const char *type_name,
872
             unsigned num_subobjects,
873
             size_t total_subobjects_size)
874
0
{
875
0
  size_t poolsize, subobjects_slack, tmp;
876
0
  struct talloc_chunk *tc;
877
0
  struct talloc_pool_hdr *pool_hdr;
878
0
  void *ret;
879
880
0
  poolsize = type_size + total_subobjects_size;
881
882
0
  if ((poolsize < type_size) || (poolsize < total_subobjects_size)) {
883
0
    goto overflow;
884
0
  }
885
886
0
  if (num_subobjects == UINT_MAX) {
887
0
    goto overflow;
888
0
  }
889
0
  num_subobjects += 1;       /* the object body itself */
890
891
  /*
892
   * Alignment can increase the pool size by at most 15 bytes per object
893
   * plus alignment for the object itself
894
   */
895
0
  subobjects_slack = (TC_HDR_SIZE + TP_HDR_SIZE + 15) * num_subobjects;
896
0
  if (subobjects_slack < num_subobjects) {
897
0
    goto overflow;
898
0
  }
899
900
0
  tmp = poolsize + subobjects_slack;
901
0
  if ((tmp < poolsize) || (tmp < subobjects_slack)) {
902
0
    goto overflow;
903
0
  }
904
0
  poolsize = tmp;
905
906
0
  ret = _talloc_pool(ctx, poolsize);
907
0
  if (ret == NULL) {
908
0
    return NULL;
909
0
  }
910
911
0
  tc = talloc_chunk_from_ptr(ret);
912
0
  tc->size = type_size;
913
914
0
  pool_hdr = talloc_pool_from_chunk(tc);
915
916
#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
917
  VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, type_size);
918
#endif
919
920
0
  pool_hdr->end = ((char *)pool_hdr->end + TC_ALIGN16(type_size));
921
922
0
  _tc_set_name_const(tc, type_name);
923
0
  return ret;
924
925
0
overflow:
926
0
  return NULL;
927
0
}
928
929
/*
930
  setup a destructor to be called on free of a pointer
931
  the destructor should return 0 on success, or -1 on failure.
932
  if the destructor fails then the free is failed, and the memory can
933
  be continued to be used
934
*/
935
_PUBLIC_ void _talloc_set_destructor(const void *ptr, int (*destructor)(void *))
936
61.9k
{
937
61.9k
  struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
938
61.9k
  tc->destructor = destructor;
939
61.9k
}
940
941
/*
942
  increase the reference count on a piece of memory.
943
*/
944
_PUBLIC_ int talloc_increase_ref_count(const void *ptr)
945
0
{
946
0
  if (unlikely(!talloc_reference(null_context, ptr))) {
947
0
    return -1;
948
0
  }
949
0
  return 0;
950
0
}
951
952
/*
953
  helper for talloc_reference()
954
955
  this is referenced by a function pointer and should not be inline
956
*/
957
static int talloc_reference_destructor(struct talloc_reference_handle *handle)
958
0
{
959
0
  struct talloc_chunk *ptr_tc = talloc_chunk_from_ptr(handle->ptr);
960
0
  _TLIST_REMOVE(ptr_tc->refs, handle);
961
0
  return 0;
962
0
}
963
964
/*
965
   more efficient way to add a name to a pointer - the name must point to a
966
   true string constant
967
*/
968
static inline void _tc_set_name_const(struct talloc_chunk *tc,
969
          const char *name)
970
168M
{
971
168M
  tc->name = name;
972
168M
}
973
974
/*
975
  internal talloc_named_const()
976
*/
977
static inline void *_talloc_named_const(const void *context, size_t size, const char *name)
978
101M
{
979
101M
  void *ptr;
980
101M
  struct talloc_chunk *tc = NULL;
981
982
101M
  ptr = __talloc(context, size, &tc);
983
101M
  if (unlikely(ptr == NULL)) {
984
0
    return NULL;
985
0
  }
986
987
101M
  _tc_set_name_const(tc, name);
988
989
101M
  return ptr;
990
101M
}
991
992
/*
993
  make a secondary reference to a pointer, hanging off the given context.
994
  the pointer remains valid until both the original caller and this given
995
  context are freed.
996
997
  the major use for this is when two different structures need to reference the
998
  same underlying data, and you want to be able to free the two instances separately,
999
  and in either order
1000
*/
1001
_PUBLIC_ void *_talloc_reference_loc(const void *context, const void *ptr, const char *location)
1002
0
{
1003
0
  struct talloc_chunk *tc;
1004
0
  struct talloc_reference_handle *handle;
1005
0
  if (unlikely(ptr == NULL)) return NULL;
1006
1007
0
  tc = talloc_chunk_from_ptr(ptr);
1008
0
  handle = (struct talloc_reference_handle *)_talloc_named_const(context,
1009
0
               sizeof(struct talloc_reference_handle),
1010
0
               TALLOC_MAGIC_REFERENCE);
1011
0
  if (unlikely(handle == NULL)) return NULL;
1012
1013
  /* note that we hang the destructor off the handle, not the
1014
     main context as that allows the caller to still setup their
1015
     own destructor on the context if they want to */
1016
0
  talloc_set_destructor(handle, talloc_reference_destructor);
1017
0
  handle->ptr = discard_const_p(void, ptr);
1018
0
  handle->location = location;
1019
0
  _TLIST_ADD(tc->refs, handle);
1020
0
  return handle->ptr;
1021
0
}
1022
1023
static void *_talloc_steal_internal(const void *new_ctx, const void *ptr);
1024
1025
static inline void _tc_free_poolmem(struct talloc_chunk *tc,
1026
          const char *location)
1027
0
{
1028
0
  struct talloc_pool_hdr *pool;
1029
0
  struct talloc_chunk *pool_tc;
1030
0
  void *next_tc;
1031
1032
0
  pool = tc->pool;
1033
0
  pool_tc = talloc_chunk_from_pool(pool);
1034
0
  next_tc = tc_next_chunk(tc);
1035
1036
0
  _talloc_chunk_set_free(tc, location);
1037
1038
0
  TC_INVALIDATE_FULL_CHUNK(tc);
1039
1040
0
  if (unlikely(pool->object_count == 0)) {
1041
0
    talloc_abort("Pool object count zero!");
1042
0
    return;
1043
0
  }
1044
1045
0
  pool->object_count--;
1046
1047
0
  if (unlikely(pool->object_count == 1
1048
0
         && !(pool_tc->flags & TALLOC_FLAG_FREE))) {
1049
    /*
1050
     * if there is just one object left in the pool
1051
     * and pool->flags does not have TALLOC_FLAG_FREE,
1052
     * it means this is the pool itself and
1053
     * the rest is available for new objects
1054
     * again.
1055
     */
1056
0
    pool->end = tc_pool_first_chunk(pool);
1057
0
    tc_invalidate_pool(pool);
1058
0
    return;
1059
0
  }
1060
1061
0
  if (unlikely(pool->object_count == 0)) {
1062
    /*
1063
     * we mark the freed memory with where we called the free
1064
     * from. This means on a double free error we can report where
1065
     * the first free came from
1066
     */
1067
0
    pool_tc->name = location;
1068
1069
0
    if (pool_tc->flags & TALLOC_FLAG_POOLMEM) {
1070
0
      _tc_free_poolmem(pool_tc, location);
1071
0
    } else {
1072
      /*
1073
       * The tc_memlimit_update_on_free()
1074
       * call takes into account the
1075
       * prefix TP_HDR_SIZE allocated before
1076
       * the pool talloc_chunk.
1077
       */
1078
0
      tc_memlimit_update_on_free(pool_tc);
1079
0
      TC_INVALIDATE_FULL_CHUNK(pool_tc);
1080
0
      free(pool);
1081
0
    }
1082
0
    return;
1083
0
  }
1084
1085
0
  if (pool->end == next_tc) {
1086
    /*
1087
     * if pool->pool still points to end of
1088
     * 'tc' (which is stored in the 'next_tc' variable),
1089
     * we can reclaim the memory of 'tc'.
1090
     */
1091
0
    pool->end = tc;
1092
0
    return;
1093
0
  }
1094
1095
  /*
1096
   * Do nothing. The memory is just "wasted", waiting for the pool
1097
   * itself to be freed.
1098
   */
1099
0
}
1100
1101
static inline void _tc_free_children_internal(struct talloc_chunk *tc,
1102
              void *ptr,
1103
              const char *location);
1104
1105
static inline int _talloc_free_internal(void *ptr, const char *location);
1106
1107
/*
1108
   internal free call that takes a struct talloc_chunk *.
1109
*/
1110
static inline int _tc_free_internal(struct talloc_chunk *tc,
1111
        const char *location)
1112
121M
{
1113
121M
  void *ptr_to_free;
1114
121M
  void *ptr = TC_PTR_FROM_CHUNK(tc);
1115
1116
121M
  if (unlikely(tc->refs)) {
1117
0
    int is_child;
1118
    /* check if this is a reference from a child or
1119
     * grandchild back to it's parent or grandparent
1120
     *
1121
     * in that case we need to remove the reference and
1122
     * call another instance of talloc_free() on the current
1123
     * pointer.
1124
     */
1125
0
    is_child = talloc_is_parent(tc->refs, ptr);
1126
0
    _talloc_free_internal(tc->refs, location);
1127
0
    if (is_child) {
1128
0
      return _talloc_free_internal(ptr, location);
1129
0
    }
1130
0
    return -1;
1131
0
  }
1132
1133
121M
  if (unlikely(tc->flags & TALLOC_FLAG_LOOP)) {
1134
    /* we have a free loop - stop looping */
1135
0
    return 0;
1136
0
  }
1137
1138
121M
  if (unlikely(tc->destructor)) {
1139
61.5k
    talloc_destructor_t d = tc->destructor;
1140
1141
    /*
1142
     * Protect the destructor against some overwrite
1143
     * attacks, by explicitly checking it has the right
1144
     * magic here.
1145
     */
1146
61.5k
    if (talloc_chunk_from_ptr(ptr) != tc) {
1147
      /*
1148
       * This can't actually happen, the
1149
       * call itself will panic.
1150
       */
1151
0
      TALLOC_ABORT("talloc_chunk_from_ptr failed!");
1152
0
    }
1153
1154
61.5k
    if (d == (talloc_destructor_t)-1) {
1155
0
      return -1;
1156
0
    }
1157
61.5k
    tc->destructor = (talloc_destructor_t)-1;
1158
61.5k
    if (d(ptr) == -1) {
1159
      /*
1160
       * Only replace the destructor pointer if
1161
       * calling the destructor didn't modify it.
1162
       */
1163
0
      if (tc->destructor == (talloc_destructor_t)-1) {
1164
0
        tc->destructor = d;
1165
0
      }
1166
0
      return -1;
1167
0
    }
1168
61.5k
    tc->destructor = NULL;
1169
61.5k
  }
1170
1171
121M
  if (tc->parent) {
1172
112M
    _TLIST_REMOVE(tc->parent->child, tc);
1173
112M
    if (tc->parent->child) {
1174
96.5M
      tc->parent->child->parent = tc->parent;
1175
96.5M
    }
1176
112M
  } else {
1177
9.28M
    if (tc->prev) tc->prev->next = tc->next;
1178
9.28M
    if (tc->next) tc->next->prev = tc->prev;
1179
9.28M
    tc->prev = tc->next = NULL;
1180
9.28M
  }
1181
1182
121M
  tc->flags |= TALLOC_FLAG_LOOP;
1183
1184
121M
  _tc_free_children_internal(tc, ptr, location);
1185
1186
121M
  _talloc_chunk_set_free(tc, location);
1187
1188
121M
  if (tc->flags & TALLOC_FLAG_POOL) {
1189
0
    struct talloc_pool_hdr *pool;
1190
1191
0
    pool = talloc_pool_from_chunk(tc);
1192
1193
0
    if (unlikely(pool->object_count == 0)) {
1194
0
      talloc_abort("Pool object count zero!");
1195
0
      return 0;
1196
0
    }
1197
1198
0
    pool->object_count--;
1199
1200
0
    if (likely(pool->object_count != 0)) {
1201
0
      return 0;
1202
0
    }
1203
1204
    /*
1205
     * With object_count==0, a pool becomes a normal piece of
1206
     * memory to free. If it's allocated inside a pool, it needs
1207
     * to be freed as poolmem, else it needs to be just freed.
1208
    */
1209
0
    ptr_to_free = pool;
1210
121M
  } else {
1211
121M
    ptr_to_free = tc;
1212
121M
  }
1213
1214
121M
  if (tc->flags & TALLOC_FLAG_POOLMEM) {
1215
0
    _tc_free_poolmem(tc, location);
1216
0
    return 0;
1217
0
  }
1218
1219
121M
  tc_memlimit_update_on_free(tc);
1220
1221
121M
  TC_INVALIDATE_FULL_CHUNK(tc);
1222
121M
  free(ptr_to_free);
1223
121M
  return 0;
1224
121M
}
1225
1226
/*
1227
   internal talloc_free call
1228
*/
1229
static inline int _talloc_free_internal(void *ptr, const char *location)
1230
17.4M
{
1231
17.4M
  struct talloc_chunk *tc;
1232
1233
17.4M
  if (unlikely(ptr == NULL)) {
1234
0
    return -1;
1235
0
  }
1236
1237
  /* possibly initialised the talloc fill value */
1238
17.4M
  if (unlikely(!talloc_fill.initialised)) {
1239
159
    const char *fill = getenv(TALLOC_FILL_ENV);
1240
159
    if (fill != NULL) {
1241
0
      talloc_fill.enabled = true;
1242
0
      talloc_fill.fill_value = strtoul(fill, NULL, 0);
1243
0
    }
1244
159
    talloc_fill.initialised = true;
1245
159
  }
1246
1247
17.4M
  tc = talloc_chunk_from_ptr(ptr);
1248
17.4M
  return _tc_free_internal(tc, location);
1249
17.4M
}
1250
1251
static inline size_t _talloc_total_limit_size(const void *ptr,
1252
          struct talloc_memlimit *old_limit,
1253
          struct talloc_memlimit *new_limit);
1254
1255
/*
1256
   move a lump of memory from one talloc context to another returning the
1257
   ptr on success, or NULL if it could not be transferred.
1258
   passing NULL as ptr will always return NULL with no side effects.
1259
*/
1260
static void *_talloc_steal_internal(const void *new_ctx, const void *ptr)
1261
1.03M
{
1262
1.03M
  struct talloc_chunk *tc, *new_tc;
1263
1.03M
  size_t ctx_size = 0;
1264
1265
1.03M
  if (unlikely(!ptr)) {
1266
0
    return NULL;
1267
0
  }
1268
1269
1.03M
  if (unlikely(new_ctx == NULL)) {
1270
852k
    new_ctx = null_context;
1271
852k
  }
1272
1273
1.03M
  tc = talloc_chunk_from_ptr(ptr);
1274
1275
1.03M
  if (tc->limit != NULL) {
1276
1277
0
    ctx_size = _talloc_total_limit_size(ptr, NULL, NULL);
1278
1279
    /* Decrement the memory limit from the source .. */
1280
0
    talloc_memlimit_shrink(tc->limit->upper, ctx_size);
1281
1282
0
    if (tc->limit->parent == tc) {
1283
0
      tc->limit->upper = NULL;
1284
0
    } else {
1285
0
      tc->limit = NULL;
1286
0
    }
1287
0
  }
1288
1289
1.03M
  if (unlikely(new_ctx == NULL)) {
1290
852k
    if (tc->parent) {
1291
832k
      _TLIST_REMOVE(tc->parent->child, tc);
1292
832k
      if (tc->parent->child) {
1293
0
        tc->parent->child->parent = tc->parent;
1294
0
      }
1295
832k
    } else {
1296
19.9k
      if (tc->prev) tc->prev->next = tc->next;
1297
19.9k
      if (tc->next) tc->next->prev = tc->prev;
1298
19.9k
    }
1299
1300
852k
    tc->parent = tc->next = tc->prev = NULL;
1301
852k
    return discard_const_p(void, ptr);
1302
852k
  }
1303
1304
179k
  new_tc = talloc_chunk_from_ptr(new_ctx);
1305
1306
179k
  if (unlikely(tc == new_tc || tc->parent == new_tc)) {
1307
8.59k
    return discard_const_p(void, ptr);
1308
8.59k
  }
1309
1310
170k
  if (tc->parent) {
1311
89.4k
    _TLIST_REMOVE(tc->parent->child, tc);
1312
89.4k
    if (tc->parent->child) {
1313
49.8k
      tc->parent->child->parent = tc->parent;
1314
49.8k
    }
1315
89.4k
  } else {
1316
81.0k
    if (tc->prev) tc->prev->next = tc->next;
1317
81.0k
    if (tc->next) tc->next->prev = tc->prev;
1318
81.0k
    tc->prev = tc->next = NULL;
1319
81.0k
  }
1320
1321
170k
  tc->parent = new_tc;
1322
170k
  if (new_tc->child) new_tc->child->parent = NULL;
1323
170k
  _TLIST_ADD(new_tc->child, tc);
1324
1325
170k
  if (tc->limit || new_tc->limit) {
1326
0
    ctx_size = _talloc_total_limit_size(ptr, tc->limit,
1327
0
                new_tc->limit);
1328
    /* .. and increment it in the destination. */
1329
0
    if (new_tc->limit) {
1330
0
      talloc_memlimit_grow(new_tc->limit, ctx_size);
1331
0
    }
1332
0
  }
1333
1334
170k
  return discard_const_p(void, ptr);
1335
179k
}
1336
1337
/*
1338
   move a lump of memory from one talloc context to another returning the
1339
   ptr on success, or NULL if it could not be transferred.
1340
   passing NULL as ptr will always return NULL with no side effects.
1341
*/
1342
_PUBLIC_ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location)
1343
986k
{
1344
986k
  struct talloc_chunk *tc;
1345
1346
986k
  if (unlikely(ptr == NULL)) {
1347
861
    return NULL;
1348
861
  }
1349
1350
985k
  tc = talloc_chunk_from_ptr(ptr);
1351
1352
985k
  if (unlikely(tc->refs != NULL) && talloc_parent(ptr) != new_ctx) {
1353
0
    struct talloc_reference_handle *h;
1354
1355
0
    talloc_log("WARNING: talloc_steal with references at %s\n",
1356
0
         location);
1357
1358
0
    for (h=tc->refs; h; h=h->next) {
1359
0
      talloc_log("\treference at %s\n",
1360
0
           h->location);
1361
0
    }
1362
0
  }
1363
1364
#if 0
1365
  /* this test is probably too expensive to have on in the
1366
     normal build, but it useful for debugging */
1367
  if (talloc_is_parent(new_ctx, ptr)) {
1368
    talloc_log("WARNING: stealing into talloc child at %s\n", location);
1369
  }
1370
#endif
1371
1372
985k
  return _talloc_steal_internal(new_ctx, ptr);
1373
986k
}
1374
1375
/*
1376
   this is like a talloc_steal(), but you must supply the old
1377
   parent. This resolves the ambiguity in a talloc_steal() which is
1378
   called on a context that has more than one parent (via references)
1379
1380
   The old parent can be either a reference or a parent
1381
*/
1382
_PUBLIC_ void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr)
1383
45.5k
{
1384
45.5k
  struct talloc_chunk *tc;
1385
45.5k
  struct talloc_reference_handle *h;
1386
1387
45.5k
  if (unlikely(ptr == NULL)) {
1388
0
    return NULL;
1389
0
  }
1390
1391
45.5k
  if (old_parent == talloc_parent(ptr)) {
1392
45.5k
    return _talloc_steal_internal(new_parent, ptr);
1393
45.5k
  }
1394
1395
0
  tc = talloc_chunk_from_ptr(ptr);
1396
0
  for (h=tc->refs;h;h=h->next) {
1397
0
    if (talloc_parent(h) == old_parent) {
1398
0
      if (_talloc_steal_internal(new_parent, h) != h) {
1399
0
        return NULL;
1400
0
      }
1401
0
      return discard_const_p(void, ptr);
1402
0
    }
1403
0
  }
1404
1405
  /* it wasn't a parent */
1406
0
  return NULL;
1407
0
}
1408
1409
/*
1410
  remove a secondary reference to a pointer. This undo's what
1411
  talloc_reference() has done. The context and pointer arguments
1412
  must match those given to a talloc_reference()
1413
*/
1414
static inline int talloc_unreference(const void *context, const void *ptr)
1415
0
{
1416
0
  struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1417
0
  struct talloc_reference_handle *h;
1418
1419
0
  if (unlikely(context == NULL)) {
1420
0
    context = null_context;
1421
0
  }
1422
1423
0
  for (h=tc->refs;h;h=h->next) {
1424
0
    struct talloc_chunk *p = talloc_parent_chunk(h);
1425
0
    if (p == NULL) {
1426
0
      if (context == NULL) break;
1427
0
    } else if (TC_PTR_FROM_CHUNK(p) == context) {
1428
0
      break;
1429
0
    }
1430
0
  }
1431
0
  if (h == NULL) {
1432
0
    return -1;
1433
0
  }
1434
1435
0
  return _talloc_free_internal(h, __location__);
1436
0
}
1437
1438
/*
1439
  remove a specific parent context from a pointer. This is a more
1440
  controlled variant of talloc_free()
1441
*/
1442
1443
/* coverity[ -tainted_data_sink : arg-1 ] */
1444
_PUBLIC_ int talloc_unlink(const void *context, void *ptr)
1445
0
{
1446
0
  struct talloc_chunk *tc_p, *new_p, *tc_c;
1447
0
  void *new_parent;
1448
1449
0
  if (ptr == NULL) {
1450
0
    return -1;
1451
0
  }
1452
1453
0
  if (context == NULL) {
1454
0
    context = null_context;
1455
0
  }
1456
1457
0
  if (talloc_unreference(context, ptr) == 0) {
1458
0
    return 0;
1459
0
  }
1460
1461
0
  if (context != NULL) {
1462
0
    tc_c = talloc_chunk_from_ptr(context);
1463
0
  } else {
1464
0
    tc_c = NULL;
1465
0
  }
1466
0
  if (tc_c != talloc_parent_chunk(ptr)) {
1467
0
    return -1;
1468
0
  }
1469
1470
0
  tc_p = talloc_chunk_from_ptr(ptr);
1471
1472
0
  if (tc_p->refs == NULL) {
1473
0
    return _talloc_free_internal(ptr, __location__);
1474
0
  }
1475
1476
0
  new_p = talloc_parent_chunk(tc_p->refs);
1477
0
  if (new_p) {
1478
0
    new_parent = TC_PTR_FROM_CHUNK(new_p);
1479
0
  } else {
1480
0
    new_parent = NULL;
1481
0
  }
1482
1483
0
  if (talloc_unreference(new_parent, ptr) != 0) {
1484
0
    return -1;
1485
0
  }
1486
1487
0
  _talloc_steal_internal(new_parent, ptr);
1488
1489
0
  return 0;
1490
0
}
1491
1492
/*
1493
  add a name to an existing pointer - va_list version
1494
*/
1495
static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1496
        const char *fmt,
1497
        va_list ap) PRINTF_ATTRIBUTE(2,0);
1498
1499
static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1500
        const char *fmt,
1501
        va_list ap)
1502
811k
{
1503
811k
  struct talloc_chunk *name_tc = _vasprintf_tc(TC_PTR_FROM_CHUNK(tc),
1504
811k
              fmt,
1505
811k
              ap);
1506
811k
  if (likely(name_tc)) {
1507
811k
    tc->name = TC_PTR_FROM_CHUNK(name_tc);
1508
811k
    _tc_set_name_const(name_tc, ".name");
1509
811k
  } else {
1510
0
    tc->name = NULL;
1511
0
  }
1512
811k
  return tc->name;
1513
811k
}
1514
1515
/*
1516
  add a name to an existing pointer
1517
*/
1518
_PUBLIC_ const char *talloc_set_name(const void *ptr, const char *fmt, ...)
1519
0
{
1520
0
  struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1521
0
  const char *name;
1522
0
  va_list ap;
1523
0
  va_start(ap, fmt);
1524
0
  name = tc_set_name_v(tc, fmt, ap);
1525
0
  va_end(ap);
1526
0
  return name;
1527
0
}
1528
1529
1530
/*
1531
  create a named talloc pointer. Any talloc pointer can be named, and
1532
  talloc_named() operates just like talloc() except that it allows you
1533
  to name the pointer.
1534
*/
1535
_PUBLIC_ void *talloc_named(const void *context, size_t size, const char *fmt, ...)
1536
305
{
1537
305
  va_list ap;
1538
305
  void *ptr;
1539
305
  const char *name;
1540
305
  struct talloc_chunk *tc = NULL;
1541
1542
305
  ptr = __talloc(context, size, &tc);
1543
305
  if (unlikely(ptr == NULL)) return NULL;
1544
1545
305
  va_start(ap, fmt);
1546
305
  name = tc_set_name_v(tc, fmt, ap);
1547
305
  va_end(ap);
1548
1549
305
  if (unlikely(name == NULL)) {
1550
0
    _talloc_free_internal(ptr, __location__);
1551
0
    return NULL;
1552
0
  }
1553
1554
305
  return ptr;
1555
305
}
1556
1557
/*
1558
  return the name of a talloc ptr, or "UNNAMED"
1559
*/
1560
static inline const char *__talloc_get_name(const void *ptr)
1561
0
{
1562
0
  struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1563
0
  if (unlikely(tc->name == TALLOC_MAGIC_REFERENCE)) {
1564
0
    return ".reference";
1565
0
  }
1566
0
  if (likely(tc->name)) {
1567
0
    return tc->name;
1568
0
  }
1569
0
  return "UNNAMED";
1570
0
}
1571
1572
_PUBLIC_ const char *talloc_get_name(const void *ptr)
1573
0
{
1574
0
  return __talloc_get_name(ptr);
1575
0
}
1576
1577
/*
1578
  check if a pointer has the given name. If it does, return the pointer,
1579
  otherwise return NULL
1580
*/
1581
_PUBLIC_ void *talloc_check_name(const void *ptr, const char *name)
1582
0
{
1583
0
  const char *pname;
1584
0
  if (unlikely(ptr == NULL)) return NULL;
1585
0
  pname = __talloc_get_name(ptr);
1586
0
  if (likely(pname == name || strcmp(pname, name) == 0)) {
1587
0
    return discard_const_p(void, ptr);
1588
0
  }
1589
0
  return NULL;
1590
0
}
1591
1592
static void talloc_abort_type_mismatch(const char *location,
1593
          const char *name,
1594
          const char *expected)
1595
0
{
1596
0
  const char *reason;
1597
1598
0
  reason = talloc_asprintf(NULL,
1599
0
         "%s: Type mismatch: name[%s] expected[%s]",
1600
0
         location,
1601
0
         name?name:"NULL",
1602
0
         expected);
1603
0
  if (!reason) {
1604
0
    reason = "Type mismatch";
1605
0
  }
1606
1607
0
  talloc_abort(reason);
1608
0
}
1609
1610
_PUBLIC_ void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location)
1611
0
{
1612
0
  const char *pname;
1613
1614
0
  if (unlikely(ptr == NULL)) {
1615
0
    talloc_abort_type_mismatch(location, NULL, name);
1616
0
    return NULL;
1617
0
  }
1618
1619
0
  pname = __talloc_get_name(ptr);
1620
0
  if (likely(pname == name || strcmp(pname, name) == 0)) {
1621
0
    return discard_const_p(void, ptr);
1622
0
  }
1623
1624
0
  talloc_abort_type_mismatch(location, pname, name);
1625
0
  return NULL;
1626
0
}
1627
1628
/*
1629
  this is for compatibility with older versions of talloc
1630
*/
1631
_PUBLIC_ void *talloc_init(const char *fmt, ...)
1632
811k
{
1633
811k
  va_list ap;
1634
811k
  void *ptr;
1635
811k
  const char *name;
1636
811k
  struct talloc_chunk *tc = NULL;
1637
1638
811k
  ptr = __talloc(NULL, 0, &tc);
1639
811k
  if (unlikely(ptr == NULL)) return NULL;
1640
1641
811k
  va_start(ap, fmt);
1642
811k
  name = tc_set_name_v(tc, fmt, ap);
1643
811k
  va_end(ap);
1644
1645
811k
  if (unlikely(name == NULL)) {
1646
0
    _talloc_free_internal(ptr, __location__);
1647
0
    return NULL;
1648
0
  }
1649
1650
811k
  return ptr;
1651
811k
}
1652
1653
static inline void _tc_free_children_internal(struct talloc_chunk *tc,
1654
              void *ptr,
1655
              const char *location)
1656
121M
{
1657
226M
  while (tc->child) {
1658
    /* we need to work out who will own an abandoned child
1659
       if it cannot be freed. In priority order, the first
1660
       choice is owner of any remaining reference to this
1661
       pointer, the second choice is our parent, and the
1662
       final choice is the null context. */
1663
104M
    void *child = TC_PTR_FROM_CHUNK(tc->child);
1664
104M
    const void *new_parent = null_context;
1665
104M
    if (unlikely(tc->child->refs)) {
1666
0
      struct talloc_chunk *p = talloc_parent_chunk(tc->child->refs);
1667
0
      if (p) new_parent = TC_PTR_FROM_CHUNK(p);
1668
0
    }
1669
104M
    if (unlikely(_tc_free_internal(tc->child, location) == -1)) {
1670
0
      if (talloc_parent_chunk(child) != tc) {
1671
        /*
1672
         * Destructor already reparented this child.
1673
         * No further reparenting needed.
1674
         */
1675
0
        continue;
1676
0
      }
1677
0
      if (new_parent == null_context) {
1678
0
        struct talloc_chunk *p = talloc_parent_chunk(ptr);
1679
0
        if (p) new_parent = TC_PTR_FROM_CHUNK(p);
1680
0
      }
1681
0
      _talloc_steal_internal(new_parent, child);
1682
0
    }
1683
104M
  }
1684
121M
}
1685
1686
/*
1687
  this is a replacement for the Samba3 talloc_destroy_pool functionality. It
1688
  should probably not be used in new code. It's in here to keep the talloc
1689
  code consistent across Samba 3 and 4.
1690
*/
1691
_PUBLIC_ void talloc_free_children(void *ptr)
1692
61.5k
{
1693
61.5k
  struct talloc_chunk *tc_name = NULL;
1694
61.5k
  struct talloc_chunk *tc;
1695
1696
61.5k
  if (unlikely(ptr == NULL)) {
1697
0
    return;
1698
0
  }
1699
1700
61.5k
  tc = talloc_chunk_from_ptr(ptr);
1701
1702
  /* we do not want to free the context name if it is a child .. */
1703
61.5k
  if (likely(tc->child)) {
1704
128k
    for (tc_name = tc->child; tc_name; tc_name = tc_name->next) {
1705
68.4k
      if (tc->name == TC_PTR_FROM_CHUNK(tc_name)) break;
1706
68.4k
    }
1707
59.6k
    if (tc_name) {
1708
0
      _TLIST_REMOVE(tc->child, tc_name);
1709
0
      if (tc->child) {
1710
0
        tc->child->parent = tc;
1711
0
      }
1712
0
    }
1713
59.6k
  }
1714
1715
61.5k
  _tc_free_children_internal(tc, ptr, __location__);
1716
1717
  /* .. so we put it back after all other children have been freed */
1718
61.5k
  if (tc_name) {
1719
0
    if (tc->child) {
1720
0
      tc->child->parent = NULL;
1721
0
    }
1722
0
    tc_name->parent = tc;
1723
0
    _TLIST_ADD(tc->child, tc_name);
1724
0
  }
1725
61.5k
}
1726
1727
/*
1728
   Allocate a bit of memory as a child of an existing pointer
1729
*/
1730
_PUBLIC_ void *_talloc(const void *context, size_t size)
1731
0
{
1732
0
  struct talloc_chunk *tc;
1733
0
  return __talloc(context, size, &tc);
1734
0
}
1735
1736
/*
1737
  externally callable talloc_set_name_const()
1738
*/
1739
_PUBLIC_ void talloc_set_name_const(const void *ptr, const char *name)
1740
2.83M
{
1741
2.83M
  _tc_set_name_const(talloc_chunk_from_ptr(ptr), name);
1742
2.83M
}
1743
1744
/*
1745
  create a named talloc pointer. Any talloc pointer can be named, and
1746
  talloc_named() operates just like talloc() except that it allows you
1747
  to name the pointer.
1748
*/
1749
_PUBLIC_ void *talloc_named_const(const void *context, size_t size, const char *name)
1750
26.7M
{
1751
26.7M
  return _talloc_named_const(context, size, name);
1752
26.7M
}
1753
1754
/*
1755
   free a talloc pointer. This also frees all child pointers of this
1756
   pointer recursively
1757
1758
   return 0 if the memory is actually freed, otherwise -1. The memory
1759
   will not be freed if the ref_count is > 1 or the destructor (if
1760
   any) returns non-zero
1761
*/
1762
_PUBLIC_ int _talloc_free(void *ptr, const char *location)
1763
17.4M
{
1764
17.4M
  struct talloc_chunk *tc;
1765
1766
17.4M
  if (unlikely(ptr == NULL)) {
1767
5.03k
    return -1;
1768
5.03k
  }
1769
1770
17.4M
  tc = talloc_chunk_from_ptr(ptr);
1771
1772
17.4M
  if (unlikely(tc->refs != NULL)) {
1773
0
    struct talloc_reference_handle *h;
1774
1775
0
    if (talloc_parent(ptr) == null_context && tc->refs->next == NULL) {
1776
      /* in this case we do know which parent should
1777
         get this pointer, as there is really only
1778
         one parent */
1779
0
      return talloc_unlink(null_context, ptr);
1780
0
    }
1781
1782
0
    talloc_log("ERROR: talloc_free with references at %s\n",
1783
0
         location);
1784
1785
0
    for (h=tc->refs; h; h=h->next) {
1786
0
      talloc_log("\treference at %s\n",
1787
0
           h->location);
1788
0
    }
1789
0
    return -1;
1790
0
  }
1791
1792
17.4M
  return _talloc_free_internal(ptr, location);
1793
17.4M
}
1794
1795
1796
1797
/*
1798
  A talloc version of realloc. The context argument is only used if
1799
  ptr is NULL
1800
*/
1801
_PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name)
1802
52.6M
{
1803
52.6M
  struct talloc_chunk *tc;
1804
52.6M
  void *new_ptr;
1805
52.6M
  bool malloced = false;
1806
52.6M
  struct talloc_pool_hdr *pool_hdr = NULL;
1807
52.6M
  size_t old_size = 0;
1808
52.6M
  size_t new_size = 0;
1809
1810
  /* size zero is equivalent to free() */
1811
52.6M
  if (unlikely(size == 0)) {
1812
0
    talloc_unlink(context, ptr);
1813
0
    return NULL;
1814
0
  }
1815
1816
52.6M
  if (unlikely(size >= MAX_TALLOC_SIZE)) {
1817
0
    return NULL;
1818
0
  }
1819
1820
  /* realloc(NULL) is equivalent to malloc() */
1821
52.6M
  if (ptr == NULL) {
1822
26.3M
    return _talloc_named_const(context, size, name);
1823
26.3M
  }
1824
1825
26.3M
  tc = talloc_chunk_from_ptr(ptr);
1826
1827
  /* don't allow realloc on referenced pointers */
1828
26.3M
  if (unlikely(tc->refs)) {
1829
0
    return NULL;
1830
0
  }
1831
1832
  /* don't let anybody try to realloc a talloc_pool */
1833
26.3M
  if (unlikely(tc->flags & TALLOC_FLAG_POOL)) {
1834
0
    return NULL;
1835
0
  }
1836
1837
  /* handle realloc inside a talloc_pool */
1838
26.3M
  if (unlikely(tc->flags & TALLOC_FLAG_POOLMEM)) {
1839
0
    pool_hdr = tc->pool;
1840
0
  }
1841
1842
  /* don't shrink if we have less than 1k to gain */
1843
26.3M
  if (size < tc->size && tc->limit == NULL) {
1844
410k
    if (pool_hdr) {
1845
0
      void *next_tc = tc_next_chunk(tc);
1846
0
      TC_INVALIDATE_SHRINK_CHUNK(tc, size);
1847
0
      tc->size = size;
1848
0
      if (next_tc == pool_hdr->end) {
1849
        /* note: tc->size has changed, so this works */
1850
0
        pool_hdr->end = tc_next_chunk(tc);
1851
0
      }
1852
0
      return ptr;
1853
410k
    } else if ((tc->size - size) < 1024) {
1854
      /*
1855
       * if we call TC_INVALIDATE_SHRINK_CHUNK() here
1856
       * we would need to call TC_UNDEFINE_GROW_CHUNK()
1857
       * after each realloc call, which slows down
1858
       * testing a lot :-(.
1859
       *
1860
       * That is why we only mark memory as undefined here.
1861
       */
1862
24.4k
      TC_UNDEFINE_SHRINK_CHUNK(tc, size);
1863
1864
      /* do not shrink if we have less than 1k to gain */
1865
24.4k
      tc->size = size;
1866
24.4k
      return ptr;
1867
24.4k
    }
1868
25.9M
  } else if (tc->size == size) {
1869
    /*
1870
     * do not change the pointer if it is exactly
1871
     * the same size.
1872
     */
1873
2.70k
    return ptr;
1874
2.70k
  }
1875
1876
  /*
1877
   * by resetting magic we catch users of the old memory
1878
   *
1879
   * We mark this memory as free, and also over-stamp the talloc
1880
   * magic with the old-style magic.
1881
   *
1882
   * Why?  This tries to avoid a memory read use-after-free from
1883
   * disclosing our talloc magic, which would then allow an
1884
   * attacker to prepare a valid header and so run a destructor.
1885
   *
1886
   * What else?  We have to re-stamp back a valid normal magic
1887
   * on this memory once realloc() is done, as it will have done
1888
   * a memcpy() into the new valid memory.  We can't do this in
1889
   * reverse as that would be a real use-after-free.
1890
   */
1891
26.3M
  _talloc_chunk_set_free(tc, NULL);
1892
1893
26.3M
  if (pool_hdr) {
1894
0
    struct talloc_chunk *pool_tc;
1895
0
    void *next_tc = tc_next_chunk(tc);
1896
0
    size_t old_chunk_size = TC_ALIGN16(TC_HDR_SIZE + tc->size);
1897
0
    size_t new_chunk_size = TC_ALIGN16(TC_HDR_SIZE + size);
1898
0
    size_t space_needed;
1899
0
    size_t space_left;
1900
0
    unsigned int chunk_count = pool_hdr->object_count;
1901
1902
0
    pool_tc = talloc_chunk_from_pool(pool_hdr);
1903
0
    if (!(pool_tc->flags & TALLOC_FLAG_FREE)) {
1904
0
      chunk_count -= 1;
1905
0
    }
1906
1907
0
    if (chunk_count == 1) {
1908
      /*
1909
       * optimize for the case where 'tc' is the only
1910
       * chunk in the pool.
1911
       */
1912
0
      char *start = tc_pool_first_chunk(pool_hdr);
1913
0
      space_needed = new_chunk_size;
1914
0
      space_left = (char *)tc_pool_end(pool_hdr) - start;
1915
1916
0
      if (space_left >= space_needed) {
1917
0
        size_t old_used = TC_HDR_SIZE + tc->size;
1918
0
        size_t new_used = TC_HDR_SIZE + size;
1919
0
        new_ptr = start;
1920
1921
#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
1922
        {
1923
          /*
1924
           * The area from
1925
           * start -> tc may have
1926
           * been freed and thus been marked as
1927
           * VALGRIND_MEM_NOACCESS. Set it to
1928
           * VALGRIND_MEM_UNDEFINED so we can
1929
           * copy into it without valgrind errors.
1930
           * We can't just mark
1931
           * new_ptr -> new_ptr + old_used
1932
           * as this may overlap on top of tc,
1933
           * (which is why we use memmove, not
1934
           * memcpy below) hence the MIN.
1935
           */
1936
          size_t undef_len = MIN((((char *)tc) - ((char *)new_ptr)),old_used);
1937
          VALGRIND_MAKE_MEM_UNDEFINED(new_ptr, undef_len);
1938
        }
1939
#endif
1940
1941
0
        memmove(new_ptr, tc, old_used);
1942
1943
0
        tc = (struct talloc_chunk *)new_ptr;
1944
0
        TC_UNDEFINE_GROW_CHUNK(tc, size);
1945
1946
        /*
1947
         * first we do not align the pool pointer
1948
         * because we want to invalidate the padding
1949
         * too.
1950
         */
1951
0
        pool_hdr->end = new_used + (char *)new_ptr;
1952
0
        tc_invalidate_pool(pool_hdr);
1953
1954
        /* now the aligned pointer */
1955
0
        pool_hdr->end = new_chunk_size + (char *)new_ptr;
1956
0
        goto got_new_ptr;
1957
0
      }
1958
1959
0
      next_tc = NULL;
1960
0
    }
1961
1962
0
    if (new_chunk_size == old_chunk_size) {
1963
0
      TC_UNDEFINE_GROW_CHUNK(tc, size);
1964
0
      _talloc_chunk_set_not_free(tc);
1965
0
      tc->size = size;
1966
0
      return ptr;
1967
0
    }
1968
1969
0
    if (next_tc == pool_hdr->end) {
1970
      /*
1971
       * optimize for the case where 'tc' is the last
1972
       * chunk in the pool.
1973
       */
1974
0
      space_needed = new_chunk_size - old_chunk_size;
1975
0
      space_left = tc_pool_space_left(pool_hdr);
1976
1977
0
      if (space_left >= space_needed) {
1978
0
        TC_UNDEFINE_GROW_CHUNK(tc, size);
1979
0
        _talloc_chunk_set_not_free(tc);
1980
0
        tc->size = size;
1981
0
        pool_hdr->end = tc_next_chunk(tc);
1982
0
        return ptr;
1983
0
      }
1984
0
    }
1985
1986
0
    new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
1987
1988
0
    if (new_ptr == NULL) {
1989
      /*
1990
       * Couldn't allocate from pool (pool size
1991
       * counts as already allocated for memlimit
1992
       * purposes). We must check memory limit
1993
       * before any real malloc.
1994
       */
1995
0
      if (tc->limit) {
1996
        /*
1997
         * Note we're doing an extra malloc,
1998
         * on top of the pool size, so account
1999
         * for size only, not the difference
2000
         * between old and new size.
2001
         */
2002
0
        if (!talloc_memlimit_check(tc->limit, size)) {
2003
0
          _talloc_chunk_set_not_free(tc);
2004
0
          errno = ENOMEM;
2005
0
          return NULL;
2006
0
        }
2007
0
      }
2008
0
      new_ptr = malloc(TC_HDR_SIZE+size);
2009
0
      malloced = true;
2010
0
      new_size = size;
2011
0
    }
2012
2013
0
    if (new_ptr) {
2014
0
      memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
2015
2016
0
      _tc_free_poolmem(tc, __location__ "_talloc_realloc");
2017
0
    }
2018
0
  }
2019
26.3M
  else {
2020
    /* We're doing realloc here, so record the difference. */
2021
26.3M
    old_size = tc->size;
2022
26.3M
    new_size = size;
2023
    /*
2024
     * We must check memory limit
2025
     * before any real realloc.
2026
     */
2027
26.3M
    if (tc->limit && (size > old_size)) {
2028
0
      if (!talloc_memlimit_check(tc->limit,
2029
0
          (size - old_size))) {
2030
0
        _talloc_chunk_set_not_free(tc);
2031
0
        errno = ENOMEM;
2032
0
        return NULL;
2033
0
      }
2034
0
    }
2035
26.3M
    new_ptr = realloc(tc, size + TC_HDR_SIZE);
2036
26.3M
  }
2037
26.3M
got_new_ptr:
2038
2039
26.3M
  if (unlikely(!new_ptr)) {
2040
    /*
2041
     * Ok, this is a strange spot.  We have to put back
2042
     * the old talloc_magic and any flags, except the
2043
     * TALLOC_FLAG_FREE as this was not free'ed by the
2044
     * realloc() call after all
2045
     */
2046
0
    _talloc_chunk_set_not_free(tc);
2047
0
    return NULL;
2048
0
  }
2049
2050
  /*
2051
   * tc is now the new value from realloc(), the old memory we
2052
   * can't access any more and was preemptively marked as
2053
   * TALLOC_FLAG_FREE before the call.  Now we mark it as not
2054
   * free again
2055
   */
2056
26.3M
  tc = (struct talloc_chunk *)new_ptr;
2057
26.3M
  _talloc_chunk_set_not_free(tc);
2058
26.3M
  if (malloced) {
2059
0
    tc->flags &= ~TALLOC_FLAG_POOLMEM;
2060
0
  }
2061
26.3M
  if (tc->parent) {
2062
18.4M
    tc->parent->child = tc;
2063
18.4M
  }
2064
26.3M
  if (tc->child) {
2065
290k
    tc->child->parent = tc;
2066
290k
  }
2067
2068
26.3M
  if (tc->prev) {
2069
7.85M
    tc->prev->next = tc;
2070
7.85M
  }
2071
26.3M
  if (tc->next) {
2072
24.5M
    tc->next->prev = tc;
2073
24.5M
  }
2074
2075
26.3M
  if (new_size > old_size) {
2076
25.9M
    talloc_memlimit_grow(tc->limit, new_size - old_size);
2077
25.9M
  } else if (new_size < old_size) {
2078
386k
    talloc_memlimit_shrink(tc->limit, old_size - new_size);
2079
386k
  }
2080
2081
26.3M
  tc->size = size;
2082
26.3M
  _tc_set_name_const(tc, name);
2083
2084
26.3M
  return TC_PTR_FROM_CHUNK(tc);
2085
26.3M
}
2086
2087
/*
2088
  a wrapper around talloc_steal() for situations where you are moving a pointer
2089
  between two structures, and want the old pointer to be set to NULL
2090
*/
2091
_PUBLIC_ void *_talloc_move(const void *new_ctx, const void *_pptr)
2092
0
{
2093
0
  const void **pptr = discard_const_p(const void *,_pptr);
2094
0
  void *ret = talloc_steal(new_ctx, discard_const_p(void, *pptr));
2095
0
  (*pptr) = NULL;
2096
0
  return ret;
2097
0
}
2098
2099
enum talloc_mem_count_type {
2100
  TOTAL_MEM_SIZE,
2101
  TOTAL_MEM_BLOCKS,
2102
  TOTAL_MEM_LIMIT,
2103
};
2104
2105
static inline size_t _talloc_total_mem_internal(const void *ptr,
2106
           enum talloc_mem_count_type type,
2107
           struct talloc_memlimit *old_limit,
2108
           struct talloc_memlimit *new_limit)
2109
61.5k
{
2110
61.5k
  size_t total = 0;
2111
61.5k
  struct talloc_chunk *c, *tc;
2112
2113
61.5k
  if (ptr == NULL) {
2114
0
    ptr = null_context;
2115
0
  }
2116
61.5k
  if (ptr == NULL) {
2117
0
    return 0;
2118
0
  }
2119
2120
61.5k
  tc = talloc_chunk_from_ptr(ptr);
2121
2122
61.5k
  if (old_limit || new_limit) {
2123
0
    if (tc->limit && tc->limit->upper == old_limit) {
2124
0
      tc->limit->upper = new_limit;
2125
0
    }
2126
0
  }
2127
2128
  /* optimize in the memlimits case */
2129
61.5k
  if (type == TOTAL_MEM_LIMIT &&
2130
61.5k
      tc->limit != NULL &&
2131
61.5k
      tc->limit != old_limit &&
2132
61.5k
      tc->limit->parent == tc) {
2133
0
    return tc->limit->cur_size;
2134
0
  }
2135
2136
61.5k
  if (tc->flags & TALLOC_FLAG_LOOP) {
2137
0
    return 0;
2138
0
  }
2139
2140
61.5k
  tc->flags |= TALLOC_FLAG_LOOP;
2141
2142
61.5k
  if (old_limit || new_limit) {
2143
0
    if (old_limit == tc->limit) {
2144
0
      tc->limit = new_limit;
2145
0
    }
2146
0
  }
2147
2148
61.5k
  switch (type) {
2149
0
  case TOTAL_MEM_SIZE:
2150
0
    if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
2151
0
      total = tc->size;
2152
0
    }
2153
0
    break;
2154
61.5k
  case TOTAL_MEM_BLOCKS:
2155
61.5k
    total++;
2156
61.5k
    break;
2157
0
  case TOTAL_MEM_LIMIT:
2158
0
    if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
2159
      /*
2160
       * Don't count memory allocated from a pool
2161
       * when calculating limits. Only count the
2162
       * pool itself.
2163
       */
2164
0
      if (!(tc->flags & TALLOC_FLAG_POOLMEM)) {
2165
0
        if (tc->flags & TALLOC_FLAG_POOL) {
2166
          /*
2167
           * If this is a pool, the allocated
2168
           * size is in the pool header, and
2169
           * remember to add in the prefix
2170
           * length.
2171
           */
2172
0
          struct talloc_pool_hdr *pool_hdr
2173
0
              = talloc_pool_from_chunk(tc);
2174
0
          total = pool_hdr->poolsize +
2175
0
              TC_HDR_SIZE +
2176
0
              TP_HDR_SIZE;
2177
0
        } else {
2178
0
          total = tc->size + TC_HDR_SIZE;
2179
0
        }
2180
0
      }
2181
0
    }
2182
0
    break;
2183
61.5k
  }
2184
61.5k
  for (c = tc->child; c; c = c->next) {
2185
0
    total += _talloc_total_mem_internal(TC_PTR_FROM_CHUNK(c), type,
2186
0
                old_limit, new_limit);
2187
0
  }
2188
2189
61.5k
  tc->flags &= ~TALLOC_FLAG_LOOP;
2190
2191
61.5k
  return total;
2192
61.5k
}
2193
2194
/*
2195
  return the total size of a talloc pool (subtree)
2196
*/
2197
_PUBLIC_ size_t talloc_total_size(const void *ptr)
2198
0
{
2199
0
  return _talloc_total_mem_internal(ptr, TOTAL_MEM_SIZE, NULL, NULL);
2200
0
}
2201
2202
/*
2203
  return the total number of blocks in a talloc pool (subtree)
2204
*/
2205
_PUBLIC_ size_t talloc_total_blocks(const void *ptr)
2206
61.5k
{
2207
61.5k
  return _talloc_total_mem_internal(ptr, TOTAL_MEM_BLOCKS, NULL, NULL);
2208
61.5k
}
2209
2210
/*
2211
  return the number of external references to a pointer
2212
*/
2213
_PUBLIC_ size_t talloc_reference_count(const void *ptr)
2214
0
{
2215
0
  struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
2216
0
  struct talloc_reference_handle *h;
2217
0
  size_t ret = 0;
2218
2219
0
  for (h=tc->refs;h;h=h->next) {
2220
0
    ret++;
2221
0
  }
2222
0
  return ret;
2223
0
}
2224
2225
/*
2226
  report on memory usage by all children of a pointer, giving a full tree view
2227
*/
2228
_PUBLIC_ void talloc_report_depth_cb(const void *ptr, int depth, int max_depth,
2229
          void (*callback)(const void *ptr,
2230
                 int depth, int max_depth,
2231
               int is_ref,
2232
               void *private_data),
2233
          void *private_data)
2234
0
{
2235
0
  struct talloc_chunk *c, *tc;
2236
2237
0
  if (ptr == NULL) {
2238
0
    ptr = null_context;
2239
0
  }
2240
0
  if (ptr == NULL) return;
2241
2242
0
  tc = talloc_chunk_from_ptr(ptr);
2243
2244
0
  if (tc->flags & TALLOC_FLAG_LOOP) {
2245
0
    return;
2246
0
  }
2247
2248
0
  callback(ptr, depth, max_depth, 0, private_data);
2249
2250
0
  if (max_depth >= 0 && depth >= max_depth) {
2251
0
    return;
2252
0
  }
2253
2254
0
  tc->flags |= TALLOC_FLAG_LOOP;
2255
0
  for (c=tc->child;c;c=c->next) {
2256
0
    if (c->name == TALLOC_MAGIC_REFERENCE) {
2257
0
      struct talloc_reference_handle *h = (struct talloc_reference_handle *)TC_PTR_FROM_CHUNK(c);
2258
0
      callback(h->ptr, depth + 1, max_depth, 1, private_data);
2259
0
    } else {
2260
0
      talloc_report_depth_cb(TC_PTR_FROM_CHUNK(c), depth + 1, max_depth, callback, private_data);
2261
0
    }
2262
0
  }
2263
0
  tc->flags &= ~TALLOC_FLAG_LOOP;
2264
0
}
2265
2266
static void talloc_report_depth_FILE_helper(const void *ptr, int depth, int max_depth, int is_ref, void *_f)
2267
0
{
2268
0
  const char *name = __talloc_get_name(ptr);
2269
0
  struct talloc_chunk *tc;
2270
0
  FILE *f = (FILE *)_f;
2271
2272
0
  if (is_ref) {
2273
0
    fprintf(f, "%*sreference to: %s\n", depth*4, "", name);
2274
0
    return;
2275
0
  }
2276
2277
0
  tc = talloc_chunk_from_ptr(ptr);
2278
0
  if (tc->limit && tc->limit->parent == tc) {
2279
0
    fprintf(f, "%*s%-30s is a memlimit context"
2280
0
      " (max_size = %lu bytes, cur_size = %lu bytes)\n",
2281
0
      depth*4, "",
2282
0
      name,
2283
0
      (unsigned long)tc->limit->max_size,
2284
0
      (unsigned long)tc->limit->cur_size);
2285
0
  }
2286
2287
0
  if (depth == 0) {
2288
0
    fprintf(f,"%stalloc report on '%s' (total %6lu bytes in %3lu blocks)\n",
2289
0
      (max_depth < 0 ? "full " :""), name,
2290
0
      (unsigned long)talloc_total_size(ptr),
2291
0
      (unsigned long)talloc_total_blocks(ptr));
2292
0
    return;
2293
0
  }
2294
2295
0
  fprintf(f, "%*s%-30s contains %6lu bytes in %3lu blocks (ref %d) %p\n",
2296
0
    depth*4, "",
2297
0
    name,
2298
0
    (unsigned long)talloc_total_size(ptr),
2299
0
    (unsigned long)talloc_total_blocks(ptr),
2300
0
    (int)talloc_reference_count(ptr), ptr);
2301
2302
#if 0
2303
  fprintf(f, "content: ");
2304
  if (talloc_total_size(ptr)) {
2305
    int tot = talloc_total_size(ptr);
2306
    int i;
2307
2308
    for (i = 0; i < tot; i++) {
2309
      if ((((char *)ptr)[i] > 31) && (((char *)ptr)[i] < 126)) {
2310
        fprintf(f, "%c", ((char *)ptr)[i]);
2311
      } else {
2312
        fprintf(f, "~%02x", ((char *)ptr)[i]);
2313
      }
2314
    }
2315
  }
2316
  fprintf(f, "\n");
2317
#endif
2318
0
}
2319
2320
/*
2321
  report on memory usage by all children of a pointer, giving a full tree view
2322
*/
2323
_PUBLIC_ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f)
2324
0
{
2325
0
  if (f) {
2326
0
    talloc_report_depth_cb(ptr, depth, max_depth, talloc_report_depth_FILE_helper, f);
2327
0
    fflush(f);
2328
0
  }
2329
0
}
2330
2331
/*
2332
  report on memory usage by all children of a pointer, giving a full tree view
2333
*/
2334
_PUBLIC_ void talloc_report_full(const void *ptr, FILE *f)
2335
0
{
2336
0
  talloc_report_depth_file(ptr, 0, -1, f);
2337
0
}
2338
2339
/*
2340
  report on memory usage by all children of a pointer
2341
*/
2342
_PUBLIC_ void talloc_report(const void *ptr, FILE *f)
2343
0
{
2344
0
  talloc_report_depth_file(ptr, 0, 1, f);
2345
0
}
2346
2347
/*
2348
  enable tracking of the NULL context
2349
*/
2350
_PUBLIC_ void talloc_enable_null_tracking(void)
2351
0
{
2352
0
  if (null_context == NULL) {
2353
0
    null_context = _talloc_named_const(NULL, 0, "null_context");
2354
0
    if (autofree_context != NULL) {
2355
0
      talloc_reparent(NULL, null_context, autofree_context);
2356
0
    }
2357
0
  }
2358
0
}
2359
2360
/*
2361
  enable tracking of the NULL context, not moving the autofree context
2362
  into the NULL context. This is needed for the talloc testsuite
2363
*/
2364
_PUBLIC_ void talloc_enable_null_tracking_no_autofree(void)
2365
0
{
2366
0
  if (null_context == NULL) {
2367
0
    null_context = _talloc_named_const(NULL, 0, "null_context");
2368
0
  }
2369
0
}
2370
2371
/*
2372
  disable tracking of the NULL context
2373
*/
2374
_PUBLIC_ void talloc_disable_null_tracking(void)
2375
0
{
2376
0
  if (null_context != NULL) {
2377
    /* we have to move any children onto the real NULL
2378
       context */
2379
0
    struct talloc_chunk *tc, *tc2;
2380
0
    tc = talloc_chunk_from_ptr(null_context);
2381
0
    for (tc2 = tc->child; tc2; tc2=tc2->next) {
2382
0
      if (tc2->parent == tc) tc2->parent = NULL;
2383
0
      if (tc2->prev == tc) tc2->prev = NULL;
2384
0
    }
2385
0
    for (tc2 = tc->next; tc2; tc2=tc2->next) {
2386
0
      if (tc2->parent == tc) tc2->parent = NULL;
2387
0
      if (tc2->prev == tc) tc2->prev = NULL;
2388
0
    }
2389
0
    tc->child = NULL;
2390
0
    tc->next = NULL;
2391
0
  }
2392
0
  talloc_free(null_context);
2393
0
  null_context = NULL;
2394
0
}
2395
2396
/*
2397
  enable leak reporting on exit
2398
*/
2399
_PUBLIC_ void talloc_enable_leak_report(void)
2400
0
{
2401
0
  talloc_enable_null_tracking();
2402
0
  talloc_report_null = true;
2403
0
  talloc_setup_atexit();
2404
0
}
2405
2406
/*
2407
  enable full leak reporting on exit
2408
*/
2409
_PUBLIC_ void talloc_enable_leak_report_full(void)
2410
0
{
2411
0
  talloc_enable_null_tracking();
2412
0
  talloc_report_null_full = true;
2413
0
  talloc_setup_atexit();
2414
0
}
2415
2416
/*
2417
   talloc and zero memory.
2418
*/
2419
_PUBLIC_ void *_talloc_zero(const void *ctx, size_t size, const char *name)
2420
19.6M
{
2421
19.6M
  void *p = _talloc_named_const(ctx, size, name);
2422
2423
19.6M
  if (p) {
2424
19.6M
    memset(p, '\0', size);
2425
19.6M
  }
2426
2427
19.6M
  return p;
2428
19.6M
}
2429
2430
/*
2431
  memdup with a talloc.
2432
*/
2433
_PUBLIC_ void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name)
2434
3.28M
{
2435
3.28M
  void *newp = NULL;
2436
2437
3.28M
  if (likely(size > 0) && unlikely(p == NULL)) {
2438
0
    return NULL;
2439
0
  }
2440
2441
3.28M
  newp = _talloc_named_const(t, size, name);
2442
3.28M
  if (likely(newp != NULL) && likely(size > 0)) {
2443
2.76M
    memcpy(newp, p, size);
2444
2.76M
  }
2445
2446
3.28M
  return newp;
2447
3.28M
}
2448
2449
static inline char *__talloc_strlendup(const void *t, const char *p, size_t len)
2450
17.8M
{
2451
17.8M
  char *ret;
2452
17.8M
  struct talloc_chunk *tc = NULL;
2453
2454
17.8M
  ret = (char *)__talloc(t, len + 1, &tc);
2455
17.8M
  if (unlikely(!ret)) return NULL;
2456
2457
17.8M
  memcpy(ret, p, len);
2458
17.8M
  ret[len] = 0;
2459
2460
17.8M
  _tc_set_name_const(tc, ret);
2461
17.8M
  return ret;
2462
17.8M
}
2463
2464
/*
2465
  strdup with a talloc
2466
*/
2467
_PUBLIC_ char *talloc_strdup(const void *t, const char *p)
2468
17.5M
{
2469
17.5M
  if (unlikely(!p)) return NULL;
2470
17.5M
  return __talloc_strlendup(t, p, strlen(p));
2471
17.5M
}
2472
2473
/*
2474
  strndup with a talloc
2475
*/
2476
_PUBLIC_ char *talloc_strndup(const void *t, const char *p, size_t n)
2477
313k
{
2478
313k
  if (unlikely(!p)) return NULL;
2479
313k
  return __talloc_strlendup(t, p, strnlen(p, n));
2480
313k
}
2481
2482
static inline char *__talloc_strlendup_append(char *s, size_t slen,
2483
                const char *a, size_t alen)
2484
5.05M
{
2485
5.05M
  char *ret;
2486
2487
5.05M
  ret = talloc_realloc(NULL, s, char, slen + alen + 1);
2488
5.05M
  if (unlikely(!ret)) return NULL;
2489
2490
  /* append the string and the trailing \0 */
2491
5.05M
  memcpy(&ret[slen], a, alen);
2492
5.05M
  ret[slen+alen] = 0;
2493
2494
5.05M
  _tc_set_name_const(talloc_chunk_from_ptr(ret), ret);
2495
5.05M
  return ret;
2496
5.05M
}
2497
2498
/*
2499
 * Appends at the end of the string.
2500
 */
2501
_PUBLIC_ char *talloc_strdup_append(char *s, const char *a)
2502
5.05M
{
2503
5.05M
  if (unlikely(!s)) {
2504
0
    return talloc_strdup(NULL, a);
2505
0
  }
2506
2507
5.05M
  if (unlikely(!a)) {
2508
0
    return s;
2509
0
  }
2510
2511
5.05M
  return __talloc_strlendup_append(s, strlen(s), a, strlen(a));
2512
5.05M
}
2513
2514
/*
2515
 * Appends at the end of the talloc'ed buffer,
2516
 * not the end of the string.
2517
 */
2518
_PUBLIC_ char *talloc_strdup_append_buffer(char *s, const char *a)
2519
0
{
2520
0
  size_t slen;
2521
2522
0
  if (unlikely(!s)) {
2523
0
    return talloc_strdup(NULL, a);
2524
0
  }
2525
2526
0
  if (unlikely(!a)) {
2527
0
    return s;
2528
0
  }
2529
2530
0
  slen = talloc_get_size(s);
2531
0
  if (likely(slen > 0)) {
2532
0
    slen--;
2533
0
  }
2534
2535
0
  return __talloc_strlendup_append(s, slen, a, strlen(a));
2536
0
}
2537
2538
/*
2539
 * Appends at the end of the string.
2540
 */
2541
_PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n)
2542
0
{
2543
0
  if (unlikely(!s)) {
2544
0
    return talloc_strndup(NULL, a, n);
2545
0
  }
2546
2547
0
  if (unlikely(!a)) {
2548
0
    return s;
2549
0
  }
2550
2551
0
  return __talloc_strlendup_append(s, strlen(s), a, strnlen(a, n));
2552
0
}
2553
2554
/*
2555
 * Appends at the end of the talloc'ed buffer,
2556
 * not the end of the string.
2557
 */
2558
_PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n)
2559
0
{
2560
0
  size_t slen;
2561
2562
0
  if (unlikely(!s)) {
2563
0
    return talloc_strndup(NULL, a, n);
2564
0
  }
2565
2566
0
  if (unlikely(!a)) {
2567
0
    return s;
2568
0
  }
2569
2570
0
  slen = talloc_get_size(s);
2571
0
  if (likely(slen > 0)) {
2572
0
    slen--;
2573
0
  }
2574
2575
0
  return __talloc_strlendup_append(s, slen, a, strnlen(a, n));
2576
0
}
2577
2578
#ifndef HAVE_VA_COPY
2579
#ifdef HAVE___VA_COPY
2580
#define va_copy(dest, src) __va_copy(dest, src)
2581
#else
2582
#define va_copy(dest, src) (dest) = (src)
2583
#endif
2584
#endif
2585
2586
static struct talloc_chunk *_vasprintf_tc(const void *t,
2587
            const char *fmt,
2588
            va_list ap) PRINTF_ATTRIBUTE(2,0);
2589
2590
static struct talloc_chunk *_vasprintf_tc(const void *t,
2591
            const char *fmt,
2592
            va_list ap)
2593
1.25M
{
2594
1.25M
  int vlen;
2595
1.25M
  size_t len;
2596
1.25M
  char *ret;
2597
1.25M
  va_list ap2;
2598
1.25M
  struct talloc_chunk *tc = NULL;
2599
1.25M
  char buf[1024];
2600
2601
1.25M
  va_copy(ap2, ap);
2602
1.25M
  vlen = vsnprintf(buf, sizeof(buf), fmt, ap2);
2603
1.25M
  va_end(ap2);
2604
1.25M
  if (unlikely(vlen < 0)) {
2605
0
    return NULL;
2606
0
  }
2607
1.25M
  len = vlen;
2608
1.25M
  if (unlikely(len + 1 < len)) {
2609
0
    return NULL;
2610
0
  }
2611
2612
1.25M
  ret = (char *)__talloc(t, len+1, &tc);
2613
1.25M
  if (unlikely(!ret)) return NULL;
2614
2615
1.25M
  if (len < sizeof(buf)) {
2616
1.25M
    memcpy(ret, buf, len+1);
2617
1.25M
  } else {
2618
3.35k
    va_copy(ap2, ap);
2619
3.35k
    vsnprintf(ret, len+1, fmt, ap2);
2620
3.35k
    va_end(ap2);
2621
3.35k
  }
2622
2623
1.25M
  _tc_set_name_const(tc, ret);
2624
1.25M
  return tc;
2625
1.25M
}
2626
2627
_PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap)
2628
446k
{
2629
446k
  struct talloc_chunk *tc = _vasprintf_tc(t, fmt, ap);
2630
446k
  if (tc == NULL) {
2631
0
    return NULL;
2632
0
  }
2633
446k
  return TC_PTR_FROM_CHUNK(tc);
2634
446k
}
2635
2636
2637
/*
2638
  Perform string formatting, and return a pointer to newly allocated
2639
  memory holding the result, inside a memory pool.
2640
 */
2641
_PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...)
2642
438k
{
2643
438k
  va_list ap;
2644
438k
  char *ret;
2645
2646
438k
  va_start(ap, fmt);
2647
438k
  ret = talloc_vasprintf(t, fmt, ap);
2648
438k
  va_end(ap);
2649
438k
  return ret;
2650
438k
}
2651
2652
static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
2653
             const char *fmt, va_list ap)
2654
             PRINTF_ATTRIBUTE(3,0);
2655
2656
static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
2657
             const char *fmt, va_list ap)
2658
12.7M
{
2659
12.7M
  ssize_t alen;
2660
12.7M
  va_list ap2;
2661
12.7M
  char c;
2662
2663
12.7M
  va_copy(ap2, ap);
2664
  /* this call looks strange, but it makes it work on older solaris boxes */
2665
12.7M
  alen = vsnprintf(&c, 1, fmt, ap2);
2666
12.7M
  va_end(ap2);
2667
2668
12.7M
  if (alen <= 0) {
2669
    /* Either the vsnprintf failed or the format resulted in
2670
     * no characters being formatted. In the former case, we
2671
     * ought to return NULL, in the latter we ought to return
2672
     * the original string. Most current callers of this
2673
     * function expect it to never return NULL.
2674
     */
2675
113k
    return s;
2676
113k
  }
2677
2678
12.6M
  s = talloc_realloc(NULL, s, char, slen + alen + 1);
2679
12.6M
  if (!s) return NULL;
2680
2681
12.6M
  vsnprintf(s + slen, alen + 1, fmt, ap);
2682
2683
12.6M
  _tc_set_name_const(talloc_chunk_from_ptr(s), s);
2684
12.6M
  return s;
2685
12.6M
}
2686
2687
/**
2688
 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2689
 * and return @p s, which may have moved.  Good for gradually
2690
 * accumulating output into a string buffer. Appends at the end
2691
 * of the string.
2692
 **/
2693
_PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap)
2694
11.7M
{
2695
11.7M
  if (unlikely(!s)) {
2696
0
    return talloc_vasprintf(NULL, fmt, ap);
2697
0
  }
2698
2699
11.7M
  return __talloc_vaslenprintf_append(s, strlen(s), fmt, ap);
2700
11.7M
}
2701
2702
/**
2703
 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2704
 * and return @p s, which may have moved. Always appends at the
2705
 * end of the talloc'ed buffer, not the end of the string.
2706
 **/
2707
_PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap)
2708
962k
{
2709
962k
  size_t slen;
2710
2711
962k
  if (unlikely(!s)) {
2712
0
    return talloc_vasprintf(NULL, fmt, ap);
2713
0
  }
2714
2715
962k
  slen = talloc_get_size(s);
2716
962k
  if (likely(slen > 0)) {
2717
962k
    slen--;
2718
962k
  }
2719
2720
962k
  return __talloc_vaslenprintf_append(s, slen, fmt, ap);
2721
962k
}
2722
2723
/*
2724
  Realloc @p s to append the formatted result of @p fmt and return @p
2725
  s, which may have moved.  Good for gradually accumulating output
2726
  into a string buffer.
2727
 */
2728
_PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...)
2729
11.7M
{
2730
11.7M
  va_list ap;
2731
2732
11.7M
  va_start(ap, fmt);
2733
11.7M
  s = talloc_vasprintf_append(s, fmt, ap);
2734
11.7M
  va_end(ap);
2735
11.7M
  return s;
2736
11.7M
}
2737
2738
/*
2739
  Realloc @p s to append the formatted result of @p fmt and return @p
2740
  s, which may have moved.  Good for gradually accumulating output
2741
  into a buffer.
2742
 */
2743
_PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...)
2744
277k
{
2745
277k
  va_list ap;
2746
2747
277k
  va_start(ap, fmt);
2748
277k
  s = talloc_vasprintf_append_buffer(s, fmt, ap);
2749
277k
  va_end(ap);
2750
277k
  return s;
2751
277k
}
2752
2753
/*
2754
 * Function to make string-building simple by handling intermediate
2755
 * realloc failures. See for example commit a37ea9d750e1.
2756
 */
2757
_PUBLIC_ void talloc_asprintf_addbuf(char **ps, const char *fmt, ...)
2758
684k
{
2759
684k
  va_list ap;
2760
684k
  char *s = *ps;
2761
684k
  char *t = NULL;
2762
2763
684k
  if (s == NULL) {
2764
0
    return;
2765
0
  }
2766
2767
684k
  va_start(ap, fmt);
2768
684k
  t = talloc_vasprintf_append_buffer(s, fmt, ap);
2769
684k
  va_end(ap);
2770
2771
684k
  if (t == NULL) {
2772
    /* signal failure to the next caller */
2773
0
    TALLOC_FREE(s);
2774
0
    *ps = NULL;
2775
684k
  } else {
2776
684k
    *ps = t;
2777
684k
  }
2778
684k
}
2779
2780
/*
2781
  alloc an array, checking for integer overflow in the array size
2782
*/
2783
_PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2784
25.8M
{
2785
25.8M
  if (count >= MAX_TALLOC_SIZE/el_size) {
2786
2.70k
    return NULL;
2787
2.70k
  }
2788
25.8M
  return _talloc_named_const(ctx, el_size * count, name);
2789
25.8M
}
2790
2791
/*
2792
  alloc an zero array, checking for integer overflow in the array size
2793
*/
2794
_PUBLIC_ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2795
4.99M
{
2796
4.99M
  if (count >= MAX_TALLOC_SIZE/el_size) {
2797
16
    return NULL;
2798
16
  }
2799
4.99M
  return _talloc_zero(ctx, el_size * count, name);
2800
4.99M
}
2801
2802
/*
2803
  realloc an array, checking for integer overflow in the array size
2804
*/
2805
_PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name)
2806
52.6M
{
2807
52.6M
  if (count >= MAX_TALLOC_SIZE/el_size) {
2808
38
    return NULL;
2809
38
  }
2810
52.6M
  return _talloc_realloc(ctx, ptr, el_size * count, name);
2811
52.6M
}
2812
2813
/*
2814
  a function version of talloc_realloc(), so it can be passed as a function pointer
2815
  to libraries that want a realloc function (a realloc function encapsulates
2816
  all the basic capabilities of an allocation library, which is why this is useful)
2817
*/
2818
_PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size)
2819
0
{
2820
0
  return _talloc_realloc(context, ptr, size, NULL);
2821
0
}
2822
2823
2824
static int talloc_autofree_destructor(void *ptr)
2825
0
{
2826
0
  autofree_context = NULL;
2827
0
  return 0;
2828
0
}
2829
2830
/*
2831
  return a context which will be auto-freed on exit
2832
  this is useful for reducing the noise in leak reports
2833
*/
2834
_PUBLIC_ void *talloc_autofree_context(void)
2835
0
{
2836
0
  if (autofree_context == NULL) {
2837
0
    autofree_context = _talloc_named_const(NULL, 0, "autofree_context");
2838
0
    talloc_set_destructor(autofree_context, talloc_autofree_destructor);
2839
0
    talloc_setup_atexit();
2840
0
  }
2841
0
  return autofree_context;
2842
0
}
2843
2844
_PUBLIC_ size_t talloc_get_size(const void *context)
2845
1.25G
{
2846
1.25G
  struct talloc_chunk *tc;
2847
2848
1.25G
  if (context == NULL) {
2849
0
    return 0;
2850
0
  }
2851
2852
1.25G
  tc = talloc_chunk_from_ptr(context);
2853
2854
1.25G
  return tc->size;
2855
1.25G
}
2856
2857
/*
2858
  find a parent of this context that has the given name, if any
2859
*/
2860
_PUBLIC_ void *talloc_find_parent_byname(const void *context, const char *name)
2861
0
{
2862
0
  struct talloc_chunk *tc;
2863
2864
0
  if (context == NULL) {
2865
0
    return NULL;
2866
0
  }
2867
2868
0
  tc = talloc_chunk_from_ptr(context);
2869
0
  while (tc) {
2870
0
    if (tc->name && strcmp(tc->name, name) == 0) {
2871
0
      return TC_PTR_FROM_CHUNK(tc);
2872
0
    }
2873
0
    while (tc && tc->prev) tc = tc->prev;
2874
0
    if (tc) {
2875
0
      tc = tc->parent;
2876
0
    }
2877
0
  }
2878
0
  return NULL;
2879
0
}
2880
2881
/*
2882
  show the parentage of a context
2883
*/
2884
_PUBLIC_ void talloc_show_parents(const void *context, FILE *file)
2885
0
{
2886
0
  struct talloc_chunk *tc;
2887
2888
0
  if (context == NULL) {
2889
0
    fprintf(file, "talloc no parents for NULL\n");
2890
0
    return;
2891
0
  }
2892
2893
0
  tc = talloc_chunk_from_ptr(context);
2894
0
  fprintf(file, "talloc parents of '%s'\n", __talloc_get_name(context));
2895
0
  while (tc) {
2896
0
    fprintf(file, "\t'%s'\n", __talloc_get_name(TC_PTR_FROM_CHUNK(tc)));
2897
0
    while (tc && tc->prev) tc = tc->prev;
2898
0
    if (tc) {
2899
0
      tc = tc->parent;
2900
0
    }
2901
0
  }
2902
0
  fflush(file);
2903
0
}
2904
2905
/*
2906
  return 1 if ptr is a parent of context
2907
*/
2908
static int _talloc_is_parent(const void *context, const void *ptr, int depth)
2909
0
{
2910
0
  struct talloc_chunk *tc;
2911
2912
0
  if (context == NULL) {
2913
0
    return 0;
2914
0
  }
2915
2916
0
  tc = talloc_chunk_from_ptr(context);
2917
0
  while (tc) {
2918
0
    if (depth <= 0) {
2919
0
      return 0;
2920
0
    }
2921
0
    if (TC_PTR_FROM_CHUNK(tc) == ptr) return 1;
2922
0
    while (tc && tc->prev) tc = tc->prev;
2923
0
    if (tc) {
2924
0
      tc = tc->parent;
2925
0
      depth--;
2926
0
    }
2927
0
  }
2928
0
  return 0;
2929
0
}
2930
2931
/*
2932
  return 1 if ptr is a parent of context
2933
*/
2934
_PUBLIC_ int talloc_is_parent(const void *context, const void *ptr)
2935
0
{
2936
0
  return _talloc_is_parent(context, ptr, TALLOC_MAX_DEPTH);
2937
0
}
2938
2939
/*
2940
  return the total size of memory used by this context and all children
2941
*/
2942
static inline size_t _talloc_total_limit_size(const void *ptr,
2943
          struct talloc_memlimit *old_limit,
2944
          struct talloc_memlimit *new_limit)
2945
0
{
2946
0
  return _talloc_total_mem_internal(ptr, TOTAL_MEM_LIMIT,
2947
0
            old_limit, new_limit);
2948
0
}
2949
2950
static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size)
2951
121M
{
2952
121M
  struct talloc_memlimit *l;
2953
2954
121M
  for (l = limit; l != NULL; l = l->upper) {
2955
0
    if (l->max_size != 0 &&
2956
0
        ((l->max_size <= l->cur_size) ||
2957
0
         (l->max_size - l->cur_size < size))) {
2958
0
      return false;
2959
0
    }
2960
0
  }
2961
2962
121M
  return true;
2963
121M
}
2964
2965
/*
2966
  Update memory limits when freeing a talloc_chunk.
2967
*/
2968
static void tc_memlimit_update_on_free(struct talloc_chunk *tc)
2969
121M
{
2970
121M
  size_t limit_shrink_size;
2971
2972
121M
  if (!tc->limit) {
2973
121M
    return;
2974
121M
  }
2975
2976
  /*
2977
   * Pool entries don't count. Only the pools
2978
   * themselves are counted as part of the memory
2979
   * limits. Note that this also takes care of
2980
   * nested pools which have both flags
2981
   * TALLOC_FLAG_POOLMEM|TALLOC_FLAG_POOL set.
2982
   */
2983
0
  if (tc->flags & TALLOC_FLAG_POOLMEM) {
2984
0
    return;
2985
0
  }
2986
2987
  /*
2988
   * If we are part of a memory limited context hierarchy
2989
   * we need to subtract the memory used from the counters
2990
   */
2991
2992
0
  limit_shrink_size = tc->size+TC_HDR_SIZE;
2993
2994
  /*
2995
   * If we're deallocating a pool, take into
2996
   * account the prefix size added for the pool.
2997
   */
2998
2999
0
  if (tc->flags & TALLOC_FLAG_POOL) {
3000
0
    limit_shrink_size += TP_HDR_SIZE;
3001
0
  }
3002
3003
0
  talloc_memlimit_shrink(tc->limit, limit_shrink_size);
3004
3005
0
  if (tc->limit->parent == tc) {
3006
0
    free(tc->limit);
3007
0
  }
3008
3009
0
  tc->limit = NULL;
3010
0
}
3011
3012
/*
3013
  Increase memory limit accounting after a malloc/realloc.
3014
*/
3015
static void talloc_memlimit_grow(struct talloc_memlimit *limit,
3016
        size_t size)
3017
147M
{
3018
147M
  struct talloc_memlimit *l;
3019
3020
147M
  for (l = limit; l != NULL; l = l->upper) {
3021
0
    size_t new_cur_size = l->cur_size + size;
3022
0
    if (new_cur_size < l->cur_size) {
3023
0
      talloc_abort("logic error in talloc_memlimit_grow\n");
3024
0
      return;
3025
0
    }
3026
0
    l->cur_size = new_cur_size;
3027
0
  }
3028
147M
}
3029
3030
/*
3031
  Decrease memory limit accounting after a free/realloc.
3032
*/
3033
static void talloc_memlimit_shrink(struct talloc_memlimit *limit,
3034
        size_t size)
3035
386k
{
3036
386k
  struct talloc_memlimit *l;
3037
3038
386k
  for (l = limit; l != NULL; l = l->upper) {
3039
0
    if (l->cur_size < size) {
3040
0
      talloc_abort("logic error in talloc_memlimit_shrink\n");
3041
0
      return;
3042
0
    }
3043
0
    l->cur_size = l->cur_size - size;
3044
0
  }
3045
386k
}
3046
3047
_PUBLIC_ int talloc_set_memlimit(const void *ctx, size_t max_size)
3048
0
{
3049
0
  struct talloc_chunk *tc = talloc_chunk_from_ptr(ctx);
3050
0
  struct talloc_memlimit *orig_limit;
3051
0
  struct talloc_memlimit *limit = NULL;
3052
3053
0
  if (tc->limit && tc->limit->parent == tc) {
3054
0
    tc->limit->max_size = max_size;
3055
0
    return 0;
3056
0
  }
3057
0
  orig_limit = tc->limit;
3058
3059
0
  limit = malloc(sizeof(struct talloc_memlimit));
3060
0
  if (limit == NULL) {
3061
0
    return 1;
3062
0
  }
3063
0
  limit->parent = tc;
3064
0
  limit->max_size = max_size;
3065
0
  limit->cur_size = _talloc_total_limit_size(ctx, tc->limit, limit);
3066
3067
0
  if (orig_limit) {
3068
0
    limit->upper = orig_limit;
3069
0
  } else {
3070
0
    limit->upper = NULL;
3071
0
  }
3072
3073
0
  return 0;
3074
0
}