Coverage Report

Created: 2026-02-14 06:52

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/php-src/Zend/zend_alloc.c
Line
Count
Source
1
/*
2
   +----------------------------------------------------------------------+
3
   | Zend Engine                                                          |
4
   +----------------------------------------------------------------------+
5
   | Copyright (c) Zend Technologies Ltd. (http://www.zend.com)           |
6
   +----------------------------------------------------------------------+
7
   | This source file is subject to version 2.00 of the Zend license,     |
8
   | that is bundled with this package in the file LICENSE, and is        |
9
   | available through the world-wide-web at the following url:           |
10
   | http://www.zend.com/license/2_00.txt.                                |
11
   | If you did not receive a copy of the Zend license and are unable to  |
12
   | obtain it through the world-wide-web, please send a note to          |
13
   | license@zend.com so we can mail you a copy immediately.              |
14
   +----------------------------------------------------------------------+
15
   | Authors: Andi Gutmans <andi@php.net>                                 |
16
   |          Zeev Suraski <zeev@php.net>                                 |
17
   |          Dmitry Stogov <dmitry@php.net>                              |
18
   +----------------------------------------------------------------------+
19
*/
20
21
/*
22
 * zend_alloc is designed to be a modern CPU cache friendly memory manager
23
 * for PHP. Most ideas are taken from jemalloc and tcmalloc implementations.
24
 *
25
 * All allocations are split into 3 categories:
26
 *
27
 * Huge  - the size is greater than CHUNK size (~2M by default), allocation is
28
 *         performed using mmap(). The result is aligned on 2M boundary.
29
 *
30
 * Large - a number of 4096K pages inside a CHUNK. Large blocks
31
 *         are always aligned on page boundary.
32
 *
33
 * Small - less than 3/4 of page size. Small sizes are rounded up to nearest
34
 *         greater predefined small size (there are 30 predefined sizes:
35
 *         8, 16, 24, 32, ... 3072). Small blocks are allocated from
36
 *         RUNs. Each RUN is allocated as a single or few following pages.
37
 *         Allocation inside RUNs implemented using linked list of free
38
 *         elements. The result is aligned to 8 bytes.
39
 *
40
 * zend_alloc allocates memory from OS by CHUNKs, these CHUNKs and huge memory
41
 * blocks are always aligned to CHUNK boundary. So it's very easy to determine
42
 * the CHUNK owning the certain pointer. Regular CHUNKs reserve a single
43
 * page at start for special purpose. It contains bitset of free pages,
44
 * few bitset for available runs of predefined small sizes, map of pages that
45
 * keeps information about usage of each page in this CHUNK, etc.
46
 *
47
 * zend_alloc provides familiar emalloc/efree/erealloc API, but in addition it
48
 * provides specialized and optimized routines to allocate blocks of predefined
49
 * sizes (e.g. emalloc_2(), emallc_4(), ..., emalloc_large(), etc)
50
 * The library uses C preprocessor tricks that substitute calls to emalloc()
51
 * with more specialized routines when the requested size is known.
52
 */
53
54
#include "zend.h"
55
#include "zend_alloc.h"
56
#include "zend_globals.h"
57
#include "zend_hrtime.h"
58
#include "zend_operators.h"
59
#include "zend_multiply.h"
60
#include "zend_bitset.h"
61
#include "zend_mmap.h"
62
#include "zend_portability.h"
63
#include <signal.h>
64
65
#ifdef HAVE_UNISTD_H
66
# include <unistd.h>
67
#endif
68
69
#ifdef ZEND_WIN32
70
# include <wincrypt.h>
71
# include <process.h>
72
# include "win32/winutil.h"
73
# define getpid _getpid
74
typedef int pid_t;
75
#endif
76
77
#include <stdio.h>
78
#include <stdlib.h>
79
#include <string.h>
80
81
#include <sys/types.h>
82
#include <sys/stat.h>
83
#include <limits.h>
84
#include <fcntl.h>
85
#include <errno.h>
86
#ifdef __SANITIZE_ADDRESS__
87
# include <sanitizer/asan_interface.h>
88
#endif
89
90
#ifndef _WIN32
91
# include <sys/mman.h>
92
# ifndef MAP_ANON
93
#  ifdef MAP_ANONYMOUS
94
#   define MAP_ANON MAP_ANONYMOUS
95
#  endif
96
# endif
97
# ifndef MAP_FAILED
98
#  define MAP_FAILED ((void*)-1)
99
# endif
100
# ifndef MAP_POPULATE
101
#  define MAP_POPULATE 0
102
# endif
103
#  if defined(_SC_PAGESIZE) || (_SC_PAGE_SIZE)
104
16
#    define REAL_PAGE_SIZE _real_page_size
105
static size_t _real_page_size = ZEND_MM_PAGE_SIZE;
106
#  endif
107
# ifdef MAP_ALIGNED_SUPER
108
#    define MAP_HUGETLB MAP_ALIGNED_SUPER
109
# endif
110
#endif
111
112
#ifndef REAL_PAGE_SIZE
113
# define REAL_PAGE_SIZE ZEND_MM_PAGE_SIZE
114
#endif
115
116
/* NetBSD has an mremap() function with a signature that is incompatible with Linux (WTF?),
117
 * so pretend it doesn't exist. */
118
#ifndef __linux__
119
# undef HAVE_MREMAP
120
#endif
121
122
#ifndef __APPLE__
123
0
# define ZEND_MM_FD -1
124
#else
125
# include <mach/vm_statistics.h>
126
/* Mac allows to track anonymous page via vmmap per TAG id.
127
 * user land applications are allowed to take from 240 to 255.
128
 */
129
# define ZEND_MM_FD VM_MAKE_TAG(250U)
130
#endif
131
132
#ifndef ZEND_MM_STAT
133
# define ZEND_MM_STAT 1    /* track current and peak memory usage            */
134
#endif
135
#ifndef ZEND_MM_LIMIT
136
# define ZEND_MM_LIMIT 1   /* support for user-defined memory limit          */
137
#endif
138
#ifndef ZEND_MM_CUSTOM
139
# define ZEND_MM_CUSTOM 1  /* support for custom memory allocator            */
140
                           /* USE_ZEND_ALLOC=0 may switch to system malloc() */
141
#endif
142
#ifndef ZEND_MM_STORAGE
143
# define ZEND_MM_STORAGE 1 /* support for custom memory storage              */
144
#endif
145
#ifndef ZEND_MM_ERROR
146
# define ZEND_MM_ERROR 1   /* report system errors                           */
147
#endif
148
#ifndef ZEND_MM_HEAP_PROTECTION
149
# define ZEND_MM_HEAP_PROTECTION 1 /* protect heap against corruptions       */
150
#endif
151
152
#if ZEND_MM_HEAP_PROTECTION
153
/* Define ZEND_MM_MIN_USEABLE_BIN_SIZE to the size of two pointers */
154
# if UINTPTR_MAX == UINT64_MAX
155
0
#  define ZEND_MM_MIN_USEABLE_BIN_SIZE 16
156
# elif UINTPTR_MAX == UINT32_MAX
157
#  define ZEND_MM_MIN_USEABLE_BIN_SIZE 8
158
# else
159
#  error
160
# endif
161
# if ZEND_MM_MIN_USEABLE_BIN_SIZE < ZEND_MM_MIN_SMALL_SIZE
162
#  error
163
# endif
164
#else /* ZEND_MM_HEAP_PROTECTION */
165
# define ZEND_MM_MIN_USEABLE_BIN_SIZE ZEND_MM_MIN_SMALL_SIZE
166
#endif /* ZEND_MM_HEAP_PROTECTION */
167
168
#ifndef ZEND_MM_CHECK
169
0
# define ZEND_MM_CHECK(condition, message)  do { \
170
0
    if (UNEXPECTED(!(condition))) { \
171
0
      zend_mm_panic(message); \
172
0
    } \
173
0
  } while (0)
174
#endif
175
176
typedef uint32_t   zend_mm_page_info; /* 4-byte integer */
177
typedef zend_ulong zend_mm_bitset;    /* 4-byte or 8-byte integer */
178
179
#define ZEND_MM_ALIGNED_OFFSET(size, alignment) \
180
0
  (((size_t)(size)) & ((alignment) - 1))
181
#define ZEND_MM_ALIGNED_BASE(size, alignment) \
182
0
  (((size_t)(size)) & ~((alignment) - 1))
183
#define ZEND_MM_SIZE_TO_NUM(size, alignment) \
184
0
  (((size_t)(size) + ((alignment) - 1)) / (alignment))
185
186
0
#define ZEND_MM_BITSET_LEN    (sizeof(zend_mm_bitset) * 8)       /* 32 or 64 */
187
#define ZEND_MM_PAGE_MAP_LEN  (ZEND_MM_PAGES / ZEND_MM_BITSET_LEN) /* 16 or 8 */
188
189
typedef zend_mm_bitset zend_mm_page_map[ZEND_MM_PAGE_MAP_LEN];     /* 64B */
190
191
#define ZEND_MM_IS_FRUN                  0x00000000
192
0
#define ZEND_MM_IS_LRUN                  0x40000000
193
0
#define ZEND_MM_IS_SRUN                  0x80000000
194
195
0
#define ZEND_MM_LRUN_PAGES_MASK          0x000003ff
196
0
#define ZEND_MM_LRUN_PAGES_OFFSET        0
197
198
0
#define ZEND_MM_SRUN_BIN_NUM_MASK        0x0000001f
199
0
#define ZEND_MM_SRUN_BIN_NUM_OFFSET      0
200
201
0
#define ZEND_MM_SRUN_FREE_COUNTER_MASK   0x01ff0000
202
0
#define ZEND_MM_SRUN_FREE_COUNTER_OFFSET 16
203
204
0
#define ZEND_MM_NRUN_OFFSET_MASK         0x01ff0000
205
0
#define ZEND_MM_NRUN_OFFSET_OFFSET       16
206
207
0
#define ZEND_MM_LRUN_PAGES(info)         (((info) & ZEND_MM_LRUN_PAGES_MASK) >> ZEND_MM_LRUN_PAGES_OFFSET)
208
0
#define ZEND_MM_SRUN_BIN_NUM(info)       (((info) & ZEND_MM_SRUN_BIN_NUM_MASK) >> ZEND_MM_SRUN_BIN_NUM_OFFSET)
209
0
#define ZEND_MM_SRUN_FREE_COUNTER(info)  (((info) & ZEND_MM_SRUN_FREE_COUNTER_MASK) >> ZEND_MM_SRUN_FREE_COUNTER_OFFSET)
210
0
#define ZEND_MM_NRUN_OFFSET(info)        (((info) & ZEND_MM_NRUN_OFFSET_MASK) >> ZEND_MM_NRUN_OFFSET_OFFSET)
211
212
#define ZEND_MM_FRUN()                   ZEND_MM_IS_FRUN
213
0
#define ZEND_MM_LRUN(count)              (ZEND_MM_IS_LRUN | ((count) << ZEND_MM_LRUN_PAGES_OFFSET))
214
0
#define ZEND_MM_SRUN(bin_num)            (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET))
215
0
#define ZEND_MM_SRUN_EX(bin_num, count)  (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((count) << ZEND_MM_SRUN_FREE_COUNTER_OFFSET))
216
0
#define ZEND_MM_NRUN(bin_num, offset)    (ZEND_MM_IS_SRUN | ZEND_MM_IS_LRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((offset) << ZEND_MM_NRUN_OFFSET_OFFSET))
217
218
0
#define ZEND_MM_BINS 30
219
220
#if UINTPTR_MAX == UINT64_MAX
221
0
#  define BSWAPPTR(u) ZEND_BYTES_SWAP64(u)
222
#else
223
#  define BSWAPPTR(u) ZEND_BYTES_SWAP32(u)
224
#endif
225
226
typedef struct  _zend_mm_page      zend_mm_page;
227
typedef struct  _zend_mm_bin       zend_mm_bin;
228
typedef struct  _zend_mm_free_slot zend_mm_free_slot;
229
typedef struct  _zend_mm_chunk     zend_mm_chunk;
230
typedef struct  _zend_mm_huge_list zend_mm_huge_list;
231
232
static bool zend_mm_use_huge_pages = false;
233
234
/*
235
 * Memory is retrieved from OS by chunks of fixed size 2MB.
236
 * Inside chunk it's managed by pages of fixed size 4096B.
237
 * So each chunk consists from 512 pages.
238
 * The first page of each chunk is reserved for chunk header.
239
 * It contains service information about all pages.
240
 *
241
 * free_pages - current number of free pages in this chunk
242
 *
243
 * free_tail  - number of continuous free pages at the end of chunk
244
 *
245
 * free_map   - bitset (a bit for each page). The bit is set if the corresponding
246
 *              page is allocated. Allocator for "large sizes" may easily find a
247
 *              free page (or a continuous number of pages) searching for zero
248
 *              bits.
249
 *
250
 * map        - contains service information for each page. (32-bits for each
251
 *              page).
252
 *    usage:
253
 *        (2 bits)
254
 *        FRUN - free page,
255
 *              LRUN - first page of "large" allocation
256
 *              SRUN - first page of a bin used for "small" allocation
257
 *
258
 *    lrun_pages:
259
 *              (10 bits) number of allocated pages
260
 *
261
 *    srun_bin_num:
262
 *              (5 bits) bin number (e.g. 0 for sizes 0-2, 1 for 3-4,
263
 *               2 for 5-8, 3 for 9-16 etc) see zend_alloc_sizes.h
264
 */
265
266
struct _zend_mm_heap {
267
#if ZEND_MM_CUSTOM
268
  int                use_custom_heap;
269
#endif
270
#if ZEND_MM_STORAGE
271
  zend_mm_storage   *storage;
272
#endif
273
#if ZEND_MM_STAT
274
  size_t             size;                    /* current memory usage */
275
  size_t             peak;                    /* peak memory usage */
276
#endif
277
  uintptr_t          shadow_key;              /* free slot shadow ptr xor key */
278
  zend_mm_free_slot *free_slot[ZEND_MM_BINS]; /* free lists for small sizes */
279
#if ZEND_MM_STAT || ZEND_MM_LIMIT
280
  size_t             real_size;               /* current size of allocated pages */
281
#endif
282
#if ZEND_MM_STAT
283
  size_t             real_peak;               /* peak size of allocated pages */
284
#endif
285
#if ZEND_MM_LIMIT
286
  size_t             limit;                   /* memory limit */
287
  int                overflow;                /* memory overflow flag */
288
#endif
289
290
  zend_mm_huge_list *huge_list;               /* list of huge allocated blocks */
291
292
  zend_mm_chunk     *main_chunk;
293
  zend_mm_chunk     *cached_chunks;     /* list of unused chunks */
294
  int                chunks_count;      /* number of allocated chunks */
295
  int                peak_chunks_count;   /* peak number of allocated chunks for current request */
296
  int                cached_chunks_count;   /* number of cached chunks */
297
  double             avg_chunks_count;    /* average number of chunks allocated per request */
298
  int                last_chunks_delete_boundary; /* number of chunks after last deletion */
299
  int                last_chunks_delete_count;    /* number of deletion over the last boundary */
300
#if ZEND_MM_CUSTOM
301
  struct {
302
    void      *(*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
303
    void       (*_free)(void*  ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
304
    void      *(*_realloc)(void*, size_t  ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
305
    size_t     (*_gc)(void);
306
    void       (*_shutdown)(bool full, bool silent);
307
  } custom_heap;
308
  union {
309
    HashTable *tracked_allocs;
310
    struct {
311
      bool    poison_alloc;
312
      uint8_t poison_alloc_value;
313
      bool    poison_free;
314
      uint8_t poison_free_value;
315
      uint8_t padding;
316
      bool    check_freelists_on_shutdown;
317
    } debug;
318
  };
319
#endif
320
#if ZEND_DEBUG
321
  pid_t pid;
322
#endif
323
  zend_random_bytes_insecure_state rand_state;
324
};
325
326
struct _zend_mm_chunk {
327
  zend_mm_heap      *heap;
328
  zend_mm_chunk     *next;
329
  zend_mm_chunk     *prev;
330
  uint32_t           free_pages;        /* number of free pages */
331
  uint32_t           free_tail;               /* number of free pages at the end of chunk */
332
  uint32_t           num;
333
  char               reserve[64 - (sizeof(void*) * 3 + sizeof(uint32_t) * 3)];
334
  zend_mm_heap       heap_slot;               /* used only in main chunk */
335
  zend_mm_page_map   free_map;                /* 512 bits or 64 bytes */
336
  zend_mm_page_info  map[ZEND_MM_PAGES];      /* 2 KB = 512 * 4 */
337
};
338
339
struct _zend_mm_page {
340
  char               bytes[ZEND_MM_PAGE_SIZE];
341
};
342
343
/*
344
 * bin - is one or few continuous pages (up to 8) used for allocation of
345
 * a particular "small size".
346
 */
347
struct _zend_mm_bin {
348
  char               bytes[ZEND_MM_PAGE_SIZE * 8];
349
};
350
351
struct _zend_mm_free_slot {
352
  zend_mm_free_slot *next_free_slot;
353
};
354
355
struct _zend_mm_huge_list {
356
  void              *ptr;
357
  size_t             size;
358
  zend_mm_huge_list *next;
359
#if ZEND_DEBUG
360
  zend_mm_debug_info dbg;
361
#endif
362
};
363
364
#define ZEND_MM_PAGE_ADDR(chunk, page_num) \
365
0
  ((void*)(((zend_mm_page*)(chunk)) + (page_num)))
366
367
#define _BIN_DATA_SIZE(num, size, elements, pages, x, y) size,
368
static const uint32_t bin_data_size[] = {
369
  ZEND_MM_BINS_INFO(_BIN_DATA_SIZE, x, y)
370
};
371
372
#define _BIN_DATA_ELEMENTS(num, size, elements, pages, x, y) elements,
373
static const uint32_t bin_elements[] = {
374
  ZEND_MM_BINS_INFO(_BIN_DATA_ELEMENTS, x, y)
375
};
376
377
#define _BIN_DATA_PAGES(num, size, elements, pages, x, y) pages,
378
static const uint32_t bin_pages[] = {
379
  ZEND_MM_BINS_INFO(_BIN_DATA_PAGES, x, y)
380
};
381
382
static ZEND_COLD ZEND_NORETURN void zend_mm_panic(const char *message)
383
0
{
384
0
  fprintf(stderr, "%s\n", message);
385
/* See http://support.microsoft.com/kb/190351 */
386
#ifdef ZEND_WIN32
387
  fflush(stderr);
388
#endif
389
0
#if ZEND_DEBUG && defined(HAVE_KILL) && defined(HAVE_GETPID)
390
0
  kill(getpid(), SIGSEGV);
391
0
#endif
392
0
  abort();
393
0
}
394
395
static ZEND_COLD ZEND_NORETURN void zend_mm_safe_error(zend_mm_heap *heap,
396
  const char *format,
397
  size_t limit,
398
#if ZEND_DEBUG
399
  const char *filename,
400
  uint32_t lineno,
401
#endif
402
  size_t size)
403
544
{
404
405
544
  heap->overflow = 1;
406
544
  zend_try {
407
544
    zend_error_noreturn(E_ERROR,
408
544
      format,
409
544
      limit,
410
544
#if ZEND_DEBUG
411
544
      filename,
412
544
      lineno,
413
544
#endif
414
544
      size);
415
544
  } zend_catch {
416
0
  }  zend_end_try();
417
0
  heap->overflow = 0;
418
0
  zend_bailout();
419
0
  exit(1);
420
544
}
421
422
#ifdef _WIN32
423
static void stderr_last_error(char *msg)
424
{
425
  DWORD err = GetLastError();
426
  char *buf = php_win32_error_to_msg(err);
427
428
  if (!buf[0]) {
429
    fprintf(stderr, "\n%s: [0x%08lx]\n", msg, err);
430
  }
431
  else {
432
    fprintf(stderr, "\n%s: [0x%08lx] %s\n", msg, err, buf);
433
  }
434
435
  php_win32_error_msg_free(buf);
436
}
437
#endif
438
439
/*****************/
440
/* OS Allocation */
441
/*****************/
442
443
static void zend_mm_munmap(void *addr, size_t size)
444
0
{
445
#ifdef _WIN32
446
  if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
447
    /** ERROR_INVALID_ADDRESS is expected when addr is not range start address */
448
    if (GetLastError() != ERROR_INVALID_ADDRESS) {
449
#if ZEND_MM_ERROR
450
      stderr_last_error("VirtualFree() failed");
451
#endif
452
      return;
453
    }
454
    SetLastError(0);
455
456
    MEMORY_BASIC_INFORMATION mbi;
457
    if (VirtualQuery(addr, &mbi, sizeof(mbi)) == 0) {
458
#if ZEND_MM_ERROR
459
      stderr_last_error("VirtualQuery() failed");
460
#endif
461
      return;
462
    }
463
    addr = mbi.AllocationBase;
464
465
    if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
466
#if ZEND_MM_ERROR
467
      stderr_last_error("VirtualFree() failed");
468
#endif
469
    }
470
  }
471
#else
472
0
  if (munmap(addr, size) != 0) {
473
0
#if ZEND_MM_ERROR
474
0
    fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
475
0
#endif
476
0
  }
477
0
#endif
478
0
}
479
480
#ifndef HAVE_MREMAP
481
static void *zend_mm_mmap_fixed(void *addr, size_t size)
482
{
483
#ifdef _WIN32
484
  void *ptr = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
485
486
  if (ptr == NULL) {
487
    /** ERROR_INVALID_ADDRESS is expected when fixed addr range is not free */
488
    if (GetLastError() != ERROR_INVALID_ADDRESS) {
489
#if ZEND_MM_ERROR
490
      stderr_last_error("VirtualAlloc() fixed failed");
491
#endif
492
    }
493
    SetLastError(0);
494
    return NULL;
495
  }
496
  ZEND_ASSERT(ptr == addr);
497
  return ptr;
498
#else
499
  int flags = MAP_PRIVATE | MAP_ANON;
500
#if defined(MAP_EXCL)
501
  flags |= MAP_FIXED | MAP_EXCL;
502
#elif defined(MAP_TRYFIXED)
503
  flags |= MAP_TRYFIXED;
504
#endif
505
  /* MAP_FIXED leads to discarding of the old mapping, so it can't be used. */
506
  void *ptr = mmap(addr, size, PROT_READ | PROT_WRITE, flags /*| MAP_POPULATE | MAP_HUGETLB*/, ZEND_MM_FD, 0);
507
508
  if (ptr == MAP_FAILED) {
509
#if ZEND_MM_ERROR && !defined(MAP_EXCL) && !defined(MAP_TRYFIXED)
510
    fprintf(stderr, "\nmmap() fixed failed: [%d] %s\n", errno, strerror(errno));
511
#endif
512
    return NULL;
513
  } else if (ptr != addr) {
514
    zend_mm_munmap(ptr, size);
515
    return NULL;
516
  }
517
  return ptr;
518
#endif
519
}
520
#endif
521
522
static void *zend_mm_mmap(size_t size)
523
0
{
524
#ifdef _WIN32
525
  void *ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
526
527
  if (ptr == NULL) {
528
#if ZEND_MM_ERROR
529
    stderr_last_error("VirtualAlloc() failed");
530
#endif
531
    return NULL;
532
  }
533
  return ptr;
534
#else
535
0
  void *ptr;
536
537
0
#if defined(MAP_HUGETLB) || defined(VM_FLAGS_SUPERPAGE_SIZE_2MB)
538
0
  if (zend_mm_use_huge_pages && size == ZEND_MM_CHUNK_SIZE) {
539
0
    int fd = -1;
540
0
    int mflags = MAP_PRIVATE | MAP_ANON;
541
0
#if defined(MAP_HUGETLB)
542
0
    mflags |= MAP_HUGETLB;
543
#else
544
    fd = VM_FLAGS_SUPERPAGE_SIZE_2MB;
545
#endif
546
0
    ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, mflags, fd, 0);
547
0
    if (ptr != MAP_FAILED) {
548
0
      zend_mmap_set_name(ptr, size, "zend_alloc");
549
0
      return ptr;
550
0
    }
551
0
  }
552
0
#endif
553
554
0
  ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, ZEND_MM_FD, 0);
555
556
0
  if (ptr == MAP_FAILED) {
557
0
#if ZEND_MM_ERROR
558
0
    fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
559
0
#endif
560
0
    return NULL;
561
0
  }
562
0
  zend_mmap_set_name(ptr, size, "zend_alloc");
563
0
  return ptr;
564
0
#endif
565
0
}
566
567
/***********/
568
/* Bitmask */
569
/***********/
570
571
/* number of trailing set (1) bits */
572
ZEND_ATTRIBUTE_CONST static zend_always_inline int zend_mm_bitset_nts(zend_mm_bitset bitset)
573
0
{
574
0
#if (defined(__GNUC__) || __has_builtin(__builtin_ctzl)) && SIZEOF_ZEND_LONG == SIZEOF_LONG && defined(PHP_HAVE_BUILTIN_CTZL)
575
0
  return __builtin_ctzl(~bitset);
576
#elif (defined(__GNUC__) || __has_builtin(__builtin_ctzll)) && defined(PHP_HAVE_BUILTIN_CTZLL)
577
  return __builtin_ctzll(~bitset);
578
#elif defined(_WIN32)
579
  unsigned long index;
580
581
#if defined(_WIN64)
582
  if (!BitScanForward64(&index, ~bitset)) {
583
#else
584
  if (!BitScanForward(&index, ~bitset)) {
585
#endif
586
    /* undefined behavior */
587
    return 32;
588
  }
589
590
  return (int)index;
591
#else
592
  int n;
593
594
  if (bitset == (zend_mm_bitset)-1) return ZEND_MM_BITSET_LEN;
595
596
  n = 0;
597
#if SIZEOF_ZEND_LONG == 8
598
  if (sizeof(zend_mm_bitset) == 8) {
599
    if ((bitset & 0xffffffff) == 0xffffffff) {n += 32; bitset = bitset >> Z_UL(32);}
600
  }
601
#endif
602
  if ((bitset & 0x0000ffff) == 0x0000ffff) {n += 16; bitset = bitset >> 16;}
603
  if ((bitset & 0x000000ff) == 0x000000ff) {n +=  8; bitset = bitset >>  8;}
604
  if ((bitset & 0x0000000f) == 0x0000000f) {n +=  4; bitset = bitset >>  4;}
605
  if ((bitset & 0x00000003) == 0x00000003) {n +=  2; bitset = bitset >>  2;}
606
  return n + (bitset & 1);
607
#endif
608
0
}
609
610
static zend_always_inline int zend_mm_bitset_is_set(zend_mm_bitset *bitset, int bit)
611
0
{
612
0
  return ZEND_BIT_TEST(bitset, bit);
613
0
}
614
615
static zend_always_inline void zend_mm_bitset_set_bit(zend_mm_bitset *bitset, int bit)
616
0
{
617
0
  bitset[bit / ZEND_MM_BITSET_LEN] |= (Z_UL(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
618
0
}
619
620
static zend_always_inline void zend_mm_bitset_reset_bit(zend_mm_bitset *bitset, int bit)
621
0
{
622
0
  bitset[bit / ZEND_MM_BITSET_LEN] &= ~(Z_UL(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
623
0
}
624
625
static zend_always_inline void zend_mm_bitset_set_range(zend_mm_bitset *bitset, int start, int len)
626
0
{
627
0
  if (len == 1) {
628
0
    zend_mm_bitset_set_bit(bitset, start);
629
0
  } else {
630
0
    int pos = start / ZEND_MM_BITSET_LEN;
631
0
    int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
632
0
    int bit = start & (ZEND_MM_BITSET_LEN - 1);
633
0
    zend_mm_bitset tmp;
634
635
0
    if (pos != end) {
636
      /* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */
637
0
      tmp = (zend_mm_bitset)-1 << bit;
638
0
      bitset[pos++] |= tmp;
639
0
      while (pos != end) {
640
        /* set all bits */
641
0
        bitset[pos++] = (zend_mm_bitset)-1;
642
0
      }
643
0
      end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
644
      /* set bits from "0" to "end" */
645
0
      tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
646
0
      bitset[pos] |= tmp;
647
0
    } else {
648
0
      end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
649
      /* set bits from "bit" to "end" */
650
0
      tmp = (zend_mm_bitset)-1 << bit;
651
0
      tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
652
0
      bitset[pos] |= tmp;
653
0
    }
654
0
  }
655
0
}
656
657
static zend_always_inline void zend_mm_bitset_reset_range(zend_mm_bitset *bitset, int start, int len)
658
0
{
659
0
  if (len == 1) {
660
0
    zend_mm_bitset_reset_bit(bitset, start);
661
0
  } else {
662
0
    int pos = start / ZEND_MM_BITSET_LEN;
663
0
    int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
664
0
    int bit = start & (ZEND_MM_BITSET_LEN - 1);
665
0
    zend_mm_bitset tmp;
666
667
0
    if (pos != end) {
668
      /* reset bits from "bit" to ZEND_MM_BITSET_LEN-1 */
669
0
      tmp = ~((Z_UL(1) << bit) - 1);
670
0
      bitset[pos++] &= ~tmp;
671
0
      while (pos != end) {
672
        /* set all bits */
673
0
        bitset[pos++] = 0;
674
0
      }
675
0
      end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
676
      /* reset bits from "0" to "end" */
677
0
      tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
678
0
      bitset[pos] &= ~tmp;
679
0
    } else {
680
0
      end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
681
      /* reset bits from "bit" to "end" */
682
0
      tmp = (zend_mm_bitset)-1 << bit;
683
0
      tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
684
0
      bitset[pos] &= ~tmp;
685
0
    }
686
0
  }
687
0
}
688
689
static zend_always_inline int zend_mm_bitset_is_free_range(zend_mm_bitset *bitset, int start, int len)
690
0
{
691
0
  if (len == 1) {
692
0
    return !zend_mm_bitset_is_set(bitset, start);
693
0
  } else {
694
0
    int pos = start / ZEND_MM_BITSET_LEN;
695
0
    int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
696
0
    int bit = start & (ZEND_MM_BITSET_LEN - 1);
697
0
    zend_mm_bitset tmp;
698
699
0
    if (pos != end) {
700
      /* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */
701
0
      tmp = (zend_mm_bitset)-1 << bit;
702
0
      if ((bitset[pos++] & tmp) != 0) {
703
0
        return 0;
704
0
      }
705
0
      while (pos != end) {
706
        /* set all bits */
707
0
        if (bitset[pos++] != 0) {
708
0
          return 0;
709
0
        }
710
0
      }
711
0
      end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
712
      /* set bits from "0" to "end" */
713
0
      tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
714
0
      return (bitset[pos] & tmp) == 0;
715
0
    } else {
716
0
      end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
717
      /* set bits from "bit" to "end" */
718
0
      tmp = (zend_mm_bitset)-1 << bit;
719
0
      tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
720
0
      return (bitset[pos] & tmp) == 0;
721
0
    }
722
0
  }
723
0
}
724
725
/**********/
726
/* Chunks */
727
/**********/
728
729
static zend_always_inline void zend_mm_hugepage(void* ptr, size_t size)
730
0
{
731
0
#if defined(MADV_HUGEPAGE)
732
0
  (void)madvise(ptr, size, MADV_HUGEPAGE);
733
#elif defined(HAVE_MEMCNTL)
734
  struct memcntl_mha m = {.mha_cmd = MHA_MAPSIZE_VA, .mha_pagesize = ZEND_MM_CHUNK_SIZE, .mha_flags = 0};
735
  (void)memcntl(ptr, size, MC_HAT_ADVISE, (char *)&m, 0, 0);
736
#elif !defined(VM_FLAGS_SUPERPAGE_SIZE_2MB) && !defined(MAP_ALIGNED_SUPER)
737
  zend_error_noreturn(E_ERROR, "huge_pages: thp unsupported on this platform");
738
#endif
739
0
}
740
741
static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
742
0
{
743
0
  void *ptr = zend_mm_mmap(size);
744
745
0
  if (ptr == NULL) {
746
0
    return NULL;
747
0
  } else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
748
0
    if (zend_mm_use_huge_pages) {
749
0
      zend_mm_hugepage(ptr, size);
750
0
    }
751
#ifdef __SANITIZE_ADDRESS__
752
    ASAN_UNPOISON_MEMORY_REGION(ptr, size);
753
#endif
754
0
    return ptr;
755
0
  } else {
756
0
    size_t offset;
757
758
    /* chunk has to be aligned */
759
0
    zend_mm_munmap(ptr, size);
760
0
    ptr = zend_mm_mmap(size + alignment - REAL_PAGE_SIZE);
761
#ifdef _WIN32
762
    offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
763
    if (offset != 0) {
764
      offset = alignment - offset;
765
    }
766
    zend_mm_munmap(ptr, size + alignment - REAL_PAGE_SIZE);
767
    ptr = zend_mm_mmap_fixed((void*)((char*)ptr + offset), size);
768
    if (ptr == NULL) { // fix GH-9650, fixed addr range is not free
769
      ptr = zend_mm_mmap(size + alignment - REAL_PAGE_SIZE);
770
      if (ptr == NULL) {
771
        return NULL;
772
      }
773
      offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
774
      if (offset != 0) {
775
        ptr = (void*)((char*)ptr + alignment - offset);
776
      }
777
    }
778
    return ptr;
779
#else
780
0
    offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
781
0
    if (offset != 0) {
782
0
      offset = alignment - offset;
783
0
      zend_mm_munmap(ptr, offset);
784
0
      ptr = (char*)ptr + offset;
785
0
      alignment -= offset;
786
0
    }
787
0
    if (alignment > REAL_PAGE_SIZE) {
788
0
      zend_mm_munmap((char*)ptr + size, alignment - REAL_PAGE_SIZE);
789
0
    }
790
0
    if (zend_mm_use_huge_pages) {
791
0
      zend_mm_hugepage(ptr, size);
792
0
    }
793
# ifdef __SANITIZE_ADDRESS__
794
    ASAN_UNPOISON_MEMORY_REGION(ptr, size);
795
# endif
796
0
#endif
797
0
    return ptr;
798
0
  }
799
0
}
800
801
static void *zend_mm_chunk_alloc(zend_mm_heap *heap, size_t size, size_t alignment)
802
0
{
803
0
#if ZEND_MM_STORAGE
804
0
  if (UNEXPECTED(heap->storage)) {
805
0
    void *ptr = heap->storage->handlers.chunk_alloc(heap->storage, size, alignment);
806
0
    ZEND_ASSERT(((uintptr_t)((char*)ptr + (alignment-1)) & (alignment-1)) == (uintptr_t)ptr);
807
0
    return ptr;
808
0
  }
809
0
#endif
810
0
  return zend_mm_chunk_alloc_int(size, alignment);
811
0
}
812
813
static void zend_mm_chunk_free(zend_mm_heap *heap, void *addr, size_t size)
814
0
{
815
0
#if ZEND_MM_STORAGE
816
0
  if (UNEXPECTED(heap->storage)) {
817
0
    heap->storage->handlers.chunk_free(heap->storage, addr, size);
818
0
    return;
819
0
  }
820
0
#endif
821
0
  zend_mm_munmap(addr, size);
822
0
}
823
824
static int zend_mm_chunk_truncate(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
825
0
{
826
0
#if ZEND_MM_STORAGE
827
0
  if (UNEXPECTED(heap->storage)) {
828
0
    if (heap->storage->handlers.chunk_truncate) {
829
0
      return heap->storage->handlers.chunk_truncate(heap->storage, addr, old_size, new_size);
830
0
    } else {
831
0
      return 0;
832
0
    }
833
0
  }
834
0
#endif
835
0
#ifndef _WIN32
836
0
  zend_mm_munmap((char*)addr + new_size, old_size - new_size);
837
0
  return 1;
838
#else
839
  return 0;
840
#endif
841
0
}
842
843
static int zend_mm_chunk_extend(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
844
0
{
845
0
#if ZEND_MM_STORAGE
846
0
  if (UNEXPECTED(heap->storage)) {
847
0
    if (heap->storage->handlers.chunk_extend) {
848
0
      return heap->storage->handlers.chunk_extend(heap->storage, addr, old_size, new_size);
849
0
    } else {
850
0
      return 0;
851
0
    }
852
0
  }
853
0
#endif
854
0
#ifdef HAVE_MREMAP
855
  /* We don't use MREMAP_MAYMOVE due to alignment requirements. */
856
0
  void *ptr = mremap(addr, old_size, new_size, 0);
857
0
  if (ptr == MAP_FAILED) {
858
0
    return 0;
859
0
  }
860
  /* Sanity check: The mapping shouldn't have moved. */
861
0
  ZEND_ASSERT(ptr == addr);
862
0
  return 1;
863
#elif !defined(_WIN32)
864
  return (zend_mm_mmap_fixed((char*)addr + old_size, new_size - old_size) != NULL);
865
#else
866
  return 0;
867
#endif
868
0
}
869
870
static zend_always_inline void zend_mm_chunk_init(zend_mm_heap *heap, zend_mm_chunk *chunk)
871
0
{
872
0
  chunk->heap = heap;
873
0
  chunk->next = heap->main_chunk;
874
0
  chunk->prev = heap->main_chunk->prev;
875
0
  chunk->prev->next = chunk;
876
0
  chunk->next->prev = chunk;
877
  /* mark first pages as allocated */
878
0
  chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
879
0
  chunk->free_tail = ZEND_MM_FIRST_PAGE;
880
  /* the younger chunks have bigger number */
881
0
  chunk->num = chunk->prev->num + 1;
882
  /* mark first pages as allocated */
883
0
  chunk->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
884
0
  chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
885
0
}
886
887
/***********************/
888
/* Huge Runs (forward) */
889
/***********************/
890
891
static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
892
static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
893
static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
894
895
#if ZEND_DEBUG
896
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
897
#else
898
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
899
#endif
900
901
/**************/
902
/* Large Runs */
903
/**************/
904
905
#if ZEND_DEBUG
906
static void *zend_mm_alloc_pages(zend_mm_heap *heap, uint32_t pages_count, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
907
#else
908
static void *zend_mm_alloc_pages(zend_mm_heap *heap, uint32_t pages_count ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
909
#endif
910
0
{
911
0
  zend_mm_chunk *chunk = heap->main_chunk;
912
0
  uint32_t page_num, len;
913
0
  int steps = 0;
914
915
0
  while (1) {
916
0
    if (UNEXPECTED(chunk->free_pages < pages_count)) {
917
0
      goto not_found;
918
#if 0
919
    } else if (UNEXPECTED(chunk->free_pages + chunk->free_tail == ZEND_MM_PAGES)) {
920
      if (UNEXPECTED(ZEND_MM_PAGES - chunk->free_tail < pages_count)) {
921
        goto not_found;
922
      } else {
923
        page_num = chunk->free_tail;
924
        goto found;
925
      }
926
    } else if (0) {
927
      /* First-Fit Search */
928
      int free_tail = chunk->free_tail;
929
      zend_mm_bitset *bitset = chunk->free_map;
930
      zend_mm_bitset tmp = *(bitset++);
931
      int i = 0;
932
933
      while (1) {
934
        /* skip allocated blocks */
935
        while (tmp == (zend_mm_bitset)-1) {
936
          i += ZEND_MM_BITSET_LEN;
937
          if (i == ZEND_MM_PAGES) {
938
            goto not_found;
939
          }
940
          tmp = *(bitset++);
941
        }
942
        /* find first 0 bit */
943
        page_num = i + zend_mm_bitset_nts(tmp);
944
        /* reset bits from 0 to "bit" */
945
        tmp &= tmp + 1;
946
        /* skip free blocks */
947
        while (tmp == 0) {
948
          i += ZEND_MM_BITSET_LEN;
949
          len = i - page_num;
950
          if (len >= pages_count) {
951
            goto found;
952
          } else if (i >= free_tail) {
953
            goto not_found;
954
          }
955
          tmp = *(bitset++);
956
        }
957
        /* find first 1 bit */
958
        len = (i + zend_ulong_ntz(tmp)) - page_num;
959
        if (len >= pages_count) {
960
          goto found;
961
        }
962
        /* set bits from 0 to "bit" */
963
        tmp |= tmp - 1;
964
      }
965
#endif
966
0
    } else {
967
      /* Best-Fit Search */
968
0
      int best = -1;
969
0
      uint32_t best_len = ZEND_MM_PAGES;
970
0
      uint32_t free_tail = chunk->free_tail;
971
0
      zend_mm_bitset *bitset = chunk->free_map;
972
0
      zend_mm_bitset tmp = *(bitset++);
973
0
      uint32_t i = 0;
974
975
0
      while (1) {
976
        /* skip allocated blocks */
977
0
        while (tmp == (zend_mm_bitset)-1) {
978
0
          i += ZEND_MM_BITSET_LEN;
979
0
          if (i == ZEND_MM_PAGES) {
980
0
            if (best > 0) {
981
0
              page_num = best;
982
0
              goto found;
983
0
            } else {
984
0
              goto not_found;
985
0
            }
986
0
          }
987
0
          tmp = *(bitset++);
988
0
        }
989
        /* find first 0 bit */
990
0
        page_num = i + zend_mm_bitset_nts(tmp);
991
        /* reset bits from 0 to "bit" */
992
0
        tmp &= tmp + 1;
993
        /* skip free blocks */
994
0
        while (tmp == 0) {
995
0
          i += ZEND_MM_BITSET_LEN;
996
0
          if (i >= free_tail || i == ZEND_MM_PAGES) {
997
0
            len = ZEND_MM_PAGES - page_num;
998
0
            if (len >= pages_count && len < best_len) {
999
0
              chunk->free_tail = page_num + pages_count;
1000
0
              goto found;
1001
0
            } else {
1002
              /* set accurate value */
1003
0
              chunk->free_tail = page_num;
1004
0
              if (best > 0) {
1005
0
                page_num = best;
1006
0
                goto found;
1007
0
              } else {
1008
0
                goto not_found;
1009
0
              }
1010
0
            }
1011
0
          }
1012
0
          tmp = *(bitset++);
1013
0
        }
1014
        /* find first 1 bit */
1015
0
        len = i + zend_ulong_ntz(tmp) - page_num;
1016
0
        if (len >= pages_count) {
1017
0
          if (len == pages_count) {
1018
0
            goto found;
1019
0
          } else if (len < best_len) {
1020
0
            best_len = len;
1021
0
            best = page_num;
1022
0
          }
1023
0
        }
1024
        /* set bits from 0 to "bit" */
1025
0
        tmp |= tmp - 1;
1026
0
      }
1027
0
    }
1028
1029
0
not_found:
1030
0
    if (chunk->next == heap->main_chunk) {
1031
0
get_chunk:
1032
0
      if (heap->cached_chunks) {
1033
0
        heap->cached_chunks_count--;
1034
0
        chunk = heap->cached_chunks;
1035
0
        heap->cached_chunks = chunk->next;
1036
0
      } else {
1037
0
#if ZEND_MM_LIMIT
1038
0
        if (UNEXPECTED(ZEND_MM_CHUNK_SIZE > heap->limit - heap->real_size)) {
1039
0
          if (zend_mm_gc(heap)) {
1040
0
            goto get_chunk;
1041
0
          } else if (heap->overflow == 0) {
1042
0
#if ZEND_DEBUG
1043
0
            zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1044
#else
1045
            zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, ZEND_MM_PAGE_SIZE * pages_count);
1046
#endif
1047
0
            return NULL;
1048
0
          }
1049
0
        }
1050
0
#endif
1051
0
        chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
1052
0
        if (UNEXPECTED(chunk == NULL)) {
1053
          /* insufficient memory */
1054
0
          if (zend_mm_gc(heap) &&
1055
0
              (chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE)) != NULL) {
1056
            /* pass */
1057
0
          } else {
1058
#if !ZEND_MM_LIMIT
1059
            zend_mm_safe_error(heap, "Out of memory");
1060
#elif ZEND_DEBUG
1061
            zend_mm_safe_error(heap, "Out of memory (allocated %zu bytes) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
1062
#else
1063
            zend_mm_safe_error(heap, "Out of memory (allocated %zu bytes) (tried to allocate %zu bytes)", heap->real_size, ZEND_MM_PAGE_SIZE * pages_count);
1064
#endif
1065
0
            return NULL;
1066
0
          }
1067
0
        }
1068
0
#if ZEND_MM_STAT
1069
0
        do {
1070
0
          size_t size = heap->real_size + ZEND_MM_CHUNK_SIZE;
1071
0
          size_t peak = MAX(heap->real_peak, size);
1072
0
          heap->real_size = size;
1073
0
          heap->real_peak = peak;
1074
0
        } while (0);
1075
#elif ZEND_MM_LIMIT
1076
        heap->real_size += ZEND_MM_CHUNK_SIZE;
1077
1078
#endif
1079
0
      }
1080
0
      heap->chunks_count++;
1081
0
      if (heap->chunks_count > heap->peak_chunks_count) {
1082
0
        heap->peak_chunks_count = heap->chunks_count;
1083
0
      }
1084
0
      zend_mm_chunk_init(heap, chunk);
1085
0
      page_num = ZEND_MM_FIRST_PAGE;
1086
0
      len = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
1087
0
      goto found;
1088
0
    } else {
1089
0
      chunk = chunk->next;
1090
0
      steps++;
1091
0
    }
1092
0
  }
1093
1094
0
found:
1095
0
  if (steps > 2 && pages_count < 8) {
1096
0
    ZEND_MM_CHECK(chunk->next->prev == chunk, "zend_mm_heap corrupted");
1097
0
    ZEND_MM_CHECK(chunk->prev->next == chunk, "zend_mm_heap corrupted");
1098
1099
    /* move chunk into the head of the linked-list */
1100
0
    chunk->prev->next = chunk->next;
1101
0
    chunk->next->prev = chunk->prev;
1102
0
    chunk->next = heap->main_chunk->next;
1103
0
    chunk->prev = heap->main_chunk;
1104
0
    chunk->prev->next = chunk;
1105
0
    chunk->next->prev = chunk;
1106
0
  }
1107
  /* mark run as allocated */
1108
0
  chunk->free_pages -= pages_count;
1109
0
  zend_mm_bitset_set_range(chunk->free_map, page_num, pages_count);
1110
0
  chunk->map[page_num] = ZEND_MM_LRUN(pages_count);
1111
0
  if (page_num == chunk->free_tail) {
1112
0
    chunk->free_tail = page_num + pages_count;
1113
0
  }
1114
0
  return ZEND_MM_PAGE_ADDR(chunk, page_num);
1115
0
}
1116
1117
static zend_always_inline void *zend_mm_alloc_large_ex(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1118
0
{
1119
0
  int pages_count = (int)ZEND_MM_SIZE_TO_NUM(size, ZEND_MM_PAGE_SIZE);
1120
0
#if ZEND_DEBUG
1121
0
  void *ptr = zend_mm_alloc_pages(heap, pages_count, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1122
#else
1123
  void *ptr = zend_mm_alloc_pages(heap, pages_count ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1124
#endif
1125
0
#if ZEND_MM_STAT
1126
0
  do {
1127
0
    size_t size = heap->size + pages_count * ZEND_MM_PAGE_SIZE;
1128
0
    size_t peak = MAX(heap->peak, size);
1129
0
    heap->size = size;
1130
0
    heap->peak = peak;
1131
0
  } while (0);
1132
0
#endif
1133
0
  return ptr;
1134
0
}
1135
1136
static zend_never_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1137
0
{
1138
0
  return zend_mm_alloc_large_ex(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1139
0
}
1140
1141
static zend_always_inline void zend_mm_delete_chunk(zend_mm_heap *heap, zend_mm_chunk *chunk)
1142
0
{
1143
0
  ZEND_MM_CHECK(chunk->next->prev == chunk, "zend_mm_heap corrupted");
1144
0
  ZEND_MM_CHECK(chunk->prev->next == chunk, "zend_mm_heap corrupted");
1145
1146
0
  chunk->next->prev = chunk->prev;
1147
0
  chunk->prev->next = chunk->next;
1148
0
  heap->chunks_count--;
1149
0
  if (heap->chunks_count + heap->cached_chunks_count < heap->avg_chunks_count + 0.1
1150
0
   || (heap->chunks_count == heap->last_chunks_delete_boundary
1151
0
    && heap->last_chunks_delete_count >= 4)) {
1152
    /* delay deletion */
1153
0
    heap->cached_chunks_count++;
1154
0
    chunk->next = heap->cached_chunks;
1155
0
    heap->cached_chunks = chunk;
1156
0
  } else {
1157
0
#if ZEND_MM_STAT || ZEND_MM_LIMIT
1158
0
    heap->real_size -= ZEND_MM_CHUNK_SIZE;
1159
0
#endif
1160
0
    if (!heap->cached_chunks) {
1161
0
      if (heap->chunks_count != heap->last_chunks_delete_boundary) {
1162
0
        heap->last_chunks_delete_boundary = heap->chunks_count;
1163
0
        heap->last_chunks_delete_count = 0;
1164
0
      } else {
1165
0
        heap->last_chunks_delete_count++;
1166
0
      }
1167
0
    }
1168
0
    if (!heap->cached_chunks || chunk->num > heap->cached_chunks->num) {
1169
0
      zend_mm_chunk_free(heap, chunk, ZEND_MM_CHUNK_SIZE);
1170
0
    } else {
1171
//TODO: select the best chunk to delete???
1172
0
      chunk->next = heap->cached_chunks->next;
1173
0
      zend_mm_chunk_free(heap, heap->cached_chunks, ZEND_MM_CHUNK_SIZE);
1174
0
      heap->cached_chunks = chunk;
1175
0
    }
1176
0
  }
1177
0
}
1178
1179
static zend_always_inline void zend_mm_free_pages_ex(zend_mm_heap *heap, zend_mm_chunk *chunk, uint32_t page_num, uint32_t pages_count, int free_chunk)
1180
0
{
1181
0
  chunk->free_pages += pages_count;
1182
0
  zend_mm_bitset_reset_range(chunk->free_map, page_num, pages_count);
1183
0
  chunk->map[page_num] = 0;
1184
0
  if (chunk->free_tail == page_num + pages_count) {
1185
    /* this setting may be not accurate */
1186
0
    chunk->free_tail = page_num;
1187
0
  }
1188
0
  if (free_chunk && chunk != heap->main_chunk && chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
1189
0
    zend_mm_delete_chunk(heap, chunk);
1190
0
  }
1191
0
}
1192
1193
static zend_never_inline void zend_mm_free_pages(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
1194
0
{
1195
0
  zend_mm_free_pages_ex(heap, chunk, page_num, pages_count, 1);
1196
0
}
1197
1198
static zend_always_inline void zend_mm_free_large(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
1199
0
{
1200
0
#if ZEND_MM_STAT
1201
0
  heap->size -= pages_count * ZEND_MM_PAGE_SIZE;
1202
0
#endif
1203
0
  zend_mm_free_pages(heap, chunk, page_num, pages_count);
1204
0
}
1205
1206
/**************/
1207
/* Small Runs */
1208
/**************/
1209
1210
/* higher set bit number (0->N/A, 1->1, 2->2, 4->3, 8->4, 127->7, 128->8 etc) */
1211
static zend_always_inline int zend_mm_small_size_to_bit(int size)
1212
0
{
1213
0
#if (defined(__GNUC__) || __has_builtin(__builtin_clz))  && defined(PHP_HAVE_BUILTIN_CLZ)
1214
0
  return (__builtin_clz(size) ^ 0x1f) + 1;
1215
#elif defined(_WIN32)
1216
  unsigned long index;
1217
1218
  if (!BitScanReverse(&index, (unsigned long)size)) {
1219
    /* undefined behavior */
1220
    return 64;
1221
  }
1222
1223
  return (((31 - (int)index) ^ 0x1f) + 1);
1224
#else
1225
  int n = 16;
1226
  if (size <= 0x00ff) {n -= 8; size = size << 8;}
1227
  if (size <= 0x0fff) {n -= 4; size = size << 4;}
1228
  if (size <= 0x3fff) {n -= 2; size = size << 2;}
1229
  if (size <= 0x7fff) {n -= 1;}
1230
  return n;
1231
#endif
1232
0
}
1233
1234
#ifndef MAX
1235
# define MAX(a, b) (((a) > (b)) ? (a) : (b))
1236
#endif
1237
1238
#ifndef MIN
1239
# define MIN(a, b) (((a) < (b)) ? (a) : (b))
1240
#endif
1241
1242
static zend_always_inline int zend_mm_small_size_to_bin(size_t size)
1243
0
{
1244
#if 0
1245
  int n;
1246
                            /*0,  1,  2,  3,  4,  5,  6,  7,  8,  9  10, 11, 12*/
1247
  static const int f1[] = { 3,  3,  3,  3,  3,  3,  3,  4,  5,  6,  7,  8,  9};
1248
  static const int f2[] = { 0,  0,  0,  0,  0,  0,  0,  4,  8, 12, 16, 20, 24};
1249
1250
  if (UNEXPECTED(size <= 2)) return 0;
1251
  n = zend_mm_small_size_to_bit(size - 1);
1252
  return ((size-1) >> f1[n]) + f2[n];
1253
#else
1254
0
  unsigned int t1, t2;
1255
1256
0
  if (size <= 64) {
1257
    /* we need to support size == 0 ... */
1258
0
    return (size - !!size) >> 3;
1259
0
  } else {
1260
0
    t1 = size - 1;
1261
0
    t2 = zend_mm_small_size_to_bit(t1) - 3;
1262
0
    t1 = t1 >> t2;
1263
0
    t2 = t2 - 3;
1264
0
    t2 = t2 << 2;
1265
0
    return (int)(t1 + t2);
1266
0
  }
1267
0
#endif
1268
0
}
1269
1270
0
#define ZEND_MM_SMALL_SIZE_TO_BIN(size)  zend_mm_small_size_to_bin(size)
1271
1272
#if ZEND_MM_HEAP_PROTECTION
1273
/* We keep track of free slots by organizing them in a linked list, with the
1274
 * first word of every free slot being a pointer to the next one.
1275
 *
1276
 * In order to frustrate corruptions, we check the consistency of these pointers
1277
 * before dereference by comparing them with a shadow.
1278
 *
1279
 * The shadow is a copy of the pointer, stored at the end of the slot. It is
1280
 * XOR'ed with a random key, and converted to big-endian so that smaller
1281
 * corruptions affect the most significant bytes, which has a high chance of
1282
 * resulting in an invalid address instead of pointing to an adjacent slot.
1283
 */
1284
1285
#define ZEND_MM_FREE_SLOT_PTR_SHADOW(free_slot, bin_num) \
1286
0
  *((zend_mm_free_slot**)((char*)(free_slot) + bin_data_size[(bin_num)] - sizeof(zend_mm_free_slot*)))
1287
1288
static zend_always_inline zend_mm_free_slot* zend_mm_encode_free_slot(const zend_mm_heap *heap, const zend_mm_free_slot *slot)
1289
0
{
1290
#ifdef WORDS_BIGENDIAN
1291
  return (zend_mm_free_slot*)(((uintptr_t)slot) ^ heap->shadow_key);
1292
#else
1293
0
  return (zend_mm_free_slot*)(BSWAPPTR((uintptr_t)slot) ^ heap->shadow_key);
1294
0
#endif
1295
0
}
1296
1297
static zend_always_inline zend_mm_free_slot* zend_mm_decode_free_slot_key(uintptr_t shadow_key, zend_mm_free_slot *slot)
1298
0
{
1299
#ifdef WORDS_BIGENDIAN
1300
  return (zend_mm_free_slot*)((uintptr_t)slot ^ shadow_key);
1301
#else
1302
0
  return (zend_mm_free_slot*)(BSWAPPTR((uintptr_t)slot ^ shadow_key));
1303
0
#endif
1304
0
}
1305
1306
static zend_always_inline zend_mm_free_slot* zend_mm_decode_free_slot(zend_mm_heap *heap, zend_mm_free_slot *slot)
1307
0
{
1308
0
  return zend_mm_decode_free_slot_key(heap->shadow_key, slot);
1309
0
}
1310
1311
static zend_always_inline void zend_mm_set_next_free_slot(zend_mm_heap *heap, uint32_t bin_num, zend_mm_free_slot *slot, zend_mm_free_slot *next)
1312
0
{
1313
0
  ZEND_ASSERT(bin_data_size[bin_num] >= ZEND_MM_MIN_USEABLE_BIN_SIZE);
1314
1315
0
  slot->next_free_slot = next;
1316
0
  ZEND_MM_FREE_SLOT_PTR_SHADOW(slot, bin_num) = zend_mm_encode_free_slot(heap, next);
1317
0
}
1318
1319
static zend_always_inline zend_mm_free_slot *zend_mm_get_next_free_slot(zend_mm_heap *heap, uint32_t bin_num, zend_mm_free_slot* slot)
1320
0
{
1321
0
  zend_mm_free_slot *next = slot->next_free_slot;
1322
0
  if (EXPECTED(next != NULL)) {
1323
0
    zend_mm_free_slot *shadow = ZEND_MM_FREE_SLOT_PTR_SHADOW(slot, bin_num);
1324
0
    if (UNEXPECTED(next != zend_mm_decode_free_slot(heap, shadow))) {
1325
0
      zend_mm_panic("zend_mm_heap corrupted");
1326
0
    }
1327
0
  }
1328
0
  return (zend_mm_free_slot*)next;
1329
0
}
1330
1331
#else /* ZEND_MM_HEAP_PROTECTION */
1332
# define zend_mm_set_next_free_slot(heap, bin_num, slot, next) do { \
1333
    (slot)->next_free_slot = (next);                            \
1334
  } while (0)
1335
# define zend_mm_get_next_free_slot(heap, bin_num, slot) (slot)->next_free_slot
1336
#endif /* ZEND_MM_HEAP_PROTECTION */
1337
1338
static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, uint32_t bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1339
0
{
1340
0
  zend_mm_chunk *chunk;
1341
0
  int page_num;
1342
0
  zend_mm_bin *bin;
1343
0
  zend_mm_free_slot *p, *end;
1344
1345
0
#if ZEND_DEBUG
1346
0
  bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num], bin_data_size[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1347
#else
1348
  bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1349
#endif
1350
0
  if (UNEXPECTED(bin == NULL)) {
1351
    /* insufficient memory */
1352
0
    return NULL;
1353
0
  }
1354
1355
0
  chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE);
1356
0
  page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE;
1357
0
  chunk->map[page_num] = ZEND_MM_SRUN(bin_num);
1358
0
  if (bin_pages[bin_num] > 1) {
1359
0
    uint32_t i = 1;
1360
1361
0
    do {
1362
0
      chunk->map[page_num+i] = ZEND_MM_NRUN(bin_num, i);
1363
0
      i++;
1364
0
    } while (i < bin_pages[bin_num]);
1365
0
  }
1366
1367
  /* create a linked list of elements from 1 to last */
1368
0
  end = (zend_mm_free_slot*)((char*)bin + (bin_data_size[bin_num] * (bin_elements[bin_num] - 1)));
1369
0
  heap->free_slot[bin_num] = p = (zend_mm_free_slot*)((char*)bin + bin_data_size[bin_num]);
1370
0
  do {
1371
0
    zend_mm_set_next_free_slot(heap, bin_num, p, (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]));
1372
0
#if ZEND_DEBUG
1373
0
    do {
1374
0
      zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1375
0
      dbg->size = 0;
1376
0
    } while (0);
1377
0
#endif
1378
0
    p = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
1379
0
  } while (p != end);
1380
1381
  /* terminate list using NULL */
1382
0
  p->next_free_slot = NULL;
1383
0
#if ZEND_DEBUG
1384
0
    do {
1385
0
      zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1386
0
      dbg->size = 0;
1387
0
    } while (0);
1388
0
#endif
1389
1390
  /* return first element */
1391
0
  return bin;
1392
0
}
1393
1394
static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1395
0
{
1396
0
  ZEND_ASSERT(bin_data_size[bin_num] >= ZEND_MM_MIN_USEABLE_BIN_SIZE);
1397
1398
0
#if ZEND_MM_STAT
1399
0
  do {
1400
0
    size_t size = heap->size + bin_data_size[bin_num];
1401
0
    size_t peak = MAX(heap->peak, size);
1402
0
    heap->size = size;
1403
0
    heap->peak = peak;
1404
0
  } while (0);
1405
0
#endif
1406
1407
0
  if (EXPECTED(heap->free_slot[bin_num] != NULL)) {
1408
0
    zend_mm_free_slot *p = heap->free_slot[bin_num];
1409
0
    heap->free_slot[bin_num] = zend_mm_get_next_free_slot(heap, bin_num, p);
1410
0
    return p;
1411
0
  } else {
1412
0
    return zend_mm_alloc_small_slow(heap, bin_num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1413
0
  }
1414
0
}
1415
1416
static zend_always_inline void zend_mm_free_small(zend_mm_heap *heap, void *ptr, int bin_num)
1417
0
{
1418
0
  ZEND_ASSERT(bin_data_size[bin_num] >= ZEND_MM_MIN_USEABLE_BIN_SIZE);
1419
1420
0
  zend_mm_free_slot *p;
1421
1422
0
#if ZEND_MM_STAT
1423
0
  heap->size -= bin_data_size[bin_num];
1424
0
#endif
1425
1426
0
#if ZEND_DEBUG
1427
0
  do {
1428
0
    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1429
0
    dbg->size = 0;
1430
0
  } while (0);
1431
0
#endif
1432
1433
0
  p = (zend_mm_free_slot*)ptr;
1434
0
  zend_mm_set_next_free_slot(heap, bin_num, p, heap->free_slot[bin_num]);
1435
0
  heap->free_slot[bin_num] = p;
1436
0
}
1437
1438
/********/
1439
/* Heap */
1440
/********/
1441
1442
#if ZEND_DEBUG
1443
static zend_always_inline zend_mm_debug_info *zend_mm_get_debug_info(zend_mm_heap *heap, void *ptr)
1444
0
{
1445
0
  size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1446
0
  zend_mm_chunk *chunk;
1447
0
  int page_num;
1448
0
  zend_mm_page_info info;
1449
1450
0
  ZEND_MM_CHECK(page_offset != 0, "zend_mm_heap corrupted");
1451
0
  chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1452
0
  page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1453
0
  info = chunk->map[page_num];
1454
0
  ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1455
0
  if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1456
0
    int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
1457
0
    return (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1458
0
  } else /* if (info & ZEND_MM_IS_LRUN) */ {
1459
0
    int pages_count = ZEND_MM_LRUN_PAGES(info);
1460
1461
0
    return (zend_mm_debug_info*)((char*)ptr + ZEND_MM_PAGE_SIZE * pages_count - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1462
0
  }
1463
0
}
1464
#endif
1465
1466
static zend_always_inline void *zend_mm_alloc_heap(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1467
0
{
1468
0
  void *ptr;
1469
0
#if ZEND_MM_HEAP_PROTECTION
1470
0
  if (size < ZEND_MM_MIN_USEABLE_BIN_SIZE) {
1471
0
    size = ZEND_MM_MIN_USEABLE_BIN_SIZE;
1472
0
  }
1473
0
#endif /* ZEND_MM_HEAP_PROTECTION */
1474
0
#if ZEND_DEBUG
1475
0
  size_t real_size = size;
1476
0
  zend_mm_debug_info *dbg;
1477
1478
  /* special handling for zero-size allocation */
1479
0
  size = MAX(size, 1);
1480
0
  size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1481
0
  if (UNEXPECTED(size < real_size)) {
1482
0
    zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu + %zu)", ZEND_MM_ALIGNED_SIZE(real_size), ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1483
0
  }
1484
0
#endif
1485
0
  if (EXPECTED(size <= ZEND_MM_MAX_SMALL_SIZE)) {
1486
0
    ptr = zend_mm_alloc_small(heap, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1487
0
#if ZEND_DEBUG
1488
0
    dbg = zend_mm_get_debug_info(heap, ptr);
1489
0
    dbg->size = real_size;
1490
0
    dbg->filename = __zend_filename;
1491
0
    dbg->orig_filename = __zend_orig_filename;
1492
0
    dbg->lineno = __zend_lineno;
1493
0
    dbg->orig_lineno = __zend_orig_lineno;
1494
0
#endif
1495
0
    return ptr;
1496
0
  } else if (EXPECTED(size <= ZEND_MM_MAX_LARGE_SIZE)) {
1497
0
    ptr = zend_mm_alloc_large(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1498
0
#if ZEND_DEBUG
1499
0
    dbg = zend_mm_get_debug_info(heap, ptr);
1500
0
    dbg->size = real_size;
1501
0
    dbg->filename = __zend_filename;
1502
0
    dbg->orig_filename = __zend_orig_filename;
1503
0
    dbg->lineno = __zend_lineno;
1504
0
    dbg->orig_lineno = __zend_orig_lineno;
1505
0
#endif
1506
0
    return ptr;
1507
0
  } else {
1508
0
#if ZEND_DEBUG
1509
0
    size = real_size;
1510
0
#endif
1511
0
    return zend_mm_alloc_huge(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1512
0
  }
1513
0
}
1514
1515
static zend_always_inline void zend_mm_free_heap(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1516
0
{
1517
0
  size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1518
1519
0
  if (UNEXPECTED(page_offset == 0)) {
1520
0
    if (ptr != NULL) {
1521
0
      zend_mm_free_huge(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1522
0
    }
1523
0
  } else {
1524
0
    zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1525
0
    int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1526
0
    zend_mm_page_info info = chunk->map[page_num];
1527
1528
0
    ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1529
0
    if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1530
0
      zend_mm_free_small(heap, ptr, ZEND_MM_SRUN_BIN_NUM(info));
1531
0
    } else /* if (info & ZEND_MM_IS_LRUN) */ {
1532
0
      int pages_count = ZEND_MM_LRUN_PAGES(info);
1533
1534
0
      ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
1535
0
      zend_mm_free_large(heap, chunk, page_num, pages_count);
1536
0
    }
1537
0
  }
1538
0
}
1539
1540
static size_t zend_mm_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1541
0
{
1542
0
  size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1543
1544
0
  if (UNEXPECTED(page_offset == 0)) {
1545
0
    return zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1546
0
  } else {
1547
0
    zend_mm_chunk *chunk;
1548
#if 0 && ZEND_DEBUG
1549
    zend_mm_debug_info *dbg = zend_mm_get_debug_info(heap, ptr);
1550
    return dbg->size;
1551
#else
1552
0
    int page_num;
1553
0
    zend_mm_page_info info;
1554
1555
0
    chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1556
0
    page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1557
0
    info = chunk->map[page_num];
1558
0
    ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1559
0
    if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1560
0
      return bin_data_size[ZEND_MM_SRUN_BIN_NUM(info)];
1561
0
    } else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
1562
0
      return ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
1563
0
    }
1564
0
#endif
1565
0
  }
1566
0
}
1567
1568
static zend_never_inline void *zend_mm_realloc_slow(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1569
0
{
1570
0
  void *ret;
1571
1572
0
#if ZEND_MM_STAT
1573
0
  do {
1574
0
    size_t orig_peak = heap->peak;
1575
0
#endif
1576
0
    ret = zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1577
0
    memcpy(ret, ptr, copy_size);
1578
0
    zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1579
0
#if ZEND_MM_STAT
1580
0
    heap->peak = MAX(orig_peak, heap->size);
1581
0
  } while (0);
1582
0
#endif
1583
0
  return ret;
1584
0
}
1585
1586
static zend_never_inline void *zend_mm_realloc_huge(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1587
0
{
1588
0
  size_t old_size;
1589
0
  size_t new_size;
1590
0
#if ZEND_DEBUG
1591
0
  size_t real_size;
1592
0
#endif
1593
1594
0
  old_size = zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1595
0
#if ZEND_DEBUG
1596
0
  real_size = size;
1597
0
  size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1598
0
#endif
1599
0
  if (size > ZEND_MM_MAX_LARGE_SIZE) {
1600
0
#if ZEND_DEBUG
1601
0
    size = real_size;
1602
0
#endif
1603
#ifdef ZEND_WIN32
1604
    /* On Windows we don't have ability to extend huge blocks in-place.
1605
     * We allocate them with 2MB size granularity, to avoid many
1606
     * reallocations when they are extended by small pieces
1607
     */
1608
    new_size = ZEND_MM_ALIGNED_SIZE_EX(size, MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE));
1609
#else
1610
0
    new_size = ZEND_MM_ALIGNED_SIZE_EX(size, REAL_PAGE_SIZE);
1611
0
#endif
1612
0
    if (new_size == old_size) {
1613
0
#if ZEND_DEBUG
1614
0
      zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1615
#else
1616
      zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1617
#endif
1618
0
      return ptr;
1619
0
    } else if (new_size < old_size) {
1620
      /* unmup tail */
1621
0
      if (zend_mm_chunk_truncate(heap, ptr, old_size, new_size)) {
1622
0
#if ZEND_MM_STAT || ZEND_MM_LIMIT
1623
0
        heap->real_size -= old_size - new_size;
1624
0
#endif
1625
0
#if ZEND_MM_STAT
1626
0
        heap->size -= old_size - new_size;
1627
0
#endif
1628
0
#if ZEND_DEBUG
1629
0
        zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1630
#else
1631
        zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1632
#endif
1633
0
        return ptr;
1634
0
      }
1635
0
    } else /* if (new_size > old_size) */ {
1636
0
#if ZEND_MM_LIMIT
1637
0
      if (UNEXPECTED(new_size - old_size > heap->limit - heap->real_size)) {
1638
0
        if (zend_mm_gc(heap) && new_size - old_size <= heap->limit - heap->real_size) {
1639
          /* pass */
1640
0
        } else if (heap->overflow == 0) {
1641
0
#if ZEND_DEBUG
1642
0
          zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1643
#else
1644
          zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
1645
#endif
1646
0
          return NULL;
1647
0
        }
1648
0
      }
1649
0
#endif
1650
      /* try to map tail right after this block */
1651
0
      if (zend_mm_chunk_extend(heap, ptr, old_size, new_size)) {
1652
0
#if ZEND_MM_STAT || ZEND_MM_LIMIT
1653
0
        heap->real_size += new_size - old_size;
1654
0
#endif
1655
0
#if ZEND_MM_STAT
1656
0
        heap->real_peak = MAX(heap->real_peak, heap->real_size);
1657
0
        heap->size += new_size - old_size;
1658
0
        heap->peak = MAX(heap->peak, heap->size);
1659
0
#endif
1660
0
#if ZEND_DEBUG
1661
0
        zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1662
#else
1663
        zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1664
#endif
1665
0
        return ptr;
1666
0
      }
1667
0
    }
1668
0
  }
1669
1670
0
  return zend_mm_realloc_slow(heap, ptr, size, MIN(old_size, copy_size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1671
0
}
1672
1673
static zend_always_inline void *zend_mm_realloc_heap(zend_mm_heap *heap, void *ptr, size_t size, bool use_copy_size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1674
0
{
1675
0
  size_t page_offset;
1676
0
  size_t old_size;
1677
0
  size_t new_size;
1678
0
  void *ret;
1679
0
#if ZEND_DEBUG
1680
0
  zend_mm_debug_info *dbg;
1681
0
#endif
1682
1683
0
  page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1684
0
  if (UNEXPECTED(page_offset == 0)) {
1685
0
    if (EXPECTED(ptr == NULL)) {
1686
0
      return _zend_mm_alloc(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1687
0
    } else {
1688
0
      return zend_mm_realloc_huge(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1689
0
    }
1690
0
  } else {
1691
0
    zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1692
0
    int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1693
0
    zend_mm_page_info info = chunk->map[page_num];
1694
0
#if ZEND_MM_HEAP_PROTECTION
1695
0
    if (size < ZEND_MM_MIN_USEABLE_BIN_SIZE) {
1696
0
      size = ZEND_MM_MIN_USEABLE_BIN_SIZE;
1697
0
    }
1698
0
#endif /* ZEND_MM_HEAP_PROTECTION */
1699
0
#if ZEND_DEBUG
1700
0
    size_t real_size = size;
1701
1702
0
    size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1703
0
#endif
1704
1705
0
    ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1706
0
    if (info & ZEND_MM_IS_SRUN) {
1707
0
      int old_bin_num = ZEND_MM_SRUN_BIN_NUM(info);
1708
1709
0
      do {
1710
0
        old_size = bin_data_size[old_bin_num];
1711
1712
        /* Check if requested size fits into current bin */
1713
0
        if (size <= old_size) {
1714
          /* Check if truncation is necessary */
1715
0
          if (old_bin_num > 0 && size < bin_data_size[old_bin_num - 1]) {
1716
            /* truncation */
1717
0
            ret = zend_mm_alloc_small(heap, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1718
0
            copy_size = use_copy_size ? MIN(size, copy_size) : size;
1719
0
            memcpy(ret, ptr, copy_size);
1720
0
            zend_mm_free_small(heap, ptr, old_bin_num);
1721
0
          } else {
1722
            /* reallocation in-place */
1723
0
            ret = ptr;
1724
0
          }
1725
0
        } else if (size <= ZEND_MM_MAX_SMALL_SIZE) {
1726
          /* small extension */
1727
1728
0
#if ZEND_MM_STAT
1729
0
          do {
1730
0
            size_t orig_peak = heap->peak;
1731
0
#endif
1732
0
            ret = zend_mm_alloc_small(heap, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1733
0
            copy_size = use_copy_size ? MIN(old_size, copy_size) : old_size;
1734
0
            memcpy(ret, ptr, copy_size);
1735
0
            zend_mm_free_small(heap, ptr, old_bin_num);
1736
0
#if ZEND_MM_STAT
1737
0
            heap->peak = MAX(orig_peak, heap->size);
1738
0
          } while (0);
1739
0
#endif
1740
0
        } else {
1741
          /* slow reallocation */
1742
0
          break;
1743
0
        }
1744
1745
0
#if ZEND_DEBUG
1746
0
        dbg = zend_mm_get_debug_info(heap, ret);
1747
0
        dbg->size = real_size;
1748
0
        dbg->filename = __zend_filename;
1749
0
        dbg->orig_filename = __zend_orig_filename;
1750
0
        dbg->lineno = __zend_lineno;
1751
0
        dbg->orig_lineno = __zend_orig_lineno;
1752
0
#endif
1753
0
        return ret;
1754
0
      }  while (0);
1755
1756
0
    } else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
1757
0
      ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
1758
0
      old_size = ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
1759
0
      if (size > ZEND_MM_MAX_SMALL_SIZE && size <= ZEND_MM_MAX_LARGE_SIZE) {
1760
0
        new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
1761
0
        if (new_size == old_size) {
1762
0
#if ZEND_DEBUG
1763
0
          dbg = zend_mm_get_debug_info(heap, ptr);
1764
0
          dbg->size = real_size;
1765
0
          dbg->filename = __zend_filename;
1766
0
          dbg->orig_filename = __zend_orig_filename;
1767
0
          dbg->lineno = __zend_lineno;
1768
0
          dbg->orig_lineno = __zend_orig_lineno;
1769
0
#endif
1770
0
          return ptr;
1771
0
        } else if (new_size < old_size) {
1772
          /* free tail pages */
1773
0
          int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
1774
0
          int rest_pages_count = (int)((old_size - new_size) / ZEND_MM_PAGE_SIZE);
1775
1776
0
#if ZEND_MM_STAT
1777
0
          heap->size -= rest_pages_count * ZEND_MM_PAGE_SIZE;
1778
0
#endif
1779
0
          chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
1780
0
          chunk->free_pages += rest_pages_count;
1781
0
          zend_mm_bitset_reset_range(chunk->free_map, page_num + new_pages_count, rest_pages_count);
1782
0
#if ZEND_DEBUG
1783
0
          dbg = zend_mm_get_debug_info(heap, ptr);
1784
0
          dbg->size = real_size;
1785
0
          dbg->filename = __zend_filename;
1786
0
          dbg->orig_filename = __zend_orig_filename;
1787
0
          dbg->lineno = __zend_lineno;
1788
0
          dbg->orig_lineno = __zend_orig_lineno;
1789
0
#endif
1790
0
          return ptr;
1791
0
        } else /* if (new_size > old_size) */ {
1792
0
          int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
1793
0
          int old_pages_count = (int)(old_size / ZEND_MM_PAGE_SIZE);
1794
1795
          /* try to allocate tail pages after this block */
1796
0
          if (page_num + new_pages_count <= ZEND_MM_PAGES &&
1797
0
              zend_mm_bitset_is_free_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count)) {
1798
0
#if ZEND_MM_STAT
1799
0
            do {
1800
0
              size_t size = heap->size + (new_size - old_size);
1801
0
              size_t peak = MAX(heap->peak, size);
1802
0
              heap->size = size;
1803
0
              heap->peak = peak;
1804
0
            } while (0);
1805
0
#endif
1806
0
            chunk->free_pages -= new_pages_count - old_pages_count;
1807
0
            zend_mm_bitset_set_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count);
1808
0
            chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
1809
0
#if ZEND_DEBUG
1810
0
            dbg = zend_mm_get_debug_info(heap, ptr);
1811
0
            dbg->size = real_size;
1812
0
            dbg->filename = __zend_filename;
1813
0
            dbg->orig_filename = __zend_orig_filename;
1814
0
            dbg->lineno = __zend_lineno;
1815
0
            dbg->orig_lineno = __zend_orig_lineno;
1816
0
#endif
1817
0
            return ptr;
1818
0
          }
1819
0
        }
1820
0
      }
1821
0
    }
1822
0
#if ZEND_DEBUG
1823
0
    size = real_size;
1824
0
#endif
1825
0
  }
1826
1827
0
  copy_size = MIN(old_size, copy_size);
1828
0
  return zend_mm_realloc_slow(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1829
0
}
1830
1831
/*********************/
1832
/* Huge Runs (again) */
1833
/*********************/
1834
1835
#if ZEND_DEBUG
1836
static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1837
#else
1838
static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1839
#endif
1840
0
{
1841
0
  zend_mm_huge_list *list = (zend_mm_huge_list*)zend_mm_alloc_heap(heap, sizeof(zend_mm_huge_list) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1842
0
  list->ptr = ptr;
1843
0
  list->size = size;
1844
0
  list->next = heap->huge_list;
1845
0
#if ZEND_DEBUG
1846
0
  list->dbg.size = dbg_size;
1847
0
  list->dbg.filename = __zend_filename;
1848
0
  list->dbg.orig_filename = __zend_orig_filename;
1849
0
  list->dbg.lineno = __zend_lineno;
1850
0
  list->dbg.orig_lineno = __zend_orig_lineno;
1851
0
#endif
1852
0
  heap->huge_list = list;
1853
0
}
1854
1855
static size_t zend_mm_del_huge_block(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1856
0
{
1857
0
  zend_mm_huge_list *prev = NULL;
1858
0
  zend_mm_huge_list *list = heap->huge_list;
1859
0
  while (list != NULL) {
1860
0
    if (list->ptr == ptr) {
1861
0
      size_t size;
1862
1863
0
      if (prev) {
1864
0
        prev->next = list->next;
1865
0
      } else {
1866
0
        heap->huge_list = list->next;
1867
0
      }
1868
0
      size = list->size;
1869
0
      zend_mm_free_heap(heap, list ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1870
0
      return size;
1871
0
    }
1872
0
    prev = list;
1873
0
    list = list->next;
1874
0
  }
1875
0
  ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
1876
0
  return 0;
1877
0
}
1878
1879
static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1880
0
{
1881
0
  zend_mm_huge_list *list = heap->huge_list;
1882
0
  while (list != NULL) {
1883
0
    if (list->ptr == ptr) {
1884
0
      return list->size;
1885
0
    }
1886
0
    list = list->next;
1887
0
  }
1888
0
  ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
1889
0
  return 0;
1890
0
}
1891
1892
#if ZEND_DEBUG
1893
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1894
#else
1895
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1896
#endif
1897
0
{
1898
0
  zend_mm_huge_list *list = heap->huge_list;
1899
0
  while (list != NULL) {
1900
0
    if (list->ptr == ptr) {
1901
0
      list->size = size;
1902
0
#if ZEND_DEBUG
1903
0
      list->dbg.size = dbg_size;
1904
0
      list->dbg.filename = __zend_filename;
1905
0
      list->dbg.orig_filename = __zend_orig_filename;
1906
0
      list->dbg.lineno = __zend_lineno;
1907
0
      list->dbg.orig_lineno = __zend_orig_lineno;
1908
0
#endif
1909
0
      return;
1910
0
    }
1911
0
    list = list->next;
1912
0
  }
1913
0
}
1914
1915
static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1916
0
{
1917
#ifdef ZEND_WIN32
1918
  /* On Windows we don't have ability to extend huge blocks in-place.
1919
   * We allocate them with 2MB size granularity, to avoid many
1920
   * reallocations when they are extended by small pieces
1921
   */
1922
  size_t alignment = MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE);
1923
#else
1924
0
  size_t alignment = REAL_PAGE_SIZE;
1925
0
#endif
1926
0
  size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, alignment);
1927
0
  void *ptr;
1928
1929
0
  if (UNEXPECTED(new_size < size)) {
1930
0
    zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu + %zu)", size, alignment);
1931
0
  }
1932
1933
0
#if ZEND_MM_LIMIT
1934
0
  if (UNEXPECTED(new_size > heap->limit - heap->real_size)) {
1935
0
    if (zend_mm_gc(heap) && new_size <= heap->limit - heap->real_size) {
1936
      /* pass */
1937
0
    } else if (heap->overflow == 0) {
1938
0
#if ZEND_DEBUG
1939
0
      zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1940
#else
1941
      zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
1942
#endif
1943
0
      return NULL;
1944
0
    }
1945
0
  }
1946
0
#endif
1947
0
  ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE);
1948
0
  if (UNEXPECTED(ptr == NULL)) {
1949
    /* insufficient memory */
1950
0
    if (zend_mm_gc(heap) &&
1951
0
        (ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE)) != NULL) {
1952
      /* pass */
1953
0
    } else {
1954
#if !ZEND_MM_LIMIT
1955
      zend_mm_safe_error(heap, "Out of memory");
1956
#elif ZEND_DEBUG
1957
      zend_mm_safe_error(heap, "Out of memory (allocated %zu bytes) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
1958
#else
1959
      zend_mm_safe_error(heap, "Out of memory (allocated %zu bytes) (tried to allocate %zu bytes)", heap->real_size, size);
1960
#endif
1961
0
      return NULL;
1962
0
    }
1963
0
  }
1964
0
#if ZEND_DEBUG
1965
0
  zend_mm_add_huge_block(heap, ptr, new_size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1966
#else
1967
  zend_mm_add_huge_block(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1968
#endif
1969
0
#if ZEND_MM_STAT
1970
0
  do {
1971
0
    size_t size = heap->real_size + new_size;
1972
0
    size_t peak = MAX(heap->real_peak, size);
1973
0
    heap->real_size = size;
1974
0
    heap->real_peak = peak;
1975
0
  } while (0);
1976
0
  do {
1977
0
    size_t size = heap->size + new_size;
1978
0
    size_t peak = MAX(heap->peak, size);
1979
0
    heap->size = size;
1980
0
    heap->peak = peak;
1981
0
  } while (0);
1982
#elif ZEND_MM_LIMIT
1983
  heap->real_size += new_size;
1984
#endif
1985
0
  return ptr;
1986
0
}
1987
1988
static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1989
0
{
1990
0
  size_t size;
1991
1992
0
  ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) == 0, "zend_mm_heap corrupted");
1993
0
  size = zend_mm_del_huge_block(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1994
0
  zend_mm_chunk_free(heap, ptr, size);
1995
0
#if ZEND_MM_STAT || ZEND_MM_LIMIT
1996
0
  heap->real_size -= size;
1997
0
#endif
1998
0
#if ZEND_MM_STAT
1999
0
  heap->size -= size;
2000
0
#endif
2001
0
}
2002
2003
/******************/
2004
/* Initialization */
2005
/******************/
2006
2007
static void zend_mm_refresh_key(zend_mm_heap *heap)
2008
0
{
2009
0
  zend_random_bytes_insecure(&heap->rand_state, &heap->shadow_key, sizeof(heap->shadow_key));
2010
0
}
2011
2012
static void zend_mm_init_key(zend_mm_heap *heap)
2013
0
{
2014
0
  memset(&heap->rand_state, 0, sizeof(heap->rand_state));
2015
0
  zend_mm_refresh_key(heap);
2016
0
}
2017
2018
ZEND_API void zend_mm_refresh_key_child(zend_mm_heap *heap)
2019
0
{
2020
0
  uintptr_t old_key = heap->shadow_key;
2021
2022
0
  zend_mm_init_key(heap);
2023
2024
  /* Update shadow pointers with new key */
2025
0
  for (int i = 0; i < ZEND_MM_BINS; i++) {
2026
0
    zend_mm_free_slot *slot = heap->free_slot[i];
2027
0
    if (!slot) {
2028
0
      continue;
2029
0
    }
2030
0
    zend_mm_free_slot *next;
2031
0
    while ((next = slot->next_free_slot)) {
2032
0
      zend_mm_free_slot *shadow = ZEND_MM_FREE_SLOT_PTR_SHADOW(slot, i);
2033
0
      if (UNEXPECTED(next != zend_mm_decode_free_slot_key(old_key, shadow))) {
2034
0
        zend_mm_panic("zend_mm_heap corrupted");
2035
0
      }
2036
0
      zend_mm_set_next_free_slot(heap, i, slot, next);
2037
0
      slot = next;
2038
0
    }
2039
0
  }
2040
2041
0
#if ZEND_DEBUG
2042
0
  heap->pid = getpid();
2043
0
#endif
2044
0
}
2045
2046
static zend_mm_heap *zend_mm_init(void)
2047
0
{
2048
0
  zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc_int(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
2049
0
  zend_mm_heap *heap;
2050
2051
0
  if (UNEXPECTED(chunk == NULL)) {
2052
0
#if ZEND_MM_ERROR
2053
0
    fprintf(stderr, "Can't initialize heap\n");
2054
0
#endif
2055
0
    return NULL;
2056
0
  }
2057
0
  heap = &chunk->heap_slot;
2058
0
  chunk->heap = heap;
2059
0
  chunk->next = chunk;
2060
0
  chunk->prev = chunk;
2061
0
  chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
2062
0
  chunk->free_tail = ZEND_MM_FIRST_PAGE;
2063
0
  chunk->num = 0;
2064
0
  chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
2065
0
  chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
2066
0
  heap->main_chunk = chunk;
2067
0
  heap->cached_chunks = NULL;
2068
0
  heap->chunks_count = 1;
2069
0
  heap->peak_chunks_count = 1;
2070
0
  heap->cached_chunks_count = 0;
2071
0
  heap->avg_chunks_count = 1.0;
2072
0
  heap->last_chunks_delete_boundary = 0;
2073
0
  heap->last_chunks_delete_count = 0;
2074
0
#if ZEND_MM_STAT || ZEND_MM_LIMIT
2075
0
  heap->real_size = ZEND_MM_CHUNK_SIZE;
2076
0
#endif
2077
0
#if ZEND_MM_STAT
2078
0
  heap->real_peak = ZEND_MM_CHUNK_SIZE;
2079
0
  heap->size = 0;
2080
0
  heap->peak = 0;
2081
0
#endif
2082
0
  zend_mm_init_key(heap);
2083
0
#if ZEND_MM_LIMIT
2084
0
  heap->limit = (size_t)Z_L(-1) >> 1;
2085
0
  heap->overflow = 0;
2086
0
#endif
2087
0
#if ZEND_MM_CUSTOM
2088
0
  heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
2089
0
#endif
2090
0
#if ZEND_MM_STORAGE
2091
0
  heap->storage = NULL;
2092
0
#endif
2093
0
  heap->huge_list = NULL;
2094
0
#if ZEND_DEBUG
2095
0
  heap->pid = getpid();
2096
0
#endif
2097
0
  return heap;
2098
0
}
2099
2100
ZEND_API size_t zend_mm_gc(zend_mm_heap *heap)
2101
0
{
2102
0
  zend_mm_free_slot *p, *q;
2103
0
  zend_mm_chunk *chunk;
2104
0
  size_t page_offset;
2105
0
  int page_num;
2106
0
  zend_mm_page_info info;
2107
0
  uint32_t i, free_counter;
2108
0
  bool has_free_pages;
2109
0
  size_t collected = 0;
2110
2111
0
#if ZEND_MM_CUSTOM
2112
0
  if (heap->use_custom_heap) {
2113
0
    size_t (*gc)(void) = heap->custom_heap._gc;
2114
0
    if (gc) {
2115
0
      return gc();
2116
0
    }
2117
0
    return 0;
2118
0
  }
2119
0
#endif
2120
2121
0
  for (i = 0; i < ZEND_MM_BINS; i++) {
2122
0
    has_free_pages = false;
2123
0
    p = heap->free_slot[i];
2124
0
    while (p != NULL) {
2125
0
      chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
2126
0
      ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
2127
0
      page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
2128
0
      ZEND_ASSERT(page_offset != 0);
2129
0
      page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
2130
0
      info = chunk->map[page_num];
2131
0
      ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
2132
0
      if (info & ZEND_MM_IS_LRUN) {
2133
0
        page_num -= ZEND_MM_NRUN_OFFSET(info);
2134
0
        info = chunk->map[page_num];
2135
0
        ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
2136
0
        ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
2137
0
      }
2138
0
      ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
2139
0
      free_counter = ZEND_MM_SRUN_FREE_COUNTER(info) + 1;
2140
0
      if (free_counter == bin_elements[i]) {
2141
0
        has_free_pages = true;
2142
0
      }
2143
0
      chunk->map[page_num] = ZEND_MM_SRUN_EX(i, free_counter);
2144
0
      p = zend_mm_get_next_free_slot(heap, i, p);
2145
0
    }
2146
2147
0
    if (!has_free_pages) {
2148
0
      continue;
2149
0
    }
2150
2151
0
    q = (zend_mm_free_slot*)&heap->free_slot[i];
2152
0
    p = q->next_free_slot;
2153
0
    while (p != NULL) {
2154
0
      chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
2155
0
      ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
2156
0
      page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
2157
0
      ZEND_ASSERT(page_offset != 0);
2158
0
      page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
2159
0
      info = chunk->map[page_num];
2160
0
      ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
2161
0
      if (info & ZEND_MM_IS_LRUN) {
2162
0
        page_num -= ZEND_MM_NRUN_OFFSET(info);
2163
0
        info = chunk->map[page_num];
2164
0
        ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
2165
0
        ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
2166
0
      }
2167
0
      ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
2168
0
      if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[i]) {
2169
        /* remove from cache */
2170
0
        p = zend_mm_get_next_free_slot(heap, i, p);
2171
0
        if (q == (zend_mm_free_slot*)&heap->free_slot[i]) {
2172
0
          q->next_free_slot = p;
2173
0
        } else {
2174
0
          zend_mm_set_next_free_slot(heap, i, q, p);
2175
0
        }
2176
0
      } else {
2177
0
        q = p;
2178
0
        if (q == (zend_mm_free_slot*)&heap->free_slot[i]) {
2179
0
          p = q->next_free_slot;
2180
0
        } else {
2181
0
          p = zend_mm_get_next_free_slot(heap, i, q);
2182
0
        }
2183
0
      }
2184
0
    }
2185
0
  }
2186
2187
0
  chunk = heap->main_chunk;
2188
0
  do {
2189
0
    i = ZEND_MM_FIRST_PAGE;
2190
0
    while (i < chunk->free_tail) {
2191
0
      if (zend_mm_bitset_is_set(chunk->free_map, i)) {
2192
0
        info = chunk->map[i];
2193
0
        if (info & ZEND_MM_IS_SRUN) {
2194
0
          int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
2195
0
          int pages_count = bin_pages[bin_num];
2196
2197
0
          if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[bin_num]) {
2198
            /* all elements are free */
2199
0
            zend_mm_free_pages_ex(heap, chunk, i, pages_count, 0);
2200
0
            collected += pages_count;
2201
0
          } else {
2202
            /* reset counter */
2203
0
            chunk->map[i] = ZEND_MM_SRUN(bin_num);
2204
0
          }
2205
0
          i += bin_pages[bin_num];
2206
0
        } else /* if (info & ZEND_MM_IS_LRUN) */ {
2207
0
          i += ZEND_MM_LRUN_PAGES(info);
2208
0
        }
2209
0
      } else {
2210
0
        i++;
2211
0
      }
2212
0
    }
2213
0
    if (chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE && chunk != heap->main_chunk) {
2214
0
      zend_mm_chunk *next_chunk = chunk->next;
2215
2216
0
      zend_mm_delete_chunk(heap, chunk);
2217
0
      chunk = next_chunk;
2218
0
    } else {
2219
0
      chunk = chunk->next;
2220
0
    }
2221
0
  } while (chunk != heap->main_chunk);
2222
2223
0
  return collected * ZEND_MM_PAGE_SIZE;
2224
0
}
2225
2226
#if ZEND_DEBUG
2227
/******************/
2228
/* Leak detection */
2229
/******************/
2230
2231
static zend_long zend_mm_find_leaks_small(zend_mm_chunk *p, uint32_t i, uint32_t j, zend_leak_info *leak)
2232
0
{
2233
0
  bool empty = true;
2234
0
  zend_long count = 0;
2235
0
  int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
2236
0
  zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * (j + 1) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2237
2238
0
  while (j < bin_elements[bin_num]) {
2239
0
    if (dbg->size != 0) {
2240
0
      if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
2241
0
        count++;
2242
0
        dbg->size = 0;
2243
0
        dbg->filename = NULL;
2244
0
        dbg->lineno = 0;
2245
0
      } else {
2246
0
        empty = false;
2247
0
      }
2248
0
    }
2249
0
    j++;
2250
0
    dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
2251
0
  }
2252
0
  if (empty) {
2253
0
    zend_mm_bitset_reset_range(p->free_map, i, bin_pages[bin_num]);
2254
0
  }
2255
0
  return count;
2256
0
}
2257
2258
static zend_long zend_mm_find_leaks(zend_mm_heap *heap, zend_mm_chunk *p, uint32_t i, zend_leak_info *leak)
2259
0
{
2260
0
  zend_long count = 0;
2261
2262
0
  do {
2263
0
    while (i < p->free_tail) {
2264
0
      if (zend_mm_bitset_is_set(p->free_map, i)) {
2265
0
        if (p->map[i] & ZEND_MM_IS_SRUN) {
2266
0
          int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
2267
0
          count += zend_mm_find_leaks_small(p, i, 0, leak);
2268
0
          i += bin_pages[bin_num];
2269
0
        } else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
2270
0
          int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
2271
0
          zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2272
2273
0
          if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
2274
0
            count++;
2275
0
          }
2276
0
          zend_mm_bitset_reset_range(p->free_map, i, pages_count);
2277
0
          i += pages_count;
2278
0
        }
2279
0
      } else {
2280
0
        i++;
2281
0
      }
2282
0
    }
2283
0
    p = p->next;
2284
0
    i = ZEND_MM_FIRST_PAGE;
2285
0
  } while (p != heap->main_chunk);
2286
0
  return count;
2287
0
}
2288
2289
static zend_long zend_mm_find_leaks_huge(zend_mm_heap *heap, zend_mm_huge_list *list)
2290
0
{
2291
0
  zend_long count = 0;
2292
0
  zend_mm_huge_list *prev = list;
2293
0
  zend_mm_huge_list *p = list->next;
2294
2295
0
  while (p) {
2296
0
    if (p->dbg.filename == list->dbg.filename && p->dbg.lineno == list->dbg.lineno) {
2297
0
      prev->next = p->next;
2298
0
      zend_mm_chunk_free(heap, p->ptr, p->size);
2299
0
      zend_mm_free_heap(heap, p, NULL, 0, NULL, 0);
2300
0
      count++;
2301
0
    } else {
2302
0
      prev = p;
2303
0
    }
2304
0
    p = prev->next;
2305
0
  }
2306
2307
0
  return count;
2308
0
}
2309
2310
static void zend_mm_check_leaks(zend_mm_heap *heap)
2311
0
{
2312
0
  zend_mm_huge_list *list;
2313
0
  zend_mm_chunk *p;
2314
0
  zend_leak_info leak;
2315
0
  zend_long repeated = 0;
2316
0
  uint32_t total = 0;
2317
0
  uint32_t i, j;
2318
2319
  /* find leaked huge blocks and free them */
2320
0
  list = heap->huge_list;
2321
0
  while (list) {
2322
0
    zend_mm_huge_list *q = list;
2323
2324
0
    leak.addr = list->ptr;
2325
0
    leak.size = list->dbg.size;
2326
0
    leak.filename = list->dbg.filename;
2327
0
    leak.orig_filename = list->dbg.orig_filename;
2328
0
    leak.lineno = list->dbg.lineno;
2329
0
    leak.orig_lineno = list->dbg.orig_lineno;
2330
2331
0
    zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
2332
0
    zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
2333
0
    repeated = zend_mm_find_leaks_huge(heap, list);
2334
0
    total += 1 + repeated;
2335
0
    if (repeated) {
2336
0
      zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(uintptr_t)repeated);
2337
0
    }
2338
2339
0
    heap->huge_list = list = list->next;
2340
0
    zend_mm_chunk_free(heap, q->ptr, q->size);
2341
0
    zend_mm_free_heap(heap, q, NULL, 0, NULL, 0);
2342
0
  }
2343
2344
  /* for each chunk */
2345
0
  p = heap->main_chunk;
2346
0
  do {
2347
0
    i = ZEND_MM_FIRST_PAGE;
2348
0
    while (i < p->free_tail) {
2349
0
      if (zend_mm_bitset_is_set(p->free_map, i)) {
2350
0
        if (p->map[i] & ZEND_MM_IS_SRUN) {
2351
0
          int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
2352
0
          zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2353
2354
0
          j = 0;
2355
0
          while (j < bin_elements[bin_num]) {
2356
0
            if (dbg->size != 0) {
2357
0
              leak.addr = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * j);
2358
0
              leak.size = dbg->size;
2359
0
              leak.filename = dbg->filename;
2360
0
              leak.orig_filename = dbg->orig_filename;
2361
0
              leak.lineno = dbg->lineno;
2362
0
              leak.orig_lineno = dbg->orig_lineno;
2363
2364
0
              zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
2365
0
              zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
2366
2367
0
              dbg->size = 0;
2368
0
              dbg->filename = NULL;
2369
0
              dbg->lineno = 0;
2370
2371
0
              repeated = zend_mm_find_leaks_small(p, i, j + 1, &leak) +
2372
0
                         zend_mm_find_leaks(heap, p, i + bin_pages[bin_num], &leak);
2373
0
              total += 1 + repeated;
2374
0
              if (repeated) {
2375
0
                zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(uintptr_t)repeated);
2376
0
              }
2377
0
            }
2378
0
            dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
2379
0
            j++;
2380
0
          }
2381
0
          i += bin_pages[bin_num];
2382
0
        } else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
2383
0
          int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
2384
0
          zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2385
2386
0
          leak.addr = (void*)((char*)p + ZEND_MM_PAGE_SIZE * i);
2387
0
          leak.size = dbg->size;
2388
0
          leak.filename = dbg->filename;
2389
0
          leak.orig_filename = dbg->orig_filename;
2390
0
          leak.lineno = dbg->lineno;
2391
0
          leak.orig_lineno = dbg->orig_lineno;
2392
2393
0
          zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
2394
0
          zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
2395
2396
0
          zend_mm_bitset_reset_range(p->free_map, i, pages_count);
2397
2398
0
          repeated = zend_mm_find_leaks(heap, p, i + pages_count, &leak);
2399
0
          total += 1 + repeated;
2400
0
          if (repeated) {
2401
0
            zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(uintptr_t)repeated);
2402
0
          }
2403
0
          i += pages_count;
2404
0
        }
2405
0
      } else {
2406
0
        i++;
2407
0
      }
2408
0
    }
2409
0
    p = p->next;
2410
0
  } while (p != heap->main_chunk);
2411
0
  if (total) {
2412
0
    zend_message_dispatcher(ZMSG_MEMORY_LEAKS_GRAND_TOTAL, &total);
2413
0
  }
2414
0
}
2415
#endif
2416
2417
#if ZEND_MM_CUSTOM
2418
static void *tracked_malloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
2419
static void tracked_free_all(zend_mm_heap *heap);
2420
static void *poison_malloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
2421
2422
static void zend_mm_check_freelists(zend_mm_heap *heap)
2423
0
{
2424
0
  for (uint32_t bin_num = 0; bin_num < ZEND_MM_BINS; bin_num++) {
2425
0
    zend_mm_free_slot *slot = heap->free_slot[bin_num];
2426
0
    while (slot) {
2427
0
      slot = zend_mm_get_next_free_slot(heap, bin_num, slot);
2428
0
    }
2429
0
  }
2430
0
}
2431
#endif
2432
2433
ZEND_API void zend_mm_shutdown(zend_mm_heap *heap, bool full, bool silent)
2434
191k
{
2435
191k
  zend_mm_chunk *p;
2436
191k
  zend_mm_huge_list *list;
2437
2438
191k
#if ZEND_MM_CUSTOM
2439
191k
  if (heap->use_custom_heap) {
2440
191k
    if (heap->custom_heap._malloc == tracked_malloc) {
2441
125k
      if (silent) {
2442
18.7k
        tracked_free_all(heap);
2443
18.7k
      }
2444
125k
      zend_hash_clean(heap->tracked_allocs);
2445
125k
      if (full) {
2446
0
        zend_hash_destroy(heap->tracked_allocs);
2447
0
        free(heap->tracked_allocs);
2448
        /* Make sure the heap free below does not use tracked_free(). */
2449
0
        heap->custom_heap._free = __zend_free;
2450
0
      }
2451
125k
#if ZEND_MM_STAT
2452
125k
      heap->size = 0;
2453
125k
      heap->real_size = 0;
2454
125k
#endif
2455
125k
    }
2456
2457
191k
    void (*shutdown)(bool, bool) = heap->custom_heap._shutdown;
2458
2459
191k
    if (full) {
2460
0
      heap->custom_heap._free(heap ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
2461
0
    }
2462
2463
191k
    if (shutdown) {
2464
0
      shutdown(full, silent);
2465
0
    }
2466
2467
191k
    return;
2468
191k
  }
2469
0
#endif
2470
2471
0
#if ZEND_DEBUG
2472
0
  if (!silent) {
2473
0
    char *tmp = getenv("ZEND_ALLOC_PRINT_LEAKS");
2474
0
    if (!tmp || ZEND_ATOL(tmp)) {
2475
0
      zend_mm_check_leaks(heap);
2476
0
    }
2477
0
  }
2478
0
#endif
2479
2480
  /* free huge blocks */
2481
0
  list = heap->huge_list;
2482
0
  heap->huge_list = NULL;
2483
0
  while (list) {
2484
0
    zend_mm_huge_list *q = list;
2485
0
    list = list->next;
2486
0
    zend_mm_chunk_free(heap, q->ptr, q->size);
2487
0
  }
2488
2489
  /* move all chunks except of the first one into the cache */
2490
0
  p = heap->main_chunk->next;
2491
0
  while (p != heap->main_chunk) {
2492
0
    zend_mm_chunk *q = p->next;
2493
0
    p->next = heap->cached_chunks;
2494
0
    heap->cached_chunks = p;
2495
0
    p = q;
2496
0
    heap->chunks_count--;
2497
0
    heap->cached_chunks_count++;
2498
0
  }
2499
2500
0
  if (full) {
2501
    /* free all cached chunks */
2502
0
    while (heap->cached_chunks) {
2503
0
      p = heap->cached_chunks;
2504
0
      heap->cached_chunks = p->next;
2505
0
      zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
2506
0
    }
2507
    /* free the first chunk */
2508
0
    zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
2509
0
  } else {
2510
    /* free some cached chunks to keep average count */
2511
0
    heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0;
2512
0
    while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count &&
2513
0
           heap->cached_chunks) {
2514
0
      p = heap->cached_chunks;
2515
0
      heap->cached_chunks = p->next;
2516
0
      zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
2517
0
      heap->cached_chunks_count--;
2518
0
    }
2519
    /* clear cached chunks */
2520
0
    p = heap->cached_chunks;
2521
0
    while (p != NULL) {
2522
0
      zend_mm_chunk *q = p->next;
2523
0
      memset(p, 0, sizeof(zend_mm_chunk));
2524
0
      p->next = q;
2525
0
      p = q;
2526
0
    }
2527
2528
    /* reinitialize the first chunk and heap */
2529
0
    p = heap->main_chunk;
2530
0
    p->heap = &p->heap_slot;
2531
0
    p->next = p;
2532
0
    p->prev = p;
2533
0
    p->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
2534
0
    p->free_tail = ZEND_MM_FIRST_PAGE;
2535
0
    p->num = 0;
2536
2537
0
#if ZEND_MM_STAT
2538
0
    heap->size = heap->peak = 0;
2539
0
#endif
2540
0
    memset(heap->free_slot, 0, sizeof(heap->free_slot));
2541
0
#if ZEND_MM_STAT || ZEND_MM_LIMIT
2542
0
    heap->real_size = (heap->cached_chunks_count + 1) * ZEND_MM_CHUNK_SIZE;
2543
0
#endif
2544
0
#if ZEND_MM_STAT
2545
0
    heap->real_peak = (heap->cached_chunks_count + 1) * ZEND_MM_CHUNK_SIZE;
2546
0
#endif
2547
0
    heap->chunks_count = 1;
2548
0
    heap->peak_chunks_count = 1;
2549
0
    heap->last_chunks_delete_boundary = 0;
2550
0
    heap->last_chunks_delete_count = 0;
2551
2552
0
    memset(p->free_map, 0, sizeof(p->free_map) + sizeof(p->map));
2553
0
    p->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
2554
0
    p->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
2555
2556
0
#if ZEND_DEBUG
2557
0
    ZEND_ASSERT(getpid() == heap->pid
2558
0
        && "heap was re-used without calling zend_mm_refresh_key_child() after a fork");
2559
0
#endif
2560
2561
0
    zend_mm_refresh_key(heap);
2562
0
  }
2563
0
}
2564
2565
/**************/
2566
/* PUBLIC API */
2567
/**************/
2568
2569
ZEND_API void* ZEND_FASTCALL _zend_mm_alloc(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2570
0
{
2571
0
  return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2572
0
}
2573
2574
ZEND_API void ZEND_FASTCALL _zend_mm_free(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2575
0
{
2576
0
  zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2577
0
}
2578
2579
void* ZEND_FASTCALL _zend_mm_realloc(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2580
0
{
2581
0
  return zend_mm_realloc_heap(heap, ptr, size, 0, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2582
0
}
2583
2584
void* ZEND_FASTCALL _zend_mm_realloc2(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2585
0
{
2586
0
  return zend_mm_realloc_heap(heap, ptr, size, 1, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2587
0
}
2588
2589
ZEND_API size_t ZEND_FASTCALL _zend_mm_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2590
0
{
2591
0
#if ZEND_MM_CUSTOM
2592
0
  if (UNEXPECTED(heap->use_custom_heap)) {
2593
0
    if (heap->custom_heap._malloc == tracked_malloc) {
2594
0
      zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
2595
0
      zval *size_zv = zend_hash_index_find(heap->tracked_allocs, h);
2596
0
      if  (size_zv) {
2597
0
        return Z_LVAL_P(size_zv);
2598
0
      }
2599
0
    } else if (heap->custom_heap._malloc != poison_malloc) {
2600
0
      return 0;
2601
0
    }
2602
0
  }
2603
0
#endif
2604
0
  return zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2605
0
}
2606
2607
/**********************/
2608
/* Allocation Manager */
2609
/**********************/
2610
2611
typedef struct _zend_alloc_globals {
2612
  zend_mm_heap *mm_heap;
2613
} zend_alloc_globals;
2614
2615
#ifdef ZTS
2616
static int alloc_globals_id;
2617
static size_t alloc_globals_offset;
2618
# define AG(v) ZEND_TSRMG_FAST(alloc_globals_offset, zend_alloc_globals *, v)
2619
#else
2620
4.11G
# define AG(v) (alloc_globals.v)
2621
static zend_alloc_globals alloc_globals;
2622
#endif
2623
2624
ZEND_API bool is_zend_mm(void)
2625
4
{
2626
4
#if ZEND_MM_CUSTOM
2627
4
  return !AG(mm_heap)->use_custom_heap;
2628
#else
2629
  return true;
2630
#endif
2631
4
}
2632
2633
ZEND_API bool is_zend_ptr(const void *ptr)
2634
0
{
2635
0
#if ZEND_MM_CUSTOM
2636
0
  if (AG(mm_heap)->use_custom_heap) {
2637
0
    if (AG(mm_heap)->custom_heap._malloc == tracked_malloc) {
2638
0
      zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
2639
0
      zval *size_zv = zend_hash_index_find(AG(mm_heap)->tracked_allocs, h);
2640
0
      if  (size_zv) {
2641
0
        return 1;
2642
0
      }
2643
0
    }
2644
0
    return 0;
2645
0
  }
2646
0
#endif
2647
2648
0
  if (AG(mm_heap)->main_chunk) {
2649
0
    zend_mm_chunk *chunk = AG(mm_heap)->main_chunk;
2650
2651
0
    do {
2652
0
      if (ptr >= (void*)chunk
2653
0
       && ptr < (void*)((char*)chunk + ZEND_MM_CHUNK_SIZE)) {
2654
0
        return 1;
2655
0
      }
2656
0
      chunk = chunk->next;
2657
0
    } while (chunk != AG(mm_heap)->main_chunk);
2658
0
  }
2659
2660
0
  zend_mm_huge_list *block = AG(mm_heap)->huge_list;
2661
0
  while (block) {
2662
0
    if (ptr >= block->ptr
2663
0
        && ptr < (void*)((char*)block->ptr + block->size)) {
2664
0
      return 1;
2665
0
    }
2666
0
    block = block->next;
2667
0
  }
2668
2669
0
  return 0;
2670
0
}
2671
2672
#if !ZEND_DEBUG && defined(HAVE_BUILTIN_CONSTANT_P)
2673
#undef _emalloc
2674
2675
#if ZEND_MM_CUSTOM
2676
# define ZEND_MM_CUSTOM_ALLOCATOR(size) do { \
2677
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
2678
      return AG(mm_heap)->custom_heap._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2679
    } \
2680
  } while (0)
2681
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr) do { \
2682
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
2683
      AG(mm_heap)->custom_heap._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2684
      return; \
2685
    } \
2686
  } while (0)
2687
#else
2688
# define ZEND_MM_CUSTOM_ALLOCATOR(size)
2689
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr)
2690
#endif
2691
2692
# define _ZEND_BIN_ALLOCATOR(_num, _size, _elements, _pages, _min_size, y) \
2693
  ZEND_API void* ZEND_FASTCALL _emalloc_ ## _size(void) { \
2694
    ZEND_MM_CUSTOM_ALLOCATOR(_size); \
2695
    if (_size < _min_size) { \
2696
      return _emalloc_ ## _min_size(); \
2697
    } \
2698
    return zend_mm_alloc_small(AG(mm_heap), _num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2699
  }
2700
2701
ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR, ZEND_MM_MIN_USEABLE_BIN_SIZE, y)
2702
2703
ZEND_API void* ZEND_FASTCALL _emalloc_large(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2704
{
2705
  ZEND_MM_CUSTOM_ALLOCATOR(size);
2706
  return zend_mm_alloc_large_ex(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2707
}
2708
2709
ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
2710
{
2711
  ZEND_MM_CUSTOM_ALLOCATOR(size);
2712
  return zend_mm_alloc_huge(AG(mm_heap), size);
2713
}
2714
2715
#if ZEND_DEBUG
2716
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, _min_size, y) \
2717
  ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
2718
    ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
2719
    if (_size < _min_size) { \
2720
      _efree_ ## _min_size(ptr); \
2721
      return; \
2722
    } \
2723
    { \
2724
      size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); \
2725
      zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
2726
      int page_num = page_offset / ZEND_MM_PAGE_SIZE; \
2727
      ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
2728
      ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_SRUN); \
2729
      ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(chunk->map[page_num]) == _num); \
2730
      zend_mm_free_small(AG(mm_heap), ptr, _num); \
2731
    } \
2732
  }
2733
#else
2734
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, _min_size, y) \
2735
  ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
2736
    ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
2737
    if (_size < _min_size) { \
2738
      _efree_ ## _min_size(ptr); \
2739
      return; \
2740
    } \
2741
    { \
2742
      zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
2743
      ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
2744
      zend_mm_free_small(AG(mm_heap), ptr, _num); \
2745
    } \
2746
  }
2747
#endif
2748
2749
ZEND_MM_BINS_INFO(_ZEND_BIN_FREE, ZEND_MM_MIN_USEABLE_BIN_SIZE, y)
2750
2751
ZEND_API void ZEND_FASTCALL _efree_large(void *ptr, size_t size)
2752
{
2753
  ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
2754
  {
2755
    size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
2756
    zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
2757
    int page_num = page_offset / ZEND_MM_PAGE_SIZE;
2758
    uint32_t pages_count = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE) / ZEND_MM_PAGE_SIZE;
2759
2760
    ZEND_MM_CHECK(chunk->heap == AG(mm_heap) && ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
2761
    ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_LRUN);
2762
    ZEND_ASSERT(ZEND_MM_LRUN_PAGES(chunk->map[page_num]) == pages_count);
2763
    zend_mm_free_large(AG(mm_heap), chunk, page_num, pages_count);
2764
  }
2765
}
2766
2767
ZEND_API void ZEND_FASTCALL _efree_huge(void *ptr, size_t size)
2768
{
2769
2770
  ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
2771
  zend_mm_free_huge(AG(mm_heap), ptr);
2772
}
2773
#endif
2774
2775
ZEND_API void* ZEND_FASTCALL _emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2776
1.16G
{
2777
1.16G
#if ZEND_MM_CUSTOM
2778
1.16G
  if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2779
1.16G
    return AG(mm_heap)->custom_heap._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2780
1.16G
  }
2781
0
#endif
2782
0
  return zend_mm_alloc_heap(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2783
1.16G
}
2784
2785
ZEND_API void ZEND_FASTCALL _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2786
1.16G
{
2787
1.16G
#if ZEND_MM_CUSTOM
2788
1.16G
  if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2789
1.16G
    AG(mm_heap)->custom_heap._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2790
1.16G
    return;
2791
1.16G
  }
2792
0
#endif
2793
0
  zend_mm_free_heap(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2794
0
}
2795
2796
ZEND_API void* ZEND_FASTCALL _erealloc(void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2797
23.1M
{
2798
23.1M
#if ZEND_MM_CUSTOM
2799
23.1M
  if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2800
23.1M
    return AG(mm_heap)->custom_heap._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2801
23.1M
  }
2802
0
#endif
2803
0
  return zend_mm_realloc_heap(AG(mm_heap), ptr, size, 0, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2804
23.1M
}
2805
2806
ZEND_API void* ZEND_FASTCALL _erealloc2(void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2807
255k
{
2808
255k
#if ZEND_MM_CUSTOM
2809
255k
  if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2810
255k
    return AG(mm_heap)->custom_heap._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2811
255k
  }
2812
0
#endif
2813
0
  return zend_mm_realloc_heap(AG(mm_heap), ptr, size, 1, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2814
255k
}
2815
2816
ZEND_API size_t ZEND_FASTCALL _zend_mem_block_size(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2817
0
{
2818
0
  return _zend_mm_block_size(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2819
0
}
2820
2821
ZEND_API void* ZEND_FASTCALL _safe_emalloc(size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2822
1.04M
{
2823
1.04M
  return _emalloc(zend_safe_address_guarded(nmemb, size, offset) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2824
1.04M
}
2825
2826
ZEND_API void* ZEND_FASTCALL _safe_malloc(size_t nmemb, size_t size, size_t offset)
2827
0
{
2828
0
  return pemalloc(zend_safe_address_guarded(nmemb, size, offset), 1);
2829
0
}
2830
2831
ZEND_API void* ZEND_FASTCALL _safe_erealloc(void *ptr, size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2832
1.20M
{
2833
1.20M
  return _erealloc(ptr, zend_safe_address_guarded(nmemb, size, offset) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2834
1.20M
}
2835
2836
ZEND_API void* ZEND_FASTCALL _safe_realloc(void *ptr, size_t nmemb, size_t size, size_t offset)
2837
0
{
2838
0
  return perealloc(ptr, zend_safe_address_guarded(nmemb, size, offset), 1);
2839
0
}
2840
2841
ZEND_API void* ZEND_FASTCALL _ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2842
203M
{
2843
203M
  void *p;
2844
2845
203M
  size = zend_safe_address_guarded(nmemb, size, 0);
2846
203M
  p = _emalloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2847
203M
  memset(p, 0, size);
2848
203M
  return p;
2849
203M
}
2850
2851
ZEND_API char* ZEND_FASTCALL _estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2852
882M
{
2853
882M
  size_t length;
2854
882M
  char *p;
2855
2856
882M
  length = strlen(s);
2857
882M
  if (UNEXPECTED(length + 1 == 0)) {
2858
0
    zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
2859
0
  }
2860
882M
  p = (char *) _emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2861
882M
  memcpy(p, s, length+1);
2862
882M
  return p;
2863
882M
}
2864
2865
ZEND_API char* ZEND_FASTCALL _estrndup(const char *s, size_t length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2866
6.02M
{
2867
6.02M
  char *p;
2868
2869
6.02M
  if (UNEXPECTED(length + 1 == 0)) {
2870
0
    zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
2871
0
  }
2872
6.02M
  p = (char *) _emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2873
6.02M
  memcpy(p, s, length);
2874
6.02M
  p[length] = 0;
2875
6.02M
  return p;
2876
6.02M
}
2877
2878
static ZEND_COLD ZEND_NORETURN void zend_out_of_memory(void);
2879
2880
ZEND_API char* ZEND_FASTCALL zend_strndup(const char *s, size_t length)
2881
13
{
2882
13
  char *p;
2883
2884
13
  if (UNEXPECTED(length + 1 == 0)) {
2885
0
    zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
2886
0
  }
2887
13
  p = (char *) malloc(length + 1);
2888
13
  if (UNEXPECTED(p == NULL)) {
2889
0
    zend_out_of_memory();
2890
0
  }
2891
13
  if (EXPECTED(length)) {
2892
13
    memcpy(p, s, length);
2893
13
  }
2894
13
  p[length] = 0;
2895
13
  return p;
2896
13
}
2897
2898
ZEND_API zend_result zend_set_memory_limit(size_t memory_limit)
2899
198k
{
2900
198k
#if ZEND_MM_LIMIT
2901
198k
  zend_mm_heap *heap = AG(mm_heap);
2902
2903
198k
  if (UNEXPECTED(memory_limit < heap->real_size)) {
2904
39
    if (memory_limit >= heap->real_size - heap->cached_chunks_count * ZEND_MM_CHUNK_SIZE) {
2905
      /* free some cached chunks to fit into new memory limit */
2906
0
      do {
2907
0
        zend_mm_chunk *p = heap->cached_chunks;
2908
0
        heap->cached_chunks = p->next;
2909
0
        zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
2910
0
        heap->cached_chunks_count--;
2911
0
        heap->real_size -= ZEND_MM_CHUNK_SIZE;
2912
0
      } while (memory_limit < heap->real_size);
2913
0
      return SUCCESS;
2914
0
    }
2915
39
    return FAILURE;
2916
39
  }
2917
198k
  AG(mm_heap)->limit = memory_limit;
2918
198k
#endif
2919
198k
  return SUCCESS;
2920
198k
}
2921
2922
ZEND_API bool zend_alloc_in_memory_limit_error_reporting(void)
2923
2.27M
{
2924
2.27M
#if ZEND_MM_LIMIT
2925
2.27M
  return AG(mm_heap)->overflow;
2926
#else
2927
  return false;
2928
#endif
2929
2.27M
}
2930
2931
ZEND_API size_t zend_memory_usage(bool real_usage)
2932
56
{
2933
56
#if ZEND_MM_STAT
2934
56
  if (real_usage) {
2935
31
    return AG(mm_heap)->real_size;
2936
31
  } else {
2937
25
    size_t usage = AG(mm_heap)->size;
2938
25
    return usage;
2939
25
  }
2940
0
#endif
2941
0
  return 0;
2942
56
}
2943
2944
ZEND_API size_t zend_memory_peak_usage(bool real_usage)
2945
0
{
2946
0
#if ZEND_MM_STAT
2947
0
  if (real_usage) {
2948
0
    return AG(mm_heap)->real_peak;
2949
0
  } else {
2950
0
    return AG(mm_heap)->peak;
2951
0
  }
2952
0
#endif
2953
0
  return 0;
2954
0
}
2955
2956
ZEND_API void zend_memory_reset_peak_usage(void)
2957
0
{
2958
0
#if ZEND_MM_STAT
2959
0
  AG(mm_heap)->real_peak = AG(mm_heap)->real_size;
2960
0
  AG(mm_heap)->peak = AG(mm_heap)->size;
2961
0
#endif
2962
0
}
2963
2964
ZEND_API void shutdown_memory_manager(bool silent, bool full_shutdown)
2965
191k
{
2966
191k
  zend_mm_shutdown(AG(mm_heap), full_shutdown, silent);
2967
191k
}
2968
2969
ZEND_API void refresh_memory_manager(void)
2970
0
{
2971
0
  zend_mm_refresh_key_child(AG(mm_heap));
2972
0
}
2973
2974
static ZEND_COLD ZEND_NORETURN void zend_out_of_memory(void)
2975
0
{
2976
0
  fprintf(stderr, "Out of memory\n");
2977
0
  exit(1);
2978
0
}
2979
2980
#if ZEND_MM_CUSTOM
2981
892M
static zend_always_inline void tracked_add(zend_mm_heap *heap, void *ptr, size_t size) {
2982
892M
  zval size_zv;
2983
892M
  zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
2984
892M
  ZEND_ASSERT((void *) (uintptr_t) (h << ZEND_MM_ALIGNMENT_LOG2) == ptr);
2985
892M
  ZVAL_LONG(&size_zv, size);
2986
892M
  zend_hash_index_add_new(heap->tracked_allocs, h, &size_zv);
2987
892M
}
2988
2989
881M
static zend_always_inline zval *tracked_get_size_zv(zend_mm_heap *heap, void *ptr) {
2990
881M
  zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
2991
881M
  zval *size_zv = zend_hash_index_find(heap->tracked_allocs, h);
2992
881M
  ZEND_ASSERT(size_zv && "Trying to free pointer not allocated through ZendMM");
2993
881M
  return size_zv;
2994
881M
}
2995
2996
887M
static zend_always_inline void tracked_check_limit(zend_mm_heap *heap, size_t add_size) {
2997
887M
#if ZEND_MM_STAT
2998
887M
  if (add_size > heap->limit - heap->size && !heap->overflow) {
2999
544
#if ZEND_DEBUG
3000
544
    zend_mm_safe_error(heap,
3001
544
      "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)",
3002
544
      heap->limit, "file", 0, add_size);
3003
#else
3004
    zend_mm_safe_error(heap,
3005
      "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)",
3006
      heap->limit, add_size);
3007
#endif
3008
544
  }
3009
887M
#endif
3010
887M
}
3011
3012
static void *tracked_malloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
3013
870M
{
3014
870M
  zend_mm_heap *heap = AG(mm_heap);
3015
870M
  tracked_check_limit(heap, size);
3016
3017
870M
  void *ptr = malloc(size);
3018
870M
  if (!ptr) {
3019
0
    zend_out_of_memory();
3020
0
  }
3021
3022
870M
  tracked_add(heap, ptr, size);
3023
870M
#if ZEND_MM_STAT
3024
870M
  heap->size += size;
3025
870M
  heap->real_size = heap->size;
3026
870M
#endif
3027
870M
  return ptr;
3028
870M
}
3029
3030
863M
static void tracked_free(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) {
3031
863M
  if (!ptr) {
3032
365k
    return;
3033
365k
  }
3034
3035
862M
  zend_mm_heap *heap = AG(mm_heap);
3036
862M
  zval *size_zv = tracked_get_size_zv(heap, ptr);
3037
862M
#if ZEND_MM_STAT
3038
862M
  heap->size -= Z_LVAL_P(size_zv);
3039
862M
  heap->real_size = heap->size;
3040
862M
#endif
3041
862M
  zend_hash_del_bucket(heap->tracked_allocs, (Bucket *) size_zv);
3042
862M
  free(ptr);
3043
862M
}
3044
3045
21.8M
static void *tracked_realloc(void *ptr, size_t new_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) {
3046
21.8M
  zend_mm_heap *heap = AG(mm_heap);
3047
21.8M
  zval *old_size_zv = NULL;
3048
21.8M
  size_t old_size = 0;
3049
21.8M
  if (ptr) {
3050
18.5M
    old_size_zv = tracked_get_size_zv(heap, ptr);
3051
18.5M
    old_size = Z_LVAL_P(old_size_zv);
3052
18.5M
  }
3053
3054
21.8M
  if (new_size > old_size) {
3055
17.3M
    tracked_check_limit(heap, new_size - old_size);
3056
17.3M
  }
3057
3058
  /* Delete information about old allocation only after checking the memory limit. */
3059
21.8M
  if (old_size_zv) {
3060
18.5M
    zend_hash_del_bucket(heap->tracked_allocs, (Bucket *) old_size_zv);
3061
18.5M
  }
3062
3063
21.8M
  ptr = __zend_realloc(ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
3064
21.8M
  tracked_add(heap, ptr, new_size);
3065
21.8M
#if ZEND_MM_STAT
3066
21.8M
  heap->size += new_size - old_size;
3067
21.8M
  heap->real_size = heap->size;
3068
21.8M
#endif
3069
21.8M
  return ptr;
3070
21.8M
}
3071
3072
18.7k
static void tracked_free_all(zend_mm_heap *heap) {
3073
18.7k
  HashTable *tracked_allocs = heap->tracked_allocs;
3074
18.7k
  zend_ulong h;
3075
221M
  ZEND_HASH_FOREACH_NUM_KEY(tracked_allocs, h) {
3076
221M
    void *ptr = (void *) (uintptr_t) (h << ZEND_MM_ALIGNMENT_LOG2);
3077
221M
    free(ptr);
3078
221M
  } ZEND_HASH_FOREACH_END();
3079
18.7k
}
3080
3081
static void* poison_malloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
3082
0
{
3083
0
  zend_mm_heap *heap = AG(mm_heap);
3084
3085
0
  if (SIZE_MAX - heap->debug.padding * 2 < size) {
3086
0
    zend_mm_panic("Integer overflow in memory allocation");
3087
0
  }
3088
0
  size += heap->debug.padding * 2;
3089
3090
0
  void *ptr = zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
3091
3092
0
  if (EXPECTED(ptr)) {
3093
0
    if (heap->debug.poison_alloc) {
3094
0
      memset(ptr, heap->debug.poison_alloc_value, size);
3095
0
    }
3096
3097
0
    ptr = (char*)ptr + heap->debug.padding;
3098
0
  }
3099
3100
0
  return ptr;
3101
0
}
3102
3103
static void poison_free(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
3104
0
{
3105
0
  zend_mm_heap *heap = AG(mm_heap);
3106
3107
0
  if (EXPECTED(ptr)) {
3108
    /* zend_mm_shutdown() will try to free the heap when custom handlers
3109
     * are installed */
3110
0
    if (UNEXPECTED(ptr == heap)) {
3111
0
      return;
3112
0
    }
3113
3114
0
    ptr = (char*)ptr - heap->debug.padding;
3115
3116
0
    size_t size = zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
3117
3118
0
    if (heap->debug.poison_free) {
3119
0
      memset(ptr, heap->debug.poison_free_value, size);
3120
0
    }
3121
0
  }
3122
3123
0
  zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
3124
0
}
3125
3126
static void* poison_realloc(void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
3127
0
{
3128
0
  zend_mm_heap *heap = AG(mm_heap);
3129
3130
0
  void *new = poison_malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
3131
3132
0
  if (ptr) {
3133
      /* Determine the size of the old allocation from the unpadded pointer. */
3134
0
    size_t oldsize = zend_mm_size(heap, (char*)ptr - heap->debug.padding ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
3135
3136
    /* Remove the padding size to determine the size that is available to the user. */
3137
0
    oldsize -= (2 * heap->debug.padding);
3138
3139
0
#if ZEND_DEBUG
3140
0
    oldsize -= sizeof(zend_mm_debug_info);
3141
0
#endif
3142
3143
0
    memcpy(new, ptr, MIN(oldsize, size));
3144
0
    poison_free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
3145
0
  }
3146
3147
0
  return new;
3148
0
}
3149
3150
static size_t poison_gc(void)
3151
0
{
3152
0
  zend_mm_heap *heap = AG(mm_heap);
3153
3154
0
  void* (*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
3155
0
  void  (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
3156
0
  void* (*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
3157
0
  size_t (*_gc)(void);
3158
0
  void   (*_shutdown)(bool, bool);
3159
3160
0
  zend_mm_get_custom_handlers_ex(heap, &_malloc, &_free, &_realloc, &_gc, &_shutdown);
3161
0
  zend_mm_set_custom_handlers_ex(heap, NULL, NULL, NULL, NULL, NULL);
3162
3163
0
  size_t collected = zend_mm_gc(heap);
3164
3165
0
  zend_mm_set_custom_handlers_ex(heap, _malloc, _free, _realloc, _gc, _shutdown);
3166
3167
0
  return collected;
3168
0
}
3169
3170
static void poison_shutdown(bool full, bool silent)
3171
0
{
3172
0
  zend_mm_heap *heap = AG(mm_heap);
3173
3174
0
  void* (*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
3175
0
  void  (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
3176
0
  void* (*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
3177
0
  size_t (*_gc)(void);
3178
0
  void   (*_shutdown)(bool, bool);
3179
3180
0
  zend_mm_get_custom_handlers_ex(heap, &_malloc, &_free, &_realloc, &_gc, &_shutdown);
3181
0
  zend_mm_set_custom_handlers_ex(heap, NULL, NULL, NULL, NULL, NULL);
3182
3183
0
  if (heap->debug.check_freelists_on_shutdown) {
3184
0
    zend_mm_check_freelists(heap);
3185
0
  }
3186
3187
0
  zend_mm_shutdown(heap, full, silent);
3188
3189
0
  if (!full) {
3190
0
    zend_mm_set_custom_handlers_ex(heap, _malloc, _free, _realloc, _gc, _shutdown);
3191
0
  }
3192
0
}
3193
3194
static void poison_enable(zend_mm_heap *heap, char *parameters)
3195
0
{
3196
0
  char *tmp = parameters;
3197
0
  char *end = tmp + strlen(tmp);
3198
3199
  /* Trim heading/trailing whitespaces */
3200
0
  while (*tmp == ' ' || *tmp == '\t' || *tmp == '\n') {
3201
0
    tmp++;
3202
0
  }
3203
0
  while (end != tmp && (*(end-1) == ' ' || *(end-1) == '\t' || *(end-1) == '\n')) {
3204
0
    end--;
3205
0
  }
3206
3207
0
  if (tmp == end) {
3208
0
    return;
3209
0
  }
3210
3211
0
  while (1) {
3212
0
    char *key = tmp;
3213
3214
0
    tmp = memchr(tmp, '=', end - tmp);
3215
0
    if (!tmp) {
3216
0
      size_t key_len = end - key;
3217
0
      fprintf(stderr, "Unexpected EOF after ZEND_MM_DEBUG parameter '%.*s', expected '='\n",
3218
0
          (int)key_len, key);
3219
0
      return;
3220
0
    }
3221
3222
0
    size_t key_len = tmp - key;
3223
0
    char *value = tmp + 1;
3224
3225
0
    if (key_len == strlen("poison_alloc")
3226
0
        && !memcmp(key, "poison_alloc", key_len)) {
3227
3228
0
      heap->debug.poison_alloc = true;
3229
0
      heap->debug.poison_alloc_value = (uint8_t) ZEND_STRTOUL(value, &tmp, 0);
3230
3231
0
    } else if (key_len == strlen("poison_free")
3232
0
        && !memcmp(key, "poison_free", key_len)) {
3233
3234
0
      heap->debug.poison_free = true;
3235
0
      heap->debug.poison_free_value = (uint8_t) ZEND_STRTOUL(value, &tmp, 0);
3236
3237
0
    } else if (key_len == strlen("padding")
3238
0
        && !memcmp(key, "padding", key_len)) {
3239
3240
0
      uint8_t padding = ZEND_STRTOUL(value, &tmp, 0);
3241
0
      if (ZEND_MM_ALIGNED_SIZE(padding) != padding) {
3242
0
        fprintf(stderr, "ZEND_MM_DEBUG padding must be a multiple of %u, %u given\n",
3243
0
            (unsigned int)ZEND_MM_ALIGNMENT,
3244
0
            (unsigned int)padding);
3245
0
        return;
3246
0
      }
3247
0
      heap->debug.padding = padding;
3248
3249
0
    } else if (key_len == strlen("check_freelists_on_shutdown")
3250
0
        && !memcmp(key, "check_freelists_on_shutdown", key_len)) {
3251
3252
0
      heap->debug.check_freelists_on_shutdown = (bool) ZEND_STRTOUL(value, &tmp, 0);
3253
3254
0
    } else {
3255
0
      fprintf(stderr, "Unknown ZEND_MM_DEBUG parameter: '%.*s'\n",
3256
0
          (int)key_len, key);
3257
0
      return;
3258
0
    }
3259
3260
0
    if (tmp == end) {
3261
0
      break;
3262
0
    }
3263
0
    if (*tmp != ',') {
3264
0
      fprintf(stderr, "Unexpected '%c' after value of ZEND_MM_DEBUG parameter '%.*s', expected ','\n",
3265
0
          *tmp, (int)key_len, key);
3266
0
      return;
3267
0
    }
3268
0
    tmp++;
3269
0
  }
3270
3271
0
  zend_mm_set_custom_handlers_ex(heap, poison_malloc, poison_free,
3272
0
      poison_realloc, poison_gc, poison_shutdown);
3273
0
}
3274
#endif
3275
3276
static void alloc_globals_ctor(zend_alloc_globals *alloc_globals)
3277
16
{
3278
16
  char *tmp;
3279
3280
16
#if ZEND_MM_CUSTOM
3281
16
  tmp = getenv("USE_ZEND_ALLOC");
3282
16
  if (tmp && !ZEND_ATOL(tmp)) {
3283
16
    bool tracked = (tmp = getenv("USE_TRACKED_ALLOC")) && ZEND_ATOL(tmp);
3284
16
    zend_mm_heap *mm_heap = alloc_globals->mm_heap = malloc(sizeof(zend_mm_heap));
3285
16
    memset(mm_heap, 0, sizeof(zend_mm_heap));
3286
16
    mm_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
3287
16
    mm_heap->limit = (size_t)Z_L(-1) >> 1;
3288
16
    mm_heap->overflow = 0;
3289
3290
16
    if (!tracked) {
3291
      /* Use system allocator. */
3292
8
      mm_heap->custom_heap._malloc = __zend_malloc;
3293
8
      mm_heap->custom_heap._free = __zend_free;
3294
8
      mm_heap->custom_heap._realloc = __zend_realloc;
3295
8
    } else {
3296
      /* Use system allocator and track allocations for auto-free. */
3297
8
      mm_heap->custom_heap._malloc = tracked_malloc;
3298
8
      mm_heap->custom_heap._free = tracked_free;
3299
8
      mm_heap->custom_heap._realloc = tracked_realloc;
3300
8
      mm_heap->tracked_allocs = malloc(sizeof(HashTable));
3301
8
      zend_hash_init(mm_heap->tracked_allocs, 1024, NULL, NULL, 1);
3302
8
    }
3303
16
    return;
3304
16
  }
3305
0
#endif
3306
3307
0
  tmp = getenv("USE_ZEND_ALLOC_HUGE_PAGES");
3308
0
  if (tmp && ZEND_ATOL(tmp)) {
3309
0
    zend_mm_use_huge_pages = true;
3310
0
  }
3311
0
  alloc_globals->mm_heap = zend_mm_init();
3312
3313
0
#if ZEND_MM_CUSTOM
3314
0
  ZEND_ASSERT(!alloc_globals->mm_heap->tracked_allocs);
3315
0
  tmp = getenv("ZEND_MM_DEBUG");
3316
0
  if (tmp) {
3317
0
    poison_enable(alloc_globals->mm_heap, tmp);
3318
0
  }
3319
0
#endif
3320
0
}
3321
3322
#ifdef ZTS
3323
static void alloc_globals_dtor(zend_alloc_globals *alloc_globals)
3324
{
3325
  zend_mm_shutdown(alloc_globals->mm_heap, 1, 1);
3326
}
3327
#endif
3328
3329
ZEND_API void start_memory_manager(void)
3330
16
{
3331
16
#ifndef _WIN32
3332
16
#  if defined(_SC_PAGESIZE)
3333
16
  REAL_PAGE_SIZE = sysconf(_SC_PAGESIZE);
3334
#  elif defined(_SC_PAGE_SIZE)
3335
  REAL_PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
3336
#  endif
3337
16
#endif
3338
#ifdef ZTS
3339
  ts_allocate_fast_id(&alloc_globals_id, &alloc_globals_offset, sizeof(zend_alloc_globals), (ts_allocate_ctor) alloc_globals_ctor, (ts_allocate_dtor) alloc_globals_dtor);
3340
#else
3341
16
  alloc_globals_ctor(&alloc_globals);
3342
16
#endif
3343
16
}
3344
3345
ZEND_API zend_mm_heap *zend_mm_set_heap(zend_mm_heap *new_heap)
3346
0
{
3347
0
  zend_mm_heap *old_heap;
3348
3349
0
  old_heap = AG(mm_heap);
3350
0
  AG(mm_heap) = (zend_mm_heap*)new_heap;
3351
0
  return (zend_mm_heap*)old_heap;
3352
0
}
3353
3354
ZEND_API zend_mm_heap *zend_mm_get_heap(void)
3355
0
{
3356
0
  return AG(mm_heap);
3357
0
}
3358
3359
ZEND_API bool zend_mm_is_custom_heap(zend_mm_heap *new_heap)
3360
0
{
3361
0
#if ZEND_MM_CUSTOM
3362
0
  return AG(mm_heap)->use_custom_heap;
3363
#else
3364
  return 0;
3365
#endif
3366
0
}
3367
3368
ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap,
3369
                                          void* (*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3370
                                          void  (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3371
                                          void* (*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC))
3372
0
{
3373
0
#if ZEND_MM_CUSTOM
3374
0
  zend_mm_set_custom_handlers_ex(heap, _malloc, _free, _realloc, NULL, NULL);
3375
0
#endif
3376
0
}
3377
3378
ZEND_API void zend_mm_set_custom_handlers_ex(zend_mm_heap *heap,
3379
                                          void* (*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3380
                                          void  (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3381
                                          void* (*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3382
                                          size_t (*_gc)(void),
3383
                                          void   (*_shutdown)(bool, bool))
3384
0
{
3385
0
#if ZEND_MM_CUSTOM
3386
0
  zend_mm_heap *_heap = (zend_mm_heap*)heap;
3387
3388
0
  if (!_malloc && !_free && !_realloc) {
3389
0
    _heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
3390
0
  } else {
3391
0
    _heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
3392
0
    _heap->custom_heap._malloc = _malloc;
3393
0
    _heap->custom_heap._free = _free;
3394
0
    _heap->custom_heap._realloc = _realloc;
3395
0
    _heap->custom_heap._gc = _gc;
3396
0
    _heap->custom_heap._shutdown = _shutdown;
3397
0
  }
3398
0
#endif
3399
0
}
3400
3401
ZEND_API void zend_mm_get_custom_handlers(zend_mm_heap *heap,
3402
                                             void* (**_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3403
                                             void  (**_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3404
                                             void* (**_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC))
3405
0
{
3406
0
#if ZEND_MM_CUSTOM
3407
0
  zend_mm_get_custom_handlers_ex(heap, _malloc, _free, _realloc, NULL, NULL);
3408
0
#endif
3409
0
}
3410
3411
ZEND_API void zend_mm_get_custom_handlers_ex(zend_mm_heap *heap,
3412
                                             void* (**_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3413
                                             void  (**_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3414
                                             void* (**_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3415
                                             size_t (**_gc)(void),
3416
                                             void   (**_shutdown)(bool, bool))
3417
0
{
3418
0
#if ZEND_MM_CUSTOM
3419
0
  zend_mm_heap *_heap = (zend_mm_heap*)heap;
3420
3421
0
  if (heap->use_custom_heap) {
3422
0
    *_malloc = _heap->custom_heap._malloc;
3423
0
    *_free = _heap->custom_heap._free;
3424
0
    *_realloc = _heap->custom_heap._realloc;
3425
0
    if (_gc != NULL) {
3426
0
      *_gc = _heap->custom_heap._gc;
3427
0
    }
3428
0
    if (_shutdown != NULL) {
3429
0
      *_shutdown = _heap->custom_heap._shutdown;
3430
0
    }
3431
0
  } else {
3432
0
    *_malloc = NULL;
3433
0
    *_free = NULL;
3434
0
    *_realloc = NULL;
3435
0
    if (_gc != NULL) {
3436
0
      *_gc = NULL;
3437
0
    }
3438
0
    if (_shutdown != NULL) {
3439
0
      *_shutdown = NULL;
3440
0
    }
3441
0
  }
3442
#else
3443
  *_malloc = NULL;
3444
  *_free = NULL;
3445
  *_realloc = NULL;
3446
  *_gc = NULL;
3447
  *_shutdown = NULL;
3448
#endif
3449
0
}
3450
3451
ZEND_API zend_mm_storage *zend_mm_get_storage(zend_mm_heap *heap)
3452
0
{
3453
0
#if ZEND_MM_STORAGE
3454
0
  return heap->storage;
3455
#else
3456
  return NULL;
3457
#endif
3458
0
}
3459
3460
ZEND_API zend_mm_heap *zend_mm_startup(void)
3461
0
{
3462
0
  return zend_mm_init();
3463
0
}
3464
3465
ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void *data, size_t data_size)
3466
0
{
3467
0
#if ZEND_MM_STORAGE
3468
0
  zend_mm_storage *storage;
3469
0
  zend_mm_storage tmp_storage = {
3470
0
    .handlers = *handlers,
3471
0
    .data = data,
3472
0
  };
3473
0
  zend_mm_chunk *chunk;
3474
0
  zend_mm_heap *heap;
3475
3476
0
  chunk = (zend_mm_chunk*)handlers->chunk_alloc(&tmp_storage, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
3477
0
  if (UNEXPECTED(chunk == NULL)) {
3478
0
#if ZEND_MM_ERROR
3479
0
    fprintf(stderr, "Can't initialize heap\n");
3480
0
#endif
3481
0
    return NULL;
3482
0
  }
3483
0
  heap = &chunk->heap_slot;
3484
0
  chunk->heap = heap;
3485
0
  chunk->next = chunk;
3486
0
  chunk->prev = chunk;
3487
0
  chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
3488
0
  chunk->free_tail = ZEND_MM_FIRST_PAGE;
3489
0
  chunk->num = 0;
3490
0
  chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
3491
0
  chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
3492
0
  heap->main_chunk = chunk;
3493
0
  heap->cached_chunks = NULL;
3494
0
  heap->chunks_count = 1;
3495
0
  heap->peak_chunks_count = 1;
3496
0
  heap->cached_chunks_count = 0;
3497
0
  heap->avg_chunks_count = 1.0;
3498
0
  heap->last_chunks_delete_boundary = 0;
3499
0
  heap->last_chunks_delete_count = 0;
3500
0
#if ZEND_MM_STAT || ZEND_MM_LIMIT
3501
0
  heap->real_size = ZEND_MM_CHUNK_SIZE;
3502
0
#endif
3503
0
#if ZEND_MM_STAT
3504
0
  heap->real_peak = ZEND_MM_CHUNK_SIZE;
3505
0
  heap->size = 0;
3506
0
  heap->peak = 0;
3507
0
#endif
3508
0
  zend_mm_init_key(heap);
3509
0
#if ZEND_MM_LIMIT
3510
0
  heap->limit = (size_t)Z_L(-1) >> 1;
3511
0
  heap->overflow = 0;
3512
0
#endif
3513
0
#if ZEND_MM_CUSTOM
3514
0
  heap->use_custom_heap = 0;
3515
0
#endif
3516
0
  heap->storage = &tmp_storage;
3517
0
  heap->huge_list = NULL;
3518
0
  memset(heap->free_slot, 0, sizeof(heap->free_slot));
3519
0
  storage = _zend_mm_alloc(heap, sizeof(zend_mm_storage) + data_size ZEND_FILE_LINE_CC ZEND_FILE_LINE_CC);
3520
0
  if (!storage) {
3521
0
    handlers->chunk_free(&tmp_storage, chunk, ZEND_MM_CHUNK_SIZE);
3522
0
#if ZEND_MM_ERROR
3523
0
    fprintf(stderr, "Can't initialize heap\n");
3524
0
#endif
3525
0
    return NULL;
3526
0
  }
3527
0
  memcpy(storage, &tmp_storage, sizeof(zend_mm_storage));
3528
0
  if (data) {
3529
0
    storage->data = (void*)(((char*)storage + sizeof(zend_mm_storage)));
3530
0
    memcpy(storage->data, data, data_size);
3531
0
  }
3532
0
  heap->storage = storage;
3533
0
#if ZEND_DEBUG
3534
0
  heap->pid = getpid();
3535
0
#endif
3536
0
  return heap;
3537
#else
3538
  return NULL;
3539
#endif
3540
0
}
3541
3542
ZEND_API void * __zend_malloc(size_t len ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
3543
299M
{
3544
299M
  void *tmp = malloc(len);
3545
299M
  if (EXPECTED(tmp || !len)) {
3546
299M
    return tmp;
3547
299M
  }
3548
0
  zend_out_of_memory();
3549
299M
}
3550
3551
ZEND_API void * __zend_calloc(size_t nmemb, size_t len ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
3552
0
{
3553
0
  void *tmp;
3554
3555
0
  len = zend_safe_address_guarded(nmemb, len, 0);
3556
0
  tmp = __zend_malloc(len ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
3557
0
  memset(tmp, 0, len);
3558
0
  return tmp;
3559
0
}
3560
3561
ZEND_API void * __zend_realloc(void *p, size_t len ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
3562
23.3M
{
3563
23.3M
  p = realloc(p, len);
3564
23.3M
  if (EXPECTED(p || !len)) {
3565
23.3M
    return p;
3566
23.3M
  }
3567
0
  zend_out_of_memory();
3568
23.3M
}
3569
3570
ZEND_API void __zend_free(void *p ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
3571
299M
{
3572
299M
  free(p);
3573
299M
  return;
3574
299M
}
3575
3576
ZEND_API char * __zend_strdup(const char *s)
3577
0
{
3578
0
  char *tmp = strdup(s);
3579
0
  if (EXPECTED(tmp)) {
3580
0
    return tmp;
3581
0
  }
3582
0
  zend_out_of_memory();
3583
0
}
3584
3585
#ifdef ZTS
3586
size_t zend_mm_globals_size(void)
3587
{
3588
  return sizeof(zend_alloc_globals);
3589
}
3590
#endif