Coverage Report

Created: 2025-09-27 06:26

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/php-src/Zend/zend_alloc.c
Line
Count
Source
1
/*
2
   +----------------------------------------------------------------------+
3
   | Zend Engine                                                          |
4
   +----------------------------------------------------------------------+
5
   | Copyright (c) Zend Technologies Ltd. (http://www.zend.com)           |
6
   +----------------------------------------------------------------------+
7
   | This source file is subject to version 2.00 of the Zend license,     |
8
   | that is bundled with this package in the file LICENSE, and is        |
9
   | available through the world-wide-web at the following url:           |
10
   | http://www.zend.com/license/2_00.txt.                                |
11
   | If you did not receive a copy of the Zend license and are unable to  |
12
   | obtain it through the world-wide-web, please send a note to          |
13
   | license@zend.com so we can mail you a copy immediately.              |
14
   +----------------------------------------------------------------------+
15
   | Authors: Andi Gutmans <andi@php.net>                                 |
16
   |          Zeev Suraski <zeev@php.net>                                 |
17
   |          Dmitry Stogov <dmitry@php.net>                              |
18
   +----------------------------------------------------------------------+
19
*/
20
21
/*
22
 * zend_alloc is designed to be a modern CPU cache friendly memory manager
23
 * for PHP. Most ideas are taken from jemalloc and tcmalloc implementations.
24
 *
25
 * All allocations are split into 3 categories:
26
 *
27
 * Huge  - the size is greater than CHUNK size (~2M by default), allocation is
28
 *         performed using mmap(). The result is aligned on 2M boundary.
29
 *
30
 * Large - a number of 4096K pages inside a CHUNK. Large blocks
31
 *         are always aligned on page boundary.
32
 *
33
 * Small - less than 3/4 of page size. Small sizes are rounded up to nearest
34
 *         greater predefined small size (there are 30 predefined sizes:
35
 *         8, 16, 24, 32, ... 3072). Small blocks are allocated from
36
 *         RUNs. Each RUN is allocated as a single or few following pages.
37
 *         Allocation inside RUNs implemented using linked list of free
38
 *         elements. The result is aligned to 8 bytes.
39
 *
40
 * zend_alloc allocates memory from OS by CHUNKs, these CHUNKs and huge memory
41
 * blocks are always aligned to CHUNK boundary. So it's very easy to determine
42
 * the CHUNK owning the certain pointer. Regular CHUNKs reserve a single
43
 * page at start for special purpose. It contains bitset of free pages,
44
 * few bitset for available runs of predefined small sizes, map of pages that
45
 * keeps information about usage of each page in this CHUNK, etc.
46
 *
47
 * zend_alloc provides familiar emalloc/efree/erealloc API, but in addition it
48
 * provides specialized and optimized routines to allocate blocks of predefined
49
 * sizes (e.g. emalloc_2(), emallc_4(), ..., emalloc_large(), etc)
50
 * The library uses C preprocessor tricks that substitute calls to emalloc()
51
 * with more specialized routines when the requested size is known.
52
 */
53
54
#include "zend.h"
55
#include "zend_alloc.h"
56
#include "zend_globals.h"
57
#include "zend_hrtime.h"
58
#include "zend_operators.h"
59
#include "zend_multiply.h"
60
#include "zend_bitset.h"
61
#include "zend_mmap.h"
62
#include "zend_portability.h"
63
#include <signal.h>
64
65
#ifdef HAVE_UNISTD_H
66
# include <unistd.h>
67
#endif
68
69
#ifdef ZEND_WIN32
70
# include <wincrypt.h>
71
# include <process.h>
72
# include "win32/winutil.h"
73
# define getpid _getpid
74
typedef int pid_t;
75
#endif
76
77
#include <stdio.h>
78
#include <stdlib.h>
79
#include <string.h>
80
81
#include <sys/types.h>
82
#include <sys/stat.h>
83
#include <limits.h>
84
#include <fcntl.h>
85
#include <errno.h>
86
#ifdef __SANITIZE_ADDRESS__
87
# include <sanitizer/asan_interface.h>
88
#endif
89
90
#ifndef _WIN32
91
# include <sys/mman.h>
92
# ifndef MAP_ANON
93
#  ifdef MAP_ANONYMOUS
94
#   define MAP_ANON MAP_ANONYMOUS
95
#  endif
96
# endif
97
# ifndef MAP_FAILED
98
#  define MAP_FAILED ((void*)-1)
99
# endif
100
# ifndef MAP_POPULATE
101
#  define MAP_POPULATE 0
102
# endif
103
#  if defined(_SC_PAGESIZE) || (_SC_PAGE_SIZE)
104
16
#    define REAL_PAGE_SIZE _real_page_size
105
static size_t _real_page_size = ZEND_MM_PAGE_SIZE;
106
#  endif
107
# ifdef MAP_ALIGNED_SUPER
108
#    define MAP_HUGETLB MAP_ALIGNED_SUPER
109
# endif
110
#endif
111
112
#ifndef REAL_PAGE_SIZE
113
# define REAL_PAGE_SIZE ZEND_MM_PAGE_SIZE
114
#endif
115
116
/* NetBSD has an mremap() function with a signature that is incompatible with Linux (WTF?),
117
 * so pretend it doesn't exist. */
118
#ifndef __linux__
119
# undef HAVE_MREMAP
120
#endif
121
122
#ifndef __APPLE__
123
0
# define ZEND_MM_FD -1
124
#else
125
# include <mach/vm_statistics.h>
126
/* Mac allows to track anonymous page via vmmap per TAG id.
127
 * user land applications are allowed to take from 240 to 255.
128
 */
129
# define ZEND_MM_FD VM_MAKE_TAG(250U)
130
#endif
131
132
#ifndef ZEND_MM_STAT
133
# define ZEND_MM_STAT 1    /* track current and peak memory usage            */
134
#endif
135
#ifndef ZEND_MM_LIMIT
136
# define ZEND_MM_LIMIT 1   /* support for user-defined memory limit          */
137
#endif
138
#ifndef ZEND_MM_CUSTOM
139
# define ZEND_MM_CUSTOM 1  /* support for custom memory allocator            */
140
                           /* USE_ZEND_ALLOC=0 may switch to system malloc() */
141
#endif
142
#ifndef ZEND_MM_STORAGE
143
# define ZEND_MM_STORAGE 1 /* support for custom memory storage              */
144
#endif
145
#ifndef ZEND_MM_ERROR
146
# define ZEND_MM_ERROR 1   /* report system errors                           */
147
#endif
148
#ifndef ZEND_MM_HEAP_PROTECTION
149
# define ZEND_MM_HEAP_PROTECTION 1 /* protect heap against corruptions       */
150
#endif
151
152
#if ZEND_MM_HEAP_PROTECTION
153
/* Define ZEND_MM_MIN_USEABLE_BIN_SIZE to the size of two pointers */
154
# if UINTPTR_MAX == UINT64_MAX
155
0
#  define ZEND_MM_MIN_USEABLE_BIN_SIZE 16
156
# elif UINTPTR_MAX == UINT32_MAX
157
#  define ZEND_MM_MIN_USEABLE_BIN_SIZE 8
158
# else
159
#  error
160
# endif
161
# if ZEND_MM_MIN_USEABLE_BIN_SIZE < ZEND_MM_MIN_SMALL_SIZE
162
#  error
163
# endif
164
#else /* ZEND_MM_HEAP_PROTECTION */
165
# define ZEND_MM_MIN_USEABLE_BIN_SIZE ZEND_MM_MIN_SMALL_SIZE
166
#endif /* ZEND_MM_HEAP_PROTECTION */
167
168
#ifndef ZEND_MM_CHECK
169
0
# define ZEND_MM_CHECK(condition, message)  do { \
170
0
    if (UNEXPECTED(!(condition))) { \
171
0
      zend_mm_panic(message); \
172
0
    } \
173
0
  } while (0)
174
#endif
175
176
typedef uint32_t   zend_mm_page_info; /* 4-byte integer */
177
typedef zend_ulong zend_mm_bitset;    /* 4-byte or 8-byte integer */
178
179
#define ZEND_MM_ALIGNED_OFFSET(size, alignment) \
180
0
  (((size_t)(size)) & ((alignment) - 1))
181
#define ZEND_MM_ALIGNED_BASE(size, alignment) \
182
0
  (((size_t)(size)) & ~((alignment) - 1))
183
#define ZEND_MM_SIZE_TO_NUM(size, alignment) \
184
0
  (((size_t)(size) + ((alignment) - 1)) / (alignment))
185
186
0
#define ZEND_MM_BITSET_LEN    (sizeof(zend_mm_bitset) * 8)       /* 32 or 64 */
187
#define ZEND_MM_PAGE_MAP_LEN  (ZEND_MM_PAGES / ZEND_MM_BITSET_LEN) /* 16 or 8 */
188
189
typedef zend_mm_bitset zend_mm_page_map[ZEND_MM_PAGE_MAP_LEN];     /* 64B */
190
191
#define ZEND_MM_IS_FRUN                  0x00000000
192
0
#define ZEND_MM_IS_LRUN                  0x40000000
193
0
#define ZEND_MM_IS_SRUN                  0x80000000
194
195
0
#define ZEND_MM_LRUN_PAGES_MASK          0x000003ff
196
0
#define ZEND_MM_LRUN_PAGES_OFFSET        0
197
198
0
#define ZEND_MM_SRUN_BIN_NUM_MASK        0x0000001f
199
0
#define ZEND_MM_SRUN_BIN_NUM_OFFSET      0
200
201
0
#define ZEND_MM_SRUN_FREE_COUNTER_MASK   0x01ff0000
202
0
#define ZEND_MM_SRUN_FREE_COUNTER_OFFSET 16
203
204
0
#define ZEND_MM_NRUN_OFFSET_MASK         0x01ff0000
205
0
#define ZEND_MM_NRUN_OFFSET_OFFSET       16
206
207
0
#define ZEND_MM_LRUN_PAGES(info)         (((info) & ZEND_MM_LRUN_PAGES_MASK) >> ZEND_MM_LRUN_PAGES_OFFSET)
208
0
#define ZEND_MM_SRUN_BIN_NUM(info)       (((info) & ZEND_MM_SRUN_BIN_NUM_MASK) >> ZEND_MM_SRUN_BIN_NUM_OFFSET)
209
0
#define ZEND_MM_SRUN_FREE_COUNTER(info)  (((info) & ZEND_MM_SRUN_FREE_COUNTER_MASK) >> ZEND_MM_SRUN_FREE_COUNTER_OFFSET)
210
0
#define ZEND_MM_NRUN_OFFSET(info)        (((info) & ZEND_MM_NRUN_OFFSET_MASK) >> ZEND_MM_NRUN_OFFSET_OFFSET)
211
212
#define ZEND_MM_FRUN()                   ZEND_MM_IS_FRUN
213
0
#define ZEND_MM_LRUN(count)              (ZEND_MM_IS_LRUN | ((count) << ZEND_MM_LRUN_PAGES_OFFSET))
214
0
#define ZEND_MM_SRUN(bin_num)            (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET))
215
0
#define ZEND_MM_SRUN_EX(bin_num, count)  (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((count) << ZEND_MM_SRUN_FREE_COUNTER_OFFSET))
216
0
#define ZEND_MM_NRUN(bin_num, offset)    (ZEND_MM_IS_SRUN | ZEND_MM_IS_LRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((offset) << ZEND_MM_NRUN_OFFSET_OFFSET))
217
218
0
#define ZEND_MM_BINS 30
219
220
#if UINTPTR_MAX == UINT64_MAX
221
0
#  define BSWAPPTR(u) ZEND_BYTES_SWAP64(u)
222
#else
223
#  define BSWAPPTR(u) ZEND_BYTES_SWAP32(u)
224
#endif
225
226
typedef struct  _zend_mm_page      zend_mm_page;
227
typedef struct  _zend_mm_bin       zend_mm_bin;
228
typedef struct  _zend_mm_free_slot zend_mm_free_slot;
229
typedef struct  _zend_mm_chunk     zend_mm_chunk;
230
typedef struct  _zend_mm_huge_list zend_mm_huge_list;
231
232
static bool zend_mm_use_huge_pages = false;
233
234
/*
235
 * Memory is retrieved from OS by chunks of fixed size 2MB.
236
 * Inside chunk it's managed by pages of fixed size 4096B.
237
 * So each chunk consists from 512 pages.
238
 * The first page of each chunk is reserved for chunk header.
239
 * It contains service information about all pages.
240
 *
241
 * free_pages - current number of free pages in this chunk
242
 *
243
 * free_tail  - number of continuous free pages at the end of chunk
244
 *
245
 * free_map   - bitset (a bit for each page). The bit is set if the corresponding
246
 *              page is allocated. Allocator for "large sizes" may easily find a
247
 *              free page (or a continuous number of pages) searching for zero
248
 *              bits.
249
 *
250
 * map        - contains service information for each page. (32-bits for each
251
 *              page).
252
 *    usage:
253
 *        (2 bits)
254
 *        FRUN - free page,
255
 *              LRUN - first page of "large" allocation
256
 *              SRUN - first page of a bin used for "small" allocation
257
 *
258
 *    lrun_pages:
259
 *              (10 bits) number of allocated pages
260
 *
261
 *    srun_bin_num:
262
 *              (5 bits) bin number (e.g. 0 for sizes 0-2, 1 for 3-4,
263
 *               2 for 5-8, 3 for 9-16 etc) see zend_alloc_sizes.h
264
 */
265
266
struct _zend_mm_heap {
267
#if ZEND_MM_CUSTOM
268
  int                use_custom_heap;
269
#endif
270
#if ZEND_MM_STORAGE
271
  zend_mm_storage   *storage;
272
#endif
273
#if ZEND_MM_STAT
274
  size_t             size;                    /* current memory usage */
275
  size_t             peak;                    /* peak memory usage */
276
#endif
277
  uintptr_t          shadow_key;              /* free slot shadow ptr xor key */
278
  zend_mm_free_slot *free_slot[ZEND_MM_BINS]; /* free lists for small sizes */
279
#if ZEND_MM_STAT || ZEND_MM_LIMIT
280
  size_t             real_size;               /* current size of allocated pages */
281
#endif
282
#if ZEND_MM_STAT
283
  size_t             real_peak;               /* peak size of allocated pages */
284
#endif
285
#if ZEND_MM_LIMIT
286
  size_t             limit;                   /* memory limit */
287
  int                overflow;                /* memory overflow flag */
288
#endif
289
290
  zend_mm_huge_list *huge_list;               /* list of huge allocated blocks */
291
292
  zend_mm_chunk     *main_chunk;
293
  zend_mm_chunk     *cached_chunks;     /* list of unused chunks */
294
  int                chunks_count;      /* number of allocated chunks */
295
  int                peak_chunks_count;   /* peak number of allocated chunks for current request */
296
  int                cached_chunks_count;   /* number of cached chunks */
297
  double             avg_chunks_count;    /* average number of chunks allocated per request */
298
  int                last_chunks_delete_boundary; /* number of chunks after last deletion */
299
  int                last_chunks_delete_count;    /* number of deletion over the last boundary */
300
#if ZEND_MM_CUSTOM
301
  struct {
302
    void      *(*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
303
    void       (*_free)(void*  ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
304
    void      *(*_realloc)(void*, size_t  ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
305
    size_t     (*_gc)(void);
306
    void       (*_shutdown)(bool full, bool silent);
307
  } custom_heap;
308
  union {
309
    HashTable *tracked_allocs;
310
    struct {
311
      bool    poison_alloc;
312
      uint8_t poison_alloc_value;
313
      bool    poison_free;
314
      uint8_t poison_free_value;
315
      uint8_t padding;
316
      bool    check_freelists_on_shutdown;
317
    } debug;
318
  };
319
#endif
320
#if ZEND_DEBUG
321
  pid_t pid;
322
#endif
323
  zend_random_bytes_insecure_state rand_state;
324
};
325
326
struct _zend_mm_chunk {
327
  zend_mm_heap      *heap;
328
  zend_mm_chunk     *next;
329
  zend_mm_chunk     *prev;
330
  uint32_t           free_pages;        /* number of free pages */
331
  uint32_t           free_tail;               /* number of free pages at the end of chunk */
332
  uint32_t           num;
333
  char               reserve[64 - (sizeof(void*) * 3 + sizeof(uint32_t) * 3)];
334
  zend_mm_heap       heap_slot;               /* used only in main chunk */
335
  zend_mm_page_map   free_map;                /* 512 bits or 64 bytes */
336
  zend_mm_page_info  map[ZEND_MM_PAGES];      /* 2 KB = 512 * 4 */
337
};
338
339
struct _zend_mm_page {
340
  char               bytes[ZEND_MM_PAGE_SIZE];
341
};
342
343
/*
344
 * bin - is one or few continuous pages (up to 8) used for allocation of
345
 * a particular "small size".
346
 */
347
struct _zend_mm_bin {
348
  char               bytes[ZEND_MM_PAGE_SIZE * 8];
349
};
350
351
struct _zend_mm_free_slot {
352
  zend_mm_free_slot *next_free_slot;
353
};
354
355
struct _zend_mm_huge_list {
356
  void              *ptr;
357
  size_t             size;
358
  zend_mm_huge_list *next;
359
#if ZEND_DEBUG
360
  zend_mm_debug_info dbg;
361
#endif
362
};
363
364
#define ZEND_MM_PAGE_ADDR(chunk, page_num) \
365
0
  ((void*)(((zend_mm_page*)(chunk)) + (page_num)))
366
367
#define _BIN_DATA_SIZE(num, size, elements, pages, x, y) size,
368
static const uint32_t bin_data_size[] = {
369
  ZEND_MM_BINS_INFO(_BIN_DATA_SIZE, x, y)
370
};
371
372
#define _BIN_DATA_ELEMENTS(num, size, elements, pages, x, y) elements,
373
static const uint32_t bin_elements[] = {
374
  ZEND_MM_BINS_INFO(_BIN_DATA_ELEMENTS, x, y)
375
};
376
377
#define _BIN_DATA_PAGES(num, size, elements, pages, x, y) pages,
378
static const uint32_t bin_pages[] = {
379
  ZEND_MM_BINS_INFO(_BIN_DATA_PAGES, x, y)
380
};
381
382
static ZEND_COLD ZEND_NORETURN void zend_mm_panic(const char *message)
383
0
{
384
0
  fprintf(stderr, "%s\n", message);
385
/* See http://support.microsoft.com/kb/190351 */
386
#ifdef ZEND_WIN32
387
  fflush(stderr);
388
#endif
389
0
#if ZEND_DEBUG && defined(HAVE_KILL) && defined(HAVE_GETPID)
390
0
  kill(getpid(), SIGSEGV);
391
0
#endif
392
0
  abort();
393
0
}
394
395
static ZEND_COLD ZEND_NORETURN void zend_mm_safe_error(zend_mm_heap *heap,
396
  const char *format,
397
  size_t limit,
398
#if ZEND_DEBUG
399
  const char *filename,
400
  uint32_t lineno,
401
#endif
402
  size_t size)
403
478
{
404
405
478
  heap->overflow = 1;
406
478
  zend_try {
407
478
    zend_error_noreturn(E_ERROR,
408
478
      format,
409
478
      limit,
410
478
#if ZEND_DEBUG
411
478
      filename,
412
478
      lineno,
413
478
#endif
414
478
      size);
415
478
  } zend_catch {
416
0
  }  zend_end_try();
417
0
  heap->overflow = 0;
418
0
  zend_bailout();
419
0
  exit(1);
420
478
}
421
422
#ifdef _WIN32
423
static void stderr_last_error(char *msg)
424
{
425
  DWORD err = GetLastError();
426
  char *buf = php_win32_error_to_msg(err);
427
428
  if (!buf[0]) {
429
    fprintf(stderr, "\n%s: [0x%08lx]\n", msg, err);
430
  }
431
  else {
432
    fprintf(stderr, "\n%s: [0x%08lx] %s\n", msg, err, buf);
433
  }
434
435
  php_win32_error_msg_free(buf);
436
}
437
#endif
438
439
/*****************/
440
/* OS Allocation */
441
/*****************/
442
443
static void zend_mm_munmap(void *addr, size_t size)
444
0
{
445
#ifdef _WIN32
446
  if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
447
    /** ERROR_INVALID_ADDRESS is expected when addr is not range start address */
448
    if (GetLastError() != ERROR_INVALID_ADDRESS) {
449
#if ZEND_MM_ERROR
450
      stderr_last_error("VirtualFree() failed");
451
#endif
452
      return;
453
    }
454
    SetLastError(0);
455
456
    MEMORY_BASIC_INFORMATION mbi;
457
    if (VirtualQuery(addr, &mbi, sizeof(mbi)) == 0) {
458
#if ZEND_MM_ERROR
459
      stderr_last_error("VirtualQuery() failed");
460
#endif
461
      return;
462
    }
463
    addr = mbi.AllocationBase;
464
465
    if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
466
#if ZEND_MM_ERROR
467
      stderr_last_error("VirtualFree() failed");
468
#endif
469
    }
470
  }
471
#else
472
0
  if (munmap(addr, size) != 0) {
473
0
#if ZEND_MM_ERROR
474
0
    fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
475
0
#endif
476
0
  }
477
0
#endif
478
0
}
479
480
#ifndef HAVE_MREMAP
481
static void *zend_mm_mmap_fixed(void *addr, size_t size)
482
{
483
#ifdef _WIN32
484
  void *ptr = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
485
486
  if (ptr == NULL) {
487
    /** ERROR_INVALID_ADDRESS is expected when fixed addr range is not free */
488
    if (GetLastError() != ERROR_INVALID_ADDRESS) {
489
#if ZEND_MM_ERROR
490
      stderr_last_error("VirtualAlloc() fixed failed");
491
#endif
492
    }
493
    SetLastError(0);
494
    return NULL;
495
  }
496
  ZEND_ASSERT(ptr == addr);
497
  return ptr;
498
#else
499
  int flags = MAP_PRIVATE | MAP_ANON;
500
#if defined(MAP_EXCL)
501
  flags |= MAP_FIXED | MAP_EXCL;
502
#elif defined(MAP_TRYFIXED)
503
  flags |= MAP_TRYFIXED;
504
#endif
505
  /* MAP_FIXED leads to discarding of the old mapping, so it can't be used. */
506
  void *ptr = mmap(addr, size, PROT_READ | PROT_WRITE, flags /*| MAP_POPULATE | MAP_HUGETLB*/, ZEND_MM_FD, 0);
507
508
  if (ptr == MAP_FAILED) {
509
#if ZEND_MM_ERROR && !defined(MAP_EXCL) && !defined(MAP_TRYFIXED)
510
    fprintf(stderr, "\nmmap() fixed failed: [%d] %s\n", errno, strerror(errno));
511
#endif
512
    return NULL;
513
  } else if (ptr != addr) {
514
    zend_mm_munmap(ptr, size);
515
    return NULL;
516
  }
517
  return ptr;
518
#endif
519
}
520
#endif
521
522
static void *zend_mm_mmap(size_t size)
523
0
{
524
#ifdef _WIN32
525
  void *ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
526
527
  if (ptr == NULL) {
528
#if ZEND_MM_ERROR
529
    stderr_last_error("VirtualAlloc() failed");
530
#endif
531
    return NULL;
532
  }
533
  return ptr;
534
#else
535
0
  void *ptr;
536
537
0
#if defined(MAP_HUGETLB) || defined(VM_FLAGS_SUPERPAGE_SIZE_2MB)
538
0
  if (zend_mm_use_huge_pages && size == ZEND_MM_CHUNK_SIZE) {
539
0
    int fd = -1;
540
0
    int mflags = MAP_PRIVATE | MAP_ANON;
541
0
#if defined(MAP_HUGETLB)
542
0
    mflags |= MAP_HUGETLB;
543
#else
544
    fd = VM_FLAGS_SUPERPAGE_SIZE_2MB;
545
#endif
546
0
    ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, mflags, fd, 0);
547
0
    if (ptr != MAP_FAILED) {
548
0
      zend_mmap_set_name(ptr, size, "zend_alloc");
549
0
      return ptr;
550
0
    }
551
0
  }
552
0
#endif
553
554
0
  ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, ZEND_MM_FD, 0);
555
556
0
  if (ptr == MAP_FAILED) {
557
0
#if ZEND_MM_ERROR
558
0
    fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
559
0
#endif
560
0
    return NULL;
561
0
  }
562
0
  zend_mmap_set_name(ptr, size, "zend_alloc");
563
0
  return ptr;
564
0
#endif
565
0
}
566
567
/***********/
568
/* Bitmask */
569
/***********/
570
571
/* number of trailing set (1) bits */
572
ZEND_ATTRIBUTE_CONST static zend_always_inline int zend_mm_bitset_nts(zend_mm_bitset bitset)
573
0
{
574
0
#if (defined(__GNUC__) || __has_builtin(__builtin_ctzl)) && SIZEOF_ZEND_LONG == SIZEOF_LONG && defined(PHP_HAVE_BUILTIN_CTZL)
575
0
  return __builtin_ctzl(~bitset);
576
#elif (defined(__GNUC__) || __has_builtin(__builtin_ctzll)) && defined(PHP_HAVE_BUILTIN_CTZLL)
577
  return __builtin_ctzll(~bitset);
578
#elif defined(_WIN32)
579
  unsigned long index;
580
581
#if defined(_WIN64)
582
  if (!BitScanForward64(&index, ~bitset)) {
583
#else
584
  if (!BitScanForward(&index, ~bitset)) {
585
#endif
586
    /* undefined behavior */
587
    return 32;
588
  }
589
590
  return (int)index;
591
#else
592
  int n;
593
594
  if (bitset == (zend_mm_bitset)-1) return ZEND_MM_BITSET_LEN;
595
596
  n = 0;
597
#if SIZEOF_ZEND_LONG == 8
598
  if (sizeof(zend_mm_bitset) == 8) {
599
    if ((bitset & 0xffffffff) == 0xffffffff) {n += 32; bitset = bitset >> Z_UL(32);}
600
  }
601
#endif
602
  if ((bitset & 0x0000ffff) == 0x0000ffff) {n += 16; bitset = bitset >> 16;}
603
  if ((bitset & 0x000000ff) == 0x000000ff) {n +=  8; bitset = bitset >>  8;}
604
  if ((bitset & 0x0000000f) == 0x0000000f) {n +=  4; bitset = bitset >>  4;}
605
  if ((bitset & 0x00000003) == 0x00000003) {n +=  2; bitset = bitset >>  2;}
606
  return n + (bitset & 1);
607
#endif
608
0
}
609
610
static zend_always_inline int zend_mm_bitset_is_set(zend_mm_bitset *bitset, int bit)
611
0
{
612
0
  return ZEND_BIT_TEST(bitset, bit);
613
0
}
614
615
static zend_always_inline void zend_mm_bitset_set_bit(zend_mm_bitset *bitset, int bit)
616
0
{
617
0
  bitset[bit / ZEND_MM_BITSET_LEN] |= (Z_UL(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
618
0
}
619
620
static zend_always_inline void zend_mm_bitset_reset_bit(zend_mm_bitset *bitset, int bit)
621
0
{
622
0
  bitset[bit / ZEND_MM_BITSET_LEN] &= ~(Z_UL(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
623
0
}
624
625
static zend_always_inline void zend_mm_bitset_set_range(zend_mm_bitset *bitset, int start, int len)
626
0
{
627
0
  if (len == 1) {
628
0
    zend_mm_bitset_set_bit(bitset, start);
629
0
  } else {
630
0
    int pos = start / ZEND_MM_BITSET_LEN;
631
0
    int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
632
0
    int bit = start & (ZEND_MM_BITSET_LEN - 1);
633
0
    zend_mm_bitset tmp;
634
635
0
    if (pos != end) {
636
      /* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */
637
0
      tmp = (zend_mm_bitset)-1 << bit;
638
0
      bitset[pos++] |= tmp;
639
0
      while (pos != end) {
640
        /* set all bits */
641
0
        bitset[pos++] = (zend_mm_bitset)-1;
642
0
      }
643
0
      end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
644
      /* set bits from "0" to "end" */
645
0
      tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
646
0
      bitset[pos] |= tmp;
647
0
    } else {
648
0
      end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
649
      /* set bits from "bit" to "end" */
650
0
      tmp = (zend_mm_bitset)-1 << bit;
651
0
      tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
652
0
      bitset[pos] |= tmp;
653
0
    }
654
0
  }
655
0
}
656
657
static zend_always_inline void zend_mm_bitset_reset_range(zend_mm_bitset *bitset, int start, int len)
658
0
{
659
0
  if (len == 1) {
660
0
    zend_mm_bitset_reset_bit(bitset, start);
661
0
  } else {
662
0
    int pos = start / ZEND_MM_BITSET_LEN;
663
0
    int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
664
0
    int bit = start & (ZEND_MM_BITSET_LEN - 1);
665
0
    zend_mm_bitset tmp;
666
667
0
    if (pos != end) {
668
      /* reset bits from "bit" to ZEND_MM_BITSET_LEN-1 */
669
0
      tmp = ~((Z_UL(1) << bit) - 1);
670
0
      bitset[pos++] &= ~tmp;
671
0
      while (pos != end) {
672
        /* set all bits */
673
0
        bitset[pos++] = 0;
674
0
      }
675
0
      end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
676
      /* reset bits from "0" to "end" */
677
0
      tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
678
0
      bitset[pos] &= ~tmp;
679
0
    } else {
680
0
      end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
681
      /* reset bits from "bit" to "end" */
682
0
      tmp = (zend_mm_bitset)-1 << bit;
683
0
      tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
684
0
      bitset[pos] &= ~tmp;
685
0
    }
686
0
  }
687
0
}
688
689
static zend_always_inline int zend_mm_bitset_is_free_range(zend_mm_bitset *bitset, int start, int len)
690
0
{
691
0
  if (len == 1) {
692
0
    return !zend_mm_bitset_is_set(bitset, start);
693
0
  } else {
694
0
    int pos = start / ZEND_MM_BITSET_LEN;
695
0
    int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
696
0
    int bit = start & (ZEND_MM_BITSET_LEN - 1);
697
0
    zend_mm_bitset tmp;
698
699
0
    if (pos != end) {
700
      /* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */
701
0
      tmp = (zend_mm_bitset)-1 << bit;
702
0
      if ((bitset[pos++] & tmp) != 0) {
703
0
        return 0;
704
0
      }
705
0
      while (pos != end) {
706
        /* set all bits */
707
0
        if (bitset[pos++] != 0) {
708
0
          return 0;
709
0
        }
710
0
      }
711
0
      end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
712
      /* set bits from "0" to "end" */
713
0
      tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
714
0
      return (bitset[pos] & tmp) == 0;
715
0
    } else {
716
0
      end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
717
      /* set bits from "bit" to "end" */
718
0
      tmp = (zend_mm_bitset)-1 << bit;
719
0
      tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
720
0
      return (bitset[pos] & tmp) == 0;
721
0
    }
722
0
  }
723
0
}
724
725
/**********/
726
/* Chunks */
727
/**********/
728
729
static zend_always_inline void zend_mm_hugepage(void* ptr, size_t size)
730
0
{
731
0
#if defined(MADV_HUGEPAGE)
732
0
  (void)madvise(ptr, size, MADV_HUGEPAGE);
733
#elif defined(HAVE_MEMCNTL)
734
  struct memcntl_mha m = {.mha_cmd = MHA_MAPSIZE_VA, .mha_pagesize = ZEND_MM_CHUNK_SIZE, .mha_flags = 0};
735
  (void)memcntl(ptr, size, MC_HAT_ADVISE, (char *)&m, 0, 0);
736
#elif !defined(VM_FLAGS_SUPERPAGE_SIZE_2MB) && !defined(MAP_ALIGNED_SUPER)
737
  zend_error_noreturn(E_ERROR, "huge_pages: thp unsupported on this platform");
738
#endif
739
0
}
740
741
static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
742
0
{
743
0
  void *ptr = zend_mm_mmap(size);
744
745
0
  if (ptr == NULL) {
746
0
    return NULL;
747
0
  } else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
748
0
    if (zend_mm_use_huge_pages) {
749
0
      zend_mm_hugepage(ptr, size);
750
0
    }
751
#ifdef __SANITIZE_ADDRESS__
752
    ASAN_UNPOISON_MEMORY_REGION(ptr, size);
753
#endif
754
0
    return ptr;
755
0
  } else {
756
0
    size_t offset;
757
758
    /* chunk has to be aligned */
759
0
    zend_mm_munmap(ptr, size);
760
0
    ptr = zend_mm_mmap(size + alignment - REAL_PAGE_SIZE);
761
#ifdef _WIN32
762
    offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
763
    if (offset != 0) {
764
      offset = alignment - offset;
765
    }
766
    zend_mm_munmap(ptr, size + alignment - REAL_PAGE_SIZE);
767
    ptr = zend_mm_mmap_fixed((void*)((char*)ptr + offset), size);
768
    if (ptr == NULL) { // fix GH-9650, fixed addr range is not free
769
      ptr = zend_mm_mmap(size + alignment - REAL_PAGE_SIZE);
770
      if (ptr == NULL) {
771
        return NULL;
772
      }
773
      offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
774
      if (offset != 0) {
775
        ptr = (void*)((char*)ptr + alignment - offset);
776
      }
777
    }
778
    return ptr;
779
#else
780
0
    offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
781
0
    if (offset != 0) {
782
0
      offset = alignment - offset;
783
0
      zend_mm_munmap(ptr, offset);
784
0
      ptr = (char*)ptr + offset;
785
0
      alignment -= offset;
786
0
    }
787
0
    if (alignment > REAL_PAGE_SIZE) {
788
0
      zend_mm_munmap((char*)ptr + size, alignment - REAL_PAGE_SIZE);
789
0
    }
790
0
    if (zend_mm_use_huge_pages) {
791
0
      zend_mm_hugepage(ptr, size);
792
0
    }
793
# ifdef __SANITIZE_ADDRESS__
794
    ASAN_UNPOISON_MEMORY_REGION(ptr, size);
795
# endif
796
0
#endif
797
0
    return ptr;
798
0
  }
799
0
}
800
801
static void *zend_mm_chunk_alloc(zend_mm_heap *heap, size_t size, size_t alignment)
802
0
{
803
0
#if ZEND_MM_STORAGE
804
0
  if (UNEXPECTED(heap->storage)) {
805
0
    void *ptr = heap->storage->handlers.chunk_alloc(heap->storage, size, alignment);
806
0
    ZEND_ASSERT(((uintptr_t)((char*)ptr + (alignment-1)) & (alignment-1)) == (uintptr_t)ptr);
807
0
    return ptr;
808
0
  }
809
0
#endif
810
0
  return zend_mm_chunk_alloc_int(size, alignment);
811
0
}
812
813
static void zend_mm_chunk_free(zend_mm_heap *heap, void *addr, size_t size)
814
0
{
815
0
#if ZEND_MM_STORAGE
816
0
  if (UNEXPECTED(heap->storage)) {
817
0
    heap->storage->handlers.chunk_free(heap->storage, addr, size);
818
0
    return;
819
0
  }
820
0
#endif
821
0
  zend_mm_munmap(addr, size);
822
0
}
823
824
static int zend_mm_chunk_truncate(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
825
0
{
826
0
#if ZEND_MM_STORAGE
827
0
  if (UNEXPECTED(heap->storage)) {
828
0
    if (heap->storage->handlers.chunk_truncate) {
829
0
      return heap->storage->handlers.chunk_truncate(heap->storage, addr, old_size, new_size);
830
0
    } else {
831
0
      return 0;
832
0
    }
833
0
  }
834
0
#endif
835
0
#ifndef _WIN32
836
0
  zend_mm_munmap((char*)addr + new_size, old_size - new_size);
837
0
  return 1;
838
#else
839
  return 0;
840
#endif
841
0
}
842
843
static int zend_mm_chunk_extend(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
844
0
{
845
0
#if ZEND_MM_STORAGE
846
0
  if (UNEXPECTED(heap->storage)) {
847
0
    if (heap->storage->handlers.chunk_extend) {
848
0
      return heap->storage->handlers.chunk_extend(heap->storage, addr, old_size, new_size);
849
0
    } else {
850
0
      return 0;
851
0
    }
852
0
  }
853
0
#endif
854
0
#ifdef HAVE_MREMAP
855
  /* We don't use MREMAP_MAYMOVE due to alignment requirements. */
856
0
  void *ptr = mremap(addr, old_size, new_size, 0);
857
0
  if (ptr == MAP_FAILED) {
858
0
    return 0;
859
0
  }
860
  /* Sanity check: The mapping shouldn't have moved. */
861
0
  ZEND_ASSERT(ptr == addr);
862
0
  return 1;
863
#elif !defined(_WIN32)
864
  return (zend_mm_mmap_fixed((char*)addr + old_size, new_size - old_size) != NULL);
865
#else
866
  return 0;
867
#endif
868
0
}
869
870
static zend_always_inline void zend_mm_chunk_init(zend_mm_heap *heap, zend_mm_chunk *chunk)
871
0
{
872
0
  chunk->heap = heap;
873
0
  chunk->next = heap->main_chunk;
874
0
  chunk->prev = heap->main_chunk->prev;
875
0
  chunk->prev->next = chunk;
876
0
  chunk->next->prev = chunk;
877
  /* mark first pages as allocated */
878
0
  chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
879
0
  chunk->free_tail = ZEND_MM_FIRST_PAGE;
880
  /* the younger chunks have bigger number */
881
0
  chunk->num = chunk->prev->num + 1;
882
  /* mark first pages as allocated */
883
0
  chunk->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
884
0
  chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
885
0
}
886
887
/***********************/
888
/* Huge Runs (forward) */
889
/***********************/
890
891
static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
892
static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
893
static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
894
895
#if ZEND_DEBUG
896
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
897
#else
898
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
899
#endif
900
901
/**************/
902
/* Large Runs */
903
/**************/
904
905
#if ZEND_DEBUG
906
static void *zend_mm_alloc_pages(zend_mm_heap *heap, uint32_t pages_count, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
907
#else
908
static void *zend_mm_alloc_pages(zend_mm_heap *heap, uint32_t pages_count ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
909
#endif
910
0
{
911
0
  zend_mm_chunk *chunk = heap->main_chunk;
912
0
  uint32_t page_num, len;
913
0
  int steps = 0;
914
915
0
  while (1) {
916
0
    if (UNEXPECTED(chunk->free_pages < pages_count)) {
917
0
      goto not_found;
918
#if 0
919
    } else if (UNEXPECTED(chunk->free_pages + chunk->free_tail == ZEND_MM_PAGES)) {
920
      if (UNEXPECTED(ZEND_MM_PAGES - chunk->free_tail < pages_count)) {
921
        goto not_found;
922
      } else {
923
        page_num = chunk->free_tail;
924
        goto found;
925
      }
926
    } else if (0) {
927
      /* First-Fit Search */
928
      int free_tail = chunk->free_tail;
929
      zend_mm_bitset *bitset = chunk->free_map;
930
      zend_mm_bitset tmp = *(bitset++);
931
      int i = 0;
932
933
      while (1) {
934
        /* skip allocated blocks */
935
        while (tmp == (zend_mm_bitset)-1) {
936
          i += ZEND_MM_BITSET_LEN;
937
          if (i == ZEND_MM_PAGES) {
938
            goto not_found;
939
          }
940
          tmp = *(bitset++);
941
        }
942
        /* find first 0 bit */
943
        page_num = i + zend_mm_bitset_nts(tmp);
944
        /* reset bits from 0 to "bit" */
945
        tmp &= tmp + 1;
946
        /* skip free blocks */
947
        while (tmp == 0) {
948
          i += ZEND_MM_BITSET_LEN;
949
          len = i - page_num;
950
          if (len >= pages_count) {
951
            goto found;
952
          } else if (i >= free_tail) {
953
            goto not_found;
954
          }
955
          tmp = *(bitset++);
956
        }
957
        /* find first 1 bit */
958
        len = (i + zend_ulong_ntz(tmp)) - page_num;
959
        if (len >= pages_count) {
960
          goto found;
961
        }
962
        /* set bits from 0 to "bit" */
963
        tmp |= tmp - 1;
964
      }
965
#endif
966
0
    } else {
967
      /* Best-Fit Search */
968
0
      int best = -1;
969
0
      uint32_t best_len = ZEND_MM_PAGES;
970
0
      uint32_t free_tail = chunk->free_tail;
971
0
      zend_mm_bitset *bitset = chunk->free_map;
972
0
      zend_mm_bitset tmp = *(bitset++);
973
0
      uint32_t i = 0;
974
975
0
      while (1) {
976
        /* skip allocated blocks */
977
0
        while (tmp == (zend_mm_bitset)-1) {
978
0
          i += ZEND_MM_BITSET_LEN;
979
0
          if (i == ZEND_MM_PAGES) {
980
0
            if (best > 0) {
981
0
              page_num = best;
982
0
              goto found;
983
0
            } else {
984
0
              goto not_found;
985
0
            }
986
0
          }
987
0
          tmp = *(bitset++);
988
0
        }
989
        /* find first 0 bit */
990
0
        page_num = i + zend_mm_bitset_nts(tmp);
991
        /* reset bits from 0 to "bit" */
992
0
        tmp &= tmp + 1;
993
        /* skip free blocks */
994
0
        while (tmp == 0) {
995
0
          i += ZEND_MM_BITSET_LEN;
996
0
          if (i >= free_tail || i == ZEND_MM_PAGES) {
997
0
            len = ZEND_MM_PAGES - page_num;
998
0
            if (len >= pages_count && len < best_len) {
999
0
              chunk->free_tail = page_num + pages_count;
1000
0
              goto found;
1001
0
            } else {
1002
              /* set accurate value */
1003
0
              chunk->free_tail = page_num;
1004
0
              if (best > 0) {
1005
0
                page_num = best;
1006
0
                goto found;
1007
0
              } else {
1008
0
                goto not_found;
1009
0
              }
1010
0
            }
1011
0
          }
1012
0
          tmp = *(bitset++);
1013
0
        }
1014
        /* find first 1 bit */
1015
0
        len = i + zend_ulong_ntz(tmp) - page_num;
1016
0
        if (len >= pages_count) {
1017
0
          if (len == pages_count) {
1018
0
            goto found;
1019
0
          } else if (len < best_len) {
1020
0
            best_len = len;
1021
0
            best = page_num;
1022
0
          }
1023
0
        }
1024
        /* set bits from 0 to "bit" */
1025
0
        tmp |= tmp - 1;
1026
0
      }
1027
0
    }
1028
1029
0
not_found:
1030
0
    if (chunk->next == heap->main_chunk) {
1031
0
get_chunk:
1032
0
      if (heap->cached_chunks) {
1033
0
        heap->cached_chunks_count--;
1034
0
        chunk = heap->cached_chunks;
1035
0
        heap->cached_chunks = chunk->next;
1036
0
      } else {
1037
0
#if ZEND_MM_LIMIT
1038
0
        if (UNEXPECTED(ZEND_MM_CHUNK_SIZE > heap->limit - heap->real_size)) {
1039
0
          if (zend_mm_gc(heap)) {
1040
0
            goto get_chunk;
1041
0
          } else if (heap->overflow == 0) {
1042
0
#if ZEND_DEBUG
1043
0
            zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1044
#else
1045
            zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, ZEND_MM_PAGE_SIZE * pages_count);
1046
#endif
1047
0
            return NULL;
1048
0
          }
1049
0
        }
1050
0
#endif
1051
0
        chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
1052
0
        if (UNEXPECTED(chunk == NULL)) {
1053
          /* insufficient memory */
1054
0
          if (zend_mm_gc(heap) &&
1055
0
              (chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE)) != NULL) {
1056
            /* pass */
1057
0
          } else {
1058
#if !ZEND_MM_LIMIT
1059
            zend_mm_safe_error(heap, "Out of memory");
1060
#elif ZEND_DEBUG
1061
            zend_mm_safe_error(heap, "Out of memory (allocated %zu bytes) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
1062
#else
1063
            zend_mm_safe_error(heap, "Out of memory (allocated %zu bytes) (tried to allocate %zu bytes)", heap->real_size, ZEND_MM_PAGE_SIZE * pages_count);
1064
#endif
1065
0
            return NULL;
1066
0
          }
1067
0
        }
1068
0
#if ZEND_MM_STAT
1069
0
        do {
1070
0
          size_t size = heap->real_size + ZEND_MM_CHUNK_SIZE;
1071
0
          size_t peak = MAX(heap->real_peak, size);
1072
0
          heap->real_size = size;
1073
0
          heap->real_peak = peak;
1074
0
        } while (0);
1075
#elif ZEND_MM_LIMIT
1076
        heap->real_size += ZEND_MM_CHUNK_SIZE;
1077
1078
#endif
1079
0
      }
1080
0
      heap->chunks_count++;
1081
0
      if (heap->chunks_count > heap->peak_chunks_count) {
1082
0
        heap->peak_chunks_count = heap->chunks_count;
1083
0
      }
1084
0
      zend_mm_chunk_init(heap, chunk);
1085
0
      page_num = ZEND_MM_FIRST_PAGE;
1086
0
      len = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
1087
0
      goto found;
1088
0
    } else {
1089
0
      chunk = chunk->next;
1090
0
      steps++;
1091
0
    }
1092
0
  }
1093
1094
0
found:
1095
0
  if (steps > 2 && pages_count < 8) {
1096
0
    ZEND_MM_CHECK(chunk->next->prev == chunk, "zend_mm_heap corrupted");
1097
0
    ZEND_MM_CHECK(chunk->prev->next == chunk, "zend_mm_heap corrupted");
1098
1099
    /* move chunk into the head of the linked-list */
1100
0
    chunk->prev->next = chunk->next;
1101
0
    chunk->next->prev = chunk->prev;
1102
0
    chunk->next = heap->main_chunk->next;
1103
0
    chunk->prev = heap->main_chunk;
1104
0
    chunk->prev->next = chunk;
1105
0
    chunk->next->prev = chunk;
1106
0
  }
1107
  /* mark run as allocated */
1108
0
  chunk->free_pages -= pages_count;
1109
0
  zend_mm_bitset_set_range(chunk->free_map, page_num, pages_count);
1110
0
  chunk->map[page_num] = ZEND_MM_LRUN(pages_count);
1111
0
  if (page_num == chunk->free_tail) {
1112
0
    chunk->free_tail = page_num + pages_count;
1113
0
  }
1114
0
  return ZEND_MM_PAGE_ADDR(chunk, page_num);
1115
0
}
1116
1117
static zend_always_inline void *zend_mm_alloc_large_ex(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1118
0
{
1119
0
  int pages_count = (int)ZEND_MM_SIZE_TO_NUM(size, ZEND_MM_PAGE_SIZE);
1120
0
#if ZEND_DEBUG
1121
0
  void *ptr = zend_mm_alloc_pages(heap, pages_count, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1122
#else
1123
  void *ptr = zend_mm_alloc_pages(heap, pages_count ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1124
#endif
1125
0
#if ZEND_MM_STAT
1126
0
  do {
1127
0
    size_t size = heap->size + pages_count * ZEND_MM_PAGE_SIZE;
1128
0
    size_t peak = MAX(heap->peak, size);
1129
0
    heap->size = size;
1130
0
    heap->peak = peak;
1131
0
  } while (0);
1132
0
#endif
1133
0
  return ptr;
1134
0
}
1135
1136
static zend_never_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1137
0
{
1138
0
  return zend_mm_alloc_large_ex(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1139
0
}
1140
1141
static zend_always_inline void zend_mm_delete_chunk(zend_mm_heap *heap, zend_mm_chunk *chunk)
1142
0
{
1143
0
  ZEND_MM_CHECK(chunk->next->prev == chunk, "zend_mm_heap corrupted");
1144
0
  ZEND_MM_CHECK(chunk->prev->next == chunk, "zend_mm_heap corrupted");
1145
1146
0
  chunk->next->prev = chunk->prev;
1147
0
  chunk->prev->next = chunk->next;
1148
0
  heap->chunks_count--;
1149
0
  if (heap->chunks_count + heap->cached_chunks_count < heap->avg_chunks_count + 0.1
1150
0
   || (heap->chunks_count == heap->last_chunks_delete_boundary
1151
0
    && heap->last_chunks_delete_count >= 4)) {
1152
    /* delay deletion */
1153
0
    heap->cached_chunks_count++;
1154
0
    chunk->next = heap->cached_chunks;
1155
0
    heap->cached_chunks = chunk;
1156
0
  } else {
1157
0
#if ZEND_MM_STAT || ZEND_MM_LIMIT
1158
0
    heap->real_size -= ZEND_MM_CHUNK_SIZE;
1159
0
#endif
1160
0
    if (!heap->cached_chunks) {
1161
0
      if (heap->chunks_count != heap->last_chunks_delete_boundary) {
1162
0
        heap->last_chunks_delete_boundary = heap->chunks_count;
1163
0
        heap->last_chunks_delete_count = 0;
1164
0
      } else {
1165
0
        heap->last_chunks_delete_count++;
1166
0
      }
1167
0
    }
1168
0
    if (!heap->cached_chunks || chunk->num > heap->cached_chunks->num) {
1169
0
      zend_mm_chunk_free(heap, chunk, ZEND_MM_CHUNK_SIZE);
1170
0
    } else {
1171
//TODO: select the best chunk to delete???
1172
0
      chunk->next = heap->cached_chunks->next;
1173
0
      zend_mm_chunk_free(heap, heap->cached_chunks, ZEND_MM_CHUNK_SIZE);
1174
0
      heap->cached_chunks = chunk;
1175
0
    }
1176
0
  }
1177
0
}
1178
1179
static zend_always_inline void zend_mm_free_pages_ex(zend_mm_heap *heap, zend_mm_chunk *chunk, uint32_t page_num, uint32_t pages_count, int free_chunk)
1180
0
{
1181
0
  chunk->free_pages += pages_count;
1182
0
  zend_mm_bitset_reset_range(chunk->free_map, page_num, pages_count);
1183
0
  chunk->map[page_num] = 0;
1184
0
  if (chunk->free_tail == page_num + pages_count) {
1185
    /* this setting may be not accurate */
1186
0
    chunk->free_tail = page_num;
1187
0
  }
1188
0
  if (free_chunk && chunk != heap->main_chunk && chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
1189
0
    zend_mm_delete_chunk(heap, chunk);
1190
0
  }
1191
0
}
1192
1193
static zend_never_inline void zend_mm_free_pages(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
1194
0
{
1195
0
  zend_mm_free_pages_ex(heap, chunk, page_num, pages_count, 1);
1196
0
}
1197
1198
static zend_always_inline void zend_mm_free_large(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
1199
0
{
1200
0
#if ZEND_MM_STAT
1201
0
  heap->size -= pages_count * ZEND_MM_PAGE_SIZE;
1202
0
#endif
1203
0
  zend_mm_free_pages(heap, chunk, page_num, pages_count);
1204
0
}
1205
1206
/**************/
1207
/* Small Runs */
1208
/**************/
1209
1210
/* higher set bit number (0->N/A, 1->1, 2->2, 4->3, 8->4, 127->7, 128->8 etc) */
1211
static zend_always_inline int zend_mm_small_size_to_bit(int size)
1212
0
{
1213
0
#if (defined(__GNUC__) || __has_builtin(__builtin_clz))  && defined(PHP_HAVE_BUILTIN_CLZ)
1214
0
  return (__builtin_clz(size) ^ 0x1f) + 1;
1215
#elif defined(_WIN32)
1216
  unsigned long index;
1217
1218
  if (!BitScanReverse(&index, (unsigned long)size)) {
1219
    /* undefined behavior */
1220
    return 64;
1221
  }
1222
1223
  return (((31 - (int)index) ^ 0x1f) + 1);
1224
#else
1225
  int n = 16;
1226
  if (size <= 0x00ff) {n -= 8; size = size << 8;}
1227
  if (size <= 0x0fff) {n -= 4; size = size << 4;}
1228
  if (size <= 0x3fff) {n -= 2; size = size << 2;}
1229
  if (size <= 0x7fff) {n -= 1;}
1230
  return n;
1231
#endif
1232
0
}
1233
1234
#ifndef MAX
1235
# define MAX(a, b) (((a) > (b)) ? (a) : (b))
1236
#endif
1237
1238
#ifndef MIN
1239
# define MIN(a, b) (((a) < (b)) ? (a) : (b))
1240
#endif
1241
1242
static zend_always_inline int zend_mm_small_size_to_bin(size_t size)
1243
0
{
1244
#if 0
1245
  int n;
1246
                            /*0,  1,  2,  3,  4,  5,  6,  7,  8,  9  10, 11, 12*/
1247
  static const int f1[] = { 3,  3,  3,  3,  3,  3,  3,  4,  5,  6,  7,  8,  9};
1248
  static const int f2[] = { 0,  0,  0,  0,  0,  0,  0,  4,  8, 12, 16, 20, 24};
1249
1250
  if (UNEXPECTED(size <= 2)) return 0;
1251
  n = zend_mm_small_size_to_bit(size - 1);
1252
  return ((size-1) >> f1[n]) + f2[n];
1253
#else
1254
0
  unsigned int t1, t2;
1255
1256
0
  if (size <= 64) {
1257
    /* we need to support size == 0 ... */
1258
0
    return (size - !!size) >> 3;
1259
0
  } else {
1260
0
    t1 = size - 1;
1261
0
    t2 = zend_mm_small_size_to_bit(t1) - 3;
1262
0
    t1 = t1 >> t2;
1263
0
    t2 = t2 - 3;
1264
0
    t2 = t2 << 2;
1265
0
    return (int)(t1 + t2);
1266
0
  }
1267
0
#endif
1268
0
}
1269
1270
0
#define ZEND_MM_SMALL_SIZE_TO_BIN(size)  zend_mm_small_size_to_bin(size)
1271
1272
#if ZEND_MM_HEAP_PROTECTION
1273
/* We keep track of free slots by organizing them in a linked list, with the
1274
 * first word of every free slot being a pointer to the next one.
1275
 *
1276
 * In order to frustrate corruptions, we check the consistency of these pointers
1277
 * before dereference by comparing them with a shadow.
1278
 *
1279
 * The shadow is a copy of the pointer, stored at the end of the slot. It is
1280
 * XOR'ed with a random key, and converted to big-endian so that smaller
1281
 * corruptions affect the most significant bytes, which has a high chance of
1282
 * resulting in an invalid address instead of pointing to an adjacent slot.
1283
 */
1284
1285
#define ZEND_MM_FREE_SLOT_PTR_SHADOW(free_slot, bin_num) \
1286
0
  *((zend_mm_free_slot**)((char*)(free_slot) + bin_data_size[(bin_num)] - sizeof(zend_mm_free_slot*)))
1287
1288
static zend_always_inline zend_mm_free_slot* zend_mm_encode_free_slot(const zend_mm_heap *heap, const zend_mm_free_slot *slot)
1289
0
{
1290
#ifdef WORDS_BIGENDIAN
1291
  return (zend_mm_free_slot*)(((uintptr_t)slot) ^ heap->shadow_key);
1292
#else
1293
0
  return (zend_mm_free_slot*)(BSWAPPTR((uintptr_t)slot) ^ heap->shadow_key);
1294
0
#endif
1295
0
}
1296
1297
static zend_always_inline zend_mm_free_slot* zend_mm_decode_free_slot_key(uintptr_t shadow_key, zend_mm_free_slot *slot)
1298
0
{
1299
#ifdef WORDS_BIGENDIAN
1300
  return (zend_mm_free_slot*)((uintptr_t)slot ^ shadow_key);
1301
#else
1302
0
  return (zend_mm_free_slot*)(BSWAPPTR((uintptr_t)slot ^ shadow_key));
1303
0
#endif
1304
0
}
1305
1306
static zend_always_inline zend_mm_free_slot* zend_mm_decode_free_slot(zend_mm_heap *heap, zend_mm_free_slot *slot)
1307
0
{
1308
0
  return zend_mm_decode_free_slot_key(heap->shadow_key, slot);
1309
0
}
1310
1311
static zend_always_inline void zend_mm_set_next_free_slot(zend_mm_heap *heap, uint32_t bin_num, zend_mm_free_slot *slot, zend_mm_free_slot *next)
1312
0
{
1313
0
  ZEND_ASSERT(bin_data_size[bin_num] >= ZEND_MM_MIN_USEABLE_BIN_SIZE);
1314
1315
0
  slot->next_free_slot = next;
1316
0
  ZEND_MM_FREE_SLOT_PTR_SHADOW(slot, bin_num) = zend_mm_encode_free_slot(heap, next);
1317
0
}
1318
1319
static zend_always_inline zend_mm_free_slot *zend_mm_get_next_free_slot(zend_mm_heap *heap, uint32_t bin_num, zend_mm_free_slot* slot)
1320
0
{
1321
0
  zend_mm_free_slot *next = slot->next_free_slot;
1322
0
  if (EXPECTED(next != NULL)) {
1323
0
    zend_mm_free_slot *shadow = ZEND_MM_FREE_SLOT_PTR_SHADOW(slot, bin_num);
1324
0
    if (UNEXPECTED(next != zend_mm_decode_free_slot(heap, shadow))) {
1325
0
      zend_mm_panic("zend_mm_heap corrupted");
1326
0
    }
1327
0
  }
1328
0
  return (zend_mm_free_slot*)next;
1329
0
}
1330
1331
#else /* ZEND_MM_HEAP_PROTECTION */
1332
# define zend_mm_set_next_free_slot(heap, bin_num, slot, next) do { \
1333
    (slot)->next_free_slot = (next);                            \
1334
  } while (0)
1335
# define zend_mm_get_next_free_slot(heap, bin_num, slot) (slot)->next_free_slot
1336
#endif /* ZEND_MM_HEAP_PROTECTION */
1337
1338
static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, uint32_t bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1339
0
{
1340
0
  zend_mm_chunk *chunk;
1341
0
  int page_num;
1342
0
  zend_mm_bin *bin;
1343
0
  zend_mm_free_slot *p, *end;
1344
1345
0
#if ZEND_DEBUG
1346
0
  bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num], bin_data_size[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1347
#else
1348
  bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1349
#endif
1350
0
  if (UNEXPECTED(bin == NULL)) {
1351
    /* insufficient memory */
1352
0
    return NULL;
1353
0
  }
1354
1355
0
  chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE);
1356
0
  page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE;
1357
0
  chunk->map[page_num] = ZEND_MM_SRUN(bin_num);
1358
0
  if (bin_pages[bin_num] > 1) {
1359
0
    uint32_t i = 1;
1360
1361
0
    do {
1362
0
      chunk->map[page_num+i] = ZEND_MM_NRUN(bin_num, i);
1363
0
      i++;
1364
0
    } while (i < bin_pages[bin_num]);
1365
0
  }
1366
1367
  /* create a linked list of elements from 1 to last */
1368
0
  end = (zend_mm_free_slot*)((char*)bin + (bin_data_size[bin_num] * (bin_elements[bin_num] - 1)));
1369
0
  heap->free_slot[bin_num] = p = (zend_mm_free_slot*)((char*)bin + bin_data_size[bin_num]);
1370
0
  do {
1371
0
    zend_mm_set_next_free_slot(heap, bin_num, p, (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]));
1372
0
#if ZEND_DEBUG
1373
0
    do {
1374
0
      zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1375
0
      dbg->size = 0;
1376
0
    } while (0);
1377
0
#endif
1378
0
    p = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
1379
0
  } while (p != end);
1380
1381
  /* terminate list using NULL */
1382
0
  p->next_free_slot = NULL;
1383
0
#if ZEND_DEBUG
1384
0
    do {
1385
0
      zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1386
0
      dbg->size = 0;
1387
0
    } while (0);
1388
0
#endif
1389
1390
  /* return first element */
1391
0
  return bin;
1392
0
}
1393
1394
static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1395
0
{
1396
0
  ZEND_ASSERT(bin_data_size[bin_num] >= ZEND_MM_MIN_USEABLE_BIN_SIZE);
1397
1398
0
#if ZEND_MM_STAT
1399
0
  do {
1400
0
    size_t size = heap->size + bin_data_size[bin_num];
1401
0
    size_t peak = MAX(heap->peak, size);
1402
0
    heap->size = size;
1403
0
    heap->peak = peak;
1404
0
  } while (0);
1405
0
#endif
1406
1407
0
  if (EXPECTED(heap->free_slot[bin_num] != NULL)) {
1408
0
    zend_mm_free_slot *p = heap->free_slot[bin_num];
1409
0
    heap->free_slot[bin_num] = zend_mm_get_next_free_slot(heap, bin_num, p);
1410
0
    return p;
1411
0
  } else {
1412
0
    return zend_mm_alloc_small_slow(heap, bin_num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1413
0
  }
1414
0
}
1415
1416
static zend_always_inline void zend_mm_free_small(zend_mm_heap *heap, void *ptr, int bin_num)
1417
0
{
1418
0
  ZEND_ASSERT(bin_data_size[bin_num] >= ZEND_MM_MIN_USEABLE_BIN_SIZE);
1419
1420
0
  zend_mm_free_slot *p;
1421
1422
0
#if ZEND_MM_STAT
1423
0
  heap->size -= bin_data_size[bin_num];
1424
0
#endif
1425
1426
0
#if ZEND_DEBUG
1427
0
  do {
1428
0
    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1429
0
    dbg->size = 0;
1430
0
  } while (0);
1431
0
#endif
1432
1433
0
  p = (zend_mm_free_slot*)ptr;
1434
0
  zend_mm_set_next_free_slot(heap, bin_num, p, heap->free_slot[bin_num]);
1435
0
  heap->free_slot[bin_num] = p;
1436
0
}
1437
1438
/********/
1439
/* Heap */
1440
/********/
1441
1442
#if ZEND_DEBUG
1443
static zend_always_inline zend_mm_debug_info *zend_mm_get_debug_info(zend_mm_heap *heap, void *ptr)
1444
0
{
1445
0
  size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1446
0
  zend_mm_chunk *chunk;
1447
0
  int page_num;
1448
0
  zend_mm_page_info info;
1449
1450
0
  ZEND_MM_CHECK(page_offset != 0, "zend_mm_heap corrupted");
1451
0
  chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1452
0
  page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1453
0
  info = chunk->map[page_num];
1454
0
  ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1455
0
  if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1456
0
    int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
1457
0
    return (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1458
0
  } else /* if (info & ZEND_MM_IS_LRUN) */ {
1459
0
    int pages_count = ZEND_MM_LRUN_PAGES(info);
1460
1461
0
    return (zend_mm_debug_info*)((char*)ptr + ZEND_MM_PAGE_SIZE * pages_count - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1462
0
  }
1463
0
}
1464
#endif
1465
1466
static zend_always_inline void *zend_mm_alloc_heap(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1467
0
{
1468
0
  void *ptr;
1469
0
#if ZEND_MM_HEAP_PROTECTION
1470
0
  if (size < ZEND_MM_MIN_USEABLE_BIN_SIZE) {
1471
0
    size = ZEND_MM_MIN_USEABLE_BIN_SIZE;
1472
0
  }
1473
0
#endif /* ZEND_MM_HEAP_PROTECTION */
1474
0
#if ZEND_DEBUG
1475
0
  size_t real_size = size;
1476
0
  zend_mm_debug_info *dbg;
1477
1478
  /* special handling for zero-size allocation */
1479
0
  size = MAX(size, 1);
1480
0
  size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1481
0
  if (UNEXPECTED(size < real_size)) {
1482
0
    zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu + %zu)", ZEND_MM_ALIGNED_SIZE(real_size), ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1483
0
    return NULL;
1484
0
  }
1485
0
#endif
1486
0
  if (EXPECTED(size <= ZEND_MM_MAX_SMALL_SIZE)) {
1487
0
    ptr = zend_mm_alloc_small(heap, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1488
0
#if ZEND_DEBUG
1489
0
    dbg = zend_mm_get_debug_info(heap, ptr);
1490
0
    dbg->size = real_size;
1491
0
    dbg->filename = __zend_filename;
1492
0
    dbg->orig_filename = __zend_orig_filename;
1493
0
    dbg->lineno = __zend_lineno;
1494
0
    dbg->orig_lineno = __zend_orig_lineno;
1495
0
#endif
1496
0
    return ptr;
1497
0
  } else if (EXPECTED(size <= ZEND_MM_MAX_LARGE_SIZE)) {
1498
0
    ptr = zend_mm_alloc_large(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1499
0
#if ZEND_DEBUG
1500
0
    dbg = zend_mm_get_debug_info(heap, ptr);
1501
0
    dbg->size = real_size;
1502
0
    dbg->filename = __zend_filename;
1503
0
    dbg->orig_filename = __zend_orig_filename;
1504
0
    dbg->lineno = __zend_lineno;
1505
0
    dbg->orig_lineno = __zend_orig_lineno;
1506
0
#endif
1507
0
    return ptr;
1508
0
  } else {
1509
0
#if ZEND_DEBUG
1510
0
    size = real_size;
1511
0
#endif
1512
0
    return zend_mm_alloc_huge(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1513
0
  }
1514
0
}
1515
1516
static zend_always_inline void zend_mm_free_heap(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1517
0
{
1518
0
  size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1519
1520
0
  if (UNEXPECTED(page_offset == 0)) {
1521
0
    if (ptr != NULL) {
1522
0
      zend_mm_free_huge(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1523
0
    }
1524
0
  } else {
1525
0
    zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1526
0
    int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1527
0
    zend_mm_page_info info = chunk->map[page_num];
1528
1529
0
    ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1530
0
    if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1531
0
      zend_mm_free_small(heap, ptr, ZEND_MM_SRUN_BIN_NUM(info));
1532
0
    } else /* if (info & ZEND_MM_IS_LRUN) */ {
1533
0
      int pages_count = ZEND_MM_LRUN_PAGES(info);
1534
1535
0
      ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
1536
0
      zend_mm_free_large(heap, chunk, page_num, pages_count);
1537
0
    }
1538
0
  }
1539
0
}
1540
1541
static size_t zend_mm_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1542
0
{
1543
0
  size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1544
1545
0
  if (UNEXPECTED(page_offset == 0)) {
1546
0
    return zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1547
0
  } else {
1548
0
    zend_mm_chunk *chunk;
1549
#if 0 && ZEND_DEBUG
1550
    zend_mm_debug_info *dbg = zend_mm_get_debug_info(heap, ptr);
1551
    return dbg->size;
1552
#else
1553
0
    int page_num;
1554
0
    zend_mm_page_info info;
1555
1556
0
    chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1557
0
    page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1558
0
    info = chunk->map[page_num];
1559
0
    ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1560
0
    if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1561
0
      return bin_data_size[ZEND_MM_SRUN_BIN_NUM(info)];
1562
0
    } else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
1563
0
      return ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
1564
0
    }
1565
0
#endif
1566
0
  }
1567
0
}
1568
1569
static zend_never_inline void *zend_mm_realloc_slow(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1570
0
{
1571
0
  void *ret;
1572
1573
0
#if ZEND_MM_STAT
1574
0
  do {
1575
0
    size_t orig_peak = heap->peak;
1576
0
#endif
1577
0
    ret = zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1578
0
    memcpy(ret, ptr, copy_size);
1579
0
    zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1580
0
#if ZEND_MM_STAT
1581
0
    heap->peak = MAX(orig_peak, heap->size);
1582
0
  } while (0);
1583
0
#endif
1584
0
  return ret;
1585
0
}
1586
1587
static zend_never_inline void *zend_mm_realloc_huge(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1588
0
{
1589
0
  size_t old_size;
1590
0
  size_t new_size;
1591
0
#if ZEND_DEBUG
1592
0
  size_t real_size;
1593
0
#endif
1594
1595
0
  old_size = zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1596
0
#if ZEND_DEBUG
1597
0
  real_size = size;
1598
0
  size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1599
0
#endif
1600
0
  if (size > ZEND_MM_MAX_LARGE_SIZE) {
1601
0
#if ZEND_DEBUG
1602
0
    size = real_size;
1603
0
#endif
1604
#ifdef ZEND_WIN32
1605
    /* On Windows we don't have ability to extend huge blocks in-place.
1606
     * We allocate them with 2MB size granularity, to avoid many
1607
     * reallocations when they are extended by small pieces
1608
     */
1609
    new_size = ZEND_MM_ALIGNED_SIZE_EX(size, MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE));
1610
#else
1611
0
    new_size = ZEND_MM_ALIGNED_SIZE_EX(size, REAL_PAGE_SIZE);
1612
0
#endif
1613
0
    if (new_size == old_size) {
1614
0
#if ZEND_DEBUG
1615
0
      zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1616
#else
1617
      zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1618
#endif
1619
0
      return ptr;
1620
0
    } else if (new_size < old_size) {
1621
      /* unmup tail */
1622
0
      if (zend_mm_chunk_truncate(heap, ptr, old_size, new_size)) {
1623
0
#if ZEND_MM_STAT || ZEND_MM_LIMIT
1624
0
        heap->real_size -= old_size - new_size;
1625
0
#endif
1626
0
#if ZEND_MM_STAT
1627
0
        heap->size -= old_size - new_size;
1628
0
#endif
1629
0
#if ZEND_DEBUG
1630
0
        zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1631
#else
1632
        zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1633
#endif
1634
0
        return ptr;
1635
0
      }
1636
0
    } else /* if (new_size > old_size) */ {
1637
0
#if ZEND_MM_LIMIT
1638
0
      if (UNEXPECTED(new_size - old_size > heap->limit - heap->real_size)) {
1639
0
        if (zend_mm_gc(heap) && new_size - old_size <= heap->limit - heap->real_size) {
1640
          /* pass */
1641
0
        } else if (heap->overflow == 0) {
1642
0
#if ZEND_DEBUG
1643
0
          zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1644
#else
1645
          zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
1646
#endif
1647
0
          return NULL;
1648
0
        }
1649
0
      }
1650
0
#endif
1651
      /* try to map tail right after this block */
1652
0
      if (zend_mm_chunk_extend(heap, ptr, old_size, new_size)) {
1653
0
#if ZEND_MM_STAT || ZEND_MM_LIMIT
1654
0
        heap->real_size += new_size - old_size;
1655
0
#endif
1656
0
#if ZEND_MM_STAT
1657
0
        heap->real_peak = MAX(heap->real_peak, heap->real_size);
1658
0
        heap->size += new_size - old_size;
1659
0
        heap->peak = MAX(heap->peak, heap->size);
1660
0
#endif
1661
0
#if ZEND_DEBUG
1662
0
        zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1663
#else
1664
        zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1665
#endif
1666
0
        return ptr;
1667
0
      }
1668
0
    }
1669
0
  }
1670
1671
0
  return zend_mm_realloc_slow(heap, ptr, size, MIN(old_size, copy_size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1672
0
}
1673
1674
static zend_always_inline void *zend_mm_realloc_heap(zend_mm_heap *heap, void *ptr, size_t size, bool use_copy_size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1675
0
{
1676
0
  size_t page_offset;
1677
0
  size_t old_size;
1678
0
  size_t new_size;
1679
0
  void *ret;
1680
0
#if ZEND_DEBUG
1681
0
  zend_mm_debug_info *dbg;
1682
0
#endif
1683
1684
0
  page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1685
0
  if (UNEXPECTED(page_offset == 0)) {
1686
0
    if (EXPECTED(ptr == NULL)) {
1687
0
      return _zend_mm_alloc(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1688
0
    } else {
1689
0
      return zend_mm_realloc_huge(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1690
0
    }
1691
0
  } else {
1692
0
    zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1693
0
    int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1694
0
    zend_mm_page_info info = chunk->map[page_num];
1695
0
#if ZEND_MM_HEAP_PROTECTION
1696
0
    if (size < ZEND_MM_MIN_USEABLE_BIN_SIZE) {
1697
0
      size = ZEND_MM_MIN_USEABLE_BIN_SIZE;
1698
0
    }
1699
0
#endif /* ZEND_MM_HEAP_PROTECTION */
1700
0
#if ZEND_DEBUG
1701
0
    size_t real_size = size;
1702
1703
0
    size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1704
0
#endif
1705
1706
0
    ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1707
0
    if (info & ZEND_MM_IS_SRUN) {
1708
0
      int old_bin_num = ZEND_MM_SRUN_BIN_NUM(info);
1709
1710
0
      do {
1711
0
        old_size = bin_data_size[old_bin_num];
1712
1713
        /* Check if requested size fits into current bin */
1714
0
        if (size <= old_size) {
1715
          /* Check if truncation is necessary */
1716
0
          if (old_bin_num > 0 && size < bin_data_size[old_bin_num - 1]) {
1717
            /* truncation */
1718
0
            ret = zend_mm_alloc_small(heap, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1719
0
            copy_size = use_copy_size ? MIN(size, copy_size) : size;
1720
0
            memcpy(ret, ptr, copy_size);
1721
0
            zend_mm_free_small(heap, ptr, old_bin_num);
1722
0
          } else {
1723
            /* reallocation in-place */
1724
0
            ret = ptr;
1725
0
          }
1726
0
        } else if (size <= ZEND_MM_MAX_SMALL_SIZE) {
1727
          /* small extension */
1728
1729
0
#if ZEND_MM_STAT
1730
0
          do {
1731
0
            size_t orig_peak = heap->peak;
1732
0
#endif
1733
0
            ret = zend_mm_alloc_small(heap, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1734
0
            copy_size = use_copy_size ? MIN(old_size, copy_size) : old_size;
1735
0
            memcpy(ret, ptr, copy_size);
1736
0
            zend_mm_free_small(heap, ptr, old_bin_num);
1737
0
#if ZEND_MM_STAT
1738
0
            heap->peak = MAX(orig_peak, heap->size);
1739
0
          } while (0);
1740
0
#endif
1741
0
        } else {
1742
          /* slow reallocation */
1743
0
          break;
1744
0
        }
1745
1746
0
#if ZEND_DEBUG
1747
0
        dbg = zend_mm_get_debug_info(heap, ret);
1748
0
        dbg->size = real_size;
1749
0
        dbg->filename = __zend_filename;
1750
0
        dbg->orig_filename = __zend_orig_filename;
1751
0
        dbg->lineno = __zend_lineno;
1752
0
        dbg->orig_lineno = __zend_orig_lineno;
1753
0
#endif
1754
0
        return ret;
1755
0
      }  while (0);
1756
1757
0
    } else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
1758
0
      ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
1759
0
      old_size = ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
1760
0
      if (size > ZEND_MM_MAX_SMALL_SIZE && size <= ZEND_MM_MAX_LARGE_SIZE) {
1761
0
        new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
1762
0
        if (new_size == old_size) {
1763
0
#if ZEND_DEBUG
1764
0
          dbg = zend_mm_get_debug_info(heap, ptr);
1765
0
          dbg->size = real_size;
1766
0
          dbg->filename = __zend_filename;
1767
0
          dbg->orig_filename = __zend_orig_filename;
1768
0
          dbg->lineno = __zend_lineno;
1769
0
          dbg->orig_lineno = __zend_orig_lineno;
1770
0
#endif
1771
0
          return ptr;
1772
0
        } else if (new_size < old_size) {
1773
          /* free tail pages */
1774
0
          int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
1775
0
          int rest_pages_count = (int)((old_size - new_size) / ZEND_MM_PAGE_SIZE);
1776
1777
0
#if ZEND_MM_STAT
1778
0
          heap->size -= rest_pages_count * ZEND_MM_PAGE_SIZE;
1779
0
#endif
1780
0
          chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
1781
0
          chunk->free_pages += rest_pages_count;
1782
0
          zend_mm_bitset_reset_range(chunk->free_map, page_num + new_pages_count, rest_pages_count);
1783
0
#if ZEND_DEBUG
1784
0
          dbg = zend_mm_get_debug_info(heap, ptr);
1785
0
          dbg->size = real_size;
1786
0
          dbg->filename = __zend_filename;
1787
0
          dbg->orig_filename = __zend_orig_filename;
1788
0
          dbg->lineno = __zend_lineno;
1789
0
          dbg->orig_lineno = __zend_orig_lineno;
1790
0
#endif
1791
0
          return ptr;
1792
0
        } else /* if (new_size > old_size) */ {
1793
0
          int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
1794
0
          int old_pages_count = (int)(old_size / ZEND_MM_PAGE_SIZE);
1795
1796
          /* try to allocate tail pages after this block */
1797
0
          if (page_num + new_pages_count <= ZEND_MM_PAGES &&
1798
0
              zend_mm_bitset_is_free_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count)) {
1799
0
#if ZEND_MM_STAT
1800
0
            do {
1801
0
              size_t size = heap->size + (new_size - old_size);
1802
0
              size_t peak = MAX(heap->peak, size);
1803
0
              heap->size = size;
1804
0
              heap->peak = peak;
1805
0
            } while (0);
1806
0
#endif
1807
0
            chunk->free_pages -= new_pages_count - old_pages_count;
1808
0
            zend_mm_bitset_set_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count);
1809
0
            chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
1810
0
#if ZEND_DEBUG
1811
0
            dbg = zend_mm_get_debug_info(heap, ptr);
1812
0
            dbg->size = real_size;
1813
0
            dbg->filename = __zend_filename;
1814
0
            dbg->orig_filename = __zend_orig_filename;
1815
0
            dbg->lineno = __zend_lineno;
1816
0
            dbg->orig_lineno = __zend_orig_lineno;
1817
0
#endif
1818
0
            return ptr;
1819
0
          }
1820
0
        }
1821
0
      }
1822
0
    }
1823
0
#if ZEND_DEBUG
1824
0
    size = real_size;
1825
0
#endif
1826
0
  }
1827
1828
0
  copy_size = MIN(old_size, copy_size);
1829
0
  return zend_mm_realloc_slow(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1830
0
}
1831
1832
/*********************/
1833
/* Huge Runs (again) */
1834
/*********************/
1835
1836
#if ZEND_DEBUG
1837
static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1838
#else
1839
static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1840
#endif
1841
0
{
1842
0
  zend_mm_huge_list *list = (zend_mm_huge_list*)zend_mm_alloc_heap(heap, sizeof(zend_mm_huge_list) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1843
0
  list->ptr = ptr;
1844
0
  list->size = size;
1845
0
  list->next = heap->huge_list;
1846
0
#if ZEND_DEBUG
1847
0
  list->dbg.size = dbg_size;
1848
0
  list->dbg.filename = __zend_filename;
1849
0
  list->dbg.orig_filename = __zend_orig_filename;
1850
0
  list->dbg.lineno = __zend_lineno;
1851
0
  list->dbg.orig_lineno = __zend_orig_lineno;
1852
0
#endif
1853
0
  heap->huge_list = list;
1854
0
}
1855
1856
static size_t zend_mm_del_huge_block(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1857
0
{
1858
0
  zend_mm_huge_list *prev = NULL;
1859
0
  zend_mm_huge_list *list = heap->huge_list;
1860
0
  while (list != NULL) {
1861
0
    if (list->ptr == ptr) {
1862
0
      size_t size;
1863
1864
0
      if (prev) {
1865
0
        prev->next = list->next;
1866
0
      } else {
1867
0
        heap->huge_list = list->next;
1868
0
      }
1869
0
      size = list->size;
1870
0
      zend_mm_free_heap(heap, list ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1871
0
      return size;
1872
0
    }
1873
0
    prev = list;
1874
0
    list = list->next;
1875
0
  }
1876
0
  ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
1877
0
  return 0;
1878
0
}
1879
1880
static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1881
0
{
1882
0
  zend_mm_huge_list *list = heap->huge_list;
1883
0
  while (list != NULL) {
1884
0
    if (list->ptr == ptr) {
1885
0
      return list->size;
1886
0
    }
1887
0
    list = list->next;
1888
0
  }
1889
0
  ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
1890
0
  return 0;
1891
0
}
1892
1893
#if ZEND_DEBUG
1894
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1895
#else
1896
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1897
#endif
1898
0
{
1899
0
  zend_mm_huge_list *list = heap->huge_list;
1900
0
  while (list != NULL) {
1901
0
    if (list->ptr == ptr) {
1902
0
      list->size = size;
1903
0
#if ZEND_DEBUG
1904
0
      list->dbg.size = dbg_size;
1905
0
      list->dbg.filename = __zend_filename;
1906
0
      list->dbg.orig_filename = __zend_orig_filename;
1907
0
      list->dbg.lineno = __zend_lineno;
1908
0
      list->dbg.orig_lineno = __zend_orig_lineno;
1909
0
#endif
1910
0
      return;
1911
0
    }
1912
0
    list = list->next;
1913
0
  }
1914
0
}
1915
1916
static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1917
0
{
1918
#ifdef ZEND_WIN32
1919
  /* On Windows we don't have ability to extend huge blocks in-place.
1920
   * We allocate them with 2MB size granularity, to avoid many
1921
   * reallocations when they are extended by small pieces
1922
   */
1923
  size_t alignment = MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE);
1924
#else
1925
0
  size_t alignment = REAL_PAGE_SIZE;
1926
0
#endif
1927
0
  size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, alignment);
1928
0
  void *ptr;
1929
1930
0
  if (UNEXPECTED(new_size < size)) {
1931
0
    zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu + %zu)", size, alignment);
1932
0
  }
1933
1934
0
#if ZEND_MM_LIMIT
1935
0
  if (UNEXPECTED(new_size > heap->limit - heap->real_size)) {
1936
0
    if (zend_mm_gc(heap) && new_size <= heap->limit - heap->real_size) {
1937
      /* pass */
1938
0
    } else if (heap->overflow == 0) {
1939
0
#if ZEND_DEBUG
1940
0
      zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1941
#else
1942
      zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
1943
#endif
1944
0
      return NULL;
1945
0
    }
1946
0
  }
1947
0
#endif
1948
0
  ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE);
1949
0
  if (UNEXPECTED(ptr == NULL)) {
1950
    /* insufficient memory */
1951
0
    if (zend_mm_gc(heap) &&
1952
0
        (ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE)) != NULL) {
1953
      /* pass */
1954
0
    } else {
1955
#if !ZEND_MM_LIMIT
1956
      zend_mm_safe_error(heap, "Out of memory");
1957
#elif ZEND_DEBUG
1958
      zend_mm_safe_error(heap, "Out of memory (allocated %zu bytes) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
1959
#else
1960
      zend_mm_safe_error(heap, "Out of memory (allocated %zu bytes) (tried to allocate %zu bytes)", heap->real_size, size);
1961
#endif
1962
0
      return NULL;
1963
0
    }
1964
0
  }
1965
0
#if ZEND_DEBUG
1966
0
  zend_mm_add_huge_block(heap, ptr, new_size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1967
#else
1968
  zend_mm_add_huge_block(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1969
#endif
1970
0
#if ZEND_MM_STAT
1971
0
  do {
1972
0
    size_t size = heap->real_size + new_size;
1973
0
    size_t peak = MAX(heap->real_peak, size);
1974
0
    heap->real_size = size;
1975
0
    heap->real_peak = peak;
1976
0
  } while (0);
1977
0
  do {
1978
0
    size_t size = heap->size + new_size;
1979
0
    size_t peak = MAX(heap->peak, size);
1980
0
    heap->size = size;
1981
0
    heap->peak = peak;
1982
0
  } while (0);
1983
#elif ZEND_MM_LIMIT
1984
  heap->real_size += new_size;
1985
#endif
1986
0
  return ptr;
1987
0
}
1988
1989
static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1990
0
{
1991
0
  size_t size;
1992
1993
0
  ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) == 0, "zend_mm_heap corrupted");
1994
0
  size = zend_mm_del_huge_block(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1995
0
  zend_mm_chunk_free(heap, ptr, size);
1996
0
#if ZEND_MM_STAT || ZEND_MM_LIMIT
1997
0
  heap->real_size -= size;
1998
0
#endif
1999
0
#if ZEND_MM_STAT
2000
0
  heap->size -= size;
2001
0
#endif
2002
0
}
2003
2004
/******************/
2005
/* Initialization */
2006
/******************/
2007
2008
static void zend_mm_refresh_key(zend_mm_heap *heap)
2009
0
{
2010
0
  zend_random_bytes_insecure(&heap->rand_state, &heap->shadow_key, sizeof(heap->shadow_key));
2011
0
}
2012
2013
static void zend_mm_init_key(zend_mm_heap *heap)
2014
0
{
2015
0
  memset(&heap->rand_state, 0, sizeof(heap->rand_state));
2016
0
  zend_mm_refresh_key(heap);
2017
0
}
2018
2019
ZEND_API void zend_mm_refresh_key_child(zend_mm_heap *heap)
2020
0
{
2021
0
  uintptr_t old_key = heap->shadow_key;
2022
2023
0
  zend_mm_init_key(heap);
2024
2025
  /* Update shadow pointers with new key */
2026
0
  for (int i = 0; i < ZEND_MM_BINS; i++) {
2027
0
    zend_mm_free_slot *slot = heap->free_slot[i];
2028
0
    if (!slot) {
2029
0
      continue;
2030
0
    }
2031
0
    zend_mm_free_slot *next;
2032
0
    while ((next = slot->next_free_slot)) {
2033
0
      zend_mm_free_slot *shadow = ZEND_MM_FREE_SLOT_PTR_SHADOW(slot, i);
2034
0
      if (UNEXPECTED(next != zend_mm_decode_free_slot_key(old_key, shadow))) {
2035
0
        zend_mm_panic("zend_mm_heap corrupted");
2036
0
      }
2037
0
      zend_mm_set_next_free_slot(heap, i, slot, next);
2038
0
      slot = next;
2039
0
    }
2040
0
  }
2041
2042
0
#if ZEND_DEBUG
2043
0
  heap->pid = getpid();
2044
0
#endif
2045
0
}
2046
2047
static zend_mm_heap *zend_mm_init(void)
2048
0
{
2049
0
  zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc_int(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
2050
0
  zend_mm_heap *heap;
2051
2052
0
  if (UNEXPECTED(chunk == NULL)) {
2053
0
#if ZEND_MM_ERROR
2054
0
    fprintf(stderr, "Can't initialize heap\n");
2055
0
#endif
2056
0
    return NULL;
2057
0
  }
2058
0
  heap = &chunk->heap_slot;
2059
0
  chunk->heap = heap;
2060
0
  chunk->next = chunk;
2061
0
  chunk->prev = chunk;
2062
0
  chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
2063
0
  chunk->free_tail = ZEND_MM_FIRST_PAGE;
2064
0
  chunk->num = 0;
2065
0
  chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
2066
0
  chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
2067
0
  heap->main_chunk = chunk;
2068
0
  heap->cached_chunks = NULL;
2069
0
  heap->chunks_count = 1;
2070
0
  heap->peak_chunks_count = 1;
2071
0
  heap->cached_chunks_count = 0;
2072
0
  heap->avg_chunks_count = 1.0;
2073
0
  heap->last_chunks_delete_boundary = 0;
2074
0
  heap->last_chunks_delete_count = 0;
2075
0
#if ZEND_MM_STAT || ZEND_MM_LIMIT
2076
0
  heap->real_size = ZEND_MM_CHUNK_SIZE;
2077
0
#endif
2078
0
#if ZEND_MM_STAT
2079
0
  heap->real_peak = ZEND_MM_CHUNK_SIZE;
2080
0
  heap->size = 0;
2081
0
  heap->peak = 0;
2082
0
#endif
2083
0
  zend_mm_init_key(heap);
2084
0
#if ZEND_MM_LIMIT
2085
0
  heap->limit = (size_t)Z_L(-1) >> 1;
2086
0
  heap->overflow = 0;
2087
0
#endif
2088
0
#if ZEND_MM_CUSTOM
2089
0
  heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
2090
0
#endif
2091
0
#if ZEND_MM_STORAGE
2092
0
  heap->storage = NULL;
2093
0
#endif
2094
0
  heap->huge_list = NULL;
2095
0
#if ZEND_DEBUG
2096
0
  heap->pid = getpid();
2097
0
#endif
2098
0
  return heap;
2099
0
}
2100
2101
ZEND_API size_t zend_mm_gc(zend_mm_heap *heap)
2102
0
{
2103
0
  zend_mm_free_slot *p, *q;
2104
0
  zend_mm_chunk *chunk;
2105
0
  size_t page_offset;
2106
0
  int page_num;
2107
0
  zend_mm_page_info info;
2108
0
  uint32_t i, free_counter;
2109
0
  bool has_free_pages;
2110
0
  size_t collected = 0;
2111
2112
0
#if ZEND_MM_CUSTOM
2113
0
  if (heap->use_custom_heap) {
2114
0
    size_t (*gc)(void) = heap->custom_heap._gc;
2115
0
    if (gc) {
2116
0
      return gc();
2117
0
    }
2118
0
    return 0;
2119
0
  }
2120
0
#endif
2121
2122
0
  for (i = 0; i < ZEND_MM_BINS; i++) {
2123
0
    has_free_pages = false;
2124
0
    p = heap->free_slot[i];
2125
0
    while (p != NULL) {
2126
0
      chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
2127
0
      ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
2128
0
      page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
2129
0
      ZEND_ASSERT(page_offset != 0);
2130
0
      page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
2131
0
      info = chunk->map[page_num];
2132
0
      ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
2133
0
      if (info & ZEND_MM_IS_LRUN) {
2134
0
        page_num -= ZEND_MM_NRUN_OFFSET(info);
2135
0
        info = chunk->map[page_num];
2136
0
        ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
2137
0
        ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
2138
0
      }
2139
0
      ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
2140
0
      free_counter = ZEND_MM_SRUN_FREE_COUNTER(info) + 1;
2141
0
      if (free_counter == bin_elements[i]) {
2142
0
        has_free_pages = true;
2143
0
      }
2144
0
      chunk->map[page_num] = ZEND_MM_SRUN_EX(i, free_counter);
2145
0
      p = zend_mm_get_next_free_slot(heap, i, p);
2146
0
    }
2147
2148
0
    if (!has_free_pages) {
2149
0
      continue;
2150
0
    }
2151
2152
0
    q = (zend_mm_free_slot*)&heap->free_slot[i];
2153
0
    p = q->next_free_slot;
2154
0
    while (p != NULL) {
2155
0
      chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
2156
0
      ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
2157
0
      page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
2158
0
      ZEND_ASSERT(page_offset != 0);
2159
0
      page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
2160
0
      info = chunk->map[page_num];
2161
0
      ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
2162
0
      if (info & ZEND_MM_IS_LRUN) {
2163
0
        page_num -= ZEND_MM_NRUN_OFFSET(info);
2164
0
        info = chunk->map[page_num];
2165
0
        ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
2166
0
        ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
2167
0
      }
2168
0
      ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
2169
0
      if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[i]) {
2170
        /* remove from cache */
2171
0
        p = zend_mm_get_next_free_slot(heap, i, p);
2172
0
        if (q == (zend_mm_free_slot*)&heap->free_slot[i]) {
2173
0
          q->next_free_slot = p;
2174
0
        } else {
2175
0
          zend_mm_set_next_free_slot(heap, i, q, p);
2176
0
        }
2177
0
      } else {
2178
0
        q = p;
2179
0
        if (q == (zend_mm_free_slot*)&heap->free_slot[i]) {
2180
0
          p = q->next_free_slot;
2181
0
        } else {
2182
0
          p = zend_mm_get_next_free_slot(heap, i, q);
2183
0
        }
2184
0
      }
2185
0
    }
2186
0
  }
2187
2188
0
  chunk = heap->main_chunk;
2189
0
  do {
2190
0
    i = ZEND_MM_FIRST_PAGE;
2191
0
    while (i < chunk->free_tail) {
2192
0
      if (zend_mm_bitset_is_set(chunk->free_map, i)) {
2193
0
        info = chunk->map[i];
2194
0
        if (info & ZEND_MM_IS_SRUN) {
2195
0
          int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
2196
0
          int pages_count = bin_pages[bin_num];
2197
2198
0
          if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[bin_num]) {
2199
            /* all elements are free */
2200
0
            zend_mm_free_pages_ex(heap, chunk, i, pages_count, 0);
2201
0
            collected += pages_count;
2202
0
          } else {
2203
            /* reset counter */
2204
0
            chunk->map[i] = ZEND_MM_SRUN(bin_num);
2205
0
          }
2206
0
          i += bin_pages[bin_num];
2207
0
        } else /* if (info & ZEND_MM_IS_LRUN) */ {
2208
0
          i += ZEND_MM_LRUN_PAGES(info);
2209
0
        }
2210
0
      } else {
2211
0
        i++;
2212
0
      }
2213
0
    }
2214
0
    if (chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE && chunk != heap->main_chunk) {
2215
0
      zend_mm_chunk *next_chunk = chunk->next;
2216
2217
0
      zend_mm_delete_chunk(heap, chunk);
2218
0
      chunk = next_chunk;
2219
0
    } else {
2220
0
      chunk = chunk->next;
2221
0
    }
2222
0
  } while (chunk != heap->main_chunk);
2223
2224
0
  return collected * ZEND_MM_PAGE_SIZE;
2225
0
}
2226
2227
#if ZEND_DEBUG
2228
/******************/
2229
/* Leak detection */
2230
/******************/
2231
2232
static zend_long zend_mm_find_leaks_small(zend_mm_chunk *p, uint32_t i, uint32_t j, zend_leak_info *leak)
2233
0
{
2234
0
  bool empty = true;
2235
0
  zend_long count = 0;
2236
0
  int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
2237
0
  zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * (j + 1) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2238
2239
0
  while (j < bin_elements[bin_num]) {
2240
0
    if (dbg->size != 0) {
2241
0
      if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
2242
0
        count++;
2243
0
        dbg->size = 0;
2244
0
        dbg->filename = NULL;
2245
0
        dbg->lineno = 0;
2246
0
      } else {
2247
0
        empty = false;
2248
0
      }
2249
0
    }
2250
0
    j++;
2251
0
    dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
2252
0
  }
2253
0
  if (empty) {
2254
0
    zend_mm_bitset_reset_range(p->free_map, i, bin_pages[bin_num]);
2255
0
  }
2256
0
  return count;
2257
0
}
2258
2259
static zend_long zend_mm_find_leaks(zend_mm_heap *heap, zend_mm_chunk *p, uint32_t i, zend_leak_info *leak)
2260
0
{
2261
0
  zend_long count = 0;
2262
2263
0
  do {
2264
0
    while (i < p->free_tail) {
2265
0
      if (zend_mm_bitset_is_set(p->free_map, i)) {
2266
0
        if (p->map[i] & ZEND_MM_IS_SRUN) {
2267
0
          int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
2268
0
          count += zend_mm_find_leaks_small(p, i, 0, leak);
2269
0
          i += bin_pages[bin_num];
2270
0
        } else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
2271
0
          int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
2272
0
          zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2273
2274
0
          if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
2275
0
            count++;
2276
0
          }
2277
0
          zend_mm_bitset_reset_range(p->free_map, i, pages_count);
2278
0
          i += pages_count;
2279
0
        }
2280
0
      } else {
2281
0
        i++;
2282
0
      }
2283
0
    }
2284
0
    p = p->next;
2285
0
    i = ZEND_MM_FIRST_PAGE;
2286
0
  } while (p != heap->main_chunk);
2287
0
  return count;
2288
0
}
2289
2290
static zend_long zend_mm_find_leaks_huge(zend_mm_heap *heap, zend_mm_huge_list *list)
2291
0
{
2292
0
  zend_long count = 0;
2293
0
  zend_mm_huge_list *prev = list;
2294
0
  zend_mm_huge_list *p = list->next;
2295
2296
0
  while (p) {
2297
0
    if (p->dbg.filename == list->dbg.filename && p->dbg.lineno == list->dbg.lineno) {
2298
0
      prev->next = p->next;
2299
0
      zend_mm_chunk_free(heap, p->ptr, p->size);
2300
0
      zend_mm_free_heap(heap, p, NULL, 0, NULL, 0);
2301
0
      count++;
2302
0
    } else {
2303
0
      prev = p;
2304
0
    }
2305
0
    p = prev->next;
2306
0
  }
2307
2308
0
  return count;
2309
0
}
2310
2311
static void zend_mm_check_leaks(zend_mm_heap *heap)
2312
0
{
2313
0
  zend_mm_huge_list *list;
2314
0
  zend_mm_chunk *p;
2315
0
  zend_leak_info leak;
2316
0
  zend_long repeated = 0;
2317
0
  uint32_t total = 0;
2318
0
  uint32_t i, j;
2319
2320
  /* find leaked huge blocks and free them */
2321
0
  list = heap->huge_list;
2322
0
  while (list) {
2323
0
    zend_mm_huge_list *q = list;
2324
2325
0
    leak.addr = list->ptr;
2326
0
    leak.size = list->dbg.size;
2327
0
    leak.filename = list->dbg.filename;
2328
0
    leak.orig_filename = list->dbg.orig_filename;
2329
0
    leak.lineno = list->dbg.lineno;
2330
0
    leak.orig_lineno = list->dbg.orig_lineno;
2331
2332
0
    zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
2333
0
    zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
2334
0
    repeated = zend_mm_find_leaks_huge(heap, list);
2335
0
    total += 1 + repeated;
2336
0
    if (repeated) {
2337
0
      zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(uintptr_t)repeated);
2338
0
    }
2339
2340
0
    heap->huge_list = list = list->next;
2341
0
    zend_mm_chunk_free(heap, q->ptr, q->size);
2342
0
    zend_mm_free_heap(heap, q, NULL, 0, NULL, 0);
2343
0
  }
2344
2345
  /* for each chunk */
2346
0
  p = heap->main_chunk;
2347
0
  do {
2348
0
    i = ZEND_MM_FIRST_PAGE;
2349
0
    while (i < p->free_tail) {
2350
0
      if (zend_mm_bitset_is_set(p->free_map, i)) {
2351
0
        if (p->map[i] & ZEND_MM_IS_SRUN) {
2352
0
          int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
2353
0
          zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2354
2355
0
          j = 0;
2356
0
          while (j < bin_elements[bin_num]) {
2357
0
            if (dbg->size != 0) {
2358
0
              leak.addr = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * j);
2359
0
              leak.size = dbg->size;
2360
0
              leak.filename = dbg->filename;
2361
0
              leak.orig_filename = dbg->orig_filename;
2362
0
              leak.lineno = dbg->lineno;
2363
0
              leak.orig_lineno = dbg->orig_lineno;
2364
2365
0
              zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
2366
0
              zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
2367
2368
0
              dbg->size = 0;
2369
0
              dbg->filename = NULL;
2370
0
              dbg->lineno = 0;
2371
2372
0
              repeated = zend_mm_find_leaks_small(p, i, j + 1, &leak) +
2373
0
                         zend_mm_find_leaks(heap, p, i + bin_pages[bin_num], &leak);
2374
0
              total += 1 + repeated;
2375
0
              if (repeated) {
2376
0
                zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(uintptr_t)repeated);
2377
0
              }
2378
0
            }
2379
0
            dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
2380
0
            j++;
2381
0
          }
2382
0
          i += bin_pages[bin_num];
2383
0
        } else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
2384
0
          int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
2385
0
          zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2386
2387
0
          leak.addr = (void*)((char*)p + ZEND_MM_PAGE_SIZE * i);
2388
0
          leak.size = dbg->size;
2389
0
          leak.filename = dbg->filename;
2390
0
          leak.orig_filename = dbg->orig_filename;
2391
0
          leak.lineno = dbg->lineno;
2392
0
          leak.orig_lineno = dbg->orig_lineno;
2393
2394
0
          zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
2395
0
          zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
2396
2397
0
          zend_mm_bitset_reset_range(p->free_map, i, pages_count);
2398
2399
0
          repeated = zend_mm_find_leaks(heap, p, i + pages_count, &leak);
2400
0
          total += 1 + repeated;
2401
0
          if (repeated) {
2402
0
            zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(uintptr_t)repeated);
2403
0
          }
2404
0
          i += pages_count;
2405
0
        }
2406
0
      } else {
2407
0
        i++;
2408
0
      }
2409
0
    }
2410
0
    p = p->next;
2411
0
  } while (p != heap->main_chunk);
2412
0
  if (total) {
2413
0
    zend_message_dispatcher(ZMSG_MEMORY_LEAKS_GRAND_TOTAL, &total);
2414
0
  }
2415
0
}
2416
#endif
2417
2418
#if ZEND_MM_CUSTOM
2419
static void *tracked_malloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
2420
static void tracked_free_all(zend_mm_heap *heap);
2421
static void *poison_malloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
2422
2423
static void zend_mm_check_freelists(zend_mm_heap *heap)
2424
0
{
2425
0
  for (uint32_t bin_num = 0; bin_num < ZEND_MM_BINS; bin_num++) {
2426
0
    zend_mm_free_slot *slot = heap->free_slot[bin_num];
2427
0
    while (slot) {
2428
0
      slot = zend_mm_get_next_free_slot(heap, bin_num, slot);
2429
0
    }
2430
0
  }
2431
0
}
2432
#endif
2433
2434
ZEND_API void zend_mm_shutdown(zend_mm_heap *heap, bool full, bool silent)
2435
278k
{
2436
278k
  zend_mm_chunk *p;
2437
278k
  zend_mm_huge_list *list;
2438
2439
278k
#if ZEND_MM_CUSTOM
2440
278k
  if (heap->use_custom_heap) {
2441
278k
    if (heap->custom_heap._malloc == tracked_malloc) {
2442
174k
      if (silent) {
2443
20.7k
        tracked_free_all(heap);
2444
20.7k
      }
2445
174k
      zend_hash_clean(heap->tracked_allocs);
2446
174k
      if (full) {
2447
0
        zend_hash_destroy(heap->tracked_allocs);
2448
0
        free(heap->tracked_allocs);
2449
        /* Make sure the heap free below does not use tracked_free(). */
2450
0
        heap->custom_heap._free = __zend_free;
2451
0
      }
2452
174k
#if ZEND_MM_STAT
2453
174k
      heap->size = 0;
2454
174k
      heap->real_size = 0;
2455
174k
#endif
2456
174k
    }
2457
2458
278k
    void (*shutdown)(bool, bool) = heap->custom_heap._shutdown;
2459
2460
278k
    if (full) {
2461
0
      heap->custom_heap._free(heap ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
2462
0
    }
2463
2464
278k
    if (shutdown) {
2465
0
      shutdown(full, silent);
2466
0
    }
2467
2468
278k
    return;
2469
278k
  }
2470
0
#endif
2471
2472
0
#if ZEND_DEBUG
2473
0
  if (!silent) {
2474
0
    char *tmp = getenv("ZEND_ALLOC_PRINT_LEAKS");
2475
0
    if (!tmp || ZEND_ATOL(tmp)) {
2476
0
      zend_mm_check_leaks(heap);
2477
0
    }
2478
0
  }
2479
0
#endif
2480
2481
  /* free huge blocks */
2482
0
  list = heap->huge_list;
2483
0
  heap->huge_list = NULL;
2484
0
  while (list) {
2485
0
    zend_mm_huge_list *q = list;
2486
0
    list = list->next;
2487
0
    zend_mm_chunk_free(heap, q->ptr, q->size);
2488
0
  }
2489
2490
  /* move all chunks except of the first one into the cache */
2491
0
  p = heap->main_chunk->next;
2492
0
  while (p != heap->main_chunk) {
2493
0
    zend_mm_chunk *q = p->next;
2494
0
    p->next = heap->cached_chunks;
2495
0
    heap->cached_chunks = p;
2496
0
    p = q;
2497
0
    heap->chunks_count--;
2498
0
    heap->cached_chunks_count++;
2499
0
  }
2500
2501
0
  if (full) {
2502
    /* free all cached chunks */
2503
0
    while (heap->cached_chunks) {
2504
0
      p = heap->cached_chunks;
2505
0
      heap->cached_chunks = p->next;
2506
0
      zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
2507
0
    }
2508
    /* free the first chunk */
2509
0
    zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
2510
0
  } else {
2511
    /* free some cached chunks to keep average count */
2512
0
    heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0;
2513
0
    while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count &&
2514
0
           heap->cached_chunks) {
2515
0
      p = heap->cached_chunks;
2516
0
      heap->cached_chunks = p->next;
2517
0
      zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
2518
0
      heap->cached_chunks_count--;
2519
0
    }
2520
    /* clear cached chunks */
2521
0
    p = heap->cached_chunks;
2522
0
    while (p != NULL) {
2523
0
      zend_mm_chunk *q = p->next;
2524
0
      memset(p, 0, sizeof(zend_mm_chunk));
2525
0
      p->next = q;
2526
0
      p = q;
2527
0
    }
2528
2529
    /* reinitialize the first chunk and heap */
2530
0
    p = heap->main_chunk;
2531
0
    p->heap = &p->heap_slot;
2532
0
    p->next = p;
2533
0
    p->prev = p;
2534
0
    p->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
2535
0
    p->free_tail = ZEND_MM_FIRST_PAGE;
2536
0
    p->num = 0;
2537
2538
0
#if ZEND_MM_STAT
2539
0
    heap->size = heap->peak = 0;
2540
0
#endif
2541
0
    memset(heap->free_slot, 0, sizeof(heap->free_slot));
2542
0
#if ZEND_MM_STAT || ZEND_MM_LIMIT
2543
0
    heap->real_size = (heap->cached_chunks_count + 1) * ZEND_MM_CHUNK_SIZE;
2544
0
#endif
2545
0
#if ZEND_MM_STAT
2546
0
    heap->real_peak = (heap->cached_chunks_count + 1) * ZEND_MM_CHUNK_SIZE;
2547
0
#endif
2548
0
    heap->chunks_count = 1;
2549
0
    heap->peak_chunks_count = 1;
2550
0
    heap->last_chunks_delete_boundary = 0;
2551
0
    heap->last_chunks_delete_count = 0;
2552
2553
0
    memset(p->free_map, 0, sizeof(p->free_map) + sizeof(p->map));
2554
0
    p->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
2555
0
    p->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
2556
2557
0
#if ZEND_DEBUG
2558
0
    ZEND_ASSERT(getpid() == heap->pid
2559
0
        && "heap was re-used without calling zend_mm_refresh_key_child() after a fork");
2560
0
#endif
2561
2562
0
    zend_mm_refresh_key(heap);
2563
0
  }
2564
0
}
2565
2566
/**************/
2567
/* PUBLIC API */
2568
/**************/
2569
2570
ZEND_API void* ZEND_FASTCALL _zend_mm_alloc(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2571
0
{
2572
0
  return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2573
0
}
2574
2575
ZEND_API void ZEND_FASTCALL _zend_mm_free(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2576
0
{
2577
0
  zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2578
0
}
2579
2580
void* ZEND_FASTCALL _zend_mm_realloc(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2581
0
{
2582
0
  return zend_mm_realloc_heap(heap, ptr, size, 0, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2583
0
}
2584
2585
void* ZEND_FASTCALL _zend_mm_realloc2(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2586
0
{
2587
0
  return zend_mm_realloc_heap(heap, ptr, size, 1, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2588
0
}
2589
2590
ZEND_API size_t ZEND_FASTCALL _zend_mm_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2591
0
{
2592
0
#if ZEND_MM_CUSTOM
2593
0
  if (UNEXPECTED(heap->use_custom_heap)) {
2594
0
    if (heap->custom_heap._malloc == tracked_malloc) {
2595
0
      zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
2596
0
      zval *size_zv = zend_hash_index_find(heap->tracked_allocs, h);
2597
0
      if  (size_zv) {
2598
0
        return Z_LVAL_P(size_zv);
2599
0
      }
2600
0
    } else if (heap->custom_heap._malloc != poison_malloc) {
2601
0
      return 0;
2602
0
    }
2603
0
  }
2604
0
#endif
2605
0
  return zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2606
0
}
2607
2608
/**********************/
2609
/* Allocation Manager */
2610
/**********************/
2611
2612
typedef struct _zend_alloc_globals {
2613
  zend_mm_heap *mm_heap;
2614
} zend_alloc_globals;
2615
2616
#ifdef ZTS
2617
static int alloc_globals_id;
2618
static size_t alloc_globals_offset;
2619
# define AG(v) ZEND_TSRMG_FAST(alloc_globals_offset, zend_alloc_globals *, v)
2620
#else
2621
4.96G
# define AG(v) (alloc_globals.v)
2622
static zend_alloc_globals alloc_globals;
2623
#endif
2624
2625
ZEND_API bool is_zend_mm(void)
2626
9
{
2627
9
#if ZEND_MM_CUSTOM
2628
9
  return !AG(mm_heap)->use_custom_heap;
2629
#else
2630
  return true;
2631
#endif
2632
9
}
2633
2634
ZEND_API bool is_zend_ptr(const void *ptr)
2635
0
{
2636
0
#if ZEND_MM_CUSTOM
2637
0
  if (AG(mm_heap)->use_custom_heap) {
2638
0
    if (AG(mm_heap)->custom_heap._malloc == tracked_malloc) {
2639
0
      zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
2640
0
      zval *size_zv = zend_hash_index_find(AG(mm_heap)->tracked_allocs, h);
2641
0
      if  (size_zv) {
2642
0
        return 1;
2643
0
      }
2644
0
    }
2645
0
    return 0;
2646
0
  }
2647
0
#endif
2648
2649
0
  if (AG(mm_heap)->main_chunk) {
2650
0
    zend_mm_chunk *chunk = AG(mm_heap)->main_chunk;
2651
2652
0
    do {
2653
0
      if (ptr >= (void*)chunk
2654
0
       && ptr < (void*)((char*)chunk + ZEND_MM_CHUNK_SIZE)) {
2655
0
        return 1;
2656
0
      }
2657
0
      chunk = chunk->next;
2658
0
    } while (chunk != AG(mm_heap)->main_chunk);
2659
0
  }
2660
2661
0
  zend_mm_huge_list *block = AG(mm_heap)->huge_list;
2662
0
  while (block) {
2663
0
    if (ptr >= block->ptr
2664
0
        && ptr < (void*)((char*)block->ptr + block->size)) {
2665
0
      return 1;
2666
0
    }
2667
0
    block = block->next;
2668
0
  }
2669
2670
0
  return 0;
2671
0
}
2672
2673
#if !ZEND_DEBUG && defined(HAVE_BUILTIN_CONSTANT_P)
2674
#undef _emalloc
2675
2676
#if ZEND_MM_CUSTOM
2677
# define ZEND_MM_CUSTOM_ALLOCATOR(size) do { \
2678
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
2679
      return AG(mm_heap)->custom_heap._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2680
    } \
2681
  } while (0)
2682
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr) do { \
2683
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
2684
      AG(mm_heap)->custom_heap._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2685
      return; \
2686
    } \
2687
  } while (0)
2688
#else
2689
# define ZEND_MM_CUSTOM_ALLOCATOR(size)
2690
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr)
2691
#endif
2692
2693
# define _ZEND_BIN_ALLOCATOR(_num, _size, _elements, _pages, _min_size, y) \
2694
  ZEND_API void* ZEND_FASTCALL _emalloc_ ## _size(void) { \
2695
    ZEND_MM_CUSTOM_ALLOCATOR(_size); \
2696
    if (_size < _min_size) { \
2697
      return _emalloc_ ## _min_size(); \
2698
    } \
2699
    return zend_mm_alloc_small(AG(mm_heap), _num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2700
  }
2701
2702
ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR, ZEND_MM_MIN_USEABLE_BIN_SIZE, y)
2703
2704
ZEND_API void* ZEND_FASTCALL _emalloc_large(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2705
{
2706
  ZEND_MM_CUSTOM_ALLOCATOR(size);
2707
  return zend_mm_alloc_large_ex(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2708
}
2709
2710
ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
2711
{
2712
  ZEND_MM_CUSTOM_ALLOCATOR(size);
2713
  return zend_mm_alloc_huge(AG(mm_heap), size);
2714
}
2715
2716
#if ZEND_DEBUG
2717
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, _min_size, y) \
2718
  ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
2719
    ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
2720
    if (_size < _min_size) { \
2721
      _efree_ ## _min_size(ptr); \
2722
      return; \
2723
    } \
2724
    { \
2725
      size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); \
2726
      zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
2727
      int page_num = page_offset / ZEND_MM_PAGE_SIZE; \
2728
      ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
2729
      ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_SRUN); \
2730
      ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(chunk->map[page_num]) == _num); \
2731
      zend_mm_free_small(AG(mm_heap), ptr, _num); \
2732
    } \
2733
  }
2734
#else
2735
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, _min_size, y) \
2736
  ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
2737
    ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
2738
    if (_size < _min_size) { \
2739
      _efree_ ## _min_size(ptr); \
2740
      return; \
2741
    } \
2742
    { \
2743
      zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
2744
      ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
2745
      zend_mm_free_small(AG(mm_heap), ptr, _num); \
2746
    } \
2747
  }
2748
#endif
2749
2750
ZEND_MM_BINS_INFO(_ZEND_BIN_FREE, ZEND_MM_MIN_USEABLE_BIN_SIZE, y)
2751
2752
ZEND_API void ZEND_FASTCALL _efree_large(void *ptr, size_t size)
2753
{
2754
  ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
2755
  {
2756
    size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
2757
    zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
2758
    int page_num = page_offset / ZEND_MM_PAGE_SIZE;
2759
    uint32_t pages_count = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE) / ZEND_MM_PAGE_SIZE;
2760
2761
    ZEND_MM_CHECK(chunk->heap == AG(mm_heap) && ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
2762
    ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_LRUN);
2763
    ZEND_ASSERT(ZEND_MM_LRUN_PAGES(chunk->map[page_num]) == pages_count);
2764
    zend_mm_free_large(AG(mm_heap), chunk, page_num, pages_count);
2765
  }
2766
}
2767
2768
ZEND_API void ZEND_FASTCALL _efree_huge(void *ptr, size_t size)
2769
{
2770
2771
  ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
2772
  zend_mm_free_huge(AG(mm_heap), ptr);
2773
}
2774
#endif
2775
2776
ZEND_API void* ZEND_FASTCALL _emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2777
1.48G
{
2778
1.48G
#if ZEND_MM_CUSTOM
2779
1.48G
  if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2780
1.48G
    return AG(mm_heap)->custom_heap._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2781
1.48G
  }
2782
0
#endif
2783
0
  return zend_mm_alloc_heap(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2784
1.48G
}
2785
2786
ZEND_API void ZEND_FASTCALL _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2787
1.47G
{
2788
1.47G
#if ZEND_MM_CUSTOM
2789
1.47G
  if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2790
1.47G
    AG(mm_heap)->custom_heap._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2791
1.47G
    return;
2792
1.47G
  }
2793
0
#endif
2794
0
  zend_mm_free_heap(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2795
0
}
2796
2797
ZEND_API void* ZEND_FASTCALL _erealloc(void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2798
30.2M
{
2799
30.2M
#if ZEND_MM_CUSTOM
2800
30.2M
  if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2801
30.2M
    return AG(mm_heap)->custom_heap._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2802
30.2M
  }
2803
0
#endif
2804
0
  return zend_mm_realloc_heap(AG(mm_heap), ptr, size, 0, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2805
30.2M
}
2806
2807
ZEND_API void* ZEND_FASTCALL _erealloc2(void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2808
686k
{
2809
686k
#if ZEND_MM_CUSTOM
2810
686k
  if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2811
686k
    return AG(mm_heap)->custom_heap._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2812
686k
  }
2813
0
#endif
2814
0
  return zend_mm_realloc_heap(AG(mm_heap), ptr, size, 1, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2815
686k
}
2816
2817
ZEND_API size_t ZEND_FASTCALL _zend_mem_block_size(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2818
0
{
2819
0
  return _zend_mm_block_size(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2820
0
}
2821
2822
ZEND_API void* ZEND_FASTCALL _safe_emalloc(size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2823
1.81M
{
2824
1.81M
  return _emalloc(zend_safe_address_guarded(nmemb, size, offset) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2825
1.81M
}
2826
2827
ZEND_API void* ZEND_FASTCALL _safe_malloc(size_t nmemb, size_t size, size_t offset)
2828
0
{
2829
0
  return pemalloc(zend_safe_address_guarded(nmemb, size, offset), 1);
2830
0
}
2831
2832
ZEND_API void* ZEND_FASTCALL _safe_erealloc(void *ptr, size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2833
1.56M
{
2834
1.56M
  return _erealloc(ptr, zend_safe_address_guarded(nmemb, size, offset) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2835
1.56M
}
2836
2837
ZEND_API void* ZEND_FASTCALL _safe_realloc(void *ptr, size_t nmemb, size_t size, size_t offset)
2838
0
{
2839
0
  return perealloc(ptr, zend_safe_address_guarded(nmemb, size, offset), 1);
2840
0
}
2841
2842
ZEND_API void* ZEND_FASTCALL _ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2843
273M
{
2844
273M
  void *p;
2845
2846
273M
  size = zend_safe_address_guarded(nmemb, size, 0);
2847
273M
  p = _emalloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2848
273M
  memset(p, 0, size);
2849
273M
  return p;
2850
273M
}
2851
2852
ZEND_API char* ZEND_FASTCALL _estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2853
1.11G
{
2854
1.11G
  size_t length;
2855
1.11G
  char *p;
2856
2857
1.11G
  length = strlen(s);
2858
1.11G
  if (UNEXPECTED(length + 1 == 0)) {
2859
0
    zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
2860
0
  }
2861
1.11G
  p = (char *) _emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2862
1.11G
  memcpy(p, s, length+1);
2863
1.11G
  return p;
2864
1.11G
}
2865
2866
ZEND_API char* ZEND_FASTCALL _estrndup(const char *s, size_t length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2867
6.96M
{
2868
6.96M
  char *p;
2869
2870
6.96M
  if (UNEXPECTED(length + 1 == 0)) {
2871
0
    zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
2872
0
  }
2873
6.96M
  p = (char *) _emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2874
6.96M
  memcpy(p, s, length);
2875
6.96M
  p[length] = 0;
2876
6.96M
  return p;
2877
6.96M
}
2878
2879
static ZEND_COLD ZEND_NORETURN void zend_out_of_memory(void);
2880
2881
ZEND_API char* ZEND_FASTCALL zend_strndup(const char *s, size_t length)
2882
8
{
2883
8
  char *p;
2884
2885
8
  if (UNEXPECTED(length + 1 == 0)) {
2886
0
    zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
2887
0
  }
2888
8
  p = (char *) malloc(length + 1);
2889
8
  if (UNEXPECTED(p == NULL)) {
2890
0
    zend_out_of_memory();
2891
0
  }
2892
8
  if (EXPECTED(length)) {
2893
8
    memcpy(p, s, length);
2894
8
  }
2895
8
  p[length] = 0;
2896
8
  return p;
2897
8
}
2898
2899
ZEND_API zend_result zend_set_memory_limit(size_t memory_limit)
2900
287k
{
2901
287k
#if ZEND_MM_LIMIT
2902
287k
  zend_mm_heap *heap = AG(mm_heap);
2903
2904
287k
  if (UNEXPECTED(memory_limit < heap->real_size)) {
2905
45
    if (memory_limit >= heap->real_size - heap->cached_chunks_count * ZEND_MM_CHUNK_SIZE) {
2906
      /* free some cached chunks to fit into new memory limit */
2907
0
      do {
2908
0
        zend_mm_chunk *p = heap->cached_chunks;
2909
0
        heap->cached_chunks = p->next;
2910
0
        zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
2911
0
        heap->cached_chunks_count--;
2912
0
        heap->real_size -= ZEND_MM_CHUNK_SIZE;
2913
0
      } while (memory_limit < heap->real_size);
2914
0
      return SUCCESS;
2915
0
    }
2916
45
    return FAILURE;
2917
45
  }
2918
287k
  AG(mm_heap)->limit = memory_limit;
2919
287k
#endif
2920
287k
  return SUCCESS;
2921
287k
}
2922
2923
ZEND_API bool zend_alloc_in_memory_limit_error_reporting(void)
2924
2.73M
{
2925
2.73M
#if ZEND_MM_LIMIT
2926
2.73M
  return AG(mm_heap)->overflow;
2927
#else
2928
  return false;
2929
#endif
2930
2.73M
}
2931
2932
ZEND_API size_t zend_memory_usage(bool real_usage)
2933
66
{
2934
66
#if ZEND_MM_STAT
2935
66
  if (real_usage) {
2936
32
    return AG(mm_heap)->real_size;
2937
34
  } else {
2938
34
    size_t usage = AG(mm_heap)->size;
2939
34
    return usage;
2940
34
  }
2941
0
#endif
2942
0
  return 0;
2943
66
}
2944
2945
ZEND_API size_t zend_memory_peak_usage(bool real_usage)
2946
0
{
2947
0
#if ZEND_MM_STAT
2948
0
  if (real_usage) {
2949
0
    return AG(mm_heap)->real_peak;
2950
0
  } else {
2951
0
    return AG(mm_heap)->peak;
2952
0
  }
2953
0
#endif
2954
0
  return 0;
2955
0
}
2956
2957
ZEND_API void zend_memory_reset_peak_usage(void)
2958
0
{
2959
0
#if ZEND_MM_STAT
2960
0
  AG(mm_heap)->real_peak = AG(mm_heap)->real_size;
2961
0
  AG(mm_heap)->peak = AG(mm_heap)->size;
2962
0
#endif
2963
0
}
2964
2965
ZEND_API void shutdown_memory_manager(bool silent, bool full_shutdown)
2966
278k
{
2967
278k
  zend_mm_shutdown(AG(mm_heap), full_shutdown, silent);
2968
278k
}
2969
2970
ZEND_API void refresh_memory_manager(void)
2971
0
{
2972
0
  zend_mm_refresh_key_child(AG(mm_heap));
2973
0
}
2974
2975
static ZEND_COLD ZEND_NORETURN void zend_out_of_memory(void)
2976
0
{
2977
0
  fprintf(stderr, "Out of memory\n");
2978
0
  exit(1);
2979
0
}
2980
2981
#if ZEND_MM_CUSTOM
2982
1.00G
static zend_always_inline void tracked_add(zend_mm_heap *heap, void *ptr, size_t size) {
2983
1.00G
  zval size_zv;
2984
1.00G
  zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
2985
1.00G
  ZEND_ASSERT((void *) (uintptr_t) (h << ZEND_MM_ALIGNMENT_LOG2) == ptr);
2986
1.00G
  ZVAL_LONG(&size_zv, size);
2987
1.00G
  zend_hash_index_add_new(heap->tracked_allocs, h, &size_zv);
2988
1.00G
}
2989
2990
988M
static zend_always_inline zval *tracked_get_size_zv(zend_mm_heap *heap, void *ptr) {
2991
988M
  zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
2992
988M
  zval *size_zv = zend_hash_index_find(heap->tracked_allocs, h);
2993
988M
  ZEND_ASSERT(size_zv && "Trying to free pointer not allocated through ZendMM");
2994
988M
  return size_zv;
2995
988M
}
2996
2997
1.00G
static zend_always_inline void tracked_check_limit(zend_mm_heap *heap, size_t add_size) {
2998
1.00G
#if ZEND_MM_STAT
2999
1.00G
  if (add_size > heap->limit - heap->size && !heap->overflow) {
3000
478
#if ZEND_DEBUG
3001
478
    zend_mm_safe_error(heap,
3002
478
      "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)",
3003
478
      heap->limit, "file", 0, add_size);
3004
#else
3005
    zend_mm_safe_error(heap,
3006
      "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)",
3007
      heap->limit, add_size);
3008
#endif
3009
478
  }
3010
1.00G
#endif
3011
1.00G
}
3012
3013
static void *tracked_malloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
3014
978M
{
3015
978M
  zend_mm_heap *heap = AG(mm_heap);
3016
978M
  tracked_check_limit(heap, size);
3017
3018
978M
  void *ptr = malloc(size);
3019
978M
  if (!ptr) {
3020
0
    zend_out_of_memory();
3021
0
  }
3022
3023
978M
  tracked_add(heap, ptr, size);
3024
978M
#if ZEND_MM_STAT
3025
978M
  heap->size += size;
3026
978M
  heap->real_size = heap->size;
3027
978M
#endif
3028
978M
  return ptr;
3029
978M
}
3030
3031
964M
static void tracked_free(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) {
3032
964M
  if (!ptr) {
3033
510k
    return;
3034
510k
  }
3035
3036
964M
  zend_mm_heap *heap = AG(mm_heap);
3037
964M
  zval *size_zv = tracked_get_size_zv(heap, ptr);
3038
964M
#if ZEND_MM_STAT
3039
964M
  heap->size -= Z_LVAL_P(size_zv);
3040
964M
  heap->real_size = heap->size;
3041
964M
#endif
3042
964M
  zend_hash_del_bucket(heap->tracked_allocs, (Bucket *) size_zv);
3043
964M
  free(ptr);
3044
964M
}
3045
3046
28.2M
static void *tracked_realloc(void *ptr, size_t new_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC) {
3047
28.2M
  zend_mm_heap *heap = AG(mm_heap);
3048
28.2M
  zval *old_size_zv = NULL;
3049
28.2M
  size_t old_size = 0;
3050
28.2M
  if (ptr) {
3051
23.9M
    old_size_zv = tracked_get_size_zv(heap, ptr);
3052
23.9M
    old_size = Z_LVAL_P(old_size_zv);
3053
23.9M
  }
3054
3055
28.2M
  if (new_size > old_size) {
3056
23.2M
    tracked_check_limit(heap, new_size - old_size);
3057
23.2M
  }
3058
3059
  /* Delete information about old allocation only after checking the memory limit. */
3060
28.2M
  if (old_size_zv) {
3061
23.9M
    zend_hash_del_bucket(heap->tracked_allocs, (Bucket *) old_size_zv);
3062
23.9M
  }
3063
3064
28.2M
  ptr = __zend_realloc(ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
3065
28.2M
  tracked_add(heap, ptr, new_size);
3066
28.2M
#if ZEND_MM_STAT
3067
28.2M
  heap->size += new_size - old_size;
3068
28.2M
  heap->real_size = heap->size;
3069
28.2M
#endif
3070
28.2M
  return ptr;
3071
28.2M
}
3072
3073
20.7k
static void tracked_free_all(zend_mm_heap *heap) {
3074
20.7k
  HashTable *tracked_allocs = heap->tracked_allocs;
3075
20.7k
  zend_ulong h;
3076
225M
  ZEND_HASH_FOREACH_NUM_KEY(tracked_allocs, h) {
3077
225M
    void *ptr = (void *) (uintptr_t) (h << ZEND_MM_ALIGNMENT_LOG2);
3078
225M
    free(ptr);
3079
225M
  } ZEND_HASH_FOREACH_END();
3080
20.7k
}
3081
3082
static void* poison_malloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
3083
0
{
3084
0
  zend_mm_heap *heap = AG(mm_heap);
3085
3086
0
  if (SIZE_MAX - heap->debug.padding * 2 < size) {
3087
0
    zend_mm_panic("Integer overflow in memory allocation");
3088
0
  }
3089
0
  size += heap->debug.padding * 2;
3090
3091
0
  void *ptr = zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
3092
3093
0
  if (EXPECTED(ptr)) {
3094
0
    if (heap->debug.poison_alloc) {
3095
0
      memset(ptr, heap->debug.poison_alloc_value, size);
3096
0
    }
3097
3098
0
    ptr = (char*)ptr + heap->debug.padding;
3099
0
  }
3100
3101
0
  return ptr;
3102
0
}
3103
3104
static void poison_free(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
3105
0
{
3106
0
  zend_mm_heap *heap = AG(mm_heap);
3107
3108
0
  if (EXPECTED(ptr)) {
3109
    /* zend_mm_shutdown() will try to free the heap when custom handlers
3110
     * are installed */
3111
0
    if (UNEXPECTED(ptr == heap)) {
3112
0
      return;
3113
0
    }
3114
3115
0
    ptr = (char*)ptr - heap->debug.padding;
3116
3117
0
    size_t size = zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
3118
3119
0
    if (heap->debug.poison_free) {
3120
0
      memset(ptr, heap->debug.poison_free_value, size);
3121
0
    }
3122
0
  }
3123
3124
0
  zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
3125
0
}
3126
3127
static void* poison_realloc(void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
3128
0
{
3129
0
  zend_mm_heap *heap = AG(mm_heap);
3130
3131
0
  void *new = poison_malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
3132
3133
0
  if (ptr) {
3134
      /* Determine the size of the old allocation from the unpadded pointer. */
3135
0
    size_t oldsize = zend_mm_size(heap, (char*)ptr - heap->debug.padding ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
3136
3137
    /* Remove the padding size to determine the size that is available to the user. */
3138
0
    oldsize -= (2 * heap->debug.padding);
3139
3140
0
#if ZEND_DEBUG
3141
0
    oldsize -= sizeof(zend_mm_debug_info);
3142
0
#endif
3143
3144
0
    memcpy(new, ptr, MIN(oldsize, size));
3145
0
    poison_free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
3146
0
  }
3147
3148
0
  return new;
3149
0
}
3150
3151
static size_t poison_gc(void)
3152
0
{
3153
0
  zend_mm_heap *heap = AG(mm_heap);
3154
3155
0
  void* (*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
3156
0
  void  (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
3157
0
  void* (*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
3158
0
  size_t (*_gc)(void);
3159
0
  void   (*_shutdown)(bool, bool);
3160
3161
0
  zend_mm_get_custom_handlers_ex(heap, &_malloc, &_free, &_realloc, &_gc, &_shutdown);
3162
0
  zend_mm_set_custom_handlers_ex(heap, NULL, NULL, NULL, NULL, NULL);
3163
3164
0
  size_t collected = zend_mm_gc(heap);
3165
3166
0
  zend_mm_set_custom_handlers_ex(heap, _malloc, _free, _realloc, _gc, _shutdown);
3167
3168
0
  return collected;
3169
0
}
3170
3171
static void poison_shutdown(bool full, bool silent)
3172
0
{
3173
0
  zend_mm_heap *heap = AG(mm_heap);
3174
3175
0
  void* (*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
3176
0
  void  (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
3177
0
  void* (*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
3178
0
  size_t (*_gc)(void);
3179
0
  void   (*_shutdown)(bool, bool);
3180
3181
0
  zend_mm_get_custom_handlers_ex(heap, &_malloc, &_free, &_realloc, &_gc, &_shutdown);
3182
0
  zend_mm_set_custom_handlers_ex(heap, NULL, NULL, NULL, NULL, NULL);
3183
3184
0
  if (heap->debug.check_freelists_on_shutdown) {
3185
0
    zend_mm_check_freelists(heap);
3186
0
  }
3187
3188
0
  zend_mm_shutdown(heap, full, silent);
3189
3190
0
  if (!full) {
3191
0
    zend_mm_set_custom_handlers_ex(heap, _malloc, _free, _realloc, _gc, _shutdown);
3192
0
  }
3193
0
}
3194
3195
static void poison_enable(zend_mm_heap *heap, char *parameters)
3196
0
{
3197
0
  char *tmp = parameters;
3198
0
  char *end = tmp + strlen(tmp);
3199
3200
  /* Trim heading/trailing whitespaces */
3201
0
  while (*tmp == ' ' || *tmp == '\t' || *tmp == '\n') {
3202
0
    tmp++;
3203
0
  }
3204
0
  while (end != tmp && (*(end-1) == ' ' || *(end-1) == '\t' || *(end-1) == '\n')) {
3205
0
    end--;
3206
0
  }
3207
3208
0
  if (tmp == end) {
3209
0
    return;
3210
0
  }
3211
3212
0
  while (1) {
3213
0
    char *key = tmp;
3214
3215
0
    tmp = memchr(tmp, '=', end - tmp);
3216
0
    if (!tmp) {
3217
0
      size_t key_len = end - key;
3218
0
      fprintf(stderr, "Unexpected EOF after ZEND_MM_DEBUG parameter '%.*s', expected '='\n",
3219
0
          (int)key_len, key);
3220
0
      return;
3221
0
    }
3222
3223
0
    size_t key_len = tmp - key;
3224
0
    char *value = tmp + 1;
3225
3226
0
    if (key_len == strlen("poison_alloc")
3227
0
        && !memcmp(key, "poison_alloc", key_len)) {
3228
3229
0
      heap->debug.poison_alloc = true;
3230
0
      heap->debug.poison_alloc_value = (uint8_t) ZEND_STRTOUL(value, &tmp, 0);
3231
3232
0
    } else if (key_len == strlen("poison_free")
3233
0
        && !memcmp(key, "poison_free", key_len)) {
3234
3235
0
      heap->debug.poison_free = true;
3236
0
      heap->debug.poison_free_value = (uint8_t) ZEND_STRTOUL(value, &tmp, 0);
3237
3238
0
    } else if (key_len == strlen("padding")
3239
0
        && !memcmp(key, "padding", key_len)) {
3240
3241
0
      uint8_t padding = ZEND_STRTOUL(value, &tmp, 0);
3242
0
      if (ZEND_MM_ALIGNED_SIZE(padding) != padding) {
3243
0
        fprintf(stderr, "ZEND_MM_DEBUG padding must be a multiple of %u, %u given\n",
3244
0
            (unsigned int)ZEND_MM_ALIGNMENT,
3245
0
            (unsigned int)padding);
3246
0
        return;
3247
0
      }
3248
0
      heap->debug.padding = padding;
3249
3250
0
    } else if (key_len == strlen("check_freelists_on_shutdown")
3251
0
        && !memcmp(key, "check_freelists_on_shutdown", key_len)) {
3252
3253
0
      heap->debug.check_freelists_on_shutdown = (bool) ZEND_STRTOUL(value, &tmp, 0);
3254
3255
0
    } else {
3256
0
      fprintf(stderr, "Unknown ZEND_MM_DEBUG parameter: '%.*s'\n",
3257
0
          (int)key_len, key);
3258
0
      return;
3259
0
    }
3260
3261
0
    if (tmp == end) {
3262
0
      break;
3263
0
    }
3264
0
    if (*tmp != ',') {
3265
0
      fprintf(stderr, "Unexpected '%c' after value of ZEND_MM_DEBUG parameter '%.*s', expected ','\n",
3266
0
          *tmp, (int)key_len, key);
3267
0
      return;
3268
0
    }
3269
0
    tmp++;
3270
0
  }
3271
3272
0
  zend_mm_set_custom_handlers_ex(heap, poison_malloc, poison_free,
3273
0
      poison_realloc, poison_gc, poison_shutdown);
3274
0
}
3275
#endif
3276
3277
static void alloc_globals_ctor(zend_alloc_globals *alloc_globals)
3278
16
{
3279
16
  char *tmp;
3280
3281
16
#if ZEND_MM_CUSTOM
3282
16
  tmp = getenv("USE_ZEND_ALLOC");
3283
16
  if (tmp && !ZEND_ATOL(tmp)) {
3284
16
    bool tracked = (tmp = getenv("USE_TRACKED_ALLOC")) && ZEND_ATOL(tmp);
3285
16
    zend_mm_heap *mm_heap = alloc_globals->mm_heap = malloc(sizeof(zend_mm_heap));
3286
16
    memset(mm_heap, 0, sizeof(zend_mm_heap));
3287
16
    mm_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
3288
16
    mm_heap->limit = (size_t)Z_L(-1) >> 1;
3289
16
    mm_heap->overflow = 0;
3290
3291
16
    if (!tracked) {
3292
      /* Use system allocator. */
3293
8
      mm_heap->custom_heap._malloc = __zend_malloc;
3294
8
      mm_heap->custom_heap._free = __zend_free;
3295
8
      mm_heap->custom_heap._realloc = __zend_realloc;
3296
8
    } else {
3297
      /* Use system allocator and track allocations for auto-free. */
3298
8
      mm_heap->custom_heap._malloc = tracked_malloc;
3299
8
      mm_heap->custom_heap._free = tracked_free;
3300
8
      mm_heap->custom_heap._realloc = tracked_realloc;
3301
8
      mm_heap->tracked_allocs = malloc(sizeof(HashTable));
3302
8
      zend_hash_init(mm_heap->tracked_allocs, 1024, NULL, NULL, 1);
3303
8
    }
3304
16
    return;
3305
16
  }
3306
0
#endif
3307
3308
0
  tmp = getenv("USE_ZEND_ALLOC_HUGE_PAGES");
3309
0
  if (tmp && ZEND_ATOL(tmp)) {
3310
0
    zend_mm_use_huge_pages = true;
3311
0
  }
3312
0
  alloc_globals->mm_heap = zend_mm_init();
3313
3314
0
#if ZEND_MM_CUSTOM
3315
0
  ZEND_ASSERT(!alloc_globals->mm_heap->tracked_allocs);
3316
0
  tmp = getenv("ZEND_MM_DEBUG");
3317
0
  if (tmp) {
3318
0
    poison_enable(alloc_globals->mm_heap, tmp);
3319
0
  }
3320
0
#endif
3321
0
}
3322
3323
#ifdef ZTS
3324
static void alloc_globals_dtor(zend_alloc_globals *alloc_globals)
3325
{
3326
  zend_mm_shutdown(alloc_globals->mm_heap, 1, 1);
3327
}
3328
#endif
3329
3330
ZEND_API void start_memory_manager(void)
3331
16
{
3332
16
#ifndef _WIN32
3333
16
#  if defined(_SC_PAGESIZE)
3334
16
  REAL_PAGE_SIZE = sysconf(_SC_PAGESIZE);
3335
#  elif defined(_SC_PAGE_SIZE)
3336
  REAL_PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
3337
#  endif
3338
16
#endif
3339
#ifdef ZTS
3340
  ts_allocate_fast_id(&alloc_globals_id, &alloc_globals_offset, sizeof(zend_alloc_globals), (ts_allocate_ctor) alloc_globals_ctor, (ts_allocate_dtor) alloc_globals_dtor);
3341
#else
3342
16
  alloc_globals_ctor(&alloc_globals);
3343
16
#endif
3344
16
}
3345
3346
ZEND_API zend_mm_heap *zend_mm_set_heap(zend_mm_heap *new_heap)
3347
0
{
3348
0
  zend_mm_heap *old_heap;
3349
3350
0
  old_heap = AG(mm_heap);
3351
0
  AG(mm_heap) = (zend_mm_heap*)new_heap;
3352
0
  return (zend_mm_heap*)old_heap;
3353
0
}
3354
3355
ZEND_API zend_mm_heap *zend_mm_get_heap(void)
3356
0
{
3357
0
  return AG(mm_heap);
3358
0
}
3359
3360
ZEND_API bool zend_mm_is_custom_heap(zend_mm_heap *new_heap)
3361
0
{
3362
0
#if ZEND_MM_CUSTOM
3363
0
  return AG(mm_heap)->use_custom_heap;
3364
#else
3365
  return 0;
3366
#endif
3367
0
}
3368
3369
ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap,
3370
                                          void* (*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3371
                                          void  (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3372
                                          void* (*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC))
3373
0
{
3374
0
#if ZEND_MM_CUSTOM
3375
0
  zend_mm_set_custom_handlers_ex(heap, _malloc, _free, _realloc, NULL, NULL);
3376
0
#endif
3377
0
}
3378
3379
ZEND_API void zend_mm_set_custom_handlers_ex(zend_mm_heap *heap,
3380
                                          void* (*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3381
                                          void  (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3382
                                          void* (*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3383
                                          size_t (*_gc)(void),
3384
                                          void   (*_shutdown)(bool, bool))
3385
0
{
3386
0
#if ZEND_MM_CUSTOM
3387
0
  zend_mm_heap *_heap = (zend_mm_heap*)heap;
3388
3389
0
  if (!_malloc && !_free && !_realloc) {
3390
0
    _heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
3391
0
  } else {
3392
0
    _heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
3393
0
    _heap->custom_heap._malloc = _malloc;
3394
0
    _heap->custom_heap._free = _free;
3395
0
    _heap->custom_heap._realloc = _realloc;
3396
0
    _heap->custom_heap._gc = _gc;
3397
0
    _heap->custom_heap._shutdown = _shutdown;
3398
0
  }
3399
0
#endif
3400
0
}
3401
3402
ZEND_API void zend_mm_get_custom_handlers(zend_mm_heap *heap,
3403
                                             void* (**_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3404
                                             void  (**_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3405
                                             void* (**_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC))
3406
0
{
3407
0
#if ZEND_MM_CUSTOM
3408
0
  zend_mm_get_custom_handlers_ex(heap, _malloc, _free, _realloc, NULL, NULL);
3409
0
#endif
3410
0
}
3411
3412
ZEND_API void zend_mm_get_custom_handlers_ex(zend_mm_heap *heap,
3413
                                             void* (**_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3414
                                             void  (**_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3415
                                             void* (**_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3416
                                             size_t (**_gc)(void),
3417
                                             void   (**_shutdown)(bool, bool))
3418
0
{
3419
0
#if ZEND_MM_CUSTOM
3420
0
  zend_mm_heap *_heap = (zend_mm_heap*)heap;
3421
3422
0
  if (heap->use_custom_heap) {
3423
0
    *_malloc = _heap->custom_heap._malloc;
3424
0
    *_free = _heap->custom_heap._free;
3425
0
    *_realloc = _heap->custom_heap._realloc;
3426
0
    if (_gc != NULL) {
3427
0
      *_gc = _heap->custom_heap._gc;
3428
0
    }
3429
0
    if (_shutdown != NULL) {
3430
0
      *_shutdown = _heap->custom_heap._shutdown;
3431
0
    }
3432
0
  } else {
3433
0
    *_malloc = NULL;
3434
0
    *_free = NULL;
3435
0
    *_realloc = NULL;
3436
0
    if (_gc != NULL) {
3437
0
      *_gc = NULL;
3438
0
    }
3439
0
    if (_shutdown != NULL) {
3440
0
      *_shutdown = NULL;
3441
0
    }
3442
0
  }
3443
#else
3444
  *_malloc = NULL;
3445
  *_free = NULL;
3446
  *_realloc = NULL;
3447
  *_gc = NULL;
3448
  *_shutdown = NULL;
3449
#endif
3450
0
}
3451
3452
ZEND_API zend_mm_storage *zend_mm_get_storage(zend_mm_heap *heap)
3453
0
{
3454
0
#if ZEND_MM_STORAGE
3455
0
  return heap->storage;
3456
#else
3457
  return NULL;
3458
#endif
3459
0
}
3460
3461
ZEND_API zend_mm_heap *zend_mm_startup(void)
3462
0
{
3463
0
  return zend_mm_init();
3464
0
}
3465
3466
ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void *data, size_t data_size)
3467
0
{
3468
0
#if ZEND_MM_STORAGE
3469
0
  zend_mm_storage tmp_storage, *storage;
3470
0
  zend_mm_chunk *chunk;
3471
0
  zend_mm_heap *heap;
3472
3473
0
  memcpy((zend_mm_handlers*)&tmp_storage.handlers, handlers, sizeof(zend_mm_handlers));
3474
0
  tmp_storage.data = data;
3475
0
  chunk = (zend_mm_chunk*)handlers->chunk_alloc(&tmp_storage, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
3476
0
  if (UNEXPECTED(chunk == NULL)) {
3477
0
#if ZEND_MM_ERROR
3478
0
    fprintf(stderr, "Can't initialize heap\n");
3479
0
#endif
3480
0
    return NULL;
3481
0
  }
3482
0
  heap = &chunk->heap_slot;
3483
0
  chunk->heap = heap;
3484
0
  chunk->next = chunk;
3485
0
  chunk->prev = chunk;
3486
0
  chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
3487
0
  chunk->free_tail = ZEND_MM_FIRST_PAGE;
3488
0
  chunk->num = 0;
3489
0
  chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
3490
0
  chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
3491
0
  heap->main_chunk = chunk;
3492
0
  heap->cached_chunks = NULL;
3493
0
  heap->chunks_count = 1;
3494
0
  heap->peak_chunks_count = 1;
3495
0
  heap->cached_chunks_count = 0;
3496
0
  heap->avg_chunks_count = 1.0;
3497
0
  heap->last_chunks_delete_boundary = 0;
3498
0
  heap->last_chunks_delete_count = 0;
3499
0
#if ZEND_MM_STAT || ZEND_MM_LIMIT
3500
0
  heap->real_size = ZEND_MM_CHUNK_SIZE;
3501
0
#endif
3502
0
#if ZEND_MM_STAT
3503
0
  heap->real_peak = ZEND_MM_CHUNK_SIZE;
3504
0
  heap->size = 0;
3505
0
  heap->peak = 0;
3506
0
#endif
3507
0
  zend_mm_init_key(heap);
3508
0
#if ZEND_MM_LIMIT
3509
0
  heap->limit = (size_t)Z_L(-1) >> 1;
3510
0
  heap->overflow = 0;
3511
0
#endif
3512
0
#if ZEND_MM_CUSTOM
3513
0
  heap->use_custom_heap = 0;
3514
0
#endif
3515
0
  heap->storage = &tmp_storage;
3516
0
  heap->huge_list = NULL;
3517
0
  memset(heap->free_slot, 0, sizeof(heap->free_slot));
3518
0
  storage = _zend_mm_alloc(heap, sizeof(zend_mm_storage) + data_size ZEND_FILE_LINE_CC ZEND_FILE_LINE_CC);
3519
0
  if (!storage) {
3520
0
    handlers->chunk_free(&tmp_storage, chunk, ZEND_MM_CHUNK_SIZE);
3521
0
#if ZEND_MM_ERROR
3522
0
    fprintf(stderr, "Can't initialize heap\n");
3523
0
#endif
3524
0
    return NULL;
3525
0
  }
3526
0
  memcpy(storage, &tmp_storage, sizeof(zend_mm_storage));
3527
0
  if (data) {
3528
0
    storage->data = (void*)(((char*)storage + sizeof(zend_mm_storage)));
3529
0
    memcpy(storage->data, data, data_size);
3530
0
  }
3531
0
  heap->storage = storage;
3532
0
#if ZEND_DEBUG
3533
0
  heap->pid = getpid();
3534
0
#endif
3535
0
  return heap;
3536
#else
3537
  return NULL;
3538
#endif
3539
0
}
3540
3541
ZEND_API void * __zend_malloc(size_t len ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
3542
508M
{
3543
508M
  void *tmp = malloc(len);
3544
508M
  if (EXPECTED(tmp || !len)) {
3545
508M
    return tmp;
3546
508M
  }
3547
0
  zend_out_of_memory();
3548
508M
}
3549
3550
ZEND_API void * __zend_calloc(size_t nmemb, size_t len ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
3551
0
{
3552
0
  void *tmp;
3553
3554
0
  len = zend_safe_address_guarded(nmemb, len, 0);
3555
0
  tmp = __zend_malloc(len ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
3556
0
  memset(tmp, 0, len);
3557
0
  return tmp;
3558
0
}
3559
3560
ZEND_API void * __zend_realloc(void *p, size_t len ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
3561
30.9M
{
3562
30.9M
  p = realloc(p, len);
3563
30.9M
  if (EXPECTED(p || !len)) {
3564
30.9M
    return p;
3565
30.9M
  }
3566
0
  zend_out_of_memory();
3567
30.9M
}
3568
3569
ZEND_API void __zend_free(void *p ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
3570
509M
{
3571
509M
  free(p);
3572
509M
  return;
3573
509M
}
3574
3575
ZEND_API char * __zend_strdup(const char *s)
3576
0
{
3577
0
  char *tmp = strdup(s);
3578
0
  if (EXPECTED(tmp)) {
3579
0
    return tmp;
3580
0
  }
3581
0
  zend_out_of_memory();
3582
0
}
3583
3584
#ifdef ZTS
3585
size_t zend_mm_globals_size(void)
3586
{
3587
  return sizeof(zend_alloc_globals);
3588
}
3589
#endif