Coverage Report

Created: 2025-09-27 06:26

/src/php-src/ext/opcache/shared_alloc_mmap.c
/*
   +----------------------------------------------------------------------+
   | Zend OPcache                                                         |
   +----------------------------------------------------------------------+
   | Copyright (c) The PHP Group                                          |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | https://www.php.net/license/3_01.txt                                 |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
   | Authors: Andi Gutmans <andi@php.net>                                 |
   |          Zeev Suraski <zeev@php.net>                                 |
   |          Stanislav Malyshev <stas@zend.com>                          |
   |          Dmitry Stogov <dmitry@php.net>                              |
   +----------------------------------------------------------------------+
*/

#include "zend_shared_alloc.h"
#ifdef HAVE_JIT
# include "jit/zend_jit.h"
#endif

#ifdef USE_MMAP

#include <sys/types.h>
#include <sys/stat.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#ifdef __APPLE__
#include <mach/vm_statistics.h>
#endif

#include "zend_execute.h"
#ifdef HAVE_PROCCTL
#include <sys/procctl.h>
#endif

#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
# define MAP_ANONYMOUS MAP_ANON
#endif
#if defined(MAP_ALIGNED_SUPER)
# include <sys/types.h>
# include <sys/sysctl.h>
# include <sys/user.h>
# define MAP_HUGETLB MAP_ALIGNED_SUPER
#endif

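One recurring building block before the allocator logic: the address computations below repeatedly round up to a 2 MiB huge-page boundary with ZEND_MM_ALIGNED_SIZE_EX, which comes from Zend's allocator headers. The short program below is an illustration only, assuming the macro has the conventional power-of-two align-up semantics; it is not part of shared_alloc_mmap.c.

/* Illustration, not part of shared_alloc_mmap.c. ALIGN_UP is a stand-in,
 * assumed to match ZEND_MM_ALIGNED_SIZE_EX's power-of-two align-up behavior. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, align) (((x) + ((uintptr_t)(align) - 1)) & ~((uintptr_t)(align) - 1))

int main(void)
{
    uintptr_t huge_page_size = 2 * 1024 * 1024;   /* 2 MiB */
    uintptr_t end = 0x12345678u;                  /* arbitrary mapping end */
    uintptr_t up = ALIGN_UP(end, huge_page_size); /* next 2 MiB boundary: 0x12400000 */

    assert(up >= end && up % huge_page_size == 0);
    assert(up - end < huge_page_size);            /* rounds up by less than one page */
    printf("%#lx -> %#lx\n", (unsigned long)end, (unsigned long)up);
    return 0;
}
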
#if defined(HAVE_JIT) && (defined(__linux__) || defined(__FreeBSD__)) && (defined(__x86_64__) || defined(__aarch64__)) && !defined(__SANITIZE_ADDRESS__)
static void *find_preferred_mmap_base(size_t requested_size)
{
    size_t huge_page_size = 2 * 1024 * 1024;
    uintptr_t last_free_addr = huge_page_size;
    uintptr_t last_candidate = (uintptr_t)MAP_FAILED;
    uintptr_t start, end, text_start = 0;
#if defined(__linux__)
    FILE *f;
    char buffer[MAXPATHLEN];

    f = fopen("/proc/self/maps", "r");
    if (!f) {
        return MAP_FAILED;
    }

    while (fgets(buffer, MAXPATHLEN, f) && sscanf(buffer, "%lx-%lx", &start, &end) == 2) {
        /* Don't place the segment directly before or after the heap segment. Due to a selinux bug,
         * a segment directly preceding or following the heap is interpreted as heap memory, which
         * will result in an execheap violation for the JIT.
         * See https://bugzilla.kernel.org/show_bug.cgi?id=218258. */
        bool heap_segment = strstr(buffer, "[heap]") != NULL;
        if (heap_segment) {
            uintptr_t start_base = start & ~(huge_page_size - 1);
            if (last_free_addr + requested_size >= start_base) {
                last_free_addr = ZEND_MM_ALIGNED_SIZE_EX(end + huge_page_size, huge_page_size);
                continue;
            }
        }
        if ((uintptr_t)execute_ex >= start) {
            /* The current segment lies before the PHP .text segment, or is the .text segment itself.
             * Search for candidates at the end of the free segment near the .text segment,
             * so that candidates are not missed because of a large hole. */
            if (last_free_addr + requested_size <= start) {
                last_candidate = ZEND_MM_ALIGNED_SIZE_EX(start - requested_size, huge_page_size);
                if (last_candidate + requested_size > start) {
                    last_candidate -= huge_page_size;
                }
            }
            if ((uintptr_t)execute_ex < end) {
                /* the current segment is the PHP .text segment itself */
                if (last_candidate != (uintptr_t)MAP_FAILED) {
                    if (end - last_candidate < UINT32_MAX) {
                        /* we have found a big enough hole before the .text segment */
                        break;
                    }
                    last_candidate = (uintptr_t)MAP_FAILED;
                }
                text_start = start;
            }
        } else {
            /* the current segment lies after the PHP .text segment */
            if (last_free_addr + requested_size - text_start > UINT32_MAX) {
                /* the current segment and the following segments lie too far from the PHP .text segment */
                break;
            }
            if (last_free_addr + requested_size <= start) {
                last_candidate = last_free_addr;
                break;
            }
        }
        last_free_addr = ZEND_MM_ALIGNED_SIZE_EX(end, huge_page_size);
        if (heap_segment) {
            last_free_addr += huge_page_size;
        }
    }
    fclose(f);
#elif defined(__FreeBSD__)
    size_t s = 0;
    int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
    if (sysctl(mib, 4, NULL, &s, NULL, 0) == 0) {
        s = s * 4 / 3;
        void *addr = mmap(NULL, s, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
        if (addr != MAP_FAILED) {
            if (sysctl(mib, 4, addr, &s, NULL, 0) == 0) {
                start = (uintptr_t)addr;
                end = start + s;
                while (start < end) {
                    struct kinfo_vmentry *entry = (struct kinfo_vmentry *)start;
                    size_t sz = entry->kve_structsize;
                    if (sz == 0) {
                        break;
                    }
                    uintptr_t e_start = entry->kve_start;
                    uintptr_t e_end = entry->kve_end;
                    if ((uintptr_t)execute_ex >= e_start) {
                        /* the current segment lies before the PHP .text segment, or is the .text segment itself */
                        if (last_free_addr + requested_size <= e_start) {
                            last_candidate = ZEND_MM_ALIGNED_SIZE_EX(e_start - requested_size, huge_page_size);
                            if (last_candidate + requested_size > e_start) {
                                last_candidate -= huge_page_size;
                            }
                        }
                        if ((uintptr_t)execute_ex < e_end) {
                            /* the current segment is the PHP .text segment itself */
                            if (last_candidate != (uintptr_t)MAP_FAILED) {
                                if (e_end - last_candidate < UINT32_MAX) {
                                    /* we have found a big enough hole before the .text segment */
                                    break;
                                }
                                last_candidate = (uintptr_t)MAP_FAILED;
                            }
                            text_start = e_start;
                        }
                    } else {
                        /* the current segment lies after the PHP .text segment */
                        if (last_free_addr + requested_size - text_start > UINT32_MAX) {
                            /* the current segment and the following segments lie too far from the PHP .text segment */
                            break;
                        }
                        if (last_free_addr + requested_size <= e_start) {
                            last_candidate = last_free_addr;
                            break;
                        }
                    }
                    last_free_addr = ZEND_MM_ALIGNED_SIZE_EX(e_end, huge_page_size);
                    start += sz;
                }
            }
            munmap(addr, s);
        }
    }
#endif

    return (void*)last_candidate;
}
#endif
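
find_preferred_mmap_base, when compiled in, scans the process's own memory map (via /proc/self/maps on Linux, the KERN_PROC_VMMAP sysctl on FreeBSD) for a 2 MiB-aligned hole that keeps the shared segment within UINT32_MAX bytes of the PHP .text segment, presumably so JIT-emitted code can reach the VM with 32-bit displacements, while steering clear of the heap. A minimal standalone sketch of the same Linux scanning pattern, written for illustration and independent of PHP's headers (the 64 MiB `wanted` size is arbitrary):

/* Illustration, not part of shared_alloc_mmap.c: find the first 2 MiB-aligned
 * hole of at least `wanted` bytes in this process's own address space.
 * Assumes Linux and, like the code above, that uintptr_t is unsigned long. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uintptr_t align = 2 * 1024 * 1024;
    const uintptr_t wanted = 64 * 1024 * 1024;
    uintptr_t cursor = align, start, end;
    char line[4096];

    FILE *f = fopen("/proc/self/maps", "r");
    if (!f) {
        return 1;
    }
    while (fgets(line, sizeof(line), f) && sscanf(line, "%lx-%lx", &start, &end) == 2) {
        if (cursor + wanted <= start) {              /* hole big enough before this mapping */
            printf("hole at %#lx (%lu MiB below %#lx)\n",
                   (unsigned long)cursor,
                   (unsigned long)((start - cursor) >> 20),
                   (unsigned long)start);
            break;
        }
        cursor = (end + align - 1) & ~(align - 1);   /* skip past this mapping, realigned */
    }
    fclose(f);
    return 0;
}
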

static int create_segments(size_t requested_size, zend_shared_segment ***shared_segments_p, int *shared_segments_count, const char **error_in)
{
    zend_shared_segment *shared_segment;
    int flags = PROT_READ | PROT_WRITE, fd = -1;
    void *p;
#if defined(HAVE_PROCCTL) && defined(PROC_WXMAP_CTL)
    int enable_wxmap = PROC_WX_MAPPINGS_PERMIT;
    if (procctl(P_PID, getpid(), PROC_WXMAP_CTL, &enable_wxmap) == -1) {
        return ALLOC_FAILURE;
    }
#endif
#ifdef PROT_MPROTECT
    flags |= PROT_MPROTECT(PROT_EXEC);
#endif
#ifdef VM_MAKE_TAG
    /* allows tracking segments via tools such as vmmap */
    fd = VM_MAKE_TAG(251U);
#endif
#ifdef PROT_MAX
    flags |= PROT_MAX(PROT_READ | PROT_WRITE | PROT_EXEC);
#endif
#if defined(HAVE_JIT) && (defined(__linux__) || defined(__FreeBSD__)) && (defined(__x86_64__) || defined(__aarch64__)) && !defined(__SANITIZE_ADDRESS__)
    void *hint;
    if (JIT_G(enabled) && JIT_G(buffer_size)
            && zend_jit_check_support() == SUCCESS) {
        hint = find_preferred_mmap_base(requested_size);
    } else {
        /* Do not use a hint if JIT is not enabled, as this profits only JIT and
         * is potentially unsafe when the only suitable candidate is just
         * after the heap (e.g. in non-PIE builds) (GH-13775). */
        hint = MAP_FAILED;
    }
    if (hint != MAP_FAILED) {
# ifdef MAP_HUGETLB
        size_t huge_page_size = 2 * 1024 * 1024;
        if (requested_size >= huge_page_size && requested_size % huge_page_size == 0) {
            p = mmap(hint, requested_size, flags, MAP_SHARED|MAP_ANONYMOUS|MAP_HUGETLB|MAP_FIXED, -1, 0);
            if (p != MAP_FAILED) {
                goto success;
            }
        }
#endif
        p = mmap(hint, requested_size, flags, MAP_SHARED|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
        if (p != MAP_FAILED) {
            goto success;
        }
    }
#endif
#ifdef MAP_HUGETLB
    size_t huge_page_size = 2 * 1024 * 1024;

    /* Try to allocate huge pages first to reduce dTLB misses.
     * The OS has to be configured properly. On Linux
     * (e.g. https://wiki.debian.org/Hugepages#Enabling_HugeTlbPage)
     * you may verify huge page usage with the following command:
     * `grep "Huge" /proc/meminfo`
     * (a programmatic version of this check is sketched after this function).
     * On FreeBSD, see the sysctl vm.pmap.pg_ps_enabled entry
     * (boot-time config only, but enabled by default on most arches).
     */
    if (requested_size >= huge_page_size && requested_size % huge_page_size == 0) {
# if defined(__x86_64__) && defined(MAP_32BIT)
        /* To get huge pages at a low 32-bit address we have to reserve the
           address space first and then remap it using MAP_HUGETLB */

        p = mmap(NULL, requested_size, flags, MAP_SHARED|MAP_ANONYMOUS|MAP_32BIT, fd, 0);
        if (p != MAP_FAILED) {
            munmap(p, requested_size);
            p = (void*)(ZEND_MM_ALIGNED_SIZE_EX((ptrdiff_t)p, huge_page_size));
            p = mmap(p, requested_size, flags, MAP_SHARED|MAP_ANONYMOUS|MAP_32BIT|MAP_HUGETLB|MAP_FIXED, -1, 0);
            if (p != MAP_FAILED) {
                goto success;
            } else {
                p = mmap(NULL, requested_size, flags, MAP_SHARED|MAP_ANONYMOUS|MAP_32BIT, fd, 0);
                if (p != MAP_FAILED) {
                    goto success;
                }
            }
        }
# endif
        p = mmap(0, requested_size, flags, MAP_SHARED|MAP_ANONYMOUS|MAP_HUGETLB, fd, 0);
        if (p != MAP_FAILED) {
            goto success;
        }
    }
#elif defined(PREFER_MAP_32BIT) && defined(__x86_64__) && defined(MAP_32BIT)
    p = mmap(NULL, requested_size, flags, MAP_SHARED|MAP_ANONYMOUS|MAP_32BIT, fd, 0);
    if (p != MAP_FAILED) {
        goto success;
    }
#endif

    p = mmap(0, requested_size, flags, MAP_SHARED|MAP_ANONYMOUS, fd, 0);
    if (p == MAP_FAILED) {
        *error_in = "mmap";
        return ALLOC_FAILURE;
    }

success: ZEND_ATTRIBUTE_UNUSED;
    *shared_segments_count = 1;
    *shared_segments_p = (zend_shared_segment **) calloc(1, sizeof(zend_shared_segment) + sizeof(void *));
    if (!*shared_segments_p) {
        munmap(p, requested_size);
        *error_in = "calloc";
        return ALLOC_FAILURE;
    }
    shared_segment = (zend_shared_segment *)((char *)(*shared_segments_p) + sizeof(void *));
    (*shared_segments_p)[0] = shared_segment;

    shared_segment->p = p;
    shared_segment->pos = 0;
    shared_segment->size = requested_size;

    return ALLOC_SUCCESS;
}
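
The huge-page attempts in create_segments only succeed on Linux when the kernel actually has a hugetlb pool reserved; otherwise mmap with MAP_HUGETLB fails and the code falls back to ordinary pages. A hedged sketch of the `grep "Huge" /proc/meminfo` check referenced in the comment above, assuming Linux's /proc/meminfo format:

/* Illustration, not part of shared_alloc_mmap.c: report how many huge pages
 * are reserved, mirroring `grep "Huge" /proc/meminfo`. */
#include <stdio.h>

static long huge_pages_total(void)
{
    char line[256];
    long total = 0;
    FILE *f = fopen("/proc/meminfo", "r");
    if (!f) {
        return -1;                       /* not Linux, or /proc unavailable */
    }
    while (fgets(line, sizeof(line), f)) {
        if (sscanf(line, "HugePages_Total: %ld", &total) == 1) {
            break;
        }
    }
    fclose(f);
    return total;
}

int main(void)
{
    printf("HugePages_Total: %ld\n", huge_pages_total());
    return 0;
}

When this reports zero, the MAP_HUGETLB calls above are expected to fail with ENOMEM, which is why every huge-page attempt in create_segments is paired with a plain-page fallback.
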
static int detach_segment(zend_shared_segment *shared_segment)
{
    munmap(shared_segment->p, shared_segment->size);
    return 0;
}

static size_t segment_type_size(void)
{
    return sizeof(zend_shared_segment);
}

const zend_shared_memory_handlers zend_alloc_mmap_handlers = {
    create_segments,
    detach_segment,
    segment_type_size
};

#endif /* USE_MMAP */
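
The handler table is how the rest of zend_shared_alloc selects this backend and drives it through three callbacks. A hypothetical usage sketch, assuming the struct's fields mirror the function names in the initializer above; the demo_alloc name and error handling are invented for illustration, not OPcache's actual startup code:

/* Illustration only: drive a zend_shared_memory_handlers backend through
 * its lifecycle (allocate, use, detach). */
#include <stdio.h>
#include <stdlib.h>
#include "zend_shared_alloc.h"

static int demo_alloc(const zend_shared_memory_handlers *h, size_t size)
{
    zend_shared_segment **segments;
    int count, i;
    const char *error_in = NULL;

    if (h->create_segments(size, &segments, &count, &error_in) != ALLOC_SUCCESS) {
        fprintf(stderr, "shared alloc failed in %s()\n", error_in ? error_in : "?");
        return ALLOC_FAILURE;
    }
    /* segments[0]->p now points at `size` bytes of shared memory */
    for (i = 0; i < count; i++) {
        h->detach_segment(segments[i]);
    }
    free(segments);   /* the single calloc'd block from create_segments */
    return ALLOC_SUCCESS;
}
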