/src/hermes/lib/Support/OSCompatPosix.cpp
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) Meta Platforms, Inc. and affiliates. |
3 | | * |
4 | | * This source code is licensed under the MIT license found in the |
5 | | * LICENSE file in the root directory of this source tree. |
6 | | */ |
7 | | |
8 | | #if !defined(_WINDOWS) && !defined(__EMSCRIPTEN__) |
9 | | |
10 | | #include "hermes/Support/Compiler.h" |
11 | | #include "hermes/Support/ErrorHandling.h" |
12 | | #include "hermes/Support/OSCompat.h" |
13 | | |
14 | | #include <cassert> |
15 | | #include <fstream> |
16 | | #include <vector> |
17 | | |
18 | | #include <signal.h> |
19 | | #include <sys/mman.h> |
20 | | #include <sys/resource.h> |
21 | | |
22 | | #if defined(__linux__) |
23 | | #if !defined(RUSAGE_THREAD) |
24 | | #define RUSAGE_THREAD 1 |
25 | | #endif |
26 | | #endif // __linux__ |
27 | | |
28 | | #include <pthread.h> |
29 | | #include <sys/types.h> |
30 | | #include <unistd.h> |
31 | | |
32 | | #ifdef __MACH__ |
33 | | #include <mach/mach.h> |
34 | | |
35 | | #endif // __MACH__ |
36 | | |
37 | | #ifdef __linux__ |
38 | | |
39 | | #if !defined(_POSIX_TIMERS) || _POSIX_TIMERS <= 0 |
40 | | #error "Timers not supported on this Android platform." |
41 | | #endif |
42 | | |
43 | | #ifndef CLOCK_THREAD_CPUTIME_ID |
44 | | #error "CLOCK_THREAD_CPUTIME_ID not supported by clock_gettime" |
45 | | #endif |
46 | | |
47 | | #include <sys/syscall.h> |
48 | | #include <time.h> |
49 | | |
50 | | #endif // __linux__ |
51 | | |
52 | | #if defined(__linux__) || defined(__ANDROID__) |
53 | | #include <sys/prctl.h> |
54 | | #endif |
55 | | |
56 | | #ifdef __ANDROID__ |
57 | | #ifndef PR_SET_VMA |
58 | | #define PR_SET_VMA 0x53564d41 |
59 | | #endif |
60 | | |
61 | | #ifndef PR_SET_VMA_ANON_NAME |
62 | | #define PR_SET_VMA_ANON_NAME 0 |
63 | | #endif |
64 | | #endif // __ANDROID__ |
65 | | |
66 | | #ifdef __APPLE__ |
67 | | #include <TargetConditionals.h> |
68 | | #endif |
69 | | |
70 | | #include "llvh/Config/config.h" |
71 | | #include "llvh/Support/raw_ostream.h" |
72 | | |
73 | | namespace hermes { |
74 | | namespace oscompat { |
75 | | |
76 | | #ifndef NDEBUG |
77 | | static size_t testPgSz = 0; |
78 | | |
// Debug-only test hook: make page_size() report \p pageSz instead of the
// real OS page size.
void set_test_page_size(size_t pageSz) {
  testPgSz = pageSz;
}
82 | | |
// Debug-only test hook: clear the override so page_size() reports the real
// OS page size again.
void reset_test_page_size() {
  testPgSz = 0;
}
86 | | #endif |
87 | | |
/// Query the kernel for the actual VM page size (never the test override).
static inline size_t page_size_real() {
  const long pageSize = getpagesize();
  return static_cast<size_t>(pageSize);
}
91 | | |
/// Return the VM page size, honoring the debug-only test override when it is
/// set via set_test_page_size().
size_t page_size() {
#ifndef NDEBUG
  if (testPgSz != 0) {
    return testPgSz;
  }
#endif
  return page_size_real();
}
100 | | |
101 | | #ifndef NDEBUG |
102 | | static constexpr size_t unsetVMAllocLimit = std::numeric_limits<size_t>::max(); |
103 | | static size_t totalVMAllocLimit = unsetVMAllocLimit; |
104 | | |
// Debug-only test hook: cap the total bytes vm_mmap may hand out, so tests
// can simulate VM exhaustion.
void set_test_vm_allocate_limit(size_t totSz) {
  totalVMAllocLimit = totSz;
}
108 | | |
// Debug-only test hook: remove the cap installed by
// set_test_vm_allocate_limit().
void unset_test_vm_allocate_limit() {
  totalVMAllocLimit = unsetVMAllocLimit;
}
112 | | #endif // !NDEBUG |
113 | | |
/// Thin wrapper around mmap(2) for anonymous mappings (fd == -1, offset 0).
/// \param checkDebugLimit when true, debug builds account the request against
///   the test allocation limit and fail with TestVMLimitReached once the
///   budget is exhausted.
/// \return the mapped address, or the errno-derived error code on failure.
static llvh::ErrorOr<void *>
vm_mmap(void *addr, size_t sz, int prot, int flags, bool checkDebugLimit) {
  assert(sz % page_size_real() == 0);
#ifndef NDEBUG
  if (checkDebugLimit) {
    if (LLVM_UNLIKELY(sz > totalVMAllocLimit)) {
      return make_error_code(OOMError::TestVMLimitReached);
    } else if (LLVM_UNLIKELY(totalVMAllocLimit != unsetVMAllocLimit)) {
      // A limit is active: deduct this allocation from the remaining budget.
      totalVMAllocLimit -= sz;
    }
  }
#endif // !NDEBUG
  void *result = mmap(addr, sz, prot, flags, -1, 0);
  if (result == MAP_FAILED) {
    // Since mmap is a POSIX API, even on MacOS, errno should use the POSIX
    // generic_category.
    return std::error_code(errno, std::generic_category());
  }
  return result;
}
134 | | |
/// Unmap [addr, addr+sz); in debug builds, credit the bytes back to the test
/// allocation limit budget.
static void vm_munmap(void *addr, size_t sz) {
  auto ret = munmap(addr, sz);
  assert(!ret && "Failed to free memory region.");
  (void)ret;

#ifndef NDEBUG
  if (LLVM_UNLIKELY(totalVMAllocLimit != unsetVMAllocLimit) && addr) {
    totalVMAllocLimit += sz;
  }
#endif
}
146 | | |
147 | 113 | static char *alignAlloc(void *p, size_t alignment) { |
148 | 113 | return reinterpret_cast<char *>( |
149 | 113 | llvh::alignTo(reinterpret_cast<uintptr_t>(p), alignment)); |
150 | 113 | } |
151 | | |
/// mmap a region of \p sz bytes whose start is a multiple of \p alignment,
/// by over-allocating and then unmapping the unaligned excess at both ends.
static llvh::ErrorOr<void *>
vm_mmap_aligned(void *addr, size_t sz, size_t alignment, int prot, int flags) {
  assert(sz > 0 && sz % page_size() == 0);
  assert(alignment > 0 && alignment % page_size() == 0);

  // Allocate a larger section to ensure that it contains a subsection that
  // satisfies the request. Use *real* page size here since that's what vm_mmap
  // guarantees.
  const size_t excessSize = sz + alignment - page_size_real();
  auto result = vm_mmap(addr, excessSize, prot, flags, true);
  if (!result)
    return result;

  void *raw = *result;
  char *aligned = alignAlloc(raw, alignment);
  size_t excessAtFront = aligned - static_cast<char *>(raw);
  size_t excessAtBack = excessSize - excessAtFront - sz;

  // Trim the slop before and after the aligned window.
  if (excessAtFront)
    vm_munmap(raw, excessAtFront);
  if (excessAtBack)
    vm_munmap(aligned + sz, excessAtBack);

  return aligned;
}
177 | | |
178 | | static constexpr int kVMAllocateFlags = MAP_PRIVATE | MAP_ANONYMOUS; |
179 | | static constexpr int kVMAllocateProt = PROT_READ | PROT_WRITE; |
180 | | |
/// Allocate \p sz bytes of read/write anonymous memory, optionally near
/// \p hint.
llvh::ErrorOr<void *> vm_allocate(size_t sz, void *hint) {
  assert(sz % page_size() == 0);
#ifndef NDEBUG
  // When a test simulates a page size larger than the real one, also align
  // the result to the simulated page size.
  if (testPgSz != 0 && testPgSz > static_cast<size_t>(page_size_real())) {
    return vm_allocate_aligned(sz, testPgSz);
  }
#endif // !NDEBUG
  return vm_mmap(hint, sz, kVMAllocateProt, kVMAllocateFlags, true);
}
190 | | |
/// Allocate \p sz bytes of read/write memory whose start is a multiple of
/// \p alignment (a power of two and a page-size multiple).
llvh::ErrorOr<void *>
vm_allocate_aligned(size_t sz, size_t alignment, void *hint) {
  assert(sz > 0 && sz % page_size() == 0);
  assert(alignment > 0 && alignment % page_size() == 0);
  // While not specifically required for the posix implementation, check the
  // alignment requirement here to avoid creating bugs on other platforms.
  assert(llvh::isPowerOf2_64(alignment));

  // Opportunistically allocate without alignment constraint,
  // and see if the memory happens to be aligned.
  // While this may be unlikely on the first allocation request,
  // subsequent allocation requests have a good chance.
  auto result = vm_mmap(hint, sz, kVMAllocateProt, kVMAllocateFlags, true);
  if (!result) {
    return result;
  }
  void *mem = *result;
  if (mem == alignAlloc(mem, alignment)) {
    return mem;
  }

  // Free the opportunistic allocation.
  vm_munmap(mem, sz);

  // Fall back to the over-allocate-and-trim path.
  return vm_mmap_aligned(
      hint, sz, alignment, kVMAllocateProt, kVMAllocateFlags);
}
218 | | |
/// Release memory obtained from vm_allocate().
void vm_free(void *p, size_t sz) {
  vm_munmap(p, sz);
}
222 | | |
/// Aligned allocations are ordinary mmap regions here, so freeing is the
/// same as vm_free().
void vm_free_aligned(void *p, size_t sz) {
  vm_free(p, sz);
}
226 | | |
227 | | static constexpr int kVMReserveProt = PROT_NONE; |
228 | | static constexpr int kVMReserveFlags = |
229 | | MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE; |
230 | | |
/// Reserve (but do not commit) \p sz bytes of address space aligned to
/// \p alignment; pages are PROT_NONE | MAP_NORESERVE until vm_commit().
llvh::ErrorOr<void *>
vm_reserve_aligned(size_t sz, size_t alignment, void *hint) {
  return vm_mmap_aligned(hint, sz, alignment, kVMReserveProt, kVMReserveFlags);
}
235 | | |
/// Return address space obtained with vm_reserve_aligned() to the OS.
void vm_release_aligned(void *p, size_t sz) {
  vm_munmap(p, sz);
}
239 | | |
/// Make previously reserved pages readable and writable. MAP_FIXED remaps
/// the pages in place over the existing reservation.
llvh::ErrorOr<void *> vm_commit(void *p, size_t sz) {
  return vm_mmap(p, sz, kVMAllocateProt, kVMAllocateFlags | MAP_FIXED, false);
}
243 | | |
/// Return committed pages to the reserved state (inaccessible, MAP_NORESERVE)
/// by remapping them in place.
void vm_uncommit(void *p, size_t sz) {
  auto res = vm_mmap(p, sz, kVMReserveProt, kVMReserveFlags | MAP_FIXED, false);
  (void)res;
  assert(res && "uncommit failed");
}
249 | | |
/// Hint the kernel that [p, p+sz) may benefit from (transparent) huge pages;
/// a no-op on platforms without MADV_HUGEPAGE.
void vm_hugepage(void *p, size_t sz) {
  assert(
      reinterpret_cast<uintptr_t>(p) % page_size() == 0 &&
      "Precondition: pointer is page-aligned.");

#if defined(__linux__) || defined(__ANDROID__)
  // Since the alloc is aligned, it may benefit from huge pages.
  madvise(p, sz, MADV_HUGEPAGE);
#endif
}
260 | | |
/// Tell the OS that the contents of [p, p+sz) are no longer needed so the
/// pages can be dropped from the process's footprint; the range stays mapped
/// and accessible.
void vm_unused(void *p, size_t sz) {
#ifndef NDEBUG
  const size_t PS = page_size();
  assert(
      reinterpret_cast<intptr_t>(p) % PS == 0 &&
      "Precondition: pointer is page-aligned.");
#endif

/// Change the flag we pass to \p madvise based on the platform, so that we are
/// always acting to reduce memory pressure, as perceived by that platform.
#if defined(__MACH__)

/// On the mach kernel, \p MADV_FREE causes the OS to deduct this memory from
/// the process's physical footprint.
#define MADV_UNUSED MADV_FREE

#elif defined(__linux__)

/// On linux, telling the OS that we \p MADV_DONTNEED some pages will cause it
/// to immediately deduct their size from the process's resident set.
#define MADV_UNUSED MADV_DONTNEED

#else
#error "Don't know how to return memory to the OS on this platform."
#endif // __MACH__, __linux__

  madvise(p, sz, MADV_UNUSED);

#undef MADV_UNUSED
}
291 | | |
/// Hint that [p, p+sz) will be accessed soon, so the OS may page it in ahead
/// of time.
void vm_prefetch(void *p, size_t sz) {
  assert(
      reinterpret_cast<intptr_t>(p) % page_size() == 0 &&
      "Precondition: pointer is page-aligned.");
  madvise(p, sz, MADV_WILLNEED);
}
298 | | |
/// Attach a human-readable name to the mapping (visible in /proc/self/maps);
/// only implemented on Android, a no-op elsewhere.
void vm_name(void *p, size_t sz, const char *name) {
#ifdef __ANDROID__
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, sz, name);
#else
  (void)p;
  (void)sz;
  (void)name;
#endif // __ANDROID__
}
308 | | |
309 | 452 | bool vm_protect(void *p, size_t sz, ProtectMode mode) { |
310 | 452 | auto prot = PROT_NONE; |
311 | 452 | if (mode == ProtectMode::ReadWrite) { |
312 | 226 | prot = PROT_WRITE | PROT_READ; |
313 | 226 | } |
314 | 452 | int err = mprotect(p, sz, prot); |
315 | 452 | return err != -1; |
316 | 452 | } |
317 | | |
318 | 0 | bool vm_madvise(void *p, size_t sz, MAdvice advice) { |
319 | 0 | #ifndef NDEBUG |
320 | 0 | const size_t PS = page_size(); |
321 | 0 | assert( |
322 | 0 | reinterpret_cast<intptr_t>(p) % PS == 0 && |
323 | 0 | "Precondition: pointer is page-aligned."); |
324 | 0 | #endif |
325 | | |
326 | 0 | int param = MADV_NORMAL; |
327 | 0 | switch (advice) { |
328 | 0 | case MAdvice::Random: |
329 | 0 | param = MADV_RANDOM; |
330 | 0 | break; |
331 | 0 | case MAdvice::Sequential: |
332 | 0 | param = MADV_SEQUENTIAL; |
333 | 0 | break; |
334 | 0 | } |
335 | 0 | return madvise(p, sz, param) == 0; |
336 | 0 | } |
337 | | |
/// Measure the footprint of the mapping containing [start, end): the number
/// of dirtied pages on Mach, or the mapping's RSS (converted to a page count)
/// on Linux, read from /proc/self/smaps.
llvh::ErrorOr<size_t> vm_footprint(char *start, char *end) {
#ifdef __MACH__
  const task_t self = mach_task_self();

  vm_address_t vAddr = reinterpret_cast<vm_address_t>(start);
  vm_size_t vSz = static_cast<vm_size_t>(end - start);
  vm_region_extended_info_data_t info;
  mach_msg_type_number_t fields = VM_REGION_EXTENDED_INFO_COUNT;
  mach_port_t unused;

  auto ret = vm_region_64(
      self,
      &vAddr,
      &vSz,
      VM_REGION_EXTENDED_INFO,
      // The expected contents, and requisite size of this struct depend on the
      // previous and next parameters to this function respectively. We cast it
      // to a "generic" info type to indicate this.
      reinterpret_cast<vm_region_info_t>(&info),
      &fields,
      &unused);

  if (ret != KERN_SUCCESS)
    return std::error_code(errno, std::generic_category());

  return info.pages_dirtied;
#else
  auto rStart = reinterpret_cast<uintptr_t>(start);
  auto rEnd = reinterpret_cast<uintptr_t>(end);

  char label[] = "Rss:";

  // Scan /proc/self/smaps for the entry whose address range contains the
  // requested region, then report that entry's "Rss:" field.
  std::ifstream smaps("/proc/self/smaps");

  while (smaps) {
    std::string firstToken;
    smaps >> firstToken;

    // Ignore the rest of the line.
    smaps.ignore(std::numeric_limits<std::streamsize>::max(), '\n');

    if (firstToken.find_last_of(':') != std::string::npos) {
      // We are inside an entry, rather than at the start of one, so we should
      // ignore this line.
      continue;
    }

    // The first token should be the mapping's virtual address range if this is
    // the first line of a mapping's entry, so we extract it.
    std::stringstream ris(firstToken);
    uintptr_t mStart, mEnd;
    ris >> std::hex >> mStart;
    // Ignore '-'
    ris.ignore();
    ris >> mEnd;

    // The working assumption is that the kernel will not split a single memory
    // region allocated by \p mmap across multiple entries in the smaps output.
    if (mStart <= rStart && rEnd <= mEnd) {
      // Found the start of the section pertaining to our memory map
      break;
    }
  }

  while (smaps) {
    std::string line;
    std::getline(smaps, line);

    if (line.compare(0, sizeof(label) - 1, label) != 0) {
      continue;
    }

    std::stringstream lis(line);
    lis.ignore(line.length(), ' '); // Pop the label

    size_t rss;
    std::string unit;
    lis >> std::skipws >> rss >> unit;

    // smaps reports RSS in kilobytes; convert to a count of pages.
    assert(unit == "kB");
    return rss * 1024 / page_size();
  }

  // Region not found (or stream error); report whatever errno is current.
  return std::error_code(errno, std::generic_category());
#endif
}
424 | | |
/// Count how many of the pages overlapping [p, p+sz) are resident in RAM,
/// via mincore(2). If \p runs is non-null, append the lengths of maximal
/// runs of pages with equal residency; the first recorded run counts
/// resident pages (and may be zero-length).
/// \return the number of resident pages, or -1 if mincore fails.
int pages_in_ram(const void *p, size_t sz, llvh::SmallVectorImpl<int> *runs) {
  const auto PS = page_size();
  {
    // Align region start down to page boundary.
    const uintptr_t addr = reinterpret_cast<uintptr_t>(p);
    const size_t adjust = addr % PS;
    p = reinterpret_cast<const void *>(addr - adjust);
    sz += adjust;
  }
  // Total number of pages that the region overlaps.
  const size_t mapSize = (sz + PS - 1) / PS;
#ifdef __linux__
  // mincore's vector parameter is unsigned char* on Linux, char* elsewhere.
  using MapElm = unsigned char;
#else
  using MapElm = char;
#endif
  std::vector<MapElm> bitMap(mapSize);
  if (mincore(const_cast<void *>(p), sz, bitMap.data())) {
    return -1;
  }
  // Total pages in RAM.
  int totalIn = 0;
  bool currentRunStatus = true;
  if (runs)
    runs->push_back(0);
  for (auto elm : bitMap) {
    // Lowest bit tells whether in RAM.
    bool thisStatus = (elm & 1);
    totalIn += thisStatus;
    if (runs) {
      // Start a new run whenever residency flips.
      if (thisStatus != currentRunStatus)
        runs->push_back(0);
      currentRunStatus = thisStatus;
      ++runs->back();
    }
  }
  return totalIn;
}
463 | | |
/// Return this process's peak resident set size in bytes, or 0 on failure.
uint64_t peak_rss() {
  rusage usage;
  if (getrusage(RUSAGE_SELF, &usage) != 0) {
    // failed
    return 0;
  }
  uint64_t bytes = usage.ru_maxrss;
#if !defined(__APPLE__) || !defined(__MACH__)
  // Linux maxrss is in kilobytes, expand into bytes.
  bytes *= 1024;
#endif
  return bytes;
}
477 | | |
/// Return this process's current resident set size in bytes, or 0 on failure.
uint64_t current_rss() {
#if defined(__APPLE__) && defined(__MACH__)
  struct mach_task_basic_info info;
  mach_msg_type_number_t infoCount = MACH_TASK_BASIC_INFO_COUNT;
  if (task_info(
          mach_task_self(),
          MACH_TASK_BASIC_INFO,
          (task_info_t)&info,
          &infoCount) != KERN_SUCCESS)
    return 0;
  // NOTE(review): Mach documents mach_task_basic_info.resident_size in
  // bytes; multiplying by the page size here looks like it over-reports --
  // confirm against the task_info documentation.
  return info.resident_size * page_size_real();
#else
  FILE *fp = fopen("/proc/self/statm", "r");
  if (!fp) {
    return 0;
  }
  long rss = 0;
  // The first field is total program size, second field is resident set size.
  if (fscanf(fp, "%*d %ld", &rss) != 1) {
    fclose(fp);
    return 0;
  }
  fclose(fp);
  // The RSS number from from statm is in number of pages. Multiply by the real
  // page size to get the number in bytes.
  return rss * page_size_real();
#endif
}
506 | | |
/// Sum this process's private dirty memory, in bytes, from the
/// "Private_Dirty:" fields of /proc/self/smaps. Returns 0 on non-Linux
/// platforms or when /proc is unavailable.
uint64_t current_private_dirty() {
#if defined(__linux__)
  uint64_t sum = 0;
  FILE *fp = fopen("/proc/self/smaps", "r");
  // Fix: the previous code passed a possibly-null FILE* to fgets/fclose,
  // which is undefined behavior when /proc cannot be opened (e.g. sandboxes).
  if (!fp) {
    return 0;
  }
  static const char kPrefix[] = "Private_Dirty:";
  constexpr size_t kPrefixLen = sizeof(kPrefix) - 1;
  char buf[128]; // Just needs to fit the lines we care about.
  while (fgets(buf, sizeof(buf), fp))
    if (strncmp(buf, kPrefix, kPrefixLen) == 0)
      // The field is reported in kB; accumulate and convert at the end.
      sum += atoll(buf + kPrefixLen);
  fclose(fp);
  return sum * 1024;
#else
  return 0;
#endif
}
523 | | |
524 | | #if defined(__linux__) |
/// \return true iff the half-open intervals [a, a+asize) and [b, b+bsize)
/// share at least one address.
static bool overlap(uintptr_t a, size_t asize, uintptr_t b, size_t bsize) {
  // Empty intervals never overlap anything.
  if (asize == 0 || bsize == 0)
    return false;
  // Normalize so that `lo` is the interval that starts first.
  const uintptr_t lo = a <= b ? a : b;
  const size_t loSize = a <= b ? asize : bsize;
  const uintptr_t hi = a <= b ? b : a;
  // They intersect iff the earlier interval reaches past the later's start.
  return lo + loSize > hi;
}
535 | | #endif |
536 | | |
537 | 0 | std::vector<std::string> get_vm_protect_modes(const void *p, size_t sz) { |
538 | 0 | std::vector<std::string> modes; |
539 | 0 | #if defined(__linux__) |
540 | 0 | unsigned long long begin; |
541 | 0 | unsigned long long end; |
542 | 0 | char mode[4 + 1]; |
543 | 0 | FILE *fp = fopen("/proc/self/maps", "r"); |
544 | 0 | if (!fp) { |
545 | 0 | modes.emplace_back("unknown"); |
546 | 0 | return modes; |
547 | 0 | } |
548 | 0 | while (fscanf(fp, "%llx-%llx %4s", &begin, &end, mode) == 3) { |
549 | 0 | if (overlap( |
550 | 0 | reinterpret_cast<uintptr_t>(p), |
551 | 0 | sz, |
552 | 0 | static_cast<uintptr_t>(begin), |
553 | 0 | static_cast<size_t>(end - begin))) { |
554 | 0 | modes.push_back(mode); |
555 | 0 | } |
556 | | // Discard remainder of the line. |
557 | 0 | int result; |
558 | 0 | do { |
559 | 0 | result = fgetc(fp); |
560 | 0 | } while (result != '\n' && result > 0); |
561 | 0 | } |
562 | 0 | #endif |
563 | 0 | return modes; |
564 | 0 | } |
565 | | |
/// Fetch the calling thread's (process's, on non-Linux) voluntary and
/// involuntary context-switch counts.
/// \return true on success; on failure both outputs are set to -1.
bool num_context_switches(long &voluntary, long &involuntary) {
  voluntary = involuntary = -1;
  // Only Linux is known to have RUSAGE_THREAD.
#if defined(__linux__)
  const int who = RUSAGE_THREAD;
#else
  const int who = RUSAGE_SELF;
#endif
  rusage usage;
  if (getrusage(who, &usage) != 0) {
    // failed
    return false;
  }
  voluntary = usage.ru_nvcsw;
  involuntary = usage.ru_nivcsw;
  return true;
}
583 | | |
/// Return the OS process id; pids are positive, so the widening is safe.
uint64_t process_id() {
  return static_cast<uint64_t>(getpid());
}
587 | | |
// Platform-specific implementations of global_thread_id
#if defined(__APPLE__) && defined(__MACH__)

/// Return a system-wide unique id for the calling thread.
uint64_t global_thread_id() {
  uint64_t tid = 0;
  auto ret = pthread_threadid_np(nullptr, &tid);
  assert(ret == 0 && "pthread_threadid_np shouldn't fail for current thread");
  (void)ret;
  return tid;
}

#elif defined(__ANDROID__)

/// Return the kernel thread id of the calling thread.
uint64_t global_thread_id() {
  return gettid();
}

#elif defined(__linux__)

/// Return the kernel thread id of the calling thread. Uses the raw syscall,
/// presumably because older glibc lacks a gettid() wrapper -- confirm the
/// project's minimum glibc version.
uint64_t global_thread_id() {
  return syscall(__NR_gettid);
}

#else
#error "Thread ID not supported on this platform"
#endif
614 | | |
615 | | namespace detail { |
616 | | |
#if defined(__APPLE__) && defined(__MACH__)

/// Return {highest stack address, size} of the calling thread's native stack.
std::pair<const void *, size_t> thread_stack_bounds_impl() {
  pthread_t tid = pthread_self();
  void *origin = pthread_get_stackaddr_np(tid);
  rlim_t size = 0;
  if (pthread_main_np()) {
    // According to
    // https://opensource.apple.com/source/WTFEmbedded/WTFEmbedded-95.23/wtf/StackBounds.cpp.auto.html
    // pthread_get_size lies to us when we're the main thread, use get_rlimit
    // instead
    struct rlimit limit;
    getrlimit(RLIMIT_STACK, &limit);
    size = limit.rlim_cur;
  } else {
    size = pthread_get_stacksize_np(tid);
  }

  return {origin, size};
}

#else

/// Return {highest stack address, size} of the calling thread's native stack,
/// derived from the pthread attributes. Fatal if the bounds cannot be read.
std::pair<const void *, size_t> thread_stack_bounds_impl() {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_getattr_np(pthread_self(), &attr);

  void *origin;
  size_t size;
  if (pthread_attr_getstack(&attr, &origin, &size))
    hermes_fatal("Unable to obtain native stack bounds");

#ifdef __BIONIC__
  // It appears that on Android/Bionic, the range returned by
  // pthread_attr_getstack() includes the stack guard pages. We must remove them
  // from the bounds.
  size_t guardSize;
  if (pthread_attr_getguardsize(&attr, &guardSize)) {
    // Don't give up in case of error.
    guardSize = 0;
  }
  if (guardSize > size)
    guardSize = size;
  // Exclude the guard pages from the available stack.
  origin = (char *)origin + guardSize;
  size -= guardSize;
#endif

  pthread_attr_destroy(&attr);

  // origin is now the lowest addressable byte.
  return {(char *)origin + size, size};
}

#endif
673 | | |
674 | | } // namespace detail |
675 | | |
/// Best effort: label the calling thread with \p name for debuggers,
/// profilers, and TSAN reports.
void set_thread_name(const char *name) {
  // Set the thread name for TSAN. It doesn't share the same name mapping as the
  // OS does. This macro expands to nothing if TSAN isn't on.
  TsanThreadName(name);
#if defined(__linux__) || defined(__ANDROID__)
  prctl(PR_SET_NAME, name);
#elif defined(__APPLE__)
  ::pthread_setname_np(name);
#endif
  // Do nothing if the platform doesn't support it.
}
687 | | |
// Platform-specific implementations of thread_cpu_time
#if defined(__APPLE__) && defined(__MACH__)

/// Total user + system CPU time consumed by the calling thread, or
/// microseconds::max() on failure.
std::chrono::microseconds thread_cpu_time() {
  using namespace std::chrono;

  struct thread_basic_info tbi;
  mach_port_t self = pthread_mach_thread_np(pthread_self());
  mach_msg_type_number_t fields = THREAD_BASIC_INFO_COUNT;

  if (thread_info(self, THREAD_BASIC_INFO, (thread_info_t)&tbi, &fields) !=
      KERN_SUCCESS) {
    return microseconds::max();
  }

  microseconds::rep total = 0;
  total += tbi.user_time.microseconds;
  total += tbi.user_time.seconds * 1000000;

  total += tbi.system_time.microseconds;
  total += tbi.system_time.seconds * 1000000;

  return microseconds(total);
}

#elif defined(__linux__) // !(__APPLE__ && __MACH__)

/// Total CPU time consumed by the calling thread (via the per-thread CPU
/// clock), or microseconds::max() on failure.
std::chrono::microseconds thread_cpu_time() {
  using namespace std::chrono;

  struct timespec ts;

  if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) != 0) {
    return microseconds::max();
  }

  microseconds::rep total = 0;
  total += ts.tv_nsec / 1000;
  total += ts.tv_sec * 1000000;
  return microseconds(total);
}

#else // !(__APPLE__ && __MACH__), !__linux__
#error "Thread CPU Time not supported on this platform"
#endif // thread_cpu_time: (__APPLE__ && __MACH__), __linux__
733 | | |
// Platform-specific implementations of thread_page_fault_count

#if defined(__APPLE__) && defined(__MACH__)

/// Report minor/major page-fault counts; returns false (outputs untouched)
/// on failure. NOTE: this Mach implementation queries mach_task_self(), so
/// the counts are task-wide, not per-thread.
bool thread_page_fault_count(int64_t *outMinorFaults, int64_t *outMajorFaults) {
  task_events_info eventsInfo;
  mach_msg_type_number_t count = TASK_EVENTS_INFO_COUNT;
  kern_return_t kr = task_info(
      mach_task_self(), TASK_EVENTS_INFO, (task_info_t)&eventsInfo, &count);
  if (kr == KERN_SUCCESS) {
    *outMinorFaults = eventsInfo.faults;
    *outMajorFaults = eventsInfo.pageins;
  }
  return kr == KERN_SUCCESS;
}

#elif defined(__linux__) // !(__APPLE__ && __MACH__)

/// Report the calling thread's minor/major page-fault counts; returns false
/// (outputs untouched) on failure.
bool thread_page_fault_count(int64_t *outMinorFaults, int64_t *outMajorFaults) {
  struct rusage stats = {};
  int ret = getrusage(RUSAGE_THREAD, &stats);
  if (ret == 0) {
    *outMinorFaults = stats.ru_minflt;
    *outMajorFaults = stats.ru_majflt;
  }
  return ret == 0;
}

#else // !(__APPLE__ && __MACH__), !__linux__
#error "Thread page fault count not supported on this platform"
#endif // thread_page_fault_count: (__APPLE__ && __MACH__), __linux__
765 | | |
/// Return the calling thread's name, or "" if it cannot be read.
std::string thread_name() {
  constexpr int kMaxThreadNameSize = 100;
  int ret = 0;
  char threadName[kMaxThreadNameSize];
#ifdef __ANDROID__
  // Bionic lacks pthread_getname_np; read the name via prctl instead.
  ret = prctl(PR_GET_NAME, threadName);
#else
  ret = pthread_getname_np(pthread_self(), threadName, sizeof(threadName));
#endif
  if (ret != 0) {
    // thread name error should be non-fatal, simply return empty thread name.
    perror("thread_name failed");
    return "";
  }
  return threadName;
}
782 | | |
783 | | #ifdef __linux__ |
/// Return the calling thread's CPU-affinity mask as a vector indexed by CPU
/// number, with trailing unset CPUs trimmed. Empty on failure.
std::vector<bool> sched_getaffinity() {
  std::vector<bool> cpus;
  cpu_set_t mask;
  CPU_ZERO(&mask);
  if (::sched_getaffinity(0, sizeof(mask), &mask) != 0) {
    return cpus;
  }
  int highestSet = -1;
  for (int cpu = 0; cpu < CPU_SETSIZE; ++cpu) {
    const bool isSet = CPU_ISSET(cpu, &mask);
    cpus.push_back(isSet);
    if (isSet)
      highestSet = cpu;
  }
  // Trim trailing zeroes.
  cpus.resize(highestSet + 1);
  return cpus;
}
802 | | |
/// Return the CPU the calling thread is currently running on.
int sched_getcpu() {
  return ::sched_getcpu();
}
#else
// Affinity queries are not implemented on non-Linux platforms.
std::vector<bool> sched_getaffinity() {
  // Not yet supported.
  return std::vector<bool>();
}

int sched_getcpu() {
  // Not yet supported.
  return -1;
}
#endif
817 | | |
/// A cheap, monotonically increasing tick counter for relative timing.
/// NOTE: the unit is platform-dependent (timer/cycle ticks, or nanoseconds
/// on the clock_gettime fallback), so only compare values from the same run.
uint64_t cpu_cycle_counter() {
#if defined(__aarch64__)
  // Clang's builtin causes SIGILL on some 64-bit ARM environments.
  uint64_t cnt;
  __asm __volatile("mrs %0, cntvct_el0" : "=&r"(cnt));
  return cnt;
#elif __has_builtin(__builtin_readcyclecounter)
  return __builtin_readcyclecounter();
#else
  // Fall back to a monotonic clock, reported in nanoseconds.
  timespec t;
  clock_gettime(CLOCK_MONOTONIC, &t);
  return t.tv_sec * 1000LL * 1000LL * 1000LL + t.tv_nsec;
#endif
}
832 | | |
/// Bind environment variable \p name to \p value, overwriting any existing
/// binding. \p value must be non-empty.
/// \return true iff setenv succeeded.
bool set_env(const char *name, const char *value) {
  // Enforce the contract of this function that value must not be empty
  assert(*value != '\0' && "value cannot be empty string");
  // The third argument (non-zero) requests overwrite of existing values.
  constexpr int kOverwrite = 1;
  return setenv(name, value, kOverwrite) == 0;
}
838 | | |
/// Remove environment variable \p name.
/// \return true iff unsetenv succeeded (POSIX reports success even when the
///   variable was not set).
bool unset_env(const char *name) {
  const int rc = unsetenv(name);
  return rc == 0;
}
842 | | |
/*static*/
void *SigAltStackLeakSuppressor::stackRoot_{nullptr};

/// On destruction, record the current signal alternate stack's base pointer
/// in a static root. Presumably this keeps the allocation reachable so leak
/// detectors don't report it -- confirm with the class declaration's docs.
SigAltStackLeakSuppressor::~SigAltStackLeakSuppressor() {
#ifdef HAVE_SIGALTSTACK
  stack_t oldAltStack;
  // Query-only call: a null first argument just reads the current stack.
  if (sigaltstack(nullptr, &oldAltStack) == 0) {
    stackRoot_ = oldAltStack.ss_sp;
  }
#endif
}
854 | | |
855 | | } // namespace oscompat |
856 | | } // namespace hermes |
857 | | |
858 | | #endif // not _WINDOWS |