Coverage Report

Created: 2025-12-12 07:27

/src/hermes/include/hermes/VM/GCConcurrency.h
  Line|  Count|Source
     1|       |/*
     2|       | * Copyright (c) Meta Platforms, Inc. and affiliates.
     3|       | *
     4|       | * This source code is licensed under the MIT license found in the
     5|       | * LICENSE file in the root directory of this source tree.
     6|       | */
     7|       |
     8|       |#ifndef HERMES_VM_GCCONCURRENCY_H
     9|       |#define HERMES_VM_GCCONCURRENCY_H
    10|       |
    11|       |#include <atomic>
    12|       |#include <cassert>
    13|       |#include <condition_variable>
    14|       |#include <mutex>
    15|       |#include <thread>
    16|       |
    17|       |namespace hermes {
    18|       |namespace vm {
    19|       |
    20|       |/// If true, Hades will run with a concurrent background thread. If false,
    21|       |/// Hades will run with a single thread that interleaves work with the YG and
    22|       |/// OG. Has no effect on non-Hades GCs.
    23|       |static constexpr bool kConcurrentGC =
    24|       |#if defined(HERMESVM_ALLOW_CONCURRENT_GC) && \
    25|       |    (defined(HERMESVM_GC_HADES) || defined(HERMESVM_GC_RUNTIME))
    26|       |    // Only use Hades concurrently if on a 64-bit platform.
    27|       |    sizeof(void *) == 8
    28|       |#else
    29|       |    false
    30|       |#endif
    31|       |    ;
    32|       |
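Since kConcurrentGC is constexpr, callers can branch on it with no runtime cost. A minimal sketch of that pattern, assuming C++17 for if constexpr; startCollection and both commented-out helpers are invented for illustration, not part of this header:

#include "hermes/VM/GCConcurrency.h"

namespace {
void startCollection() {
  if constexpr (hermes::vm::kConcurrentGC) {
    // 64-bit build with HERMESVM_ALLOW_CONCURRENT_GC and a Hades-capable GC:
    // collection work can move to a background thread.
    // spawnBackgroundMarker();  // hypothetical
  } else {
    // Otherwise collection work interleaves with the mutator on one thread.
    // markIncrementally();  // hypothetical
  }
}
} // namespace
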
    33|       |namespace impl {
    34|       |
    35|       |/// FakeAtomic has the same API as std::atomic, but ignores the memory order
    36|       |/// argument and always accesses data non-atomically.
    37|       |/// Used when the GC doesn't require atomicity.
    38|       |/// In the JS VM, there is currently only one mutator thread and at most one GC
    39|       |/// thread. The GC thread will not do any modifications to these atomics, and
    40|       |/// will only read them. Therefore it is typically safe for the mutator to use
    41|       |/// relaxed reads. Writes will typically require std::memory_order_release or
    42|       |/// stricter to make sure the GC sees the writes which occur before the atomic
    43|       |/// write.
    44|       |/// NOTE: This differs from std::atomic in that it doesn't have default memory
    45|       |/// orders, since we want all atomic operations to be very explicit about their
    46|       |/// requirements. We also don't define operator T, for the same reason.
    47|       |template <typename T>
    48|       |class FakeAtomic final {
    49|       | public:
    50|       |  constexpr FakeAtomic() : data_{} {}
    51|       |  constexpr FakeAtomic(T desired) : data_{desired} {}
    52|       |
    53|       |  T load(std::memory_order order) const {
    54|       |    (void)order;
    55|       |    return data_;
    56|       |  }
    57|       |
    58|       |  void store(T desired, std::memory_order order) {
    59|       |    (void)order;
    60|       |    data_ = desired;
    61|       |  }
    62|       |
    63|       |  T fetch_add(T arg, std::memory_order order) {
    64|       |    (void)order;
    65|       |    const T oldData = data_;
    66|       |    data_ += arg;
    67|       |    return oldData;
    68|       |  }
    69|       |
    70|       |  T exchange(T arg, std::memory_order order) {
    71|       |    (void)order;
    72|       |    const T oldData = data_;
    73|       |    data_ = arg;
    74|       |    return oldData;
    75|       |  }
    76|       |
    77|       |  T fetch_sub(T arg, std::memory_order order) {
    78|       |    (void)order;
    79|       |    const T oldData = data_;
    80|       |    data_ -= arg;
    81|       |    return oldData;
    82|       |  }
    83|       |
    84|       |  /// Use store explicitly instead.
    85|       |  FakeAtomic &operator=(const FakeAtomic &) = delete;
    86|       |
    87|       | private:
    88|       |  T data_;
    89|       |};
    90|       |
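Because FakeAtomic mirrors the std::atomic member signatures (minus the defaulted memory orders), code written once against the AtomicIfConcurrentGC alias near the end of this header compiles in both modes. A minimal sketch; the counter and both functions are invented for illustration:

#include "hermes/VM/GCConcurrency.h"

namespace {
hermes::vm::AtomicIfConcurrentGC<unsigned> allocatedBytes{0};

void mutatorOnAlloc(unsigned sz) {
  // Mutator-side write: release, so a concurrent GC reader also sees the
  // writes that happened before it (per the comment on FakeAtomic above).
  allocatedBytes.fetch_add(sz, std::memory_order_release);
}

unsigned gcReadStats() {
  // GC-side read: acquire pairs with the release above. With FakeAtomic the
  // order argument is ignored and this is a plain read.
  return allocatedBytes.load(std::memory_order_acquire);
}
} // namespace
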
    91|       |/// A DebugMutex wraps a std::recursive_mutex and also tracks which thread
    92|       |/// currently has the mutex locked. Only available in debug modes.
    93|       |class DebugMutex {
    94|       | public:
    95|    453|  DebugMutex() : tid_() {}
    96|    453|  ~DebugMutex() = default;
    97|       |
    98|   503k|  operator bool() const {
    99|       |    // Check that this thread owns the mutex.
   100|       |    // The mutex must be held in order to check this condition safely.
   101|   503k|    return tid_.load(std::memory_order_relaxed) == std::this_thread::get_id();
   102|   503k|  }
   103|       |
   104|   253k|  void lock() {
   105|   253k|    inner_.lock();
   106|   253k|    depth_++;
   107|   253k|    tid_.store(std::this_thread::get_id(), std::memory_order_relaxed);
   108|   253k|  }
   109|       |
   110|   253k|  void unlock() {
   111|   253k|    assert(depth_ && "Should not unlock an unlocked mutex");
   112|   253k|    assert(
   113|   253k|        tid_.load(std::memory_order_relaxed) == std::this_thread::get_id() &&
   114|   253k|        "Mutex should be acquired and unlocked on the same thread");
   115|   253k|    depth_--;
   116|   253k|    if (!depth_) {
   117|   253k|      tid_.store(std::thread::id{}, std::memory_order_relaxed);
   118|   253k|    }
   119|   253k|    inner_.unlock();
   120|   253k|  }
   121|       |
   122|      1|  uint32_t depth() const {
   123|      1|    assert(*this && "Must hold the inner mutex to call depth");
   124|      1|    return depth_;
   125|      1|  }
   126|       |
   127|       | private:
   128|       |  std::recursive_mutex inner_;
   129|       |  // Sometimes we want to assert that the current thread does not hold the
   130|       |  // mutex. Since the mutex is not held, TSAN complains that the access to tid_
   131|       |  // is not thread safe. It is safe to use this atomic with any memory ordering
   132|       |  // because all we care about is the last value assigned to tid_ by the
   133|       |  // *current* thread.
   134|       |  std::atomic<std::thread::id> tid_;
   135|       |  uint32_t depth_{0};
   136|       |};
   137|       |
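The operator bool above is what makes ownership asserts cheap to write. A minimal sketch; gcMutex_, sweepSegment, and collect are invented names:

#include <cassert>
#include <mutex>
#include "hermes/VM/GCConcurrency.h"

namespace {
hermes::vm::Mutex gcMutex_;

void sweepSegment() {
  // Caller must hold gcMutex_. DebugMutex verifies the owning thread id here,
  // and FakeMutex (below) always reports true, so the assert works in both
  // concurrent and non-concurrent builds.
  assert(gcMutex_ && "sweepSegment requires the GC mutex");
}

void collect() {
  // Works with every Mutex alternative, since each provides lock()/unlock().
  std::lock_guard<hermes::vm::Mutex> lk{gcMutex_};
  sweepSegment();
}
} // namespace

In release concurrent builds, Mutex is std::recursive_mutex, which has no operator bool; that is fine because assert() compiles away under NDEBUG, which is exactly the configuration that selects recursive_mutex.
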
   138|       |/// A FakeMutex has the same API as a std::mutex but does nothing.
   139|       |/// It pretends to always be locked for convenience of asserts that need to work
   140|       |/// in both concurrent code and non-concurrent code.
   141|       |class FakeMutex {
   142|       | public:
   143|       |  explicit FakeMutex() = default;
   144|       |
   145|      0|  operator bool() const {
   146|      0|    return true;
   147|      0|  }
   148|       |
   149|      0|  uint32_t depth() const {
   150|      0|    return 1;
   151|      0|  }
   152|       |
   153|      0|  void lock() {}
   154|      0|  bool try_lock() {
   155|      0|    return true;
   156|      0|  }
   157|      0|  void unlock() {}
   158|       |};
   159|       |
   160|       |} // namespace impl
   161|       |
   162|       |// Only these typedefs should be used by the rest of the VM.
   163|       |template <typename T>
   164|       |using AtomicIfConcurrentGC = typename std::
   165|       |    conditional<kConcurrentGC, std::atomic<T>, impl::FakeAtomic<T>>::type;
   166|       |
   167|       |using Mutex = std::conditional<
   168|       |    kConcurrentGC,
   169|       |#ifndef NDEBUG
   170|       |    impl::DebugMutex
   171|       |#else
   172|       |    std::recursive_mutex
   173|       |#endif
   174|       |    ,
   175|       |    impl::FakeMutex>::type;
   176|       |
   177|       |} // namespace vm
   178|       |} // namespace hermes
   179|       |
   180|       |#endif
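
The two aliases resolve as follows: when kConcurrentGC is true, AtomicIfConcurrentGC<T> is std::atomic<T> and Mutex is impl::DebugMutex (debug) or std::recursive_mutex (NDEBUG); when it is false, they are impl::FakeAtomic<T> and impl::FakeMutex. A compile-time restatement of that table, as a sketch (ordinary VM code should use only the typedefs, never the impl types directly):

#include <atomic>
#include <type_traits>
#include "hermes/VM/GCConcurrency.h"

namespace hermes {
namespace vm {
// Restates the std::conditional selections above for whichever configuration
// this translation unit is built in.
static_assert(
    kConcurrentGC
        ? std::is_same<AtomicIfConcurrentGC<int>, std::atomic<int>>::value
        : std::is_same<AtomicIfConcurrentGC<int>, impl::FakeAtomic<int>>::value,
    "std::atomic is selected exactly when the GC is concurrent");
static_assert(
    std::is_same<Mutex, impl::FakeMutex>::value == !kConcurrentGC,
    "FakeMutex is selected exactly when the GC is not concurrent");
} // namespace vm
} // namespace hermes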