/src/node/deps/v8/include/v8-sandbox.h
// Copyright 2024 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef INCLUDE_V8_SANDBOX_H_
#define INCLUDE_V8_SANDBOX_H_

#include <cstdint>

#include "v8-internal.h"  // NOLINT(build/include_directory)
#include "v8config.h"     // NOLINT(build/include_directory)

namespace v8 {

/**
 * A pointer tag used for wrapping and unwrapping `CppHeap` pointers as used
 * with JS API wrapper objects that rely on `v8::Object::Wrap()` and
 * `v8::Object::Unwrap()`.
 *
 * The CppHeapPointers use a range-based type checking scheme, where on access
 * to a pointer, the actual type of the pointer is checked to be within a
 * specified range of types. This allows supporting type hierarchies, where a
 * type check for a supertype must succeed for any subtype.
 *
 * The tag is currently in practice limited to 15 bits since it needs to fit
 * together with a marking bit into the unused parts of a pointer.
 */
enum class CppHeapPointerTag : uint16_t {
  kFirstTag = 0,
  kNullTag = 0,

  /**
   * The lower type ids are reserved for the embedder to assign. For that, the
   * main requirement is that all (transitive) child classes of a given parent
   * class have type ids in the same range, and that there are no unrelated
   * types in that range. For example, given the following type hierarchy:
   *
   *          A     F
   *         / \
   *        B   E
   *       / \
   *      C   D
   *
   * a potential type id assignment that satisfies these requirements is
   * {C: 0, D: 1, B: 2, A: 3, E: 4, F: 5}. With that, the type check for type A
   * would check for the range [0, 4], while the check for B would check range
   * [0, 2], and for F it would simply check [5, 5]. A commented sketch of such
   * an assignment follows this enum.
   *
   * In addition, there is an option for performance tweaks: if the size of the
   * type range corresponding to a supertype is a power of two and starts at a
   * power of two (e.g. [0x100, 0x13f]), then the compiler can often optimize
   * the type check to use even fewer instructions (essentially replace an AND +
   * SUB with a single AND).
   */

  kDefaultTag = 0x7000,

  kZappedEntryTag = 0x7ffd,
  kEvacuationEntryTag = 0x7ffe,
  kFreeEntryTag = 0x7fff,
  // The tags are limited to 15 bits, so the last tag is 0x7fff.
  kLastTag = 0x7fff,
};
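
// Illustrative sketch (not part of this header): one possible embedder-side
// tag assignment for the A/B/C/D/E/F hierarchy from the comment above, using
// the CppHeapPointerTagRange struct declared below. The concrete values are
// arbitrary; the only requirement is that each subtree occupies a contiguous
// range within the lower, embedder-reserved part of the tag space.
//
//   constexpr auto kTagC = static_cast<v8::CppHeapPointerTag>(0x0100);
//   constexpr auto kTagD = static_cast<v8::CppHeapPointerTag>(0x0101);
//   constexpr auto kTagB = static_cast<v8::CppHeapPointerTag>(0x0102);
//   constexpr auto kTagA = static_cast<v8::CppHeapPointerTag>(0x0103);
//   constexpr auto kTagE = static_cast<v8::CppHeapPointerTag>(0x0104);
//   constexpr auto kTagF = static_cast<v8::CppHeapPointerTag>(0x0105);
//
//   // A supertype check covers the contiguous range of its subtree:
//   constexpr v8::CppHeapPointerTagRange kRangeA(kTagC, kTagE);  // C..E
//   constexpr v8::CppHeapPointerTagRange kRangeB(kTagC, kTagB);  // C..B
//   constexpr v8::CppHeapPointerTagRange kRangeF(kTagF, kTagF);  // F only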

// Convenience struct to represent tag ranges. This is used for type checks
// against supertypes, which cover a range of types (their subtypes).
// Both the lower- and the upper bound are inclusive. In other words, this
// struct represents the range [lower_bound, upper_bound].
// TODO(saelo): reuse internal::TagRange here.
struct CppHeapPointerTagRange {
  constexpr CppHeapPointerTagRange(CppHeapPointerTag lower,
                                   CppHeapPointerTag upper)
      : lower_bound(lower), upper_bound(upper) {}
  CppHeapPointerTag lower_bound;
  CppHeapPointerTag upper_bound;

  // Check whether the tag of the given CppHeapPointerTable entry is within
  // this range. This method encodes implementation details of the
  // CppHeapPointerTable, which is necessary as it is used by
  // ReadCppHeapPointerField below.
  // Returns true if the check is successful and the tag of the given entry is
  // within this range, false otherwise.
  bool CheckTagOf(uint64_t entry) {
    // Note: the cast to uint32_t is important here. Otherwise, the uint16_t's
    // would be promoted to int in the range check below, which would result in
    // undefined behavior (signed integer underflow) if the actual value is less
    // than the lower bound. Then, the compiler would take advantage of the
    // undefined behavior and turn the range check into a simple
    // `actual_tag <= last_tag` comparison, which is incorrect.
    uint32_t actual_tag = static_cast<uint16_t>(entry);
    // The actual_tag is shifted to the left by one and contains the marking
    // bit in the LSB. To ignore that during the type check, simply add one to
    // the (shifted) range.
    constexpr int kTagShift = internal::kCppHeapPointerTagShift;
    uint32_t first_tag = static_cast<uint32_t>(lower_bound) << kTagShift;
    uint32_t last_tag = (static_cast<uint32_t>(upper_bound) << kTagShift) + 1;
    return actual_tag >= first_tag && actual_tag <= last_tag;
  }
};

constexpr CppHeapPointerTagRange kAnyCppHeapPointer(
    CppHeapPointerTag::kFirstTag, CppHeapPointerTag::kLastTag);
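
// Worked example (illustrative): CheckTagOf() compares the low 16 bits of a
// table entry against the shifted tag range. Assuming
// internal::kCppHeapPointerTagShift == 1 (the tag sits above the marking bit,
// as the comment in CheckTagOf() describes), a range of
// [kDefaultTag, kDefaultTag] checks for low bits in
// [0x7000 << 1, (0x7000 << 1) + 1] == [0xe000, 0xe001]:
//
//   v8::CppHeapPointerTagRange default_only(
//       v8::CppHeapPointerTag::kDefaultTag,
//       v8::CppHeapPointerTag::kDefaultTag);
//   default_only.CheckTagOf(0xe000);  // true: tag matches, marking bit clear
//   default_only.CheckTagOf(0xe001);  // true: tag matches, marking bit set
//   default_only.CheckTagOf(0xe002);  // false: tag 0x7001 is out of range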

/**
 * Hardware support for the V8 Sandbox.
 *
 * This is an experimental feature that may change or be removed without
 * further notice. Use at your own risk.
 */
class SandboxHardwareSupport {
 public:
  /**
   * Initialize sandbox hardware support. This needs to be called before
   * creating any thread that might access sandbox memory since it sets up
   * hardware permissions to the memory that will be inherited on clone.
   */
  V8_EXPORT static void InitializeBeforeThreadCreation();

  /**
   * Prepares the current thread for executing sandboxed code.
   *
   * This must be called on newly created threads before they execute any
   * sandboxed code (in particular any JavaScript or WebAssembly code). It
   * does not need to be invoked on threads that never execute sandboxed code,
   * although it is fine to do so from a security point of view.
   */
  V8_EXPORT static void PrepareCurrentThreadForHardwareSandboxing();
};
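
// Usage sketch (illustrative; the worker-thread setup is an assumption about
// the embedder, not something this header prescribes): initialize hardware
// support once before spawning threads, then prepare each thread that will run
// sandboxed code. Requires <thread> on the embedder side.
//
//   v8::SandboxHardwareSupport::InitializeBeforeThreadCreation();
//   std::thread js_worker([] {
//     v8::SandboxHardwareSupport::PrepareCurrentThreadForHardwareSandboxing();
//     // ... create an Isolate and run JavaScript / WebAssembly here ...
//   });
//   js_worker.join();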

namespace internal {

#ifdef V8_COMPRESS_POINTERS
V8_INLINE static Address* GetCppHeapPointerTableBase(v8::Isolate* isolate) {
  Address addr = reinterpret_cast<Address>(isolate) +
                 Internals::kIsolateCppHeapPointerTableOffset +
                 Internals::kExternalPointerTableBasePointerOffset;
  return *reinterpret_cast<Address**>(addr);
}
#endif  // V8_COMPRESS_POINTERS

template <typename T>
V8_INLINE static T* ReadCppHeapPointerField(v8::Isolate* isolate,
                                            Address heap_object_ptr, int offset,
                                            CppHeapPointerTagRange tag_range) {
#ifdef V8_COMPRESS_POINTERS
  // See src/sandbox/cppheap-pointer-table-inl.h. Logic duplicated here so
  // it can be inlined and doesn't require an additional call.
  const CppHeapPointerHandle handle =
      Internals::ReadRawField<CppHeapPointerHandle>(heap_object_ptr, offset);
  const uint32_t index = handle >> kExternalPointerIndexShift;
  const Address* table = GetCppHeapPointerTableBase(isolate);
  const std::atomic<Address>* ptr =
      reinterpret_cast<const std::atomic<Address>*>(&table[index]);
  Address entry = std::atomic_load_explicit(ptr, std::memory_order_relaxed);

  Address pointer = entry;
  if (V8_LIKELY(tag_range.CheckTagOf(entry))) {
    pointer = entry >> kCppHeapPointerPayloadShift;
  } else {
    // If the type check failed, we simply return nullptr here. That way:
    //  1. The null handle always results in nullptr being returned here, which
    //     is a desired property. Otherwise, we would need an explicit check for
    //     the null handle above, and therefore an additional branch. This
    //     works because the 0th entry of the table always contains nullptr
    //     tagged with the null tag (i.e. an all-zeros entry). As such,
    //     regardless of whether the type check succeeds, the result will
    //     always be nullptr.
    //  2. The returned pointer is guaranteed to crash even on platforms with
    //     top byte ignore (TBI), such as Arm64. The alternative would be to
    //     simply return the original entry with the left-shifted payload.
    //     However, due to TBI, an access to that may not always result in a
    //     crash (specifically, if the second most significant byte happens to
    //     be zero). In addition, there shouldn't be a difference on Arm64
    //     between returning nullptr and the original entry, since it will
    //     simply compile to a `csel x0, x8, xzr, lo` instead of a
    //     `csel x0, x10, x8, lo` instruction.
    pointer = 0;
  }
  return reinterpret_cast<T*>(pointer);
#else   // !V8_COMPRESS_POINTERS
  return reinterpret_cast<T*>(
      Internals::ReadRawField<Address>(heap_object_ptr, offset));
#endif  // !V8_COMPRESS_POINTERS
}
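
// Worked decode example (illustrative; assumes kCppHeapPointerPayloadShift ==
// 16, i.e. the payload sits above the 15-bit tag plus the marking bit, which
// may change): for an entry of 0x0000'5555'5000'e000, the low 16 bits 0xe000
// encode kDefaultTag with the marking bit clear, so a successful tag check
// recovers the pointer as entry >> 16 == 0x5555'5000. On a failed check the
// function returns nullptr instead, as explained above. The embedder-facing
// API that relies on these tags is v8::Object::Wrap() / v8::Object::Unwrap()
// (see the comment on CppHeapPointerTag at the top of this header).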

}  // namespace internal
}  // namespace v8

#endif  // INCLUDE_V8_SANDBOX_H_