Line data Source code
1 : // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 : // All Rights Reserved.
3 : //
4 : // Redistribution and use in source and binary forms, with or without
5 : // modification, are permitted provided that the following conditions are
6 : // met:
7 : //
8 : // - Redistributions of source code must retain the above copyright notice,
9 : // this list of conditions and the following disclaimer.
10 : //
11 : // - Redistribution in binary form must reproduce the above copyright
12 : // notice, this list of conditions and the following disclaimer in the
13 : // documentation and/or other materials provided with the distribution.
14 : //
15 : // - Neither the name of Sun Microsystems or the names of contributors may
16 : // be used to endorse or promote products derived from this software without
17 : // specific prior written permission.
18 : //
19 : // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20 : // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 : // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 : // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 : // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 : // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 : // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 : // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 : // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 : // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 : // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 :
31 : // The original source code covered by the above license above has been
32 : // modified significantly by Google Inc.
33 : // Copyright 2012 the V8 project authors. All rights reserved.
34 :
35 : #ifndef V8_ASSEMBLER_H_
36 : #define V8_ASSEMBLER_H_
37 :
38 : #include <forward_list>
39 :
40 : #include "src/code-comments.h"
41 : #include "src/cpu-features.h"
42 : #include "src/deoptimize-reason.h"
43 : #include "src/external-reference.h"
44 : #include "src/flags.h"
45 : #include "src/globals.h"
46 : #include "src/handles.h"
47 : #include "src/objects.h"
48 : #include "src/reglist.h"
49 : #include "src/reloc-info.h"
50 :
51 : namespace v8 {
52 :
53 : // Forward declarations.
54 : class ApiFunction;
55 :
56 : namespace internal {
57 :
58 : // Forward declarations.
59 : class EmbeddedData;
60 : class InstructionStream;
61 : class Isolate;
62 : class SCTableReference;
63 : class SourcePosition;
64 : class StatsCounter;
65 : class StringConstantBase;
66 :
67 : // -----------------------------------------------------------------------------
68 : // Optimization for far-jmp like instructions that can be replaced by shorter.
69 :
70 200748 : class JumpOptimizationInfo {
71 : public:
72 : bool is_collecting() const { return stage_ == kCollection; }
73 : bool is_optimizing() const { return stage_ == kOptimization; }
74 57008 : void set_optimizing() { stage_ = kOptimization; }
75 :
76 : bool is_optimizable() const { return optimizable_; }
77 57008 : void set_optimizable() { optimizable_ = true; }
78 :
79 : // Used to verify the instruction sequence is always the same in two stages.
80 : size_t hash_code() const { return hash_code_; }
81 65464 : void set_hash_code(size_t hash_code) { hash_code_ = hash_code; }
82 :
83 65464 : std::vector<uint32_t>& farjmp_bitmap() { return farjmp_bitmap_; }
84 :
85 : private:
86 : enum { kCollection, kOptimization } stage_ = kCollection;
87 : bool optimizable_ = false;
88 : std::vector<uint32_t> farjmp_bitmap_;
89 : size_t hash_code_ = 0u;
90 : };
91 :
// A request, recorded during assembly, for a heap object (a heap number or
// a string constant) to be created later and patched into the code at the
// recorded buffer offset. See AssemblerBase::RequestHeapObject().
class HeapObjectRequest {
 public:
  // Offset defaults to -1 ("not yet known"); see set_offset().
  explicit HeapObjectRequest(double heap_number, int offset = -1);
  explicit HeapObjectRequest(const StringConstantBase* string, int offset = -1);

  // Discriminates which member of the {value_} union is active.
  enum Kind { kHeapNumber, kStringConstant };
  Kind kind() const { return kind_; }

  // Only valid when kind() == kHeapNumber.
  double heap_number() const {
    DCHECK_EQ(kind(), kHeapNumber);
    return value_.heap_number;
  }

  // Only valid when kind() == kStringConstant.
  const StringConstantBase* string() const {
    DCHECK_EQ(kind(), kStringConstant);
    return value_.string;
  }

  // The code buffer offset at the time of the request.
  int offset() const {
    DCHECK_GE(offset_, 0);
    return offset_;
  }
  // Fills in an offset that was still unknown (negative) at construction
  // time; may only be called while the offset is unset.
  void set_offset(int offset) {
    DCHECK_LT(offset_, 0);
    offset_ = offset;
    DCHECK_GE(offset_, 0);
  }

 private:
  Kind kind_;

  // Tagged by {kind_}.
  union {
    double heap_number;
    const StringConstantBase* string;
  } value_;

  // Negative until set; see offset()/set_offset().
  int offset_;
};
131 :
132 : // -----------------------------------------------------------------------------
133 : // Platform independent assembler base class.
134 :
// Whether an assembler needs to be able to reference its own Code object.
// NOTE(review): semantics inferred from the name; confirm against the
// platform assemblers that consume this flag.
enum class CodeObjectRequired { kNo, kYes };
136 :
// Per-assembler configuration flags. Plain aggregate; obtain a sensible
// configuration via Default() rather than hand-building one.
struct V8_EXPORT_PRIVATE AssemblerOptions {
  // Prohibits using any V8-specific features of assembler like (isolates,
  // heap objects, external references, etc.).
  bool v8_agnostic_code = false;
  // Recording reloc info for external references and off-heap targets is
  // needed whenever code is serialized, e.g. into the snapshot or as a WASM
  // module. This flag allows this reloc info to be disabled for code that
  // will not survive process destruction.
  bool record_reloc_info_for_serialization = true;
  // Recording reloc info can be disabled wholesale. This is needed when the
  // assembler is used on existing code directly (e.g. JumpTableAssembler)
  // without any buffer to hold reloc information.
  bool disable_reloc_info_for_patching = false;
  // Enables access to exrefs by computing a delta from the root array.
  // Only valid if code will not survive the process.
  bool enable_root_array_delta_access = false;
  // Enables specific assembler sequences only used for the simulator.
  bool enable_simulator_code = false;
  // Enables use of isolate-independent constants, indirected through the
  // root array.
  // (macro assembler feature).
  bool isolate_independent_code = false;
  // Enables the use of isolate-independent builtins through an off-heap
  // trampoline. (macro assembler feature).
  bool inline_offheap_trampolines = false;
  // On some platforms, all code is within a given range in the process,
  // and the start of this range is configured here.
  Address code_range_start = 0;
  // Enable pc-relative calls/jumps on platforms that support it. When setting
  // this flag, the code range must be small enough to fit all offsets into
  // the instruction immediates.
  bool use_pc_relative_calls_and_jumps = false;

  // Constructs V8-agnostic set of options from current state.
  // (Defined out of line; presumably sets {v8_agnostic_code} and clears the
  // V8-specific options above — confirm in the implementation file.)
  AssemblerOptions EnableV8AgnosticCode() const;

  // Returns the standard options for code generated on {isolate}.
  static AssemblerOptions Default(
      Isolate* isolate, bool explicitly_support_serialization = false);
};
176 :
// Abstraction over the backing store an Assembler writes code into.
// Implementations may own the memory (see NewAssemblerBuffer) or wrap
// caller-provided memory (see ExternalAssemblerBuffer).
class AssemblerBuffer {
 public:
  virtual ~AssemblerBuffer() = default;
  // First byte of the buffer.
  virtual byte* start() const = 0;
  // Capacity of the buffer in bytes.
  virtual int size() const = 0;
  // Return a grown copy of this buffer. The contained data is uninitialized.
  // The data in {this} will still be read afterwards (until {this} is
  // destructed), but not written.
  virtual std::unique_ptr<AssemblerBuffer> Grow(int new_size)
      V8_WARN_UNUSED_RESULT = 0;
};
188 :
// Allocate an AssemblerBuffer which uses an existing buffer. This buffer cannot
// grow, so it must be large enough for all code emitted by the Assembler.
// The caller retains ownership of {buffer} and must keep it alive for the
// lifetime of the returned AssemblerBuffer.
V8_EXPORT_PRIVATE
std::unique_ptr<AssemblerBuffer> ExternalAssemblerBuffer(void* buffer,
                                                         int size);

// Allocate a new growable AssemblerBuffer with a given initial size.
V8_EXPORT_PRIVATE
std::unique_ptr<AssemblerBuffer> NewAssemblerBuffer(int size);
198 :
199 82948446 : class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
200 : public:
201 : AssemblerBase(const AssemblerOptions& options,
202 : std::unique_ptr<AssemblerBuffer>);
203 : virtual ~AssemblerBase();
204 :
205 : const AssemblerOptions& options() const { return options_; }
206 :
207 112 : bool emit_debug_code() const { return emit_debug_code_; }
208 44028 : void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
209 :
210 : bool predictable_code_size() const { return predictable_code_size_; }
211 0 : void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
212 :
213 : uint64_t enabled_cpu_features() const { return enabled_cpu_features_; }
214 : void set_enabled_cpu_features(uint64_t features) {
215 : enabled_cpu_features_ = features;
216 : }
217 : // Features are usually enabled by CpuFeatureScope, which also asserts that
218 : // the features are supported before they are enabled.
219 : bool IsEnabled(CpuFeature f) {
220 : return (enabled_cpu_features_ & (static_cast<uint64_t>(1) << f)) != 0;
221 : }
222 : void EnableCpuFeature(CpuFeature f) {
223 41469486 : enabled_cpu_features_ |= (static_cast<uint64_t>(1) << f);
224 : }
225 :
226 : bool is_constant_pool_available() const {
227 : if (FLAG_enable_embedded_constant_pool) {
228 : return constant_pool_available_;
229 : } else {
230 : // Embedded constant pool not supported on this architecture.
231 : UNREACHABLE();
232 : }
233 : }
234 :
235 : JumpOptimizationInfo* jump_optimization_info() {
236 : return jump_optimization_info_;
237 : }
238 : void set_jump_optimization_info(JumpOptimizationInfo* jump_opt) {
239 2515427 : jump_optimization_info_ = jump_opt;
240 : }
241 :
242 : void FinalizeJumpOptimizationInfo() {}
243 :
244 : // Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for
245 : // cross-snapshotting.
246 : static void QuietNaN(HeapObject nan) {}
247 :
248 227188994 : int pc_offset() const { return static_cast<int>(pc_ - buffer_start_); }
249 :
250 3343280 : byte* buffer_start() const { return buffer_->start(); }
251 3343701 : int buffer_size() const { return buffer_->size(); }
252 : int instruction_size() const { return pc_offset(); }
253 :
254 : // This function is called when code generation is aborted, so that
255 : // the assembler could clean up internal data structures.
256 0 : virtual void AbortedCodeGeneration() { }
257 :
258 : // Debugging
259 : void Print(Isolate* isolate);
260 :
261 : // Record an inline code comment that can be used by a disassembler.
262 : // Use --code-comments to enable.
263 19421941 : void RecordComment(const char* msg) {
264 19421941 : if (FLAG_code_comments) {
265 22923 : code_comments_writer_.Add(pc_offset(), std::string(msg));
266 : }
267 19421941 : }
268 :
269 : static const int kMinimalBufferSize = 4*KB;
270 :
271 : protected:
272 : // Add 'target' to the {code_targets_} vector, if necessary, and return the
273 : // offset at which it is stored.
274 : int AddCodeTarget(Handle<Code> target);
275 : Handle<Code> GetCodeTarget(intptr_t code_target_index) const;
276 : // Update to the code target at {code_target_index} to {target}.
277 : void UpdateCodeTarget(intptr_t code_target_index, Handle<Code> target);
278 : // Reserves space in the code target vector.
279 : void ReserveCodeTargetSpace(size_t num_of_code_targets);
280 :
281 : // The buffer into which code and relocation info are generated.
282 : std::unique_ptr<AssemblerBuffer> buffer_;
283 : // Cached from {buffer_->start()}, for faster access.
284 : byte* buffer_start_;
285 : std::forward_list<HeapObjectRequest> heap_object_requests_;
286 : // The program counter, which points into the buffer above and moves forward.
287 : // TODO(jkummerow): This should probably have type {Address}.
288 : byte* pc_;
289 :
290 : void set_constant_pool_available(bool available) {
291 : if (FLAG_enable_embedded_constant_pool) {
292 : constant_pool_available_ = available;
293 : } else {
294 : // Embedded constant pool not supported on this architecture.
295 : UNREACHABLE();
296 : }
297 : }
298 :
299 : // {RequestHeapObject} records the need for a future heap number allocation,
300 : // code stub generation or string allocation. After code assembly, each
301 : // platform's {Assembler::AllocateAndInstallRequestedHeapObjects} will
302 : // allocate these objects and place them where they are expected (determined
303 : // by the pc offset associated with each request).
304 : void RequestHeapObject(HeapObjectRequest request);
305 :
306 : bool ShouldRecordRelocInfo(RelocInfo::Mode rmode) const {
307 : DCHECK(!RelocInfo::IsNone(rmode));
308 54707348 : if (options().disable_reloc_info_for_patching) return false;
309 25109097 : if (RelocInfo::IsOnlyForSerializer(rmode) &&
310 24909300 : !options().record_reloc_info_for_serialization && !emit_debug_code()) {
311 : return false;
312 : }
313 : return true;
314 : }
315 :
316 : CodeCommentsWriter code_comments_writer_;
317 :
318 : private:
319 : // Before we copy code into the code space, we sometimes cannot encode
320 : // call/jump code targets as we normally would, as the difference between the
321 : // instruction's location in the temporary buffer and the call target is not
322 : // guaranteed to fit in the instruction's offset field. We keep track of the
323 : // code handles we encounter in calls in this vector, and encode the index of
324 : // the code handle in the vector instead.
325 : std::vector<Handle<Code>> code_targets_;
326 :
327 : const AssemblerOptions options_;
328 : uint64_t enabled_cpu_features_;
329 : bool emit_debug_code_;
330 : bool predictable_code_size_;
331 :
332 : // Indicates whether the constant pool can be accessed, which is only possible
333 : // if the pp register points to the current code object's constant pool.
334 : bool constant_pool_available_;
335 :
336 : JumpOptimizationInfo* jump_optimization_info_;
337 :
338 : // Constant pool.
339 : friend class FrameAndConstantPoolScope;
340 : friend class ConstantPoolUnavailableScope;
341 : };
342 :
343 : // Avoids emitting debug code during the lifetime of this scope object.
344 : class DontEmitDebugCodeScope {
345 : public:
346 : explicit DontEmitDebugCodeScope(AssemblerBase* assembler)
347 : : assembler_(assembler), old_value_(assembler->emit_debug_code()) {
348 : assembler_->set_emit_debug_code(false);
349 : }
350 : ~DontEmitDebugCodeScope() {
351 : assembler_->set_emit_debug_code(old_value_);
352 : }
353 : private:
354 : AssemblerBase* assembler_;
355 : bool old_value_;
356 : };
357 :
358 :
// Avoids using instructions that vary in size in unpredictable ways between the
// snapshot and the running VM.
class PredictableCodeSizeScope {
 public:
  // {expected_size} is the number of bytes the scoped code is expected to
  // emit; presumably checked against the actual emitted size in the
  // out-of-line destructor — confirm in the implementation file.
  PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size);
  ~PredictableCodeSizeScope();

 private:
  AssemblerBase* const assembler_;
  int const expected_size_;
  int const start_offset_;  // pc_offset() captured at scope entry.
  bool const old_value_;    // predictable_code_size() value to restore.
};
372 :
373 :
// Enable a specified feature within a scope.
// Per AssemblerBase::IsEnabled's comment, this is the usual way features are
// enabled, and it asserts the feature is supported before enabling it.
class CpuFeatureScope {
 public:
  enum CheckPolicy {
    kCheckSupported,
    kDontCheckSupported,
  };

#ifdef DEBUG
  // Debug builds: out-of-line implementation that toggles the feature on
  // {assembler} and restores {old_enabled_} on destruction — confirm in the
  // implementation file.
  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
                  CheckPolicy check = kCheckSupported);
  ~CpuFeatureScope();

 private:
  AssemblerBase* assembler_;
  uint64_t old_enabled_;  // Previous enabled_cpu_features() bit set.
#else
  // Release builds: the scope is a no-op.
  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
                  CheckPolicy check = kCheckSupported) {}
  ~CpuFeatureScope() {  // NOLINT (modernize-use-equals-default)
    // Define a destructor to avoid unused variable warnings.
  }
#endif
};
398 :
399 : } // namespace internal
400 : } // namespace v8
401 : #endif // V8_ASSEMBLER_H_
|