// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#ifndef V8_ASSEMBLER_H_
#define V8_ASSEMBLER_H_

#include <forward_list>

#include "src/code-comments.h"
#include "src/cpu-features.h"
#include "src/deoptimize-reason.h"
#include "src/external-reference.h"
#include "src/flags.h"
#include "src/globals.h"
#include "src/handles.h"
#include "src/objects.h"
#include "src/reglist.h"
#include "src/reloc-info.h"

namespace v8 {

// Forward declarations.
class ApiFunction;

namespace internal {

// Forward declarations.
class EmbeddedData;
class InstructionStream;
class Isolate;
class SCTableReference;
class SourcePosition;
class StatsCounter;
class StringConstantBase;

// -----------------------------------------------------------------------------
// Optimization for far-jmp-like instructions that can be replaced by shorter
// ones.

class JumpOptimizationInfo {
 public:
  bool is_collecting() const { return stage_ == kCollection; }
  bool is_optimizing() const { return stage_ == kOptimization; }
  void set_optimizing() { stage_ = kOptimization; }

  bool is_optimizable() const { return optimizable_; }
  void set_optimizable() { optimizable_ = true; }

  // Used to verify that the instruction sequence is the same in both stages.
  size_t hash_code() const { return hash_code_; }
  void set_hash_code(size_t hash_code) { hash_code_ = hash_code; }

  std::vector<uint32_t>& farjmp_bitmap() { return farjmp_bitmap_; }

 private:
  enum { kCollection, kOptimization } stage_ = kCollection;
  bool optimizable_ = false;
  std::vector<uint32_t> farjmp_bitmap_;
  size_t hash_code_ = 0u;
};
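
// Usage sketch (editor's illustration, not part of the original header): a
// client typically assembles the same code twice, first collecting jump-site
// information and then, if the collected data allows it, re-assembling with
// shorter jumps. The {AssembleCode} helper is hypothetical and stands for any
// code-emission pass that consults the info object.
//
//   JumpOptimizationInfo jump_opt;
//   AssembleCode(&jump_opt);    // First pass: stage is kCollection.
//   if (jump_opt.is_optimizable()) {
//     jump_opt.set_optimizing();
//     AssembleCode(&jump_opt);  // Second pass: short forms are emitted.
//   }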

class HeapObjectRequest {
 public:
  explicit HeapObjectRequest(double heap_number, int offset = -1);
  explicit HeapObjectRequest(const StringConstantBase* string, int offset = -1);

  enum Kind { kHeapNumber, kStringConstant };
  Kind kind() const { return kind_; }

  double heap_number() const {
    DCHECK_EQ(kind(), kHeapNumber);
    return value_.heap_number;
  }

  const StringConstantBase* string() const {
    DCHECK_EQ(kind(), kStringConstant);
    return value_.string;
  }

  // The code buffer offset at the time of the request.
  int offset() const {
    DCHECK_GE(offset_, 0);
    return offset_;
  }
  void set_offset(int offset) {
    DCHECK_LT(offset_, 0);
    offset_ = offset;
    DCHECK_GE(offset_, 0);
  }

 private:
  Kind kind_;

  union {
    double heap_number;
    const StringConstantBase* string;
  } value_;

  int offset_;
};
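
// Illustration (added by the editor, not part of the original header): a
// platform assembler records a request at the current pc offset, and the
// allocation is patched in later by the platform's
// AllocateAndInstallRequestedHeapObjects(). {masm} is an assumed assembler.
//
//   HeapObjectRequest request(0.5);         // A kHeapNumber request.
//   request.set_offset(masm->pc_offset());  // Bind it to the emit position.
//   // After assembly, the allocated heap number is written back at the
//   // recorded offset.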

// -----------------------------------------------------------------------------
// Platform independent assembler base class.

enum class CodeObjectRequired { kNo, kYes };

struct V8_EXPORT_PRIVATE AssemblerOptions {
  // Prohibits using any V8-specific features of the assembler (isolates,
  // heap objects, external references, etc.).
  bool v8_agnostic_code = false;
  // Recording reloc info for external references and off-heap targets is
  // needed whenever code is serialized, e.g. into the snapshot or as a WASM
  // module. This flag allows this reloc info to be disabled for code that
  // will not survive process destruction.
  bool record_reloc_info_for_serialization = true;
  // Recording reloc info can be disabled wholesale. This is needed when the
  // assembler is used on existing code directly (e.g. JumpTableAssembler)
  // without any buffer to hold reloc information.
  bool disable_reloc_info_for_patching = false;
  // Enables access to external references by computing a delta from the root
  // array. Only valid if the code will not survive the process.
  bool enable_root_array_delta_access = false;
  // Enables specific assembler sequences only used for the simulator.
  bool enable_simulator_code = false;
  // Enables use of isolate-independent constants, indirected through the
  // root array (macro assembler feature).
  bool isolate_independent_code = false;
  // Enables the use of isolate-independent builtins through an off-heap
  // trampoline (macro assembler feature).
  bool inline_offheap_trampolines = false;
  // On some platforms, all code is within a given range in the process,
  // and the start of this range is configured here.
  Address code_range_start = 0;
  // Enable pc-relative calls/jumps on platforms that support it. When setting
  // this flag, the code range must be small enough to fit all offsets into
  // the instruction immediates.
  bool use_pc_relative_calls_and_jumps = false;
  // Enables the collection of information useful for the generation of unwind
  // info. This is useful on some platforms (e.g. Win64) where the unwind info
  // depends on the function prologue/epilogue.
  bool collect_win64_unwind_info = false;

  // Constructs a V8-agnostic set of options from the current state.
  AssemblerOptions EnableV8AgnosticCode() const;

  static AssemblerOptions Default(
      Isolate* isolate, bool explicitly_support_serialization = false);
};
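
// Example (editor's sketch, not part of the original header): most callers
// start from the isolate-derived defaults and override individual flags. The
// {isolate} pointer is assumed to be available.
//
//   AssemblerOptions options = AssemblerOptions::Default(isolate);
//   options.collect_win64_unwind_info = true;  // Opt into Win64 unwind data.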

class AssemblerBuffer {
 public:
  virtual ~AssemblerBuffer() = default;
  virtual byte* start() const = 0;
  virtual int size() const = 0;
  // Return a grown copy of this buffer. The contained data is uninitialized.
  // The data in {this} will still be read afterwards (until {this} is
  // destructed), but not written.
  virtual std::unique_ptr<AssemblerBuffer> Grow(int new_size)
      V8_WARN_UNUSED_RESULT = 0;
};

// Allocate an AssemblerBuffer which uses an existing buffer. This buffer cannot
// grow, so it must be large enough for all code emitted by the Assembler.
V8_EXPORT_PRIVATE
std::unique_ptr<AssemblerBuffer> ExternalAssemblerBuffer(void* buffer,
                                                         int size);

// Allocate a new growable AssemblerBuffer with a given initial size.
V8_EXPORT_PRIVATE
std::unique_ptr<AssemblerBuffer> NewAssemblerBuffer(int size);
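
// Sketch (editor's illustration, not part of the original header): the two
// buffer flavors declared above. KB comes from src/globals.h; the external
// variant cannot grow, so its backing store must be sized generously up front.
//
//   std::unique_ptr<AssemblerBuffer> growable = NewAssemblerBuffer(4 * KB);
//
//   byte backing_store[KB];
//   std::unique_ptr<AssemblerBuffer> fixed =
//       ExternalAssemblerBuffer(backing_store, sizeof(backing_store));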

class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
 public:
  AssemblerBase(const AssemblerOptions& options,
                std::unique_ptr<AssemblerBuffer>);
  virtual ~AssemblerBase();

  const AssemblerOptions& options() const { return options_; }

  bool emit_debug_code() const { return emit_debug_code_; }
  void set_emit_debug_code(bool value) { emit_debug_code_ = value; }

  bool predictable_code_size() const { return predictable_code_size_; }
  void set_predictable_code_size(bool value) { predictable_code_size_ = value; }

  uint64_t enabled_cpu_features() const { return enabled_cpu_features_; }
  void set_enabled_cpu_features(uint64_t features) {
    enabled_cpu_features_ = features;
  }
  // Features are usually enabled by CpuFeatureScope, which also asserts that
  // the features are supported before they are enabled.
  bool IsEnabled(CpuFeature f) {
    return (enabled_cpu_features_ & (static_cast<uint64_t>(1) << f)) != 0;
  }
  void EnableCpuFeature(CpuFeature f) {
    enabled_cpu_features_ |= (static_cast<uint64_t>(1) << f);
  }

  bool is_constant_pool_available() const {
    if (FLAG_enable_embedded_constant_pool) {
      return constant_pool_available_;
    } else {
      // Embedded constant pool not supported on this architecture.
      UNREACHABLE();
    }
  }

  JumpOptimizationInfo* jump_optimization_info() {
    return jump_optimization_info_;
  }
  void set_jump_optimization_info(JumpOptimizationInfo* jump_opt) {
    jump_optimization_info_ = jump_opt;
  }

  void FinalizeJumpOptimizationInfo() {}

  // Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for
  // cross-snapshotting.
  static void QuietNaN(HeapObject nan) {}

  int pc_offset() const { return static_cast<int>(pc_ - buffer_start_); }

  byte* buffer_start() const { return buffer_->start(); }
  int buffer_size() const { return buffer_->size(); }
  int instruction_size() const { return pc_offset(); }

  // This function is called when code generation is aborted, so that
  // the assembler can clean up internal data structures.
  virtual void AbortedCodeGeneration() {}

  // Debugging
  void Print(Isolate* isolate);

  // Record an inline code comment that can be used by a disassembler.
  // Use --code-comments to enable.
  void RecordComment(const char* msg) {
    if (FLAG_code_comments) {
      code_comments_writer_.Add(pc_offset(), std::string(msg));
    }
  }

  static const int kMinimalBufferSize = 4 * KB;

 protected:
  // Add 'target' to the {code_targets_} vector, if necessary, and return the
  // offset at which it is stored.
  int AddCodeTarget(Handle<Code> target);
  Handle<Code> GetCodeTarget(intptr_t code_target_index) const;
  // Update the code target at {code_target_index} to {target}.
  void UpdateCodeTarget(intptr_t code_target_index, Handle<Code> target);

  // The buffer into which code and relocation info are generated.
  std::unique_ptr<AssemblerBuffer> buffer_;
  // Cached from {buffer_->start()}, for faster access.
  byte* buffer_start_;
  std::forward_list<HeapObjectRequest> heap_object_requests_;
  // The program counter, which points into the buffer above and moves forward.
  // TODO(jkummerow): This should probably have type {Address}.
  byte* pc_;

  void set_constant_pool_available(bool available) {
    if (FLAG_enable_embedded_constant_pool) {
      constant_pool_available_ = available;
    } else {
      // Embedded constant pool not supported on this architecture.
      UNREACHABLE();
    }
  }

  // {RequestHeapObject} records the need for a future heap number allocation,
  // code stub generation or string allocation. After code assembly, each
  // platform's {Assembler::AllocateAndInstallRequestedHeapObjects} will
  // allocate these objects and place them where they are expected (determined
  // by the pc offset associated with each request).
  void RequestHeapObject(HeapObjectRequest request);

  bool ShouldRecordRelocInfo(RelocInfo::Mode rmode) const {
    DCHECK(!RelocInfo::IsNone(rmode));
    if (options().disable_reloc_info_for_patching) return false;
    if (RelocInfo::IsOnlyForSerializer(rmode) &&
        !options().record_reloc_info_for_serialization && !emit_debug_code()) {
      return false;
    }
    return true;
  }

  CodeCommentsWriter code_comments_writer_;

 private:
  // Before we copy code into the code space, we sometimes cannot encode
  // call/jump code targets as we normally would, as the difference between the
  // instruction's location in the temporary buffer and the call target is not
  // guaranteed to fit in the instruction's offset field. We keep track of the
  // code handles we encounter in calls in this vector, and encode the index of
  // the code handle in the vector instead.
  std::vector<Handle<Code>> code_targets_;

  const AssemblerOptions options_;
  uint64_t enabled_cpu_features_;
  bool emit_debug_code_;
  bool predictable_code_size_;

  // Indicates whether the constant pool can be accessed, which is only possible
  // if the pp register points to the current code object's constant pool.
  bool constant_pool_available_;

  JumpOptimizationInfo* jump_optimization_info_;

  // Constant pool.
  friend class FrameAndConstantPoolScope;
  friend class ConstantPoolUnavailableScope;
};
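
// Putting it together (editor's sketch, not part of the original header): a
// platform Assembler derived from AssemblerBase is constructed from options
// and a buffer, emits code, and exposes the running pc offset. {Assembler}
// stands for any backend and {isolate} is assumed to exist.
//
//   Assembler masm(AssemblerOptions::Default(isolate),
//                  NewAssemblerBuffer(AssemblerBase::kMinimalBufferSize));
//   masm.RecordComment("-- prologue --");   // Shown with --code-comments.
//   // ... emit instructions ...
//   int emitted = masm.instruction_size();  // Same as masm.pc_offset().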

// Avoids emitting debug code during the lifetime of this scope object.
class DontEmitDebugCodeScope {
 public:
  explicit DontEmitDebugCodeScope(AssemblerBase* assembler)
      : assembler_(assembler), old_value_(assembler->emit_debug_code()) {
    assembler_->set_emit_debug_code(false);
  }
  ~DontEmitDebugCodeScope() {
    assembler_->set_emit_debug_code(old_value_);
  }

 private:
  AssemblerBase* assembler_;
  bool old_value_;
};
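
// Example (editor's illustration, not part of the original header):
// suppressing --debug-code checks for a region of emitted code; {masm} is an
// assumed AssemblerBase-derived assembler.
//
//   {
//     DontEmitDebugCodeScope no_debug_code(&masm);
//     // emit_debug_code() is false here; the old value is restored on exit.
//   }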

// Avoids using instructions that vary in size in unpredictable ways between the
// snapshot and the running VM.
class PredictableCodeSizeScope {
 public:
  PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size);
  ~PredictableCodeSizeScope();

 private:
  AssemblerBase* const assembler_;
  int const expected_size_;
  int const start_offset_;
  bool const old_value_;
};

// Enables a specified CPU feature within a scope.
class V8_EXPORT_PRIVATE CpuFeatureScope {
 public:
  enum CheckPolicy {
    kCheckSupported,
    kDontCheckSupported,
  };

#ifdef DEBUG
  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
                  CheckPolicy check = kCheckSupported);
  ~CpuFeatureScope();

 private:
  AssemblerBase* assembler_;
  uint64_t old_enabled_;
#else
  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
                  CheckPolicy check = kCheckSupported) {}
  ~CpuFeatureScope() {  // NOLINT (modernize-use-equals-default)
    // Define a destructor to avoid unused variable warnings.
  }
#endif
};
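
// Usage sketch (editor's illustration, not part of the original header):
// guarding an optional instruction sequence on a CPU feature. The AVX feature
// and the CpuFeatures::IsSupported() gate reflect the usual pattern on x64,
// but both are assumptions here; {masm} is an assumed assembler.
//
//   if (CpuFeatures::IsSupported(AVX)) {
//     CpuFeatureScope avx_scope(&masm, AVX);
//     // AVX-only instructions may be emitted here; IsEnabled(AVX) is true.
//   }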

}  // namespace internal
}  // namespace v8
#endif  // V8_ASSEMBLER_H_