// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/assembler-inl.h"
#include "src/base/utils/random-number-generator.h"
#include "src/code-stub-assembler.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/linkage.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/smi.h"
#include "src/optimized-compilation-info.h"

#include "test/cctest/cctest.h"
#include "test/cctest/compiler/code-assembler-tester.h"
#include "test/cctest/compiler/function-tester.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ assembler.

namespace {

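// Note: these sizes follow from the constants used below; for instance, on a
// 64-bit target kTagged, kFloat32 and kFloat64 each fit in one pointer-sized
// slot while kSimd128 needs two.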
int GetSlotSizeInBytes(MachineRepresentation rep) {
  switch (rep) {
    case MachineRepresentation::kTagged:
      // Spill slots for tagged values are always uncompressed.
      return kSystemPointerSize;
    case MachineRepresentation::kFloat32:
      return kSystemPointerSize;
    case MachineRepresentation::kFloat64:
      return kDoubleSize;
    case MachineRepresentation::kSimd128:
      return kSimd128Size;
    default:
      break;
  }
  UNREACHABLE();
}

// Forward declaration.
Handle<Code> BuildTeardownFunction(Isolate* isolate,
                                   CallDescriptor* call_descriptor,
                                   std::vector<AllocatedOperand> parameters);

// Build the `setup` function. It takes a code object and a FixedArray as
// parameters and calls the former while passing it each element of the array
// as arguments:
// ~~~
// FixedArray setup(CodeObject* test, FixedArray state_in) {
//   FixedArray state_out = AllocateZeroedFixedArray(state_in.length());
//   // `test` will tail-call to its first parameter which will be `teardown`.
//   return test(teardown, state_out, state_in[0], state_in[1],
//               state_in[2], ...);
// }
// ~~~
//
// This function needs to convert each element of the FixedArray to raw unboxed
// values to pass to the `test` function. The array will have been created
// using `GenerateInitialState()` and needs to be converted in the following
// way:
//
// | Parameter type | FixedArray element | Conversion                         |
// |----------------+--------------------+------------------------------------|
// | kTagged        | Smi                | None.                              |
// | kFloat32       | HeapNumber         | Load value and convert to Float32. |
// | kFloat64       | HeapNumber         | Load value.                        |
// | kSimd128       | FixedArray<Smi>[4] | Untag each Smi and write the       |
// |                |                    | results into lanes of a new        |
// |                |                    | 128-bit vector.                    |
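//
// For instance, the kSimd128 case is handled along the lines of this sketch
// (pseudocode mirroring the calls below; names are illustrative):
// ~~~
// vector = I32x4Splat(0);
// for (int lane = 0; lane < 4; lane++) {
//   vector = I32x4ReplaceLane(lane)(vector, Untag(element[lane]));
// }
// element = vector;
// ~~~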
//
Handle<Code> BuildSetupFunction(Isolate* isolate,
                                CallDescriptor* call_descriptor,
                                std::vector<AllocatedOperand> parameters) {
  CodeAssemblerTester tester(isolate, 2, Code::BUILTIN, "setup");
  CodeStubAssembler assembler(tester.state());
  std::vector<Node*> params;
  // The first parameter is always the callee.
  params.push_back(__ Parameter(0));
  params.push_back(__ HeapConstant(
      BuildTeardownFunction(isolate, call_descriptor, parameters)));
  // First allocate the FixedArray which will hold the final results. Here we
  // should take care of all allocations, meaning we allocate HeapNumbers and
  // FixedArrays representing Simd128 values.
  TNode<FixedArray> state_out =
      __ AllocateZeroedFixedArray(__ IntPtrConstant(parameters.size()));
  for (int i = 0; i < static_cast<int>(parameters.size()); i++) {
    switch (parameters[i].representation()) {
      case MachineRepresentation::kTagged:
        break;
      case MachineRepresentation::kFloat32:
      case MachineRepresentation::kFloat64:
        __ StoreFixedArrayElement(state_out, i, __ AllocateHeapNumber());
        break;
      case MachineRepresentation::kSimd128: {
        TNode<FixedArray> vector =
            __ AllocateZeroedFixedArray(__ IntPtrConstant(4));
        for (int lane = 0; lane < 4; lane++) {
          __ StoreFixedArrayElement(vector, lane, __ SmiConstant(0));
        }
        __ StoreFixedArrayElement(state_out, i, vector);
        break;
      }
      default:
        UNREACHABLE();
        break;
    }
  }
  params.push_back(state_out);
  // Then take each element of the initial state and pass them as arguments.
  TNode<FixedArray> state_in = __ Cast(__ Parameter(1));
  for (int i = 0; i < static_cast<int>(parameters.size()); i++) {
    Node* element = __ LoadFixedArrayElement(state_in, __ IntPtrConstant(i));
    // Unbox all elements before passing them as arguments.
    switch (parameters[i].representation()) {
      // Tagged parameters are Smis, they do not need unboxing.
      case MachineRepresentation::kTagged:
        break;
      case MachineRepresentation::kFloat32:
        element = __ TruncateFloat64ToFloat32(__ LoadHeapNumberValue(element));
        break;
      case MachineRepresentation::kFloat64:
        element = __ LoadHeapNumberValue(element);
        break;
      case MachineRepresentation::kSimd128: {
        Node* vector = tester.raw_assembler_for_testing()->AddNode(
            tester.raw_assembler_for_testing()->machine()->I32x4Splat(),
            __ Int32Constant(0));
        for (int lane = 0; lane < 4; lane++) {
          TNode<Int32T> lane_value = __ LoadAndUntagToWord32FixedArrayElement(
              __ CAST(element), __ IntPtrConstant(lane));
          vector = tester.raw_assembler_for_testing()->AddNode(
              tester.raw_assembler_for_testing()->machine()->I32x4ReplaceLane(
                  lane),
              vector, lane_value);
        }
        element = vector;
        break;
      }
      default:
        UNREACHABLE();
        break;
    }
    params.push_back(element);
  }
  __ Return(tester.raw_assembler_for_testing()->AddNode(
      tester.raw_assembler_for_testing()->common()->Call(call_descriptor),
      static_cast<int>(params.size()), params.data()));
  return tester.GenerateCodeCloseAndEscape();
}

// Build the `teardown` function. It takes a FixedArray as argument, fills it
// with the rest of its parameters and returns it. The parameters need to be
// consistent with `parameters`.
// ~~~
// FixedArray teardown(CodeObject* /* unused */, FixedArray result,
//                     // Tagged registers.
//                     Object r0, Object r1, ...,
//                     // FP registers.
//                     Float32 s0, Float64 d1, ...,
//                     // Mixed stack slots.
//                     Float64 mem0, Object mem1, Float32 mem2, ...) {
//   result[0] = r0;
//   result[1] = r1;
//   ...
//   result[..] = s0;
//   ...
//   result[..] = mem0;
//   ...
//   return result;
// }
// ~~~
//
// This function needs to convert its parameters into values suitable for a
// FixedArray, essentially reversing what the `setup` function did:
//
// | Parameter type | Parameter value   | Conversion                         |
// |----------------+-------------------+------------------------------------|
// | kTagged        | Smi or HeapNumber | None.                              |
// | kFloat32       | Raw Float32       | Convert to Float64.                |
// | kFloat64       | Raw Float64       | None.                              |
// | kSimd128       | Raw Simd128       | Split into 4 Word32 values and tag |
// |                |                   | them.                              |
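//
// For instance, the kSimd128 case is handled along the lines of this sketch
// (pseudocode mirroring the calls below; names are illustrative):
// ~~~
// for (int lane = 0; lane < 4; lane++) {
//   result[i][lane] = SmiFromInt32(I32x4ExtractLane(lane)(param));
// }
// ~~~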
//
// Note that it is possible for a `kTagged` value to go from a Smi to a
// HeapNumber. This is because `AssembleMove` will allocate a new HeapNumber if
// it is asked to move a FP constant to a tagged register or slot.
//
// Finally, it is important that this function does not call `RecordWrite`,
// which is why "setup" is in charge of all allocations and we are using
// SKIP_WRITE_BARRIER. The reason for this is that `RecordWrite` may clobber
// the top 64 bits of Simd128 registers. This is the case on x64, ia32 and
// Arm64, for example.
Handle<Code> BuildTeardownFunction(Isolate* isolate,
                                   CallDescriptor* call_descriptor,
                                   std::vector<AllocatedOperand> parameters) {
  CodeAssemblerTester tester(isolate, call_descriptor, "teardown");
  CodeStubAssembler assembler(tester.state());
  TNode<FixedArray> result_array = __ Cast(__ Parameter(1));
  for (int i = 0; i < static_cast<int>(parameters.size()); i++) {
    // The first argument is not used and the second is "result_array".
    Node* param = __ Parameter(i + 2);
    switch (parameters[i].representation()) {
      case MachineRepresentation::kTagged:
        __ StoreFixedArrayElement(result_array, i, param, SKIP_WRITE_BARRIER);
        break;
      // Box FP values into HeapNumbers.
      case MachineRepresentation::kFloat32:
        param =
            tester.raw_assembler_for_testing()->ChangeFloat32ToFloat64(param);
        V8_FALLTHROUGH;
      case MachineRepresentation::kFloat64:
        __ StoreObjectFieldNoWriteBarrier(
            __ LoadFixedArrayElement(result_array, i), HeapNumber::kValueOffset,
            param, MachineRepresentation::kFloat64);
        break;
      case MachineRepresentation::kSimd128: {
        TNode<FixedArray> vector =
            __ Cast(__ LoadFixedArrayElement(result_array, i));
        for (int lane = 0; lane < 4; lane++) {
          Node* lane_value =
              __ SmiFromInt32(tester.raw_assembler_for_testing()->AddNode(
                  tester.raw_assembler_for_testing()
                      ->machine()
                      ->I32x4ExtractLane(lane),
                  param));
          __ StoreFixedArrayElement(vector, lane, lane_value,
                                    SKIP_WRITE_BARRIER);
        }
        break;
      }
      default:
        UNREACHABLE();
        break;
    }
  }
  __ Return(result_array);
  return tester.GenerateCodeCloseAndEscape();
}

// Print the content of `value`, representing the register or stack slot
// described by `operand`.
void PrintStateValue(std::ostream& os, Isolate* isolate, Handle<Object> value,
                     AllocatedOperand operand) {
  switch (operand.representation()) {
    case MachineRepresentation::kTagged:
      if (value->IsSmi()) {
        os << Smi::cast(*value)->value();
      } else {
        os << value->Number();
      }
      break;
    case MachineRepresentation::kFloat32:
    case MachineRepresentation::kFloat64:
      os << value->Number();
      break;
    case MachineRepresentation::kSimd128: {
      FixedArray vector = FixedArray::cast(*value);
      os << "[";
      for (int lane = 0; lane < 4; lane++) {
        os << Smi::cast(*vector->GetValueChecked<Smi>(isolate, lane))->value();
        if (lane < 3) {
          os << ", ";
        }
      }
      os << "]";
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
  os << " (" << operand.representation() << " ";
  if (operand.location_kind() == AllocatedOperand::REGISTER) {
    os << "register";
  } else {
    DCHECK_EQ(operand.location_kind(), AllocatedOperand::STACK_SLOT);
    os << "stack slot";
  }
  os << ")";
}

bool TestSimd128Moves() {
  return CpuFeatures::SupportsWasmSimd128();
}

}  // namespace

#undef __

// Representation of a test environment. It describes a set of registers, stack
// slots and constants available to the CodeGeneratorTester to perform moves
// with. It has the ability to randomly generate lists of moves and run the
// code generated by the CodeGeneratorTester.
//
// The following representations are tested:
//   - kTagged
//   - kFloat32
//   - kFloat64
//   - kSimd128 (if supported)
// There is no need to test using Word32 or Word64 as they are the same as
// Tagged as far as the code generator is concerned.
//
// Testing the generated code is achieved by wrapping it around `setup` and
// `teardown` functions, written using the CodeStubAssembler. The key idea here
// is that `teardown` and the generated code share the same custom
// CallDescriptor. This descriptor assigns parameters to either registers or
// stack slots of a given representation and therefore essentially describes
// the environment.
//
// What happens is the following:
//
// - The `setup` function receives a FixedArray as the initial state. It
//   unpacks it and passes each element as arguments to the generated code
//   `test`. We also pass the `teardown` function as a first argument as well
//   as a newly allocated FixedArray as a second argument which will hold the
//   final results. Thanks to the custom CallDescriptor, registers and stack
//   slots get initialised according to the content of the initial FixedArray.
//
// - The `test` function performs the list of moves on its parameters and
//   eventually tail-calls to its first parameter, which is the `teardown`
//   function.
//
// - The `teardown` function receives the final results as a FixedArray, fills
//   it with the rest of its arguments and returns it. Thanks to the
//   tail-call, this is as if the `setup` function called `teardown` directly,
//   except now moves were performed!
//
//   .----------------setup--------------------------.
//   | Take a FixedArray as parameters with          |
//   | all the initial values of registers           |
//   | and stack slots.                              | <- CodeStubAssembler
//   |                                               |
//   | Allocate a new FixedArray `result` with       |
//   | initial values.                               |
//   |                                               |
//   | Call test(teardown, result, state[0],         |
//   |           state[1], state[2], ...);           |
//   '-----------------------------------------------'
//     |
//     V
//   .----------------test-------------------------------.
//   | - Move(param3, param42);                          |
//   | - Swap(param64, param4);                          |
//   | - Move(param2, param6);                           | <- CodeGeneratorTester
//   | ...                                               |
//   |                                                   |
//   | // "teardown" is the first parameter as well as   |
//   | // the callee.                                    |
//   | TailCall teardown(teardown, result, param2, ...); |
//   '---------------------------------------------------'
//     |
//     V
//   .----------------teardown---------------------------.
//   | Fill in the incoming `result` FixedArray with all |
//   | parameters and return it.                         | <- CodeStubAssembler
//   '---------------------------------------------------'

class TestEnvironment : public HandleAndZoneScope {
 public:
  // These constants may be tuned to experiment with different environments.

#ifdef V8_TARGET_ARCH_IA32
  static constexpr int kGeneralRegisterCount = 3;
#else
  static constexpr int kGeneralRegisterCount = 4;
#endif
  static constexpr int kDoubleRegisterCount = 6;

  static constexpr int kTaggedSlotCount = 64;
  static constexpr int kFloat32SlotCount = 64;
  static constexpr int kFloat64SlotCount = 64;
  static constexpr int kSimd128SlotCount = 16;

  // TODO(all): Test all types of constants (e.g. ExternalReference and
  // HeapObject).
  static constexpr int kSmiConstantCount = 4;
  static constexpr int kFloatConstantCount = 4;
  static constexpr int kDoubleConstantCount = 4;

  TestEnvironment()
      : blocks_(1, NewBlock(main_zone(), RpoNumber::FromInt(0)), main_zone()),
        instructions_(main_isolate(), main_zone(), &blocks_),
        rng_(CcTest::random_number_generator()),
        supported_reps_({MachineRepresentation::kTagged,
                         MachineRepresentation::kFloat32,
                         MachineRepresentation::kFloat64}) {
    stack_slot_count_ =
        kTaggedSlotCount + kFloat32SlotCount + kFloat64SlotCount;
    if (TestSimd128Moves()) {
      stack_slot_count_ += kSimd128SlotCount;
      supported_reps_.push_back(MachineRepresentation::kSimd128);
    }
    // The "teardown" and "test" functions share the same descriptor with the
    // following signature:
    // ~~~
    // FixedArray f(CodeObject* teardown, FixedArray preallocated_result,
    //              // Tagged registers.
    //              Object, Object, ...,
    //              // FP registers.
    //              Float32, Float64, Simd128, ...,
    //              // Mixed stack slots.
    //              Float64, Object, Float32, Simd128, ...);
    // ~~~
    LocationSignature::Builder test_signature(
        main_zone(), 1,
        2 + kGeneralRegisterCount + kDoubleRegisterCount + stack_slot_count_);

    // The first parameter will be the code object of the "teardown"
    // function. This way, the "test" function can tail-call to it.
    test_signature.AddParam(LinkageLocation::ForRegister(
        kReturnRegister0.code(), MachineType::AnyTagged()));

    // The second parameter will be a pre-allocated FixedArray that the
    // "teardown" function will fill with results and then return. We place
    // this parameter in the first stack argument slot, which always has index
    // -1; the slots used to perform moves therefore start at -2.
    test_signature.AddParam(
        LinkageLocation::ForCallerFrameSlot(-1, MachineType::AnyTagged()));
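    // For instance (illustrative, on a 64-bit target where a Float64 takes
    // one slot and a Simd128 takes two): allocating a Float64 and then a
    // Simd128 below would use slots -2 and -3/-4, leaving slot_parameter_n
    // at -5.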
    int slot_parameter_n = -2;
    const int kTotalStackParameterCount = stack_slot_count_ + 1;

    // Initialise registers.

    // Make sure that the target has enough general purpose registers to
    // generate a call to a CodeObject using this descriptor. We have reserved
    // kReturnRegister0 as the first parameter, and the call will need a
    // register to hold the CodeObject address. So the maximum number of
    // registers left to test with is the number of available registers
    // minus 2.
    DCHECK_LE(kGeneralRegisterCount,
              GetRegConfig()->num_allocatable_general_registers() - 2);

    int32_t general_mask = GetRegConfig()->allocatable_general_codes_mask();
    // kReturnRegister0 is used to hold the "teardown" code object, do not
    // generate moves using it.
    std::unique_ptr<const RegisterConfiguration> registers(
        RegisterConfiguration::RestrictGeneralRegisters(
            general_mask & ~kReturnRegister0.bit()));

    for (int i = 0; i < kGeneralRegisterCount; i++) {
      int code = registers->GetAllocatableGeneralCode(i);
      AddRegister(&test_signature, MachineRepresentation::kTagged, code);
    }
    // We assume that Double, Float and Simd128 registers alias, depending on
    // kSimpleFPAliasing. For this reason, we allocate a Float, a Double and a
    // Simd128 together, which is why kDoubleRegisterCount must be a multiple
    // of 3, and of 2 in case Simd128 is not supported.
    static_assert(
        ((kDoubleRegisterCount % 2) == 0) && ((kDoubleRegisterCount % 3) == 0),
        "kDoubleRegisterCount should be a multiple of two and three.");
    for (int i = 0; i < kDoubleRegisterCount; i += 2) {
      if (kSimpleFPAliasing) {
        // Allocate three registers at once if kSimd128 is supported, else
        // allocate in pairs.
        AddRegister(&test_signature, MachineRepresentation::kFloat32,
                    registers->GetAllocatableFloatCode(i));
        AddRegister(&test_signature, MachineRepresentation::kFloat64,
                    registers->GetAllocatableDoubleCode(i + 1));
        if (TestSimd128Moves()) {
          AddRegister(&test_signature, MachineRepresentation::kSimd128,
                      registers->GetAllocatableSimd128Code(i + 2));
          i++;
        }
      } else {
        // Make sure we do not allocate FP registers which alias. To do this,
        // we allocate three 128-bit registers and then convert two of them to
        // a float and a double. With this aliasing scheme, a Simd128 register
        // aliases two Double registers and four Float registers, so we need
        // to scale indexes accordingly:
        //
        //   Simd128 register: q0, q1, q2, q3, q4, q5
        //                      |   |       |   |
        //                      V   V       V   V
        //   Aliases:          s0, d2, q2, s12, d8, q5
        //
        // This isn't space efficient at all but suits our need.
        static_assert(
            kDoubleRegisterCount < 8,
            "Arm has a q8 and a d16 register but no overlapping s32 register.");
        int first_simd128 = registers->GetAllocatableSimd128Code(i);
        int second_simd128 = registers->GetAllocatableSimd128Code(i + 1);
        AddRegister(&test_signature, MachineRepresentation::kFloat32,
                    first_simd128 * 4);
        AddRegister(&test_signature, MachineRepresentation::kFloat64,
                    second_simd128 * 2);
        if (TestSimd128Moves()) {
          int third_simd128 = registers->GetAllocatableSimd128Code(i + 2);
          AddRegister(&test_signature, MachineRepresentation::kSimd128,
                      third_simd128);
          i++;
        }
      }
    }

    // Initialise stack slots.

    std::map<MachineRepresentation, int> slots = {
        {MachineRepresentation::kTagged, kTaggedSlotCount},
        {MachineRepresentation::kFloat32, kFloat32SlotCount},
        {MachineRepresentation::kFloat64, kFloat64SlotCount}};
    if (TestSimd128Moves()) {
      slots.emplace(MachineRepresentation::kSimd128, kSimd128SlotCount);
    }

    // Allocate new slots until we run out of them.
    while (std::any_of(slots.cbegin(), slots.cend(),
                       [](const std::pair<MachineRepresentation, int>& entry) {
                         // True if there are slots left to allocate for this
                         // representation.
                         return entry.second > 0;
                       })) {
      // Pick a random MachineRepresentation from supported_reps_.
      MachineRepresentation rep = CreateRandomMachineRepresentation();
      auto entry = slots.find(rep);
      DCHECK(entry != slots.end());
      // We may have picked a representation for which all slots have already
      // been allocated.
      if (entry->second > 0) {
        // Keep a map of (MachineRepresentation . std::vector<int>) with
        // allocated slots to pick from for each representation.
        int slot = slot_parameter_n;
        slot_parameter_n -= (GetSlotSizeInBytes(rep) / kSystemPointerSize);
        AddStackSlot(&test_signature, rep, slot);
        entry->second--;
      }
    }

    // Initialise random constants.

    // While constants do not know about Smis, we need to be able to
    // differentiate between a pointer to a HeapNumber and an integer. For this
    // reason, we make sure all integers are Smis, including constants.
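    // (Illustration, assuming a 64-bit target without pointer compression: a
    // Smi keeps its payload in the upper 32 bits, so Smi::FromInt(3).ptr()
    // yields the bit pattern 3 << 32 rather than the raw value 3.)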
    for (int i = 0; i < kSmiConstantCount; i++) {
      intptr_t smi_value = static_cast<intptr_t>(
          Smi::FromInt(rng_->NextInt(Smi::kMaxValue)).ptr());
      Constant constant = kSystemPointerSize == 8
                              ? Constant(static_cast<int64_t>(smi_value))
                              : Constant(static_cast<int32_t>(smi_value));
      AddConstant(MachineRepresentation::kTagged, AllocateConstant(constant));
    }
    // Float and Double constants can be moved to both Tagged and FP registers
    // or slots. Register them as compatible with both FP and Tagged
    // destinations.
    for (int i = 0; i < kFloatConstantCount; i++) {
      int virtual_register =
          AllocateConstant(Constant(DoubleToFloat32(rng_->NextDouble())));
      AddConstant(MachineRepresentation::kTagged, virtual_register);
      AddConstant(MachineRepresentation::kFloat32, virtual_register);
    }
    for (int i = 0; i < kDoubleConstantCount; i++) {
      int virtual_register = AllocateConstant(Constant(rng_->NextDouble()));
      AddConstant(MachineRepresentation::kTagged, virtual_register);
      AddConstant(MachineRepresentation::kFloat64, virtual_register);
    }

    // The "teardown" function returns a FixedArray with the resulting state.
    test_signature.AddReturn(LinkageLocation::ForRegister(
        kReturnRegister0.code(), MachineType::AnyTagged()));

    test_descriptor_ = new (main_zone())
        CallDescriptor(CallDescriptor::kCallCodeObject,  // kind
                       MachineType::AnyTagged(),         // target MachineType
                       LinkageLocation::ForAnyRegister(
                           MachineType::AnyTagged()),  // target location
                       test_signature.Build(),         // location_sig
                       kTotalStackParameterCount,      // stack_parameter_count
                       Operator::kNoProperties,        // properties
                       kNoCalleeSaved,                 // callee-saved registers
                       kNoCalleeSaved,                 // callee-saved fp
                       CallDescriptor::kNoFlags);      // flags
  }

  int AllocateConstant(Constant constant) {
    int virtual_register = instructions_.NextVirtualRegister();
    instructions_.AddConstant(virtual_register, constant);
    return virtual_register;
  }

  // Register a constant referenced by `virtual_register` as compatible with
  // `rep`.
  void AddConstant(MachineRepresentation rep, int virtual_register) {
    auto entry = allocated_constants_.find(rep);
    if (entry == allocated_constants_.end()) {
      allocated_constants_.emplace(
          rep,
          std::vector<ConstantOperand>{ConstantOperand(virtual_register)});
    } else {
      entry->second.emplace_back(virtual_register);
    }
  }

  // Register a new register or stack slot as compatible with `rep`. As opposed
  // to constants, registers and stack slots are written to on `setup` and read
  // from on `teardown`. Therefore they are part of the environment's layout,
  // and are parameters of the `test` function.

  void AddRegister(LocationSignature::Builder* test_signature,
                   MachineRepresentation rep, int code) {
    AllocatedOperand operand(AllocatedOperand::REGISTER, rep, code);
    layout_.push_back(operand);
    test_signature->AddParam(LinkageLocation::ForRegister(
        code, MachineType::TypeForRepresentation(rep)));
    auto entry = allocated_registers_.find(rep);
    if (entry == allocated_registers_.end()) {
      allocated_registers_.emplace(rep, std::vector<AllocatedOperand>{operand});
    } else {
      entry->second.push_back(operand);
    }
  }

  void AddStackSlot(LocationSignature::Builder* test_signature,
                    MachineRepresentation rep, int slot) {
    AllocatedOperand operand(AllocatedOperand::STACK_SLOT, rep, slot);
    layout_.push_back(operand);
    test_signature->AddParam(LinkageLocation::ForCallerFrameSlot(
        slot, MachineType::TypeForRepresentation(rep)));
    auto entry = allocated_slots_.find(rep);
    if (entry == allocated_slots_.end()) {
      allocated_slots_.emplace(rep, std::vector<AllocatedOperand>{operand});
    } else {
      entry->second.push_back(operand);
    }
  }

  // Generate a random initial state to test moves against. A "state" is a
  // packed FixedArray with Smis and HeapNumbers, according to the layout of
  // the environment.
  Handle<FixedArray> GenerateInitialState() {
    Handle<FixedArray> state = main_isolate()->factory()->NewFixedArray(
        static_cast<int>(layout_.size()));
    for (int i = 0; i < state->length(); i++) {
      switch (layout_[i].representation()) {
        case MachineRepresentation::kTagged:
          state->set(i, Smi::FromInt(rng_->NextInt(Smi::kMaxValue)));
          break;
        case MachineRepresentation::kFloat32: {
          // HeapNumbers are Float64 values. However, we will convert it to a
          // Float32 and back inside `setup` and `teardown`. Make sure the
          // value we pick fits in a Float32.
          Handle<HeapNumber> num = main_isolate()->factory()->NewHeapNumber(
              static_cast<double>(DoubleToFloat32(rng_->NextDouble())));
          state->set(i, *num);
          break;
        }
        case MachineRepresentation::kFloat64: {
          Handle<HeapNumber> num =
              main_isolate()->factory()->NewHeapNumber(rng_->NextDouble());
          state->set(i, *num);
          break;
        }
        case MachineRepresentation::kSimd128: {
          Handle<FixedArray> vector =
              main_isolate()->factory()->NewFixedArray(4);
          for (int lane = 0; lane < 4; lane++) {
            vector->set(lane, Smi::FromInt(rng_->NextInt(Smi::kMaxValue)));
          }
          state->set(i, *vector);
          break;
        }
        default:
          UNREACHABLE();
          break;
      }
    }
    return state;
  }

  // Run the code generated by a CodeGeneratorTester against `state_in` and
  // return a new resulting state.
  Handle<FixedArray> Run(Handle<Code> test, Handle<FixedArray> state_in) {
    Handle<FixedArray> state_out = main_isolate()->factory()->NewFixedArray(
        static_cast<int>(layout_.size()));
    {
#ifdef ENABLE_SLOW_DCHECKS
      // The "setup" and "teardown" functions are relatively big, and with
      // runtime assertions enabled they get so big that memory during register
      // allocation becomes a problem. Temporarily disable such assertions.
      bool old_enable_slow_asserts = FLAG_enable_slow_asserts;
      FLAG_enable_slow_asserts = false;
#endif
      Handle<Code> setup =
          BuildSetupFunction(main_isolate(), test_descriptor_, layout_);
#ifdef ENABLE_SLOW_DCHECKS
      FLAG_enable_slow_asserts = old_enable_slow_asserts;
#endif
      // FunctionTester maintains its own HandleScope which means that its
      // return value will be freed along with it. Copy the result into
      // state_out.
      FunctionTester ft(setup, 2);
      Handle<FixedArray> result = ft.CallChecked<FixedArray>(test, state_in);
      CHECK_EQ(result->length(), state_in->length());
      result->CopyTo(0, *state_out, 0, result->length());
    }
    return state_out;
  }

  // For a given operand representing either a register or a stack slot, return
  // what position it should live in inside a FixedArray state.
  int OperandToStatePosition(const AllocatedOperand& operand) const {
    // Search `layout_` for `operand`.
    auto it = std::find_if(layout_.cbegin(), layout_.cend(),
                           [operand](const AllocatedOperand& this_operand) {
                             return this_operand.Equals(operand);
                           });
    DCHECK_NE(it, layout_.cend());
    return static_cast<int>(std::distance(layout_.cbegin(), it));
  }

  // Perform the given list of moves on `state_in` and return a newly allocated
  // state with the results.
  Handle<FixedArray> SimulateMoves(ParallelMove* moves,
                                   Handle<FixedArray> state_in) {
    Handle<FixedArray> state_out = main_isolate()->factory()->NewFixedArray(
        static_cast<int>(layout_.size()));
    // We do not want to modify `state_in` in place so perform the moves on a
    // copy.
    state_in->CopyTo(0, *state_out, 0, state_in->length());
    for (auto move : *moves) {
      int to_index =
          OperandToStatePosition(AllocatedOperand::cast(move->destination()));
      InstructionOperand from = move->source();
      if (from.IsConstant()) {
        Constant constant = instructions_.GetConstant(
            ConstantOperand::cast(from).virtual_register());
        Handle<Object> constant_value;
        switch (constant.type()) {
          case Constant::kInt32:
            constant_value =
                Handle<Smi>(Smi(static_cast<Address>(
                                static_cast<intptr_t>(constant.ToInt32()))),
                            main_isolate());
            break;
          case Constant::kInt64:
            constant_value = Handle<Smi>(
                Smi(static_cast<Address>(constant.ToInt64())), main_isolate());
            break;
          case Constant::kFloat32:
            constant_value = main_isolate()->factory()->NewHeapNumber(
                static_cast<double>(constant.ToFloat32()));
            break;
          case Constant::kFloat64:
            constant_value = main_isolate()->factory()->NewHeapNumber(
                constant.ToFloat64().value());
            break;
          default:
            UNREACHABLE();
            break;
        }
        state_out->set(to_index, *constant_value);
      } else {
        int from_index = OperandToStatePosition(AllocatedOperand::cast(from));
        state_out->set(to_index, *state_out->GetValueChecked<Object>(
                                     main_isolate(), from_index));
      }
    }
    return state_out;
  }

  // Perform the given list of swaps on `state_in` and return a newly allocated
  // state with the results.
  Handle<FixedArray> SimulateSwaps(ParallelMove* swaps,
                                   Handle<FixedArray> state_in) {
    Handle<FixedArray> state_out = main_isolate()->factory()->NewFixedArray(
        static_cast<int>(layout_.size()));
    // We do not want to modify `state_in` in place so perform the swaps on a
    // copy.
    state_in->CopyTo(0, *state_out, 0, state_in->length());
    for (auto swap : *swaps) {
      int lhs_index =
          OperandToStatePosition(AllocatedOperand::cast(swap->destination()));
      int rhs_index =
          OperandToStatePosition(AllocatedOperand::cast(swap->source()));
      Handle<Object> lhs =
          state_out->GetValueChecked<Object>(main_isolate(), lhs_index);
      Handle<Object> rhs =
          state_out->GetValueChecked<Object>(main_isolate(), rhs_index);
      state_out->set(lhs_index, *rhs);
      state_out->set(rhs_index, *lhs);
    }
    return state_out;
  }

  // Compare the given state with a reference.
  void CheckState(Handle<FixedArray> actual, Handle<FixedArray> expected) {
    for (int i = 0; i < static_cast<int>(layout_.size()); i++) {
      Handle<Object> actual_value =
          actual->GetValueChecked<Object>(main_isolate(), i);
      Handle<Object> expected_value =
          expected->GetValueChecked<Object>(main_isolate(), i);
      if (!CompareValues(actual_value, expected_value,
                         layout_[i].representation())) {
        std::ostringstream expected_str;
        PrintStateValue(expected_str, main_isolate(), expected_value,
                        layout_[i]);
        std::ostringstream actual_str;
        PrintStateValue(actual_str, main_isolate(), actual_value, layout_[i]);
        V8_Fatal(__FILE__, __LINE__, "Expected: '%s' but got '%s'",
                 expected_str.str().c_str(), actual_str.str().c_str());
      }
    }
  }

  bool CompareValues(Handle<Object> actual, Handle<Object> expected,
                     MachineRepresentation rep) {
    switch (rep) {
      case MachineRepresentation::kTagged:
      case MachineRepresentation::kFloat32:
      case MachineRepresentation::kFloat64:
        return actual->StrictEquals(*expected);
      case MachineRepresentation::kSimd128:
        for (int lane = 0; lane < 4; lane++) {
          Handle<Smi> actual_lane =
              FixedArray::cast(*actual)->GetValueChecked<Smi>(main_isolate(),
                                                              lane);
          Handle<Smi> expected_lane =
              FixedArray::cast(*expected)->GetValueChecked<Smi>(main_isolate(),
                                                                lane);
          if (*actual_lane != *expected_lane) {
            return false;
          }
        }
        return true;
      default:
        UNREACHABLE();
        break;
    }
  }

  enum OperandConstraint {
    kNone,
    // Restrict operands to non-constants. This is useful when generating a
    // destination.
    kCannotBeConstant
  };

  // Generate parallel moves at random. Note that they may not be compatible
  // with each other, as this doesn't matter to the code generator.
  ParallelMove* GenerateRandomMoves(int size) {
    ParallelMove* parallel_move = new (main_zone()) ParallelMove(main_zone());

    for (int i = 0; i < size;) {
      MachineRepresentation rep = CreateRandomMachineRepresentation();
      MoveOperands mo(CreateRandomOperand(kNone, rep),
                      CreateRandomOperand(kCannotBeConstant, rep));
      // It isn't valid to call `AssembleMove` and `AssembleSwap` with
      // redundant moves.
      if (mo.IsRedundant()) continue;
      parallel_move->AddMove(mo.source(), mo.destination());
      // Iterate only when a move was created.
      i++;
    }

    return parallel_move;
  }

  ParallelMove* GenerateRandomSwaps(int size) {
    ParallelMove* parallel_move = new (main_zone()) ParallelMove(main_zone());

    for (int i = 0; i < size;) {
      MachineRepresentation rep = CreateRandomMachineRepresentation();
      InstructionOperand lhs = CreateRandomOperand(kCannotBeConstant, rep);
      InstructionOperand rhs = CreateRandomOperand(kCannotBeConstant, rep);
      MoveOperands mo(lhs, rhs);
      // It isn't valid to call `AssembleMove` and `AssembleSwap` with
      // redundant moves.
      if (mo.IsRedundant()) continue;
      // Canonicalize the swap: the register operand has to be the left hand
      // side.
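      // (For instance, a generated (stack slot, register) pair is emitted as
      // Swap(register, stack slot).)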
      if (lhs.IsStackSlot() || lhs.IsFPStackSlot()) {
        std::swap(lhs, rhs);
      }
      parallel_move->AddMove(lhs, rhs);
      // Iterate only when a swap was created.
      i++;
    }

    return parallel_move;
  }

  MachineRepresentation CreateRandomMachineRepresentation() {
    int index = rng_->NextInt(static_cast<int>(supported_reps_.size()));
    return supported_reps_[index];
  }

  InstructionOperand CreateRandomOperand(OperandConstraint constraint,
                                         MachineRepresentation rep) {
    // Only generate a Constant if the operand is a source and we have a
    // constant with a compatible representation in stock.
    bool generate_constant =
        (constraint != kCannotBeConstant) &&
        (allocated_constants_.find(rep) != allocated_constants_.end());
    switch (rng_->NextInt(generate_constant ? 3 : 2)) {
      case 0:
        return CreateRandomStackSlotOperand(rep);
      case 1:
        return CreateRandomRegisterOperand(rep);
      case 2:
        return CreateRandomConstant(rep);
    }
    UNREACHABLE();
  }

  AllocatedOperand CreateRandomRegisterOperand(MachineRepresentation rep) {
    int index =
        rng_->NextInt(static_cast<int>(allocated_registers_[rep].size()));
    return allocated_registers_[rep][index];
  }

  AllocatedOperand CreateRandomStackSlotOperand(MachineRepresentation rep) {
    int index = rng_->NextInt(static_cast<int>(allocated_slots_[rep].size()));
    return allocated_slots_[rep][index];
  }

  ConstantOperand CreateRandomConstant(MachineRepresentation rep) {
    int index =
        rng_->NextInt(static_cast<int>(allocated_constants_[rep].size()));
    return allocated_constants_[rep][index];
  }

  static InstructionBlock* NewBlock(Zone* zone, RpoNumber rpo) {
    return new (zone) InstructionBlock(zone, rpo, RpoNumber::Invalid(),
                                       RpoNumber::Invalid(), false, false);
  }

  v8::base::RandomNumberGenerator* rng() const { return rng_; }
  InstructionSequence* instructions() { return &instructions_; }
  CallDescriptor* test_descriptor() { return test_descriptor_; }
  int stack_slot_count() const { return stack_slot_count_; }

 private:
  ZoneVector<InstructionBlock*> blocks_;
  InstructionSequence instructions_;
  v8::base::RandomNumberGenerator* rng_;
  // The layout describes the type of each element in the environment, in
  // order.
  std::vector<AllocatedOperand> layout_;
  CallDescriptor* test_descriptor_;
  // Allocated constants, registers and stack slots that we can generate moves
  // with, each keyed by compatible representation.
  std::vector<MachineRepresentation> supported_reps_;
  std::map<MachineRepresentation, std::vector<ConstantOperand>>
      allocated_constants_;
  std::map<MachineRepresentation, std::vector<AllocatedOperand>>
      allocated_registers_;
  std::map<MachineRepresentation, std::vector<AllocatedOperand>>
      allocated_slots_;
  int stack_slot_count_;
};

// static
constexpr int TestEnvironment::kGeneralRegisterCount;
constexpr int TestEnvironment::kDoubleRegisterCount;
constexpr int TestEnvironment::kTaggedSlotCount;
constexpr int TestEnvironment::kFloat32SlotCount;
constexpr int TestEnvironment::kFloat64SlotCount;
constexpr int TestEnvironment::kSimd128SlotCount;
constexpr int TestEnvironment::kSmiConstantCount;
constexpr int TestEnvironment::kFloatConstantCount;
constexpr int TestEnvironment::kDoubleConstantCount;

// Wrapper around the CodeGenerator. Code generated by this can only be called
// using the given `TestEnvironment`.
class CodeGeneratorTester {
 public:
  explicit CodeGeneratorTester(TestEnvironment* environment,
                               int extra_stack_space = 0)
      : zone_(environment->main_zone()),
        info_(ArrayVector("test"), environment->main_zone(), Code::STUB),
        linkage_(environment->test_descriptor()),
        frame_(environment->test_descriptor()->CalculateFixedFrameSize()) {
    // Pick half of the stack parameters at random and move them into spill
    // slots, separated by `extra_stack_space` bytes.
    // When testing a move with stack slots using CheckAssembleMove or
    // CheckAssembleSwap, we'll transparently make use of local spill slots
    // instead of stack parameters for those that were picked. This allows us
    // to test negative, positive, far and near ranges.
    for (int i = 0; i < (environment->stack_slot_count() / 2);) {
      MachineRepresentation rep =
          environment->CreateRandomMachineRepresentation();
      LocationOperand old_slot = LocationOperand::cast(
          environment->CreateRandomStackSlotOperand(rep));
      // Do not pick the same slot twice.
      if (GetSpillSlot(&old_slot) != spill_slots_.end()) {
        continue;
      }
      LocationOperand new_slot =
          AllocatedOperand(LocationOperand::STACK_SLOT, rep,
                           frame_.AllocateSpillSlot(GetSlotSizeInBytes(rep)));
      // Artificially create space on the stack by allocating a new slot.
      if (extra_stack_space > 0) {
        frame_.AllocateSpillSlot(extra_stack_space);
      }
      spill_slots_.emplace_back(old_slot, new_slot);
      i++;
    }

    generator_ = new CodeGenerator(
        environment->main_zone(), &frame_, &linkage_,
        environment->instructions(), &info_, environment->main_isolate(),
        base::Optional<OsrHelper>(), kNoSourcePosition, nullptr,
        PoisoningMitigationLevel::kDontPoison,
        AssemblerOptions::Default(environment->main_isolate()),
        Builtins::kNoBuiltinId);

    // Force a frame to be created.
    generator_->frame_access_state()->MarkHasFrame(true);
    generator_->AssembleConstructFrame();
    // TODO(all): Generate a stack check here so that we fail gracefully if the
    // frame is too big.

    // Move chosen stack parameters into spill slots.
    for (auto move : spill_slots_) {
      generator_->AssembleMove(&move.first, &move.second);
    }
  }

  ~CodeGeneratorTester() { delete generator_; }

  std::vector<std::pair<LocationOperand, LocationOperand>>::iterator
  GetSpillSlot(InstructionOperand* op) {
    if (op->IsAnyStackSlot()) {
      LocationOperand slot = LocationOperand::cast(*op);
      return std::find_if(
          spill_slots_.begin(), spill_slots_.end(),
          [slot](
              const std::pair<LocationOperand, LocationOperand>& moved_pair) {
            return moved_pair.first.index() == slot.index();
          });
    } else {
      return spill_slots_.end();
    }
  }

  // If the operand corresponds to a spill slot, return it. Else just pass it
  // through.
  InstructionOperand* MaybeTranslateSlot(InstructionOperand* op) {
    auto it = GetSpillSlot(op);
    if (it != spill_slots_.end()) {
      // The second element is the spill slot associated with op.
      return &it->second;
    } else {
      return op;
    }
  }

  Instruction* CreateTailCall(int stack_slot_delta) {
    int optional_padding_slot = stack_slot_delta;
    InstructionOperand callee[] = {
        AllocatedOperand(LocationOperand::REGISTER,
                         MachineRepresentation::kTagged,
                         kReturnRegister0.code()),
        ImmediateOperand(ImmediateOperand::INLINE, -1),  // poison index.
        ImmediateOperand(ImmediateOperand::INLINE, optional_padding_slot),
        ImmediateOperand(ImmediateOperand::INLINE, stack_slot_delta)};
    Instruction* tail_call =
        Instruction::New(zone_, kArchTailCallCodeObject, 0, nullptr,
                         arraysize(callee), callee, 0, nullptr);
    return tail_call;
  }

  enum PushTypeFlag {
    kRegisterPush = CodeGenerator::kRegisterPush,
    kStackSlotPush = CodeGenerator::kStackSlotPush,
    kScalarPush = CodeGenerator::kScalarPush
  };

  void CheckAssembleTailCallGaps(Instruction* instr,
                                 int first_unused_stack_slot,
                                 CodeGeneratorTester::PushTypeFlag push_type) {
    generator_->AssembleTailCallBeforeGap(instr, first_unused_stack_slot);
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_S390) || \
    defined(V8_TARGET_ARCH_PPC)
    // Only folding register pushes is supported on ARM.
    bool supported = ((push_type & CodeGenerator::kRegisterPush) == push_type);
#elif defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32) || \
    defined(V8_TARGET_ARCH_X87)
    bool supported = ((push_type & CodeGenerator::kScalarPush) == push_type);
#else
    bool supported = false;
#endif
    if (supported) {
      // Architectures supporting folding adjacent pushes should now have
      // resolved all moves.
      for (const auto& move :
           *instr->parallel_moves()[Instruction::FIRST_GAP_POSITION]) {
        CHECK(move->IsEliminated());
      }
    }
    generator_->AssembleGaps(instr);
    generator_->AssembleTailCallAfterGap(instr, first_unused_stack_slot);
  }

  void CheckAssembleMove(InstructionOperand* source,
                         InstructionOperand* destination) {
    int start = generator_->tasm()->pc_offset();
    generator_->AssembleMove(MaybeTranslateSlot(source),
                             MaybeTranslateSlot(destination));
    CHECK(generator_->tasm()->pc_offset() > start);
  }

  void CheckAssembleSwap(InstructionOperand* source,
                         InstructionOperand* destination) {
    int start = generator_->tasm()->pc_offset();
    generator_->AssembleSwap(MaybeTranslateSlot(source),
                             MaybeTranslateSlot(destination));
    CHECK(generator_->tasm()->pc_offset() > start);
  }

  Handle<Code> Finalize() {
    generator_->FinishCode();
    generator_->safepoints()->Emit(generator_->tasm(),
                                   frame_.GetTotalFrameSlotCount());
    generator_->MaybeEmitOutOfLineConstantPool();

    return generator_->FinalizeCode().ToHandleChecked();
  }

  Handle<Code> FinalizeForExecuting() {
    // The test environment expects us to have performed moves on stack
    // parameters. However, some of them are mapped to local spill slots. They
    // should be moved back into stack parameters so their values are passed
    // along to the `teardown` function.
    for (auto move : spill_slots_) {
      generator_->AssembleMove(&move.second, &move.first);
    }

    InstructionSequence* sequence = generator_->instructions();

    sequence->StartBlock(RpoNumber::FromInt(0));
    // The environment expects this code to tail-call to its first parameter
    // placed in `kReturnRegister0`.
    sequence->AddInstruction(Instruction::New(zone_, kArchPrepareTailCall));

    // We use either zero or one stack slot, depending on whether the target
    // stores the return address on the stack.
    int first_unused_stack_slot =
        V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0;
    int optional_padding_slot = first_unused_stack_slot;
    InstructionOperand callee[] = {
        AllocatedOperand(LocationOperand::REGISTER,
                         MachineRepresentation::kTagged,
                         kReturnRegister0.code()),
        ImmediateOperand(ImmediateOperand::INLINE, -1),  // poison index.
        ImmediateOperand(ImmediateOperand::INLINE, optional_padding_slot),
        ImmediateOperand(ImmediateOperand::INLINE, first_unused_stack_slot)};
    Instruction* tail_call =
        Instruction::New(zone_, kArchTailCallCodeObject, 0, nullptr,
                         arraysize(callee), callee, 0, nullptr);
    sequence->AddInstruction(tail_call);
    sequence->EndBlock(RpoNumber::FromInt(0));

    generator_->AssembleBlock(
        sequence->InstructionBlockAt(RpoNumber::FromInt(0)));

    return Finalize();
  }

 private:
  Zone* zone_;
  OptimizedCompilationInfo info_;
  Linkage linkage_;
  Frame frame_;
  CodeGenerator* generator_;
  // List of operands to be moved from stack parameters to spill slots.
  std::vector<std::pair<LocationOperand, LocationOperand>> spill_slots_;
};

// The following fuzz tests will assemble a lot of moves, wrap them in
// executable native code and run them. In order to check that moves were
// performed correctly, we need to set up an environment with an initial state
// and get it back after the list of moves was performed.
//
// We have two components to do this: TestEnvironment and CodeGeneratorTester.
//
// The TestEnvironment is in charge of bringing up an environment consisting of
// a set of registers, stack slots and constants, with initial values in
// them. The CodeGeneratorTester is a wrapper around the CodeGenerator and its
// only purpose is to generate code for a list of moves. The TestEnvironment is
// then able to run this code against the environment and return a resulting
// state.
//
// A "state" here is a packed FixedArray with tagged values which can either be
// Smis or HeapNumbers. When calling TestEnvironment::Run(...), registers and
// stack slots will be initialised according to this FixedArray. A new
// FixedArray is returned containing values that were moved by the generated
// code.
//
// And finally, we are able to compare the resulting FixedArray against a
// reference, computed with a simulation of AssembleMove and AssembleSwap. See
// SimulateMoves and SimulateSwaps.

// Allocate space between slots to increase coverage of moves with larger
// ranges. Note that this affects how much stack is allocated when running the
// generated code. It means we have to be careful not to exceed the stack
// limit, which is lower on Windows.
#ifdef V8_OS_WIN
constexpr int kExtraSpace = 0;
#else
constexpr int kExtraSpace = 1 * KB;
#endif

TEST(FuzzAssembleMove) {
  TestEnvironment env;

  Handle<FixedArray> state_in = env.GenerateInitialState();
  ParallelMove* moves = env.GenerateRandomMoves(1000);

  Handle<FixedArray> expected = env.SimulateMoves(moves, state_in);

  // Test small and potentially large ranges separately.
  for (int extra_space : {0, kExtraSpace}) {
    CodeGeneratorTester c(&env, extra_space);

    for (auto m : *moves) {
      c.CheckAssembleMove(&m->source(), &m->destination());
    }

    Handle<Code> test = c.FinalizeForExecuting();
    if (FLAG_print_code) {
      test->Print();
    }

    Handle<FixedArray> actual = env.Run(test, state_in);
    env.CheckState(actual, expected);
  }
}

TEST(FuzzAssembleSwap) {
  TestEnvironment env;

  Handle<FixedArray> state_in = env.GenerateInitialState();
  ParallelMove* swaps = env.GenerateRandomSwaps(1000);

  Handle<FixedArray> expected = env.SimulateSwaps(swaps, state_in);

  // Test small and potentially large ranges separately.
  for (int extra_space : {0, kExtraSpace}) {
    CodeGeneratorTester c(&env, extra_space);

    for (auto s : *swaps) {
      c.CheckAssembleSwap(&s->source(), &s->destination());
    }

    Handle<Code> test = c.FinalizeForExecuting();
    if (FLAG_print_code) {
      test->Print();
    }

    Handle<FixedArray> actual = env.Run(test, state_in);
    env.CheckState(actual, expected);
  }
}

TEST(FuzzAssembleMoveAndSwap) {
  TestEnvironment env;

  Handle<FixedArray> state_in = env.GenerateInitialState();
  Handle<FixedArray> expected =
      env.main_isolate()->factory()->NewFixedArray(state_in->length());

  // Test small and potentially large ranges separately.
  for (int extra_space : {0, kExtraSpace}) {
    CodeGeneratorTester c(&env, extra_space);

    state_in->CopyTo(0, *expected, 0, state_in->length());

    for (int i = 0; i < 1000; i++) {
      // Randomly alternate between swaps and moves.
      if (env.rng()->NextInt(2) == 0) {
        ParallelMove* move = env.GenerateRandomMoves(1);
        expected = env.SimulateMoves(move, expected);
        c.CheckAssembleMove(&move->at(0)->source(),
                            &move->at(0)->destination());
      } else {
        ParallelMove* swap = env.GenerateRandomSwaps(1);
        expected = env.SimulateSwaps(swap, expected);
        c.CheckAssembleSwap(&swap->at(0)->source(),
                            &swap->at(0)->destination());
      }
    }

    Handle<Code> test = c.FinalizeForExecuting();
    if (FLAG_print_code) {
      test->Print();
    }

    Handle<FixedArray> actual = env.Run(test, state_in);
    env.CheckState(actual, expected);
  }
}

TEST(AssembleTailCallGap) {
  const RegisterConfiguration* conf = GetRegConfig();
  TestEnvironment env;

  // This test assumes at least 4 registers are allocatable.
  CHECK_LE(4, conf->num_allocatable_general_registers());

  auto r0 = AllocatedOperand(LocationOperand::REGISTER,
                             MachineRepresentation::kTagged,
                             conf->GetAllocatableGeneralCode(0));
  auto r1 = AllocatedOperand(LocationOperand::REGISTER,
                             MachineRepresentation::kTagged,
                             conf->GetAllocatableGeneralCode(1));
  auto r2 = AllocatedOperand(LocationOperand::REGISTER,
                             MachineRepresentation::kTagged,
                             conf->GetAllocatableGeneralCode(2));
  auto r3 = AllocatedOperand(LocationOperand::REGISTER,
                             MachineRepresentation::kTagged,
                             conf->GetAllocatableGeneralCode(3));

  auto slot_minus_4 = AllocatedOperand(LocationOperand::STACK_SLOT,
                                       MachineRepresentation::kTagged, -4);
  auto slot_minus_3 = AllocatedOperand(LocationOperand::STACK_SLOT,
                                       MachineRepresentation::kTagged, -3);
  auto slot_minus_2 = AllocatedOperand(LocationOperand::STACK_SLOT,
                                       MachineRepresentation::kTagged, -2);
  auto slot_minus_1 = AllocatedOperand(LocationOperand::STACK_SLOT,
                                       MachineRepresentation::kTagged, -1);
  // Avoid slot 0 for architectures which use it to store the return address.
  int first_slot = V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0;
  auto slot_0 = AllocatedOperand(LocationOperand::STACK_SLOT,
                                 MachineRepresentation::kTagged, first_slot);
  auto slot_1 =
      AllocatedOperand(LocationOperand::STACK_SLOT,
                       MachineRepresentation::kTagged, first_slot + 1);
  auto slot_2 =
      AllocatedOperand(LocationOperand::STACK_SLOT,
                       MachineRepresentation::kTagged, first_slot + 2);
  auto slot_3 =
      AllocatedOperand(LocationOperand::STACK_SLOT,
                       MachineRepresentation::kTagged, first_slot + 3);

  // These tests all generate series of moves that the code generator should
  // detect as adjacent pushes. Depending on the architecture, we make sure
  // these moves get eliminated.
  // Also, disassembling with `--print-code` is useful when debugging.

  {
    // Generate a series of register pushes only.
    CodeGeneratorTester c(&env);
    Instruction* instr = c.CreateTailCall(first_slot + 4);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  env.main_zone())
        ->AddMove(r3, slot_0);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  env.main_zone())
        ->AddMove(r2, slot_1);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  env.main_zone())
        ->AddMove(r1, slot_2);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  env.main_zone())
        ->AddMove(r0, slot_3);

    c.CheckAssembleTailCallGaps(instr, first_slot + 4,
                                CodeGeneratorTester::kRegisterPush);
    Handle<Code> code = c.Finalize();
    if (FLAG_print_code) {
      code->Print();
    }
  }

  {
    // Generate a series of stack pushes only.
    CodeGeneratorTester c(&env);
    Instruction* instr = c.CreateTailCall(first_slot + 4);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  env.main_zone())
        ->AddMove(slot_minus_4, slot_0);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  env.main_zone())
        ->AddMove(slot_minus_3, slot_1);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  env.main_zone())
        ->AddMove(slot_minus_2, slot_2);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  env.main_zone())
        ->AddMove(slot_minus_1, slot_3);

    c.CheckAssembleTailCallGaps(instr, first_slot + 4,
                                CodeGeneratorTester::kStackSlotPush);
    Handle<Code> code = c.Finalize();
    if (FLAG_print_code) {
      code->Print();
    }
  }

  {
    // Generate a mix of stack and register pushes.
    CodeGeneratorTester c(&env);
    Instruction* instr = c.CreateTailCall(first_slot + 4);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  env.main_zone())
        ->AddMove(slot_minus_2, slot_0);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  env.main_zone())
        ->AddMove(r1, slot_1);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  env.main_zone())
        ->AddMove(slot_minus_1, slot_2);
    instr
        ->GetOrCreateParallelMove(Instruction::FIRST_GAP_POSITION,
                                  env.main_zone())
        ->AddMove(r0, slot_3);

    c.CheckAssembleTailCallGaps(instr, first_slot + 4,
                                CodeGeneratorTester::kScalarPush);
    Handle<Code> code = c.Finalize();
    if (FLAG_print_code) {
      code->Print();
    }
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8
|