// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/backend/instruction.h"
#include "src/register-configuration.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest-support.h"

namespace v8 {
namespace internal {
namespace compiler {
namespace instruction_unittest {

namespace {

const MachineRepresentation kWord = MachineRepresentation::kWord32;
const MachineRepresentation kFloat = MachineRepresentation::kFloat32;
const MachineRepresentation kDouble = MachineRepresentation::kFloat64;

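// Helper: builds two AllocatedOperands of the given location kind,
// representations, and register/slot indices, and reports whether
// InterferesWith() considers them to overlap.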
bool Interfere(LocationOperand::LocationKind kind, MachineRepresentation rep1,
               int index1, MachineRepresentation rep2, int index2) {
  return AllocatedOperand(kind, rep1, index1)
      .InterferesWith(AllocatedOperand(kind, rep2, index2));
}

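// Helper: returns true if |moves| contains a move with the given destination
// and source operands.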
bool Contains(const ZoneVector<MoveOperands*>* moves,
              const InstructionOperand& to, const InstructionOperand& from) {
  for (auto move : *moves) {
    if (move->destination().Equals(to) && move->source().Equals(from)) {
      return true;
    }
  }
  return false;
}

}  // namespace

class InstructionTest : public TestWithZone {
 public:
  InstructionTest() = default;
  ~InstructionTest() override = default;

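  // Builds a zone-allocated ParallelMove from a flat list of operands given
  // as {destination, source} pairs, e.g. {r1, r0} encodes the move r1 <- r0.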
  ParallelMove* CreateParallelMove(
      const std::vector<InstructionOperand>& operand_pairs) {
    ParallelMove* parallel_move = new (zone()) ParallelMove(zone());
    for (size_t i = 0; i < operand_pairs.size(); i += 2)
      parallel_move->AddMove(operand_pairs[i + 1], operand_pairs[i]);
    return parallel_move;
  }
};

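// Checks AllocatedOperand::InterferesWith() for register and stack-slot
// operands of word, float32, and float64 representation, under both the
// simple and the complex FP-aliasing models.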
TEST_F(InstructionTest, OperandInterference) {
  // All general registers and slots interfere only with themselves.
  for (int i = 0; i < RegisterConfiguration::kMaxGeneralRegisters; ++i) {
    EXPECT_TRUE(Interfere(LocationOperand::REGISTER, kWord, i, kWord, i));
    EXPECT_TRUE(Interfere(LocationOperand::STACK_SLOT, kWord, i, kWord, i));
    for (int j = i + 1; j < RegisterConfiguration::kMaxGeneralRegisters; ++j) {
      EXPECT_FALSE(Interfere(LocationOperand::REGISTER, kWord, i, kWord, j));
      EXPECT_FALSE(Interfere(LocationOperand::STACK_SLOT, kWord, i, kWord, j));
    }
  }

  // All FP registers interfere with themselves.
  for (int i = 0; i < RegisterConfiguration::kMaxFPRegisters; ++i) {
    EXPECT_TRUE(Interfere(LocationOperand::REGISTER, kFloat, i, kFloat, i));
    EXPECT_TRUE(Interfere(LocationOperand::STACK_SLOT, kFloat, i, kFloat, i));
    EXPECT_TRUE(Interfere(LocationOperand::REGISTER, kDouble, i, kDouble, i));
    EXPECT_TRUE(Interfere(LocationOperand::STACK_SLOT, kDouble, i, kDouble, i));
  }

  if (kSimpleFPAliasing) {
    // Simple FP aliasing: interfering registers of different reps have the
    // same index.
    for (int i = 0; i < RegisterConfiguration::kMaxFPRegisters; ++i) {
      EXPECT_TRUE(Interfere(LocationOperand::REGISTER, kFloat, i, kDouble, i));
      EXPECT_TRUE(Interfere(LocationOperand::REGISTER, kDouble, i, kFloat, i));
      for (int j = i + 1; j < RegisterConfiguration::kMaxFPRegisters; ++j) {
        EXPECT_FALSE(Interfere(LocationOperand::REGISTER, kWord, i, kWord, j));
        EXPECT_FALSE(
            Interfere(LocationOperand::STACK_SLOT, kWord, i, kWord, j));
      }
    }
  } else {
    // Complex FP aliasing: sub-registers interfere with containing registers.
    // Test sub-register indices which may not exist on the platform. This is
    // necessary since the GapResolver may split large moves into smaller ones.
    for (int i = 0; i < RegisterConfiguration::kMaxFPRegisters; ++i) {
      EXPECT_TRUE(
          Interfere(LocationOperand::REGISTER, kFloat, i * 2, kDouble, i));
      EXPECT_TRUE(
          Interfere(LocationOperand::REGISTER, kFloat, i * 2 + 1, kDouble, i));
      EXPECT_TRUE(
          Interfere(LocationOperand::REGISTER, kDouble, i, kFloat, i * 2));
      EXPECT_TRUE(
          Interfere(LocationOperand::REGISTER, kDouble, i, kFloat, i * 2 + 1));

      for (int j = i + 1; j < RegisterConfiguration::kMaxFPRegisters; ++j) {
        EXPECT_FALSE(
            Interfere(LocationOperand::REGISTER, kFloat, i * 2, kDouble, j));
        EXPECT_FALSE(Interfere(LocationOperand::REGISTER, kFloat, i * 2 + 1,
                               kDouble, j));
        EXPECT_FALSE(
            Interfere(LocationOperand::REGISTER, kDouble, i, kFloat, j * 2));
        EXPECT_FALSE(Interfere(LocationOperand::REGISTER, kDouble, i, kFloat,
                               j * 2 + 1));
      }
    }
  }
}

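// Checks ParallelMove::PrepareInsertAfter(): a move inserted after a parallel
// move should pick up assignments made to its source by the pending moves,
// and pending moves whose destinations interfere with the inserted move's
// destination should be collected in |to_eliminate|.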
TEST_F(InstructionTest, PrepareInsertAfter) {
  InstructionOperand r0 = AllocatedOperand(LocationOperand::REGISTER,
                                           MachineRepresentation::kWord32, 0);
  InstructionOperand r1 = AllocatedOperand(LocationOperand::REGISTER,
                                           MachineRepresentation::kWord32, 1);
  InstructionOperand r2 = AllocatedOperand(LocationOperand::REGISTER,
                                           MachineRepresentation::kWord32, 2);

  InstructionOperand d0 = AllocatedOperand(LocationOperand::REGISTER,
                                           MachineRepresentation::kFloat64, 0);
  InstructionOperand d1 = AllocatedOperand(LocationOperand::REGISTER,
                                           MachineRepresentation::kFloat64, 1);
  InstructionOperand d2 = AllocatedOperand(LocationOperand::REGISTER,
                                           MachineRepresentation::kFloat64, 2);

  {
    // Moves inserted after should pick up assignments to their sources.
    // Moves inserted after should cause interfering moves to be eliminated.
    ZoneVector<MoveOperands*> to_eliminate(zone());
    std::vector<InstructionOperand> moves = {
        r1, r0,  // r1 <- r0
        r2, r0,  // r2 <- r0
        d1, d0,  // d1 <- d0
        d2, d0   // d2 <- d0
    };

    ParallelMove* pm = CreateParallelMove(moves);
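    // Insert r2 <- r1 after the parallel move: since r1 <- r0 is pending, the
    // new move's source should be rewritten to r0, and the pending r2 <- r0,
    // whose destination clashes with r2, should be marked for elimination.
    // The d2 <- d1 case below exercises the same behavior for FP registers.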
    MoveOperands m1(r1, r2);  // r2 <- r1
    pm->PrepareInsertAfter(&m1, &to_eliminate);
    CHECK(m1.source().Equals(r0));
    CHECK(Contains(&to_eliminate, r2, r0));
    MoveOperands m2(d1, d2);  // d2 <- d1
    pm->PrepareInsertAfter(&m2, &to_eliminate);
    CHECK(m2.source().Equals(d0));
    CHECK(Contains(&to_eliminate, d2, d0));
  }

  if (!kSimpleFPAliasing) {
    // Moves inserted after should cause all interfering moves to be
    // eliminated.
    auto s0 = AllocatedOperand(LocationOperand::REGISTER,
                               MachineRepresentation::kFloat32, 0);
    auto s1 = AllocatedOperand(LocationOperand::REGISTER,
                               MachineRepresentation::kFloat32, 1);
    auto s2 = AllocatedOperand(LocationOperand::REGISTER,
                               MachineRepresentation::kFloat32, 2);

    {
      ZoneVector<MoveOperands*> to_eliminate(zone());
      std::vector<InstructionOperand> moves = {
          s0, s2,  // s0 <- s2
          s1, s2   // s1 <- s2
      };

      ParallelMove* pm = CreateParallelMove(moves);
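      // Insert d0 <- d1 after the parallel move: under complex FP aliasing,
      // the double register d0 aliases the float32 registers s0 and s1, so
      // both pending moves writing s0 and s1 interfere with the inserted
      // move's destination and should be marked for elimination.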
      MoveOperands m1(d1, d0);  // d0 <- d1
      pm->PrepareInsertAfter(&m1, &to_eliminate);
      CHECK(Contains(&to_eliminate, s0, s2));
      CHECK(Contains(&to_eliminate, s1, s2));
    }
  }
}

}  // namespace instruction_unittest
}  // namespace compiler
}  // namespace internal
}  // namespace v8