Line data Source code
1 : // Copyright 2016 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include <type_traits>
6 :
7 : #include "src/assembler-inl.h"
8 : #include "src/base/bits.h"
9 : #include "src/base/overflowing-math.h"
10 : #include "test/cctest/cctest.h"
11 : #include "test/cctest/compiler/value-helper.h"
12 : #include "test/cctest/wasm/wasm-run-utils.h"
13 : #include "test/common/wasm/wasm-macro-gen.h"
14 :
15 : namespace v8 {
16 : namespace internal {
17 : namespace wasm {
18 : namespace test_run_wasm_simd {
19 :
20 : namespace {
21 :
22 : using FloatUnOp = float (*)(float);
23 : using FloatBinOp = float (*)(float, float);
24 : using FloatCompareOp = int (*)(float, float);
25 : using Int32UnOp = int32_t (*)(int32_t);
26 : using Int32BinOp = int32_t (*)(int32_t, int32_t);
27 : using Int32CompareOp = int (*)(int32_t, int32_t);
28 : using Int32ShiftOp = int32_t (*)(int32_t, int);
29 : using Int16UnOp = int16_t (*)(int16_t);
30 : using Int16BinOp = int16_t (*)(int16_t, int16_t);
31 : using Int16CompareOp = int (*)(int16_t, int16_t);
32 : using Int16ShiftOp = int16_t (*)(int16_t, int);
33 : using Int8UnOp = int8_t (*)(int8_t);
34 : using Int8BinOp = int8_t (*)(int8_t, int8_t);
35 : using Int8CompareOp = int (*)(int8_t, int8_t);
36 : using Int8ShiftOp = int8_t (*)(int8_t, int);
37 :
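     : // Defines a SIMD test that runs under TurboFan, under the interpreter, and
     : // under TurboFan with SIMD-to-scalar lowering.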
38 : #define WASM_SIMD_TEST(name) \
39 : void RunWasm_##name##_Impl(LowerSimd lower_simd, \
40 : ExecutionTier execution_tier); \
41 : TEST(RunWasm_##name##_turbofan) { \
42 : EXPERIMENTAL_FLAG_SCOPE(simd); \
43 : RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kTurbofan); \
44 : } \
45 : TEST(RunWasm_##name##_interpreter) { \
46 : EXPERIMENTAL_FLAG_SCOPE(simd); \
47 : RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kInterpreter); \
48 : } \
49 : TEST(RunWasm_##name##_simd_lowered) { \
50 : EXPERIMENTAL_FLAG_SCOPE(simd); \
51 : RunWasm_##name##_Impl(kLowerSimd, ExecutionTier::kTurbofan); \
52 : } \
53 : void RunWasm_##name##_Impl(LowerSimd lower_simd, ExecutionTier execution_tier)
54 :
55 : // Generic expected value functions.
56 : template <typename T, typename = typename std::enable_if<
57 : std::is_floating_point<T>::value>::type>
58 1512 : T Negate(T a) {
59 1512 : return -a;
60 : }
61 :
62 : // For signed integral types, use base::AddWithWraparound.
63 : template <typename T, typename = typename std::enable_if<
64 : std::is_floating_point<T>::value>::type>
65 160152 : T Add(T a, T b) {
66 160152 : return a + b;
67 : }
68 :
69 : // For signed integral types, use base::SubWithWraparound.
70 : template <typename T, typename = typename std::enable_if<
71 : std::is_floating_point<T>::value>::type>
72 160152 : T Sub(T a, T b) {
73 160152 : return a - b;
74 : }
75 :
76 : // For signed integral types, use base::MulWithWraparound.
77 : template <typename T, typename = typename std::enable_if<
78 : std::is_floating_point<T>::value>::type>
79 160152 : T Mul(T a, T b) {
80 160152 : return a * b;
81 : }
82 :
83 : template <typename T>
84 42312 : T Minimum(T a, T b) {
85 42312 : return a <= b ? a : b;
86 : }
87 :
88 : template <typename T>
89 42312 : T Maximum(T a, T b) {
90 42312 : return a >= b ? a : b;
91 : }
92 :
93 : template <typename T>
94 42312 : T UnsignedMinimum(T a, T b) {
95 : using UnsignedT = typename std::make_unsigned<T>::type;
96 42312 : return static_cast<UnsignedT>(a) <= static_cast<UnsignedT>(b) ? a : b;
97 : }
98 :
99 : template <typename T>
100 42312 : T UnsignedMaximum(T a, T b) {
101 : using UnsignedT = typename std::make_unsigned<T>::type;
102 42312 : return static_cast<UnsignedT>(a) >= static_cast<UnsignedT>(b) ? a : b;
103 : }
104 :
105 158700 : int Equal(float a, float b) { return a == b ? -1 : 0; }
106 :
107 : template <typename T>
108 42312 : T Equal(T a, T b) {
109 42312 : return a == b ? -1 : 0;
110 : }
111 :
112 158700 : int NotEqual(float a, float b) { return a != b ? -1 : 0; }
113 :
114 : template <typename T>
115 42312 : T NotEqual(T a, T b) {
116 42312 : return a != b ? -1 : 0;
117 : }
118 :
119 158700 : int Less(float a, float b) { return a < b ? -1 : 0; }
120 :
121 : template <typename T>
122 42312 : T Less(T a, T b) {
123 42312 : return a < b ? -1 : 0;
124 : }
125 :
126 158700 : int LessEqual(float a, float b) { return a <= b ? -1 : 0; }
127 :
128 : template <typename T>
129 42312 : T LessEqual(T a, T b) {
130 42312 : return a <= b ? -1 : 0;
131 : }
132 :
133 158700 : int Greater(float a, float b) { return a > b ? -1 : 0; }
134 :
135 : template <typename T>
136 42312 : T Greater(T a, T b) {
137 42312 : return a > b ? -1 : 0;
138 : }
139 :
140 158700 : int GreaterEqual(float a, float b) { return a >= b ? -1 : 0; }
141 :
142 : template <typename T>
143 42312 : T GreaterEqual(T a, T b) {
144 42312 : return a >= b ? -1 : 0;
145 : }
146 :
147 : template <typename T>
148 42312 : T UnsignedLess(T a, T b) {
149 : using UnsignedT = typename std::make_unsigned<T>::type;
150 42312 : return static_cast<UnsignedT>(a) < static_cast<UnsignedT>(b) ? -1 : 0;
151 : }
152 :
153 : template <typename T>
154 42312 : T UnsignedLessEqual(T a, T b) {
155 : using UnsignedT = typename std::make_unsigned<T>::type;
156 42312 : return static_cast<UnsignedT>(a) <= static_cast<UnsignedT>(b) ? -1 : 0;
157 : }
158 :
159 : template <typename T>
160 42312 : T UnsignedGreater(T a, T b) {
161 : using UnsignedT = typename std::make_unsigned<T>::type;
162 42312 : return static_cast<UnsignedT>(a) > static_cast<UnsignedT>(b) ? -1 : 0;
163 : }
164 :
165 : template <typename T>
166 42312 : T UnsignedGreaterEqual(T a, T b) {
167 : using UnsignedT = typename std::make_unsigned<T>::type;
168 42312 : return static_cast<UnsignedT>(a) >= static_cast<UnsignedT>(b) ? -1 : 0;
169 : }
170 :
171 : template <typename T>
172 23952 : T LogicalShiftLeft(T a, int shift) {
173 : using UnsignedT = typename std::make_unsigned<T>::type;
174 23952 : return static_cast<UnsignedT>(a) << shift;
175 : }
176 :
177 : template <typename T>
178 23952 : T LogicalShiftRight(T a, int shift) {
179 : using UnsignedT = typename std::make_unsigned<T>::type;
180 23952 : return static_cast<UnsignedT>(a) >> shift;
181 : }
182 :
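     : // Saturates a 64-bit value to the range of T; used by the narrowing and
     : // saturating-arithmetic reference implementations below.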
183 : template <typename T>
184 : T Clamp(int64_t value) {
185 : static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
186 9384 : int64_t min = static_cast<int64_t>(std::numeric_limits<T>::min());
187 9384 : int64_t max = static_cast<int64_t>(std::numeric_limits<T>::max());
188 9384 : int64_t clamped = std::max(min, std::min(max, value));
189 4692 : return static_cast<T>(clamped);
190 : }
191 :
192 : template <typename T>
193 : int64_t Widen(T value) {
194 : static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
195 3888 : return static_cast<int64_t>(value);
196 : }
197 :
198 : template <typename T>
199 : int64_t UnsignedWiden(T value) {
200 : static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
201 : using UnsignedT = typename std::make_unsigned<T>::type;
202 4104 : return static_cast<int64_t>(static_cast<UnsignedT>(value));
203 : }
204 :
205 : template <typename T>
206 : T Narrow(int64_t value) {
207 : return Clamp<T>(value);
208 : }
209 :
210 : template <typename T>
211 : T UnsignedNarrow(int64_t value) {
212 : static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
213 : using UnsignedT = typename std::make_unsigned<T>::type;
214 1608 : return static_cast<T>(Clamp<UnsignedT>(value & 0xFFFFFFFFu));
215 : }
216 :
217 : template <typename T>
218 1944 : T AddSaturate(T a, T b) {
219 3888 : return Clamp<T>(Widen(a) + Widen(b));
220 : }
221 :
222 : template <typename T>
223 1944 : T SubSaturate(T a, T b) {
224 3888 : return Clamp<T>(Widen(a) - Widen(b));
225 : }
226 :
227 : template <typename T>
228 1944 : T UnsignedAddSaturate(T a, T b) {
229 : using UnsignedT = typename std::make_unsigned<T>::type;
230 3888 : return Clamp<UnsignedT>(UnsignedWiden(a) + UnsignedWiden(b));
231 : }
232 :
233 : template <typename T>
234 1944 : T UnsignedSubSaturate(T a, T b) {
235 : using UnsignedT = typename std::make_unsigned<T>::type;
236 3888 : return Clamp<UnsignedT>(UnsignedWiden(a) - UnsignedWiden(b));
237 : }
238 :
239 : template <typename T>
240 40368 : T And(T a, T b) {
241 40368 : return a & b;
242 : }
243 :
244 : template <typename T>
245 40368 : T Or(T a, T b) {
246 40368 : return a | b;
247 : }
248 :
249 : template <typename T>
250 40368 : T Xor(T a, T b) {
251 40368 : return a ^ b;
252 : }
253 :
254 : template <typename T>
255 696 : T Not(T a) {
256 696 : return ~a;
257 : }
258 :
259 : template <typename T>
260 : T LogicalNot(T a) {
261 : return a == 0 ? -1 : 0;
262 : }
263 :
264 : template <typename T>
265 : T Sqrt(T a) {
266 : return std::sqrt(a);
267 : }
268 :
269 : } // namespace
270 :
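     : // Helper macros that emit the raw byte sequences for SIMD opcodes, for use
     : // inside BUILD().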
271 : #define WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lane_value, lane_index) \
272 : WASM_IF(WASM_##LANE_TYPE##_NE(WASM_GET_LOCAL(lane_value), \
273 : WASM_SIMD_##TYPE##_EXTRACT_LANE( \
274 : lane_index, WASM_GET_LOCAL(value))), \
275 : WASM_RETURN1(WASM_ZERO))
276 :
277 : #define TO_BYTE(val) static_cast<byte>(val)
278 : #define WASM_SIMD_OP(op) kSimdPrefix, TO_BYTE(op)
279 : #define WASM_SIMD_SPLAT(Type, x) x, WASM_SIMD_OP(kExpr##Type##Splat)
280 : #define WASM_SIMD_UNOP(op, x) x, WASM_SIMD_OP(op)
281 : #define WASM_SIMD_BINOP(op, x, y) x, y, WASM_SIMD_OP(op)
282 : #define WASM_SIMD_SHIFT_OP(op, shift, x) x, WASM_SIMD_OP(op), TO_BYTE(shift)
283 : #define WASM_SIMD_CONCAT_OP(op, bytes, x, y) \
284 : x, y, WASM_SIMD_OP(op), TO_BYTE(bytes)
285 : #define WASM_SIMD_SELECT(format, x, y, z) x, y, z, WASM_SIMD_OP(kExprS128Select)
286 : #define WASM_SIMD_F32x4_SPLAT(x) x, WASM_SIMD_OP(kExprF32x4Splat)
287 : #define WASM_SIMD_F32x4_EXTRACT_LANE(lane, x) \
288 : x, WASM_SIMD_OP(kExprF32x4ExtractLane), TO_BYTE(lane)
289 : #define WASM_SIMD_F32x4_REPLACE_LANE(lane, x, y) \
290 : x, y, WASM_SIMD_OP(kExprF32x4ReplaceLane), TO_BYTE(lane)
291 :
292 : #define WASM_SIMD_I32x4_SPLAT(x) x, WASM_SIMD_OP(kExprI32x4Splat)
293 : #define WASM_SIMD_I32x4_EXTRACT_LANE(lane, x) \
294 : x, WASM_SIMD_OP(kExprI32x4ExtractLane), TO_BYTE(lane)
295 : #define WASM_SIMD_I32x4_REPLACE_LANE(lane, x, y) \
296 : x, y, WASM_SIMD_OP(kExprI32x4ReplaceLane), TO_BYTE(lane)
297 :
298 : #define WASM_SIMD_I16x8_SPLAT(x) x, WASM_SIMD_OP(kExprI16x8Splat)
299 : #define WASM_SIMD_I16x8_EXTRACT_LANE(lane, x) \
300 : x, WASM_SIMD_OP(kExprI16x8ExtractLane), TO_BYTE(lane)
301 : #define WASM_SIMD_I16x8_REPLACE_LANE(lane, x, y) \
302 : x, y, WASM_SIMD_OP(kExprI16x8ReplaceLane), TO_BYTE(lane)
303 :
304 : #define WASM_SIMD_I8x16_SPLAT(x) x, WASM_SIMD_OP(kExprI8x16Splat)
305 : #define WASM_SIMD_I8x16_EXTRACT_LANE(lane, x) \
306 : x, WASM_SIMD_OP(kExprI8x16ExtractLane), TO_BYTE(lane)
307 : #define WASM_SIMD_I8x16_REPLACE_LANE(lane, x, y) \
308 : x, y, WASM_SIMD_OP(kExprI8x16ReplaceLane), TO_BYTE(lane)
309 :
310 : #define WASM_SIMD_S8x16_SHUFFLE_OP(opcode, m, x, y) \
311 : x, y, WASM_SIMD_OP(opcode), TO_BYTE(m[0]), TO_BYTE(m[1]), TO_BYTE(m[2]), \
312 : TO_BYTE(m[3]), TO_BYTE(m[4]), TO_BYTE(m[5]), TO_BYTE(m[6]), \
313 : TO_BYTE(m[7]), TO_BYTE(m[8]), TO_BYTE(m[9]), TO_BYTE(m[10]), \
314 : TO_BYTE(m[11]), TO_BYTE(m[12]), TO_BYTE(m[13]), TO_BYTE(m[14]), \
315 : TO_BYTE(m[15])
316 :
317 : #define WASM_SIMD_LOAD_MEM(index) \
318 : index, WASM_SIMD_OP(kExprS128LoadMem), ZERO_ALIGNMENT, ZERO_OFFSET
319 : #define WASM_SIMD_STORE_MEM(index, val) \
320 : index, val, WASM_SIMD_OP(kExprS128StoreMem), ZERO_ALIGNMENT, ZERO_OFFSET
321 :
322 : // Like WASM_SIMD_TEST, but omits the interpreter variant (see v8:8425).
323 : #define WASM_SIMD_COMPILED_TEST(name) \
324 : void RunWasm_##name##_Impl(LowerSimd lower_simd, \
325 : ExecutionTier execution_tier); \
326 : TEST(RunWasm_##name##_turbofan) { \
327 : EXPERIMENTAL_FLAG_SCOPE(simd); \
328 : RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kTurbofan); \
329 : } \
330 : TEST(RunWasm_##name##_simd_lowered) { \
331 : EXPERIMENTAL_FLAG_SCOPE(simd); \
332 : RunWasm_##name##_Impl(kLowerSimd, ExecutionTier::kTurbofan); \
333 : } \
334 : void RunWasm_##name##_Impl(LowerSimd lower_simd, ExecutionTier execution_tier)
335 :
336 : // The macro below disables SIMD lowering for tests of nodes where the
337 : // lowering doesn't work correctly. It returns early if the CPU does not
338 : // support SIMD, since the graph would be implicitly lowered in that case.
339 : #define WASM_SIMD_TEST_NO_LOWERING(name) \
340 : void RunWasm_##name##_Impl(LowerSimd lower_simd, \
341 : ExecutionTier execution_tier); \
342 : TEST(RunWasm_##name##_turbofan) { \
343 : if (!CpuFeatures::SupportsWasmSimd128()) return; \
344 : EXPERIMENTAL_FLAG_SCOPE(simd); \
345 : RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kTurbofan); \
346 : } \
347 : TEST(RunWasm_##name##_interpreter) { \
348 : EXPERIMENTAL_FLAG_SCOPE(simd); \
349 : RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kInterpreter); \
350 : } \
351 : void RunWasm_##name##_Impl(LowerSimd lower_simd, ExecutionTier execution_tier)
352 :
353 : // Returns true if the platform can represent the result.
354 0 : bool PlatformCanRepresent(float x) {
355 : #if V8_TARGET_ARCH_ARM
356 : return std::fpclassify(x) != FP_SUBNORMAL;
357 : #else
358 0 : return true;
359 : #endif
360 : }
361 :
362 : // Returns true for very small and very large numbers. We skip these test
363 : // values for the approximation instructions, which don't work at the extremes.
364 0 : bool IsExtreme(float x) {
365 : float abs_x = std::fabs(x);
366 : const float kSmallFloatThreshold = 1.0e-32f;
367 : const float kLargeFloatThreshold = 1.0e32f;
368 3024 : return abs_x != 0.0f && // 0 or -0 are fine.
369 2856 : (abs_x < kSmallFloatThreshold || abs_x > kLargeFloatThreshold);
370 : }
371 :
372 26687 : WASM_SIMD_TEST(F32x4Splat) {
373 12 : WasmRunner<int32_t, float> r(execution_tier, lower_simd);
374 : // Set up a global to hold output vector.
375 : float* g = r.builder().AddGlobal<float>(kWasmS128);
376 : byte param1 = 0;
377 12 : BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(param1))),
378 : WASM_ONE);
379 :
380 2772 : FOR_FLOAT32_INPUTS(x) {
381 1380 : r.Call(x);
382 : float expected = x;
383 12420 : for (int i = 0; i < 4; i++) {
384 5520 : float actual = ReadLittleEndianValue<float>(&g[i]);
385 5520 : if (std::isnan(expected)) {
386 96 : CHECK(std::isnan(actual));
387 : } else {
388 5424 : CHECK_EQ(actual, expected);
389 : }
390 : }
391 : }
392 12 : }
393 :
394 26687 : WASM_SIMD_TEST(F32x4ReplaceLane) {
395 12 : WasmRunner<int32_t> r(execution_tier, lower_simd);
396 : // Set up a global to hold input/output vector.
397 : float* g = r.builder().AddGlobal<float>(kWasmS128);
398 : // Build function to replace each lane with its (FP) index.
399 : byte temp1 = r.AllocateLocal(kWasmS128);
400 12 : BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_SPLAT(WASM_F32(3.14159f))),
401 : WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_REPLACE_LANE(
402 : 0, WASM_GET_LOCAL(temp1), WASM_F32(0.0f))),
403 : WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_REPLACE_LANE(
404 : 1, WASM_GET_LOCAL(temp1), WASM_F32(1.0f))),
405 : WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_REPLACE_LANE(
406 : 2, WASM_GET_LOCAL(temp1), WASM_F32(2.0f))),
407 : WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_REPLACE_LANE(
408 : 3, WASM_GET_LOCAL(temp1), WASM_F32(3.0f))),
409 : WASM_ONE);
410 :
411 12 : r.Call();
412 108 : for (int i = 0; i < 4; i++) {
413 48 : CHECK_EQ(static_cast<float>(i), ReadLittleEndianValue<float>(&g[i]));
414 : }
415 12 : }
416 :
417 : // Tests both signed and unsigned conversion.
418 : // v8:8425 tracks this test being enabled in the interpreter.
419 26671 : WASM_SIMD_COMPILED_TEST(F32x4ConvertI32x4) {
420 8 : WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
421 : // Create two output vectors to hold signed and unsigned results.
422 : float* g0 = r.builder().AddGlobal<float>(kWasmS128);
423 : float* g1 = r.builder().AddGlobal<float>(kWasmS128);
424 : // Build fn to splat test value, perform conversions, and write the results.
425 : byte value = 0;
426 : byte temp1 = r.AllocateLocal(kWasmS128);
427 8 : BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value))),
428 : WASM_SET_GLOBAL(
429 : 0, WASM_SIMD_UNOP(kExprF32x4SConvertI32x4, WASM_GET_LOCAL(temp1))),
430 : WASM_SET_GLOBAL(
431 : 1, WASM_SIMD_UNOP(kExprF32x4UConvertI32x4, WASM_GET_LOCAL(temp1))),
432 : WASM_ONE);
433 :
434 936 : FOR_INT32_INPUTS(x) {
435 464 : r.Call(x);
436 464 : float expected_signed = static_cast<float>(x);
437 464 : float expected_unsigned = static_cast<float>(static_cast<uint32_t>(x));
438 4176 : for (int i = 0; i < 4; i++) {
439 1856 : CHECK_EQ(expected_signed, ReadLittleEndianValue<float>(&g0[i]));
440 1856 : CHECK_EQ(expected_unsigned, ReadLittleEndianValue<float>(&g1[i]));
441 : }
442 : }
443 8 : }
444 :
445 0 : bool IsSameNan(float expected, float actual) {
446 : // Sign is non-deterministic.
447 120640 : uint32_t expected_bits = bit_cast<uint32_t>(expected) & ~0x80000000;
448 120640 : uint32_t actual_bits = bit_cast<uint32_t>(actual) & ~0x80000000;
449 : // Some implementations convert signaling NaNs to quiet NaNs.
450 132256 : return (expected_bits == actual_bits) ||
451 11616 : ((expected_bits | 0x00400000) == actual_bits);
452 : }
453 :
454 0 : bool IsCanonical(float actual) {
455 : uint32_t actual_bits = bit_cast<uint32_t>(actual);
456 : // Canonical NaN has quiet bit and no payload.
457 2880 : return (actual_bits & 0xFFC00000) == actual_bits;
458 : }
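     : // Checks one lane's result. A NaN result is accepted if it matches a NaN
     : // input, the expected NaN (ignoring sign and quieting), or the canonical
     : // NaN. Otherwise the result must be exact, or within 1% when exact == false.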
459 :
460 2798432 : void CheckFloatResult(float x, float y, float expected, float actual,
461 : bool exact = true) {
462 2798432 : if (std::isnan(expected)) {
463 116800 : CHECK(std::isnan(actual));
464 176464 : if (std::isnan(x) && IsSameNan(x, actual)) return;
465 113456 : if (std::isnan(y) && IsSameNan(y, actual)) return;
466 6768 : if (IsSameNan(expected, actual)) return;
467 2880 : if (IsCanonical(actual)) return;
468 : // This is expected to assert; it's useful for debugging.
469 0 : CHECK_EQ(bit_cast<uint32_t>(expected), bit_cast<uint32_t>(actual));
470 : } else {
471 2681632 : if (exact) {
472 2674000 : CHECK_EQ(expected, actual);
473 : // The sign of 0's must match.
474 2674000 : CHECK_EQ(std::signbit(expected), std::signbit(actual));
475 : return;
476 : }
477 : // Otherwise, perform an approximate equality test. First check for
478 : // equality to handle +/-Infinity where approximate equality doesn't work.
479 7632 : if (expected == actual) return;
480 :
481 : // 1% error allows all platforms to pass easily.
482 : constexpr float kApproximationError = 0.01f;
483 2448 : float abs_error = std::abs(expected) * kApproximationError,
484 2448 : min = expected - abs_error, max = expected + abs_error;
485 2448 : CHECK_LE(min, actual);
486 2448 : CHECK_GE(max, actual);
487 : }
488 : }
489 :
490 : // Test some values not included in the float inputs from value_helper. These
491 : // tests are useful for opcodes that are synthesized during code gen, like Min
492 : // and Max on ia32 and x64.
493 : static constexpr uint32_t nan_test_array[] = {
494 : // Bit patterns of quiet NaNs and signaling NaNs, with or without
495 : // additional payload.
496 : 0x7FC00000, 0xFFC00000, 0x7FFFFFFF, 0x7F876543,
497 : 0xFF876543,
498 : // Both Infinities.
499 : 0x7F800000, 0xFF800000,
500 : // Some "normal" numbers, 1 and -1.
501 : 0x3F800000, 0xBF800000};
502 :
503 : #define FOR_FLOAT32_NAN_INPUTS(i) \
504 : for (size_t i = 0; i < arraysize(nan_test_array); ++i)
505 :
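     : // Runs an F32x4 unary op over all float inputs and the NaN test values.
     : // With exact == false, extreme inputs are skipped and CheckFloatResult
     : // uses the 1% approximate comparison (for the approximation opcodes).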
506 48 : void RunF32x4UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
507 : WasmOpcode opcode, FloatUnOp expected_op,
508 : bool exact = true) {
509 48 : WasmRunner<int32_t, float> r(execution_tier, lower_simd);
510 : // Global to hold output.
511 : float* g = r.builder().AddGlobal<float>(kWasmS128);
512 : // Build fn to splat test value, perform unop, and write the result.
513 : byte value = 0;
514 : byte temp1 = r.AllocateLocal(kWasmS128);
515 48 : BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value))),
516 : WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(opcode, WASM_GET_LOCAL(temp1))),
517 : WASM_ONE);
518 :
519 11088 : FOR_FLOAT32_INPUTS(x) {
520 : if (!PlatformCanRepresent(x)) continue;
521 : // Extreme values have larger errors so skip them for approximation tests.
522 8280 : if (!exact && IsExtreme(x)) continue;
523 5088 : float expected = expected_op(x);
524 : if (!PlatformCanRepresent(expected)) continue;
525 5088 : r.Call(x);
526 45792 : for (int i = 0; i < 4; i++) {
527 20352 : float actual = ReadLittleEndianValue<float>(&g[i]);
528 20352 : CheckFloatResult(x, x, expected, actual, exact);
529 : }
530 : }
531 :
532 1104 : FOR_FLOAT32_NAN_INPUTS(x) {
533 : if (!PlatformCanRepresent(x)) continue;
534 : // Extreme values have larger errors so skip them for approximation tests.
535 792 : if (!exact && IsExtreme(x)) continue;
536 528 : float expected = expected_op(x);
537 : if (!PlatformCanRepresent(expected)) continue;
538 528 : r.Call(x);
539 4752 : for (int i = 0; i < 4; i++) {
540 2112 : float actual = ReadLittleEndianValue<float>(&g[i]);
541 2112 : CheckFloatResult(x, x, expected, actual, exact);
542 : }
543 : }
544 48 : }
545 :
546 26663 : WASM_SIMD_TEST(F32x4Abs) {
547 12 : RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Abs, std::abs);
548 0 : }
549 26663 : WASM_SIMD_TEST(F32x4Neg) {
550 12 : RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Neg, Negate);
551 0 : }
552 :
553 26663 : WASM_SIMD_TEST(F32x4RecipApprox) {
554 0 : RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipApprox,
555 12 : base::Recip, false /* !exact */);
556 0 : }
557 :
558 26663 : WASM_SIMD_TEST(F32x4RecipSqrtApprox) {
559 0 : RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipSqrtApprox,
560 12 : base::RecipSqrt, false /* !exact */);
561 0 : }
562 :
563 52 : void RunF32x4BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
564 : WasmOpcode opcode, FloatBinOp expected_op) {
565 52 : WasmRunner<int32_t, float, float> r(execution_tier, lower_simd);
566 : // Global to hold output.
567 : float* g = r.builder().AddGlobal<float>(kWasmS128);
568 : // Build fn to splat test values, perform binop, and write the result.
569 : byte value1 = 0, value2 = 1;
570 : byte temp1 = r.AllocateLocal(kWasmS128);
571 : byte temp2 = r.AllocateLocal(kWasmS128);
572 52 : BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value1))),
573 : WASM_SET_LOCAL(temp2, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value2))),
574 : WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
575 : WASM_GET_LOCAL(temp2))),
576 : WASM_ONE);
577 :
578 12012 : FOR_FLOAT32_INPUTS(x) {
579 : if (!PlatformCanRepresent(x)) continue;
580 1381380 : FOR_FLOAT32_INPUTS(y) {
581 : if (!PlatformCanRepresent(y)) continue;
582 687700 : float expected = expected_op(x, y);
583 : if (!PlatformCanRepresent(expected)) continue;
584 687700 : r.Call(x, y);
585 6189300 : for (int i = 0; i < 4; i++) {
586 2750800 : float actual = ReadLittleEndianValue<float>(&g[i]);
587 2750800 : CheckFloatResult(x, y, expected, actual, true /* exact */);
588 : }
589 : }
590 : }
591 :
592 1196 : FOR_FLOAT32_NAN_INPUTS(i) {
593 : float x = bit_cast<float>(nan_test_array[i]);
594 : if (!PlatformCanRepresent(x)) continue;
595 13156 : FOR_FLOAT32_NAN_INPUTS(j) {
596 : float y = bit_cast<float>(nan_test_array[j]);
597 : if (!PlatformCanRepresent(y)) continue;
598 6292 : float expected = expected_op(x, y);
599 : if (!PlatformCanRepresent(expected)) continue;
600 6292 : r.Call(x, y);
601 56628 : for (int i = 0; i < 4; i++) {
602 25168 : float actual = ReadLittleEndianValue<float>(&g[i]);
603 25168 : CheckFloatResult(x, y, expected, actual, true /* exact */);
604 : }
605 : }
606 : }
607 52 : }
608 :
609 : #undef FOR_FLOAT32_NAN_INPUTS
610 :
611 26663 : WASM_SIMD_TEST(F32x4Add) {
612 12 : RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Add, Add);
613 0 : }
614 26663 : WASM_SIMD_TEST(F32x4Sub) {
615 12 : RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Sub, Sub);
616 0 : }
617 26663 : WASM_SIMD_TEST(F32x4Mul) {
618 12 : RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Mul, Mul);
619 0 : }
620 : // v8:8425 tracks this test being enabled in the interpreter.
621 26655 : WASM_SIMD_COMPILED_TEST(F32x4Min) {
622 8 : RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Min, JSMin);
623 0 : }
624 : // v8:8425 tracks this test being enabled in the interpreter.
625 26655 : WASM_SIMD_COMPILED_TEST(F32x4Max) {
626 8 : RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Max, JSMax);
627 0 : }
628 :
629 72 : void RunF32x4CompareOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
630 : WasmOpcode opcode, FloatCompareOp expected_op) {
631 72 : WasmRunner<int32_t, float, float> r(execution_tier, lower_simd);
632 : // Set up global to hold mask output.
633 : int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
634 : // Build fn to splat test values, perform compare op, and write the result.
635 : byte value1 = 0, value2 = 1;
636 : byte temp1 = r.AllocateLocal(kWasmS128);
637 : byte temp2 = r.AllocateLocal(kWasmS128);
638 72 : BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value1))),
639 : WASM_SET_LOCAL(temp2, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value2))),
640 : WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
641 : WASM_GET_LOCAL(temp2))),
642 : WASM_ONE);
643 :
644 16632 : FOR_FLOAT32_INPUTS(x) {
645 : if (!PlatformCanRepresent(x)) continue;
646 1912680 : FOR_FLOAT32_INPUTS(y) {
647 : if (!PlatformCanRepresent(y)) continue;
648 : float diff = x - y; // Model comparison as subtraction.
649 : if (!PlatformCanRepresent(diff)) continue;
650 952200 : r.Call(x, y);
651 952200 : int32_t expected = expected_op(x, y);
652 8569800 : for (int i = 0; i < 4; i++) {
653 3808800 : CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
654 : }
655 : }
656 : }
657 72 : }
658 :
659 26663 : WASM_SIMD_TEST(F32x4Eq) {
660 12 : RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Eq, Equal);
661 0 : }
662 :
663 26663 : WASM_SIMD_TEST(F32x4Ne) {
664 12 : RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Ne, NotEqual);
665 0 : }
666 :
667 26663 : WASM_SIMD_TEST(F32x4Gt) {
668 12 : RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Gt, Greater);
669 0 : }
670 :
671 26663 : WASM_SIMD_TEST(F32x4Ge) {
672 12 : RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Ge, GreaterEqual);
673 0 : }
674 :
675 26663 : WASM_SIMD_TEST(F32x4Lt) {
676 12 : RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Lt, Less);
677 0 : }
678 :
679 26663 : WASM_SIMD_TEST(F32x4Le) {
680 12 : RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Le, LessEqual);
681 0 : }
682 :
683 26687 : WASM_SIMD_TEST(I32x4Splat) {
684 12 : WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
685 : // Set up a global to hold output vector.
686 : int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
687 : byte param1 = 0;
688 12 : BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(param1))),
689 : WASM_ONE);
690 :
691 1404 : FOR_INT32_INPUTS(x) {
692 696 : r.Call(x);
693 : int32_t expected = x;
694 6264 : for (int i = 0; i < 4; i++) {
695 2784 : int32_t actual = ReadLittleEndianValue<int32_t>(&g[i]);
696 2784 : CHECK_EQ(actual, expected);
697 : }
698 : }
699 12 : }
700 :
701 26687 : WASM_SIMD_TEST(I32x4ReplaceLane) {
702 12 : WasmRunner<int32_t> r(execution_tier, lower_simd);
703 : // Set up a global to hold input/output vector.
704 : int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
705 : // Build function to replace each lane with its index.
706 : byte temp1 = r.AllocateLocal(kWasmS128);
707 12 : BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_SPLAT(WASM_I32V(-1))),
708 : WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_REPLACE_LANE(
709 : 0, WASM_GET_LOCAL(temp1), WASM_I32V(0))),
710 : WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_REPLACE_LANE(
711 : 1, WASM_GET_LOCAL(temp1), WASM_I32V(1))),
712 : WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_REPLACE_LANE(
713 : 2, WASM_GET_LOCAL(temp1), WASM_I32V(2))),
714 : WASM_SET_GLOBAL(0, WASM_SIMD_I32x4_REPLACE_LANE(
715 : 3, WASM_GET_LOCAL(temp1), WASM_I32V(3))),
716 : WASM_ONE);
717 :
718 12 : r.Call();
719 108 : for (int32_t i = 0; i < 4; i++) {
720 48 : CHECK_EQ(i, ReadLittleEndianValue<int32_t>(&g[i]));
721 : }
722 12 : }
723 :
724 26687 : WASM_SIMD_TEST(I16x8Splat) {
725 12 : WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
726 : // Set up a global to hold output vector.
727 : int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
728 : byte param1 = 0;
729 12 : BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(param1))),
730 : WASM_ONE);
731 :
732 228 : FOR_INT16_INPUTS(x) {
733 108 : r.Call(x);
734 : int16_t expected = x;
735 1836 : for (int i = 0; i < 8; i++) {
736 864 : int16_t actual = ReadLittleEndianValue<int16_t>(&g[i]);
737 864 : CHECK_EQ(actual, expected);
738 : }
739 : }
740 12 : }
741 :
742 26687 : WASM_SIMD_TEST(I16x8ReplaceLane) {
743 12 : WasmRunner<int32_t> r(execution_tier, lower_simd);
744 : // Set up a global to hold input/output vector.
745 : int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
746 : // Build function to replace each lane with its index.
747 : byte temp1 = r.AllocateLocal(kWasmS128);
748 12 : BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_SPLAT(WASM_I32V(-1))),
749 : WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
750 : 0, WASM_GET_LOCAL(temp1), WASM_I32V(0))),
751 : WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
752 : 1, WASM_GET_LOCAL(temp1), WASM_I32V(1))),
753 : WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
754 : 2, WASM_GET_LOCAL(temp1), WASM_I32V(2))),
755 : WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
756 : 3, WASM_GET_LOCAL(temp1), WASM_I32V(3))),
757 : WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
758 : 4, WASM_GET_LOCAL(temp1), WASM_I32V(4))),
759 : WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
760 : 5, WASM_GET_LOCAL(temp1), WASM_I32V(5))),
761 : WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
762 : 6, WASM_GET_LOCAL(temp1), WASM_I32V(6))),
763 : WASM_SET_GLOBAL(0, WASM_SIMD_I16x8_REPLACE_LANE(
764 : 7, WASM_GET_LOCAL(temp1), WASM_I32V(7))),
765 : WASM_ONE);
766 :
767 12 : r.Call();
768 204 : for (int16_t i = 0; i < 8; i++) {
769 96 : CHECK_EQ(i, ReadLittleEndianValue<int16_t>(&g[i]));
770 : }
771 12 : }
772 :
773 26687 : WASM_SIMD_TEST(I8x16Splat) {
774 12 : WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
775 : // Set up a global to hold output vector.
776 : int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
777 : byte param1 = 0;
778 12 : BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(param1))),
779 : WASM_ONE);
780 :
781 228 : FOR_INT8_INPUTS(x) {
782 108 : r.Call(x);
783 : int8_t expected = x;
784 3564 : for (int i = 0; i < 16; i++) {
785 1728 : int8_t actual = ReadLittleEndianValue<int8_t>(&g[i]);
786 1728 : CHECK_EQ(actual, expected);
787 : }
788 : }
789 12 : }
790 :
791 26687 : WASM_SIMD_TEST(I8x16ReplaceLane) {
792 12 : WasmRunner<int32_t> r(execution_tier, lower_simd);
793 : // Set up a global to hold input/output vector.
794 : int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
795 : // Build function to replace each lane with its index.
796 : byte temp1 = r.AllocateLocal(kWasmS128);
797 12 : BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_SPLAT(WASM_I32V(-1))),
798 : WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
799 : 0, WASM_GET_LOCAL(temp1), WASM_I32V(0))),
800 : WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
801 : 1, WASM_GET_LOCAL(temp1), WASM_I32V(1))),
802 : WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
803 : 2, WASM_GET_LOCAL(temp1), WASM_I32V(2))),
804 : WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
805 : 3, WASM_GET_LOCAL(temp1), WASM_I32V(3))),
806 : WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
807 : 4, WASM_GET_LOCAL(temp1), WASM_I32V(4))),
808 : WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
809 : 5, WASM_GET_LOCAL(temp1), WASM_I32V(5))),
810 : WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
811 : 6, WASM_GET_LOCAL(temp1), WASM_I32V(6))),
812 : WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
813 : 7, WASM_GET_LOCAL(temp1), WASM_I32V(7))),
814 : WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
815 : 8, WASM_GET_LOCAL(temp1), WASM_I32V(8))),
816 : WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
817 : 9, WASM_GET_LOCAL(temp1), WASM_I32V(9))),
818 : WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
819 : 10, WASM_GET_LOCAL(temp1), WASM_I32V(10))),
820 : WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
821 : 11, WASM_GET_LOCAL(temp1), WASM_I32V(11))),
822 : WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
823 : 12, WASM_GET_LOCAL(temp1), WASM_I32V(12))),
824 : WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
825 : 13, WASM_GET_LOCAL(temp1), WASM_I32V(13))),
826 : WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
827 : 14, WASM_GET_LOCAL(temp1), WASM_I32V(14))),
828 : WASM_SET_GLOBAL(0, WASM_SIMD_I8x16_REPLACE_LANE(
829 : 15, WASM_GET_LOCAL(temp1), WASM_I32V(15))),
830 : WASM_ONE);
831 :
832 12 : r.Call();
833 396 : for (int8_t i = 0; i < 16; i++) {
834 192 : CHECK_EQ(i, ReadLittleEndianValue<int8_t>(&g[i]));
835 : }
836 12 : }
837 :
838 : // Use doubles to ensure exact conversion.
839 0 : int32_t ConvertToInt(double val, bool unsigned_integer) {
840 2760 : if (std::isnan(val)) return 0;
841 0 : if (unsigned_integer) {
842 1356 : if (val < 0) return 0;
843 612 : if (val > kMaxUInt32) return kMaxUInt32;
844 360 : return static_cast<uint32_t>(val);
845 : } else {
846 1356 : if (val < kMinInt) return kMinInt;
847 1116 : if (val > kMaxInt) return kMaxInt;
848 852 : return static_cast<int>(val);
849 : }
850 : }
851 :
852 : // Tests both signed and unsigned conversion.
853 26687 : WASM_SIMD_TEST(I32x4ConvertF32x4) {
854 12 : WasmRunner<int32_t, float> r(execution_tier, lower_simd);
855 : // Create two output vectors to hold signed and unsigned results.
856 : int32_t* g0 = r.builder().AddGlobal<int32_t>(kWasmS128);
857 : int32_t* g1 = r.builder().AddGlobal<int32_t>(kWasmS128);
858 : // Build fn to splat test value, perform conversions, and write the results.
859 : byte value = 0;
860 : byte temp1 = r.AllocateLocal(kWasmS128);
861 12 : BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value))),
862 : WASM_SET_GLOBAL(
863 : 0, WASM_SIMD_UNOP(kExprI32x4SConvertF32x4, WASM_GET_LOCAL(temp1))),
864 : WASM_SET_GLOBAL(
865 : 1, WASM_SIMD_UNOP(kExprI32x4UConvertF32x4, WASM_GET_LOCAL(temp1))),
866 : WASM_ONE);
867 :
868 2772 : FOR_FLOAT32_INPUTS(x) {
869 : if (!PlatformCanRepresent(x)) continue;
870 1380 : r.Call(x);
871 1380 : int32_t expected_signed = ConvertToInt(x, false);
872 : int32_t expected_unsigned = ConvertToInt(x, true);
873 12420 : for (int i = 0; i < 4; i++) {
874 5520 : CHECK_EQ(expected_signed, ReadLittleEndianValue<int32_t>(&g0[i]));
875 5520 : CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int32_t>(&g1[i]));
876 : }
877 : }
878 12 : }
879 :
880 : // Tests both signed and unsigned conversion from I16x8 (unpacking).
881 26687 : WASM_SIMD_TEST(I32x4ConvertI16x8) {
882 12 : WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
883 : // Create four output vectors to hold signed and unsigned results.
884 : int32_t* g0 = r.builder().AddGlobal<int32_t>(kWasmS128);
885 : int32_t* g1 = r.builder().AddGlobal<int32_t>(kWasmS128);
886 : int32_t* g2 = r.builder().AddGlobal<int32_t>(kWasmS128);
887 : int32_t* g3 = r.builder().AddGlobal<int32_t>(kWasmS128);
888 : // Build fn to splat test value, perform conversions, and write the results.
889 : byte value = 0;
890 : byte temp1 = r.AllocateLocal(kWasmS128);
891 12 : BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value))),
892 : WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(kExprI32x4SConvertI16x8High,
893 : WASM_GET_LOCAL(temp1))),
894 : WASM_SET_GLOBAL(1, WASM_SIMD_UNOP(kExprI32x4SConvertI16x8Low,
895 : WASM_GET_LOCAL(temp1))),
896 : WASM_SET_GLOBAL(2, WASM_SIMD_UNOP(kExprI32x4UConvertI16x8High,
897 : WASM_GET_LOCAL(temp1))),
898 : WASM_SET_GLOBAL(3, WASM_SIMD_UNOP(kExprI32x4UConvertI16x8Low,
899 : WASM_GET_LOCAL(temp1))),
900 : WASM_ONE);
901 :
902 228 : FOR_INT16_INPUTS(x) {
903 108 : r.Call(x);
904 : int32_t expected_signed = static_cast<int32_t>(Widen<int16_t>(x));
905 108 : int32_t expected_unsigned = static_cast<int32_t>(UnsignedWiden<int16_t>(x));
906 972 : for (int i = 0; i < 4; i++) {
907 432 : CHECK_EQ(expected_signed, ReadLittleEndianValue<int32_t>(&g0[i]));
908 432 : CHECK_EQ(expected_signed, ReadLittleEndianValue<int32_t>(&g1[i]));
909 432 : CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int32_t>(&g2[i]));
910 432 : CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int32_t>(&g3[i]));
911 : }
912 : }
913 12 : }
914 :
915 24 : void RunI32x4UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
916 : WasmOpcode opcode, Int32UnOp expected_op) {
917 24 : WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
918 : // Global to hold output.
919 : int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
920 : // Build fn to splat test value, perform unop, and write the result.
921 : byte value = 0;
922 : byte temp1 = r.AllocateLocal(kWasmS128);
923 24 : BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value))),
924 : WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(opcode, WASM_GET_LOCAL(temp1))),
925 : WASM_ONE);
926 :
927 2808 : FOR_INT32_INPUTS(x) {
928 1392 : r.Call(x);
929 1392 : int32_t expected = expected_op(x);
930 12528 : for (int i = 0; i < 4; i++) {
931 5568 : CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
932 : }
933 : }
934 24 : }
935 :
936 26663 : WASM_SIMD_TEST(I32x4Neg) {
937 0 : RunI32x4UnOpTest(execution_tier, lower_simd, kExprI32x4Neg,
938 12 : base::NegateWithWraparound);
939 0 : }
940 :
941 26663 : WASM_SIMD_TEST(S128Not) {
942 12 : RunI32x4UnOpTest(execution_tier, lower_simd, kExprS128Not, Not);
943 0 : }
944 :
945 240 : void RunI32x4BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
946 : WasmOpcode opcode, Int32BinOp expected_op) {
947 240 : WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
948 : // Global to hold output.
949 : int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
950 : // Build fn to splat test values, perform binop, and write the result.
951 : byte value1 = 0, value2 = 1;
952 : byte temp1 = r.AllocateLocal(kWasmS128);
953 : byte temp2 = r.AllocateLocal(kWasmS128);
954 240 : BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value1))),
955 : WASM_SET_LOCAL(temp2, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value2))),
956 : WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
957 : WASM_GET_LOCAL(temp2))),
958 : WASM_ONE);
959 :
960 28080 : FOR_INT32_INPUTS(x) {
961 1628640 : FOR_INT32_INPUTS(y) {
962 807360 : r.Call(x, y);
963 807360 : int32_t expected = expected_op(x, y);
964 7266240 : for (int i = 0; i < 4; i++) {
965 3229440 : CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
966 : }
967 : }
968 : }
969 240 : }
970 :
971 26663 : WASM_SIMD_TEST(I32x4Add) {
972 0 : RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Add,
973 12 : base::AddWithWraparound);
974 0 : }
975 :
976 26663 : WASM_SIMD_TEST(I32x4Sub) {
977 0 : RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Sub,
978 12 : base::SubWithWraparound);
979 0 : }
980 :
981 26663 : WASM_SIMD_TEST(I32x4Mul) {
982 0 : RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Mul,
983 12 : base::MulWithWraparound);
984 0 : }
985 :
986 26663 : WASM_SIMD_TEST(I32x4MinS) {
987 12 : RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4MinS, Minimum);
988 0 : }
989 :
990 26663 : WASM_SIMD_TEST(I32x4MaxS) {
991 12 : RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4MaxS, Maximum);
992 0 : }
993 :
994 26663 : WASM_SIMD_TEST(I32x4MinU) {
995 0 : RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4MinU,
996 12 : UnsignedMinimum);
997 0 : }
998 26663 : WASM_SIMD_TEST(I32x4MaxU) {
999 0 : RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4MaxU,
1000 :
1001 12 : UnsignedMaximum);
1002 0 : }
1003 :
1004 26663 : WASM_SIMD_TEST(S128And) {
1005 12 : RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128And, And);
1006 0 : }
1007 :
1008 26663 : WASM_SIMD_TEST(S128Or) {
1009 12 : RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128Or, Or);
1010 0 : }
1011 :
1012 26663 : WASM_SIMD_TEST(S128Xor) {
1013 12 : RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128Xor, Xor);
1014 0 : }
1015 :
1016 26663 : WASM_SIMD_TEST(I32x4Eq) {
1017 12 : RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Eq, Equal);
1018 0 : }
1019 :
1020 26663 : WASM_SIMD_TEST(I32x4Ne) {
1021 12 : RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Ne, NotEqual);
1022 0 : }
1023 :
1024 26663 : WASM_SIMD_TEST(I32x4LtS) {
1025 12 : RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4LtS, Less);
1026 0 : }
1027 :
1028 26663 : WASM_SIMD_TEST(I32x4LeS) {
1029 12 : RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4LeS, LessEqual);
1030 0 : }
1031 :
1032 26663 : WASM_SIMD_TEST(I32x4GtS) {
1033 12 : RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4GtS, Greater);
1034 0 : }
1035 :
1036 26663 : WASM_SIMD_TEST(I32x4GeS) {
1037 12 : RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4GeS, GreaterEqual);
1038 0 : }
1039 :
1040 26663 : WASM_SIMD_TEST(I32x4LtU) {
1041 12 : RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4LtU, UnsignedLess);
1042 0 : }
1043 :
1044 26663 : WASM_SIMD_TEST(I32x4LeU) {
1045 0 : RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4LeU,
1046 12 : UnsignedLessEqual);
1047 0 : }
1048 :
1049 26663 : WASM_SIMD_TEST(I32x4GtU) {
1050 12 : RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4GtU, UnsignedGreater);
1051 0 : }
1052 :
1053 26663 : WASM_SIMD_TEST(I32x4GeU) {
1054 0 : RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4GeU,
1055 12 : UnsignedGreaterEqual);
1056 0 : }
1057 :
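     : // Runs a shift opcode for every shift amount in [1, 32).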
1058 36 : void RunI32x4ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
1059 : WasmOpcode opcode, Int32ShiftOp expected_op) {
1060 2268 : for (int shift = 1; shift < 32; shift++) {
1061 1116 : WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
1062 : int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
1063 : byte value = 0;
1064 : byte simd1 = r.AllocateLocal(kWasmS128);
1065 1116 : BUILD(r,
1066 : WASM_SET_LOCAL(simd1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value))),
1067 : WASM_SET_GLOBAL(
1068 : 0, WASM_SIMD_SHIFT_OP(opcode, shift, WASM_GET_LOCAL(simd1))),
1069 : WASM_ONE);
1070 :
1071 130572 : FOR_INT32_INPUTS(x) {
1072 64728 : r.Call(x);
1073 64728 : int32_t expected = expected_op(x, shift);
1074 582552 : for (int i = 0; i < 4; i++) {
1075 517824 : CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
1076 : }
1077 : }
1078 : }
1079 36 : }
1080 :
1081 26663 : WASM_SIMD_TEST(I32x4Shl) {
1082 0 : RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4Shl,
1083 12 : LogicalShiftLeft);
1084 0 : }
1085 :
1086 26663 : WASM_SIMD_TEST(I32x4ShrS) {
1087 0 : RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4ShrS,
1088 12 : ArithmeticShiftRight);
1089 0 : }
1090 :
1091 26663 : WASM_SIMD_TEST(I32x4ShrU) {
1092 0 : RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4ShrU,
1093 12 : LogicalShiftRight);
1094 0 : }
1095 :
1096 : // Tests both signed and unsigned conversion from I8x16 (unpacking).
1097 26687 : WASM_SIMD_TEST(I16x8ConvertI8x16) {
1098 12 : WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
1099 : // Create four output vectors to hold signed and unsigned results.
1100 : int16_t* g0 = r.builder().AddGlobal<int16_t>(kWasmS128);
1101 : int16_t* g1 = r.builder().AddGlobal<int16_t>(kWasmS128);
1102 : int16_t* g2 = r.builder().AddGlobal<int16_t>(kWasmS128);
1103 : int16_t* g3 = r.builder().AddGlobal<int16_t>(kWasmS128);
1104 : // Build fn to splat test value, perform conversions, and write the results.
1105 : byte value = 0;
1106 : byte temp1 = r.AllocateLocal(kWasmS128);
1107 12 : BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(value))),
1108 : WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(kExprI16x8SConvertI8x16High,
1109 : WASM_GET_LOCAL(temp1))),
1110 : WASM_SET_GLOBAL(1, WASM_SIMD_UNOP(kExprI16x8SConvertI8x16Low,
1111 : WASM_GET_LOCAL(temp1))),
1112 : WASM_SET_GLOBAL(2, WASM_SIMD_UNOP(kExprI16x8UConvertI8x16High,
1113 : WASM_GET_LOCAL(temp1))),
1114 : WASM_SET_GLOBAL(3, WASM_SIMD_UNOP(kExprI16x8UConvertI8x16Low,
1115 : WASM_GET_LOCAL(temp1))),
1116 : WASM_ONE);
1117 :
1118 228 : FOR_INT8_INPUTS(x) {
1119 108 : r.Call(x);
1120 : int16_t expected_signed = static_cast<int16_t>(Widen<int8_t>(x));
1121 : int16_t expected_unsigned = static_cast<int16_t>(UnsignedWiden<int8_t>(x));
1122 1836 : for (int i = 0; i < 8; i++) {
1123 864 : CHECK_EQ(expected_signed, ReadLittleEndianValue<int16_t>(&g0[i]));
1124 864 : CHECK_EQ(expected_signed, ReadLittleEndianValue<int16_t>(&g1[i]));
1125 864 : CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int16_t>(&g2[i]));
1126 864 : CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int16_t>(&g3[i]));
1127 : }
1128 : }
1129 12 : }
1130 :
1131 : // Tests both signed and unsigned conversion from I32x4 (packing).
1132 26687 : WASM_SIMD_TEST(I16x8ConvertI32x4) {
1133 12 : WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
1134 : // Create output vectors to hold signed and unsigned results.
1135 : int16_t* g0 = r.builder().AddGlobal<int16_t>(kWasmS128);
1136 : int16_t* g1 = r.builder().AddGlobal<int16_t>(kWasmS128);
1137 : // Build fn to splat test value, perform conversions, and write the results.
1138 : byte value = 0;
1139 : byte temp1 = r.AllocateLocal(kWasmS128);
1140 12 : BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value))),
1141 : WASM_SET_GLOBAL(
1142 : 0, WASM_SIMD_BINOP(kExprI16x8SConvertI32x4, WASM_GET_LOCAL(temp1),
1143 : WASM_GET_LOCAL(temp1))),
1144 : WASM_SET_GLOBAL(
1145 : 1, WASM_SIMD_BINOP(kExprI16x8UConvertI32x4, WASM_GET_LOCAL(temp1),
1146 : WASM_GET_LOCAL(temp1))),
1147 : WASM_ONE);
1148 :
1149 1404 : FOR_INT32_INPUTS(x) {
1150 696 : r.Call(x);
1151 696 : int16_t expected_signed = Narrow<int16_t>(x);
1152 : int16_t expected_unsigned = UnsignedNarrow<int16_t>(x);
1153 11832 : for (int i = 0; i < 8; i++) {
1154 5568 : CHECK_EQ(expected_signed, ReadLittleEndianValue<int16_t>(&g0[i]));
1155 5568 : CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int16_t>(&g1[i]));
1156 : }
1157 : }
1158 12 : }
1159 :
1160 12 : void RunI16x8UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
1161 : WasmOpcode opcode, Int16UnOp expected_op) {
1162 12 : WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
1163 : // Global to hold output.
1164 : int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
1165 : // Build fn to splat test value, perform unop, and write the result.
1166 : byte value = 0;
1167 : byte temp1 = r.AllocateLocal(kWasmS128);
1168 12 : BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value))),
1169 : WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(opcode, WASM_GET_LOCAL(temp1))),
1170 : WASM_ONE);
1171 :
1172 228 : FOR_INT16_INPUTS(x) {
1173 108 : r.Call(x);
1174 108 : int16_t expected = expected_op(x);
1175 1836 : for (int i = 0; i < 8; i++) {
1176 864 : CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g[i]));
1177 : }
1178 : }
1179 12 : }
1180 :
1181 26663 : WASM_SIMD_TEST(I16x8Neg) {
1182 0 : RunI16x8UnOpTest(execution_tier, lower_simd, kExprI16x8Neg,
1183 12 : base::NegateWithWraparound);
1184 0 : }
1185 :
1186 252 : void RunI16x8BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
1187 : WasmOpcode opcode, Int16BinOp expected_op) {
1188 252 : WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
1189 : // Global to hold output.
1190 : int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
1191 : // Build fn to splat test values, perform binop, and write the result.
1192 : byte value1 = 0, value2 = 1;
1193 : byte temp1 = r.AllocateLocal(kWasmS128);
1194 : byte temp2 = r.AllocateLocal(kWasmS128);
1195 252 : BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value1))),
1196 : WASM_SET_LOCAL(temp2, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value2))),
1197 : WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
1198 : WASM_GET_LOCAL(temp2))),
1199 : WASM_ONE);
1200 :
1201 4788 : FOR_INT16_INPUTS(x) {
1202 43092 : FOR_INT16_INPUTS(y) {
1203 20412 : r.Call(x, y);
1204 20412 : int16_t expected = expected_op(x, y);
1205 347004 : for (int i = 0; i < 8; i++) {
1206 163296 : CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g[i]));
1207 : }
1208 : }
1209 : }
1210 252 : }
1211 :
1212 26663 : WASM_SIMD_TEST(I16x8Add) {
1213 0 : RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Add,
1214 12 : base::AddWithWraparound);
1215 0 : }
1216 :
1217 26663 : WASM_SIMD_TEST(I16x8AddSaturateS) {
1218 0 : RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8AddSaturateS,
1219 12 : AddSaturate);
1220 0 : }
1221 :
1222 26663 : WASM_SIMD_TEST(I16x8Sub) {
1223 0 : RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Sub,
1224 12 : base::SubWithWraparound);
1225 0 : }
1226 :
1227 26663 : WASM_SIMD_TEST(I16x8SubSaturateS) {
1228 0 : RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8SubSaturateS,
1229 12 : SubSaturate);
1230 0 : }
1231 :
1232 26663 : WASM_SIMD_TEST(I16x8Mul) {
1233 0 : RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Mul,
1234 12 : base::MulWithWraparound);
1235 0 : }
1236 :
1237 26663 : WASM_SIMD_TEST(I16x8MinS) {
1238 12 : RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8MinS, Minimum);
1239 0 : }
1240 :
1241 26663 : WASM_SIMD_TEST(I16x8MaxS) {
1242 12 : RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8MaxS, Maximum);
1243 0 : }
1244 :
1245 26663 : WASM_SIMD_TEST(I16x8AddSaturateU) {
1246 0 : RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8AddSaturateU,
1247 12 : UnsignedAddSaturate);
1248 0 : }
1249 :
1250 26663 : WASM_SIMD_TEST(I16x8SubSaturateU) {
1251 0 : RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8SubSaturateU,
1252 12 : UnsignedSubSaturate);
1253 0 : }
1254 :
1255 26663 : WASM_SIMD_TEST(I16x8MinU) {
1256 0 : RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8MinU,
1257 12 : UnsignedMinimum);
1258 0 : }
1259 :
1260 26663 : WASM_SIMD_TEST(I16x8MaxU) {
1261 0 : RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8MaxU,
1262 12 : UnsignedMaximum);
1263 0 : }
1264 :
1265 26663 : WASM_SIMD_TEST(I16x8Eq) {
1266 12 : RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Eq, Equal);
1267 0 : }
1268 :
1269 26663 : WASM_SIMD_TEST(I16x8Ne) {
1270 12 : RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Ne, NotEqual);
1271 0 : }
1272 :
1273 26663 : WASM_SIMD_TEST(I16x8LtS) {
1274 12 : RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8LtS, Less);
1275 0 : }
1276 :
1277 26663 : WASM_SIMD_TEST(I16x8LeS) {
1278 12 : RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8LeS, LessEqual);
1279 0 : }
1280 :
1281 26663 : WASM_SIMD_TEST(I16x8GtS) {
1282 12 : RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8GtS, Greater);
1283 0 : }
1284 :
1285 26663 : WASM_SIMD_TEST(I16x8GeS) {
1286 12 : RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8GeS, GreaterEqual);
1287 0 : }
1288 :
1289 26663 : WASM_SIMD_TEST(I16x8GtU) {
1290 12 : RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8GtU, UnsignedGreater);
1291 0 : }
1292 :
1293 26663 : WASM_SIMD_TEST(I16x8GeU) {
1294 0 : RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8GeU,
1295 12 : UnsignedGreaterEqual);
1296 0 : }
1297 :
1298 26663 : WASM_SIMD_TEST(I16x8LtU) {
1299 12 : RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8LtU, UnsignedLess);
1300 0 : }
1301 :
1302 26663 : WASM_SIMD_TEST(I16x8LeU) {
1303 0 : RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8LeU,
1304 12 : UnsignedLessEqual);
1305 0 : }
1306 :
1307 36 : void RunI16x8ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
1308 : WasmOpcode opcode, Int16ShiftOp expected_op) {
1309 1116 : for (int shift = 1; shift < 16; shift++) {
1310 540 : WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
1311 : int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
1312 : byte value = 0;
1313 : byte simd1 = r.AllocateLocal(kWasmS128);
1314 540 : BUILD(r,
1315 : WASM_SET_LOCAL(simd1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value))),
1316 : WASM_SET_GLOBAL(
1317 : 0, WASM_SIMD_SHIFT_OP(opcode, shift, WASM_GET_LOCAL(simd1))),
1318 : WASM_ONE);
1319 :
1320 10260 : FOR_INT16_INPUTS(x) {
1321 4860 : r.Call(x);
1322 4860 : int16_t expected = expected_op(x, shift);
1323 82620 : for (int i = 0; i < 8; i++) {
1324 77760 : CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g[i]));
1325 : }
1326 : }
1327 : }
1328 36 : }
1329 :
1330 26663 : WASM_SIMD_TEST(I16x8Shl) {
1331 0 : RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8Shl,
1332 12 : LogicalShiftLeft);
1333 0 : }
1334 :
1335 26663 : WASM_SIMD_TEST(I16x8ShrS) {
1336 0 : RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8ShrS,
1337 12 : ArithmeticShiftRight);
1338 0 : }
1339 :
1340 26663 : WASM_SIMD_TEST(I16x8ShrU) {
1341 0 : RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8ShrU,
1342 12 : LogicalShiftRight);
1343 0 : }
1344 :
1345 12 : void RunI8x16UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
1346 : WasmOpcode opcode, Int8UnOp expected_op) {
1347 12 : WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
1348 : // Global to hold output.
1349 : int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
1350 : // Build fn to splat test value, perform unop, and write the result.
1351 : byte value = 0;
1352 : byte temp1 = r.AllocateLocal(kWasmS128);
1353 12 : BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(value))),
1354 : WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(opcode, WASM_GET_LOCAL(temp1))),
1355 : WASM_ONE);
1356 :
1357 228 : FOR_INT8_INPUTS(x) {
1358 108 : r.Call(x);
1359 108 : int8_t expected = expected_op(x);
1360 3564 : for (int i = 0; i < 16; i++) {
1361 1728 : CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g[i]));
1362 : }
1363 : }
1364 12 : }
1365 :
1366 26663 : WASM_SIMD_TEST(I8x16Neg) {
1367 0 : RunI8x16UnOpTest(execution_tier, lower_simd, kExprI8x16Neg,
1368 12 : base::NegateWithWraparound);
1369 0 : }
1370 :
1371 : // Tests both signed and unsigned conversion from I16x8 (packing).
1372 26687 : WASM_SIMD_TEST(I8x16ConvertI16x8) {
1373 12 : WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
1374 : // Create output vectors to hold signed and unsigned results.
1375 : int8_t* g0 = r.builder().AddGlobal<int8_t>(kWasmS128);
1376 : int8_t* g1 = r.builder().AddGlobal<int8_t>(kWasmS128);
1377 : // Build fn to splat test value, perform conversions, and write the results.
1378 : byte value = 0;
1379 : byte temp1 = r.AllocateLocal(kWasmS128);
1380 12 : BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value))),
1381 : WASM_SET_GLOBAL(
1382 : 0, WASM_SIMD_BINOP(kExprI8x16SConvertI16x8, WASM_GET_LOCAL(temp1),
1383 : WASM_GET_LOCAL(temp1))),
1384 : WASM_SET_GLOBAL(
1385 : 1, WASM_SIMD_BINOP(kExprI8x16UConvertI16x8, WASM_GET_LOCAL(temp1),
1386 : WASM_GET_LOCAL(temp1))),
1387 : WASM_ONE);
1388 :
1389 228 : FOR_INT16_INPUTS(x) {
1390 108 : r.Call(x);
1391 108 : int8_t expected_signed = Narrow<int8_t>(x);
1392 : int8_t expected_unsigned = UnsignedNarrow<int8_t>(x);
1393 3564 : for (int i = 0; i < 16; i++) {
1394 1728 : CHECK_EQ(expected_signed, ReadLittleEndianValue<int8_t>(&g0[i]));
1395 1728 : CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int8_t>(&g1[i]));
1396 : }
1397 : }
1398 12 : }
1399 :
1400 252 : void RunI8x16BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
1401 : WasmOpcode opcode, Int8BinOp expected_op) {
1402 252 : WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
1403 : // Global to hold output.
1404 : int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
1405 : // Build fn to splat test values, perform binop, and write the result.
1406 : byte value1 = 0, value2 = 1;
1407 : byte temp1 = r.AllocateLocal(kWasmS128);
1408 : byte temp2 = r.AllocateLocal(kWasmS128);
1409 252 : BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(value1))),
1410 : WASM_SET_LOCAL(temp2, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(value2))),
1411 : WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
1412 : WASM_GET_LOCAL(temp2))),
1413 : WASM_ONE);
1414 :
1415 4788 : FOR_INT8_INPUTS(x) {
1416 43092 : FOR_INT8_INPUTS(y) {
1417 20412 : r.Call(x, y);
1418 20412 : int8_t expected = expected_op(x, y);
1419 673596 : for (int i = 0; i < 16; i++) {
1420 326592 : CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g[i]));
1421 : }
1422 : }
1423 : }
1424 252 : }
1425 :
1426 26663 : WASM_SIMD_TEST(I8x16Add) {
1427 0 : RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Add,
1428 12 : base::AddWithWraparound);
1429 0 : }
1430 :
1431 26663 : WASM_SIMD_TEST(I8x16AddSaturateS) {
1432 0 : RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16AddSaturateS,
1433 12 : AddSaturate);
1434 0 : }
1435 :
1436 26663 : WASM_SIMD_TEST(I8x16Sub) {
1437 0 : RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Sub,
1438 12 : base::SubWithWraparound);
1439 0 : }
1440 :
1441 26663 : WASM_SIMD_TEST(I8x16SubSaturateS) {
1442 0 : RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16SubSaturateS,
1443 12 : SubSaturate);
1444 0 : }
1445 :
1446 26663 : WASM_SIMD_TEST(I8x16MinS) {
1447 12 : RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16MinS, Minimum);
1448 0 : }
1449 :
1450 26663 : WASM_SIMD_TEST(I8x16MaxS) {
1451 12 : RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16MaxS, Maximum);
1452 0 : }
1453 :
1454 26663 : WASM_SIMD_TEST(I8x16AddSaturateU) {
1455 0 : RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16AddSaturateU,
1456 12 : UnsignedAddSaturate);
1457 0 : }
1458 :
1459 26663 : WASM_SIMD_TEST(I8x16SubSaturateU) {
1460 0 : RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16SubSaturateU,
1461 12 : UnsignedSubSaturate);
1462 0 : }
1463 :
1464 26663 : WASM_SIMD_TEST(I8x16MinU) {
1465 0 : RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16MinU,
1466 12 : UnsignedMinimum);
1467 0 : }
1468 :
1469 26663 : WASM_SIMD_TEST(I8x16MaxU) {
1470 0 : RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16MaxU,
1471 12 : UnsignedMaximum);
1472 0 : }
1473 :
1474 26663 : WASM_SIMD_TEST(I8x16Eq) {
1475 12 : RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Eq, Equal);
1476 0 : }
1477 :
1478 26663 : WASM_SIMD_TEST(I8x16Ne) {
1479 12 : RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Ne, NotEqual);
1480 0 : }
1481 :
1482 26663 : WASM_SIMD_TEST(I8x16GtS) {
1483 12 : RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16GtS, Greater);
1484 0 : }
1485 :
1486 26663 : WASM_SIMD_TEST(I8x16GeS) {
1487 12 : RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16GeS, GreaterEqual);
1488 0 : }
1489 :
1490 26663 : WASM_SIMD_TEST(I8x16LtS) {
1491 12 : RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16LtS, Less);
1492 0 : }
1493 :
1494 26663 : WASM_SIMD_TEST(I8x16LeS) {
1495 12 : RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16LeS, LessEqual);
1496 0 : }
1497 :
1498 26663 : WASM_SIMD_TEST(I8x16GtU) {
1499 12 : RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16GtU, UnsignedGreater);
1500 0 : }
1501 :
1502 26663 : WASM_SIMD_TEST(I8x16GeU) {
1503 0 : RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16GeU,
1504 12 : UnsignedGreaterEqual);
1505 0 : }
1506 :
1507 26663 : WASM_SIMD_TEST(I8x16LtU) {
1508 12 : RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16LtU, UnsignedLess);
1509 0 : }
1510 :
1511 26663 : WASM_SIMD_TEST(I8x16LeU) {
1512 0 : RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16LeU,
1513 12 : UnsignedLessEqual);
1514 0 : }
1515 :
1516 26663 : WASM_SIMD_TEST(I8x16Mul) {
1517 0 : RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Mul,
1518 12 : base::MulWithWraparound);
1519 0 : }
1520 :
1521 36 : void RunI8x16ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
1522 : WasmOpcode opcode, Int8ShiftOp expected_op) {
1523 540 : for (int shift = 1; shift < 8; shift++) {
1524 252 : WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
1525 : int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
1526 : byte value = 0;
1527 : byte simd1 = r.AllocateLocal(kWasmS128);
1528 252 : BUILD(r,
1529 : WASM_SET_LOCAL(simd1, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(value))),
1530 : WASM_SET_GLOBAL(
1531 : 0, WASM_SIMD_SHIFT_OP(opcode, shift, WASM_GET_LOCAL(simd1))),
1532 : WASM_ONE);
1533 :
1534 4788 : FOR_INT8_INPUTS(x) {
1535 2268 : r.Call(x);
1536 2268 : int8_t expected = expected_op(x, shift);
1537 74844 : for (int i = 0; i < 16; i++) {
1538 72576 : CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g[i]));
1539 : }
1540 : }
1541 : }
1542 36 : }
1543 :
1544 26663 : WASM_SIMD_TEST(I8x16Shl) {
1545 0 : RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16Shl,
1546 12 : LogicalShiftLeft);
1547 0 : }
1548 :
1549 26663 : WASM_SIMD_TEST(I8x16ShrS) {
1550 0 : RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16ShrS,
1551 12 : ArithmeticShiftRight);
1552 0 : }
1553 :
1554 26663 : WASM_SIMD_TEST(I8x16ShrU) {
1555 0 : RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16ShrU,
1556 12 : LogicalShiftRight);
1557 0 : }
1558 :
1559 : // Test Select by making a mask where the 1st and 2nd lanes are true and the
1560 : // rest false, and comparing for non-equality with zero to convert to a boolean
1561 : // vector.
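// For example, with r.Call(0x12, 0x34) below, src1 is splatted with 0x12 and
// src2 with 0x34; the mask compares non-equal to zero only in lanes 1 and 2,
// so lanes 0-3 of the selected vector are expected to read
// [0x34, 0x12, 0x12, 0x34].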
1562 : #define WASM_SIMD_SELECT_TEST(format) \
1563 : WASM_SIMD_TEST(S##format##Select) { \
1564 : WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd); \
1565 : byte val1 = 0; \
1566 : byte val2 = 1; \
1567 : byte src1 = r.AllocateLocal(kWasmS128); \
1568 : byte src2 = r.AllocateLocal(kWasmS128); \
1569 : byte zero = r.AllocateLocal(kWasmS128); \
1570 : byte mask = r.AllocateLocal(kWasmS128); \
1571 : BUILD(r, \
1572 : WASM_SET_LOCAL(src1, \
1573 : WASM_SIMD_I##format##_SPLAT(WASM_GET_LOCAL(val1))), \
1574 : WASM_SET_LOCAL(src2, \
1575 : WASM_SIMD_I##format##_SPLAT(WASM_GET_LOCAL(val2))), \
1576 : WASM_SET_LOCAL(zero, WASM_SIMD_I##format##_SPLAT(WASM_ZERO)), \
1577 : WASM_SET_LOCAL(mask, WASM_SIMD_I##format##_REPLACE_LANE( \
1578 : 1, WASM_GET_LOCAL(zero), WASM_I32V(-1))), \
1579 : WASM_SET_LOCAL(mask, WASM_SIMD_I##format##_REPLACE_LANE( \
1580 : 2, WASM_GET_LOCAL(mask), WASM_I32V(-1))), \
1581 : WASM_SET_LOCAL( \
1582 : mask, \
1583 : WASM_SIMD_SELECT( \
1584 : format, WASM_GET_LOCAL(src1), WASM_GET_LOCAL(src2), \
1585 : WASM_SIMD_BINOP(kExprI##format##Ne, WASM_GET_LOCAL(mask), \
1586 : WASM_GET_LOCAL(zero)))), \
1587 : WASM_SIMD_CHECK_LANE(I##format, mask, I32, val2, 0), \
1588 : WASM_SIMD_CHECK_LANE(I##format, mask, I32, val1, 1), \
1589 : WASM_SIMD_CHECK_LANE(I##format, mask, I32, val1, 2), \
1590 : WASM_SIMD_CHECK_LANE(I##format, mask, I32, val2, 3), WASM_ONE); \
1591 : \
1592 : CHECK_EQ(1, r.Call(0x12, 0x34)); \
1593 : }
1594 :
1595 26699 : WASM_SIMD_SELECT_TEST(32x4)
1596 26699 : WASM_SIMD_SELECT_TEST(16x8)
1597 26699 : WASM_SIMD_SELECT_TEST(8x16)
1598 :
1599 : // Test Select by making a mask where the 1st and 2nd lanes are non-zero and the
1600 : // rest 0. The mask is not the result of a comparison op.
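// For example, with r.Call(0x12, 0x34, 0x32) below, lanes 1 and 2 of the mask
// hold 0xF rather than all-ones, so a bitwise select in those lanes yields
// (0x12 & 0x0F) | (0x34 & ~0x0F) == 0x32, the `combined` value checked there;
// lanes 0 and 3 still read 0x34 from src2.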
1601 : #define WASM_SIMD_NON_CANONICAL_SELECT_TEST(format) \
1602 : WASM_SIMD_TEST_NO_LOWERING(S##format##NonCanonicalSelect) { \
1603 : WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_tier, \
1604 : lower_simd); \
1605 : byte val1 = 0; \
1606 : byte val2 = 1; \
1607 : byte combined = 2; \
1608 : byte src1 = r.AllocateLocal(kWasmS128); \
1609 : byte src2 = r.AllocateLocal(kWasmS128); \
1610 : byte zero = r.AllocateLocal(kWasmS128); \
1611 : byte mask = r.AllocateLocal(kWasmS128); \
1612 : BUILD(r, \
1613 : WASM_SET_LOCAL(src1, \
1614 : WASM_SIMD_I##format##_SPLAT(WASM_GET_LOCAL(val1))), \
1615 : WASM_SET_LOCAL(src2, \
1616 : WASM_SIMD_I##format##_SPLAT(WASM_GET_LOCAL(val2))), \
1617 : WASM_SET_LOCAL(zero, WASM_SIMD_I##format##_SPLAT(WASM_ZERO)), \
1618 : WASM_SET_LOCAL(mask, WASM_SIMD_I##format##_REPLACE_LANE( \
1619 : 1, WASM_GET_LOCAL(zero), WASM_I32V(0xF))), \
1620 : WASM_SET_LOCAL(mask, WASM_SIMD_I##format##_REPLACE_LANE( \
1621 : 2, WASM_GET_LOCAL(mask), WASM_I32V(0xF))), \
1622 : WASM_SET_LOCAL(mask, WASM_SIMD_SELECT(format, WASM_GET_LOCAL(src1), \
1623 : WASM_GET_LOCAL(src2), \
1624 : WASM_GET_LOCAL(mask))), \
1625 : WASM_SIMD_CHECK_LANE(I##format, mask, I32, val2, 0), \
1626 : WASM_SIMD_CHECK_LANE(I##format, mask, I32, combined, 1), \
1627 : WASM_SIMD_CHECK_LANE(I##format, mask, I32, combined, 2), \
1628 : WASM_SIMD_CHECK_LANE(I##format, mask, I32, val2, 3), WASM_ONE); \
1629 : \
1630 : CHECK_EQ(1, r.Call(0x12, 0x34, 0x32)); \
1631 : }
1632 :
1633 26679 : WASM_SIMD_NON_CANONICAL_SELECT_TEST(32x4)
1634 26679 : WASM_SIMD_NON_CANONICAL_SELECT_TEST(16x8)
1635 26679 : WASM_SIMD_NON_CANONICAL_SELECT_TEST(8x16)
1636 :
1637 : // Test binary ops with two lane test patterns, all lanes distinct.
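// The two patterns are chosen so that every lane value is distinct across both
// inputs: src0 holds [0, 1, ..., kElems - 1] and src1 holds
// [kElems, ..., 2 * kElems - 1]. For int8_t lanes that is [0..15] and [16..31],
// which makes the index bytes of an S8x16Shuffle directly readable as the
// expected output bytes.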
1638 : template <typename T>
1639 7764 : void RunBinaryLaneOpTest(
1640 : ExecutionTier execution_tier, LowerSimd lower_simd, WasmOpcode simd_op,
1641 : const std::array<T, kSimd128Size / sizeof(T)>& expected) {
1642 7764 : WasmRunner<int32_t> r(execution_tier, lower_simd);
1643 : // Set up two test patterns as globals, e.g. [0, 1, 2, 3] and [4, 5, 6, 7].
1644 : T* src0 = r.builder().AddGlobal<T>(kWasmS128);
1645 : T* src1 = r.builder().AddGlobal<T>(kWasmS128);
1646 : static const int kElems = kSimd128Size / sizeof(T);
1647 255444 : for (int i = 0; i < kElems; i++) {
1648 123840 : WriteLittleEndianValue<T>(&src0[i], i);
1649 123840 : WriteLittleEndianValue<T>(&src1[i], kElems + i);
1650 : }
1651 7764 : if (simd_op == kExprS8x16Shuffle) {
1652 7728 : BUILD(r,
1653 : WASM_SET_GLOBAL(0, WASM_SIMD_S8x16_SHUFFLE_OP(simd_op, expected,
1654 : WASM_GET_GLOBAL(0),
1655 : WASM_GET_GLOBAL(1))),
1656 : WASM_ONE);
1657 : } else {
1658 36 : BUILD(r,
1659 : WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(simd_op, WASM_GET_GLOBAL(0),
1660 : WASM_GET_GLOBAL(1))),
1661 : WASM_ONE);
1662 : }
1663 :
1664 7764 : CHECK_EQ(1, r.Call());
1665 255444 : for (size_t i = 0; i < expected.size(); i++) {
1666 123840 : CHECK_EQ(ReadLittleEndianValue<T>(&src0[i]), expected[i]);
1667 : }
1668 7764 : }
1669 :
1670 26663 : WASM_SIMD_TEST(I32x4AddHoriz) {
1671 : // Inputs are [0 1 2 3] and [4 5 6 7].
1672 24 : RunBinaryLaneOpTest<int32_t>(execution_tier, lower_simd, kExprI32x4AddHoriz,
1673 12 : {{1, 5, 9, 13}});
1674 0 : }
1675 :
1676 26687 : WASM_SIMD_TEST(I16x8AddHoriz) {
1677 : // Inputs are [0 1 2 3 4 5 6 7] and [8 9 10 11 12 13 14 15].
1678 24 : RunBinaryLaneOpTest<int16_t>(execution_tier, lower_simd, kExprI16x8AddHoriz,
1679 12 : {{1, 5, 9, 13, 17, 21, 25, 29}});
1680 12 : }
1681 :
1682 26663 : WASM_SIMD_TEST(F32x4AddHoriz) {
1683 : // Inputs are [0.0f 1.0f 2.0f 3.0f] and [4.0f 5.0f 6.0f 7.0f].
1684 24 : RunBinaryLaneOpTest<float>(execution_tier, lower_simd, kExprF32x4AddHoriz,
1685 12 : {{1.0f, 5.0f, 9.0f, 13.0f}});
1686 0 : }
1687 :
1688 : // Test shuffle ops.
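// S8x16Shuffle indices select bytes from the 32-byte concatenation of the two
// inputs: indices 0-15 refer to the first operand and 16-31 to the second. The
// helper below uses this to derive three extra variants of each shuffle:
// XOR-ing every index with 16 swaps which input it selects from, AND-ing with
// 15 folds all indices onto the first operand (a swizzle), and OR-ing with 16
// folds them onto the second.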
1689 1932 : void RunShuffleOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
1690 : WasmOpcode simd_op,
1691 : const std::array<int8_t, kSimd128Size>& shuffle) {
1692 : // Test the original shuffle.
1693 1932 : RunBinaryLaneOpTest<int8_t>(execution_tier, lower_simd, simd_op, shuffle);
1694 :
1695 : // Test a non-canonical (inputs reversed) version of the shuffle.
1696 1932 : std::array<int8_t, kSimd128Size> other_shuffle(shuffle);
1697 32844 : for (size_t i = 0; i < shuffle.size(); ++i) other_shuffle[i] ^= kSimd128Size;
1698 : RunBinaryLaneOpTest<int8_t>(execution_tier, lower_simd, simd_op,
1699 1932 : other_shuffle);
1700 :
1701 : // Test the swizzle (one-operand) version of the shuffle.
1702 1932 : std::array<int8_t, kSimd128Size> swizzle(shuffle);
1703 32844 : for (size_t i = 0; i < shuffle.size(); ++i) swizzle[i] &= (kSimd128Size - 1);
1704 1932 : RunBinaryLaneOpTest<int8_t>(execution_tier, lower_simd, simd_op, swizzle);
1705 :
1706 : // Test the non-canonical swizzle (one-operand) version of the shuffle.
1707 1932 : std::array<int8_t, kSimd128Size> other_swizzle(shuffle);
1708 32844 : for (size_t i = 0; i < shuffle.size(); ++i) other_swizzle[i] |= kSimd128Size;
1709 : RunBinaryLaneOpTest<int8_t>(execution_tier, lower_simd, simd_op,
1710 1932 : other_swizzle);
1711 1932 : }
1712 :
1713 : #define SHUFFLE_LIST(V) \
1714 : V(S128Identity) \
1715 : V(S32x4Dup) \
1716 : V(S32x4ZipLeft) \
1717 : V(S32x4ZipRight) \
1718 : V(S32x4UnzipLeft) \
1719 : V(S32x4UnzipRight) \
1720 : V(S32x4TransposeLeft) \
1721 : V(S32x4TransposeRight) \
1722 : V(S32x2Reverse) \
1723 : V(S32x4Irregular) \
1724 : V(S16x8Dup) \
1725 : V(S16x8ZipLeft) \
1726 : V(S16x8ZipRight) \
1727 : V(S16x8UnzipLeft) \
1728 : V(S16x8UnzipRight) \
1729 : V(S16x8TransposeLeft) \
1730 : V(S16x8TransposeRight) \
1731 : V(S16x4Reverse) \
1732 : V(S16x2Reverse) \
1733 : V(S16x8Irregular) \
1734 : V(S8x16Dup) \
1735 : V(S8x16ZipLeft) \
1736 : V(S8x16ZipRight) \
1737 : V(S8x16UnzipLeft) \
1738 : V(S8x16UnzipRight) \
1739 : V(S8x16TransposeLeft) \
1740 : V(S8x16TransposeRight) \
1741 : V(S8x8Reverse) \
1742 : V(S8x4Reverse) \
1743 : V(S8x2Reverse) \
1744 : V(S8x16Irregular)
1745 :
1746 : enum ShuffleKey {
1747 : #define SHUFFLE_ENUM_VALUE(Name) k##Name,
1748 : SHUFFLE_LIST(SHUFFLE_ENUM_VALUE)
1749 : #undef SHUFFLE_ENUM_VALUE
1750 : kNumShuffleKeys
1751 : };
1752 :
1753 : using Shuffle = std::array<int8_t, kSimd128Size>;
1754 : using ShuffleMap = std::map<ShuffleKey, const Shuffle>;
1755 :
1756 26639 : ShuffleMap test_shuffles = {
1757 : {kS128Identity,
1758 : {{16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}}},
1759 : {kS32x4Dup,
1760 : {{16, 17, 18, 19, 16, 17, 18, 19, 16, 17, 18, 19, 16, 17, 18, 19}}},
1761 : {kS32x4ZipLeft, {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23}}},
1762 : {kS32x4ZipRight,
1763 : {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31}}},
1764 : {kS32x4UnzipLeft,
1765 : {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27}}},
1766 : {kS32x4UnzipRight,
1767 : {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31}}},
1768 : {kS32x4TransposeLeft,
1769 : {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27}}},
1770 : {kS32x4TransposeRight,
1771 : {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31}}},
1772 : {kS32x2Reverse, // swizzle only
1773 : {{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}}},
1774 : {kS32x4Irregular,
1775 : {{0, 1, 2, 3, 16, 17, 18, 19, 16, 17, 18, 19, 20, 21, 22, 23}}},
1776 : {kS16x8Dup,
1777 : {{18, 19, 18, 19, 18, 19, 18, 19, 18, 19, 18, 19, 18, 19, 18, 19}}},
1778 : {kS16x8ZipLeft, {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23}}},
1779 : {kS16x8ZipRight,
1780 : {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31}}},
1781 : {kS16x8UnzipLeft,
1782 : {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29}}},
1783 : {kS16x8UnzipRight,
1784 : {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31}}},
1785 : {kS16x8TransposeLeft,
1786 : {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29}}},
1787 : {kS16x8TransposeRight,
1788 : {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31}}},
1789 : {kS16x4Reverse, // swizzle only
1790 : {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}}},
1791 : {kS16x2Reverse, // swizzle only
1792 : {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}}},
1793 : {kS16x8Irregular,
1794 : {{0, 1, 16, 17, 16, 17, 0, 1, 4, 5, 20, 21, 6, 7, 22, 23}}},
1795 : {kS8x16Dup,
1796 : {{19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19}}},
1797 : {kS8x16ZipLeft, {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}}},
1798 : {kS8x16ZipRight,
1799 : {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31}}},
1800 : {kS8x16UnzipLeft,
1801 : {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}}},
1802 : {kS8x16UnzipRight,
1803 : {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31}}},
1804 : {kS8x16TransposeLeft,
1805 : {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30}}},
1806 : {kS8x16TransposeRight,
1807 : {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31}}},
1808 : {kS8x8Reverse, // swizzle only
1809 : {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}}},
1810 : {kS8x4Reverse, // swizzle only
1811 : {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}}},
1812 : {kS8x2Reverse, // swizzle only
1813 : {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}}},
1814 : {kS8x16Irregular,
1815 : {{0, 16, 0, 16, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}}},
1816 : };
1817 :
1818 : #define SHUFFLE_TEST(Name) \
1819 : WASM_SIMD_TEST(Name) { \
1820 : ShuffleMap::const_iterator it = test_shuffles.find(k##Name); \
1821 : DCHECK_NE(it, test_shuffles.end()); \
1822 : RunShuffleOpTest(execution_tier, lower_simd, kExprS8x16Shuffle, \
1823 : it->second); \
1824 : }
1825 28499 : SHUFFLE_LIST(SHUFFLE_TEST)
1826 : #undef SHUFFLE_TEST
1827 : #undef SHUFFLE_LIST
1828 :
1829 : // Test shuffles that blend the two vectors (elements remain in their lanes).
1830 26687 : WASM_SIMD_TEST(S8x16Blend) {
1831 : std::array<int8_t, kSimd128Size> expected;
1832 372 : for (int bias = 1; bias < kSimd128Size; bias++) {
1833 1620 : for (int i = 0; i < bias; i++) expected[i] = i;
1834 1620 : for (int i = bias; i < kSimd128Size; i++) expected[i] = i + kSimd128Size;
1835 180 : RunShuffleOpTest(execution_tier, lower_simd, kExprS8x16Shuffle, expected);
1836 : }
1837 12 : }
1838 :
1839 : // Test shuffles that concatenate the two vectors.
1840 26687 : WASM_SIMD_TEST(S8x16Concat) {
1841 : std::array<int8_t, kSimd128Size> expected;
1842 : // n is offset or bias of concatenation.
1843 372 : for (int n = 1; n < kSimd128Size; ++n) {
1844 : int i = 0;
1845 : // Last kSimd128Size - n bytes of the first vector.
1846 3060 : for (int j = n; j < kSimd128Size; ++j) {
1847 1440 : expected[i++] = j;
1848 : }
1849 : // First n bytes of the second vector.
1850 3060 : for (int j = 0; j < n; ++j) {
1851 1440 : expected[i++] = j + kSimd128Size;
1852 : }
1853 180 : RunShuffleOpTest(execution_tier, lower_simd, kExprS8x16Shuffle, expected);
1854 : }
1855 12 : }
1856 :
1857 : // Combine 3 shuffles a, b, and c by applying both a and b and then applying c
1858 : // to those two results.
1859 0 : Shuffle Combine(const Shuffle& a, const Shuffle& b, const Shuffle& c) {
1860 : Shuffle result;
1861 39600 : for (int i = 0; i < kSimd128Size; ++i) {
1862 19200 : result[i] = c[i] < kSimd128Size ? a[c[i]] : b[c[i] - kSimd128Size];
1863 : }
1864 1200 : return result;
1865 : }
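// For example, if c[0] == 3 then the combined shuffle's lane 0 is a[3], and if
// c[1] == 20 then lane 1 is b[20 - 16] == b[4]; since a and b are themselves
// shuffles, the result is again a well-formed shuffle.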
1866 :
1867 11680 : const Shuffle& GetRandomTestShuffle(v8::base::RandomNumberGenerator* rng) {
1868 11680 : return test_shuffles[static_cast<ShuffleKey>(rng->NextInt(kNumShuffleKeys))];
1869 : }
1870 :
1871 : // Test shuffles that are random combinations of 3 test shuffles. Completely
1872 : // random shuffles almost always generate the slow general shuffle code, so
1873 : // don't exercise as many code paths.
1874 26687 : WASM_SIMD_TEST(S8x16ShuffleFuzz) {
1875 12 : v8::base::RandomNumberGenerator* rng = CcTest::random_number_generator();
1876 : static const int kTests = 100;
1877 2412 : for (int i = 0; i < kTests; ++i) {
1878 1200 : auto shuffle = Combine(GetRandomTestShuffle(rng), GetRandomTestShuffle(rng),
1879 2400 : GetRandomTestShuffle(rng));
1880 1200 : RunShuffleOpTest(execution_tier, lower_simd, kExprS8x16Shuffle, shuffle);
1881 : }
1882 12 : }
1883 :
1884 8080 : void AppendShuffle(const Shuffle& shuffle, std::vector<byte>* buffer) {
1885 8080 : byte opcode[] = {WASM_SIMD_OP(kExprS8x16Shuffle)};
1886 24240 : for (size_t i = 0; i < arraysize(opcode); ++i) buffer->push_back(opcode[i]);
1887 266640 : for (size_t i = 0; i < kSimd128Size; ++i) buffer->push_back((shuffle[i]));
1888 8080 : }
1889 :
1890 800 : void BuildShuffle(std::vector<Shuffle>& shuffles, std::vector<byte>* buffer) {
1891 : // Perform the leaf shuffles on globals 0 and 1.
1892 800 : size_t row_index = (shuffles.size() - 1) / 2;
1893 9680 : for (size_t i = row_index; i < shuffles.size(); ++i) {
1894 4440 : byte operands[] = {WASM_GET_GLOBAL(0), WASM_GET_GLOBAL(1)};
1895 39960 : for (size_t j = 0; j < arraysize(operands); ++j)
1896 17760 : buffer->push_back(operands[j]);
1897 4440 : AppendShuffle(shuffles[i], buffer);
1898 : }
1899 : // Now perform inner shuffles in the correct order on operands on the stack.
1900 : do {
1901 5704 : for (size_t i = row_index / 2; i < row_index; ++i) {
1902 3640 : AppendShuffle(shuffles[i], buffer);
1903 : }
1904 : row_index /= 2;
1905 2064 : } while (row_index != 0);
1906 800 : byte epilog[] = {kExprSetGlobal, static_cast<byte>(0), WASM_ONE};
1907 4000 : for (size_t j = 0; j < arraysize(epilog); ++j) buffer->push_back(epilog[j]);
1908 800 : }
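// A minimal worked example: with shuffles = {s0, s1, s2} stored heap-style
// (interior node s0, leaves s1 and s2), row_index starts at 1, so the leaf
// loop emits get_global 0/1 followed by s1 and then get_global 0/1 followed by
// s2, leaving two results on the value stack; the do-loop then emits s0, which
// consumes both; finally the epilog stores the root result to global 0 and
// leaves WASM_ONE as the function result.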
1909 :
1910 1600 : void RunWasmCode(ExecutionTier execution_tier, LowerSimd lower_simd,
1911 : const std::vector<byte>& code,
1912 : std::array<int8_t, kSimd128Size>* result) {
1913 1600 : WasmRunner<int32_t> r(execution_tier, lower_simd);
1914 : // Set up two test patterns as globals, e.g. [0, 1, 2, 3] and [4, 5, 6, 7].
1915 : int8_t* src0 = r.builder().AddGlobal<int8_t>(kWasmS128);
1916 : int8_t* src1 = r.builder().AddGlobal<int8_t>(kWasmS128);
1917 52800 : for (int i = 0; i < kSimd128Size; ++i) {
1918 25600 : WriteLittleEndianValue<int8_t>(&src0[i], i);
1919 25600 : WriteLittleEndianValue<int8_t>(&src1[i], kSimd128Size + i);
1920 : }
1921 1600 : r.Build(code.data(), code.data() + code.size());
1922 1600 : CHECK_EQ(1, r.Call());
1923 52800 : for (size_t i = 0; i < kSimd128Size; i++) {
1924 25600 : (*result)[i] = ReadLittleEndianValue<int8_t>(&src0[i]);
1925 : }
1926 1600 : }
1927 :
1928 : // Test multiple shuffles executed in sequence.
1929 26671 : WASM_SIMD_COMPILED_TEST(S8x16MultiShuffleFuzz) {
1930 8 : v8::base::RandomNumberGenerator* rng = CcTest::random_number_generator();
1931 : static const int kShuffles = 100;
1932 1608 : for (int i = 0; i < kShuffles; ++i) {
1933 : // Create an odd number of random test shuffles in [3, 21] so we can build
1934 : // a complete binary tree (stored as a heap) of shuffle operations. The leaf
1935 : // shuffles operate on the test pattern inputs, while the interior shuffles
1936 : // operate on the results of the two child shuffles.
1937 800 : int num_shuffles = rng->NextInt(10) * 2 + 3;
1938 : std::vector<Shuffle> shuffles;
1939 16960 : for (int j = 0; j < num_shuffles; ++j) {
1940 8080 : shuffles.push_back(GetRandomTestShuffle(rng));
1941 : }
1942 : // Generate the code for the shuffle expression.
1943 : std::vector<byte> buffer;
1944 800 : BuildShuffle(shuffles, &buffer);
1945 :
1946 : // Run the code using the interpreter to get the expected result.
1947 : std::array<int8_t, kSimd128Size> expected;
1948 800 : RunWasmCode(ExecutionTier::kInterpreter, kNoLowerSimd, buffer, &expected);
1949 : // Run the SIMD or scalar lowered compiled code and compare results.
1950 : std::array<int8_t, kSimd128Size> result;
1951 800 : RunWasmCode(execution_tier, lower_simd, buffer, &result);
1952 26400 : for (size_t i = 0; i < kSimd128Size; ++i) {
1953 12800 : CHECK_EQ(result[i], expected[i]);
1954 : }
1955 : }
1956 8 : }
1957 :
1958 : // Boolean unary operations are 'AllTrue' and 'AnyTrue', which return an integer
1959 : // result. Use relational ops on numeric vectors to create the boolean vector
1960 : // test inputs. Test inputs with all true, all false, one true, and one false.
1961 : #define WASM_SIMD_BOOL_REDUCTION_TEST(format, lanes) \
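// Expected outcomes per input: AnyTrue is 1 for the all-true and the mixed
// (one-true / one-false) vectors and 0 only for the all-false vector, while
// AllTrue is 1 only for the all-true vector. Each WASM_IF below returns 0
// early when one of these expectations is violated, so r.Call() returns 1 only
// if every reduction behaves as expected.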
1962 : WASM_SIMD_TEST(ReductionTest##lanes) { \
1963 : WasmRunner<int32_t> r(execution_tier, lower_simd); \
1964 : byte zero = r.AllocateLocal(kWasmS128); \
1965 : byte one_one = r.AllocateLocal(kWasmS128); \
1966 : byte reduced = r.AllocateLocal(kWasmI32); \
1967 : BUILD(r, WASM_SET_LOCAL(zero, WASM_SIMD_I##format##_SPLAT(WASM_ZERO)), \
1968 : WASM_SET_LOCAL( \
1969 : reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AnyTrue, \
1970 : WASM_SIMD_BINOP(kExprI##format##Eq, \
1971 : WASM_GET_LOCAL(zero), \
1972 : WASM_GET_LOCAL(zero)))), \
1973 : WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(reduced), WASM_ZERO), \
1974 : WASM_RETURN1(WASM_ZERO)), \
1975 : WASM_SET_LOCAL( \
1976 : reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AnyTrue, \
1977 : WASM_SIMD_BINOP(kExprI##format##Ne, \
1978 : WASM_GET_LOCAL(zero), \
1979 : WASM_GET_LOCAL(zero)))), \
1980 : WASM_IF(WASM_I32_NE(WASM_GET_LOCAL(reduced), WASM_ZERO), \
1981 : WASM_RETURN1(WASM_ZERO)), \
1982 : WASM_SET_LOCAL( \
1983 : reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AllTrue, \
1984 : WASM_SIMD_BINOP(kExprI##format##Eq, \
1985 : WASM_GET_LOCAL(zero), \
1986 : WASM_GET_LOCAL(zero)))), \
1987 : WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(reduced), WASM_ZERO), \
1988 : WASM_RETURN1(WASM_ZERO)), \
1989 : WASM_SET_LOCAL( \
1990 : reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AllTrue, \
1991 : WASM_SIMD_BINOP(kExprI##format##Ne, \
1992 : WASM_GET_LOCAL(zero), \
1993 : WASM_GET_LOCAL(zero)))), \
1994 : WASM_IF(WASM_I32_NE(WASM_GET_LOCAL(reduced), WASM_ZERO), \
1995 : WASM_RETURN1(WASM_ZERO)), \
1996 : WASM_SET_LOCAL(one_one, \
1997 : WASM_SIMD_I##format##_REPLACE_LANE( \
1998 : lanes - 1, WASM_GET_LOCAL(zero), WASM_ONE)), \
1999 : WASM_SET_LOCAL( \
2000 : reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AnyTrue, \
2001 : WASM_SIMD_BINOP(kExprI##format##Eq, \
2002 : WASM_GET_LOCAL(one_one), \
2003 : WASM_GET_LOCAL(zero)))), \
2004 : WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(reduced), WASM_ZERO), \
2005 : WASM_RETURN1(WASM_ZERO)), \
2006 : WASM_SET_LOCAL( \
2007 : reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AnyTrue, \
2008 : WASM_SIMD_BINOP(kExprI##format##Ne, \
2009 : WASM_GET_LOCAL(one_one), \
2010 : WASM_GET_LOCAL(zero)))), \
2011 : WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(reduced), WASM_ZERO), \
2012 : WASM_RETURN1(WASM_ZERO)), \
2013 : WASM_SET_LOCAL( \
2014 : reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AllTrue, \
2015 : WASM_SIMD_BINOP(kExprI##format##Eq, \
2016 : WASM_GET_LOCAL(one_one), \
2017 : WASM_GET_LOCAL(zero)))), \
2018 : WASM_IF(WASM_I32_NE(WASM_GET_LOCAL(reduced), WASM_ZERO), \
2019 : WASM_RETURN1(WASM_ZERO)), \
2020 : WASM_SET_LOCAL( \
2021 : reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AllTrue, \
2022 : WASM_SIMD_BINOP(kExprI##format##Ne, \
2023 : WASM_GET_LOCAL(one_one), \
2024 : WASM_GET_LOCAL(zero)))), \
2025 : WASM_IF(WASM_I32_NE(WASM_GET_LOCAL(reduced), WASM_ZERO), \
2026 : WASM_RETURN1(WASM_ZERO)), \
2027 : WASM_ONE); \
2028 : CHECK_EQ(1, r.Call()); \
2029 : }
2030 :
2031 26699 : WASM_SIMD_BOOL_REDUCTION_TEST(32x4, 4)
2032 26699 : WASM_SIMD_BOOL_REDUCTION_TEST(16x8, 8)
2033 26699 : WASM_SIMD_BOOL_REDUCTION_TEST(8x16, 16)
2034 :
2035 26687 : WASM_SIMD_TEST(SimdI32x4ExtractWithF32x4) {
2036 12 : WasmRunner<int32_t> r(execution_tier, lower_simd);
2037 12 : BUILD(r, WASM_IF_ELSE_I(
2038 : WASM_I32_EQ(WASM_SIMD_I32x4_EXTRACT_LANE(
2039 : 0, WASM_SIMD_F32x4_SPLAT(WASM_F32(30.5))),
2040 : WASM_I32_REINTERPRET_F32(WASM_F32(30.5))),
2041 : WASM_I32V(1), WASM_I32V(0)));
2042 12 : CHECK_EQ(1, r.Call());
2043 12 : }
2044 :
2045 26687 : WASM_SIMD_TEST(SimdF32x4ExtractWithI32x4) {
2046 12 : WasmRunner<int32_t> r(execution_tier, lower_simd);
2047 12 : BUILD(r,
2048 : WASM_IF_ELSE_I(WASM_F32_EQ(WASM_SIMD_F32x4_EXTRACT_LANE(
2049 : 0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(15))),
2050 : WASM_F32_REINTERPRET_I32(WASM_I32V(15))),
2051 : WASM_I32V(1), WASM_I32V(0)));
2052 12 : CHECK_EQ(1, r.Call());
2053 12 : }
2054 :
2055 26687 : WASM_SIMD_TEST(SimdF32x4AddWithI32x4) {
2056 : // Choose two floating point values whose sum is normal and exactly
2057 : // representable as a float.
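// kOne and kTwo below are the IEEE-754 single-precision bit patterns of 1.0f
// and 2.0f, so the extracted lane of the f32x4 addition should compare equal
// to 3.0f, which is exact in float arithmetic.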
2058 : const int kOne = 0x3F800000;
2059 : const int kTwo = 0x40000000;
2060 12 : WasmRunner<int32_t> r(execution_tier, lower_simd);
2061 12 : BUILD(r,
2062 : WASM_IF_ELSE_I(
2063 : WASM_F32_EQ(
2064 : WASM_SIMD_F32x4_EXTRACT_LANE(
2065 : 0, WASM_SIMD_BINOP(kExprF32x4Add,
2066 : WASM_SIMD_I32x4_SPLAT(WASM_I32V(kOne)),
2067 : WASM_SIMD_I32x4_SPLAT(WASM_I32V(kTwo)))),
2068 : WASM_F32_ADD(WASM_F32_REINTERPRET_I32(WASM_I32V(kOne)),
2069 : WASM_F32_REINTERPRET_I32(WASM_I32V(kTwo)))),
2070 : WASM_I32V(1), WASM_I32V(0)));
2071 12 : CHECK_EQ(1, r.Call());
2072 12 : }
2073 :
2074 26687 : WASM_SIMD_TEST(SimdI32x4AddWithF32x4) {
2075 12 : WasmRunner<int32_t> r(execution_tier, lower_simd);
2076 12 : BUILD(r,
2077 : WASM_IF_ELSE_I(
2078 : WASM_I32_EQ(
2079 : WASM_SIMD_I32x4_EXTRACT_LANE(
2080 : 0, WASM_SIMD_BINOP(kExprI32x4Add,
2081 : WASM_SIMD_F32x4_SPLAT(WASM_F32(21.25)),
2082 : WASM_SIMD_F32x4_SPLAT(WASM_F32(31.5)))),
2083 : WASM_I32_ADD(WASM_I32_REINTERPRET_F32(WASM_F32(21.25)),
2084 : WASM_I32_REINTERPRET_F32(WASM_F32(31.5)))),
2085 : WASM_I32V(1), WASM_I32V(0)));
2086 12 : CHECK_EQ(1, r.Call());
2087 12 : }
2088 :
2089 26687 : WASM_SIMD_TEST(SimdI32x4Local) {
2090 12 : WasmRunner<int32_t> r(execution_tier, lower_simd);
2091 : r.AllocateLocal(kWasmS128);
2092 12 : BUILD(r, WASM_SET_LOCAL(0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(31))),
2093 :
2094 : WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_GET_LOCAL(0)));
2095 12 : CHECK_EQ(31, r.Call());
2096 12 : }
2097 :
2098 26687 : WASM_SIMD_TEST(SimdI32x4SplatFromExtract) {
2099 12 : WasmRunner<int32_t> r(execution_tier, lower_simd);
2100 : r.AllocateLocal(kWasmI32);
2101 : r.AllocateLocal(kWasmS128);
2102 12 : BUILD(r, WASM_SET_LOCAL(0, WASM_SIMD_I32x4_EXTRACT_LANE(
2103 : 0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(76)))),
2104 : WASM_SET_LOCAL(1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(0))),
2105 : WASM_SIMD_I32x4_EXTRACT_LANE(1, WASM_GET_LOCAL(1)));
2106 12 : CHECK_EQ(76, r.Call());
2107 12 : }
2108 :
2109 26687 : WASM_SIMD_TEST(SimdI32x4For) {
2110 12 : WasmRunner<int32_t> r(execution_tier, lower_simd);
2111 : r.AllocateLocal(kWasmI32);
2112 : r.AllocateLocal(kWasmS128);
2113 12 : BUILD(r,
2114 :
2115 : WASM_SET_LOCAL(1, WASM_SIMD_I32x4_SPLAT(WASM_I32V(31))),
2116 : WASM_SET_LOCAL(1, WASM_SIMD_I32x4_REPLACE_LANE(1, WASM_GET_LOCAL(1),
2117 : WASM_I32V(53))),
2118 : WASM_SET_LOCAL(1, WASM_SIMD_I32x4_REPLACE_LANE(2, WASM_GET_LOCAL(1),
2119 : WASM_I32V(23))),
2120 : WASM_SET_LOCAL(0, WASM_I32V(0)),
2121 : WASM_LOOP(
2122 : WASM_SET_LOCAL(
2123 : 1, WASM_SIMD_BINOP(kExprI32x4Add, WASM_GET_LOCAL(1),
2124 : WASM_SIMD_I32x4_SPLAT(WASM_I32V(1)))),
2125 : WASM_IF(WASM_I32_NE(WASM_INC_LOCAL(0), WASM_I32V(5)), WASM_BR(1))),
2126 : WASM_SET_LOCAL(0, WASM_I32V(1)),
2127 : WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_GET_LOCAL(1)),
2128 : WASM_I32V(36)),
2129 : WASM_SET_LOCAL(0, WASM_I32V(0))),
2130 : WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(1, WASM_GET_LOCAL(1)),
2131 : WASM_I32V(58)),
2132 : WASM_SET_LOCAL(0, WASM_I32V(0))),
2133 : WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(2, WASM_GET_LOCAL(1)),
2134 : WASM_I32V(28)),
2135 : WASM_SET_LOCAL(0, WASM_I32V(0))),
2136 : WASM_IF(WASM_I32_NE(WASM_SIMD_I32x4_EXTRACT_LANE(3, WASM_GET_LOCAL(1)),
2137 : WASM_I32V(36)),
2138 : WASM_SET_LOCAL(0, WASM_I32V(0))),
2139 : WASM_GET_LOCAL(0));
2140 12 : CHECK_EQ(1, r.Call());
2141 12 : }
2142 :
2143 26687 : WASM_SIMD_TEST(SimdF32x4For) {
2144 12 : WasmRunner<int32_t> r(execution_tier, lower_simd);
2145 : r.AllocateLocal(kWasmI32);
2146 : r.AllocateLocal(kWasmS128);
2147 12 : BUILD(r, WASM_SET_LOCAL(1, WASM_SIMD_F32x4_SPLAT(WASM_F32(21.25))),
2148 : WASM_SET_LOCAL(1, WASM_SIMD_F32x4_REPLACE_LANE(3, WASM_GET_LOCAL(1),
2149 : WASM_F32(19.5))),
2150 : WASM_SET_LOCAL(0, WASM_I32V(0)),
2151 : WASM_LOOP(
2152 : WASM_SET_LOCAL(
2153 : 1, WASM_SIMD_BINOP(kExprF32x4Add, WASM_GET_LOCAL(1),
2154 : WASM_SIMD_F32x4_SPLAT(WASM_F32(2.0)))),
2155 : WASM_IF(WASM_I32_NE(WASM_INC_LOCAL(0), WASM_I32V(3)), WASM_BR(1))),
2156 : WASM_SET_LOCAL(0, WASM_I32V(1)),
2157 : WASM_IF(WASM_F32_NE(WASM_SIMD_F32x4_EXTRACT_LANE(0, WASM_GET_LOCAL(1)),
2158 : WASM_F32(27.25)),
2159 : WASM_SET_LOCAL(0, WASM_I32V(0))),
2160 : WASM_IF(WASM_F32_NE(WASM_SIMD_F32x4_EXTRACT_LANE(3, WASM_GET_LOCAL(1)),
2161 : WASM_F32(25.5)),
2162 : WASM_SET_LOCAL(0, WASM_I32V(0))),
2163 : WASM_GET_LOCAL(0));
2164 12 : CHECK_EQ(1, r.Call());
2165 12 : }
2166 :
2167 : template <typename T, int numLanes = 4>
2168 24 : void SetVectorByLanes(T* v, const std::array<T, numLanes>& arr) {
2169 216 : for (int lane = 0; lane < numLanes; lane++) {
2170 96 : WriteLittleEndianValue<T>(&v[lane], arr[lane]);
2171 : }
2172 24 : }
2173 :
2174 : template <typename T>
2175 : const T GetScalar(T* v, int lane) {
2176 : constexpr int kElems = kSimd128Size / sizeof(T);
2177 : const int index = lane;
2178 : USE(kElems);
2179 : DCHECK(index >= 0 && index < kElems);
2180 : return ReadLittleEndianValue<T>(&v[index]);
2181 : }
2182 :
2183 26687 : WASM_SIMD_TEST(SimdI32x4GetGlobal) {
2184 12 : WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
2185 : // Pad the globals with a few unused slots to get a non-zero offset.
2186 : r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
2187 : r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
2188 : r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
2189 : r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
2190 : int32_t* global = r.builder().AddGlobal<int32_t>(kWasmS128);
2191 12 : SetVectorByLanes(global, {{0, 1, 2, 3}});
2192 : r.AllocateLocal(kWasmI32);
2193 12 : BUILD(
2194 : r, WASM_SET_LOCAL(1, WASM_I32V(1)),
2195 : WASM_IF(WASM_I32_NE(WASM_I32V(0),
2196 : WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_GET_GLOBAL(4))),
2197 : WASM_SET_LOCAL(1, WASM_I32V(0))),
2198 : WASM_IF(WASM_I32_NE(WASM_I32V(1),
2199 : WASM_SIMD_I32x4_EXTRACT_LANE(1, WASM_GET_GLOBAL(4))),
2200 : WASM_SET_LOCAL(1, WASM_I32V(0))),
2201 : WASM_IF(WASM_I32_NE(WASM_I32V(2),
2202 : WASM_SIMD_I32x4_EXTRACT_LANE(2, WASM_GET_GLOBAL(4))),
2203 : WASM_SET_LOCAL(1, WASM_I32V(0))),
2204 : WASM_IF(WASM_I32_NE(WASM_I32V(3),
2205 : WASM_SIMD_I32x4_EXTRACT_LANE(3, WASM_GET_GLOBAL(4))),
2206 : WASM_SET_LOCAL(1, WASM_I32V(0))),
2207 : WASM_GET_LOCAL(1));
2208 12 : CHECK_EQ(1, r.Call(0));
2209 12 : }
2210 :
2211 26687 : WASM_SIMD_TEST(SimdI32x4SetGlobal) {
2212 12 : WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
2213 : // Pad the globals with a few unused slots to get a non-zero offset.
2214 : r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
2215 : r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
2216 : r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
2217 : r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
2218 : int32_t* global = r.builder().AddGlobal<int32_t>(kWasmS128);
2219 12 : BUILD(r, WASM_SET_GLOBAL(4, WASM_SIMD_I32x4_SPLAT(WASM_I32V(23))),
2220 : WASM_SET_GLOBAL(4, WASM_SIMD_I32x4_REPLACE_LANE(1, WASM_GET_GLOBAL(4),
2221 : WASM_I32V(34))),
2222 : WASM_SET_GLOBAL(4, WASM_SIMD_I32x4_REPLACE_LANE(2, WASM_GET_GLOBAL(4),
2223 : WASM_I32V(45))),
2224 : WASM_SET_GLOBAL(4, WASM_SIMD_I32x4_REPLACE_LANE(3, WASM_GET_GLOBAL(4),
2225 : WASM_I32V(56))),
2226 : WASM_I32V(1));
2227 12 : CHECK_EQ(1, r.Call(0));
2228 12 : CHECK_EQ(GetScalar(global, 0), 23);
2229 12 : CHECK_EQ(GetScalar(global, 1), 34);
2230 12 : CHECK_EQ(GetScalar(global, 2), 45);
2231 12 : CHECK_EQ(GetScalar(global, 3), 56);
2232 12 : }
2233 :
2234 26687 : WASM_SIMD_TEST(SimdF32x4GetGlobal) {
2235 12 : WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
2236 : float* global = r.builder().AddGlobal<float>(kWasmS128);
2237 12 : SetVectorByLanes<float>(global, {{0.0, 1.5, 2.25, 3.5}});
2238 : r.AllocateLocal(kWasmI32);
2239 12 : BUILD(
2240 : r, WASM_SET_LOCAL(1, WASM_I32V(1)),
2241 : WASM_IF(WASM_F32_NE(WASM_F32(0.0),
2242 : WASM_SIMD_F32x4_EXTRACT_LANE(0, WASM_GET_GLOBAL(0))),
2243 : WASM_SET_LOCAL(1, WASM_I32V(0))),
2244 : WASM_IF(WASM_F32_NE(WASM_F32(1.5),
2245 : WASM_SIMD_F32x4_EXTRACT_LANE(1, WASM_GET_GLOBAL(0))),
2246 : WASM_SET_LOCAL(1, WASM_I32V(0))),
2247 : WASM_IF(WASM_F32_NE(WASM_F32(2.25),
2248 : WASM_SIMD_F32x4_EXTRACT_LANE(2, WASM_GET_GLOBAL(0))),
2249 : WASM_SET_LOCAL(1, WASM_I32V(0))),
2250 : WASM_IF(WASM_F32_NE(WASM_F32(3.5),
2251 : WASM_SIMD_F32x4_EXTRACT_LANE(3, WASM_GET_GLOBAL(0))),
2252 : WASM_SET_LOCAL(1, WASM_I32V(0))),
2253 : WASM_GET_LOCAL(1));
2254 12 : CHECK_EQ(1, r.Call(0));
2255 12 : }
2256 :
2257 26687 : WASM_SIMD_TEST(SimdF32x4SetGlobal) {
2258 12 : WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
2259 : float* global = r.builder().AddGlobal<float>(kWasmS128);
2260 12 : BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_SPLAT(WASM_F32(13.5))),
2261 : WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_REPLACE_LANE(1, WASM_GET_GLOBAL(0),
2262 : WASM_F32(45.5))),
2263 : WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_REPLACE_LANE(2, WASM_GET_GLOBAL(0),
2264 : WASM_F32(32.25))),
2265 : WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_REPLACE_LANE(3, WASM_GET_GLOBAL(0),
2266 : WASM_F32(65.0))),
2267 : WASM_I32V(1));
2268 12 : CHECK_EQ(1, r.Call(0));
2269 12 : CHECK_EQ(GetScalar(global, 0), 13.5f);
2270 12 : CHECK_EQ(GetScalar(global, 1), 45.5f);
2271 12 : CHECK_EQ(GetScalar(global, 2), 32.25f);
2272 12 : CHECK_EQ(GetScalar(global, 3), 65.0f);
2273 12 : }
2274 :
2275 26671 : WASM_SIMD_COMPILED_TEST(SimdLoadStoreLoad) {
2276 8 : WasmRunner<int32_t> r(execution_tier, lower_simd);
2277 : int32_t* memory =
2278 : r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
2279 : // Load memory, store it, then reload it and extract the first lane. Use a
2280 : // non-zero offset into the memory of 1 lane (4 bytes) to test indexing.
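// Note: memory[1] is the int32 at byte offset 4, matching the WASM_I32V(4)
// addresses used in the load and store expressions below.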
2281 8 : BUILD(r, WASM_SIMD_STORE_MEM(WASM_I32V(4), WASM_SIMD_LOAD_MEM(WASM_I32V(4))),
2282 : WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_LOAD_MEM(WASM_I32V(4))));
2283 :
2284 936 : FOR_INT32_INPUTS(i) {
2285 : int32_t expected = i;
2286 464 : r.builder().WriteMemory(&memory[1], expected);
2287 464 : CHECK_EQ(expected, r.Call());
2288 : }
2289 8 : }
2290 :
2291 : #if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
2292 : // V8:8665 - Tracking bug to enable reduction tests in the interpreter,
2293 : // and for SIMD lowering.
2294 : // TODO(gdeepti): Enable these tests for ARM/ARM64
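// For reference: kExprS1x<lanes>AnyTrue yields 1 if any lane of its operand is
// non-zero and 0 otherwise, while kExprS1x<lanes>AllTrue yields 1 only if
// every lane is non-zero. Splatting `max` or 5 makes every lane non-zero and
// splatting 0 clears every lane, which explains the expected 1/1/0 results in
// the AnyTrue tests and 1/0 in the AllTrue tests.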
2295 : #define WASM_SIMD_ANYTRUE_TEST(format, lanes, max) \
2296 : WASM_SIMD_TEST_NO_LOWERING(S##format##AnyTrue) { \
2297 : WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd); \
2298 : byte simd = r.AllocateLocal(kWasmS128); \
2299 : BUILD( \
2300 : r, \
2301 : WASM_SET_LOCAL(simd, WASM_SIMD_I##format##_SPLAT(WASM_GET_LOCAL(0))), \
2302 : WASM_SIMD_UNOP(kExprS1x##lanes##AnyTrue, WASM_GET_LOCAL(simd))); \
2303 : DCHECK_EQ(1, r.Call(max)); \
2304 : DCHECK_EQ(1, r.Call(5)); \
2305 : DCHECK_EQ(0, r.Call(0)); \
2306 : }
2307 26679 : WASM_SIMD_ANYTRUE_TEST(32x4, 4, 0xffffffff)
2308 26679 : WASM_SIMD_ANYTRUE_TEST(16x8, 8, 0xffff)
2309 26679 : WASM_SIMD_ANYTRUE_TEST(8x16, 16, 0xff)
2310 :
2311 : #define WASM_SIMD_ALLTRUE_TEST(format, lanes, max) \
2312 : WASM_SIMD_TEST_NO_LOWERING(S##format##AllTrue) { \
2313 : WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd); \
2314 : byte simd = r.AllocateLocal(kWasmS128); \
2315 : BUILD( \
2316 : r, \
2317 : WASM_SET_LOCAL(simd, WASM_SIMD_I##format##_SPLAT(WASM_GET_LOCAL(0))), \
2318 : WASM_SIMD_UNOP(kExprS1x##lanes##AllTrue, WASM_GET_LOCAL(simd))); \
2319 : DCHECK_EQ(1, r.Call(max)); \
2320 : DCHECK_EQ(0, r.Call(0)); \
2321 : }
2322 26679 : WASM_SIMD_ALLTRUE_TEST(32x4, 4, 0xffffffff)
2323 26679 : WASM_SIMD_ALLTRUE_TEST(16x8, 8, 0xffff)
2324 26679 : WASM_SIMD_ALLTRUE_TEST(8x16, 16, 0xff)
2325 : #endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
2326 :
2327 26687 : WASM_SIMD_TEST(BitSelect) {
2328 12 : WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
2329 : byte simd = r.AllocateLocal(kWasmS128);
2330 12 : BUILD(r,
2331 : WASM_SET_LOCAL(
2332 : simd,
2333 : WASM_SIMD_SELECT(32x4, WASM_SIMD_I32x4_SPLAT(WASM_I32V(0x01020304)),
2334 : WASM_SIMD_I32x4_SPLAT(WASM_I32V(0)),
2335 : WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(0)))),
2336 : WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_GET_LOCAL(simd)));
2337 : DCHECK_EQ(0x01020304, r.Call(0xFFFFFFFF));
2338 12 : }
2339 :
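// The "mixed" relational tests below deliberately compare an i8x16 splat
// against an i16x8 splat (and, further down, an i16x8 splat against an i32x4
// splat). Because lanes are little-endian, byte lane 0 of the wider splat is
// the low byte of value2, which is why the reference result is computed as
// expected_op(value1, static_cast<narrower unsigned type>(value2)). Presumably
// this guards against code paths that assume both operands of a comparison
// were produced by same-shaped splats.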
2340 32 : void RunI8x16MixedRelationalOpTest(ExecutionTier execution_tier,
2341 : LowerSimd lower_simd, WasmOpcode opcode,
2342 : Int8BinOp expected_op) {
2343 32 : WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
2344 : byte value1 = 0, value2 = 1;
2345 : byte temp1 = r.AllocateLocal(kWasmS128);
2346 : byte temp2 = r.AllocateLocal(kWasmS128);
2347 : byte temp3 = r.AllocateLocal(kWasmS128);
2348 32 : BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(value1))),
2349 : WASM_SET_LOCAL(temp2, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value2))),
2350 : WASM_SET_LOCAL(temp3, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
2351 : WASM_GET_LOCAL(temp2))),
2352 : WASM_SIMD_I8x16_EXTRACT_LANE(0, WASM_GET_LOCAL(temp3)));
2353 :
2354 : DCHECK_EQ(expected_op(0xff, static_cast<uint8_t>(0x7fff)),
2355 : r.Call(0xff, 0x7fff));
2356 : DCHECK_EQ(expected_op(0xfe, static_cast<uint8_t>(0x7fff)),
2357 : r.Call(0xfe, 0x7fff));
2358 : DCHECK_EQ(expected_op(0xff, static_cast<uint8_t>(0x7ffe)),
2359 : r.Call(0xff, 0x7ffe));
2360 32 : }
2361 :
2362 26655 : WASM_SIMD_TEST_NO_LOWERING(I8x16LeUMixed) {
2363 0 : RunI8x16MixedRelationalOpTest(execution_tier, lower_simd, kExprI8x16LeU,
2364 8 : UnsignedLessEqual);
2365 0 : }
2366 26655 : WASM_SIMD_TEST_NO_LOWERING(I8x16LtUMixed) {
2367 0 : RunI8x16MixedRelationalOpTest(execution_tier, lower_simd, kExprI8x16LtU,
2368 8 : UnsignedLess);
2369 0 : }
2370 26655 : WASM_SIMD_TEST_NO_LOWERING(I8x16GeUMixed) {
2371 0 : RunI8x16MixedRelationalOpTest(execution_tier, lower_simd, kExprI8x16GeU,
2372 8 : UnsignedGreaterEqual);
2373 0 : }
2374 26655 : WASM_SIMD_TEST_NO_LOWERING(I8x16GtUMixed) {
2375 0 : RunI8x16MixedRelationalOpTest(execution_tier, lower_simd, kExprI8x16GtU,
2376 8 : UnsignedGreater);
2377 0 : }
2378 :
2379 32 : void RunI16x8MixedRelationalOpTest(ExecutionTier execution_tier,
2380 : LowerSimd lower_simd, WasmOpcode opcode,
2381 : Int16BinOp expected_op) {
2382 32 : WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
2383 : byte value1 = 0, value2 = 1;
2384 : byte temp1 = r.AllocateLocal(kWasmS128);
2385 : byte temp2 = r.AllocateLocal(kWasmS128);
2386 : byte temp3 = r.AllocateLocal(kWasmS128);
2387 32 : BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value1))),
2388 : WASM_SET_LOCAL(temp2, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value2))),
2389 : WASM_SET_LOCAL(temp3, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
2390 : WASM_GET_LOCAL(temp2))),
2391 : WASM_SIMD_I16x8_EXTRACT_LANE(0, WASM_GET_LOCAL(temp3)));
2392 :
2393 : DCHECK_EQ(expected_op(0xffff, static_cast<uint16_t>(0x7fffffff)),
2394 : r.Call(0xffff, 0x7fffffff));
2395 : DCHECK_EQ(expected_op(0xfeff, static_cast<uint16_t>(0x7fffffff)),
2396 : r.Call(0xfeff, 0x7fffffff));
2397 : DCHECK_EQ(expected_op(0xffff, static_cast<uint16_t>(0x7ffffeff)),
2398 : r.Call(0xffff, 0x7ffffeff));
2399 32 : }
2400 :
2401 26655 : WASM_SIMD_TEST_NO_LOWERING(I16x8LeUMixed) {
2402 0 : RunI16x8MixedRelationalOpTest(execution_tier, lower_simd, kExprI16x8LeU,
2403 8 : UnsignedLessEqual);
2404 0 : }
2405 26655 : WASM_SIMD_TEST_NO_LOWERING(I16x8LtUMixed) {
2406 0 : RunI16x8MixedRelationalOpTest(execution_tier, lower_simd, kExprI16x8LtU,
2407 8 : UnsignedLess);
2408 0 : }
2409 26655 : WASM_SIMD_TEST_NO_LOWERING(I16x8GeUMixed) {
2410 0 : RunI16x8MixedRelationalOpTest(execution_tier, lower_simd, kExprI16x8GeU,
2411 8 : UnsignedGreaterEqual);
2412 0 : }
2413 26655 : WASM_SIMD_TEST_NO_LOWERING(I16x8GtUMixed) {
2414 0 : RunI16x8MixedRelationalOpTest(execution_tier, lower_simd, kExprI16x8GtU,
2415 8 : UnsignedGreater);
2416 0 : }
2417 :
2418 : #undef WASM_SIMD_TEST
2419 : #undef WASM_SIMD_CHECK_LANE
2420 : #undef TO_BYTE
2421 : #undef WASM_SIMD_OP
2422 : #undef WASM_SIMD_SPLAT
2423 : #undef WASM_SIMD_UNOP
2424 : #undef WASM_SIMD_BINOP
2425 : #undef WASM_SIMD_SHIFT_OP
2426 : #undef WASM_SIMD_CONCAT_OP
2427 : #undef WASM_SIMD_SELECT
2428 : #undef WASM_SIMD_F32x4_SPLAT
2429 : #undef WASM_SIMD_F32x4_EXTRACT_LANE
2430 : #undef WASM_SIMD_F32x4_REPLACE_LANE
2431 : #undef WASM_SIMD_I32x4_SPLAT
2432 : #undef WASM_SIMD_I32x4_EXTRACT_LANE
2433 : #undef WASM_SIMD_I32x4_REPLACE_LANE
2434 : #undef WASM_SIMD_I16x8_SPLAT
2435 : #undef WASM_SIMD_I16x8_EXTRACT_LANE
2436 : #undef WASM_SIMD_I16x8_REPLACE_LANE
2437 : #undef WASM_SIMD_I8x16_SPLAT
2438 : #undef WASM_SIMD_I8x16_EXTRACT_LANE
2439 : #undef WASM_SIMD_I8x16_REPLACE_LANE
2440 : #undef WASM_SIMD_S8x16_SHUFFLE_OP
2441 : #undef WASM_SIMD_LOAD_MEM
2442 : #undef WASM_SIMD_STORE_MEM
2443 : #undef WASM_SIMD_SELECT_TEST
2444 : #undef WASM_SIMD_NON_CANONICAL_SELECT_TEST
2445 : #undef WASM_SIMD_COMPILED_TEST
2446 : #undef WASM_SIMD_BOOL_REDUCTION_TEST
2447 : #undef WASM_SIMD_TEST_NO_LOWERING
2448 : #undef WASM_SIMD_ANYTRUE_TEST
2449 : #undef WASM_SIMD_ALLTRUE_TEST
2450 :
2451 : } // namespace test_run_wasm_simd
2452 : } // namespace wasm
2453 : } // namespace internal
2454 79917 : } // namespace v8