// Copyright 2016 the V8 project authors. All rights reserved. Use of this
// source code is governed by a BSD-style license that can be found in the
// LICENSE file.

#include <cmath>
#include <functional>
#include <limits>

#include "src/base/bits.h"
#include "src/base/overflowing-math.h"
#include "src/base/utils/random-number-generator.h"
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/graph-builder-tester.h"
#include "test/cctest/compiler/value-helper.h"


namespace v8 {
namespace internal {
namespace compiler {

enum TestAlignment {
  kAligned,
  kUnaligned,
};

#if V8_TARGET_LITTLE_ENDIAN
#define LSB(addr, bytes) addr
#elif V8_TARGET_BIG_ENDIAN
#define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - (bytes)
#else
#error "Unknown Architecture"
#endif
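// LSB(addr, bytes) yields the address of the least-significant `bytes` of the
// value at `addr`, so narrow loads read the low-order bits on both little-
// and big-endian targets.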

// This is America!
#define A_BILLION 1000000000ULL
#define A_GIG (1024ULL * 1024ULL * 1024ULL)

namespace {
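// Builds a graph that loads an int32 (aligned or unaligned) from &p1 and
// returns it, then checks the round-trip for every int32 test input.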
void RunLoadInt32(const TestAlignment t) {
  RawMachineAssemblerTester<int32_t> m;

  int32_t p1 = 0;  // loads directly from this location.

  if (t == TestAlignment::kAligned) {
    m.Return(m.LoadFromPointer(&p1, MachineType::Int32()));
  } else if (t == TestAlignment::kUnaligned) {
    m.Return(m.UnalignedLoadFromPointer(&p1, MachineType::Int32()));
  } else {
    UNREACHABLE();
  }

  FOR_INT32_INPUTS(i) {
    p1 = *i;
    CHECK_EQ(p1, m.Call());
  }
}

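// Like RunLoadInt32, but addresses p1 through a base pointer displaced by a
// range of small and large, positive and negative offsets.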
void RunLoadInt32Offset(TestAlignment t) {
  int32_t p1 = 0;  // loads directly from this location.

  int32_t offsets[] = {-2000000, -100, -101, 1,          3,
                       7,        120,  2000, 2000000000, 0xFF};

  for (size_t i = 0; i < arraysize(offsets); i++) {
    RawMachineAssemblerTester<int32_t> m;
    int32_t offset = offsets[i];
    byte* pointer = reinterpret_cast<byte*>(&p1) - offset;

    // generate load [#base + #index]
    if (t == TestAlignment::kAligned) {
      m.Return(m.LoadFromPointer(pointer, MachineType::Int32(), offset));
    } else if (t == TestAlignment::kUnaligned) {
      m.Return(
          m.UnalignedLoadFromPointer(pointer, MachineType::Int32(), offset));
    } else {
      UNREACHABLE();
    }

    FOR_INT32_INPUTS(j) {
      p1 = *j;
      CHECK_EQ(p1, m.Call());
    }
  }
}

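// Loads a float32 from p1 and stores it to p2 through displaced base
// pointers; returning the magic constant verifies the generated code ran.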
void RunLoadStoreFloat32Offset(TestAlignment t) {
  float p1 = 0.0f;  // loads directly from this location.
  float p2 = 0.0f;  // and stores directly into this location.

  FOR_INT32_INPUTS(i) {
    int32_t magic =
        base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(*i, 3));
    RawMachineAssemblerTester<int32_t> m;
    int32_t offset = *i;
    byte* from = reinterpret_cast<byte*>(&p1) - offset;
    byte* to = reinterpret_cast<byte*>(&p2) - offset;
    // generate load [#base + #index]
    if (t == TestAlignment::kAligned) {
      Node* load = m.Load(MachineType::Float32(), m.PointerConstant(from),
                          m.IntPtrConstant(offset));
      m.Store(MachineRepresentation::kFloat32, m.PointerConstant(to),
              m.IntPtrConstant(offset), load, kNoWriteBarrier);
    } else if (t == TestAlignment::kUnaligned) {
      Node* load =
          m.UnalignedLoad(MachineType::Float32(), m.PointerConstant(from),
                          m.IntPtrConstant(offset));
      m.UnalignedStore(MachineRepresentation::kFloat32, m.PointerConstant(to),
                       m.IntPtrConstant(offset), load);
    } else {
      UNREACHABLE();
    }
    m.Return(m.Int32Constant(magic));

    FOR_FLOAT32_INPUTS(j) {
      p1 = *j;
      p2 = *j - 5;
      CHECK_EQ(magic, m.Call());
      CHECK_DOUBLE_EQ(p1, p2);
    }
  }
}

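// Same as RunLoadStoreFloat32Offset, but for 64-bit floats.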
void RunLoadStoreFloat64Offset(TestAlignment t) {
  double p1 = 0;  // loads directly from this location.
  double p2 = 0;  // and stores directly into this location.

  FOR_INT32_INPUTS(i) {
    int32_t magic =
        base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(*i, 3));
    RawMachineAssemblerTester<int32_t> m;
    int32_t offset = *i;
    byte* from = reinterpret_cast<byte*>(&p1) - offset;
    byte* to = reinterpret_cast<byte*>(&p2) - offset;
    // generate load [#base + #index]
    if (t == TestAlignment::kAligned) {
      Node* load = m.Load(MachineType::Float64(), m.PointerConstant(from),
                          m.IntPtrConstant(offset));
      m.Store(MachineRepresentation::kFloat64, m.PointerConstant(to),
              m.IntPtrConstant(offset), load, kNoWriteBarrier);
    } else if (t == TestAlignment::kUnaligned) {
      Node* load =
          m.UnalignedLoad(MachineType::Float64(), m.PointerConstant(from),
                          m.IntPtrConstant(offset));
      m.UnalignedStore(MachineRepresentation::kFloat64, m.PointerConstant(to),
                       m.IntPtrConstant(offset), load);
    } else {
      UNREACHABLE();
    }
    m.Return(m.Int32Constant(magic));

    FOR_FLOAT64_INPUTS(j) {
      p1 = *j;
      p2 = *j - 5;
      CHECK_EQ(magic, m.Call());
      CHECK_DOUBLE_EQ(p1, p2);
    }
  }
}
}  // namespace

TEST(RunLoadInt32) { RunLoadInt32(TestAlignment::kAligned); }

TEST(RunUnalignedLoadInt32) { RunLoadInt32(TestAlignment::kUnaligned); }

TEST(RunLoadInt32Offset) { RunLoadInt32Offset(TestAlignment::kAligned); }

TEST(RunUnalignedLoadInt32Offset) {
  RunLoadInt32Offset(TestAlignment::kUnaligned);
}

TEST(RunLoadStoreFloat32Offset) {
  RunLoadStoreFloat32Offset(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreFloat32Offset) {
  RunLoadStoreFloat32Offset(TestAlignment::kUnaligned);
}

TEST(RunLoadStoreFloat64Offset) {
  RunLoadStoreFloat64Offset(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreFloat64Offset) {
  RunLoadStoreFloat64Offset(TestAlignment::kUnaligned);
}

namespace {

// Initializes the buffer with some raw data respecting the requested
// representation of the values.
template <typename CType>
void InitBuffer(CType* buffer, size_t length, MachineType rep) {
  const size_t kBufferSize = sizeof(CType) * length;
  if (!rep.IsTagged()) {
    byte* raw = reinterpret_cast<byte*>(buffer);
    for (size_t i = 0; i < kBufferSize; i++) {
      raw[i] = static_cast<byte>((i + kBufferSize) ^ 0xAA);
    }
    return;
  }

  // Tagged field loads require values to be properly tagged because of
  // pointer decompression that may be happening during the load.
  Isolate* isolate = CcTest::InitIsolateOnce();
  Smi* smi_view = reinterpret_cast<Smi*>(&buffer[0]);
  if (rep.IsTaggedSigned()) {
    for (size_t i = 0; i < length; i++) {
      smi_view[i] = Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
    }
  } else {
    memcpy(&buffer[0], &isolate->roots_table(), kBufferSize);
    if (!rep.IsTaggedPointer()) {
      // Also add some Smis if we are checking the AnyTagged case.
      for (size_t i = 0; i < length / 2; i++) {
        smi_view[i] =
            Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
      }
    }
  }
}
224 :
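// Loads buffer[i] through a compile-time-constant base and index, sweeping
// the base displacement over a range of small and large offsets.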
template <typename CType>
void RunLoadImmIndex(MachineType rep, TestAlignment t) {
  const int kNumElems = 16;
  CType buffer[kNumElems];

  InitBuffer(buffer, kNumElems, rep);

  // Test with various large and small offsets.
  for (int offset = -1; offset <= 200000; offset *= -5) {
    for (int i = 0; i < kNumElems; i++) {
      BufferedRawMachineAssemblerTester<CType> m;
      void* base_pointer = &buffer[0] - offset;
#ifdef V8_COMPRESS_POINTERS
      if (rep.IsTagged()) {
        // When pointer compression is enabled, we need to access only the
        // lower 32 bits of the tagged value, while the buffer contains full
        // 64-bit values.
        base_pointer = LSB(base_pointer, kPointerSize / 2);
      }
#endif
      Node* base = m.PointerConstant(base_pointer);
      Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
      if (t == TestAlignment::kAligned) {
        m.Return(m.Load(rep, base, index));
      } else if (t == TestAlignment::kUnaligned) {
        m.Return(m.UnalignedLoad(rep, base, index));
      } else {
        UNREACHABLE();
      }

      CHECK_EQ(buffer[i], m.Call());
    }
  }
}

// The zero/null value of CType, used to check that untouched output slots
// stay cleared.
template <typename CType>
CType NullValue() {
  return CType{0};
}

template <>
HeapObject NullValue<HeapObject>() {
  return HeapObject();
}

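// Copies in_buffer[x] to out_buffer[kNumElems - x - 1] with a load/store
// pair and verifies that only the targeted output slot changes.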
template <typename CType>
void RunLoadStore(MachineType rep, TestAlignment t) {
  const int kNumElems = 16;
  CType in_buffer[kNumElems];
  CType out_buffer[kNumElems];

  InitBuffer(in_buffer, kNumElems, rep);

  for (int32_t x = 0; x < kNumElems; x++) {
    int32_t y = kNumElems - x - 1;

    RawMachineAssemblerTester<int32_t> m;
    int32_t OK = 0x29000 + x;
    Node* in_base = m.PointerConstant(in_buffer);
    Node* in_index = m.IntPtrConstant(x * sizeof(CType));
    Node* out_base = m.PointerConstant(out_buffer);
    Node* out_index = m.IntPtrConstant(y * sizeof(CType));
    if (t == TestAlignment::kAligned) {
      Node* load = m.Load(rep, in_base, in_index);
      m.Store(rep.representation(), out_base, out_index, load, kNoWriteBarrier);
    } else if (t == TestAlignment::kUnaligned) {
      Node* load = m.UnalignedLoad(rep, in_base, in_index);
      m.UnalignedStore(rep.representation(), out_base, out_index, load);
    }

    m.Return(m.Int32Constant(OK));

    memset(out_buffer, 0, sizeof(out_buffer));
    CHECK_NE(in_buffer[x], out_buffer[y]);
    CHECK_EQ(OK, m.Call());
    CHECK_EQ(in_buffer[x], out_buffer[y]);
    for (int32_t z = 0; z < kNumElems; z++) {
      if (z != y) CHECK_EQ(NullValue<CType>(), out_buffer[z]);
    }
  }
}

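// Copies a value through every combination of misaligned source and
// destination byte offsets within a CType-sized window.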
template <typename CType>
void RunUnalignedLoadStoreUnalignedAccess(MachineType rep) {
  CType in, out;
  byte in_buffer[2 * sizeof(CType)];
  byte out_buffer[2 * sizeof(CType)];

  InitBuffer(&in, 1, rep);

  for (int x = 0; x < static_cast<int>(sizeof(CType)); x++) {
    // A direct write to &in_buffer[x] may cause an unaligned access in C++
    // code, so we use MemCopy() to handle that.
    MemCopy(&in_buffer[x], &in, sizeof(CType));

    for (int y = 0; y < static_cast<int>(sizeof(CType)); y++) {
      RawMachineAssemblerTester<int32_t> m;
      int32_t OK = 0x29000 + x;

      Node* in_base = m.PointerConstant(in_buffer);
      Node* in_index = m.IntPtrConstant(x);
      Node* load = m.UnalignedLoad(rep, in_base, in_index);

      Node* out_base = m.PointerConstant(out_buffer);
      Node* out_index = m.IntPtrConstant(y);
      m.UnalignedStore(rep.representation(), out_base, out_index, load);

      m.Return(m.Int32Constant(OK));

      CHECK_EQ(OK, m.Call());
      // A direct read of &out_buffer[y] may cause an unaligned access in C++
      // code, so we use MemCopy() to handle that.
      MemCopy(&out, &out_buffer[y], sizeof(CType));
      CHECK_EQ(in, out);
    }
  }
}
}  // namespace

TEST(RunLoadImmIndex) {
  RunLoadImmIndex<int8_t>(MachineType::Int8(), TestAlignment::kAligned);
  RunLoadImmIndex<uint8_t>(MachineType::Uint8(), TestAlignment::kAligned);
  RunLoadImmIndex<int16_t>(MachineType::Int16(), TestAlignment::kAligned);
  RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
  RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
  RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
  RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kAligned);
  RunLoadImmIndex<Smi>(MachineType::TaggedSigned(), TestAlignment::kAligned);
  RunLoadImmIndex<HeapObject>(MachineType::TaggedPointer(),
                              TestAlignment::kAligned);
  RunLoadImmIndex<Object>(MachineType::AnyTagged(), TestAlignment::kAligned);
  RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kAligned);
  RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kAligned);
#if V8_TARGET_ARCH_64_BIT
  RunLoadImmIndex<int64_t>(MachineType::Int64(), TestAlignment::kAligned);
#endif
  // TODO(titzer): test various indexing modes.
}

TEST(RunUnalignedLoadImmIndex) {
  RunLoadImmIndex<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
  RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
  RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
  RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
  RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
  RunLoadImmIndex<Smi>(MachineType::TaggedSigned(), TestAlignment::kUnaligned);
  RunLoadImmIndex<HeapObject>(MachineType::TaggedPointer(),
                              TestAlignment::kUnaligned);
  RunLoadImmIndex<Object>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
  RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kUnaligned);
  RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kUnaligned);
#if V8_TARGET_ARCH_64_BIT
  RunLoadImmIndex<int64_t>(MachineType::Int64(), TestAlignment::kUnaligned);
#endif
  // TODO(titzer): test various indexing modes.
}

TEST(RunLoadStore) {
  RunLoadStore<int8_t>(MachineType::Int8(), TestAlignment::kAligned);
  RunLoadStore<uint8_t>(MachineType::Uint8(), TestAlignment::kAligned);
  RunLoadStore<int16_t>(MachineType::Int16(), TestAlignment::kAligned);
  RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
  RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
  RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
  RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kAligned);
  RunLoadStore<Smi>(MachineType::TaggedSigned(), TestAlignment::kAligned);
  RunLoadStore<HeapObject>(MachineType::TaggedPointer(),
                           TestAlignment::kAligned);
  RunLoadStore<Object>(MachineType::AnyTagged(), TestAlignment::kAligned);
  RunLoadStore<float>(MachineType::Float32(), TestAlignment::kAligned);
  RunLoadStore<double>(MachineType::Float64(), TestAlignment::kAligned);
#if V8_TARGET_ARCH_64_BIT
  RunLoadStore<int64_t>(MachineType::Int64(), TestAlignment::kAligned);
#endif
}

TEST(RunUnalignedLoadStore) {
  RunLoadStore<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
  RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
  RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
  RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
  RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
  RunLoadStore<Smi>(MachineType::TaggedSigned(), TestAlignment::kUnaligned);
  RunLoadStore<HeapObject>(MachineType::TaggedPointer(),
                           TestAlignment::kUnaligned);
  RunLoadStore<Object>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
  RunLoadStore<float>(MachineType::Float32(), TestAlignment::kUnaligned);
  RunLoadStore<double>(MachineType::Float64(), TestAlignment::kUnaligned);
#if V8_TARGET_ARCH_64_BIT
  RunLoadStore<int64_t>(MachineType::Int64(), TestAlignment::kUnaligned);
#endif
}

TEST(RunUnalignedLoadStoreUnalignedAccess) {
  RunUnalignedLoadStoreUnalignedAccess<int16_t>(MachineType::Int16());
  RunUnalignedLoadStoreUnalignedAccess<uint16_t>(MachineType::Uint16());
  RunUnalignedLoadStoreUnalignedAccess<int32_t>(MachineType::Int32());
  RunUnalignedLoadStoreUnalignedAccess<uint32_t>(MachineType::Uint32());
  RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::Pointer());
  RunUnalignedLoadStoreUnalignedAccess<Smi>(MachineType::TaggedSigned());
  RunUnalignedLoadStoreUnalignedAccess<HeapObject>(
      MachineType::TaggedPointer());
  RunUnalignedLoadStoreUnalignedAccess<Object>(MachineType::AnyTagged());
  RunUnalignedLoadStoreUnalignedAccess<float>(MachineType::Float32());
  RunUnalignedLoadStoreUnalignedAccess<double>(MachineType::Float64());
#if V8_TARGET_ARCH_64_BIT
  RunUnalignedLoadStoreUnalignedAccess<int64_t>(MachineType::Int64());
#endif
}

namespace {
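// Loads int8/int16/int32 values from buffer[0] and stores them widened to
// word32 in buffer[1..3], checking that narrow loads sign-extend.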
void RunLoadStoreSignExtend32(TestAlignment t) {
  int32_t buffer[4];
  RawMachineAssemblerTester<int32_t> m;
  Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8());
  if (t == TestAlignment::kAligned) {
    Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
    Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Int32());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
    m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16);
    m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32);
  } else if (t == TestAlignment::kUnaligned) {
    Node* load16 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
    Node* load32 =
        m.UnalignedLoadFromPointer(&buffer[0], MachineType::Int32());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
    m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord32,
                              load16);
    m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord32,
                              load32);
  } else {
    UNREACHABLE();
  }
  m.Return(load8);

  FOR_INT32_INPUTS(i) {
    buffer[0] = *i;

    CHECK_EQ(static_cast<int8_t>(*i & 0xFF), m.Call());
    CHECK_EQ(static_cast<int8_t>(*i & 0xFF), buffer[1]);
    CHECK_EQ(static_cast<int16_t>(*i & 0xFFFF), buffer[2]);
    CHECK_EQ(*i, buffer[3]);
  }
}

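// Like RunLoadStoreSignExtend32, but with unsigned loads, checking zero
// extension.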
void RunLoadStoreZeroExtend32(TestAlignment t) {
  uint32_t buffer[4];
  RawMachineAssemblerTester<uint32_t> m;
  Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8());
  if (t == TestAlignment::kAligned) {
    Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
    Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Uint32());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
    m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16);
    m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32);
  } else if (t == TestAlignment::kUnaligned) {
    Node* load16 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
    Node* load32 =
        m.UnalignedLoadFromPointer(&buffer[0], MachineType::Uint32());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
    m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord32,
                              load16);
    m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord32,
                              load32);
  }
  m.Return(load8);

  FOR_UINT32_INPUTS(i) {
    buffer[0] = *i;

    CHECK_EQ((*i & 0xFF), m.Call());
    CHECK_EQ((*i & 0xFF), buffer[1]);
    CHECK_EQ((*i & 0xFFFF), buffer[2]);
    CHECK_EQ(*i, buffer[3]);
  }
}
}  // namespace

TEST(RunLoadStoreSignExtend32) {
  RunLoadStoreSignExtend32(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreSignExtend32) {
  RunLoadStoreSignExtend32(TestAlignment::kUnaligned);
}

TEST(RunLoadStoreZeroExtend32) {
  RunLoadStoreZeroExtend32(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreZeroExtend32) {
  RunLoadStoreZeroExtend32(TestAlignment::kUnaligned);
}

#if V8_TARGET_ARCH_64_BIT

namespace {
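// 64-bit variant of RunLoadStoreSignExtend32. Currently disabled via the
// early return below until sign extension of loads to 64 bits is supported.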
void RunLoadStoreSignExtend64(TestAlignment t) {
  if ((true)) return;  // TODO(titzer): sign extension of loads to 64-bit.
  int64_t buffer[5];
  RawMachineAssemblerTester<int64_t> m;
  Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8());
  if (t == TestAlignment::kAligned) {
    Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
    Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Int32());
    Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Int64());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
    m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16);
    m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32);
    m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64);
  } else if (t == TestAlignment::kUnaligned) {
    Node* load16 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
    Node* load32 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 4), MachineType::Int32());
    Node* load64 =
        m.UnalignedLoadFromPointer(&buffer[0], MachineType::Int64());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
    m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord64,
                              load16);
    m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord64,
                              load32);
    m.UnalignedStoreToPointer(&buffer[4], MachineRepresentation::kWord64,
                              load64);
  } else {
    UNREACHABLE();
  }
  m.Return(load8);

  FOR_INT64_INPUTS(i) {
    buffer[0] = *i;

    CHECK_EQ(static_cast<int8_t>(*i & 0xFF), m.Call());
    CHECK_EQ(static_cast<int8_t>(*i & 0xFF), buffer[1]);
    CHECK_EQ(static_cast<int16_t>(*i & 0xFFFF), buffer[2]);
    CHECK_EQ(static_cast<int32_t>(*i & 0xFFFFFFFF), buffer[3]);
    CHECK_EQ(*i, buffer[4]);
  }
}

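// 64-bit variant of RunLoadStoreZeroExtend32. Skipped when pointers are
// narrower than 64 bits.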
void RunLoadStoreZeroExtend64(TestAlignment t) {
  if (kPointerSize < 8) return;
  uint64_t buffer[5];
  RawMachineAssemblerTester<uint64_t> m;
  Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8());
  if (t == TestAlignment::kAligned) {
    Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
    Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Uint32());
    Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Uint64());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
    m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16);
    m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32);
    m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64);
  } else if (t == TestAlignment::kUnaligned) {
    Node* load16 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
    Node* load32 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 4), MachineType::Uint32());
    Node* load64 =
        m.UnalignedLoadFromPointer(&buffer[0], MachineType::Uint64());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
    m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord64,
                              load16);
    m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord64,
                              load32);
    m.UnalignedStoreToPointer(&buffer[4], MachineRepresentation::kWord64,
                              load64);
  } else {
    UNREACHABLE();
  }
  m.Return(load8);

  FOR_UINT64_INPUTS(i) {
    buffer[0] = *i;

    CHECK_EQ((*i & 0xFF), m.Call());
    CHECK_EQ((*i & 0xFF), buffer[1]);
    CHECK_EQ((*i & 0xFFFF), buffer[2]);
    CHECK_EQ((*i & 0xFFFFFFFF), buffer[3]);
    CHECK_EQ(*i, buffer[4]);
  }
}

}  // namespace

TEST(RunLoadStoreSignExtend64) {
  RunLoadStoreSignExtend64(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreSignExtend64) {
  RunLoadStoreSignExtend64(TestAlignment::kUnaligned);
}

TEST(RunLoadStoreZeroExtend64) {
  RunLoadStoreZeroExtend64(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreZeroExtend64) {
  RunLoadStoreZeroExtend64(TestAlignment::kUnaligned);
}

#endif

namespace {
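// Loads a narrow signed int, adds 1 in word32 arithmetic, and stores the sum
// back; checks that the store truncates the result to the narrow type.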
template <typename IntType>
void LoadStoreTruncation(MachineType kRepresentation, TestAlignment t) {
  IntType input;

  RawMachineAssemblerTester<int32_t> m;
  Node* ap1;
  if (t == TestAlignment::kAligned) {
    Node* a = m.LoadFromPointer(&input, kRepresentation);
    ap1 = m.Int32Add(a, m.Int32Constant(1));
    m.StoreToPointer(&input, kRepresentation.representation(), ap1);
  } else if (t == TestAlignment::kUnaligned) {
    Node* a = m.UnalignedLoadFromPointer(&input, kRepresentation);
    ap1 = m.Int32Add(a, m.Int32Constant(1));
    m.UnalignedStoreToPointer(&input, kRepresentation.representation(), ap1);
  } else {
    UNREACHABLE();
  }
  m.Return(ap1);

  const IntType max = std::numeric_limits<IntType>::max();
  const IntType min = std::numeric_limits<IntType>::min();

  // Test upper bound.
  input = max;
  CHECK_EQ(max + 1, m.Call());
  CHECK_EQ(min, input);

  // Test lower bound.
  input = min;
  CHECK_EQ(static_cast<IntType>(max + 2), m.Call());
  CHECK_EQ(min + 1, input);

  // Test all one-byte values that are not one-byte bounds.
  for (int i = -127; i < 127; i++) {
    input = i;
    int expected = i >= 0 ? i + 1 : max + (i - min) + 2;
    CHECK_EQ(static_cast<IntType>(expected), m.Call());
    CHECK_EQ(static_cast<IntType>(i + 1), input);
  }
}
}  // namespace

TEST(RunLoadStoreTruncation) {
  LoadStoreTruncation<int8_t>(MachineType::Int8(), TestAlignment::kAligned);
  LoadStoreTruncation<int16_t>(MachineType::Int16(), TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreTruncation) {
  LoadStoreTruncation<int16_t>(MachineType::Int16(),
                               TestAlignment::kUnaligned);
}

#undef LSB
#undef A_BILLION
#undef A_GIG

}  // namespace compiler
}  // namespace internal
}  // namespace v8
|