// Copyright 2016 the V8 project authors. All rights reserved. Use of this
// source code is governed by a BSD-style license that can be found in the
// LICENSE file.

#include <cmath>
#include <functional>
#include <limits>

#include "src/base/bits.h"
#include "src/base/overflowing-math.h"
#include "src/base/utils/random-number-generator.h"
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/graph-builder-tester.h"
#include "test/cctest/compiler/value-helper.h"


namespace v8 {
namespace internal {
namespace compiler {

enum TestAlignment {
  kAligned,
  kUnaligned,
};

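// LSB(addr, bytes) returns a pointer to the least significant |bytes| of the
// value at |addr|, independent of target endianness, so that the narrow loads
// in the tests below always read the low-order bits of a wider buffer slot.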
#if V8_TARGET_LITTLE_ENDIAN
#define LSB(addr, bytes) addr
#elif V8_TARGET_BIG_ENDIAN
#define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - (bytes)
#else
#error "Unknown Architecture"
#endif

// This is America!
#define A_BILLION 1000000000ULL
#define A_GIG (1024ULL * 1024ULL * 1024ULL)

namespace {
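// RunLoadInt32 and RunLoadInt32Offset build a graph that loads an int32
// directly from the address of a local variable (optionally through a base
// pointer biased by -offset plus an immediate offset) and check that every
// FOR_INT32_INPUTS value written to that location is read back unchanged.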
void RunLoadInt32(const TestAlignment t) {
  RawMachineAssemblerTester<int32_t> m;

  int32_t p1 = 0;  // loads directly from this location.

  if (t == TestAlignment::kAligned) {
    m.Return(m.LoadFromPointer(&p1, MachineType::Int32()));
  } else if (t == TestAlignment::kUnaligned) {
    m.Return(m.UnalignedLoadFromPointer(&p1, MachineType::Int32()));
  } else {
    UNREACHABLE();
  }

  FOR_INT32_INPUTS(i) {
    p1 = i;
    CHECK_EQ(p1, m.Call());
  }
}

void RunLoadInt32Offset(TestAlignment t) {
  int32_t p1 = 0;  // loads directly from this location.

  int32_t offsets[] = {-2000000, -100, -101, 1, 3,
                       7, 120, 2000, 2000000000, 0xFF};

  for (size_t i = 0; i < arraysize(offsets); i++) {
    RawMachineAssemblerTester<int32_t> m;
    int32_t offset = offsets[i];
    byte* pointer = reinterpret_cast<byte*>(&p1) - offset;

    // generate load [#base + #index]
    if (t == TestAlignment::kAligned) {
      m.Return(m.LoadFromPointer(pointer, MachineType::Int32(), offset));
    } else if (t == TestAlignment::kUnaligned) {
      m.Return(
          m.UnalignedLoadFromPointer(pointer, MachineType::Int32(), offset));
    } else {
      UNREACHABLE();
    }

    FOR_INT32_INPUTS(j) {
      p1 = j;
      CHECK_EQ(p1, m.Call());
    }
  }
}

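// RunLoadStoreFloat32Offset / RunLoadStoreFloat64Offset bias the base
// pointers by -offset so that load/store [#base + #index] address p1 and p2
// exactly; each iteration copies p1 into p2 through the generated code and
// then checks that the two locations compare equal.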
void RunLoadStoreFloat32Offset(TestAlignment t) {
  float p1 = 0.0f;  // loads directly from this location.
  float p2 = 0.0f;  // and stores directly into this location.

  FOR_INT32_INPUTS(i) {
    int32_t magic =
        base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(i, 3));
    RawMachineAssemblerTester<int32_t> m;
    int32_t offset = i;
    byte* from = reinterpret_cast<byte*>(&p1) - offset;
    byte* to = reinterpret_cast<byte*>(&p2) - offset;
    // generate load [#base + #index]
    if (t == TestAlignment::kAligned) {
      Node* load = m.Load(MachineType::Float32(), m.PointerConstant(from),
                          m.IntPtrConstant(offset));
      m.Store(MachineRepresentation::kFloat32, m.PointerConstant(to),
              m.IntPtrConstant(offset), load, kNoWriteBarrier);
    } else if (t == TestAlignment::kUnaligned) {
      Node* load =
          m.UnalignedLoad(MachineType::Float32(), m.PointerConstant(from),
                          m.IntPtrConstant(offset));
      m.UnalignedStore(MachineRepresentation::kFloat32, m.PointerConstant(to),
                       m.IntPtrConstant(offset), load);

    } else {
      UNREACHABLE();
    }
    m.Return(m.Int32Constant(magic));

    FOR_FLOAT32_INPUTS(j) {
      p1 = j;
      p2 = j - 5;
      CHECK_EQ(magic, m.Call());
      CHECK_DOUBLE_EQ(p1, p2);
    }
  }
}

void RunLoadStoreFloat64Offset(TestAlignment t) {
  double p1 = 0;  // loads directly from this location.
  double p2 = 0;  // and stores directly into this location.

  FOR_INT32_INPUTS(i) {
    int32_t magic =
        base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(i, 3));
    RawMachineAssemblerTester<int32_t> m;
    int32_t offset = i;
    byte* from = reinterpret_cast<byte*>(&p1) - offset;
    byte* to = reinterpret_cast<byte*>(&p2) - offset;
    // generate load [#base + #index]
    if (t == TestAlignment::kAligned) {
      Node* load = m.Load(MachineType::Float64(), m.PointerConstant(from),
                          m.IntPtrConstant(offset));
      m.Store(MachineRepresentation::kFloat64, m.PointerConstant(to),
              m.IntPtrConstant(offset), load, kNoWriteBarrier);
    } else if (t == TestAlignment::kUnaligned) {
      Node* load =
          m.UnalignedLoad(MachineType::Float64(), m.PointerConstant(from),
                          m.IntPtrConstant(offset));
      m.UnalignedStore(MachineRepresentation::kFloat64, m.PointerConstant(to),
                       m.IntPtrConstant(offset), load);
    } else {
      UNREACHABLE();
    }
    m.Return(m.Int32Constant(magic));

    FOR_FLOAT64_INPUTS(j) {
      p1 = j;
      p2 = j - 5;
      CHECK_EQ(magic, m.Call());
      CHECK_DOUBLE_EQ(p1, p2);
    }
  }
}
}  // namespace

TEST(RunLoadInt32) { RunLoadInt32(TestAlignment::kAligned); }

TEST(RunUnalignedLoadInt32) { RunLoadInt32(TestAlignment::kUnaligned); }

TEST(RunLoadInt32Offset) { RunLoadInt32Offset(TestAlignment::kAligned); }

TEST(RunUnalignedLoadInt32Offset) {
  RunLoadInt32Offset(TestAlignment::kUnaligned);
}

TEST(RunLoadStoreFloat32Offset) {
  RunLoadStoreFloat32Offset(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreFloat32Offset) {
  RunLoadStoreFloat32Offset(TestAlignment::kUnaligned);
}

TEST(RunLoadStoreFloat64Offset) {
  RunLoadStoreFloat64Offset(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreFloat64Offset) {
  RunLoadStoreFloat64Offset(TestAlignment::kUnaligned);
}

namespace {

// Mostly same as CHECK_EQ() but customized for compressed tagged values.
template <typename CType>
void CheckEq(CType in_value, CType out_value) {
  CHECK_EQ(in_value, out_value);
}

#ifdef V8_COMPRESS_POINTERS
// Specializations for checking the result of compressing store.
template <>
void CheckEq<Object>(Object in_value, Object out_value) {
  Isolate* isolate = CcTest::InitIsolateOnce();
  // |out_value| is compressed. Check that it's valid.
  CHECK_EQ(CompressTagged(in_value->ptr()), out_value->ptr());
  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
  CHECK_EQ(in_value->ptr(),
           DecompressTaggedAny(isolate->isolate_root(),
                               static_cast<int32_t>(out_value->ptr())));
}

template <>
void CheckEq<HeapObject>(HeapObject in_value, HeapObject out_value) {
  return CheckEq<Object>(in_value, out_value);
}

template <>
void CheckEq<Smi>(Smi in_value, Smi out_value) {
  return CheckEq<Object>(in_value, out_value);
}
#endif

// Initializes the buffer with some raw data respecting the requested
// representation of the values.
template <typename CType>
void InitBuffer(CType* buffer, size_t length, MachineType rep) {
  const size_t kBufferSize = sizeof(CType) * length;
  if (!rep.IsTagged()) {
    byte* raw = reinterpret_cast<byte*>(buffer);
    for (size_t i = 0; i < kBufferSize; i++) {
      raw[i] = static_cast<byte>((i + kBufferSize) ^ 0xAA);
    }
    return;
  }

  // Tagged field loads require values to be properly tagged because of
  // pointer decompression that may be happening during load.
  Isolate* isolate = CcTest::InitIsolateOnce();
  Smi* smi_view = reinterpret_cast<Smi*>(&buffer[0]);
  if (rep.IsTaggedSigned()) {
    for (size_t i = 0; i < length; i++) {
      smi_view[i] = Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
    }
  } else {
    memcpy(&buffer[0], &isolate->roots_table(), kBufferSize);
    if (!rep.IsTaggedPointer()) {
      // Also add some Smis if we are checking the AnyTagged case.
      for (size_t i = 0; i < length / 2; i++) {
        smi_view[i] =
            Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
      }
    }
  }
}

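// Loads buffer[i] through a graph whose base is a constant pointer biased by
// -offset and whose index is the immediate (offset + i) * sizeof(buffer[0]),
// so a range of small and large, positive and negative immediates is covered.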
template <typename CType>
void RunLoadImmIndex(MachineType rep, TestAlignment t) {
  const int kNumElems = 16;
  CType buffer[kNumElems];

  InitBuffer(buffer, kNumElems, rep);

  // Test with various large and small offsets.
  for (int offset = -1; offset <= 200000; offset *= -5) {
    for (int i = 0; i < kNumElems; i++) {
      BufferedRawMachineAssemblerTester<CType> m;
      void* base_pointer = &buffer[0] - offset;
#ifdef V8_COMPRESS_POINTERS
      if (rep.IsTagged()) {
        // When pointer compression is enabled we need to access only the
        // lower 32 bits of the tagged value, while the buffer contains full
        // 64-bit values.
        base_pointer = LSB(base_pointer, kSystemPointerSize / 2);
      }
#endif
      Node* base = m.PointerConstant(base_pointer);
      Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
      if (t == TestAlignment::kAligned) {
        m.Return(m.Load(rep, base, index));
      } else if (t == TestAlignment::kUnaligned) {
        m.Return(m.UnalignedLoad(rep, base, index));
      } else {
        UNREACHABLE();
      }

      CheckEq<CType>(buffer[i], m.Call());
    }
  }
}

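// Copies in_buffer[x] to out_buffer[kNumElems - x - 1] through a generated
// load/store pair. out_buffer is pre-filled with a zap value so the checks
// can verify that exactly one slot was written and everything else was left
// untouched.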
template <typename CType>
void RunLoadStore(MachineType rep, TestAlignment t) {
  const int kNumElems = 16;
  CType in_buffer[kNumElems];
  CType out_buffer[kNumElems];
  uintptr_t zap_data[] = {kZapValue, kZapValue};
  CType zap_value;

  STATIC_ASSERT(sizeof(CType) <= sizeof(zap_data));
  MemCopy(&zap_value, &zap_data, sizeof(CType));
  InitBuffer(in_buffer, kNumElems, rep);

  for (int32_t x = 0; x < kNumElems; x++) {
    int32_t y = kNumElems - x - 1;

    RawMachineAssemblerTester<int32_t> m;
    int32_t OK = 0x29000 + x;
    Node* in_base = m.PointerConstant(in_buffer);
    Node* in_index = m.IntPtrConstant(x * sizeof(CType));
    Node* out_base = m.PointerConstant(out_buffer);
    Node* out_index = m.IntPtrConstant(y * sizeof(CType));
    if (t == TestAlignment::kAligned) {
      Node* load = m.Load(rep, in_base, in_index);
      m.Store(rep.representation(), out_base, out_index, load, kNoWriteBarrier);
    } else if (t == TestAlignment::kUnaligned) {
      Node* load = m.UnalignedLoad(rep, in_base, in_index);
      m.UnalignedStore(rep.representation(), out_base, out_index, load);
    }

    m.Return(m.Int32Constant(OK));

    for (int32_t z = 0; z < kNumElems; z++) {
      out_buffer[z] = zap_value;
    }
    CHECK_NE(in_buffer[x], out_buffer[y]);
    CHECK_EQ(OK, m.Call());
    // Mostly same as CHECK_EQ() but customized for compressed tagged values.
    CheckEq<CType>(in_buffer[x], out_buffer[y]);
    for (int32_t z = 0; z < kNumElems; z++) {
      if (z != y) CHECK_EQ(zap_value, out_buffer[z]);
    }
  }
}

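// Copies a single CType value between two byte buffers for every combination
// of source misalignment x and destination misalignment y within
// sizeof(CType), exercising unaligned loads and stores at each offset.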
template <typename CType>
void RunUnalignedLoadStoreUnalignedAccess(MachineType rep) {
  CType in, out;
  byte in_buffer[2 * sizeof(CType)];
  byte out_buffer[2 * sizeof(CType)];

  InitBuffer(&in, 1, rep);

  for (int x = 0; x < static_cast<int>(sizeof(CType)); x++) {
    // Direct write to &in_buffer[x] may cause unaligned access in C++ code so
    // we use MemCopy() to handle that.
    MemCopy(&in_buffer[x], &in, sizeof(CType));

    for (int y = 0; y < static_cast<int>(sizeof(CType)); y++) {
      RawMachineAssemblerTester<int32_t> m;
      int32_t OK = 0x29000 + x;

      Node* in_base = m.PointerConstant(in_buffer);
      Node* in_index = m.IntPtrConstant(x);
      Node* load = m.UnalignedLoad(rep, in_base, in_index);

      Node* out_base = m.PointerConstant(out_buffer);
      Node* out_index = m.IntPtrConstant(y);
      m.UnalignedStore(rep.representation(), out_base, out_index, load);

      m.Return(m.Int32Constant(OK));

      CHECK_EQ(OK, m.Call());
      // Direct read of &out_buffer[y] may cause unaligned access in C++ code
      // so we use MemCopy() to handle that.
      MemCopy(&out, &out_buffer[y], sizeof(CType));
      // Mostly same as CHECK_EQ() but customized for compressed tagged values.
      CheckEq<CType>(in, out);
    }
  }
}
}  // namespace

TEST(RunLoadImmIndex) {
  RunLoadImmIndex<int8_t>(MachineType::Int8(), TestAlignment::kAligned);
  RunLoadImmIndex<uint8_t>(MachineType::Uint8(), TestAlignment::kAligned);
  RunLoadImmIndex<int16_t>(MachineType::Int16(), TestAlignment::kAligned);
  RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
  RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
  RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
  RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kAligned);
  RunLoadImmIndex<Smi>(MachineType::TaggedSigned(), TestAlignment::kAligned);
  RunLoadImmIndex<HeapObject>(MachineType::TaggedPointer(),
                              TestAlignment::kAligned);
  RunLoadImmIndex<Object>(MachineType::AnyTagged(), TestAlignment::kAligned);
  RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kAligned);
  RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kAligned);
#if V8_TARGET_ARCH_64_BIT
  RunLoadImmIndex<int64_t>(MachineType::Int64(), TestAlignment::kAligned);
#endif
  // TODO(titzer): test various indexing modes.
}

TEST(RunUnalignedLoadImmIndex) {
  RunLoadImmIndex<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
  RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
  RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
  RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
  RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
  RunLoadImmIndex<Smi>(MachineType::TaggedSigned(), TestAlignment::kUnaligned);
  RunLoadImmIndex<HeapObject>(MachineType::TaggedPointer(),
                              TestAlignment::kUnaligned);
  RunLoadImmIndex<Object>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
  RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kUnaligned);
  RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kUnaligned);
#if V8_TARGET_ARCH_64_BIT
  RunLoadImmIndex<int64_t>(MachineType::Int64(), TestAlignment::kUnaligned);
#endif
  // TODO(titzer): test various indexing modes.
}

TEST(RunLoadStore) {
  RunLoadStore<int8_t>(MachineType::Int8(), TestAlignment::kAligned);
  RunLoadStore<uint8_t>(MachineType::Uint8(), TestAlignment::kAligned);
  RunLoadStore<int16_t>(MachineType::Int16(), TestAlignment::kAligned);
  RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
  RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
  RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
  RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kAligned);
  RunLoadStore<Smi>(MachineType::TaggedSigned(), TestAlignment::kAligned);
  RunLoadStore<HeapObject>(MachineType::TaggedPointer(),
                           TestAlignment::kAligned);
  RunLoadStore<Object>(MachineType::AnyTagged(), TestAlignment::kAligned);
  RunLoadStore<float>(MachineType::Float32(), TestAlignment::kAligned);
  RunLoadStore<double>(MachineType::Float64(), TestAlignment::kAligned);
#if V8_TARGET_ARCH_64_BIT
  RunLoadStore<int64_t>(MachineType::Int64(), TestAlignment::kAligned);
#endif
}

TEST(RunUnalignedLoadStore) {
  RunLoadStore<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
  RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
  RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
  RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
  RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
  RunLoadStore<Smi>(MachineType::TaggedSigned(), TestAlignment::kUnaligned);
  RunLoadStore<HeapObject>(MachineType::TaggedPointer(),
                           TestAlignment::kUnaligned);
  RunLoadStore<Object>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
  RunLoadStore<float>(MachineType::Float32(), TestAlignment::kUnaligned);
  RunLoadStore<double>(MachineType::Float64(), TestAlignment::kUnaligned);
#if V8_TARGET_ARCH_64_BIT
  RunLoadStore<int64_t>(MachineType::Int64(), TestAlignment::kUnaligned);
#endif
}

TEST(RunUnalignedLoadStoreUnalignedAccess) {
  RunUnalignedLoadStoreUnalignedAccess<int16_t>(MachineType::Int16());
  RunUnalignedLoadStoreUnalignedAccess<uint16_t>(MachineType::Uint16());
  RunUnalignedLoadStoreUnalignedAccess<int32_t>(MachineType::Int32());
  RunUnalignedLoadStoreUnalignedAccess<uint32_t>(MachineType::Uint32());
  RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::Pointer());
  RunUnalignedLoadStoreUnalignedAccess<Smi>(MachineType::TaggedSigned());
  RunUnalignedLoadStoreUnalignedAccess<HeapObject>(
      MachineType::TaggedPointer());
  RunUnalignedLoadStoreUnalignedAccess<Object>(MachineType::AnyTagged());
  RunUnalignedLoadStoreUnalignedAccess<float>(MachineType::Float32());
  RunUnalignedLoadStoreUnalignedAccess<double>(MachineType::Float64());
#if V8_TARGET_ARCH_64_BIT
  RunUnalignedLoadStoreUnalignedAccess<int64_t>(MachineType::Int64());
#endif
}

namespace {
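// RunLoadStoreSignExtend32 / RunLoadStoreZeroExtend32 load the low 1, 2 and 4
// bytes of buffer[0] as Int8/Int16/Int32 (or their unsigned counterparts),
// store the extended 32-bit results into buffer[1..3], and check the expected
// sign or zero extension for every input value.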
void RunLoadStoreSignExtend32(TestAlignment t) {
  int32_t buffer[4];
  RawMachineAssemblerTester<int32_t> m;
  Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8());
  if (t == TestAlignment::kAligned) {
    Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
    Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Int32());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
    m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16);
    m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32);
  } else if (t == TestAlignment::kUnaligned) {
    Node* load16 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
    Node* load32 = m.UnalignedLoadFromPointer(&buffer[0], MachineType::Int32());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
    m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord32,
                              load16);
    m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord32,
                              load32);
  } else {
    UNREACHABLE();
  }
  m.Return(load8);

  FOR_INT32_INPUTS(i) {
    buffer[0] = i;

    CHECK_EQ(static_cast<int8_t>(i & 0xFF), m.Call());
    CHECK_EQ(static_cast<int8_t>(i & 0xFF), buffer[1]);
    CHECK_EQ(static_cast<int16_t>(i & 0xFFFF), buffer[2]);
    CHECK_EQ(i, buffer[3]);
  }
}

void RunLoadStoreZeroExtend32(TestAlignment t) {
  uint32_t buffer[4];
  RawMachineAssemblerTester<uint32_t> m;
  Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8());
  if (t == TestAlignment::kAligned) {
    Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
    Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Uint32());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
    m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16);
    m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32);
  } else if (t == TestAlignment::kUnaligned) {
    Node* load16 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
    Node* load32 =
        m.UnalignedLoadFromPointer(&buffer[0], MachineType::Uint32());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
    m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord32,
                              load16);
    m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord32,
                              load32);
  }
  m.Return(load8);

  FOR_UINT32_INPUTS(i) {
    buffer[0] = i;

    CHECK_EQ((i & 0xFF), m.Call());
    CHECK_EQ((i & 0xFF), buffer[1]);
    CHECK_EQ((i & 0xFFFF), buffer[2]);
    CHECK_EQ(i, buffer[3]);
  }
}
}  // namespace

TEST(RunLoadStoreSignExtend32) {
  RunLoadStoreSignExtend32(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreSignExtend32) {
  RunLoadStoreSignExtend32(TestAlignment::kUnaligned);
}

TEST(RunLoadStoreZeroExtend32) {
  RunLoadStoreZeroExtend32(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreZeroExtend32) {
  RunLoadStoreZeroExtend32(TestAlignment::kUnaligned);
}

#if V8_TARGET_ARCH_64_BIT

namespace {
void RunLoadStoreSignExtend64(TestAlignment t) {
  if ((true)) return;  // TODO(titzer): sign extension of loads to 64-bit.
  int64_t buffer[5];
  RawMachineAssemblerTester<int64_t> m;
  Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8());
  if (t == TestAlignment::kAligned) {
    Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
    Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Int32());
    Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Int64());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
    m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16);
    m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32);
    m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64);
  } else if (t == TestAlignment::kUnaligned) {
    Node* load16 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
    Node* load32 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 4), MachineType::Int32());
    Node* load64 = m.UnalignedLoadFromPointer(&buffer[0], MachineType::Int64());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
    m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord64,
                              load16);
    m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord64,
                              load32);
    m.UnalignedStoreToPointer(&buffer[4], MachineRepresentation::kWord64,
                              load64);
  } else {
    UNREACHABLE();
  }
  m.Return(load8);

  FOR_INT64_INPUTS(i) {
    buffer[0] = i;

    CHECK_EQ(static_cast<int8_t>(i & 0xFF), m.Call());
    CHECK_EQ(static_cast<int8_t>(i & 0xFF), buffer[1]);
    CHECK_EQ(static_cast<int16_t>(i & 0xFFFF), buffer[2]);
    CHECK_EQ(static_cast<int32_t>(i & 0xFFFFFFFF), buffer[3]);
    CHECK_EQ(i, buffer[4]);
  }
}

void RunLoadStoreZeroExtend64(TestAlignment t) {
  if (kSystemPointerSize < 8) return;
  uint64_t buffer[5];
  RawMachineAssemblerTester<uint64_t> m;
  Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8());
  if (t == TestAlignment::kAligned) {
    Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
    Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Uint32());
    Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Uint64());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
    m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16);
    m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32);
    m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64);
  } else if (t == TestAlignment::kUnaligned) {
    Node* load16 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
    Node* load32 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 4), MachineType::Uint32());
    Node* load64 =
        m.UnalignedLoadFromPointer(&buffer[0], MachineType::Uint64());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
    m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord64,
                              load16);
    m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord64,
                              load32);
    m.UnalignedStoreToPointer(&buffer[4], MachineRepresentation::kWord64,
                              load64);
  } else {
    UNREACHABLE();
  }
  m.Return(load8);

  FOR_UINT64_INPUTS(i) {
    buffer[0] = i;

    CHECK_EQ((i & 0xFF), m.Call());
    CHECK_EQ((i & 0xFF), buffer[1]);
    CHECK_EQ((i & 0xFFFF), buffer[2]);
    CHECK_EQ((i & 0xFFFFFFFF), buffer[3]);
    CHECK_EQ(i, buffer[4]);
  }
}

}  // namespace

TEST(RunLoadStoreSignExtend64) {
  RunLoadStoreSignExtend64(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreSignExtend64) {
  RunLoadStoreSignExtend64(TestAlignment::kUnaligned);
}

TEST(RunLoadStoreZeroExtend64) {
  RunLoadStoreZeroExtend64(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreZeroExtend64) {
  RunLoadStoreZeroExtend64(TestAlignment::kUnaligned);
}

#endif

namespace {
template <typename IntType>
void LoadStoreTruncation(MachineType kRepresentation, TestAlignment t) {
  IntType input;

  RawMachineAssemblerTester<int32_t> m;
  Node* ap1;
  if (t == TestAlignment::kAligned) {
    Node* a = m.LoadFromPointer(&input, kRepresentation);
    ap1 = m.Int32Add(a, m.Int32Constant(1));
    m.StoreToPointer(&input, kRepresentation.representation(), ap1);
  } else if (t == TestAlignment::kUnaligned) {
    Node* a = m.UnalignedLoadFromPointer(&input, kRepresentation);
    ap1 = m.Int32Add(a, m.Int32Constant(1));
    m.UnalignedStoreToPointer(&input, kRepresentation.representation(), ap1);
  } else {
    UNREACHABLE();
  }
  m.Return(ap1);

  const IntType max = std::numeric_limits<IntType>::max();
  const IntType min = std::numeric_limits<IntType>::min();

  // Test upper bound.
  input = max;
  CHECK_EQ(max + 1, m.Call());
  CHECK_EQ(min, input);

  // Test lower bound.
  input = min;
  CHECK_EQ(static_cast<IntType>(max + 2), m.Call());
  CHECK_EQ(min + 1, input);

  // Test all one byte values that are not one byte bounds.
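  // Note: for negative i, max + (i - min) + 2 equals (i + 1) + 2^N (where N is
  // the bit width of IntType) written without relying on signed overflow;
  // truncating it back to IntType recovers i + 1, which is the value the
  // generated code stores into |input|.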
  for (int i = -127; i < 127; i++) {
    input = i;
    int expected = i >= 0 ? i + 1 : max + (i - min) + 2;
    CHECK_EQ(static_cast<IntType>(expected), m.Call());
    CHECK_EQ(static_cast<IntType>(i + 1), input);
  }
}
}  // namespace

TEST(RunLoadStoreTruncation) {
  LoadStoreTruncation<int8_t>(MachineType::Int8(), TestAlignment::kAligned);
  LoadStoreTruncation<int16_t>(MachineType::Int16(), TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreTruncation) {
  LoadStoreTruncation<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
}

#undef LSB
#undef A_BILLION
#undef A_GIG

}  // namespace compiler
}  // namespace internal
}  // namespace v8
|