LCOV - code coverage report
Current view: top level - test/cctest/compiler - test-run-load-store.cc (source / functions)
Test: app.info          Date: 2019-04-17
                 Hit    Total    Coverage
Lines:           353    361      97.8 %
Functions:        84     84     100.0 %

          Line data    Source code
       1             : // Copyright 2016 the V8 project authors. All rights reserved. Use of this
       2             : // source code is governed by a BSD-style license that can be found in the
       3             : // LICENSE file.
       4             : 
       5             : #include <cmath>
       6             : #include <functional>
       7             : #include <limits>
       8             : 
       9             : #include "src/base/bits.h"
      10             : #include "src/base/overflowing-math.h"
      11             : #include "src/base/utils/random-number-generator.h"
      12             : #include "src/objects-inl.h"
      13             : #include "test/cctest/cctest.h"
      14             : #include "test/cctest/compiler/codegen-tester.h"
      15             : #include "test/cctest/compiler/graph-builder-tester.h"
      16             : #include "test/cctest/compiler/value-helper.h"
      17             : 
      18             : 
      19             : namespace v8 {
      20             : namespace internal {
      21             : namespace compiler {
      22             : 
      23             : enum TestAlignment {
      24             :   kAligned,
      25             :   kUnaligned,
      26             : };
      27             : 
      28             : #if V8_TARGET_LITTLE_ENDIAN
      29             : #define LSB(addr, bytes) addr
      30             : #elif V8_TARGET_BIG_ENDIAN
      31             : #define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - (bytes)
      32             : #else
      33             : #error "Unknown Architecture"
      34             : #endif
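                      : // A minimal sketch of what LSB() yields (illustrative, not part of the
                      : // test proper): the low `bytes` of a value sit at its start on
                      : // little-endian targets and at its end on big-endian targets, and LSB()
                      : // hides that difference:
                      : //
                      : //   int64_t v = 0x0102030405060708;
                      : //   byte* low = LSB(&v, 4);
                      : //   // little-endian: low == reinterpret_cast<byte*>(&v)
                      : //   // big-endian:    low == reinterpret_cast<byte*>(&v + 1) - 4
                      : //   // Reading 4 bytes at `low` sees 0x05060708 either way.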
      35             : 
       36             : // This is America!
      37             : #define A_BILLION 1000000000ULL
      38             : #define A_GIG (1024ULL * 1024ULL * 1024ULL)
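                      : // Note the decimal/binary distinction: A_BILLION is 10^9 =
                      : // 1,000,000,000, while A_GIG is 2^30 = 1,073,741,824, about 7.4% larger.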
      39             : 
      40             : namespace {
      41           8 : void RunLoadInt32(const TestAlignment t) {
      42           8 :   RawMachineAssemblerTester<int32_t> m;
      43             : 
      44           8 :   int32_t p1 = 0;  // loads directly from this location.
      45             : 
      46           8 :   if (t == TestAlignment::kAligned) {
      47           4 :     m.Return(m.LoadFromPointer(&p1, MachineType::Int32()));
      48           4 :   } else if (t == TestAlignment::kUnaligned) {
      49           4 :     m.Return(m.UnalignedLoadFromPointer(&p1, MachineType::Int32()));
      50             :   } else {
      51           0 :     UNREACHABLE();
      52             :   }
      53             : 
      54         936 :   FOR_INT32_INPUTS(i) {
      55         464 :     p1 = i;
      56         464 :     CHECK_EQ(p1, m.Call());
      57             :   }
      58           8 : }
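                      : // The harness pattern above recurs throughout this file (a hedged
                      : // summary): RawMachineAssemblerTester<int32_t> assembles a small graph,
                      : // m.Return() finishes it, and m.Call() executes the generated code.
                      : // Because the graph captured &p1 as a raw pointer, mutating p1 between
                      : // calls changes what the compiled load observes:
                      : //
                      : //   p1 = 42;
                      : //   CHECK_EQ(42, m.Call());  // the generated load re-reads p1 every call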
      59             : 
      60           8 : void RunLoadInt32Offset(TestAlignment t) {
      61           8 :   int32_t p1 = 0;  // loads directly from this location.
      62             : 
      63             :   int32_t offsets[] = {-2000000, -100, -101, 1,          3,
      64           8 :                        7,        120,  2000, 2000000000, 0xFF};
      65             : 
      66         168 :   for (size_t i = 0; i < arraysize(offsets); i++) {
      67          80 :     RawMachineAssemblerTester<int32_t> m;
      68          80 :     int32_t offset = offsets[i];
      69          80 :     byte* pointer = reinterpret_cast<byte*>(&p1) - offset;
      70             : 
       71             :     // Generate load [#base + #index]; base == &p1 - offset, so the load hits &p1.
      72          80 :     if (t == TestAlignment::kAligned) {
      73          40 :       m.Return(m.LoadFromPointer(pointer, MachineType::Int32(), offset));
      74          40 :     } else if (t == TestAlignment::kUnaligned) {
      75          40 :       m.Return(
      76          40 :           m.UnalignedLoadFromPointer(pointer, MachineType::Int32(), offset));
      77             :     } else {
      78           0 :       UNREACHABLE();
      79             :     }
      80             : 
      81        9360 :     FOR_INT32_INPUTS(j) {
      82        4640 :       p1 = j;
      83        4640 :       CHECK_EQ(p1, m.Call());
      84             :     }
      85             :   }
      86           8 : }
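                      : // The biased-base trick above, concretely (an illustrative walk-through
                      : // for offset = 2000): the test computes
                      : //
                      : //   byte* pointer = reinterpret_cast<byte*>(&p1) - 2000;
                      : //
                      : // so the generated load [pointer + 2000] dereferences exactly &p1. This
                      : // exercises base+immediate addressing without touching memory outside p1.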
      87             : 
      88           8 : void RunLoadStoreFloat32Offset(TestAlignment t) {
      89           8 :   float p1 = 0.0f;  // loads directly from this location.
      90           8 :   float p2 = 0.0f;  // and stores directly into this location.
      91             : 
      92         936 :   FOR_INT32_INPUTS(i) {
      93             :     int32_t magic =
      94             :         base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(i, 3));
      95         464 :     RawMachineAssemblerTester<int32_t> m;
      96             :     int32_t offset = i;
      97         464 :     byte* from = reinterpret_cast<byte*>(&p1) - offset;
      98         464 :     byte* to = reinterpret_cast<byte*>(&p2) - offset;
       99             :     // Generate load [#base + #index]; from == &p1 - offset and to == &p2 - offset.
     100         464 :     if (t == TestAlignment::kAligned) {
     101         232 :       Node* load = m.Load(MachineType::Float32(), m.PointerConstant(from),
     102         232 :                           m.IntPtrConstant(offset));
     103             :       m.Store(MachineRepresentation::kFloat32, m.PointerConstant(to),
     104         232 :               m.IntPtrConstant(offset), load, kNoWriteBarrier);
     105         232 :     } else if (t == TestAlignment::kUnaligned) {
     106             :       Node* load =
     107         232 :           m.UnalignedLoad(MachineType::Float32(), m.PointerConstant(from),
     108         232 :                           m.IntPtrConstant(offset));
     109             :       m.UnalignedStore(MachineRepresentation::kFloat32, m.PointerConstant(to),
     110         232 :                        m.IntPtrConstant(offset), load);
     111             : 
     112             :     } else {
     113           0 :       UNREACHABLE();
     114             :     }
     115         464 :     m.Return(m.Int32Constant(magic));
     116             : 
     117      107184 :     FOR_FLOAT32_INPUTS(j) {
     118       53360 :       p1 = j;
     119       53360 :       p2 = j - 5;
     120       53360 :       CHECK_EQ(magic, m.Call());
     121      160080 :       CHECK_DOUBLE_EQ(p1, p2);
     122             :     }
     123             :   }
     124           8 : }
     125             : 
     126           8 : void RunLoadStoreFloat64Offset(TestAlignment t) {
     127           8 :   double p1 = 0;  // loads directly from this location.
     128           8 :   double p2 = 0;  // and stores directly into this location.
     129             : 
     130         936 :   FOR_INT32_INPUTS(i) {
     131             :     int32_t magic =
     132             :         base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(i, 3));
     133         464 :     RawMachineAssemblerTester<int32_t> m;
     134             :     int32_t offset = i;
     135         464 :     byte* from = reinterpret_cast<byte*>(&p1) - offset;
     136         464 :     byte* to = reinterpret_cast<byte*>(&p2) - offset;
      137             :     // Generate load [#base + #index]; from == &p1 - offset and to == &p2 - offset.
     138         464 :     if (t == TestAlignment::kAligned) {
     139         232 :       Node* load = m.Load(MachineType::Float64(), m.PointerConstant(from),
     140         232 :                           m.IntPtrConstant(offset));
     141             :       m.Store(MachineRepresentation::kFloat64, m.PointerConstant(to),
     142         232 :               m.IntPtrConstant(offset), load, kNoWriteBarrier);
     143         232 :     } else if (t == TestAlignment::kUnaligned) {
     144             :       Node* load =
     145         232 :           m.UnalignedLoad(MachineType::Float64(), m.PointerConstant(from),
     146         232 :                           m.IntPtrConstant(offset));
     147             :       m.UnalignedStore(MachineRepresentation::kFloat64, m.PointerConstant(to),
     148         232 :                        m.IntPtrConstant(offset), load);
     149             :     } else {
     150           0 :       UNREACHABLE();
     151             :     }
     152         464 :     m.Return(m.Int32Constant(magic));
     153             : 
     154       45936 :     FOR_FLOAT64_INPUTS(j) {
     155       22736 :       p1 = j;
     156       22736 :       p2 = j - 5;
     157       22736 :       CHECK_EQ(magic, m.Call());
     158       68208 :       CHECK_DOUBLE_EQ(p1, p2);
     159             :     }
     160             :   }
     161           8 : }
     162             : }  // namespace
     163             : 
     164       26643 : TEST(RunLoadInt32) { RunLoadInt32(TestAlignment::kAligned); }
     165             : 
     166       26643 : TEST(RunUnalignedLoadInt32) { RunLoadInt32(TestAlignment::kUnaligned); }
     167             : 
     168       26643 : TEST(RunLoadInt32Offset) { RunLoadInt32Offset(TestAlignment::kAligned); }
     169             : 
     170       26643 : TEST(RunUnalignedLoadInt32Offset) {
     171           4 :   RunLoadInt32Offset(TestAlignment::kUnaligned);
     172           4 : }
     173             : 
     174       26643 : TEST(RunLoadStoreFloat32Offset) {
     175           4 :   RunLoadStoreFloat32Offset(TestAlignment::kAligned);
     176           4 : }
     177             : 
     178       26643 : TEST(RunUnalignedLoadStoreFloat32Offset) {
     179           4 :   RunLoadStoreFloat32Offset(TestAlignment::kUnaligned);
     180           4 : }
     181             : 
     182       26643 : TEST(RunLoadStoreFloat64Offset) {
     183           4 :   RunLoadStoreFloat64Offset(TestAlignment::kAligned);
     184           4 : }
     185             : 
     186       26643 : TEST(RunUnalignedLoadStoreFloat64Offset) {
     187           4 :   RunLoadStoreFloat64Offset(TestAlignment::kUnaligned);
     188           4 : }
     189             : 
     190             : namespace {
     191             : 
      192             : // Mostly the same as CHECK_EQ(), but customized for compressed tagged values.
     193             : template <typename CType>
     194             : void CheckEq(CType in_value, CType out_value) {
     195       17120 :   CHECK_EQ(in_value, out_value);
     196             : }
     197             : 
     198             : #ifdef V8_COMPRESS_POINTERS
      199             : // Specializations for checking the result of a compressing store.
     200             : template <>
     201             : void CheckEq<Object>(Object in_value, Object out_value) {
      202             :   // Compare only the lower 32 bits of the value, because tagged loads and
      203             :   // stores are 32-bit operations anyway.
     204             :   CHECK_EQ(static_cast<Tagged_t>(in_value.ptr()),
     205             :            static_cast<Tagged_t>(out_value.ptr()));
     206             : }
     207             : 
     208             : template <>
     209             : void CheckEq<HeapObject>(HeapObject in_value, HeapObject out_value) {
     210             :   return CheckEq<Object>(in_value, out_value);
     211             : }
     212             : 
     213             : template <>
     214             : void CheckEq<Smi>(Smi in_value, Smi out_value) {
     215             :   return CheckEq<Object>(in_value, out_value);
     216             : }
     217             : #endif
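                      : // What the 32-bit comparison above amounts to (an illustrative sketch;
                      : // the value is made up): with pointer compression, Tagged_t is a 32-bit
                      : // type, so the cast keeps only the low word of a full tagged value:
                      : //
                      : //   static_cast<Tagged_t>(0x0000123400ABCD01)  // -> 0x00ABCD01
                      : //
                      : // Two tagged values thus compare equal here iff their low 32 bits match,
                      : // which is all a compressed tagged load/store preserves.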
     218             : 
      219             : // Initializes the buffer with raw data that respects the requested
      220             : // representation of the values.
     221             : template <typename CType>
     222         236 : void InitBuffer(CType* buffer, size_t length, MachineType rep) {
     223         220 :   const size_t kBufferSize = sizeof(CType) * length;
     224         236 :   if (!rep.IsTagged()) {
     225             :     byte* raw = reinterpret_cast<byte*>(buffer);
     226       21488 :     for (size_t i = 0; i < kBufferSize; i++) {
     227       10656 :       raw[i] = static_cast<byte>((i + kBufferSize) ^ 0xAA);
     228             :     }
     229             :     return;
     230             :   }
     231             : 
      232             :   // Tagged field loads require values to be properly tagged, because of the
      233             :   // pointer decompression that may happen during the load.
     234             :   Isolate* isolate = CcTest::InitIsolateOnce();
     235             :   Smi* smi_view = reinterpret_cast<Smi*>(&buffer[0]);
     236          60 :   if (rep.IsTaggedSigned()) {
     237         540 :     for (size_t i = 0; i < length; i++) {
     238         520 :       smi_view[i] = Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
     239             :     }
     240             :   } else {
     241             :     memcpy(&buffer[0], &isolate->roots_table(), kBufferSize);
     242          40 :     if (!rep.IsTaggedPointer()) {
      243             :       // Also add some Smis if we are checking the AnyTagged case.
     244         276 :       for (size_t i = 0; i < length / 2; i++) {
     245         256 :         smi_view[i] =
     246         128 :             Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
     247             :       }
     248             :     }
     249             :   }
     250             : }
     251             : 
     252             : template <typename CType>
     253          96 : void RunLoadImmIndex(MachineType rep, TestAlignment t) {
     254             :   const int kNumElems = 16;
     255          24 :   CType buffer[kNumElems];
     256             : 
     257          96 :   InitBuffer(buffer, kNumElems, rep);
     258             : 
      259             :   // Test with various large and small offsets; offset alternates sign and
                      :   // grows by 5x: -1, 5, -25, 125, ..., -390625.
     260        1824 :   for (int offset = -1; offset <= 200000; offset *= -5) {
     261       28512 :     for (int i = 0; i < kNumElems; i++) {
     262       13824 :       BufferedRawMachineAssemblerTester<CType> m;
     263       13824 :       void* base_pointer = &buffer[0] - offset;
     264             : #ifdef V8_COMPRESS_POINTERS
     265             :       if (rep.IsTagged()) {
      266             :         // When pointer compression is enabled, we need to access only the
      267             :         // lower 32 bits of the tagged value, while the buffer contains full
      268             :         // 64-bit values.
     269             :         base_pointer = LSB(base_pointer, kTaggedSize);
     270             :       }
     271             : #endif
     272             :       Node* base = m.PointerConstant(base_pointer);
     273       13824 :       Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
     274       13824 :       if (t == TestAlignment::kAligned) {
     275        7488 :         m.Return(m.Load(rep, base, index));
     276        6336 :       } else if (t == TestAlignment::kUnaligned) {
     277        6336 :         m.Return(m.UnalignedLoad(rep, base, index));
     278             :       } else {
     279           0 :         UNREACHABLE();
     280             :       }
     281             : 
     282       10368 :       CheckEq<CType>(buffer[i], m.Call());
     283             :     }
     284             :   }
     285          96 : }
     286             : 
     287             : template <typename CType>
     288          96 : void RunLoadStore(MachineType rep, TestAlignment t) {
     289             :   const int kNumElems = 16;
     290          24 :   CType in_buffer[kNumElems];
     291          24 :   CType out_buffer[kNumElems];
     292             :   uintptr_t zap_data[] = {kZapValue, kZapValue};
     293             :   CType zap_value;
     294             : 
     295             :   STATIC_ASSERT(sizeof(CType) <= sizeof(zap_data));
     296             :   MemCopy(&zap_value, &zap_data, sizeof(CType));
     297          96 :   InitBuffer(in_buffer, kNumElems, rep);
     298             : 
     299        3168 :   for (int32_t x = 0; x < kNumElems; x++) {
     300        1536 :     int32_t y = kNumElems - x - 1;
     301             : 
     302        1536 :     RawMachineAssemblerTester<int32_t> m;
     303        1536 :     int32_t OK = 0x29000 + x;
     304             :     Node* in_base = m.PointerConstant(in_buffer);
     305        1536 :     Node* in_index = m.IntPtrConstant(x * sizeof(CType));
     306             :     Node* out_base = m.PointerConstant(out_buffer);
     307        1536 :     Node* out_index = m.IntPtrConstant(y * sizeof(CType));
     308        1536 :     if (t == TestAlignment::kAligned) {
     309         832 :       Node* load = m.Load(rep, in_base, in_index);
     310         832 :       m.Store(rep.representation(), out_base, out_index, load, kNoWriteBarrier);
     311         704 :     } else if (t == TestAlignment::kUnaligned) {
     312         704 :       Node* load = m.UnalignedLoad(rep, in_base, in_index);
     313         704 :       m.UnalignedStore(rep.representation(), out_base, out_index, load);
     314             :     }
     315             : 
     316        1536 :     m.Return(m.Int32Constant(OK));
     317             : 
     318       50688 :     for (int32_t z = 0; z < kNumElems; z++) {
     319       24576 :       out_buffer[z] = zap_value;
     320             :     }
     321        1536 :     CHECK_NE(in_buffer[x], out_buffer[y]);
     322        1536 :     CHECK_EQ(OK, m.Call());
      323             :     // Mostly the same as CHECK_EQ(), but customized for compressed tagged values.
     324        1152 :     CheckEq<CType>(in_buffer[x], out_buffer[y]);
     325       50688 :     for (int32_t z = 0; z < kNumElems; z++) {
     326       24576 :       if (z != y) CHECK_EQ(zap_value, out_buffer[z]);
     327             :     }
     328             :   }
     329          96 : }
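                      : // A brief aside on the zap pattern above: out_buffer is pre-filled with
                      : // a kZapValue-derived poison of exactly sizeof(CType) bytes, so the test
                      : // can assert both that slot y received in_buffer[x] and that no
                      : // neighboring slot was clobbered by the generated store:
                      : //
                      : //   if (z != y) CHECK_EQ(zap_value, out_buffer[z]);  // no stray writes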
     330             : 
     331             : template <typename CType>
     332          44 : void RunUnalignedLoadStoreUnalignedAccess(MachineType rep) {
     333          12 :   CType in, out;
     334             :   byte in_buffer[2 * sizeof(CType)];
     335             :   byte out_buffer[2 * sizeof(CType)];
     336             : 
     337          44 :   InitBuffer(&in, 1, rep);
     338             : 
     339         556 :   for (int x = 0; x < static_cast<int>(sizeof(CType)); x++) {
      340             :     // A direct write to &in_buffer[x] may cause an unaligned access in C++
      341             :     // code, so we use MemCopy() to handle that.
     342         256 :     MemCopy(&in_buffer[x], &in, sizeof(CType));
     343             : 
     344        3776 :     for (int y = 0; y < static_cast<int>(sizeof(CType)); y++) {
     345        1760 :       RawMachineAssemblerTester<int32_t> m;
     346        1760 :       int32_t OK = 0x29000 + x;
     347             : 
     348             :       Node* in_base = m.PointerConstant(in_buffer);
     349        1760 :       Node* in_index = m.IntPtrConstant(x);
     350        1760 :       Node* load = m.UnalignedLoad(rep, in_base, in_index);
     351             : 
     352             :       Node* out_base = m.PointerConstant(out_buffer);
     353        1760 :       Node* out_index = m.IntPtrConstant(y);
     354        1760 :       m.UnalignedStore(rep.representation(), out_base, out_index, load);
     355             : 
     356        1760 :       m.Return(m.Int32Constant(OK));
     357             : 
     358        1760 :       CHECK_EQ(OK, m.Call());
      359             :       // A direct read of &out_buffer[y] may cause an unaligned access in C++
      360             :       // code, so we use MemCopy() to handle that.
     361        1760 :       MemCopy(&out, &out_buffer[y], sizeof(CType));
      362             :       // Mostly the same as CHECK_EQ(), but customized for compressed tagged values.
     363         992 :       CheckEq<CType>(in, out);
     364             :     }
     365             :   }
     366          44 : }
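                      : // Why MemCopy() rather than a plain cast in the helper above (a hedged
                      : // aside): dereferencing a pointer that is misaligned for its type is
                      : // undefined behavior in C++, so the portable way to move a possibly
                      : // unaligned value is to copy its bytes:
                      : //
                      : //   uint32_t v;
                      : //   MemCopy(&v, &in_buffer[x], sizeof(v));  // fine for any x
                      : //   // v = *reinterpret_cast<uint32_t*>(&in_buffer[x]);  // UB if misaligned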
     367             : }  // namespace
     368             : 
     369       26643 : TEST(RunLoadImmIndex) {
     370           4 :   RunLoadImmIndex<int8_t>(MachineType::Int8(), TestAlignment::kAligned);
     371           4 :   RunLoadImmIndex<uint8_t>(MachineType::Uint8(), TestAlignment::kAligned);
     372           4 :   RunLoadImmIndex<int16_t>(MachineType::Int16(), TestAlignment::kAligned);
     373           4 :   RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
     374           4 :   RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
     375           4 :   RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
     376           4 :   RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kAligned);
     377           4 :   RunLoadImmIndex<Smi>(MachineType::TaggedSigned(), TestAlignment::kAligned);
     378             :   RunLoadImmIndex<HeapObject>(MachineType::TaggedPointer(),
     379           4 :                               TestAlignment::kAligned);
     380           4 :   RunLoadImmIndex<Object>(MachineType::AnyTagged(), TestAlignment::kAligned);
     381           4 :   RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kAligned);
     382           4 :   RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kAligned);
     383             : #if V8_TARGET_ARCH_64_BIT
     384           4 :   RunLoadImmIndex<int64_t>(MachineType::Int64(), TestAlignment::kAligned);
     385             : #endif
     386             :   // TODO(titzer): test various indexing modes.
     387           4 : }
     388             : 
     389       26643 : TEST(RunUnalignedLoadImmIndex) {
     390           4 :   RunLoadImmIndex<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
     391           4 :   RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
     392           4 :   RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
     393           4 :   RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
     394           4 :   RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
     395           4 :   RunLoadImmIndex<Smi>(MachineType::TaggedSigned(), TestAlignment::kUnaligned);
     396             :   RunLoadImmIndex<HeapObject>(MachineType::TaggedPointer(),
     397           4 :                               TestAlignment::kUnaligned);
     398           4 :   RunLoadImmIndex<Object>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
     399           4 :   RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kUnaligned);
     400           4 :   RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kUnaligned);
     401             : #if V8_TARGET_ARCH_64_BIT
     402           4 :   RunLoadImmIndex<int64_t>(MachineType::Int64(), TestAlignment::kUnaligned);
     403             : #endif
     404             :   // TODO(titzer): test various indexing modes.
     405           4 : }
     406             : 
     407       26643 : TEST(RunLoadStore) {
     408           4 :   RunLoadStore<int8_t>(MachineType::Int8(), TestAlignment::kAligned);
     409           4 :   RunLoadStore<uint8_t>(MachineType::Uint8(), TestAlignment::kAligned);
     410           4 :   RunLoadStore<int16_t>(MachineType::Int16(), TestAlignment::kAligned);
     411           4 :   RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
     412           4 :   RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
     413           4 :   RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
     414           4 :   RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kAligned);
     415           4 :   RunLoadStore<Smi>(MachineType::TaggedSigned(), TestAlignment::kAligned);
     416             :   RunLoadStore<HeapObject>(MachineType::TaggedPointer(),
     417           4 :                            TestAlignment::kAligned);
     418           4 :   RunLoadStore<Object>(MachineType::AnyTagged(), TestAlignment::kAligned);
     419           4 :   RunLoadStore<float>(MachineType::Float32(), TestAlignment::kAligned);
     420           4 :   RunLoadStore<double>(MachineType::Float64(), TestAlignment::kAligned);
     421             : #if V8_TARGET_ARCH_64_BIT
     422           4 :   RunLoadStore<int64_t>(MachineType::Int64(), TestAlignment::kAligned);
     423             : #endif
     424           4 : }
     425             : 
     426       26643 : TEST(RunUnalignedLoadStore) {
     427           4 :   RunLoadStore<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
     428           4 :   RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
     429           4 :   RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
     430           4 :   RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
     431           4 :   RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
     432           4 :   RunLoadStore<Smi>(MachineType::TaggedSigned(), TestAlignment::kUnaligned);
     433             :   RunLoadStore<HeapObject>(MachineType::TaggedPointer(),
     434           4 :                            TestAlignment::kUnaligned);
     435           4 :   RunLoadStore<Object>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
     436           4 :   RunLoadStore<float>(MachineType::Float32(), TestAlignment::kUnaligned);
     437           4 :   RunLoadStore<double>(MachineType::Float64(), TestAlignment::kUnaligned);
     438             : #if V8_TARGET_ARCH_64_BIT
     439           4 :   RunLoadStore<int64_t>(MachineType::Int64(), TestAlignment::kUnaligned);
     440             : #endif
     441           4 : }
     442             : 
     443       26643 : TEST(RunUnalignedLoadStoreUnalignedAccess) {
     444           4 :   RunUnalignedLoadStoreUnalignedAccess<int16_t>(MachineType::Int16());
     445           4 :   RunUnalignedLoadStoreUnalignedAccess<uint16_t>(MachineType::Uint16());
     446           4 :   RunUnalignedLoadStoreUnalignedAccess<int32_t>(MachineType::Int32());
     447           4 :   RunUnalignedLoadStoreUnalignedAccess<uint32_t>(MachineType::Uint32());
     448           4 :   RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::Pointer());
     449           4 :   RunUnalignedLoadStoreUnalignedAccess<Smi>(MachineType::TaggedSigned());
     450             :   RunUnalignedLoadStoreUnalignedAccess<HeapObject>(
     451           4 :       MachineType::TaggedPointer());
     452           4 :   RunUnalignedLoadStoreUnalignedAccess<Object>(MachineType::AnyTagged());
     453           4 :   RunUnalignedLoadStoreUnalignedAccess<float>(MachineType::Float32());
     454           4 :   RunUnalignedLoadStoreUnalignedAccess<double>(MachineType::Float64());
     455             : #if V8_TARGET_ARCH_64_BIT
     456           4 :   RunUnalignedLoadStoreUnalignedAccess<int64_t>(MachineType::Int64());
     457             : #endif
     458           4 : }
     459             : 
     460             : namespace {
     461           8 : void RunLoadStoreSignExtend32(TestAlignment t) {
     462             :   int32_t buffer[4];
     463           8 :   RawMachineAssemblerTester<int32_t> m;
     464           8 :   Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8());
     465           8 :   if (t == TestAlignment::kAligned) {
     466           4 :     Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
     467           4 :     Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Int32());
     468           4 :     m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
     469           4 :     m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16);
     470           4 :     m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32);
     471           4 :   } else if (t == TestAlignment::kUnaligned) {
     472             :     Node* load16 =
     473           4 :         m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
     474           4 :     Node* load32 = m.UnalignedLoadFromPointer(&buffer[0], MachineType::Int32());
     475           4 :     m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
     476             :     m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord32,
     477           4 :                               load16);
     478             :     m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord32,
     479           4 :                               load32);
     480             :   } else {
     481           0 :     UNREACHABLE();
     482             :   }
     483           8 :   m.Return(load8);
     484             : 
     485         936 :   FOR_INT32_INPUTS(i) {
     486         464 :     buffer[0] = i;
     487             : 
     488         928 :     CHECK_EQ(static_cast<int8_t>(i & 0xFF), m.Call());
     489         464 :     CHECK_EQ(static_cast<int8_t>(i & 0xFF), buffer[1]);
     490         928 :     CHECK_EQ(static_cast<int16_t>(i & 0xFFFF), buffer[2]);
     491         464 :     CHECK_EQ(i, buffer[3]);
     492             :   }
     493           8 : }
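                      : // A worked instance of the sign-extension checks above (illustrative
                      : // input i = 0x12348580):
                      : //   load8  sees the low byte    0x80   -> buffer[1] == 0xFFFFFF80 (-128)
                      : //   load16 sees the low 16 bits 0x8580 -> buffer[2] == 0xFFFF8580 (-31360)
                      : //   load32 sees all 32 bits            -> buffer[3] == 0x12348580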
     494             : 
     495           8 : void RunLoadStoreZeroExtend32(TestAlignment t) {
     496             :   uint32_t buffer[4];
     497           8 :   RawMachineAssemblerTester<uint32_t> m;
     498           8 :   Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8());
     499           8 :   if (t == TestAlignment::kAligned) {
     500           4 :     Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
     501           4 :     Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Uint32());
     502           4 :     m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
     503           4 :     m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16);
     504           4 :     m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32);
     505           4 :   } else if (t == TestAlignment::kUnaligned) {
     506             :     Node* load16 =
     507           4 :         m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
     508             :     Node* load32 =
     509           4 :         m.UnalignedLoadFromPointer(&buffer[0], MachineType::Uint32());
     510           4 :     m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
     511             :     m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord32,
     512           4 :                               load16);
     513             :     m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord32,
     514           4 :                               load32);
     515             :   }
     516           8 :   m.Return(load8);
     517             : 
     518         936 :   FOR_UINT32_INPUTS(i) {
     519         464 :     buffer[0] = i;
     520             : 
     521         464 :     CHECK_EQ((i & 0xFF), m.Call());
     522         464 :     CHECK_EQ((i & 0xFF), buffer[1]);
     523         464 :     CHECK_EQ((i & 0xFFFF), buffer[2]);
     524         464 :     CHECK_EQ(i, buffer[3]);
     525             :   }
     526           8 : }
     527             : }  // namespace
     528             : 
     529       26643 : TEST(RunLoadStoreSignExtend32) {
     530           4 :   RunLoadStoreSignExtend32(TestAlignment::kAligned);
     531           4 : }
     532             : 
     533       26643 : TEST(RunUnalignedLoadStoreSignExtend32) {
     534           4 :   RunLoadStoreSignExtend32(TestAlignment::kUnaligned);
     535           4 : }
     536             : 
     537       26643 : TEST(RunLoadStoreZeroExtend32) {
     538           4 :   RunLoadStoreZeroExtend32(TestAlignment::kAligned);
     539           4 : }
     540             : 
     541       26643 : TEST(RunUnalignedLoadStoreZeroExtend32) {
     542           4 :   RunLoadStoreZeroExtend32(TestAlignment::kUnaligned);
     543           4 : }
     544             : 
     545             : #if V8_TARGET_ARCH_64_BIT
     546             : 
     547             : namespace {
     548             : void RunLoadStoreSignExtend64(TestAlignment t) {
      549             :   if ((true)) return;  // TODO(titzer): enable once sign extension of loads to 64 bits is supported.
     550             :   int64_t buffer[5];
     551             :   RawMachineAssemblerTester<int64_t> m;
     552             :   Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8());
     553             :   if (t == TestAlignment::kAligned) {
     554             :     Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
     555             :     Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Int32());
     556             :     Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Int64());
     557             :     m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
     558             :     m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16);
     559             :     m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32);
     560             :     m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64);
     561             :   } else if (t == TestAlignment::kUnaligned) {
     562             :     Node* load16 =
     563             :         m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
     564             :     Node* load32 =
     565             :         m.UnalignedLoadFromPointer(LSB(&buffer[0], 4), MachineType::Int32());
     566             :     Node* load64 = m.UnalignedLoadFromPointer(&buffer[0], MachineType::Int64());
     567             :     m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
     568             :     m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord64,
     569             :                               load16);
     570             :     m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord64,
     571             :                               load32);
     572             :     m.UnalignedStoreToPointer(&buffer[4], MachineRepresentation::kWord64,
     573             :                               load64);
     574             :   } else {
     575             :     UNREACHABLE();
     576             :   }
     577             :   m.Return(load8);
     578             : 
     579             :   FOR_INT64_INPUTS(i) {
     580             :     buffer[0] = i;
     581             : 
     582             :     CHECK_EQ(static_cast<int8_t>(i & 0xFF), m.Call());
     583             :     CHECK_EQ(static_cast<int8_t>(i & 0xFF), buffer[1]);
     584             :     CHECK_EQ(static_cast<int16_t>(i & 0xFFFF), buffer[2]);
     585             :     CHECK_EQ(static_cast<int32_t>(i & 0xFFFFFFFF), buffer[3]);
     586             :     CHECK_EQ(i, buffer[4]);
     587             :   }
     588             : }
     589             : 
     590           8 : void RunLoadStoreZeroExtend64(TestAlignment t) {
     591             :   if (kSystemPointerSize < 8) return;
     592             :   uint64_t buffer[5];
     593           8 :   RawMachineAssemblerTester<uint64_t> m;
     594           8 :   Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8());
     595           8 :   if (t == TestAlignment::kAligned) {
     596           4 :     Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
     597           4 :     Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Uint32());
     598           4 :     Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Uint64());
     599           4 :     m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
     600           4 :     m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16);
     601           4 :     m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32);
     602           4 :     m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64);
     603           4 :   } else if (t == TestAlignment::kUnaligned) {
     604             :     Node* load16 =
     605           4 :         m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
     606             :     Node* load32 =
     607           4 :         m.UnalignedLoadFromPointer(LSB(&buffer[0], 4), MachineType::Uint32());
     608             :     Node* load64 =
     609           4 :         m.UnalignedLoadFromPointer(&buffer[0], MachineType::Uint64());
     610           4 :     m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
     611             :     m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord64,
     612           4 :                               load16);
     613             :     m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord64,
     614           4 :                               load32);
     615             :     m.UnalignedStoreToPointer(&buffer[4], MachineRepresentation::kWord64,
     616           4 :                               load64);
     617             :   } else {
     618           0 :     UNREACHABLE();
     619             :   }
     620           8 :   m.Return(load8);
     621             : 
     622        1304 :   FOR_UINT64_INPUTS(i) {
     623         648 :     buffer[0] = i;
     624             : 
     625         648 :     CHECK_EQ((i & 0xFF), m.Call());
     626         648 :     CHECK_EQ((i & 0xFF), buffer[1]);
     627         648 :     CHECK_EQ((i & 0xFFFF), buffer[2]);
     628         648 :     CHECK_EQ((i & 0xFFFFFFFF), buffer[3]);
     629         648 :     CHECK_EQ(i, buffer[4]);
     630             :   }
     631             : }
     632             : 
     633             : }  // namespace
     634             : 
     635       26643 : TEST(RunLoadStoreSignExtend64) {
     636             :   RunLoadStoreSignExtend64(TestAlignment::kAligned);
     637           4 : }
     638             : 
     639       26643 : TEST(RunUnalignedLoadStoreSignExtend64) {
     640             :   RunLoadStoreSignExtend64(TestAlignment::kUnaligned);
     641           4 : }
     642             : 
     643       26643 : TEST(RunLoadStoreZeroExtend64) {
     644           4 :   RunLoadStoreZeroExtend64(TestAlignment::kAligned);
     645           4 : }
     646             : 
     647       26643 : TEST(RunUnalignedLoadStoreZeroExtend64) {
     648           4 :   RunLoadStoreZeroExtend64(TestAlignment::kUnaligned);
     649           4 : }
     650             : 
     651             : #endif
     652             : 
     653             : namespace {
     654             : template <typename IntType>
     655          12 : void LoadStoreTruncation(MachineType kRepresentation, TestAlignment t) {
     656             :   IntType input;
     657             : 
     658          12 :   RawMachineAssemblerTester<int32_t> m;
     659             :   Node* ap1;
     660          12 :   if (t == TestAlignment::kAligned) {
     661           8 :     Node* a = m.LoadFromPointer(&input, kRepresentation);
     662           8 :     ap1 = m.Int32Add(a, m.Int32Constant(1));
     663           8 :     m.StoreToPointer(&input, kRepresentation.representation(), ap1);
     664           4 :   } else if (t == TestAlignment::kUnaligned) {
     665           4 :     Node* a = m.UnalignedLoadFromPointer(&input, kRepresentation);
     666           4 :     ap1 = m.Int32Add(a, m.Int32Constant(1));
     667           4 :     m.UnalignedStoreToPointer(&input, kRepresentation.representation(), ap1);
     668             :   } else {
     669           0 :     UNREACHABLE();
     670             :   }
     671          12 :   m.Return(ap1);
     672             : 
     673             :   const IntType max = std::numeric_limits<IntType>::max();
     674             :   const IntType min = std::numeric_limits<IntType>::min();
     675             : 
     676             :   // Test upper bound.
     677          12 :   input = max;
     678          12 :   CHECK_EQ(max + 1, m.Call());
     679          12 :   CHECK_EQ(min, input);
     680             : 
     681             :   // Test lower bound.
     682          12 :   input = min;
     683          12 :   CHECK_EQ(static_cast<IntType>(max + 2), m.Call());
     684          12 :   CHECK_EQ(min + 1, input);
     685             : 
     686             :   // Test all one byte values that are not one byte bounds.
     687        6108 :   for (int i = -127; i < 127; i++) {
     688        3048 :     input = i;
     689        3048 :     int expected = i >= 0 ? i + 1 : max + (i - min) + 2;
     690        6096 :     CHECK_EQ(static_cast<IntType>(expected), m.Call());
     691        3048 :     CHECK_EQ(static_cast<IntType>(i + 1), input);
     692             :   }
     693          12 : }
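                      : // A worked instance of the expected-value formula above (int8_t, i = -5):
                      : //   expected = max + (i - min) + 2 = 127 + (-5 - (-128)) + 2 = 252,
                      : // and static_cast<int8_t>(252) == -4, matching the generated sequence:
                      : // load(-5) + 1 == -4, stored back with truncation.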
     694             : }  // namespace
     695             : 
     696       26643 : TEST(RunLoadStoreTruncation) {
     697           4 :   LoadStoreTruncation<int8_t>(MachineType::Int8(), TestAlignment::kAligned);
     698           4 :   LoadStoreTruncation<int16_t>(MachineType::Int16(), TestAlignment::kAligned);
     699           4 : }
     700             : 
     701       26643 : TEST(RunUnalignedLoadStoreTruncation) {
     702           4 :   LoadStoreTruncation<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
     703           4 : }
     704             : 
     705             : #undef LSB
     706             : #undef A_BILLION
     707             : #undef A_GIG
     708             : 
     709             : }  // namespace compiler
     710             : }  // namespace internal
     711       79917 : }  // namespace v8

Generated by: LCOV version 1.10